From 0aa6951ca3f1fbc31871f7ed9b15e250a8ebaa9f Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 18 Jul 2018 07:46:29 -0400 Subject: [PATCH 001/338] Changed address constructors to explicit --- libraries/app/application.cpp | 2 +- libraries/chain/account_object.cpp | 1 - .../include/graphene/chain/protocol/address.hpp | 14 ++++++++++---- .../include/graphene/chain/protocol/types.hpp | 1 + libraries/chain/protocol/address.cpp | 2 +- libraries/chain/protocol/types.cpp | 5 +++++ libraries/wallet/wallet.cpp | 14 +++++++------- tests/tests/operation_tests2.cpp | 8 ++++---- 8 files changed, 29 insertions(+), 18 deletions(-) diff --git a/libraries/app/application.cpp b/libraries/app/application.cpp index f1347c80f5..49a6fe2619 100644 --- a/libraries/app/application.cpp +++ b/libraries/app/application.cpp @@ -99,7 +99,7 @@ namespace detail { } initial_state.initial_accounts.emplace_back("nathan", nathan_key.get_public_key()); - initial_state.initial_balances.push_back({nathan_key.get_public_key(), + initial_state.initial_balances.push_back({address(nathan_key.get_public_key()), GRAPHENE_SYMBOL, GRAPHENE_MAX_SHARE_SUPPLY}); initial_state.initial_chain_id = fc::sha256::hash( "BOGUS" ); diff --git a/libraries/chain/account_object.cpp b/libraries/chain/account_object.cpp index efade46fe1..e30a8384b0 100644 --- a/libraries/chain/account_object.cpp +++ b/libraries/chain/account_object.cpp @@ -138,7 +138,6 @@ set
account_member_index::get_address_members(const account_object& a)c result.insert(auth.first); for( auto auth : a.active.address_auths ) result.insert(auth.first); - result.insert( a.options.memo_key ); return result; } diff --git a/libraries/chain/include/graphene/chain/protocol/address.hpp b/libraries/chain/include/graphene/chain/protocol/address.hpp index b225b42caf..7082d8350f 100644 --- a/libraries/chain/include/graphene/chain/protocol/address.hpp +++ b/libraries/chain/include/graphene/chain/protocol/address.hpp @@ -53,10 +53,10 @@ namespace graphene { namespace chain { public: address(); ///< constructs empty / null address explicit address( const std::string& base58str ); ///< converts to binary, validates checksum - address( const fc::ecc::public_key& pub ); ///< converts to binary - explicit address( const fc::ecc::public_key_data& pub ); ///< converts to binary - address( const pts_address& pub ); ///< converts to binary - address( const public_key_type& pubkey ); + explicit address( const fc::ecc::public_key& pub ); ///< converts to address + explicit address( const fc::ecc::public_key_data& pub ); ///< converts to address + explicit address( const pts_address& pub ); ///< converts to address + explicit address( const public_key_type& pubkey ); ///< converts to address static bool is_valid( const std::string& base58str, const std::string& prefix = GRAPHENE_ADDRESS_PREFIX ); @@ -74,6 +74,12 @@ namespace graphene { namespace chain { inline bool operator != ( const address& a, const address& b ) { return a.addr != b.addr; } inline bool operator < ( const address& a, const address& b ) { return a.addr < b.addr; } + inline bool operator == ( const pts_address& a, const address& b ) { return address(a) == b; } + inline bool operator == ( const address& a, const pts_address& b ) { return a == address(b); } + + inline bool operator == ( const public_key_type& a, const address& b ) { return address(a) == b; } + inline bool operator == ( const address& a, const public_key_type& b ) { return a == address(b); } + } } // namespace graphene::chain namespace fc diff --git a/libraries/chain/include/graphene/chain/protocol/types.hpp b/libraries/chain/include/graphene/chain/protocol/types.hpp index 5e0c4a0269..04700c932c 100644 --- a/libraries/chain/include/graphene/chain/protocol/types.hpp +++ b/libraries/chain/include/graphene/chain/protocol/types.hpp @@ -266,6 +266,7 @@ namespace graphene { namespace chain { friend bool operator == ( const public_key_type& p1, const fc::ecc::public_key& p2); friend bool operator == ( const public_key_type& p1, const public_key_type& p2); friend bool operator != ( const public_key_type& p1, const public_key_type& p2); + friend bool operator < ( const public_key_type& p1, const public_key_type& p2); }; struct extended_public_key_type diff --git a/libraries/chain/protocol/address.cpp b/libraries/chain/protocol/address.cpp index 19bb4df569..d4d7a0023f 100644 --- a/libraries/chain/protocol/address.cpp +++ b/libraries/chain/protocol/address.cpp @@ -50,7 +50,7 @@ namespace graphene { std::vector v; try { - v = fc::from_base58( base58str.substr( prefix_len ) ); + v = fc::from_base58( base58str.substr( prefix_len ) ); } catch( const fc::parse_error_exception& e ) { diff --git a/libraries/chain/protocol/types.cpp b/libraries/chain/protocol/types.cpp index a51474f0da..89a72bc276 100644 --- a/libraries/chain/protocol/types.cpp +++ b/libraries/chain/protocol/types.cpp @@ -87,6 +87,11 @@ namespace graphene { namespace chain { return p1.key_data != p2.key_data; } + bool 
operator < ( const public_key_type& p1, const public_key_type& p2) + { + return address(p1) < address(p2); + } + // extended_public_key_type extended_public_key_type::extended_public_key_type():key_data(){}; diff --git a/libraries/wallet/wallet.cpp b/libraries/wallet/wallet.cpp index 1347a26e8f..75aa93a563 100644 --- a/libraries/wallet/wallet.cpp +++ b/libraries/wallet/wallet.cpp @@ -3187,7 +3187,7 @@ bool wallet_api::import_key(string account_name_or_id, string wif_key) fc::optional optional_private_key = wif_to_key(wif_key); if (!optional_private_key) FC_THROW("Invalid private key"); - string shorthash = detail::address_to_shorthash(optional_private_key->get_public_key()); + string shorthash = detail::address_to_shorthash(address(optional_private_key->get_public_key())); copy_wallet_file( "before-import-key-" + shorthash ); if( my->import_key(account_name_or_id, wif_key) ) @@ -3844,7 +3844,7 @@ vector< signed_transaction > wallet_api_impl::import_balance( string name_or_id, continue; for( const public_key_type& pub : _wallet.extra_keys[ claimer.id ] ) { - addrs.push_back( pub ); + address pub_addr( address(pub) ); auto it = _keys.find( pub ); if( it != _keys.end() ) { @@ -3864,16 +3864,16 @@ vector< signed_transaction > wallet_api_impl::import_balance( string name_or_id, optional< private_key_type > key = wif_to_key( wif_key ); FC_ASSERT( key.valid(), "Invalid private key" ); fc::ecc::public_key pk = key->get_public_key(); - addrs.push_back( pk ); + addrs.push_back( address(pk) ); keys[addrs.back()] = *key; // see chain/balance_evaluator.cpp - addrs.push_back( pts_address( pk, false, 56 ) ); + addrs.push_back( address( pts_address( pk, false, 56 ) ) ); keys[addrs.back()] = *key; - addrs.push_back( pts_address( pk, true, 56 ) ); + addrs.push_back( address( pts_address( pk, true, 56 ) ) ); keys[addrs.back()] = *key; - addrs.push_back( pts_address( pk, false, 0 ) ); + addrs.push_back( address( pts_address( pk, false, 0 ) ) ); keys[addrs.back()] = *key; - addrs.push_back( pts_address( pk, true, 0 ) ); + addrs.push_back( address( pts_address( pk, true, 0 ) ) ); keys[addrs.back()] = *key; } } diff --git a/tests/tests/operation_tests2.cpp b/tests/tests/operation_tests2.cpp index 8b10fd8b2f..a457f8c044 100644 --- a/tests/tests/operation_tests2.cpp +++ b/tests/tests/operation_tests2.cpp @@ -1737,8 +1737,8 @@ BOOST_AUTO_TEST_CASE( balance_object_test ) database db; const uint32_t skip_flags = database::skip_undo_history_check; fc::temp_directory td( graphene::utilities::temp_directory_path() ); - genesis_state.initial_balances.push_back({generate_private_key("n").get_public_key(), GRAPHENE_SYMBOL, 1}); - genesis_state.initial_balances.push_back({generate_private_key("x").get_public_key(), GRAPHENE_SYMBOL, 1}); + genesis_state.initial_balances.push_back({address(generate_private_key("n").get_public_key()), GRAPHENE_SYMBOL, 1}); + genesis_state.initial_balances.push_back({address(generate_private_key("x").get_public_key()), GRAPHENE_SYMBOL, 1}); fc::time_point_sec starting_time = genesis_state.initial_timestamp + 3000; auto n_key = generate_private_key("n"); @@ -1747,14 +1747,14 @@ BOOST_AUTO_TEST_CASE( balance_object_test ) auto v2_key = generate_private_key("v2"); genesis_state_type::initial_vesting_balance_type vest; - vest.owner = v1_key.get_public_key(); + vest.owner = address(v1_key.get_public_key()); vest.asset_symbol = GRAPHENE_SYMBOL; vest.amount = 500; vest.begin_balance = vest.amount; vest.begin_timestamp = starting_time; vest.vesting_duration_seconds = 60; 
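// Illustrative sketch, not part of the patch: these test hunks wrap keys in address(...)
// because the constructors declared explicit above remove the old implicit conversions
// from key types, e.g. (using the test fixture's generate_private_key helper):
//   auto key = generate_private_key("v1");
//   address a1( key.get_public_key() );            // fine: explicit conversion
//   address a2 = key.get_public_key();             // no longer compiles once the ctor is explicit
//   vest.owner = address( key.get_public_key() );  // hence the explicit wrappers in this hunk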
genesis_state.initial_vesting_balances.push_back(vest); - vest.owner = v2_key.get_public_key(); + vest.owner = address(v2_key.get_public_key()); vest.begin_timestamp -= fc::seconds(30); vest.amount = 400; genesis_state.initial_vesting_balances.push_back(vest); From d5d518f6d6d163956de2caa5b2ffb4205dbaef8b Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 26 Jul 2018 16:05:31 -0400 Subject: [PATCH 002/338] Fix newly introduced wallet import_key issue --- libraries/wallet/wallet.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/wallet/wallet.cpp b/libraries/wallet/wallet.cpp index 75aa93a563..0ea9e33e07 100644 --- a/libraries/wallet/wallet.cpp +++ b/libraries/wallet/wallet.cpp @@ -3844,7 +3844,7 @@ vector< signed_transaction > wallet_api_impl::import_balance( string name_or_id, continue; for( const public_key_type& pub : _wallet.extra_keys[ claimer.id ] ) { - address pub_addr( address(pub) ); + addrs.push_back( address(pub) ); auto it = _keys.find( pub ); if( it != _keys.end() ) { From 17b7ac893fe01acd37b9e97955f7d34a095848a4 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 11 Aug 2019 08:48:46 -0400 Subject: [PATCH 003/338] Add get_block_operation_history API --- libraries/app/api.cpp | 14 ++++++++++ libraries/app/include/graphene/app/api.hpp | 18 +++++++++++++ .../chain/operation_history_object.hpp | 27 ++++++++++++++----- 3 files changed, 53 insertions(+), 6 deletions(-) diff --git a/libraries/app/api.cpp b/libraries/app/api.cpp index d661bf4735..f8a123dda0 100644 --- a/libraries/app/api.cpp +++ b/libraries/app/api.cpp @@ -437,6 +437,20 @@ namespace graphene { namespace app { return result; } + vector history_api::get_block_operation_history( + uint32_t block_num, + optional trx_in_block ) const + { + FC_ASSERT(_app.chain_database()); + const auto& db = *_app.chain_database(); + const auto& idx = db.get_index_type().indices().get(); + auto range = trx_in_block.valid() ? idx.equal_range( boost::make_tuple( block_num, *trx_in_block ) ) + : idx.equal_range( block_num ); + vector result; + std::copy( range.first, range.second, std::back_inserter( result ) ); + return result; + } + flat_set history_api::get_market_history_buckets()const { auto hist = _app.get_plugin( "market_history" ); diff --git a/libraries/app/include/graphene/app/api.hpp b/libraries/app/include/graphene/app/api.hpp index c85fab14b2..b86ad40920 100644 --- a/libraries/app/include/graphene/app/api.hpp +++ b/libraries/app/include/graphene/app/api.hpp @@ -187,6 +187,23 @@ namespace graphene { namespace app { unsigned limit = 100, uint64_t start = 0) const; + /** + * @brief Get all operations inside a block or a transaction, including virtual operations + * @param block_num the number (height) of the block to fetch + * @param trx_in_block the sequence of a transaction in the block, starts from @a 0, optional, + * if specified, will return only operations of that transaction; + * if omitted, will return all operations in the specified block + * @return a list of @a operation_history objects ordered by ID + * + * @note the data is fetched from @a account_history plugin, thus the result is possible to + * be incomplete due to the @a partial-operations option configured in the API node. + * For complete data, it is recommended to query from ElasticSearch where data is + * maintained by @a elastic_search plugin. 
+ */ + vector get_block_operation_history( + uint32_t block_num, + optional trx_in_block = {} ) const; + /** * @brief Get details of order executions occurred most recently in a trading pair * @param a Asset symbol or ID in a trading pair @@ -609,6 +626,7 @@ FC_API(graphene::app::history_api, (get_account_history_by_operations) (get_account_history_operations) (get_relative_account_history) + (get_block_operation_history) (get_fill_order_history) (get_market_history) (get_market_history_buckets) diff --git a/libraries/chain/include/graphene/chain/operation_history_object.hpp b/libraries/chain/include/graphene/chain/operation_history_object.hpp index 097892d33d..d8dc926640 100644 --- a/libraries/chain/include/graphene/chain/operation_history_object.hpp +++ b/libraries/chain/include/graphene/chain/operation_history_object.hpp @@ -98,10 +98,20 @@ namespace graphene { namespace chain { account_transaction_history_id_type next; }; + struct by_block; + typedef multi_index_container< operation_history_object, indexed_by< - ordered_unique< tag, member< object, object_id_type, &object::id > > + ordered_unique< tag, member< object, object_id_type, &object::id > >, + ordered_unique< tag, + composite_key< operation_history_object, + member< operation_history_object, uint32_t, &operation_history_object::block_num>, + member< operation_history_object, uint16_t, &operation_history_object::trx_in_block>, + member< operation_history_object, uint16_t, &operation_history_object::op_in_trx>, + member< operation_history_object, uint32_t, &operation_history_object::virtual_op> + > + > > > operation_history_multi_index_type; @@ -117,23 +127,28 @@ namespace graphene { namespace chain { ordered_unique< tag, member< object, object_id_type, &object::id > >, ordered_unique< tag, composite_key< account_transaction_history_object, - member< account_transaction_history_object, account_id_type, &account_transaction_history_object::account>, + member< account_transaction_history_object, account_id_type, + &account_transaction_history_object::account>, member< account_transaction_history_object, uint64_t, &account_transaction_history_object::sequence> > >, ordered_unique< tag, composite_key< account_transaction_history_object, - member< account_transaction_history_object, account_id_type, &account_transaction_history_object::account>, - member< account_transaction_history_object, operation_history_id_type, &account_transaction_history_object::operation_id> + member< account_transaction_history_object, account_id_type, + &account_transaction_history_object::account>, + member< account_transaction_history_object, operation_history_id_type, + &account_transaction_history_object::operation_id> > >, ordered_non_unique< tag, - member< account_transaction_history_object, operation_history_id_type, &account_transaction_history_object::operation_id> + member< account_transaction_history_object, operation_history_id_type, + &account_transaction_history_object::operation_id> > > > account_transaction_history_multi_index_type; - typedef generic_index account_transaction_history_index; + typedef generic_index account_transaction_history_index; } } // graphene::chain From d6234717d5ec4a6dd0dd7548fd2057ea85b2465e Mon Sep 17 00:00:00 2001 From: Alfredo Date: Fri, 20 Sep 2019 14:55:21 -0300 Subject: [PATCH 004/338] Fix for ES7 --- .../plugins/elasticsearch/elasticsearch_plugin.cpp | 10 +++++++++- libraries/plugins/es_objects/es_objects.cpp | 11 +++++++++-- libraries/utilities/elasticsearch.cpp | 14 ++++++++++++++ 
.../include/graphene/utilities/elasticsearch.hpp | 1 + 4 files changed, 33 insertions(+), 3 deletions(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index 92a5f41f19..0ac5adf33d 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -74,6 +74,7 @@ class elasticsearch_plugin_impl std::string bulk_line; std::string index_name; bool is_sync = false; + bool esge7 = false; private: bool add_elasticsearch( const account_id_type account_id, const optional& oho, const uint32_t block_number ); const account_transaction_history_object& addNewEntry(const account_statistics_object& stats_obj, @@ -286,6 +287,7 @@ bool elasticsearch_plugin_impl::add_elasticsearch( const account_id_type account const auto &stats_obj = getStatsObject(account_id); const auto &ath = addNewEntry(stats_obj, account_id, oho); growStats(stats_obj, ath); + if(block_number > _elasticsearch_start_es_after_block) { createBulkLine(ath); prepareBulk(ath.id); @@ -354,7 +356,8 @@ void elasticsearch_plugin_impl::prepareBulk(const account_transaction_history_id const std::string _id = fc::json::to_string(ath_id); fc::mutable_variant_object bulk_header; bulk_header["_index"] = index_name; - bulk_header["_type"] = "data"; + if(!esge7) + bulk_header["_type"] = "data"; bulk_header["_id"] = fc::to_string(ath_id.space_id) + "." + fc::to_string(ath_id.type_id) + "." + fc::to_string(ath_id.instance.value); prepare = graphene::utilities::createBulk(bulk_header, std::move(bulk_line)); @@ -513,6 +516,11 @@ void elasticsearch_plugin::plugin_startup() if(!graphene::utilities::checkES(es)) FC_THROW_EXCEPTION(fc::exception, "ES database is not up in url ${url}", ("url", my->_elasticsearch_node_url)); + + const auto es_version = graphene::utilities::getVersion(es); + if(std::stoi(es_version.substr(0,1)) >= 7) + my->esge7 = true; + ilog("elasticsearch ACCOUNT HISTORY: plugin_startup() begin"); } diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index 79b9f1cdba..9c94962ed7 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -71,6 +71,7 @@ class es_objects_plugin_impl uint32_t block_number; fc::time_point_sec block_time; + bool esge7 = false; private: template @@ -224,7 +225,8 @@ void es_objects_plugin_impl::remove_from_database( object_id_type id, std::strin fc::mutable_variant_object delete_line; delete_line["_id"] = string(id); delete_line["_index"] = _es_objects_index_prefix + index; - delete_line["_type"] = "data"; + if(!esge7) + delete_line["_type"] = "data"; fc::mutable_variant_object final_delete_line; final_delete_line["delete"] = delete_line; prepare.push_back(fc::json::to_string(final_delete_line)); @@ -238,7 +240,8 @@ void es_objects_plugin_impl::prepareTemplate(T blockchain_object, string index_n { fc::mutable_variant_object bulk_header; bulk_header["_index"] = _es_objects_index_prefix + index_name; - bulk_header["_type"] = "data"; + if(!esge7) + bulk_header["_type"] = "data"; if(_es_objects_keep_only_current) { bulk_header["_id"] = string(blockchain_object.id); @@ -403,6 +406,10 @@ void es_objects_plugin::plugin_startup() if(!graphene::utilities::checkES(es)) FC_THROW_EXCEPTION(fc::exception, "ES database is not up in url ${url}", ("url", my->_es_objects_elasticsearch_url)); ilog("elasticsearch OBJECTS: plugin_startup() begin"); + + const auto es_version = 
graphene::utilities::getVersion(es); + if(std::stoi(es_version.substr(0,1)) >= 7) + my->esge7 = true; } } } \ No newline at end of file diff --git a/libraries/utilities/elasticsearch.cpp b/libraries/utilities/elasticsearch.cpp index 0b94de50c9..ed51be1c60 100644 --- a/libraries/utilities/elasticsearch.cpp +++ b/libraries/utilities/elasticsearch.cpp @@ -49,6 +49,20 @@ bool checkES(ES& es) return true; } + +const std::string getVersion(ES& es) +{ + graphene::utilities::CurlRequest curl_request; + curl_request.handler = es.curl; + curl_request.url = es.elasticsearch_url; + curl_request.auth = es.auth; + curl_request.type = "GET"; + + fc::variant response = fc::json::from_string(doCurl(curl_request)); + + return response["version"]["number"].as_string(); +} + const std::string simpleQuery(ES& es) { graphene::utilities::CurlRequest curl_request; diff --git a/libraries/utilities/include/graphene/utilities/elasticsearch.hpp b/libraries/utilities/include/graphene/utilities/elasticsearch.hpp index e8790c1ea5..4ff9ffe460 100644 --- a/libraries/utilities/include/graphene/utilities/elasticsearch.hpp +++ b/libraries/utilities/include/graphene/utilities/elasticsearch.hpp @@ -56,6 +56,7 @@ namespace graphene { namespace utilities { bool SendBulk(ES&& es); const std::vector createBulk(const fc::mutable_variant_object& bulk_header, std::string&& data); bool checkES(ES& es); + const std::string getVersion(ES& es); const std::string simpleQuery(ES& es); bool deleteAll(ES& es); bool handleBulkResponse(long http_code, const std::string& CurlReadBuffer); From ae067f0999bd209444bde86dd3b8d481e10ff904 Mon Sep 17 00:00:00 2001 From: Alfredo Date: Fri, 20 Sep 2019 20:42:54 -0300 Subject: [PATCH 005/338] change major version check --- libraries/plugins/elasticsearch/elasticsearch_plugin.cpp | 3 ++- libraries/plugins/es_objects/es_objects.cpp | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index 0ac5adf33d..ea653c1258 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -518,7 +518,8 @@ void elasticsearch_plugin::plugin_startup() FC_THROW_EXCEPTION(fc::exception, "ES database is not up in url ${url}", ("url", my->_elasticsearch_node_url)); const auto es_version = graphene::utilities::getVersion(es); - if(std::stoi(es_version.substr(0,1)) >= 7) + auto dot_pos = es_version.find('.'); + if(std::stoi(es_version.substr(0,dot_pos)) >= 7) my->esge7 = true; ilog("elasticsearch ACCOUNT HISTORY: plugin_startup() begin"); diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index 9c94962ed7..c5875bda0d 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -408,7 +408,8 @@ void es_objects_plugin::plugin_startup() ilog("elasticsearch OBJECTS: plugin_startup() begin"); const auto es_version = graphene::utilities::getVersion(es); - if(std::stoi(es_version.substr(0,1)) >= 7) + auto dot_pos = es_version.find('.'); + if(std::stoi(es_version.substr(0,dot_pos)) >= 7) my->esge7 = true; } From d5cf0ef890c01c4e2a404f69e0f7645f34d069ae Mon Sep 17 00:00:00 2001 From: John Jones Date: Thu, 16 May 2019 17:03:10 -0500 Subject: [PATCH 006/338] Add p2p security options --- libraries/app/application.cpp | 85 +---- libraries/app/application_impl.hxx | 2 - libraries/net/include/graphene/net/node.hpp | 33 +- 
.../include/graphene/net/peer_connection.hpp | 4 +- libraries/net/node.cpp | 344 +++++++++++------- libraries/net/node_impl.hxx | 103 +++++- 6 files changed, 354 insertions(+), 217 deletions(-) diff --git a/libraries/app/application.cpp b/libraries/app/application.cpp index de5bf8a334..db7416ab01 100644 --- a/libraries/app/application.cpp +++ b/libraries/app/application.cpp @@ -43,7 +43,6 @@ #include #include #include -#include #include #include @@ -125,41 +124,14 @@ void application_impl::reset_p2p_node(const fc::path& data_dir) if( _options->count("seed-node") ) { auto seeds = _options->at("seed-node").as>(); - for( const string& endpoint_string : seeds ) - { - try { - std::vector endpoints = resolve_string_to_ip_endpoints(endpoint_string); - for (const fc::ip::endpoint& endpoint : endpoints) - { - ilog("Adding seed node ${endpoint}", ("endpoint", endpoint)); - _p2p_network->add_node(endpoint); - _p2p_network->connect_to_endpoint(endpoint); - } - } catch( const fc::exception& e ) { - wlog( "caught exception ${e} while adding seed node ${endpoint}", - ("e", e.to_detail_string())("endpoint", endpoint_string) ); - } - } + _p2p_network->add_seed_nodes(seeds); } if( _options->count("seed-nodes") ) { auto seeds_str = _options->at("seed-nodes").as(); auto seeds = fc::json::from_string(seeds_str).as>(2); - for( const string& endpoint_string : seeds ) - { - try { - std::vector endpoints = resolve_string_to_ip_endpoints(endpoint_string); - for (const fc::ip::endpoint& endpoint : endpoints) - { - ilog("Adding seed node ${endpoint}", ("endpoint", endpoint)); - _p2p_network->add_node(endpoint); - } - } catch( const fc::exception& e ) { - wlog( "caught exception ${e} while adding seed node ${endpoint}", - ("e", e.to_detail_string())("endpoint", endpoint_string) ); - } - } + _p2p_network->add_seed_nodes(seeds); } else { @@ -167,20 +139,7 @@ void application_impl::reset_p2p_node(const fc::path& data_dir) vector seeds = { #include "../egenesis/seed-nodes.txt" }; - for( const string& endpoint_string : seeds ) - { - try { - std::vector endpoints = resolve_string_to_ip_endpoints(endpoint_string); - for (const fc::ip::endpoint& endpoint : endpoints) - { - ilog("Adding seed node ${endpoint}", ("endpoint", endpoint)); - _p2p_network->add_node(endpoint); - } - } catch( const fc::exception& e ) { - wlog( "caught exception ${e} while adding seed node ${endpoint}", - ("e", e.to_detail_string())("endpoint", endpoint_string) ); - } - } + _p2p_network->add_seed_nodes(seeds); } if( _options->count("p2p-endpoint") ) @@ -190,42 +149,15 @@ void application_impl::reset_p2p_node(const fc::path& data_dir) _p2p_network->listen_to_p2p_network(); ilog("Configured p2p node to listen on ${ip}", ("ip", _p2p_network->get_actual_listening_endpoint())); + if ( _options->count("accept_incoming_connections") ) + _p2p_network->accept_incoming_connections( _options->at("accept_incoming_connections").as() ); + _p2p_network->connect_to_p2p_network(); _p2p_network->sync_from(net::item_id(net::core_message_type_enum::block_message_type, _chain_db->head_block_id()), std::vector()); } FC_CAPTURE_AND_RETHROW() } -std::vector application_impl::resolve_string_to_ip_endpoints(const std::string& endpoint_string) -{ - try - { - string::size_type colon_pos = endpoint_string.find(':'); - if (colon_pos == std::string::npos) - FC_THROW("Missing required port number in endpoint string \"${endpoint_string}\"", - ("endpoint_string", endpoint_string)); - std::string port_string = endpoint_string.substr(colon_pos + 1); - try - { - uint16_t port = 
boost::lexical_cast(port_string); - - std::string hostname = endpoint_string.substr(0, colon_pos); - std::vector endpoints = fc::resolve(hostname, port); - if (endpoints.empty()) - FC_THROW_EXCEPTION( fc::unknown_host_exception, - "The host name can not be resolved: ${hostname}", - ("hostname", hostname) ); - return endpoints; - } - catch (const boost::bad_lexical_cast&) - { - FC_THROW("Bad port: ${port}", ("port", port_string)); - } - } - FC_CAPTURE_AND_RETHROW((endpoint_string)) -} - - void application_impl::new_connection( const fc::http::websocket_connection_ptr& c ) { auto wsc = std::make_shared(c, GRAPHENE_NET_MAX_NESTED_OBJECTS); @@ -1100,6 +1032,11 @@ void application::set_program_options(boost::program_options::options_descriptio "For database_api_impl::get_withdraw_permissions_by_giver to set max limit value") ("api-limit-get-withdraw-permissions-by-recipient",boost::program_options::value()->default_value(101), "For database_api_impl::get_withdraw_permissions_by_recipient to set max limit value") + "For database_api_impl::get_order_book to set its default limit value as 50") + ("accept-incoming-connections", bpo::value()->implicit_value(true), "Accept incoming connections") + ("advertise-peer-algorithm", bpo::value()->implicit_value("all"), "Determines which peers are advertised") + ("advertise-peer-list", bpo::value>()->composing(), + "P2P nodes to advertise (may specify multiple times") ; command_line_options.add(configuration_file_options); command_line_options.add_options() diff --git a/libraries/app/application_impl.hxx b/libraries/app/application_impl.hxx index 175648e10f..accc8fe4f1 100644 --- a/libraries/app/application_impl.hxx +++ b/libraries/app/application_impl.hxx @@ -22,8 +22,6 @@ class application_impl : public net::node_delegate void reset_p2p_node(const fc::path& data_dir); - std::vector resolve_string_to_ip_endpoints(const std::string& endpoint_string); - void new_connection( const fc::http::websocket_connection_ptr& c ); void reset_websocket_server(); diff --git a/libraries/net/include/graphene/net/node.hpp b/libraries/net/include/graphene/net/node.hpp index fe03ac0cb6..f856105dd6 100644 --- a/libraries/net/include/graphene/net/node.hpp +++ b/libraries/net/include/graphene/net/node.hpp @@ -23,6 +23,7 @@ */ #pragma once +#include #include #include #include @@ -198,8 +199,12 @@ namespace graphene { namespace net { void close(); void set_node_delegate( node_delegate* del ); + void set_advertise_algorithm( std::string algo, + const fc::optional>& advertise_list = fc::optional>() ); void load_configuration( const fc::path& configuration_directory ); + + void listen_on_endpoint( const fc::ip::endpoint& ep, bool wait_if_not_available ); virtual void listen_to_p2p_network(); virtual void connect_to_p2p_network(); @@ -211,16 +216,36 @@ namespace graphene { namespace net { */ void add_node( const fc::ip::endpoint& ep ); + /**** + * @brief Add an endpoint as a seed to the p2p network + * + * @param seed_string the url + * @param connect_immediately will start the connection process immediately + */ + void add_seed_node(const std::string& seed_string); + + /***** + * @brief add a list of nodes to seed the p2p network + * @param seeds a vector of url strings + * @param connect_immediately attempt a connection immediately + */ + void add_seed_nodes(std::vector seeds); + /** * Attempt to connect to the specified endpoint immediately. 
*/ virtual void connect_to_endpoint( const fc::ip::endpoint& ep ); /** - * Specifies the network interface and port upon which incoming - * connections should be accepted. + * @brief Helper to convert a string to a collection of endpoints + * + * This converts a string (i.e. "bitshares.eu:665535" to a collection of endpoints. + * NOTE: Throws an exception if not in correct format or was unable to resolve URL. + * + * @param in the incoming string + * @returns a vector of endpoints */ - void listen_on_endpoint( const fc::ip::endpoint& ep, bool wait_if_not_available ); + static std::vector resolve_string_to_ip_endpoints(const std::string& in); /** * Call with true to enable listening for incoming connections @@ -292,7 +317,7 @@ namespace graphene { namespace net { void disable_peer_advertising(); fc::variant_object get_call_statistics() const; - private: + protected: std::unique_ptr my; }; diff --git a/libraries/net/include/graphene/net/peer_connection.hpp b/libraries/net/include/graphene/net/peer_connection.hpp index a00e43dcbf..7ff8a96d8f 100644 --- a/libraries/net/include/graphene/net/peer_connection.hpp +++ b/libraries/net/include/graphene/net/peer_connection.hpp @@ -269,7 +269,9 @@ namespace graphene { namespace net unsigned _send_message_queue_tasks_running = 0; // temporary debugging #endif bool _currently_handling_message = false; // true while we're in the middle of handling a message from the remote system + protected: peer_connection(peer_connection_delegate* delegate); + private: void destroy(); public: static peer_connection_ptr make_shared(peer_connection_delegate* delegate); // use this instead of the constructor @@ -283,7 +285,7 @@ namespace graphene { namespace net void on_connection_closed(message_oriented_connection* originating_connection) override; void send_queueable_message(std::unique_ptr&& message_to_send); - void send_message(const message& message_to_send, size_t message_send_time_field_offset = (size_t)-1); + virtual void send_message(const message& message_to_send, size_t message_send_time_field_offset = (size_t)-1); void send_item(const item_id& item_to_send); void close_connection(); void destroy_connection(); diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 06aebf6fbd..71134780ba 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -46,14 +47,6 @@ #include #include -#include -#include -#include -#include -#include -#include -#include - #include #include #include @@ -68,8 +61,8 @@ #include #include #include -#include #include +#include #include #include @@ -123,66 +116,9 @@ #define testnetlog(...) 
do {} while (0) #endif -namespace graphene { namespace net { +#include "node_impl.hxx" - namespace detail - { - namespace bmi = boost::multi_index; - class blockchain_tied_message_cache - { - private: - static const uint32_t cache_duration_in_blocks = GRAPHENE_NET_MESSAGE_CACHE_DURATION_IN_BLOCKS; - - struct message_hash_index{}; - struct message_contents_hash_index{}; - struct block_clock_index{}; - struct message_info - { - message_hash_type message_hash; - message message_body; - uint32_t block_clock_when_received; - - // for network performance stats - message_propagation_data propagation_data; - fc::uint160_t message_contents_hash; // hash of whatever the message contains (if it's a transaction, this is the transaction id, if it's a block, it's the block_id) - - message_info( const message_hash_type& message_hash, - const message& message_body, - uint32_t block_clock_when_received, - const message_propagation_data& propagation_data, - fc::uint160_t message_contents_hash ) : - message_hash( message_hash ), - message_body( message_body ), - block_clock_when_received( block_clock_when_received ), - propagation_data( propagation_data ), - message_contents_hash( message_contents_hash ) - {} - }; - typedef boost::multi_index_container - < message_info, - bmi::indexed_by< bmi::ordered_unique< bmi::tag, - bmi::member >, - bmi::ordered_non_unique< bmi::tag, - bmi::member >, - bmi::ordered_non_unique< bmi::tag, - bmi::member > > - > message_cache_container; - - message_cache_container _message_cache; - - uint32_t block_clock; - - public: - blockchain_tied_message_cache() : - block_clock( 0 ) - {} - void block_accepted(); - void cache_message( const message& message_to_cache, const message_hash_type& hash_of_message_to_cache, - const message_propagation_data& propagation_data, const fc::uint160_t& message_content_hash ); - message get_message( const message_hash_type& hash_of_message_to_lookup ); - message_propagation_data get_message_propagation_data( const fc::uint160_t& hash_of_message_contents_to_lookup ) const; - size_t size() const { return _message_cache.size(); } - }; +namespace graphene { namespace net { namespace detail { void blockchain_tied_message_cache::block_accepted() { @@ -191,71 +127,35 @@ namespace graphene { namespace net { _message_cache.get().erase(_message_cache.get().begin(), _message_cache.get().lower_bound(block_clock - cache_duration_in_blocks ) ); } - void blockchain_tied_message_cache::cache_message( const message& message_to_cache, - const message_hash_type& hash_of_message_to_cache, - const message_propagation_data& propagation_data, - const fc::uint160_t& message_content_hash ) + const message_hash_type& hash_of_message_to_cache, + const message_propagation_data& propagation_data, + const fc::uint160_t& message_content_hash ) { _message_cache.insert( message_info(hash_of_message_to_cache, - message_to_cache, - block_clock, - propagation_data, - message_content_hash ) ); + message_to_cache, block_clock, propagation_data, message_content_hash ) ); } message blockchain_tied_message_cache::get_message( const message_hash_type& hash_of_message_to_lookup ) { message_cache_container::index::type::const_iterator iter = - _message_cache.get().find(hash_of_message_to_lookup ); + _message_cache.get().find(hash_of_message_to_lookup ); if( iter != _message_cache.get().end() ) return iter->message_body; FC_THROW_EXCEPTION( fc::key_not_found_exception, "Requested message not in cache" ); } - message_propagation_data blockchain_tied_message_cache::get_message_propagation_data( 
const fc::uint160_t& hash_of_message_contents_to_lookup ) const { if( hash_of_message_contents_to_lookup != fc::uint160_t() ) { message_cache_container::index::type::const_iterator iter = - _message_cache.get().find(hash_of_message_contents_to_lookup ); + _message_cache.get().find(hash_of_message_contents_to_lookup ); if( iter != _message_cache.get().end() ) return iter->propagation_data; } FC_THROW_EXCEPTION( fc::key_not_found_exception, "Requested message not in cache" ); } -///////////////////////////////////////////////////////////////////////////////////////////////////////// - - // This specifies configuration info for the local node. It's stored as JSON - // in the configuration directory (application data directory) - struct node_configuration - { - node_configuration() : accept_incoming_connections(true), wait_if_endpoint_is_busy(true) {} - - fc::ip::endpoint listen_endpoint; - bool accept_incoming_connections; - bool wait_if_endpoint_is_busy; - /** - * Originally, our p2p code just had a 'node-id' that was a random number identifying this node - * on the network. This is now a private key/public key pair, where the public key is used - * in place of the old random node-id. The private part is unused, but might be used in - * the future to support some notion of trusted peers. - */ - fc::ecc::private_key private_key; - }; - - -} } } // end namespace graphene::net::detail -FC_REFLECT(graphene::net::detail::node_configuration, (listen_endpoint) - (accept_incoming_connections) - (wait_if_endpoint_is_busy) - (private_key)); - -#include "node_impl.hxx" - -namespace graphene { namespace net { namespace detail { - void node_impl_deleter::operator()(node_impl* impl_to_delete) { #ifdef P2P_IN_DEDICATED_THREAD @@ -285,6 +185,88 @@ namespace graphene { namespace net { namespace detail { #define MAXIMUM_NUMBER_OF_BLOCKS_TO_HANDLE_AT_ONE_TIME 200 #define MAXIMUM_NUMBER_OF_BLOCKS_TO_PREFETCH (10 * MAXIMUM_NUMBER_OF_BLOCKS_TO_HANDLE_AT_ONE_TIME) + /****** + * Use information passed from command line or config file to advertise nodes + */ + class list_address_builder : public node_impl::address_builder + { + public: + list_address_builder(fc::optional> address_list) + { + FC_ASSERT( address_list.valid(), "advertise-peer-list must be included" ); + + advertise_list.reserve( address_list->size() ); + auto& list = advertise_list; + std::for_each( address_list->begin(), address_list->end(), [&list]( std::string str ) { + // ignore fc exceptions (like poorly formatted endpoints) + try + { + list.emplace_back( graphene::net::address_info( + fc::ip::endpoint::from_string(str), + fc::time_point_sec(), + fc::microseconds(0), + node_id_t(), + peer_connection_direction::unknown, + firewalled_state::unknown )); + } + catch(const fc::exception& ) { + wlog( "Address ${addr} invalid.", ("addr", str) ); + } + } ); + } + + void build(node_impl* impl, address_message& reply) + { + reply.addresses = advertise_list; + } + private: + std::vector advertise_list; + }; + + /**** + * Advertise all nodes except a predefined list + */ + class exclude_address_builder : public node_impl::address_builder + { + public: + exclude_address_builder(const fc::optional>& address_list) + { + FC_ASSERT( address_list.valid(), "advertise-peer-list must be included" ); + std::for_each(address_list->begin(), address_list->end(), [&exclude_list = exclude_list](std::string input) + { + exclude_list.insert(input); + }); + } + void build(node_impl* impl, address_message& reply) + { + reply.addresses.reserve(impl->_active_connections.size()); 
+ // filter out those in the exclude list + for(const peer_connection_ptr& active_peer : impl->_active_connections) + { + if (exclude_list.find( *active_peer->get_remote_endpoint() ) == exclude_list.end()) + reply.addresses.emplace_back(update_address_record(impl, active_peer)); + } + reply.addresses.shrink_to_fit(); + } + private: + fc::flat_set exclude_list; + }; + + /*** + * Return all peers when node asks + */ + class all_address_builder : public node_impl::address_builder + { + void build( node_impl* impl, address_message& reply ) + { + reply.addresses.reserve(impl->_active_connections.size()); + for (const peer_connection_ptr& active_peer : impl->_active_connections) + { + reply.addresses.emplace_back(update_address_record(impl, active_peer)); + } + } + }; + node_impl::node_impl(const std::string& user_agent) : #ifdef P2P_IN_DEDICATED_THREAD _thread(std::make_shared("p2p")), @@ -303,10 +285,10 @@ namespace graphene { namespace net { namespace detail { _peer_connection_retry_timeout(GRAPHENE_NET_DEFAULT_PEER_CONNECTION_RETRY_TIME), _peer_inactivity_timeout(GRAPHENE_NET_PEER_HANDSHAKE_INACTIVITY_TIMEOUT), _most_recent_blocks_accepted(_maximum_number_of_connections), + _sync_item_type(0), _total_number_of_unfetched_items(0), _rate_limiter(0, 0), _last_reported_number_of_connections(0), - _peer_advertising_disabled(false), _average_network_read_speed_seconds(60), _average_network_write_speed_seconds(60), _average_network_read_speed_minutes(60), @@ -320,6 +302,7 @@ namespace graphene { namespace net { namespace detail { _maximum_number_of_sync_blocks_to_prefetch(MAXIMUM_NUMBER_OF_BLOCKS_TO_PREFETCH), _maximum_blocks_per_peer_during_syncing(GRAPHENE_NET_MAX_BLOCKS_PER_PEER_DURING_SYNCING) { + _address_builder = std::make_shared(); _rate_limiter.set_actual_rate_time_constant(fc::seconds(2)); fc::rand_bytes((char*) _node_id.data(), (int)_node_id.size()); } @@ -1658,35 +1641,51 @@ namespace graphene { namespace net { namespace detail { FC_THROW( "unexpected connection_rejected_message from peer" ); } + address_info node_impl::address_builder::update_address_record( node_impl* impl, const peer_connection_ptr& active_peer) + { + fc::optional updated_peer_record = + impl->_potential_peer_db.lookup_entry_for_endpoint(*active_peer->get_remote_endpoint()); + + if (updated_peer_record) + { + updated_peer_record->last_seen_time = fc::time_point::now(); + impl->_potential_peer_db.update_entry(*updated_peer_record); + } + + return address_info(*active_peer->get_remote_endpoint(), fc::time_point::now(), active_peer->round_trip_delay, + active_peer->node_id, active_peer->direction, active_peer->is_firewalled); + } + void node_impl::on_address_request_message(peer_connection* originating_peer, const address_request_message& address_request_message_received) { VERIFY_CORRECT_THREAD(); dlog("Received an address request message"); address_message reply; - if (!_peer_advertising_disabled) - { - reply.addresses.reserve(_active_connections.size()); - for (const peer_connection_ptr& active_peer : _active_connections) - { - fc::optional updated_peer_record = _potential_peer_db.lookup_entry_for_endpoint(*active_peer->get_remote_endpoint()); - if (updated_peer_record) - { - updated_peer_record->last_seen_time = fc::time_point::now(); - _potential_peer_db.update_entry(*updated_peer_record); - } - - reply.addresses.emplace_back(address_info(*active_peer->get_remote_endpoint(), - fc::time_point::now(), - active_peer->round_trip_delay, - active_peer->node_id, - active_peer->direction, - active_peer->is_firewalled)); - 
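// Illustrative note, not part of the diff: the three builders above are selected by
// set_advertise_algorithm (just below); the new advertise-peer-algorithm and
// advertise-peer-list options added in application.cpp earlier in this patch are
// presumably wired to that mapping:
//   "all"          -> all_address_builder ("all" is also the option's implicit value)
//   "list"         -> list_address_builder, advertises only the configured endpoints
//   "exclude_list" -> exclude_address_builder, advertises active peers not on the configured list
//   "nothing"      -> no builder is installed, so an address_request gets an empty address_message reply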
} - } + if (_address_builder != nullptr ) + _address_builder->build( this, reply ); originating_peer->send_message(reply); } + void node_impl::set_advertise_algorithm( std::string algo, + const fc::optional>& advertise_list ) + { + if (algo == "exclude_list") + { + _address_builder = std::make_shared(advertise_list); + } + else if (algo == "list") + { + _address_builder = std::make_shared(advertise_list); + } + else if (algo == "nothing") + { + _address_builder = nullptr; + } + else + _address_builder = std::make_shared(); + } + void node_impl::on_address_message(peer_connection* originating_peer, const address_message& address_message_received) { VERIFY_CORRECT_THREAD(); @@ -2233,7 +2232,7 @@ namespace graphene { namespace net { namespace detail { } } - message node_impl::get_message_for_item(const item_id& item) + graphene::net::message node_impl::get_message_for_item(const item_id& item) { try { @@ -4199,6 +4198,17 @@ namespace graphene { namespace net { namespace detail { trigger_p2p_network_connect_loop(); } + void node_impl::add_seed_node(const std::string& endpoint_string) + { + VERIFY_CORRECT_THREAD(); + std::vector endpoints = graphene::net::node::resolve_string_to_ip_endpoints(endpoint_string); + for (const fc::ip::endpoint& endpoint : endpoints) + { + ilog("Adding seed node ${endpoint}", ("endpoint", endpoint)); + add_node(endpoint); + } + } + void node_impl::initiate_connect_to(const peer_connection_ptr& new_peer) { new_peer->get_socket().open(); @@ -4626,7 +4636,7 @@ namespace graphene { namespace net { namespace detail { void node_impl::disable_peer_advertising() { VERIFY_CORRECT_THREAD(); - _peer_advertising_disabled = true; + _address_builder = nullptr; } fc::variant_object node_impl::get_call_statistics() const @@ -5105,4 +5115,72 @@ namespace graphene { namespace net { namespace detail { } // end namespace detail + /*** + * @brief Helper to convert a string to a collection of endpoints + * + * This converts a string (i.e. "bitshares.eu:665535" to a collection of endpoints. + * NOTE: Throws an exception if not in correct format or was unable to resolve URL. 
+ * + * @param in the incoming string + * @returns a vector of endpoints + */ + std::vector node::resolve_string_to_ip_endpoints(const std::string& in) + { + try + { + std::string::size_type colon_pos = in.find(':'); + if (colon_pos == std::string::npos) + FC_THROW("Missing required port number in endpoint string \"${endpoint_string}\"", + ("endpoint_string", in)); + std::string port_string = in.substr(colon_pos + 1); + try + { + uint16_t port = boost::lexical_cast(port_string); + + std::string hostname = in.substr(0, colon_pos); + std::vector endpoints = fc::resolve(hostname, port); + if (endpoints.empty()) + FC_THROW_EXCEPTION( fc::unknown_host_exception, + "The host name can not be resolved: ${hostname}", + ("hostname", hostname) ); + return endpoints; + } + catch (const boost::bad_lexical_cast&) + { + FC_THROW("Bad port: ${port}", ("port", port_string)); + } + } + FC_CAPTURE_AND_RETHROW((in)) + } + + void node::add_seed_node(const std::string& endpoint_string) + { + INVOKE_IN_IMPL(add_seed_node, endpoint_string); + } + + /***** + * @brief add a list of nodes to seed the p2p network + * @param seeds a vector of url strings + * @param connect_immediately attempt a connection immediately + */ + void node::add_seed_nodes(std::vector seeds) + { + for(const std::string& endpoint_string : seeds ) + { + try { + INVOKE_IN_IMPL(add_seed_node, endpoint_string); + } catch( const fc::exception& e ) { + wlog( "caught exception ${e} while adding seed node ${endpoint}", + ("e", e.to_detail_string())("endpoint", endpoint_string) ); + } + } + + } + + void node::set_advertise_algorithm( std::string algo, const fc::optional>& advertise_list ) + { + my->set_advertise_algorithm( algo, advertise_list ); + } + + } } // end namespace graphene::net diff --git a/libraries/net/node_impl.hxx b/libraries/net/node_impl.hxx index 7d31d16eea..961366027b 100644 --- a/libraries/net/node_impl.hxx +++ b/libraries/net/node_impl.hxx @@ -1,8 +1,12 @@ #pragma once #include +#include +#include +#include #include #include #include +#include #include #include #include @@ -11,6 +15,81 @@ namespace graphene { namespace net { namespace detail { +namespace bmi = boost::multi_index; +class blockchain_tied_message_cache +{ +private: + static const uint32_t cache_duration_in_blocks = GRAPHENE_NET_MESSAGE_CACHE_DURATION_IN_BLOCKS; + + struct message_hash_index{}; + struct message_contents_hash_index{}; + struct block_clock_index{}; + struct message_info + { + message_hash_type message_hash; + message message_body; + uint32_t block_clock_when_received; + + // for network performance stats + message_propagation_data propagation_data; + fc::uint160_t message_contents_hash; // hash of whatever the message contains (if it's a transaction, this is the transaction id, if it's a block, it's the block_id) + + message_info( const message_hash_type& message_hash, + const message& message_body, + uint32_t block_clock_when_received, + const message_propagation_data& propagation_data, + fc::uint160_t message_contents_hash ) : + message_hash( message_hash ), + message_body( message_body ), + block_clock_when_received( block_clock_when_received ), + propagation_data( propagation_data ), + message_contents_hash( message_contents_hash ) + {} + }; + typedef boost::multi_index_container + < message_info, + bmi::indexed_by< bmi::ordered_unique< bmi::tag, + bmi::member >, + bmi::ordered_non_unique< bmi::tag, + bmi::member >, + bmi::ordered_non_unique< bmi::tag, + bmi::member > > + > message_cache_container; + + message_cache_container _message_cache; + + 
uint32_t block_clock; + +public: + blockchain_tied_message_cache() : + block_clock( 0 ) + {} + void block_accepted(); + void cache_message( const message& message_to_cache, const message_hash_type& hash_of_message_to_cache, + const message_propagation_data& propagation_data, const fc::uint160_t& message_content_hash ); + message get_message( const message_hash_type& hash_of_message_to_lookup ); + message_propagation_data get_message_propagation_data( const fc::uint160_t& hash_of_message_contents_to_lookup ) const; + size_t size() const { return _message_cache.size(); } +}; + +// This specifies configuration info for the local node. It's stored as JSON +// in the configuration directory (application data directory) +struct node_configuration +{ + node_configuration() : accept_incoming_connections(true), wait_if_endpoint_is_busy(true) {} + + fc::ip::endpoint listen_endpoint; + bool accept_incoming_connections; + bool wait_if_endpoint_is_busy; + /** + * Originally, our p2p code just had a 'node-id' that was a random number identifying this node + * on the network. This is now a private key/public key pair, where the public key is used + * in place of the old random node-id. The private part is unused, but might be used in + * the future to support some notion of trusted peers. + */ + fc::ecc::private_key private_key; +}; + // when requesting items from peers, we want to prioritize any blocks before // transactions, but otherwise request items in the order we heard about them struct prioritized_item_id @@ -166,8 +245,16 @@ private: class node_impl : public peer_connection_delegate { public: + class address_builder + { + public: + virtual void build( node_impl* impl, address_message& ) = 0; + protected: + address_info update_address_record( node_impl* impl, const peer_connection_ptr& active_peer); + }; #ifdef P2P_IN_DEDICATED_THREAD std::shared_ptr _thread; + std::shared_ptr get_thread() { return _thread; } #endif // P2P_IN_DEDICATED_THREAD std::unique_ptr _delegate; fc::sha256 _chain_id; @@ -292,8 +379,6 @@ class node_impl : public peer_connection_delegate uint32_t _last_reported_number_of_connections; // number of connections last reported to the client (to avoid sending duplicate messages) - bool _peer_advertising_disabled; - fc::future _fetch_updated_peer_lists_loop_done; boost::circular_buffer _average_network_read_speed_seconds; @@ -382,6 +467,9 @@ class node_impl : public peer_connection_delegate void on_message( peer_connection* originating_peer, const message& received_message ) override; + + void call_by_message_type( peer_connection* originating_peer, + const message& received_message ); void on_hello_message( peer_connection* originating_peer, const hello_message& hello_message_received ); @@ -394,6 +482,8 @@ class node_impl : public peer_connection_delegate void on_address_request_message( peer_connection* originating_peer, const address_request_message& address_request_message_received ); + + std::shared_ptr _address_builder = nullptr; void on_address_message( peer_connection* originating_peer, const address_message& address_message_received ); @@ -482,6 +572,8 @@ class node_impl : public peer_connection_delegate void listen_to_p2p_network(); void connect_to_p2p_network(); void add_node( const fc::ip::endpoint& ep ); + void set_advertise_algorithm( std::string algo, const fc::optional>& advertise_list ); + void add_seed_node( const std::string& seed_string ); void initiate_connect_to(const peer_connection_ptr& peer); void connect_to_endpoint(const fc::ip::endpoint& ep); void 
listen_on_endpoint(const fc::ip::endpoint& ep , bool wait_if_not_available); @@ -509,7 +601,7 @@ class node_impl : public peer_connection_delegate void set_total_bandwidth_limit( uint32_t upload_bytes_per_second, uint32_t download_bytes_per_second ); void disable_peer_advertising(); fc::variant_object get_call_statistics() const; - message get_message_for_item(const item_id& item) override; + graphene::net::message get_message_for_item(const item_id& item) override; fc::variant_object network_get_info() const; fc::variant_object network_get_usage_stats() const; @@ -519,3 +611,8 @@ class node_impl : public peer_connection_delegate }; // end class node_impl }}} // end of namespace graphene::net::detail + +FC_REFLECT(graphene::net::detail::node_configuration, (listen_endpoint) + (accept_incoming_connections) + (wait_if_endpoint_is_busy) + (private_key)); From 16c70f60d75e76bf52742c123770446fa5ee2e54 Mon Sep 17 00:00:00 2001 From: John Jones Date: Fri, 17 May 2019 12:25:12 -0500 Subject: [PATCH 007/338] Add tests --- libraries/net/node_impl.hxx | 3 - tests/cli/main.cpp | 46 ----- tests/common/genesis_file_util.hpp | 64 +++++- tests/tests/p2p_node_tests.cpp | 305 +++++++++++++++++++++++++++++ 4 files changed, 364 insertions(+), 54 deletions(-) create mode 100644 tests/tests/p2p_node_tests.cpp diff --git a/libraries/net/node_impl.hxx b/libraries/net/node_impl.hxx index 961366027b..4c96410500 100644 --- a/libraries/net/node_impl.hxx +++ b/libraries/net/node_impl.hxx @@ -468,9 +468,6 @@ class node_impl : public peer_connection_delegate void on_message( peer_connection* originating_peer, const message& received_message ) override; - void call_by_message_type( peer_connection* originating_peer, - const message& received_message ); - void on_hello_message( peer_connection* originating_peer, const hello_message& hello_message_received ); diff --git a/tests/cli/main.cpp b/tests/cli/main.cpp index baf3852ba7..46914ae3ed 100644 --- a/tests/cli/main.cpp +++ b/tests/cli/main.cpp @@ -62,26 +62,6 @@ #define BOOST_TEST_MODULE Test Application #include -/***** - * Global Initialization for Windows - * ( sets up Winsock stuf ) - */ -#ifdef _WIN32 -int sockInit(void) -{ - WSADATA wsa_data; - return WSAStartup(MAKEWORD(1,1), &wsa_data); -} -int sockQuit(void) -{ - return WSACleanup(); -} -#endif - -/********************* - * Helper Methods - *********************/ - #include "../common/genesis_file_util.hpp" using std::exception; @@ -89,32 +69,6 @@ using std::cerr; #define INVOKE(test) ((struct test*)this)->test_method(); -////// -/// @brief attempt to find an available port on localhost -/// @returns an available port number, or -1 on error -///// -int get_available_port() -{ - struct sockaddr_in sin; - int socket_fd = socket(AF_INET, SOCK_STREAM, 0); - if (socket_fd == -1) - return -1; - sin.sin_family = AF_INET; - sin.sin_port = 0; - sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK); - if (::bind(socket_fd, (struct sockaddr*)&sin, sizeof(struct sockaddr_in)) == -1) - return -1; - socklen_t len = sizeof(sin); - if (getsockname(socket_fd, (struct sockaddr *)&sin, &len) == -1) - return -1; -#ifdef _WIN32 - closesocket(socket_fd); -#else - close(socket_fd); -#endif - return ntohs(sin.sin_port); -} - /////////// /// @brief Start the application /// @param app_dir the temporary directory to use diff --git a/tests/common/genesis_file_util.hpp b/tests/common/genesis_file_util.hpp index a87d9585af..9fd19367f9 100644 --- a/tests/common/genesis_file_util.hpp +++ b/tests/common/genesis_file_util.hpp @@ -1,10 +1,64 @@ #pragma 
once -///////// -/// @brief forward declaration, using as a hack to generate a genesis.json file -/// for testing -///////// -namespace graphene { namespace app { namespace detail { +#include +#include +#include + + +#ifdef _WIN32 + #ifndef _WIN32_WINNT + #define _WIN32_WINNT 0x0501 + #endif + #include + #include +int sockInit(void) +{ + WSADATA wsa_data; + return WSAStartup(MAKEWORD(1,1), &wsa_data); +} +int sockQuit(void) +{ + return WSACleanup(); +} +#else + #include + #include + #include +#endif + +namespace graphene { namespace app { + + ////// + /// @brief attempt to find an available port on localhost + /// @returns an available port number, or -1 on error + ///// + int get_available_port() + { + struct sockaddr_in sin; + int socket_fd = socket(AF_INET, SOCK_STREAM, 0); + if (socket_fd == -1) + return -1; + sin.sin_family = AF_INET; + sin.sin_port = 0; + sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK); + if (::bind(socket_fd, (struct sockaddr*)&sin, sizeof(struct sockaddr_in)) == -1) + return -1; + socklen_t len = sizeof(sin); + if (getsockname(socket_fd, (struct sockaddr *)&sin, &len) == -1) + return -1; + #ifdef _WIN32 + closesocket(socket_fd); + #else + close(socket_fd); + #endif + return ntohs(sin.sin_port); + } + + namespace detail { + ///////// + /// @brief forward declaration, using as a hack to generate a genesis.json file + /// for testing + ///////// graphene::chain::genesis_state_type create_example_genesis(); } } } // graphene::app::detail diff --git a/tests/tests/p2p_node_tests.cpp b/tests/tests/p2p_node_tests.cpp new file mode 100644 index 0000000000..73800f9637 --- /dev/null +++ b/tests/tests/p2p_node_tests.cpp @@ -0,0 +1,305 @@ +/* + * Copyright (c) 2019 Bitshares Foundation, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include +#define P2P_IN_DEDICATED_THREAD 1 +#include "../../libraries/net/node_impl.hxx" + +#include "../common/genesis_file_util.hpp" + +/*** + * A peer connection delegate + */ +class test_delegate : public graphene::net::peer_connection_delegate +{ + public: + test_delegate() + { + } + void on_message(graphene::net::peer_connection* originating_peer, + const graphene::net::message& received_message) + { + elog("on_message was called with ${msg}", ("msg",received_message)); + try { + graphene::net::address_request_message m = received_message.as(); + std::shared_ptr m_ptr = std::make_shared( m ); + last_message = m_ptr; + } catch (...) + { + } + } + void on_connection_closed(graphene::net::peer_connection* originating_peer) override {} + graphene::net::message get_message_for_item(const graphene::net::item_id& item) override + { + return graphene::net::message(); + } + std::shared_ptr last_message = nullptr; +}; + +class test_node : public graphene::net::node, public graphene::net::node_delegate +{ +public: + test_node(const std::string& name, const fc::path& config_dir, int port, int seed_port = -1) : node(name) + { + node_name = name; + } + ~test_node() + { + close(); + } + + void on_message(graphene::net::peer_connection_ptr originating_peer, const graphene::net::message& received_message) + { + my->get_thread()->async([&]() { + my->on_message( originating_peer.get(), received_message ); + }).wait(); + } + + std::pair, graphene::net::peer_connection_ptr> create_peer_connection(std::string url) + { + std::pair, graphene::net::peer_connection_ptr> ret_val; + ret_val = this->my->get_thread()->async([&, &url = url](){ + std::shared_ptr d{}; + graphene::net::peer_connection_ptr peer = graphene::net::peer_connection::make_shared(d.get()); + peer->set_remote_endpoint(fc::optional(fc::ip::endpoint::from_string(url))); + my->move_peer_to_active_list(peer); + return std::pair, graphene::net::peer_connection_ptr>(d, peer); + }).wait(); + return ret_val; + } + + /**** + * Implementation methods of node_delegate + */ + bool has_item( const graphene::net::item_id& id ) { return false; } + bool handle_block( const graphene::net::block_message& blk_msg, bool sync_mode, + std::vector& contained_transaction_message_ids ) + { return false; } + void handle_transaction( const graphene::net::trx_message& trx_msg ) + { + elog("${name} was asked to handle a transaction", ("name", node_name)); + } + void handle_message( const graphene::net::message& message_to_process ) + { + elog("${name} received a message", ("name",node_name)); + } + std::vector get_block_ids( + const std::vector& blockchain_synopsis, + uint32_t& remaining_item_count, uint32_t limit = 2000) + { return std::vector(); } + graphene::net::message get_item( const graphene::net::item_id& id ) + { + elog("${name} get_item was called", ("name",node_name)); + return graphene::net::message(); + } + graphene::net::chain_id_type get_chain_id()const + { + elog("${name} get_chain_id was called", ("name",node_name)); + return graphene::net::chain_id_type(); + } + std::vector get_blockchain_synopsis( + const graphene::net::item_hash_t& reference_point, + uint32_t number_of_blocks_after_reference_point) + { return std::vector(); } + void sync_status( uint32_t item_type, uint32_t item_count ) {} + void connection_count_changed( uint32_t c ) + { + elog("${name} connection_count_change was called", ("name",node_name)); + } + uint32_t get_block_number(const 
graphene::net::item_hash_t& block_id) + { + elog("${name} get_block_number was called", ("name",node_name)); + return 0; + } + fc::time_point_sec get_block_time(const graphene::net::item_hash_t& block_id) + { + elog("${name} get_block_time was called", ("name",node_name)); + return fc::time_point_sec(); + } + graphene::net::item_hash_t get_head_block_id() const + { + elog("${name} get_head_block_id was called", ("name",node_name)); + return graphene::net::item_hash_t(); + } + uint32_t estimate_last_known_fork_from_git_revision_timestamp(uint32_t unix_timestamp) const + { return 0; } + void error_encountered(const std::string& message, const fc::oexception& error) + { + elog("${name} error_encountered was called. Message: ${msg}", ("name",node_name)("msg", message)); + } + uint8_t get_current_block_interval_in_seconds() const + { + elog("${name} get_current_block_interval_in_seconds was called", ("name",node_name)); + return 0; + } + + private: + std::string node_name; +}; + +class test_peer : public graphene::net::peer_connection +{ +public: + std::shared_ptr message_received; + void send_message(const graphene::net::message& message_to_send, size_t message_send_time_field_offset = (size_t)-1) override + { + try { + // make a copy + graphene::net::address_message m = message_to_send.as(); + std::shared_ptr msg_ptr = std::make_shared(m); + // store it for later + message_received = msg_ptr; + return; + } catch (...) {} + message_received = nullptr; + } +public: + test_peer(graphene::net::peer_connection_delegate* del) : graphene::net::peer_connection(del) { + message_received = nullptr; + } +}; + +void test_address_message( std::shared_ptr msg, std::size_t num_elements) +{ + if (msg != nullptr) + { + graphene::net::address_message addr_msg = static_cast( msg->as() ); + BOOST_CHECK_EQUAL(addr_msg.addresses.size(), num_elements); + } + else + { + BOOST_FAIL( "address_message was null" ); + } +} + +BOOST_AUTO_TEST_SUITE( p2p_node_tests ) + +/**** + * Assure that when disable_peer_advertising is set, + * the node does not share its peer list + */ +BOOST_AUTO_TEST_CASE( disable_peer_advertising ) +{ + // create a node + int node1_port = graphene::app::get_available_port(); + fc::temp_directory node1_dir; + test_node node1("Node1", node1_dir.path(), node1_port); + node1.disable_peer_advertising(); + + // get something in their list of connections + std::pair, graphene::net::peer_connection_ptr> node2_rslts + = node1.create_peer_connection( "127.0.0.1:8090" ); + + // verify that they do not share it with others + test_delegate peer3_delegate{}; + std::shared_ptr peer3_ptr = std::make_shared(&peer3_delegate); + graphene::net::address_request_message req; + node1.on_message( peer3_ptr, req ); + + // check the results + std::shared_ptr msg = peer3_ptr->message_received; + test_address_message(msg, 0); +} + +BOOST_AUTO_TEST_CASE( set_nothing_advertise_algorithm ) +{ + // create a node + int node1_port = graphene::app::get_available_port(); + fc::temp_directory node1_dir; + test_node node1("Node1", node1_dir.path(), node1_port); + node1.set_advertise_algorithm( "nothing" ); + + // get something in their list of connections + std::pair, graphene::net::peer_connection_ptr> node2_rslts + = node1.create_peer_connection( "127.0.0.1:8090" ); + + // verify that they do not share it with others + test_delegate peer3_delegate{}; + std::shared_ptr peer3_ptr = std::make_shared(&peer3_delegate); + graphene::net::address_request_message req; + node1.on_message( peer3_ptr, req ); + + // check the results + 
std::shared_ptr msg = peer3_ptr->message_received; + test_address_message(msg, 0); +} + +BOOST_AUTO_TEST_CASE( advertise_list ) +{ + std::vector advert_list = { "127.0.0.1:8090"}; + // set up my node + int my_node_port = graphene::app::get_available_port(); + fc::temp_directory my_node_dir; + test_node my_node("Hello", my_node_dir.path(), my_node_port); + my_node.set_advertise_algorithm( "list", advert_list ); + test_delegate del{}; + // a fake peer + std::shared_ptr my_peer(new test_peer{&del}); + + // act like my_node received an address_request message from my_peer + graphene::net::address_request_message address_request_message_received; + my_node.on_message( my_peer, address_request_message_received ); + // check the results + std::shared_ptr msg = my_peer->message_received; + test_address_message( msg, 1 ); +} + +BOOST_AUTO_TEST_CASE( exclude_list ) +{ + std::vector ex_list = { "127.0.0.1:8090"}; + // set up my node + int my_node_port = graphene::app::get_available_port(); + fc::temp_directory my_node_dir; + test_node my_node("Hello", my_node_dir.path(), my_node_port); + my_node.set_advertise_algorithm( "exclude_list", ex_list ); + // some peers + std::pair, graphene::net::peer_connection_ptr> node2_rslts + = my_node.create_peer_connection("127.0.0.1:8089"); + std::pair, graphene::net::peer_connection_ptr> node3_rslts + = my_node.create_peer_connection("127.0.0.1:8090"); + std::pair, graphene::net::peer_connection_ptr> node4_rslts + = my_node.create_peer_connection("127.0.0.1:8091"); + + // act like my_node received an address_request message from my_peer + test_delegate del_4{}; + std::shared_ptr peer_4( new test_peer(&del_4) ); + graphene::net::address_request_message address_request_message_received; + my_node.on_message( peer_4, address_request_message_received ); + // check the results + std::shared_ptr msg = peer_4->message_received; + test_address_message( msg, 2 ); +} + +BOOST_AUTO_TEST_SUITE_END() From 28d08f7df4e984322def5bcd1663231d382d9591 Mon Sep 17 00:00:00 2001 From: John Jones Date: Mon, 20 May 2019 12:27:13 -0500 Subject: [PATCH 008/338] Allow changes to be accessed from the command line --- libraries/app/application.cpp | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/libraries/app/application.cpp b/libraries/app/application.cpp index db7416ab01..5260bfb6f4 100644 --- a/libraries/app/application.cpp +++ b/libraries/app/application.cpp @@ -142,6 +142,27 @@ void application_impl::reset_p2p_node(const fc::path& data_dir) _p2p_network->add_seed_nodes(seeds); } + if ( _options->count( "disable-peer-advertising" ) ) + { + if ( _options->at( "disable-peer-advertisng").as() ) + { + _p2p_network->disable_peer_advertising(); + } + } + else + { + if( _options->count( "advertise-peer-algorithm" ) ) + { + std::string algo = _options->at("advertise-peer-algorithm").as(); + fc::optional> list; + if ( algo == "list" && _options->count("advertise-list") ) + list = _options->at("advertise-list").as>(); + else if ( algo == "exclude-list" && _options->count("exclude-list") ) + list = _options->at("exclude-list").as>(); + _p2p_network->set_advertise_algorithm( _options->at("advertise-peer-algorithm").as(), list); + } + } + if( _options->count("p2p-endpoint") ) _p2p_network->listen_on_endpoint(fc::ip::endpoint::from_string(_options->at("p2p-endpoint").as()), true); else @@ -1034,9 +1055,14 @@ void application::set_program_options(boost::program_options::options_descriptio "For database_api_impl::get_withdraw_permissions_by_recipient to set 
max limit value") "For database_api_impl::get_order_book to set its default limit value as 50") ("accept-incoming-connections", bpo::value()->implicit_value(true), "Accept incoming connections") - ("advertise-peer-algorithm", bpo::value()->implicit_value("all"), "Determines which peers are advertised") + ("advertise-peer-algorithm", bpo::value()->implicit_value("all"), + "Determines which peers are advertised. Algorithms: 'all', 'nothing', 'list', exclude_list'") ("advertise-peer-list", bpo::value>()->composing(), "P2P nodes to advertise (may specify multiple times") + ("exclude-peer-list", bpo::value>()->composing(), + "P2P nodes to not advertise (may specify multiple times") + ("disable-peer-advertising", bpo::value()->implicit_value(false), + "Disable advertising your peers. Note: Overrides any advertise-peer-algorithm settings") ; command_line_options.add(configuration_file_options); command_line_options.add_options() From 3ed2b8990725771c996bb5fc5b68f3964d7fa006 Mon Sep 17 00:00:00 2001 From: John Jones Date: Mon, 20 May 2019 14:57:55 -0500 Subject: [PATCH 009/338] Fix typos --- libraries/app/application.cpp | 10 +++++----- libraries/net/node.cpp | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/libraries/app/application.cpp b/libraries/app/application.cpp index 5260bfb6f4..eac0ab14be 100644 --- a/libraries/app/application.cpp +++ b/libraries/app/application.cpp @@ -155,11 +155,11 @@ void application_impl::reset_p2p_node(const fc::path& data_dir) { std::string algo = _options->at("advertise-peer-algorithm").as(); fc::optional> list; - if ( algo == "list" && _options->count("advertise-list") ) - list = _options->at("advertise-list").as>(); - else if ( algo == "exclude-list" && _options->count("exclude-list") ) - list = _options->at("exclude-list").as>(); - _p2p_network->set_advertise_algorithm( _options->at("advertise-peer-algorithm").as(), list); + if ( algo == "list" && _options->count("advertise-peer-list") ) + list = _options->at("advertise-peer-list").as>(); + else if ( algo == "exclude_list" && _options->count("exclude-peer-list") ) + list = _options->at("exclude-peer-list").as>(); + _p2p_network->set_advertise_algorithm( algo, list); } } diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 71134780ba..cca7b89338 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -231,7 +231,7 @@ namespace graphene { namespace net { namespace detail { public: exclude_address_builder(const fc::optional>& address_list) { - FC_ASSERT( address_list.valid(), "advertise-peer-list must be included" ); + FC_ASSERT( address_list.valid(), "exclude-peer-list must be included" ); std::for_each(address_list->begin(), address_list->end(), [&exclude_list = exclude_list](std::string input) { exclude_list.insert(input); From a38a1ff258c0a025767e9151fd96de6b0aa7e003 Mon Sep 17 00:00:00 2001 From: John Jones Date: Mon, 20 May 2019 16:45:56 -0500 Subject: [PATCH 010/338] Do not add to db if !accept_incoming_connections --- libraries/net/node.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index cca7b89338..5c477c6041 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -1697,9 +1697,11 @@ namespace graphene { namespace net { namespace detail { std::vector updated_addresses = address_message_received.addresses; for (address_info& address : updated_addresses) address.last_seen_time = fc::time_point_sec(fc::time_point::now()); - bool new_information_received = 
merge_address_info_with_potential_peer_database(updated_addresses); - if (new_information_received) - trigger_p2p_network_connect_loop(); + if ( _node_configuration.accept_incoming_connections ) + { + if ( merge_address_info_with_potential_peer_database(updated_addresses)) + trigger_p2p_network_connect_loop(); + } if (_handshaking_connections.find(originating_peer->shared_from_this()) != _handshaking_connections.end()) { From 6e879c31f70d361d48499200bddfd464f0278772 Mon Sep 17 00:00:00 2001 From: John Jones Date: Fri, 5 Jul 2019 09:35:32 -0500 Subject: [PATCH 011/338] Fix spacing, add node_util to tests --- libraries/net/include/graphene/net/node.hpp | 8 +- .../include/graphene/net/peer_connection.hpp | 2 +- libraries/net/node.cpp | 361 +++++++++--------- libraries/net/node_impl.hxx | 149 ++++---- tests/cli/main.cpp | 1 + tests/common/genesis_file_util.hpp | 52 +-- tests/common/node_util.hpp | 51 +++ tests/tests/p2p_node_tests.cpp | 134 ++++--- 8 files changed, 389 insertions(+), 369 deletions(-) create mode 100644 tests/common/node_util.hpp diff --git a/libraries/net/include/graphene/net/node.hpp b/libraries/net/include/graphene/net/node.hpp index f856105dd6..7fe73fa943 100644 --- a/libraries/net/include/graphene/net/node.hpp +++ b/libraries/net/include/graphene/net/node.hpp @@ -222,14 +222,14 @@ namespace graphene { namespace net { * @param seed_string the url * @param connect_immediately will start the connection process immediately */ - void add_seed_node(const std::string& seed_string); + void add_seed_node( const std::string& seed_string ); /***** * @brief add a list of nodes to seed the p2p network * @param seeds a vector of url strings * @param connect_immediately attempt a connection immediately */ - void add_seed_nodes(std::vector seeds); + void add_seed_nodes( std::vector seeds ); /** * Attempt to connect to the specified endpoint immediately. @@ -245,12 +245,12 @@ namespace graphene { namespace net { * @param in the incoming string * @returns a vector of endpoints */ - static std::vector resolve_string_to_ip_endpoints(const std::string& in); + static std::vector resolve_string_to_ip_endpoints( const std::string& in ); /** * Call with true to enable listening for incoming connections */ - void accept_incoming_connections(bool accept); + void accept_incoming_connections( bool accept ); /** * Specifies the port upon which incoming connections should be accepted. 
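For orientation, the peer-advertising API that the preceding patches build up can be exercised as in the sketch below. This is an illustrative example only, not part of any patch in this series; it assumes the test_node, test_delegate and test_peer fixtures and graphene::app::get_available_port() from the test code shown above, and it follows the same usage as the advertise_list test case.

// Illustrative sketch (not part of any patch): drive the "list" advertise
// algorithm through the fixtures defined in tests/tests/p2p_node_tests.cpp.
// Assumes those fixtures and their headers are available; only standard
// includes are listed here.
#include <memory>
#include <string>
#include <vector>

void advertise_list_sketch()
{
   // endpoints we are willing to advertise to other peers
   std::vector<std::string> advert_list = { "127.0.0.1:8090" };

   // start a node on a free local port and restrict advertising to the list
   int port = graphene::app::get_available_port();
   fc::temp_directory data_dir;
   test_node my_node( "Example", data_dir.path(), port );
   my_node.set_advertise_algorithm( "list", advert_list );

   // simulate an incoming address_request_message from a fake peer
   test_delegate del{};
   std::shared_ptr<test_peer> peer( new test_peer( &del ) );
   graphene::net::address_request_message request;
   my_node.on_message( peer, request );

   // peer->message_received now holds the address_message reply; with the
   // "list" algorithm only endpoints from advert_list are offered
   test_address_message( peer->message_received, 1 );
}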
diff --git a/libraries/net/include/graphene/net/peer_connection.hpp b/libraries/net/include/graphene/net/peer_connection.hpp index 7ff8a96d8f..452bafecc0 100644 --- a/libraries/net/include/graphene/net/peer_connection.hpp +++ b/libraries/net/include/graphene/net/peer_connection.hpp @@ -285,7 +285,7 @@ namespace graphene { namespace net void on_connection_closed(message_oriented_connection* originating_connection) override; void send_queueable_message(std::unique_ptr&& message_to_send); - virtual void send_message(const message& message_to_send, size_t message_send_time_field_offset = (size_t)-1); + virtual void send_message( const message& message_to_send, size_t message_send_time_field_offset = (size_t)-1 ); void send_item(const item_id& item_to_send); void close_connection(); void destroy_connection(); diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 5c477c6041..8d2d947c02 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -133,13 +133,13 @@ namespace graphene { namespace net { namespace detail { const fc::uint160_t& message_content_hash ) { _message_cache.insert( message_info(hash_of_message_to_cache, - message_to_cache, block_clock, propagation_data, message_content_hash ) ); + message_to_cache, block_clock, propagation_data, message_content_hash ) ); } message blockchain_tied_message_cache::get_message( const message_hash_type& hash_of_message_to_lookup ) { message_cache_container::index::type::const_iterator iter = - _message_cache.get().find(hash_of_message_to_lookup ); + _message_cache.get().find(hash_of_message_to_lookup ); if( iter != _message_cache.get().end() ) return iter->message_body; FC_THROW_EXCEPTION( fc::key_not_found_exception, "Requested message not in cache" ); @@ -148,10 +148,10 @@ namespace graphene { namespace net { namespace detail { { if( hash_of_message_contents_to_lookup != fc::uint160_t() ) { - message_cache_container::index::type::const_iterator iter = + message_cache_container::index::type::const_iterator iter = _message_cache.get().find(hash_of_message_contents_to_lookup ); - if( iter != _message_cache.get().end() ) - return iter->propagation_data; + if( iter != _message_cache.get().end() ) + return iter->propagation_data; } FC_THROW_EXCEPTION( fc::key_not_found_exception, "Requested message not in cache" ); } @@ -185,87 +185,88 @@ namespace graphene { namespace net { namespace detail { #define MAXIMUM_NUMBER_OF_BLOCKS_TO_HANDLE_AT_ONE_TIME 200 #define MAXIMUM_NUMBER_OF_BLOCKS_TO_PREFETCH (10 * MAXIMUM_NUMBER_OF_BLOCKS_TO_HANDLE_AT_ONE_TIME) - /****** - * Use information passed from command line or config file to advertise nodes - */ - class list_address_builder : public node_impl::address_builder - { - public: - list_address_builder(fc::optional> address_list) - { - FC_ASSERT( address_list.valid(), "advertise-peer-list must be included" ); - - advertise_list.reserve( address_list->size() ); - auto& list = advertise_list; - std::for_each( address_list->begin(), address_list->end(), [&list]( std::string str ) { - // ignore fc exceptions (like poorly formatted endpoints) - try - { + /****** + * Use information passed from command line or config file to advertise nodes + */ + class list_address_builder : public node_impl::address_builder + { + public: + list_address_builder(fc::optional> address_list) + { + FC_ASSERT( address_list.valid(), "advertise-peer-list must be included" ); + + advertise_list.reserve( address_list->size() ); + auto& list = advertise_list; + std::for_each( address_list->begin(), address_list->end(), 
[&list]( std::string str ) { + // ignore fc exceptions (like poorly formatted endpoints) + try + { list.emplace_back( graphene::net::address_info( - fc::ip::endpoint::from_string(str), - fc::time_point_sec(), - fc::microseconds(0), - node_id_t(), - peer_connection_direction::unknown, - firewalled_state::unknown )); - } - catch(const fc::exception& ) { + fc::ip::endpoint::from_string(str), + fc::time_point_sec(), + fc::microseconds(0), + node_id_t(), + peer_connection_direction::unknown, + firewalled_state::unknown )); + } + catch(const fc::exception& ) + { wlog( "Address ${addr} invalid.", ("addr", str) ); - } + } } ); - } + } - void build(node_impl* impl, address_message& reply) - { - reply.addresses = advertise_list; - } - private: - std::vector advertise_list; - }; + void build(node_impl* impl, address_message& reply) + { + reply.addresses = advertise_list; + } + private: + std::vector advertise_list; + }; - /**** - * Advertise all nodes except a predefined list - */ - class exclude_address_builder : public node_impl::address_builder - { - public: - exclude_address_builder(const fc::optional>& address_list) - { - FC_ASSERT( address_list.valid(), "exclude-peer-list must be included" ); - std::for_each(address_list->begin(), address_list->end(), [&exclude_list = exclude_list](std::string input) - { - exclude_list.insert(input); - }); - } - void build(node_impl* impl, address_message& reply) - { - reply.addresses.reserve(impl->_active_connections.size()); - // filter out those in the exclude list - for(const peer_connection_ptr& active_peer : impl->_active_connections) - { + /**** + * Advertise all nodes except a predefined list + */ + class exclude_address_builder : public node_impl::address_builder + { + public: + exclude_address_builder(const fc::optional>& address_list) + { + FC_ASSERT( address_list.valid(), "exclude-peer-list must be included" ); + std::for_each(address_list->begin(), address_list->end(), [&exclude_list = exclude_list](std::string input) + { + exclude_list.insert(input); + }); + } + void build(node_impl* impl, address_message& reply) + { + reply.addresses.reserve(impl->_active_connections.size()); + // filter out those in the exclude list + for(const peer_connection_ptr& active_peer : impl->_active_connections) + { if (exclude_list.find( *active_peer->get_remote_endpoint() ) == exclude_list.end()) - reply.addresses.emplace_back(update_address_record(impl, active_peer)); - } - reply.addresses.shrink_to_fit(); - } - private: - fc::flat_set exclude_list; - }; + reply.addresses.emplace_back(update_address_record(impl, active_peer)); + } + reply.addresses.shrink_to_fit(); + } + private: + fc::flat_set exclude_list; + }; - /*** - * Return all peers when node asks - */ - class all_address_builder : public node_impl::address_builder - { - void build( node_impl* impl, address_message& reply ) - { - reply.addresses.reserve(impl->_active_connections.size()); - for (const peer_connection_ptr& active_peer : impl->_active_connections) - { - reply.addresses.emplace_back(update_address_record(impl, active_peer)); - } - } - }; + /*** + * Return all peers when node asks + */ + class all_address_builder : public node_impl::address_builder + { + void build( node_impl* impl, address_message& reply ) + { + reply.addresses.reserve(impl->_active_connections.size()); + for (const peer_connection_ptr& active_peer : impl->_active_connections) + { + reply.addresses.emplace_back(update_address_record(impl, active_peer)); + } + } + }; node_impl::node_impl(const std::string& user_agent) : #ifdef 
P2P_IN_DEDICATED_THREAD @@ -1641,20 +1642,20 @@ namespace graphene { namespace net { namespace detail { FC_THROW( "unexpected connection_rejected_message from peer" ); } - address_info node_impl::address_builder::update_address_record( node_impl* impl, const peer_connection_ptr& active_peer) - { + address_info node_impl::address_builder::update_address_record( node_impl* impl, const peer_connection_ptr& active_peer) + { fc::optional updated_peer_record = - impl->_potential_peer_db.lookup_entry_for_endpoint(*active_peer->get_remote_endpoint()); + impl->_potential_peer_db.lookup_entry_for_endpoint(*active_peer->get_remote_endpoint()); if (updated_peer_record) { - updated_peer_record->last_seen_time = fc::time_point::now(); - impl->_potential_peer_db.update_entry(*updated_peer_record); + updated_peer_record->last_seen_time = fc::time_point::now(); + impl->_potential_peer_db.update_entry(*updated_peer_record); } return address_info(*active_peer->get_remote_endpoint(), fc::time_point::now(), active_peer->round_trip_delay, - active_peer->node_id, active_peer->direction, active_peer->is_firewalled); - } + active_peer->node_id, active_peer->direction, active_peer->is_firewalled); + } void node_impl::on_address_request_message(peer_connection* originating_peer, const address_request_message& address_request_message_received) { @@ -1663,28 +1664,28 @@ namespace graphene { namespace net { namespace detail { address_message reply; if (_address_builder != nullptr ) - _address_builder->build( this, reply ); + _address_builder->build( this, reply ); originating_peer->send_message(reply); } - void node_impl::set_advertise_algorithm( std::string algo, + void node_impl::set_advertise_algorithm( std::string algo, const fc::optional>& advertise_list ) - { - if (algo == "exclude_list") - { - _address_builder = std::make_shared(advertise_list); - } - else if (algo == "list") - { - _address_builder = std::make_shared(advertise_list); - } - else if (algo == "nothing") - { - _address_builder = nullptr; - } - else - _address_builder = std::make_shared(); - } + { + if (algo == "exclude_list") + { + _address_builder = std::make_shared(advertise_list); + } + else if (algo == "list") + { + _address_builder = std::make_shared(advertise_list); + } + else if (algo == "nothing") + { + _address_builder = nullptr; + } + else + _address_builder = std::make_shared(); + } void node_impl::on_address_message(peer_connection* originating_peer, const address_message& address_message_received) { @@ -1699,8 +1700,8 @@ namespace graphene { namespace net { namespace detail { address.last_seen_time = fc::time_point_sec(fc::time_point::now()); if ( _node_configuration.accept_incoming_connections ) { - if ( merge_address_info_with_potential_peer_database(updated_addresses)) - trigger_p2p_network_connect_loop(); + if ( merge_address_info_with_potential_peer_database(updated_addresses)) + trigger_p2p_network_connect_loop(); } if (_handshaking_connections.find(originating_peer->shared_from_this()) != _handshaking_connections.end()) @@ -4200,16 +4201,16 @@ namespace graphene { namespace net { namespace detail { trigger_p2p_network_connect_loop(); } - void node_impl::add_seed_node(const std::string& endpoint_string) - { - VERIFY_CORRECT_THREAD(); - std::vector endpoints = graphene::net::node::resolve_string_to_ip_endpoints(endpoint_string); - for (const fc::ip::endpoint& endpoint : endpoints) - { - ilog("Adding seed node ${endpoint}", ("endpoint", endpoint)); - add_node(endpoint); - } - } + void node_impl::add_seed_node(const std::string& 
endpoint_string) + { + VERIFY_CORRECT_THREAD(); + std::vector endpoints = graphene::net::node::resolve_string_to_ip_endpoints(endpoint_string); + for (const fc::ip::endpoint& endpoint : endpoints) + { + ilog("Adding seed node ${endpoint}", ("endpoint", endpoint)); + add_node(endpoint); + } + } void node_impl::initiate_connect_to(const peer_connection_ptr& new_peer) { @@ -5117,72 +5118,70 @@ namespace graphene { namespace net { namespace detail { } // end namespace detail - /*** - * @brief Helper to convert a string to a collection of endpoints - * - * This converts a string (i.e. "bitshares.eu:665535" to a collection of endpoints. - * NOTE: Throws an exception if not in correct format or was unable to resolve URL. - * - * @param in the incoming string - * @returns a vector of endpoints - */ - std::vector node::resolve_string_to_ip_endpoints(const std::string& in) - { - try - { - std::string::size_type colon_pos = in.find(':'); - if (colon_pos == std::string::npos) - FC_THROW("Missing required port number in endpoint string \"${endpoint_string}\"", - ("endpoint_string", in)); - std::string port_string = in.substr(colon_pos + 1); - try - { - uint16_t port = boost::lexical_cast(port_string); - - std::string hostname = in.substr(0, colon_pos); - std::vector endpoints = fc::resolve(hostname, port); - if (endpoints.empty()) - FC_THROW_EXCEPTION( fc::unknown_host_exception, - "The host name can not be resolved: ${hostname}", - ("hostname", hostname) ); - return endpoints; - } - catch (const boost::bad_lexical_cast&) - { - FC_THROW("Bad port: ${port}", ("port", port_string)); - } - } - FC_CAPTURE_AND_RETHROW((in)) - } - - void node::add_seed_node(const std::string& endpoint_string) - { - INVOKE_IN_IMPL(add_seed_node, endpoint_string); - } - - /***** - * @brief add a list of nodes to seed the p2p network - * @param seeds a vector of url strings - * @param connect_immediately attempt a connection immediately + /*** + * @brief Helper to convert a string to a collection of endpoints + * + * This converts a string (i.e. "bitshares.eu:665535" to a collection of endpoints. + * NOTE: Throws an exception if not in correct format or was unable to resolve URL. 
+ * + * @param in the incoming string + * @returns a vector of endpoints */ - void node::add_seed_nodes(std::vector seeds) + std::vector node::resolve_string_to_ip_endpoints(const std::string& in) { - for(const std::string& endpoint_string : seeds ) - { - try { - INVOKE_IN_IMPL(add_seed_node, endpoint_string); - } catch( const fc::exception& e ) { - wlog( "caught exception ${e} while adding seed node ${endpoint}", - ("e", e.to_detail_string())("endpoint", endpoint_string) ); - } - } - - } - - void node::set_advertise_algorithm( std::string algo, const fc::optional>& advertise_list ) - { - my->set_advertise_algorithm( algo, advertise_list ); - } - + try + { + std::string::size_type colon_pos = in.find(':'); + if (colon_pos == std::string::npos) + FC_THROW("Missing required port number in endpoint string \"${endpoint_string}\"", + ("endpoint_string", in)); + std::string port_string = in.substr(colon_pos + 1); + try + { + uint16_t port = boost::lexical_cast(port_string); + + std::string hostname = in.substr(0, colon_pos); + std::vector endpoints = fc::resolve(hostname, port); + if (endpoints.empty()) + FC_THROW_EXCEPTION( fc::unknown_host_exception, + "The host name can not be resolved: ${hostname}", + ("hostname", hostname) ); + return endpoints; + } + catch (const boost::bad_lexical_cast&) + { + FC_THROW("Bad port: ${port}", ("port", port_string)); + } + } + FC_CAPTURE_AND_RETHROW((in)) + } + + void node::add_seed_node(const std::string& endpoint_string) + { + INVOKE_IN_IMPL(add_seed_node, endpoint_string); + } + + /***** + * @brief add a list of nodes to seed the p2p network + * @param seeds a vector of url strings + * @param connect_immediately attempt a connection immediately + */ + void node::add_seed_nodes(std::vector seeds) + { + for(const std::string& endpoint_string : seeds ) + { + try { + INVOKE_IN_IMPL(add_seed_node, endpoint_string); + } catch( const fc::exception& e ) { + wlog( "caught exception ${e} while adding seed node ${endpoint}", + ("e", e.to_detail_string())("endpoint", endpoint_string) ); + } + } + } + + void node::set_advertise_algorithm( std::string algo, const fc::optional>& advertise_list ) + { + my->set_advertise_algorithm( algo, advertise_list ); + } } } // end namespace graphene::net diff --git a/libraries/net/node_impl.hxx b/libraries/net/node_impl.hxx index 4c96410500..10809a2af9 100644 --- a/libraries/net/node_impl.hxx +++ b/libraries/net/node_impl.hxx @@ -16,78 +16,82 @@ namespace graphene { namespace net { namespace detail { namespace bmi = boost::multi_index; + class blockchain_tied_message_cache { -private: - static const uint32_t cache_duration_in_blocks = GRAPHENE_NET_MESSAGE_CACHE_DURATION_IN_BLOCKS; - - struct message_hash_index{}; - struct message_contents_hash_index{}; - struct block_clock_index{}; - struct message_info - { - message_hash_type message_hash; - message message_body; - uint32_t block_clock_when_received; - - // for network performance stats - message_propagation_data propagation_data; - fc::uint160_t message_contents_hash; // hash of whatever the message contains (if it's a transaction, this is the transaction id, if it's a block, it's the block_id) - - message_info( const message_hash_type& message_hash, - const message& message_body, - uint32_t block_clock_when_received, - const message_propagation_data& propagation_data, - fc::uint160_t message_contents_hash ) : + private: + static const uint32_t cache_duration_in_blocks = GRAPHENE_NET_MESSAGE_CACHE_DURATION_IN_BLOCKS; + + struct message_hash_index{}; + struct 
message_contents_hash_index{}; + struct block_clock_index{}; + struct message_info + { + message_hash_type message_hash; + message message_body; + uint32_t block_clock_when_received; + + // for network performance stats + message_propagation_data propagation_data; + // hash of whatever the message contains (if it's a transaction, this + // is the transaction id, if it's a block, it's the block_id) + fc::uint160_t message_contents_hash; + + message_info( const message_hash_type& message_hash, + const message& message_body, + uint32_t block_clock_when_received, + const message_propagation_data& propagation_data, + fc::uint160_t message_contents_hash ) : message_hash( message_hash ), message_body( message_body ), block_clock_when_received( block_clock_when_received ), propagation_data( propagation_data ), - message_contents_hash( message_contents_hash ) - {} - }; - typedef boost::multi_index_container - < message_info, - bmi::indexed_by< bmi::ordered_unique< bmi::tag, - bmi::member >, - bmi::ordered_non_unique< bmi::tag, - bmi::member >, - bmi::ordered_non_unique< bmi::tag, - bmi::member > > - > message_cache_container; - - message_cache_container _message_cache; - - uint32_t block_clock; - -public: - blockchain_tied_message_cache() : - block_clock( 0 ) - {} - void block_accepted(); - void cache_message( const message& message_to_cache, const message_hash_type& hash_of_message_to_cache, - const message_propagation_data& propagation_data, const fc::uint160_t& message_content_hash ); - message get_message( const message_hash_type& hash_of_message_to_lookup ); - message_propagation_data get_message_propagation_data( const fc::uint160_t& hash_of_message_contents_to_lookup ) const; - size_t size() const { return _message_cache.size(); } + message_contents_hash( message_contents_hash ) {} + }; + + typedef boost::multi_index_container + < message_info, + bmi::indexed_by< bmi::ordered_unique< + bmi::tag, + bmi::member >, + bmi::ordered_non_unique< bmi::tag, + bmi::member >, + bmi::ordered_non_unique< bmi::tag, + bmi::member > > + > message_cache_container; + + message_cache_container _message_cache; + + uint32_t block_clock; + + public: + blockchain_tied_message_cache() : + block_clock( 0 ) {} + void block_accepted(); + void cache_message( const message& message_to_cache, const message_hash_type& hash_of_message_to_cache, + const message_propagation_data& propagation_data, const fc::uint160_t& message_content_hash ); + message get_message( const message_hash_type& hash_of_message_to_lookup ); + message_propagation_data get_message_propagation_data( + const fc::uint160_t& hash_of_message_contents_to_lookup ) const; + size_t size() const { return _message_cache.size(); } }; // This specifies configuration info for the local node. It's stored as JSON // in the configuration directory (application data directory) struct node_configuration { - node_configuration() : accept_incoming_connections(true), wait_if_endpoint_is_busy(true) {} - - fc::ip::endpoint listen_endpoint; - bool accept_incoming_connections; - bool wait_if_endpoint_is_busy; - /** - * Originally, our p2p code just had a 'node-id' that was a random number identifying this node - * on the network. This is now a private key/public key pair, where the public key is used - * in place of the old random node-id. The private part is unused, but might be used in - * the future to support some notion of trusted peers. 
+ node_configuration() : accept_incoming_connections(true), wait_if_endpoint_is_busy(true) {} + + fc::ip::endpoint listen_endpoint; + bool accept_incoming_connections; + bool wait_if_endpoint_is_busy; + /** + * Originally, our p2p code just had a 'node-id' that was a random number identifying this node + * on the network. This is now a private key/public key pair, where the public key is used + * in place of the old random node-id. The private part is unused, but might be used in + * the future to support some notion of trusted peers. */ - fc::ecc::private_key private_key; + fc::ecc::private_key private_key; }; // when requesting items from peers, we want to prioritize any blocks before @@ -244,14 +248,14 @@ private: class node_impl : public peer_connection_delegate { - public: - class address_builder - { - public: - virtual void build( node_impl* impl, address_message& ) = 0; - protected: - address_info update_address_record( node_impl* impl, const peer_connection_ptr& active_peer); - }; + public: + class address_builder + { + public: + virtual void build( node_impl* impl, address_message& ) = 0; + protected: + address_info update_address_record( node_impl* impl, const peer_connection_ptr& active_peer); + }; #ifdef P2P_IN_DEDICATED_THREAD std::shared_ptr _thread; std::shared_ptr get_thread() { return _thread; } @@ -467,7 +471,7 @@ class node_impl : public peer_connection_delegate void on_message( peer_connection* originating_peer, const message& received_message ) override; - + void on_hello_message( peer_connection* originating_peer, const hello_message& hello_message_received ); @@ -609,7 +613,8 @@ class node_impl : public peer_connection_delegate }}} // end of namespace graphene::net::detail -FC_REFLECT(graphene::net::detail::node_configuration, (listen_endpoint) - (accept_incoming_connections) - (wait_if_endpoint_is_busy) - (private_key)); +FC_REFLECT(graphene::net::detail::node_configuration, + (listen_endpoint) + (accept_incoming_connections) + (wait_if_endpoint_is_busy) + (private_key)); diff --git a/tests/cli/main.cpp b/tests/cli/main.cpp index 46914ae3ed..f152b8ddde 100644 --- a/tests/cli/main.cpp +++ b/tests/cli/main.cpp @@ -63,6 +63,7 @@ #include #include "../common/genesis_file_util.hpp" +#include "../common/node_util.hpp" using std::exception; using std::cerr; diff --git a/tests/common/genesis_file_util.hpp b/tests/common/genesis_file_util.hpp index 9fd19367f9..8918e35b3b 100644 --- a/tests/common/genesis_file_util.hpp +++ b/tests/common/genesis_file_util.hpp @@ -4,57 +4,7 @@ #include #include - -#ifdef _WIN32 - #ifndef _WIN32_WINNT - #define _WIN32_WINNT 0x0501 - #endif - #include - #include -int sockInit(void) -{ - WSADATA wsa_data; - return WSAStartup(MAKEWORD(1,1), &wsa_data); -} -int sockQuit(void) -{ - return WSACleanup(); -} -#else - #include - #include - #include -#endif - -namespace graphene { namespace app { - - ////// - /// @brief attempt to find an available port on localhost - /// @returns an available port number, or -1 on error - ///// - int get_available_port() - { - struct sockaddr_in sin; - int socket_fd = socket(AF_INET, SOCK_STREAM, 0); - if (socket_fd == -1) - return -1; - sin.sin_family = AF_INET; - sin.sin_port = 0; - sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK); - if (::bind(socket_fd, (struct sockaddr*)&sin, sizeof(struct sockaddr_in)) == -1) - return -1; - socklen_t len = sizeof(sin); - if (getsockname(socket_fd, (struct sockaddr *)&sin, &len) == -1) - return -1; - #ifdef _WIN32 - closesocket(socket_fd); - #else - close(socket_fd); - #endif - return 
ntohs(sin.sin_port); - } - - namespace detail { +namespace graphene { namespace app { namespace detail { ///////// /// @brief forward declaration, using as a hack to generate a genesis.json file /// for testing diff --git a/tests/common/node_util.hpp b/tests/common/node_util.hpp new file mode 100644 index 0000000000..9cd9ac533e --- /dev/null +++ b/tests/common/node_util.hpp @@ -0,0 +1,51 @@ +#pragma once + +#ifdef _WIN32 + #ifndef _WIN32_WINNT + #define _WIN32_WINNT 0x0501 + #endif + #include + #include + int sockInit(void) + { + WSADATA wsa_data; + return WSAStartup(MAKEWORD(1,1), &wsa_data); + } + int sockQuit(void) + { + return WSACleanup(); + } +#else + #include + #include + #include +#endif + + +namespace graphene { namespace app { + /* + * @brief attempt to find an available port on localhost + * @returns an available port number, or -1 on error + */ + int get_available_port() + { + struct sockaddr_in sin; + int socket_fd = socket(AF_INET, SOCK_STREAM, 0); + if (socket_fd == -1) + return -1; + sin.sin_family = AF_INET; + sin.sin_port = 0; + sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK); + if (::bind(socket_fd, (struct sockaddr*)&sin, sizeof(struct sockaddr_in)) == -1) + return -1; + socklen_t len = sizeof(sin); + if (getsockname(socket_fd, (struct sockaddr *)&sin, &len) == -1) + return -1; +#ifdef _WIN32 + closesocket(socket_fd); +#else + close(socket_fd); +#endif + return ntohs(sin.sin_port); + } +} } // namespace graphene::app diff --git a/tests/tests/p2p_node_tests.cpp b/tests/tests/p2p_node_tests.cpp index 73800f9637..7972b94095 100644 --- a/tests/tests/p2p_node_tests.cpp +++ b/tests/tests/p2p_node_tests.cpp @@ -38,6 +38,7 @@ #include "../../libraries/net/node_impl.hxx" #include "../common/genesis_file_util.hpp" +#include "../common/node_util.hpp" /*** * A peer connection delegate @@ -48,30 +49,31 @@ class test_delegate : public graphene::net::peer_connection_delegate test_delegate() { } - void on_message(graphene::net::peer_connection* originating_peer, - const graphene::net::message& received_message) + void on_message( graphene::net::peer_connection* originating_peer, + const graphene::net::message& received_message ) { - elog("on_message was called with ${msg}", ("msg",received_message)); + elog( "on_message was called with ${msg}", ("msg",received_message) ); try { - graphene::net::address_request_message m = received_message.as(); - std::shared_ptr m_ptr = std::make_shared( m ); + graphene::net::address_request_message m = received_message.as< graphene::net::address_request_message >(); + std::shared_ptr m_ptr = std::make_shared< graphene::net::message >( m ); last_message = m_ptr; } catch (...) 
{ } } - void on_connection_closed(graphene::net::peer_connection* originating_peer) override {} - graphene::net::message get_message_for_item(const graphene::net::item_id& item) override + void on_connection_closed( graphene::net::peer_connection* originating_peer ) override {} + graphene::net::message get_message_for_item( const graphene::net::item_id& item ) override { return graphene::net::message(); } - std::shared_ptr last_message = nullptr; + std::shared_ptr< graphene::net::message > last_message = nullptr; }; class test_node : public graphene::net::node, public graphene::net::node_delegate { -public: - test_node(const std::string& name, const fc::path& config_dir, int port, int seed_port = -1) : node(name) + public: + test_node( const std::string& name, const fc::path& config_dir, int port, int seed_port = -1 ) + : node( name ) { node_name = name; } @@ -80,22 +82,23 @@ class test_node : public graphene::net::node, public graphene::net::node_delegat close(); } - void on_message(graphene::net::peer_connection_ptr originating_peer, const graphene::net::message& received_message) + void on_message( graphene::net::peer_connection_ptr originating_peer, const graphene::net::message& received_message ) { - my->get_thread()->async([&]() { + my->get_thread()->async( [&]() { my->on_message( originating_peer.get(), received_message ); }).wait(); } - std::pair, graphene::net::peer_connection_ptr> create_peer_connection(std::string url) + std::pair, graphene::net::peer_connection_ptr> + create_peer_connection( std::string url ) { std::pair, graphene::net::peer_connection_ptr> ret_val; - ret_val = this->my->get_thread()->async([&, &url = url](){ + ret_val = this->my->get_thread()->async( [&, &url = url](){ std::shared_ptr d{}; - graphene::net::peer_connection_ptr peer = graphene::net::peer_connection::make_shared(d.get()); - peer->set_remote_endpoint(fc::optional(fc::ip::endpoint::from_string(url))); - my->move_peer_to_active_list(peer); - return std::pair, graphene::net::peer_connection_ptr>(d, peer); + graphene::net::peer_connection_ptr peer = graphene::net::peer_connection::make_shared( d.get() ); + peer->set_remote_endpoint( fc::optional( fc::ip::endpoint::from_string( url )) ); + my->move_peer_to_active_list( peer ); + return std::pair, graphene::net::peer_connection_ptr>( d, peer ); }).wait(); return ret_val; } @@ -109,22 +112,24 @@ class test_node : public graphene::net::node, public graphene::net::node_delegat { return false; } void handle_transaction( const graphene::net::trx_message& trx_msg ) { - elog("${name} was asked to handle a transaction", ("name", node_name)); + elog( "${name} was asked to handle a transaction", ("name", node_name) ); } void handle_message( const graphene::net::message& message_to_process ) { - elog("${name} received a message", ("name",node_name)); + elog( "${name} received a message", ("name",node_name) ); } std::vector get_block_ids( const std::vector& blockchain_synopsis, - uint32_t& remaining_item_count, uint32_t limit = 2000) - { return std::vector(); } + uint32_t& remaining_item_count, uint32_t limit = 2000 ) + { + return std::vector(); + } graphene::net::message get_item( const graphene::net::item_id& id ) { elog("${name} get_item was called", ("name",node_name)); return graphene::net::message(); } - graphene::net::chain_id_type get_chain_id()const + graphene::net::chain_id_type get_chain_id() const { elog("${name} get_chain_id was called", ("name",node_name)); return graphene::net::chain_id_type(); @@ -132,36 +137,40 @@ class test_node : public 
graphene::net::node, public graphene::net::node_delegat std::vector get_blockchain_synopsis( const graphene::net::item_hash_t& reference_point, uint32_t number_of_blocks_after_reference_point) - { return std::vector(); } + { + return std::vector(); + } void sync_status( uint32_t item_type, uint32_t item_count ) {} void connection_count_changed( uint32_t c ) { - elog("${name} connection_count_change was called", ("name",node_name)); + elog( "${name} connection_count_change was called", ("name",node_name) ); } - uint32_t get_block_number(const graphene::net::item_hash_t& block_id) + uint32_t get_block_number( const graphene::net::item_hash_t& block_id ) { - elog("${name} get_block_number was called", ("name",node_name)); - return 0; + elog( "${name} get_block_number was called", ("name",node_name) ); + return 0; } - fc::time_point_sec get_block_time(const graphene::net::item_hash_t& block_id) + fc::time_point_sec get_block_time( const graphene::net::item_hash_t& block_id ) { - elog("${name} get_block_time was called", ("name",node_name)); + elog( "${name} get_block_time was called", ("name",node_name) ); return fc::time_point_sec(); } - graphene::net::item_hash_t get_head_block_id() const + graphene::net::item_hash_t get_head_block_id() const { - elog("${name} get_head_block_id was called", ("name",node_name)); + elog( "${name} get_head_block_id was called", ("name",node_name) ); return graphene::net::item_hash_t(); } - uint32_t estimate_last_known_fork_from_git_revision_timestamp(uint32_t unix_timestamp) const - { return 0; } - void error_encountered(const std::string& message, const fc::oexception& error) + uint32_t estimate_last_known_fork_from_git_revision_timestamp( uint32_t unix_timestamp ) const + { + return 0; + } + void error_encountered( const std::string& message, const fc::oexception& error ) { - elog("${name} error_encountered was called. Message: ${msg}", ("name",node_name)("msg", message)); + elog( "${name} error_encountered was called. Message: ${msg}", ("name",node_name)("msg", message) ); } uint8_t get_current_block_interval_in_seconds() const { - elog("${name} get_current_block_interval_in_seconds was called", ("name",node_name)); + elog( "${name} get_current_block_interval_in_seconds was called", ("name",node_name) ); return 0; } @@ -171,11 +180,19 @@ class test_node : public graphene::net::node, public graphene::net::node_delegat class test_peer : public graphene::net::peer_connection { -public: + public: + test_peer(graphene::net::peer_connection_delegate* del) : graphene::net::peer_connection(del) + { + message_received = nullptr; + } + std::shared_ptr message_received; - void send_message(const graphene::net::message& message_to_send, size_t message_send_time_field_offset = (size_t)-1) override + + void send_message( const graphene::net::message& message_to_send, + size_t message_send_time_field_offset = (size_t)-1 ) override { - try { + try + { // make a copy graphene::net::address_message m = message_to_send.as(); std::shared_ptr msg_ptr = std::make_shared(m); @@ -185,18 +202,15 @@ class test_peer : public graphene::net::peer_connection } catch (...) 
{} message_received = nullptr; } -public: - test_peer(graphene::net::peer_connection_delegate* del) : graphene::net::peer_connection(del) { - message_received = nullptr; - } }; -void test_address_message( std::shared_ptr msg, std::size_t num_elements) +void test_address_message( std::shared_ptr msg, std::size_t num_elements ) { if (msg != nullptr) { - graphene::net::address_message addr_msg = static_cast( msg->as() ); - BOOST_CHECK_EQUAL(addr_msg.addresses.size(), num_elements); + graphene::net::address_message addr_msg = static_cast( + msg->as() ); + BOOST_CHECK_EQUAL( addr_msg.addresses.size(), num_elements ); } else { @@ -215,7 +229,7 @@ BOOST_AUTO_TEST_CASE( disable_peer_advertising ) // create a node int node1_port = graphene::app::get_available_port(); fc::temp_directory node1_dir; - test_node node1("Node1", node1_dir.path(), node1_port); + test_node node1( "Node1", node1_dir.path(), node1_port ); node1.disable_peer_advertising(); // get something in their list of connections @@ -224,13 +238,13 @@ BOOST_AUTO_TEST_CASE( disable_peer_advertising ) // verify that they do not share it with others test_delegate peer3_delegate{}; - std::shared_ptr peer3_ptr = std::make_shared(&peer3_delegate); + std::shared_ptr peer3_ptr = std::make_shared( &peer3_delegate ); graphene::net::address_request_message req; node1.on_message( peer3_ptr, req ); // check the results std::shared_ptr msg = peer3_ptr->message_received; - test_address_message(msg, 0); + test_address_message( msg, 0 ); } BOOST_AUTO_TEST_CASE( set_nothing_advertise_algorithm ) @@ -238,7 +252,7 @@ BOOST_AUTO_TEST_CASE( set_nothing_advertise_algorithm ) // create a node int node1_port = graphene::app::get_available_port(); fc::temp_directory node1_dir; - test_node node1("Node1", node1_dir.path(), node1_port); + test_node node1( "Node1", node1_dir.path(), node1_port ); node1.set_advertise_algorithm( "nothing" ); // get something in their list of connections @@ -247,13 +261,13 @@ BOOST_AUTO_TEST_CASE( set_nothing_advertise_algorithm ) // verify that they do not share it with others test_delegate peer3_delegate{}; - std::shared_ptr peer3_ptr = std::make_shared(&peer3_delegate); + std::shared_ptr peer3_ptr = std::make_shared( &peer3_delegate ); graphene::net::address_request_message req; node1.on_message( peer3_ptr, req ); // check the results std::shared_ptr msg = peer3_ptr->message_received; - test_address_message(msg, 0); + test_address_message( msg, 0 ); } BOOST_AUTO_TEST_CASE( advertise_list ) @@ -262,11 +276,11 @@ BOOST_AUTO_TEST_CASE( advertise_list ) // set up my node int my_node_port = graphene::app::get_available_port(); fc::temp_directory my_node_dir; - test_node my_node("Hello", my_node_dir.path(), my_node_port); + test_node my_node( "Hello", my_node_dir.path(), my_node_port ); my_node.set_advertise_algorithm( "list", advert_list ); test_delegate del{}; // a fake peer - std::shared_ptr my_peer(new test_peer{&del}); + std::shared_ptr my_peer( new test_peer{&del} ); // act like my_node received an address_request message from my_peer graphene::net::address_request_message address_request_message_received; @@ -278,19 +292,19 @@ BOOST_AUTO_TEST_CASE( advertise_list ) BOOST_AUTO_TEST_CASE( exclude_list ) { - std::vector ex_list = { "127.0.0.1:8090"}; + std::vector ex_list = { "127.0.0.1:8090" }; // set up my node int my_node_port = graphene::app::get_available_port(); fc::temp_directory my_node_dir; - test_node my_node("Hello", my_node_dir.path(), my_node_port); + test_node my_node( "Hello", my_node_dir.path(), my_node_port ); 
my_node.set_advertise_algorithm( "exclude_list", ex_list ); // some peers std::pair, graphene::net::peer_connection_ptr> node2_rslts - = my_node.create_peer_connection("127.0.0.1:8089"); + = my_node.create_peer_connection( "127.0.0.1:8089" ); std::pair, graphene::net::peer_connection_ptr> node3_rslts - = my_node.create_peer_connection("127.0.0.1:8090"); + = my_node.create_peer_connection( "127.0.0.1:8090" ); std::pair, graphene::net::peer_connection_ptr> node4_rslts - = my_node.create_peer_connection("127.0.0.1:8091"); + = my_node.create_peer_connection( "127.0.0.1:8091" ); // act like my_node received an address_request message from my_peer test_delegate del_4{}; From a12538417c49066d0d804169b769b7c645dfd4c8 Mon Sep 17 00:00:00 2001 From: John Jones Date: Mon, 8 Jul 2019 05:48:23 -0500 Subject: [PATCH 012/338] rename advertise_list for clarity --- libraries/net/include/graphene/net/node.hpp | 2 +- libraries/net/node.cpp | 18 +++++++++--------- libraries/net/node_impl.hxx | 2 +- tests/tests/p2p_node_tests.cpp | 2 +- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/libraries/net/include/graphene/net/node.hpp b/libraries/net/include/graphene/net/node.hpp index 7fe73fa943..6907d1c2c0 100644 --- a/libraries/net/include/graphene/net/node.hpp +++ b/libraries/net/include/graphene/net/node.hpp @@ -200,7 +200,7 @@ namespace graphene { namespace net { void set_node_delegate( node_delegate* del ); void set_advertise_algorithm( std::string algo, - const fc::optional>& advertise_list = fc::optional>() ); + const fc::optional>& advertise_or_exclude_list = fc::optional>() ); void load_configuration( const fc::path& configuration_directory ); diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 8d2d947c02..4da089b82a 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -195,8 +195,8 @@ namespace graphene { namespace net { namespace detail { { FC_ASSERT( address_list.valid(), "advertise-peer-list must be included" ); - advertise_list.reserve( address_list->size() ); - auto& list = advertise_list; + advertise_or_exclude_list.reserve( address_list->size() ); + auto& list = advertise_or_exclude_list; std::for_each( address_list->begin(), address_list->end(), [&list]( std::string str ) { // ignore fc exceptions (like poorly formatted endpoints) try @@ -218,10 +218,10 @@ namespace graphene { namespace net { namespace detail { void build(node_impl* impl, address_message& reply) { - reply.addresses = advertise_list; + reply.addresses = advertise_or_exclude_list; } private: - std::vector advertise_list; + std::vector advertise_or_exclude_list; }; /**** @@ -1669,15 +1669,15 @@ namespace graphene { namespace net { namespace detail { } void node_impl::set_advertise_algorithm( std::string algo, - const fc::optional>& advertise_list ) + const fc::optional>& advertise_or_exclude_list ) { if (algo == "exclude_list") { - _address_builder = std::make_shared(advertise_list); + _address_builder = std::make_shared(advertise_or_exclude_list); } else if (algo == "list") { - _address_builder = std::make_shared(advertise_list); + _address_builder = std::make_shared(advertise_or_exclude_list); } else if (algo == "nothing") { @@ -5179,9 +5179,9 @@ namespace graphene { namespace net { namespace detail { } } - void node::set_advertise_algorithm( std::string algo, const fc::optional>& advertise_list ) + void node::set_advertise_algorithm( std::string algo, const fc::optional>& advertise_or_exclude_list ) { - my->set_advertise_algorithm( algo, advertise_list ); + my->set_advertise_algorithm( 
algo, advertise_or_exclude_list );
   }
 } } // end namespace graphene::net

diff --git a/libraries/net/node_impl.hxx b/libraries/net/node_impl.hxx
index 10809a2af9..3711a74265 100644
--- a/libraries/net/node_impl.hxx
+++ b/libraries/net/node_impl.hxx
@@ -573,7 +573,7 @@ class node_impl : public peer_connection_delegate
     void listen_to_p2p_network();
     void connect_to_p2p_network();
     void add_node( const fc::ip::endpoint& ep );
-    void set_advertise_algorithm( std::string algo, const fc::optional<std::vector<std::string>>& advertise_list );
+    void set_advertise_algorithm( std::string algo, const fc::optional<std::vector<std::string>>& advertise_or_exclude_list );
     void add_seed_node( const std::string& seed_string );
     void initiate_connect_to(const peer_connection_ptr& peer);
     void connect_to_endpoint(const fc::ip::endpoint& ep);

diff --git a/tests/tests/p2p_node_tests.cpp b/tests/tests/p2p_node_tests.cpp
index 7972b94095..0d8c0391dc 100644
--- a/tests/tests/p2p_node_tests.cpp
+++ b/tests/tests/p2p_node_tests.cpp
@@ -270,7 +270,7 @@ BOOST_AUTO_TEST_CASE( set_nothing_advertise_algorithm )
   test_address_message( msg, 0 );
 }
 
-BOOST_AUTO_TEST_CASE( advertise_list )
+BOOST_AUTO_TEST_CASE( advertise_or_exclude_list )
 {
   std::vector<std::string> advert_list = { "127.0.0.1:8090"};
   // set up my node

From e0f7a1fa5be51506e8a60b783ef5a9d501d90cdb Mon Sep 17 00:00:00 2001
From: John Jones
Date: Mon, 8 Jul 2019 06:14:05 -0500
Subject: [PATCH 013/338] clarify / fix comments

---
 libraries/net/include/graphene/net/node.hpp | 16 +++++++++++++---
 libraries/net/node.cpp                      | 16 +---------------
 2 files changed, 14 insertions(+), 18 deletions(-)

diff --git a/libraries/net/include/graphene/net/node.hpp b/libraries/net/include/graphene/net/node.hpp
index 6907d1c2c0..b3c8a7a29f 100644
--- a/libraries/net/include/graphene/net/node.hpp
+++ b/libraries/net/include/graphene/net/node.hpp
@@ -199,12 +199,24 @@ namespace graphene { namespace net {
       void close();
 
       void set_node_delegate( node_delegate* del );
+      /***
+       * Allows the caller to determine how to respond to requests for peers
+       * @param algo the algorithm to use ("exclude_list", "list", "nothing", "all")
+       * @param advertise_or_exclude_list a list of nodes to
+       *    advertise (if algo is "list") or exclude (if algo is "exclude_list")
+       */
       void set_advertise_algorithm( std::string algo,
            const fc::optional<std::vector<std::string>>& advertise_or_exclude_list = fc::optional<std::vector<std::string>>() );
 
       void load_configuration( const fc::path& configuration_directory );
 
-      void listen_on_endpoint( const fc::ip::endpoint& ep, bool wait_if_not_available );
+      /**
+       * Specifies the network interface and port upon which incoming
+       * connections should be accepted.
+ * @param ep the endpoint (network interface and port) + * @param wait_if_not_available keep retrying if port is not available + */ + void listen_on_endpoint( const fc::ip::endpoint& ep, bool wait_if_not_available ); virtual void listen_to_p2p_network(); virtual void connect_to_p2p_network(); @@ -220,14 +232,12 @@ namespace graphene { namespace net { * @brief Add an endpoint as a seed to the p2p network * * @param seed_string the url - * @param connect_immediately will start the connection process immediately */ void add_seed_node( const std::string& seed_string ); /***** * @brief add a list of nodes to seed the p2p network * @param seeds a vector of url strings - * @param connect_immediately attempt a connection immediately */ void add_seed_nodes( std::vector seeds ); diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 4da089b82a..2e09b6ea13 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -5118,17 +5118,8 @@ namespace graphene { namespace net { namespace detail { } // end namespace detail - /*** - * @brief Helper to convert a string to a collection of endpoints - * - * This converts a string (i.e. "bitshares.eu:665535" to a collection of endpoints. - * NOTE: Throws an exception if not in correct format or was unable to resolve URL. - * - * @param in the incoming string - * @returns a vector of endpoints - */ std::vector node::resolve_string_to_ip_endpoints(const std::string& in) - { + { try { std::string::size_type colon_pos = in.find(':'); @@ -5161,11 +5152,6 @@ namespace graphene { namespace net { namespace detail { INVOKE_IN_IMPL(add_seed_node, endpoint_string); } - /***** - * @brief add a list of nodes to seed the p2p network - * @param seeds a vector of url strings - * @param connect_immediately attempt a connection immediately - */ void node::add_seed_nodes(std::vector seeds) { for(const std::string& endpoint_string : seeds ) From 6bc930c0225f4df28412433eb06bef0edb690fa4 Mon Sep 17 00:00:00 2001 From: John Jones Date: Mon, 8 Jul 2019 08:35:59 -0500 Subject: [PATCH 014/338] Advertise actual connected nodes, not fake ones --- libraries/net/include/graphene/net/node.hpp | 7 ---- libraries/net/node.cpp | 36 ++++++++++++++------- libraries/net/node_impl.hxx | 11 +++++++ tests/tests/p2p_node_tests.cpp | 9 +++++- 4 files changed, 44 insertions(+), 19 deletions(-) diff --git a/libraries/net/include/graphene/net/node.hpp b/libraries/net/include/graphene/net/node.hpp index b3c8a7a29f..5fb7adc321 100644 --- a/libraries/net/include/graphene/net/node.hpp +++ b/libraries/net/include/graphene/net/node.hpp @@ -228,13 +228,6 @@ namespace graphene { namespace net { */ void add_node( const fc::ip::endpoint& ep ); - /**** - * @brief Add an endpoint as a seed to the p2p network - * - * @param seed_string the url - */ - void add_seed_node( const std::string& seed_string ); - /***** * @brief add a list of nodes to seed the p2p network * @param seeds a vector of url strings diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 2e09b6ea13..3d93d878b9 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -218,7 +218,17 @@ namespace graphene { namespace net { namespace detail { void build(node_impl* impl, address_message& reply) { - reply.addresses = advertise_or_exclude_list; + std::vector ret_val; + // only pass those that are in the list AND we are connected to + std::for_each(advertise_or_exclude_list.begin(), advertise_or_exclude_list.end(), + [&impl = impl, reply=reply, &ret_val=ret_val] + (const graphene::net::address_info& addr) + { + 
graphene::net::peer_connection_ptr peer_conn = impl->get_active_connection_to_endpoint(addr.remote_endpoint); + if ( peer_conn != peer_connection_ptr() ) + ret_val.push_back(addr); + }); + reply.addresses = ret_val; } private: std::vector advertise_or_exclude_list; @@ -4246,15 +4256,24 @@ namespace graphene { namespace net { namespace detail { initiate_connect_to(new_peer); } - peer_connection_ptr node_impl::get_connection_to_endpoint( const fc::ip::endpoint& remote_endpoint ) - { + peer_connection_ptr node_impl::get_active_connection_to_endpoint( const fc::ip::endpoint& remote_endpoint) + { VERIFY_CORRECT_THREAD(); for( const peer_connection_ptr& active_peer : _active_connections ) { - fc::optional endpoint_for_this_peer( active_peer->get_remote_endpoint() ); - if( endpoint_for_this_peer && *endpoint_for_this_peer == remote_endpoint ) - return active_peer; + fc::optional endpoint_for_this_peer( active_peer->get_remote_endpoint() ); + if( endpoint_for_this_peer && *endpoint_for_this_peer == remote_endpoint ) + return active_peer; } + return peer_connection_ptr(); + } + + peer_connection_ptr node_impl::get_connection_to_endpoint( const fc::ip::endpoint& remote_endpoint ) + { + VERIFY_CORRECT_THREAD(); + peer_connection_ptr active_ptr = get_active_connection_to_endpoint( remote_endpoint ); + if ( active_ptr != peer_connection_ptr() ) + return active_ptr; for( const peer_connection_ptr& handshaking_peer : _handshaking_connections ) { fc::optional endpoint_for_this_peer( handshaking_peer->get_remote_endpoint() ); @@ -5147,11 +5166,6 @@ namespace graphene { namespace net { namespace detail { FC_CAPTURE_AND_RETHROW((in)) } - void node::add_seed_node(const std::string& endpoint_string) - { - INVOKE_IN_IMPL(add_seed_node, endpoint_string); - } - void node::add_seed_nodes(std::vector seeds) { for(const std::string& endpoint_string : seeds ) diff --git a/libraries/net/node_impl.hxx b/libraries/net/node_impl.hxx index 3711a74265..172340947d 100644 --- a/libraries/net/node_impl.hxx +++ b/libraries/net/node_impl.hxx @@ -555,6 +555,17 @@ class node_impl : public peer_connection_delegate void move_peer_to_closing_list(const peer_connection_ptr& peer); void move_peer_to_terminating_list(const peer_connection_ptr& peer); + /*** + * Look for an active connection at the given address + * @param remote_endpoint the address we are interested in + * @returns the connection, or peer_connection_ptr() if not found + */ + peer_connection_ptr get_active_connection_to_endpoint( const fc::ip::endpoint& remote_endpoint ); + /*** + * Look for a connection that is either active or currently in the handshaking process + * @param remote_endpoint the address we are interested in + * @returns the connection, or peer_connection_ptr() if not found + */ peer_connection_ptr get_connection_to_endpoint( const fc::ip::endpoint& remote_endpoint ); void dump_node_status(); diff --git a/tests/tests/p2p_node_tests.cpp b/tests/tests/p2p_node_tests.cpp index 0d8c0391dc..cb642ee84b 100644 --- a/tests/tests/p2p_node_tests.cpp +++ b/tests/tests/p2p_node_tests.cpp @@ -278,10 +278,17 @@ BOOST_AUTO_TEST_CASE( advertise_or_exclude_list ) fc::temp_directory my_node_dir; test_node my_node( "Hello", my_node_dir.path(), my_node_port ); my_node.set_advertise_algorithm( "list", advert_list ); - test_delegate del{}; + // a fake peer + test_delegate del{}; std::shared_ptr my_peer( new test_peer{&del} ); + // add 2 connections, 1 of which appears on the advertise_list + std::pair, graphene::net::peer_connection_ptr> node1_rslts + = 
my_node.create_peer_connection("127.0.0.1:8089"); + std::pair, graphene::net::peer_connection_ptr> node2_reslts + = my_node.create_peer_connection("127.0.0.1:8090"); + // act like my_node received an address_request message from my_peer graphene::net::address_request_message address_request_message_received; my_node.on_message( my_peer, address_request_message_received ); From 40bd1bdc582c925f31e87840ca56ee7b56f878cd Mon Sep 17 00:00:00 2001 From: John Jones Date: Mon, 8 Jul 2019 10:21:30 -0500 Subject: [PATCH 015/338] Add separate parameter for connect to new peers --- libraries/app/application.cpp | 4 ++++ libraries/net/include/graphene/net/node.hpp | 13 ++++++++++--- libraries/net/node.cpp | 17 ++++++++++++++--- libraries/net/node_impl.hxx | 2 ++ 4 files changed, 30 insertions(+), 6 deletions(-) diff --git a/libraries/app/application.cpp b/libraries/app/application.cpp index eac0ab14be..31f87293d8 100644 --- a/libraries/app/application.cpp +++ b/libraries/app/application.cpp @@ -173,6 +173,9 @@ void application_impl::reset_p2p_node(const fc::path& data_dir) if ( _options->count("accept_incoming_connections") ) _p2p_network->accept_incoming_connections( _options->at("accept_incoming_connections").as() ); + if ( _options->count("connect-to-new-peers") ) + _p2p_network->connect_to_new_peers( _options->at( "connect_to_new_peers" ).as() ); + _p2p_network->connect_to_p2p_network(); _p2p_network->sync_from(net::item_id(net::core_message_type_enum::block_message_type, _chain_db->head_block_id()), @@ -1055,6 +1058,7 @@ void application::set_program_options(boost::program_options::options_descriptio "For database_api_impl::get_withdraw_permissions_by_recipient to set max limit value") "For database_api_impl::get_order_book to set its default limit value as 50") ("accept-incoming-connections", bpo::value()->implicit_value(true), "Accept incoming connections") + ("connect-to-new-peers", bpo::value()->implicit_value(true), "Connect to new peers") ("advertise-peer-algorithm", bpo::value()->implicit_value("all"), "Determines which peers are advertised. Algorithms: 'all', 'nothing', 'list', exclude_list'") ("advertise-peer-list", bpo::value>()->composing(), diff --git a/libraries/net/include/graphene/net/node.hpp b/libraries/net/include/graphene/net/node.hpp index 5fb7adc321..46d1059d5c 100644 --- a/libraries/net/include/graphene/net/node.hpp +++ b/libraries/net/include/graphene/net/node.hpp @@ -250,10 +250,17 @@ namespace graphene { namespace net { */ static std::vector resolve_string_to_ip_endpoints( const std::string& in ); - /** - * Call with true to enable listening for incoming connections + /** + * listen for incoming connections + * @param accept set to true to listen for incoming connections, false otherwise */ - void accept_incoming_connections( bool accept ); + void accept_incoming_connections( bool accept ); + + /*** + * When new connections are advertised, attempt a connection + * @param connect true to attempt new connections, false otherwise + */ + void connect_to_new_peers( bool connect ); /** * Specifies the port upon which incoming connections should be accepted. 
diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp
index 3d93d878b9..e3cf28c8c7 100644
--- a/libraries/net/node.cpp
+++ b/libraries/net/node.cpp
@@ -1708,12 +1708,11 @@ namespace graphene { namespace net { namespace detail {
       std::vector updated_addresses = address_message_received.addresses;
       for (address_info& address : updated_addresses)
         address.last_seen_time = fc::time_point_sec(fc::time_point::now());
-      if ( _node_configuration.accept_incoming_connections )
+      if ( _node_configuration.connect_to_new_peers )
       {
-        if ( merge_address_info_with_potential_peer_database(updated_addresses))
+        if ( merge_address_info_with_potential_peer_database(updated_addresses) )
           trigger_p2p_network_connect_loop();
       }
-
       if (_handshaking_connections.find(originating_peer->shared_from_this()) != _handshaking_connections.end())
       {
         // if we were handshaking, we need to continue with the next step in handshaking (which is either
@@ -4426,6 +4425,13 @@ namespace graphene { namespace net { namespace detail {
       save_node_configuration();
     }

+    void node_impl::connect_to_new_peers( bool connect )
+    {
+      VERIFY_CORRECT_THREAD();
+      _node_configuration.connect_to_new_peers = connect;
+      save_node_configuration();
+    }
+
     void node_impl::listen_on_port( uint16_t port, bool wait_if_not_available )
     {
       VERIFY_CORRECT_THREAD();
@@ -4784,6 +4790,11 @@ namespace graphene { namespace net { namespace detail {
       INVOKE_IN_IMPL(accept_incoming_connections, accept);
     }

+    void node::connect_to_new_peers( bool connect )
+    {
+      INVOKE_IN_IMPL( connect_to_new_peers, connect );
+    }
+
     void node::listen_on_port( uint16_t port, bool wait_if_not_available )
     {
       INVOKE_IN_IMPL(listen_on_port, port, wait_if_not_available);
diff --git a/libraries/net/node_impl.hxx b/libraries/net/node_impl.hxx
index 172340947d..95e6a650c3 100644
--- a/libraries/net/node_impl.hxx
+++ b/libraries/net/node_impl.hxx
@@ -84,6 +84,7 @@ struct node_configuration
   fc::ip::endpoint listen_endpoint;
   bool accept_incoming_connections;
+  bool connect_to_new_peers;
   bool wait_if_endpoint_is_busy;

   /**
    * Originally, our p2p code just had a 'node-id' that was a random number identifying this node
@@ -590,6 +591,7 @@ class node_impl : public peer_connection_delegate
     void connect_to_endpoint(const fc::ip::endpoint& ep);
     void listen_on_endpoint(const fc::ip::endpoint& ep , bool wait_if_not_available);
     void accept_incoming_connections(bool accept);
+    void connect_to_new_peers( bool connect );
     void listen_on_port( uint16_t port, bool wait_if_not_available );
     fc::ip::endpoint get_actual_listening_endpoint() const;

From 8e223312468fb4de7f592cef80c1208937a823ac Mon Sep 17 00:00:00 2001
From: John Jones
Date: Mon, 8 Jul 2019 12:39:29 -0500
Subject: [PATCH 016/338] disable firewall check when !connect_to_new_peers

---
 libraries/net/node.cpp | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp
index e3cf28c8c7..d4ba26e0d7 100644
--- a/libraries/net/node.cpp
+++ b/libraries/net/node.cpp
@@ -3219,8 +3219,9 @@ namespace graphene { namespace net { namespace detail {
       // we're being asked to check another node
      // first, find out if we're currently connected to that node. 
If we are, we // can't perform the test - if (is_already_connected_to_id(check_firewall_message_received.node_id) || - is_connection_to_endpoint_in_progress(check_firewall_message_received.endpoint_to_check)) + if ( !_node_configuration.connect_to_new_peers || + ( is_already_connected_to_id(check_firewall_message_received.node_id) || + is_connection_to_endpoint_in_progress(check_firewall_message_received.endpoint_to_check ))) { check_firewall_reply_message reply; reply.node_id = check_firewall_message_received.node_id; From ffa871b51e3129278cf37e8148f75e21e59671d7 Mon Sep 17 00:00:00 2001 From: John Jones Date: Mon, 8 Jul 2019 14:20:25 -0500 Subject: [PATCH 017/338] adjust set_ method names, code cleanup --- libraries/app/application.cpp | 10 ++++----- libraries/net/include/graphene/net/node.hpp | 17 ++++++++------- libraries/net/node.cpp | 24 ++++++++++----------- libraries/net/node_impl.hxx | 11 +++++----- 4 files changed, 32 insertions(+), 30 deletions(-) diff --git a/libraries/app/application.cpp b/libraries/app/application.cpp index 31f87293d8..fe5842c4a0 100644 --- a/libraries/app/application.cpp +++ b/libraries/app/application.cpp @@ -164,17 +164,17 @@ void application_impl::reset_p2p_node(const fc::path& data_dir) } if( _options->count("p2p-endpoint") ) - _p2p_network->listen_on_endpoint(fc::ip::endpoint::from_string(_options->at("p2p-endpoint").as()), true); + _p2p_network->set_listen_endpoint(fc::ip::endpoint::from_string(_options->at("p2p-endpoint").as()), true); else - _p2p_network->listen_on_port(0, false); + _p2p_network->set_listen_port(0, false); _p2p_network->listen_to_p2p_network(); ilog("Configured p2p node to listen on ${ip}", ("ip", _p2p_network->get_actual_listening_endpoint())); - if ( _options->count("accept_incoming_connections") ) - _p2p_network->accept_incoming_connections( _options->at("accept_incoming_connections").as() ); + if ( _options->count("accept-incoming-connections") ) + _p2p_network->set_accept_incoming_connections( _options->at("accept-incoming-connections").as() ); if ( _options->count("connect-to-new-peers") ) - _p2p_network->connect_to_new_peers( _options->at( "connect_to_new_peers" ).as() ); + _p2p_network->set_connect_to_new_peers( _options->at( "connect-to-new-peers" ).as() ); _p2p_network->connect_to_p2p_network(); _p2p_network->sync_from(net::item_id(net::core_message_type_enum::block_message_type, diff --git a/libraries/net/include/graphene/net/node.hpp b/libraries/net/include/graphene/net/node.hpp index 46d1059d5c..bc2c80be8a 100644 --- a/libraries/net/include/graphene/net/node.hpp +++ b/libraries/net/include/graphene/net/node.hpp @@ -206,7 +206,8 @@ namespace graphene { namespace net { * advertise (if algo = "list") or exclude (if algo is "exclude") */ void set_advertise_algorithm( std::string algo, - const fc::optional>& advertise_or_exclude_list = fc::optional>() ); + const fc::optional>& advertise_or_exclude_list + = fc::optional>() ); void load_configuration( const fc::path& configuration_directory ); @@ -216,7 +217,7 @@ namespace graphene { namespace net { * @param ep the endpoint (network interface and port) * @param wait_if_not_available keep retrying if port is not available */ - void listen_on_endpoint( const fc::ip::endpoint& ep, bool wait_if_not_available ); + void set_listen_endpoint( const fc::ip::endpoint& ep, bool wait_if_not_available ); virtual void listen_to_p2p_network(); virtual void connect_to_p2p_network(); @@ -251,16 +252,16 @@ namespace graphene { namespace net { static std::vector resolve_string_to_ip_endpoints( 
const std::string& in ); /** - * listen for incoming connections + * enable/disable listening for incoming connections * @param accept set to true to listen for incoming connections, false otherwise */ - void accept_incoming_connections( bool accept ); + void set_accept_incoming_connections( bool accept ); /*** - * When new connections are advertised, attempt a connection + * enable/disable connection attempts when new connections are advertised * @param connect true to attempt new connections, false otherwise */ - void connect_to_new_peers( bool connect ); + void set_connect_to_new_peers( bool connect ); /** * Specifies the port upon which incoming connections should be accepted. @@ -270,11 +271,11 @@ namespace graphene { namespace net { * available. If false and the port is not available, * just choose a random available port */ - void listen_on_port(uint16_t port, bool wait_if_not_available); + void set_listen_port( uint16_t port, bool wait_if_not_available ); /** * Returns the endpoint the node is listening on. This is usually the same - * as the value previously passed in to listen_on_endpoint, unless we + * as the value previously passed in to set_listen_endpoint, unless we * were unable to bind to that port. */ virtual fc::ip::endpoint get_actual_listening_endpoint() const; diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index d4ba26e0d7..4e24bf393d 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -4411,7 +4411,7 @@ namespace graphene { namespace net { namespace detail { dlog("Disconnecting from ${peer} for ${reason}", ("peer",peer_to_disconnect->get_remote_endpoint()) ("reason",reason_for_disconnect)); } - void node_impl::listen_on_endpoint( const fc::ip::endpoint& ep, bool wait_if_not_available ) + void node_impl::set_listen_endpoint( const fc::ip::endpoint& ep, bool wait_if_not_available ) { VERIFY_CORRECT_THREAD(); _node_configuration.listen_endpoint = ep; @@ -4419,21 +4419,21 @@ namespace graphene { namespace net { namespace detail { save_node_configuration(); } - void node_impl::accept_incoming_connections(bool accept) + void node_impl::set_accept_incoming_connections(bool accept) { VERIFY_CORRECT_THREAD(); _node_configuration.accept_incoming_connections = accept; save_node_configuration(); } - void node_impl::connect_to_new_peers( bool connect ) + void node_impl::set_connect_to_new_peers( bool connect ) { VERIFY_CORRECT_THREAD(); _node_configuration.connect_to_new_peers = connect; save_node_configuration(); } - void node_impl::listen_on_port( uint16_t port, bool wait_if_not_available ) + void node_impl::set_listen_port( uint16_t port, bool wait_if_not_available ) { VERIFY_CORRECT_THREAD(); _node_configuration.listen_endpoint = fc::ip::endpoint( fc::ip::address(), port ); @@ -4781,24 +4781,24 @@ namespace graphene { namespace net { namespace detail { INVOKE_IN_IMPL(connect_to_endpoint, remote_endpoint); } - void node::listen_on_endpoint(const fc::ip::endpoint& ep , bool wait_if_not_available) + void node::set_listen_endpoint(const fc::ip::endpoint& ep , bool wait_if_not_available) { - INVOKE_IN_IMPL(listen_on_endpoint, ep, wait_if_not_available); + INVOKE_IN_IMPL(set_listen_endpoint, ep, wait_if_not_available); } - void node::accept_incoming_connections(bool accept) + void node::set_accept_incoming_connections(bool accept) { - INVOKE_IN_IMPL(accept_incoming_connections, accept); + INVOKE_IN_IMPL(set_accept_incoming_connections, accept); } - void node::connect_to_new_peers( bool connect ) + void node::set_connect_to_new_peers( bool connect ) { - 
INVOKE_IN_IMPL( connect_to_new_peers, connect ); + INVOKE_IN_IMPL( set_connect_to_new_peers, connect ); } - void node::listen_on_port( uint16_t port, bool wait_if_not_available ) + void node::set_listen_port( uint16_t port, bool wait_if_not_available ) { - INVOKE_IN_IMPL(listen_on_port, port, wait_if_not_available); + INVOKE_IN_IMPL(set_listen_port, port, wait_if_not_available); } fc::ip::endpoint node::get_actual_listening_endpoint() const diff --git a/libraries/net/node_impl.hxx b/libraries/net/node_impl.hxx index 95e6a650c3..cc669e437f 100644 --- a/libraries/net/node_impl.hxx +++ b/libraries/net/node_impl.hxx @@ -585,14 +585,15 @@ class node_impl : public peer_connection_delegate void listen_to_p2p_network(); void connect_to_p2p_network(); void add_node( const fc::ip::endpoint& ep ); - void set_advertise_algorithm( std::string algo, const fc::optional>& advertise_or_exclude_list ); + void set_advertise_algorithm( std::string algo, + const fc::optional>& advertise_or_exclude_list ); void add_seed_node( const std::string& seed_string ); void initiate_connect_to(const peer_connection_ptr& peer); void connect_to_endpoint(const fc::ip::endpoint& ep); - void listen_on_endpoint(const fc::ip::endpoint& ep , bool wait_if_not_available); - void accept_incoming_connections(bool accept); - void connect_to_new_peers( bool connect ); - void listen_on_port( uint16_t port, bool wait_if_not_available ); + void set_listen_endpoint(const fc::ip::endpoint& ep , bool wait_if_not_available); + void set_accept_incoming_connections(bool accept); + void set_connect_to_new_peers( bool connect ); + void set_listen_port( uint16_t port, bool wait_if_not_available ); fc::ip::endpoint get_actual_listening_endpoint() const; std::vector get_connected_peers() const; From 6ec8161835d3da5e26e12232aafd68c2b9b15d25 Mon Sep 17 00:00:00 2001 From: John Jones Date: Mon, 8 Jul 2019 15:42:04 -0500 Subject: [PATCH 018/338] Remove duplicates from advertise-peer-list --- libraries/net/node.cpp | 64 ++++++++++++++++++---------------- libraries/net/node_impl.hxx | 4 +-- tests/tests/p2p_node_tests.cpp | 4 +-- 3 files changed, 38 insertions(+), 34 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 4e24bf393d..54508507e8 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -195,24 +195,29 @@ namespace graphene { namespace net { namespace detail { { FC_ASSERT( address_list.valid(), "advertise-peer-list must be included" ); - advertise_or_exclude_list.reserve( address_list->size() ); - auto& list = advertise_or_exclude_list; - std::for_each( address_list->begin(), address_list->end(), [&list]( std::string str ) { - // ignore fc exceptions (like poorly formatted endpoints) - try + advertise_list.reserve( address_list->size() ); + std::for_each( address_list->begin(), address_list->end(), [&list = advertise_list]( std::string str ) + { + // filter out duplicates + if ( list.find(str) == list.end() ) { - list.emplace_back( graphene::net::address_info( - fc::ip::endpoint::from_string(str), - fc::time_point_sec(), - fc::microseconds(0), - node_id_t(), - peer_connection_direction::unknown, - firewalled_state::unknown )); + // ignore fc exceptions (like poorly formatted endpoints) + try + { + graphene::net::address_info tmp( + fc::ip::endpoint::from_string(str), + fc::time_point_sec(), + fc::microseconds(0), + node_id_t(), + peer_connection_direction::unknown, + firewalled_state::unknown ); + list[str] = tmp; + } + catch(const fc::exception& ) + { + wlog( "Address ${addr} invalid.", ("addr", str) ); + 
} } - catch(const fc::exception& ) - { - wlog( "Address ${addr} invalid.", ("addr", str) ); - } } ); } @@ -220,18 +225,17 @@ namespace graphene { namespace net { namespace detail { { std::vector ret_val; // only pass those that are in the list AND we are connected to - std::for_each(advertise_or_exclude_list.begin(), advertise_or_exclude_list.end(), - [&impl = impl, reply=reply, &ret_val=ret_val] - (const graphene::net::address_info& addr) - { - graphene::net::peer_connection_ptr peer_conn = impl->get_active_connection_to_endpoint(addr.remote_endpoint); - if ( peer_conn != peer_connection_ptr() ) - ret_val.push_back(addr); - }); + for(auto& it : advertise_list) + { + graphene::net::peer_connection_ptr peer_conn + = impl->get_active_connection_for_endpoint(it.second.remote_endpoint); + if ( peer_conn != peer_connection_ptr() ) + ret_val.push_back(it.second); + } reply.addresses = ret_val; } private: - std::vector advertise_or_exclude_list; + std::unordered_map advertise_list; }; /**** @@ -396,7 +400,7 @@ namespace graphene { namespace net { namespace detail { { // see if we have an existing connection to that peer. If we do, disconnect them and // then try to connect the next time through the loop - peer_connection_ptr existing_connection_ptr = get_connection_to_endpoint( add_once_peer.endpoint ); + peer_connection_ptr existing_connection_ptr = get_connection_for_endpoint( add_once_peer.endpoint ); if(!existing_connection_ptr) connect_to_endpoint(add_once_peer.endpoint); } @@ -4256,7 +4260,7 @@ namespace graphene { namespace net { namespace detail { initiate_connect_to(new_peer); } - peer_connection_ptr node_impl::get_active_connection_to_endpoint( const fc::ip::endpoint& remote_endpoint) + peer_connection_ptr node_impl::get_active_connection_for_endpoint( const fc::ip::endpoint& remote_endpoint) { VERIFY_CORRECT_THREAD(); for( const peer_connection_ptr& active_peer : _active_connections ) @@ -4268,10 +4272,10 @@ namespace graphene { namespace net { namespace detail { return peer_connection_ptr(); } - peer_connection_ptr node_impl::get_connection_to_endpoint( const fc::ip::endpoint& remote_endpoint ) + peer_connection_ptr node_impl::get_connection_for_endpoint( const fc::ip::endpoint& remote_endpoint ) { VERIFY_CORRECT_THREAD(); - peer_connection_ptr active_ptr = get_active_connection_to_endpoint( remote_endpoint ); + peer_connection_ptr active_ptr = get_active_connection_for_endpoint( remote_endpoint ); if ( active_ptr != peer_connection_ptr() ) return active_ptr; for( const peer_connection_ptr& handshaking_peer : _handshaking_connections ) @@ -4286,7 +4290,7 @@ namespace graphene { namespace net { namespace detail { bool node_impl::is_connection_to_endpoint_in_progress( const fc::ip::endpoint& remote_endpoint ) { VERIFY_CORRECT_THREAD(); - return get_connection_to_endpoint( remote_endpoint ) != peer_connection_ptr(); + return get_connection_for_endpoint( remote_endpoint ) != peer_connection_ptr(); } void node_impl::move_peer_to_active_list(const peer_connection_ptr& peer) diff --git a/libraries/net/node_impl.hxx b/libraries/net/node_impl.hxx index cc669e437f..fafb39f268 100644 --- a/libraries/net/node_impl.hxx +++ b/libraries/net/node_impl.hxx @@ -561,13 +561,13 @@ class node_impl : public peer_connection_delegate * @param remote_endpoint the address we are interested in * @returns the connection, or peer_connection_ptr() if not found */ - peer_connection_ptr get_active_connection_to_endpoint( const fc::ip::endpoint& remote_endpoint ); + peer_connection_ptr 
get_active_connection_for_endpoint( const fc::ip::endpoint& remote_endpoint ); /*** * Look for a connection that is either active or currently in the handshaking process * @param remote_endpoint the address we are interested in * @returns the connection, or peer_connection_ptr() if not found */ - peer_connection_ptr get_connection_to_endpoint( const fc::ip::endpoint& remote_endpoint ); + peer_connection_ptr get_connection_for_endpoint( const fc::ip::endpoint& remote_endpoint ); void dump_node_status(); diff --git a/tests/tests/p2p_node_tests.cpp b/tests/tests/p2p_node_tests.cpp index cb642ee84b..af24433909 100644 --- a/tests/tests/p2p_node_tests.cpp +++ b/tests/tests/p2p_node_tests.cpp @@ -270,9 +270,9 @@ BOOST_AUTO_TEST_CASE( set_nothing_advertise_algorithm ) test_address_message( msg, 0 ); } -BOOST_AUTO_TEST_CASE( advertise_or_exclude_list ) +BOOST_AUTO_TEST_CASE( advertise_list_test ) { - std::vector advert_list = { "127.0.0.1:8090"}; + std::vector advert_list = { "127.0.0.1:8090" }; // set up my node int my_node_port = graphene::app::get_available_port(); fc::temp_directory my_node_dir; From a0d659c109517f37703aca0ddcca6ee9c94be7cb Mon Sep 17 00:00:00 2001 From: John Jones Date: Fri, 26 Jul 2019 13:09:03 -0500 Subject: [PATCH 019/338] Added log msg for bad endpoint, add add_seed_node --- libraries/net/include/graphene/net/node.hpp | 6 ++++++ libraries/net/node.cpp | 17 +++++++++++++++-- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/libraries/net/include/graphene/net/node.hpp b/libraries/net/include/graphene/net/node.hpp index bc2c80be8a..d071268524 100644 --- a/libraries/net/include/graphene/net/node.hpp +++ b/libraries/net/include/graphene/net/node.hpp @@ -235,6 +235,12 @@ namespace graphene { namespace net { */ void add_seed_nodes( std::vector seeds ); + /**** + * @brief add a node to seed the p2p network + * @param in the url as a string + */ + void add_seed_node( const std::string& in); + /** * Attempt to connect to the specified endpoint immediately. */ diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 54508507e8..04b399ccf2 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -4218,7 +4218,15 @@ namespace graphene { namespace net { namespace detail { void node_impl::add_seed_node(const std::string& endpoint_string) { VERIFY_CORRECT_THREAD(); - std::vector endpoints = graphene::net::node::resolve_string_to_ip_endpoints(endpoint_string); + std::vector endpoints; + try + { + endpoints = graphene::net::node::resolve_string_to_ip_endpoints(endpoint_string); + } + catch(...) 
+ { + elog( "Unable to resolve endpoint during attempt to add seed node ${ep}", ("ep", endpoint_string) ); + } for (const fc::ip::endpoint& endpoint : endpoints) { ilog("Adding seed node ${endpoint}", ("endpoint", endpoint)); @@ -5187,7 +5195,7 @@ namespace graphene { namespace net { namespace detail { for(const std::string& endpoint_string : seeds ) { try { - INVOKE_IN_IMPL(add_seed_node, endpoint_string); + add_seed_node(endpoint_string); } catch( const fc::exception& e ) { wlog( "caught exception ${e} while adding seed node ${endpoint}", ("e", e.to_detail_string())("endpoint", endpoint_string) ); @@ -5195,6 +5203,11 @@ namespace graphene { namespace net { namespace detail { } } + void node::add_seed_node(const std::string& in) + { + INVOKE_IN_IMPL(add_seed_node, in); + } + void node::set_advertise_algorithm( std::string algo, const fc::optional>& advertise_or_exclude_list ) { my->set_advertise_algorithm( algo, advertise_or_exclude_list ); From 510e3f0201b279cb8e4c4f2d08a65980280f5f8e Mon Sep 17 00:00:00 2001 From: John Jones Date: Mon, 29 Jul 2019 08:54:56 -0500 Subject: [PATCH 020/338] Switch message from error to warning --- libraries/net/node.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 04b399ccf2..0f342e79e9 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -4225,7 +4225,7 @@ namespace graphene { namespace net { namespace detail { } catch(...) { - elog( "Unable to resolve endpoint during attempt to add seed node ${ep}", ("ep", endpoint_string) ); + wlog( "Unable to resolve endpoint during attempt to add seed node ${ep}", ("ep", endpoint_string) ); } for (const fc::ip::endpoint& endpoint : endpoints) { From a76c20b9f9c0b50fb0c5db303c241c19c89c67ca Mon Sep 17 00:00:00 2001 From: John Jones Date: Fri, 2 Aug 2019 10:13:05 -0500 Subject: [PATCH 021/338] advertise lists and firewall checking --- libraries/net/node.cpp | 88 +++++++++++++++++++++++++++---------- libraries/net/node_impl.hxx | 3 ++ 2 files changed, 68 insertions(+), 23 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 0f342e79e9..e9747532ab 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -234,6 +234,16 @@ namespace graphene { namespace net { namespace detail { } reply.addresses = ret_val; } + + bool should_advertise( const fc::ip::endpoint& in ) + { + for (auto& it : advertise_list ) + { + if ( in == it.second.remote_endpoint ) + return true; + } + return false; + } private: std::unordered_map advertise_list; }; @@ -263,6 +273,12 @@ namespace graphene { namespace net { namespace detail { } reply.addresses.shrink_to_fit(); } + bool should_advertise( const fc::ip::endpoint& in ) + { + if (exclude_list.find( in ) == exclude_list.end() ) + return true; + return false; + } private: fc::flat_set exclude_list; }; @@ -280,6 +296,10 @@ namespace graphene { namespace net { namespace detail { reply.addresses.emplace_back(update_address_record(impl, active_peer)); } } + bool should_advertise( const fc::ip::endpoint& in ) + { + return true; + } }; node_impl::node_impl(const std::string& user_agent) : @@ -3163,7 +3183,11 @@ namespace graphene { namespace net { namespace detail { { if (firewall_check_state->expected_node_id != peer->node_id && // it's not the node who is asking us to test !peer->firewall_check_state && // the peer isn't already performing a check for another node - firewall_check_state->nodes_already_tested.find(peer->node_id) == firewall_check_state->nodes_already_tested.end() 
&& + _address_builder != nullptr && + peer->get_remote_endpoint().valid() && + _address_builder->should_advertise(*peer->get_remote_endpoint()) && // can advertise who is about to be asked + firewall_check_state->nodes_already_tested.find(peer->node_id) + == firewall_check_state->nodes_already_tested.end() && // we haven't already asked peer->core_protocol_version >= 106) { wlog("forwarding firewall check for node ${to_check} to peer ${checker}", @@ -3193,30 +3217,48 @@ namespace graphene { namespace net { namespace detail { delete firewall_check_state; } + fc::ip::endpoint node_impl::get_endpoint_to_check( peer_connection* originating_peer, + const check_firewall_message& message ) + { + fc::ip::endpoint ret_val; + if (message.node_id == node_id_t() && + message.endpoint_to_check == fc::ip::endpoint() ) + { + // if they are using the same inbound and outbound port, try connecting to their outbound endpoint. + // if they are using a different inbound port, use their outbound address but the inbound port they reported + ret_val = originating_peer->get_socket().remote_endpoint(); + if (originating_peer->inbound_port != originating_peer->outbound_port) + ret_val = fc::ip::endpoint(ret_val.get_address(), originating_peer->inbound_port); + } + ret_val = message.endpoint_to_check; + return ret_val; + } + void node_impl::on_check_firewall_message(peer_connection* originating_peer, const check_firewall_message& check_firewall_message_received) { VERIFY_CORRECT_THREAD(); + const fc::ip::endpoint endpoint_to_check = get_endpoint_to_check(originating_peer, check_firewall_message_received ); + if (check_firewall_message_received.node_id == node_id_t() && - check_firewall_message_received.endpoint_to_check == fc::ip::endpoint()) - { - // originating_peer is asking us to test whether it is firewalled - // we're not going to try to connect back to the originating peer directly, - // instead, we're going to coordinate requests by asking some of our peers - // to try to connect to the originating peer, and relay the results back - wlog("Peer ${peer} wants us to check whether it is firewalled", ("peer", originating_peer->get_remote_endpoint())); - firewall_check_state_data* firewall_check_state = new firewall_check_state_data; - // if they are using the same inbound and outbound port, try connecting to their outbound endpoint. - // if they are using a different inbound port, use their outbound address but the inbound port they reported - fc::ip::endpoint endpoint_to_check = originating_peer->get_socket().remote_endpoint(); - if (originating_peer->inbound_port != originating_peer->outbound_port) - endpoint_to_check = fc::ip::endpoint(endpoint_to_check.get_address(), originating_peer->inbound_port); - firewall_check_state->endpoint_to_test = endpoint_to_check; - firewall_check_state->expected_node_id = originating_peer->node_id; - firewall_check_state->requesting_peer = originating_peer->node_id; - - forward_firewall_check_to_next_available_peer(firewall_check_state); + check_firewall_message_received.endpoint_to_check == fc::ip::endpoint()) + { + // originating_peer is asking us to test whether it is firewalled + // do not bother if this peer should not be advertised. 
+ if ( _address_builder != nullptr + && !_address_builder->should_advertise( endpoint_to_check )) + return; + // we're not going to try to connect back to the originating peer directly, + // instead, we're going to coordinate requests by asking some of our peers + // to try to connect to the originating peer, and relay the results back + wlog("Peer ${peer} wants us to check whether it is firewalled", ("peer", originating_peer->get_remote_endpoint())); + firewall_check_state_data* firewall_check_state = new firewall_check_state_data; + firewall_check_state->endpoint_to_test = endpoint_to_check; + firewall_check_state->expected_node_id = originating_peer->node_id; + firewall_check_state->requesting_peer = originating_peer->node_id; + + forward_firewall_check_to_next_available_peer(firewall_check_state); } else { @@ -3225,11 +3267,11 @@ namespace graphene { namespace net { namespace detail { // can't perform the test if ( !_node_configuration.connect_to_new_peers || ( is_already_connected_to_id(check_firewall_message_received.node_id) || - is_connection_to_endpoint_in_progress(check_firewall_message_received.endpoint_to_check ))) + is_connection_to_endpoint_in_progress(endpoint_to_check ))) { check_firewall_reply_message reply; reply.node_id = check_firewall_message_received.node_id; - reply.endpoint_checked = check_firewall_message_received.endpoint_to_check; + reply.endpoint_checked = endpoint_to_check; reply.result = firewall_check_result::unable_to_check; } else @@ -3238,10 +3280,10 @@ namespace graphene { namespace net { namespace detail { // to test. peer_connection_ptr peer_for_testing(peer_connection::make_shared(this)); peer_for_testing->firewall_check_state = new firewall_check_state_data; - peer_for_testing->firewall_check_state->endpoint_to_test = check_firewall_message_received.endpoint_to_check; + peer_for_testing->firewall_check_state->endpoint_to_test = endpoint_to_check; peer_for_testing->firewall_check_state->expected_node_id = check_firewall_message_received.node_id; peer_for_testing->firewall_check_state->requesting_peer = originating_peer->node_id; - peer_for_testing->set_remote_endpoint(check_firewall_message_received.endpoint_to_check); + peer_for_testing->set_remote_endpoint(endpoint_to_check); initiate_connect_to(peer_for_testing); } } diff --git a/libraries/net/node_impl.hxx b/libraries/net/node_impl.hxx index fafb39f268..fd61726346 100644 --- a/libraries/net/node_impl.hxx +++ b/libraries/net/node_impl.hxx @@ -254,6 +254,7 @@ class node_impl : public peer_connection_delegate { public: virtual void build( node_impl* impl, address_message& ) = 0; + virtual bool should_advertise(const fc::ip::endpoint& in ) = 0; protected: address_info update_address_record( node_impl* impl, const peer_connection_ptr& active_peer); }; @@ -623,6 +624,8 @@ class node_impl : public peer_connection_delegate bool is_hard_fork_block(uint32_t block_number) const; uint32_t get_next_known_hard_fork_block_number(uint32_t block_number) const; + fc::ip::endpoint get_endpoint_to_check( peer_connection* originating_peer, + const check_firewall_message& message ); }; // end class node_impl }}} // end of namespace graphene::net::detail From 833c60d979fbb5983f6d57ea8af16f64f5c75e7b Mon Sep 17 00:00:00 2001 From: John Jones Date: Mon, 5 Aug 2019 13:53:53 -0500 Subject: [PATCH 022/338] refactor advertise_list, return unable_to_check --- .../include/graphene/net/core_messages.hpp | 13 ++ libraries/net/node.cpp | 136 ++++++++++-------- 2 files changed, 87 insertions(+), 62 deletions(-) diff --git 
a/libraries/net/include/graphene/net/core_messages.hpp b/libraries/net/include/graphene/net/core_messages.hpp index 88d563e683..b8b9115671 100644 --- a/libraries/net/include/graphene/net/core_messages.hpp +++ b/libraries/net/include/graphene/net/core_messages.hpp @@ -293,6 +293,19 @@ namespace graphene { namespace net { direction(direction), firewalled(firewalled) {} + inline bool operator==(const fc::ip::endpoint& in) + { + return in == remote_endpoint; + } + inline bool operator==(const address_info& in) + { + return remote_endpoint == in.remote_endpoint && + last_seen_time == in.last_seen_time && + latency == in.latency && + node_id == in.node_id && + direction == in.direction && + firewalled == in.firewalled; + } }; struct address_message diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index e9747532ab..59866c1309 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -198,27 +198,25 @@ namespace graphene { namespace net { namespace detail { advertise_list.reserve( address_list->size() ); std::for_each( address_list->begin(), address_list->end(), [&list = advertise_list]( std::string str ) { - // filter out duplicates - if ( list.find(str) == list.end() ) + // ignore fc exceptions (like poorly formatted endpoints) + try { - // ignore fc exceptions (like poorly formatted endpoints) - try - { - graphene::net::address_info tmp( - fc::ip::endpoint::from_string(str), - fc::time_point_sec(), - fc::microseconds(0), - node_id_t(), - peer_connection_direction::unknown, - firewalled_state::unknown ); - list[str] = tmp; - } - catch(const fc::exception& ) - { - wlog( "Address ${addr} invalid.", ("addr", str) ); - } + graphene::net::address_info tmp( + fc::ip::endpoint::from_string(str), + fc::time_point_sec(), + fc::microseconds(0), + node_id_t(), + peer_connection_direction::unknown, + firewalled_state::unknown ); + if ( std::find( list.begin(), list.end(), tmp ) == list.end() ) + list.emplace_back(tmp); } + catch(const fc::exception& ) + { + wlog( "Address ${addr} invalid.", ("addr", str) ); + } } ); + advertise_list.shrink_to_fit(); } void build(node_impl* impl, address_message& reply) @@ -228,24 +226,22 @@ namespace graphene { namespace net { namespace detail { for(auto& it : advertise_list) { graphene::net::peer_connection_ptr peer_conn - = impl->get_active_connection_for_endpoint(it.second.remote_endpoint); + = impl->get_active_connection_for_endpoint( it.remote_endpoint ); if ( peer_conn != peer_connection_ptr() ) - ret_val.push_back(it.second); + ret_val.push_back( it ); } reply.addresses = ret_val; } bool should_advertise( const fc::ip::endpoint& in ) { - for (auto& it : advertise_list ) - { - if ( in == it.second.remote_endpoint ) - return true; - } - return false; + if (std::find(advertise_list.begin(), advertise_list.end(), in) + == advertise_list.end()) + return false; + return true; } private: - std::unordered_map advertise_list; + std::vector advertise_list; }; /**** @@ -3179,30 +3175,33 @@ namespace graphene { namespace net { namespace detail { void node_impl::forward_firewall_check_to_next_available_peer(firewall_check_state_data* firewall_check_state) { - for (const peer_connection_ptr& peer : _active_connections) + // if we aren't advertising anyone, don't bother with the loop + if (_address_builder != nullptr ) { - if (firewall_check_state->expected_node_id != peer->node_id && // it's not the node who is asking us to test - !peer->firewall_check_state && // the peer isn't already performing a check for another node - _address_builder != nullptr && - 
peer->get_remote_endpoint().valid() && - _address_builder->should_advertise(*peer->get_remote_endpoint()) && // can advertise who is about to be asked - firewall_check_state->nodes_already_tested.find(peer->node_id) - == firewall_check_state->nodes_already_tested.end() && // we haven't already asked - peer->core_protocol_version >= 106) - { - wlog("forwarding firewall check for node ${to_check} to peer ${checker}", - ("to_check", firewall_check_state->endpoint_to_test) - ("checker", peer->get_remote_endpoint())); - firewall_check_state->nodes_already_tested.insert(peer->node_id); - peer->firewall_check_state = firewall_check_state; - check_firewall_message check_request; - check_request.endpoint_to_check = firewall_check_state->endpoint_to_test; - check_request.node_id = firewall_check_state->expected_node_id; - peer->send_message(check_request); - return; - } + for (const peer_connection_ptr& peer : _active_connections) + { + if (firewall_check_state->expected_node_id != peer->node_id && // it's not the node who is asking us to test + !peer->firewall_check_state && // the peer isn't already performing a check for another node + peer->get_remote_endpoint().valid() && + _address_builder->should_advertise(*peer->get_remote_endpoint()) && // can adv. who is about to be asked + firewall_check_state->nodes_already_tested.find(peer->node_id) + == firewall_check_state->nodes_already_tested.end() && // we haven't already asked + peer->core_protocol_version >= 106) + { + ilog("forwarding firewall check for node ${to_check} to peer ${checker}", + ("to_check", firewall_check_state->endpoint_to_test) + ("checker", peer->get_remote_endpoint())); + firewall_check_state->nodes_already_tested.insert(peer->node_id); + peer->firewall_check_state = firewall_check_state; + check_firewall_message check_request; + check_request.endpoint_to_check = firewall_check_state->endpoint_to_test; + check_request.node_id = firewall_check_state->expected_node_id; + peer->send_message(check_request); + return; + } + } } - wlog("Unable to forward firewall check for node ${to_check} to any other peers, returning 'unable'", + ilog("Unable to forward firewall check for node ${to_check} to any other peers, returning 'unable'", ("to_check", firewall_check_state->endpoint_to_test)); peer_connection_ptr originating_peer = get_peer_by_node_id(firewall_check_state->expected_node_id); @@ -3230,7 +3229,10 @@ namespace graphene { namespace net { namespace detail { if (originating_peer->inbound_port != originating_peer->outbound_port) ret_val = fc::ip::endpoint(ret_val.get_address(), originating_peer->inbound_port); } - ret_val = message.endpoint_to_check; + else + { + ret_val = message.endpoint_to_check; + } return ret_val; } @@ -3248,17 +3250,26 @@ namespace graphene { namespace net { namespace detail { // do not bother if this peer should not be advertised. 
if ( _address_builder != nullptr && !_address_builder->should_advertise( endpoint_to_check )) - return; - // we're not going to try to connect back to the originating peer directly, - // instead, we're going to coordinate requests by asking some of our peers - // to try to connect to the originating peer, and relay the results back - wlog("Peer ${peer} wants us to check whether it is firewalled", ("peer", originating_peer->get_remote_endpoint())); - firewall_check_state_data* firewall_check_state = new firewall_check_state_data; - firewall_check_state->endpoint_to_test = endpoint_to_check; - firewall_check_state->expected_node_id = originating_peer->node_id; - firewall_check_state->requesting_peer = originating_peer->node_id; - - forward_firewall_check_to_next_available_peer(firewall_check_state); + { + check_firewall_reply_message reply; + reply.node_id = check_firewall_message_received.node_id; + reply.endpoint_checked = endpoint_to_check; + reply.result = firewall_check_result::unable_to_check; + originating_peer->send_message(reply); + } + else + { + // we're not going to try to connect back to the originating peer directly, + // instead, we're going to coordinate requests by asking some of our peers + // to try to connect to the originating peer, and relay the results back + ilog("Peer ${peer} wants us to check whether it is firewalled", ("peer", originating_peer->get_remote_endpoint())); + firewall_check_state_data* firewall_check_state = new firewall_check_state_data; + firewall_check_state->endpoint_to_test = endpoint_to_check; + firewall_check_state->expected_node_id = originating_peer->node_id; + firewall_check_state->requesting_peer = originating_peer->node_id; + + forward_firewall_check_to_next_available_peer(firewall_check_state); + } } else { @@ -3267,12 +3278,13 @@ namespace graphene { namespace net { namespace detail { // can't perform the test if ( !_node_configuration.connect_to_new_peers || ( is_already_connected_to_id(check_firewall_message_received.node_id) || - is_connection_to_endpoint_in_progress(endpoint_to_check ))) + is_connection_to_endpoint_in_progress( endpoint_to_check ))) { check_firewall_reply_message reply; reply.node_id = check_firewall_message_received.node_id; reply.endpoint_checked = endpoint_to_check; reply.result = firewall_check_result::unable_to_check; + originating_peer->send_message(reply); } else { From 3f6408b363f412cc61346f1228d79e65956f5680 Mon Sep 17 00:00:00 2001 From: John Jones Date: Mon, 5 Aug 2019 15:08:25 -0500 Subject: [PATCH 023/338] search set by endpoint --- .../include/graphene/net/core_messages.hpp | 30 +++++++++++-------- libraries/net/node.cpp | 28 ++++++++--------- 2 files changed, 31 insertions(+), 27 deletions(-) diff --git a/libraries/net/include/graphene/net/core_messages.hpp b/libraries/net/include/graphene/net/core_messages.hpp index b8b9115671..5e71958851 100644 --- a/libraries/net/include/graphene/net/core_messages.hpp +++ b/libraries/net/include/graphene/net/core_messages.hpp @@ -293,21 +293,25 @@ namespace graphene { namespace net { direction(direction), firewalled(firewalled) {} - inline bool operator==(const fc::ip::endpoint& in) - { - return in == remote_endpoint; - } - inline bool operator==(const address_info& in) - { - return remote_endpoint == in.remote_endpoint && - last_seen_time == in.last_seen_time && - latency == in.latency && - node_id == in.node_id && - direction == in.direction && - firewalled == in.firewalled; - } }; + struct address_endpoint_comparator + { + using is_transparent = void; + bool 
operator()(const address_info& lhs, const address_info& rhs) const + { + return lhs.remote_endpoint < rhs.remote_endpoint; + } + bool operator()(const fc::ip::endpoint in, const address_info& addr) const + { + return in < addr.remote_endpoint; + } + bool operator()(const address_info& addr, const fc::ip::endpoint& in) const + { + return addr.remote_endpoint < in; + } + }; + struct address_message { static const core_message_type_enum type; diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 59866c1309..eb41328c19 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -195,28 +195,29 @@ namespace graphene { namespace net { namespace detail { { FC_ASSERT( address_list.valid(), "advertise-peer-list must be included" ); - advertise_list.reserve( address_list->size() ); std::for_each( address_list->begin(), address_list->end(), [&list = advertise_list]( std::string str ) { // ignore fc exceptions (like poorly formatted endpoints) try { - graphene::net::address_info tmp( - fc::ip::endpoint::from_string(str), - fc::time_point_sec(), - fc::microseconds(0), - node_id_t(), - peer_connection_direction::unknown, - firewalled_state::unknown ); - if ( std::find( list.begin(), list.end(), tmp ) == list.end() ) - list.emplace_back(tmp); + fc::ip::endpoint ep = fc::ip::endpoint::from_string(str); + if (list.find(ep) == list.end() ) + { + graphene::net::address_info tmp( + ep, + fc::time_point_sec(), + fc::microseconds(0), + node_id_t(), + peer_connection_direction::unknown, + firewalled_state::unknown ); + list.insert(tmp); + } } catch(const fc::exception& ) { wlog( "Address ${addr} invalid.", ("addr", str) ); } } ); - advertise_list.shrink_to_fit(); } void build(node_impl* impl, address_message& reply) @@ -235,13 +236,12 @@ namespace graphene { namespace net { namespace detail { bool should_advertise( const fc::ip::endpoint& in ) { - if (std::find(advertise_list.begin(), advertise_list.end(), in) - == advertise_list.end()) + if (advertise_list.find(in) == advertise_list.end()) return false; return true; } private: - std::vector advertise_list; + std::set advertise_list; }; /**** From 19ac1dadb3964117946b1bf1b03d5e56353637e8 Mon Sep 17 00:00:00 2001 From: John Jones Date: Mon, 5 Aug 2019 16:01:23 -0500 Subject: [PATCH 024/338] Improve readability --- libraries/net/node.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index eb41328c19..2f53f619de 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -3278,7 +3278,7 @@ namespace graphene { namespace net { namespace detail { // can't perform the test if ( !_node_configuration.connect_to_new_peers || ( is_already_connected_to_id(check_firewall_message_received.node_id) || - is_connection_to_endpoint_in_progress( endpoint_to_check ))) + is_connection_to_endpoint_in_progress( check_firewall_message_received.endpoint_to_check ))) { check_firewall_reply_message reply; reply.node_id = check_firewall_message_received.node_id; @@ -3292,10 +3292,10 @@ namespace graphene { namespace net { namespace detail { // to test. 
peer_connection_ptr peer_for_testing(peer_connection::make_shared(this));
       peer_for_testing->firewall_check_state = new firewall_check_state_data;
-      peer_for_testing->firewall_check_state->endpoint_to_test = endpoint_to_check;
+      peer_for_testing->firewall_check_state->endpoint_to_test = check_firewall_message_received.endpoint_to_check;
       peer_for_testing->firewall_check_state->expected_node_id = check_firewall_message_received.node_id;
       peer_for_testing->firewall_check_state->requesting_peer = originating_peer->node_id;
-      peer_for_testing->set_remote_endpoint(endpoint_to_check);
+      peer_for_testing->set_remote_endpoint( check_firewall_message_received.endpoint_to_check);
       initiate_connect_to(peer_for_testing);
     }
   }

From 59cb73f1f5970442ef79582f0810f9f6d749ef13 Mon Sep 17 00:00:00 2001
From: John Jones
Date: Wed, 7 Aug 2019 06:54:22 -0500
Subject: [PATCH 025/338] Prevent connections when unwanted

Do not create new connections during firewall check when connect_to_new_peers, do not advertise peers during get_current_connections
---
 libraries/net/node.cpp | 130 +++++++++++++++++++++++------------------
 1 file changed, 73 insertions(+), 57 deletions(-)

diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp
index 2f53f619de..55f1c947ca 100644
--- a/libraries/net/node.cpp
+++ b/libraries/net/node.cpp
@@ -3273,31 +3273,39 @@ namespace graphene { namespace net { namespace detail {
     }
     else
     {
-      // we're being asked to check another node
-     // first, find out if we're currently connected to that node. 
If we are, we + // can't perform the test + if ( !_node_configuration.connect_to_new_peers || + ( is_already_connected_to_id(check_firewall_message_received.node_id) || + is_connection_to_endpoint_in_progress( check_firewall_message_received.endpoint_to_check ))) + { + check_firewall_reply_message reply; + reply.node_id = check_firewall_message_received.node_id; + reply.endpoint_checked = endpoint_to_check; + reply.result = firewall_check_result::unable_to_check; + originating_peer->send_message(reply); + } + else + { + if ( !_node_configuration.connect_to_new_peers ) + { + check_firewall_reply_message reply; + reply.node_id = check_firewall_message_received.node_id; + reply.endpoint_checked = endpoint_to_check; + reply.result = firewall_check_result::unable_to_check; + originating_peer->send_message(reply); + } + // we're not connected to them, so we need to set up a connection to them + // to test. + peer_connection_ptr peer_for_testing(peer_connection::make_shared(this)); + peer_for_testing->firewall_check_state = new firewall_check_state_data; + peer_for_testing->firewall_check_state->endpoint_to_test = check_firewall_message_received.endpoint_to_check; + peer_for_testing->firewall_check_state->expected_node_id = check_firewall_message_received.node_id; + peer_for_testing->firewall_check_state->requesting_peer = originating_peer->node_id; + peer_for_testing->set_remote_endpoint( check_firewall_message_received.endpoint_to_check); + initiate_connect_to(peer_for_testing); + } } } @@ -3405,40 +3413,48 @@ namespace graphene { namespace net { namespace detail { } fc::time_point now = fc::time_point::now(); - for (const peer_connection_ptr& peer : _active_connections) + if ( _address_builder != nullptr ) { - ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating over _active_connections - - current_connection_data data_for_this_peer; - data_for_this_peer.connection_duration = now.sec_since_epoch() - peer->connection_initiation_time.sec_since_epoch(); - if (peer->get_remote_endpoint()) // should always be set for anyone we're actively connected to - data_for_this_peer.remote_endpoint = *peer->get_remote_endpoint(); - data_for_this_peer.clock_offset = peer->clock_offset; - data_for_this_peer.round_trip_delay = peer->round_trip_delay; - data_for_this_peer.node_id = peer->node_id; - data_for_this_peer.connection_direction = peer->direction; - data_for_this_peer.firewalled = peer->is_firewalled; - fc::mutable_variant_object user_data; - if (peer->graphene_git_revision_sha) - user_data["graphene_git_revision_sha"] = *peer->graphene_git_revision_sha; - if (peer->graphene_git_revision_unix_timestamp) - user_data["graphene_git_revision_unix_timestamp"] = *peer->graphene_git_revision_unix_timestamp; - if (peer->fc_git_revision_sha) - user_data["fc_git_revision_sha"] = *peer->fc_git_revision_sha; - if (peer->fc_git_revision_unix_timestamp) - user_data["fc_git_revision_unix_timestamp"] = *peer->fc_git_revision_unix_timestamp; - if (peer->platform) - user_data["platform"] = *peer->platform; - if (peer->bitness) - user_data["bitness"] = *peer->bitness; - user_data["user_agent"] = peer->user_agent; - - user_data["last_known_block_hash"] = fc::variant( peer->last_block_delegate_has_seen, 1 ); - user_data["last_known_block_number"] = _delegate->get_block_number(peer->last_block_delegate_has_seen); - user_data["last_known_block_time"] = peer->last_block_time_delegate_has_seen; + for (const peer_connection_ptr& peer : _active_connections) + { + ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating 
over _active_connections - data_for_this_peer.user_data = user_data; - reply.current_connections.emplace_back(data_for_this_peer); + if ( peer->get_remote_endpoint().valid() && + _address_builder->should_advertise( *peer->get_remote_endpoint() ) ) + { + current_connection_data data_for_this_peer; + data_for_this_peer.connection_duration = + now.sec_since_epoch() - peer->connection_initiation_time.sec_since_epoch(); + if (peer->get_remote_endpoint()) // should always be set for anyone we're actively connected to + data_for_this_peer.remote_endpoint = *peer->get_remote_endpoint(); + data_for_this_peer.clock_offset = peer->clock_offset; + data_for_this_peer.round_trip_delay = peer->round_trip_delay; + data_for_this_peer.node_id = peer->node_id; + data_for_this_peer.connection_direction = peer->direction; + data_for_this_peer.firewalled = peer->is_firewalled; + fc::mutable_variant_object user_data; + if (peer->graphene_git_revision_sha) + user_data["graphene_git_revision_sha"] = *peer->graphene_git_revision_sha; + if (peer->graphene_git_revision_unix_timestamp) + user_data["graphene_git_revision_unix_timestamp"] = *peer->graphene_git_revision_unix_timestamp; + if (peer->fc_git_revision_sha) + user_data["fc_git_revision_sha"] = *peer->fc_git_revision_sha; + if (peer->fc_git_revision_unix_timestamp) + user_data["fc_git_revision_unix_timestamp"] = *peer->fc_git_revision_unix_timestamp; + if (peer->platform) + user_data["platform"] = *peer->platform; + if (peer->bitness) + user_data["bitness"] = *peer->bitness; + user_data["user_agent"] = peer->user_agent; + + user_data["last_known_block_hash"] = fc::variant( peer->last_block_delegate_has_seen, 1 ); + user_data["last_known_block_number"] = _delegate->get_block_number(peer->last_block_delegate_has_seen); + user_data["last_known_block_time"] = peer->last_block_time_delegate_has_seen; + + data_for_this_peer.user_data = user_data; + reply.current_connections.emplace_back(data_for_this_peer); + } + } } originating_peer->send_message(reply); } From 12991027711d13b17a0465808fd3d6df43ecf8dc Mon Sep 17 00:00:00 2001 From: John Jones Date: Fri, 9 Aug 2019 14:43:41 -0500 Subject: [PATCH 026/338] do not continue once unable_to_check is sent --- libraries/net/node.cpp | 29 ++++++++++++++--------------- libraries/net/node_impl.hxx | 1 + 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 55f1c947ca..6e30438b8e 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -3236,6 +3236,16 @@ namespace graphene { namespace net { namespace detail { return ret_val; } + void node_impl::send_unable_to_check(peer_connection* peer, const node_id_t& node_id, + const fc::ip::endpoint& endpoint ) + { + check_firewall_reply_message reply; + reply.node_id = node_id; + reply.endpoint_checked = endpoint; + reply.result = firewall_check_result::unable_to_check; + peer->send_message(reply); + } + void node_impl::on_check_firewall_message(peer_connection* originating_peer, const check_firewall_message& check_firewall_message_received) { @@ -3251,11 +3261,7 @@ namespace graphene { namespace net { namespace detail { if ( _address_builder != nullptr && !_address_builder->should_advertise( endpoint_to_check )) { - check_firewall_reply_message reply; - reply.node_id = check_firewall_message_received.node_id; - reply.endpoint_checked = endpoint_to_check; - reply.result = firewall_check_result::unable_to_check; - originating_peer->send_message(reply); + send_unable_to_check( originating_peer, 
check_firewall_message_received.node_id, endpoint_to_check ); } else { @@ -3280,21 +3286,14 @@ namespace graphene { namespace net { namespace detail { ( is_already_connected_to_id(check_firewall_message_received.node_id) || is_connection_to_endpoint_in_progress( check_firewall_message_received.endpoint_to_check ))) { - check_firewall_reply_message reply; - reply.node_id = check_firewall_message_received.node_id; - reply.endpoint_checked = endpoint_to_check; - reply.result = firewall_check_result::unable_to_check; - originating_peer->send_message(reply); + send_unable_to_check( originating_peer, check_firewall_message_received.node_id, endpoint_to_check ); } else { if ( !_node_configuration.connect_to_new_peers ) { - check_firewall_reply_message reply; - reply.node_id = check_firewall_message_received.node_id; - reply.endpoint_checked = endpoint_to_check; - reply.result = firewall_check_result::unable_to_check; - originating_peer->send_message(reply); + send_unable_to_check( originating_peer, check_firewall_message_received.node_id, endpoint_to_check ); + return; } // we're not connected to them, so we need to set up a connection to them // to test. diff --git a/libraries/net/node_impl.hxx b/libraries/net/node_impl.hxx index fd61726346..206f9c96d5 100644 --- a/libraries/net/node_impl.hxx +++ b/libraries/net/node_impl.hxx @@ -517,6 +517,7 @@ class node_impl : public peer_connection_delegate void forward_firewall_check_to_next_available_peer(firewall_check_state_data* firewall_check_state); + void send_unable_to_check( peer_connection* peer, const node_id_t& node_id, const fc::ip::endpoint& endpoint ); void on_check_firewall_message(peer_connection* originating_peer, const check_firewall_message& check_firewall_message_received); From 5c03d20970b03a356b0a4cf36bb3115ee9446e1d Mon Sep 17 00:00:00 2001 From: John Jones Date: Mon, 12 Aug 2019 08:08:44 -0500 Subject: [PATCH 027/338] Add test for firewall check changes --- tests/tests/p2p_node_tests.cpp | 65 +++++++++++++++++++++++++++------- 1 file changed, 52 insertions(+), 13 deletions(-) diff --git a/tests/tests/p2p_node_tests.cpp b/tests/tests/p2p_node_tests.cpp index af24433909..6b23240281 100644 --- a/tests/tests/p2p_node_tests.cpp +++ b/tests/tests/p2p_node_tests.cpp @@ -191,16 +191,28 @@ class test_peer : public graphene::net::peer_connection void send_message( const graphene::net::message& message_to_send, size_t message_send_time_field_offset = (size_t)-1 ) override { + message_received = nullptr; try { - // make a copy - graphene::net::address_message m = message_to_send.as(); - std::shared_ptr msg_ptr = std::make_shared(m); - // store it for later - message_received = msg_ptr; - return; + switch ( message_to_send.msg_type.value() ) + { + case graphene::net::core_message_type_enum::address_message_type : + { + graphene::net::address_message m = message_to_send.as(); + message_received = std::make_shared(m); + break; + } + case graphene::net::core_message_type_enum::check_firewall_reply_message_type : + { + graphene::net::check_firewall_reply_message m = message_to_send.as(); + message_received = std::make_shared(m); + break; + } + default: + break; + } } catch (...)
{} - message_received = nullptr; + } }; @@ -218,6 +230,23 @@ void test_address_message( std::shared_ptr msg, std::siz } } +void test_firewall_message( std::shared_ptr msg ) +{ + if (msg != nullptr) + { + graphene::net::check_firewall_reply_message fw_msg = + static_cast( + msg->as() ); + if (fw_msg.result != graphene::net::firewall_check_result::unable_to_check ) + BOOST_FAIL( "Expected \"Unable to check\"" ); + } + else + { + BOOST_FAIL( "firewall_message was null" ); + } + +} + BOOST_AUTO_TEST_SUITE( p2p_node_tests ) /**** @@ -259,15 +288,25 @@ BOOST_AUTO_TEST_CASE( set_nothing_advertise_algorithm ) std::pair, graphene::net::peer_connection_ptr> node2_rslts = node1.create_peer_connection( "127.0.0.1:8090" ); - // verify that they do not share it with others test_delegate peer3_delegate{}; std::shared_ptr peer3_ptr = std::make_shared( &peer3_delegate ); - graphene::net::address_request_message req; - node1.on_message( peer3_ptr, req ); + // verify that they do not share it with others + { + graphene::net::address_request_message req; + node1.on_message( peer3_ptr, req ); + // check the results + std::shared_ptr msg = peer3_ptr->message_received; + test_address_message( msg, 0 ); + } - // check the results - std::shared_ptr msg = peer3_ptr->message_received; - test_address_message( msg, 0 ); + // attempt a firewall check, which should return "unable_to_check" + { + graphene::net::check_firewall_message req; + req.endpoint_to_check = fc::ip::endpoint::from_string("127.0.0.1:8080"); + node1.on_message( peer3_ptr, req ); + std::shared_ptr msg = peer3_ptr->message_received; + test_firewall_message( msg ); + } } BOOST_AUTO_TEST_CASE( advertise_list_test ) From 1d67a968a8350a339e2d397b6382500e861b293b Mon Sep 17 00:00:00 2001 From: John Jones Date: Mon, 19 Aug 2019 07:18:12 -0500 Subject: [PATCH 028/338] Move info firewall msgs from warning to info --- libraries/net/node.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 6e30438b8e..0253a5f034 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -1627,7 +1627,7 @@ namespace graphene { namespace net { namespace detail { _last_firewall_check_message_sent < now - fc::minutes(5) && originating_peer->core_protocol_version >= 106) { - wlog("I don't know if I'm firewalled. Sending a firewall check message to peer ${peer}", + ilog("I don't know if I'm firewalled. Sending a firewall check message to peer ${peer}", ("peer", originating_peer->get_remote_endpoint())); originating_peer->firewall_check_state = new firewall_check_state_data; @@ -3320,7 +3320,7 @@ namespace graphene { namespace net { namespace detail { // and they're reporting back // if they got a result, return it to the original peer. if they were unable to check, // we'll try another peer. - wlog("Peer ${reporter} reports firewall check status ${status} for ${peer}", + ilog("Peer ${reporter} reports firewall check status ${status} for ${peer}", ("reporter", originating_peer->get_remote_endpoint()) ("status", check_firewall_reply_message_received.result) ("peer", check_firewall_reply_message_received.endpoint_checked)); @@ -3364,7 +3364,7 @@ namespace graphene { namespace net { namespace detail { else if (originating_peer->firewall_check_state) { // this is a reply to a firewall check we initiated. 
- wlog("Firewall check we initiated has returned with result: ${result}, endpoint = ${endpoint}", + ilog("Firewall check we initiated has returned with result: ${result}, endpoint = ${endpoint}", ("result", check_firewall_reply_message_received.result) ("endpoint", check_firewall_reply_message_received.endpoint_checked)); if (check_firewall_reply_message_received.result == firewall_check_result::connection_successful) @@ -3972,7 +3972,7 @@ namespace graphene { namespace net { namespace detail { } else { - wlog("connecting to peer ${peer} for firewall check", ("peer", new_peer->get_remote_endpoint())); + ilog("connecting to peer ${peer} for firewall check", ("peer", new_peer->get_remote_endpoint())); } fc::oexception connect_failed_exception; @@ -4022,7 +4022,7 @@ namespace graphene { namespace net { namespace detail { reply.result = connect_failed_exception ? firewall_check_result::unable_to_connect : firewall_check_result::connection_successful; - wlog("firewall check of ${peer_checked} ${success_or_failure}, sending reply to ${requester}", + ilog("firewall check of ${peer_checked} ${success_or_failure}, sending reply to ${requester}", ("peer_checked", new_peer->get_remote_endpoint()) ("success_or_failure", connect_failed_exception ? "failed" : "succeeded" ) ("requester", requesting_peer->get_remote_endpoint())); From f7bc15368630437775398c2ce45358b257abdb06 Mon Sep 17 00:00:00 2001 From: John Jones Date: Mon, 19 Aug 2019 07:26:27 -0500 Subject: [PATCH 029/338] remove unnecessary validity check --- libraries/net/node.cpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 0253a5f034..463fa88caa 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -3182,7 +3182,6 @@ namespace graphene { namespace net { namespace detail { { if (firewall_check_state->expected_node_id != peer->node_id && // it's not the node who is asking us to test !peer->firewall_check_state && // the peer isn't already performing a check for another node - peer->get_remote_endpoint().valid() && _address_builder->should_advertise(*peer->get_remote_endpoint()) && // can adv. 
who is about to be asked firewall_check_state->nodes_already_tested.find(peer->node_id) == firewall_check_state->nodes_already_tested.end() && // we haven't already asked @@ -3418,8 +3417,7 @@ namespace graphene { namespace net { namespace detail { { ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating over _active_connections - if ( peer->get_remote_endpoint().valid() && - _address_builder->should_advertise( *peer->get_remote_endpoint() ) ) + if ( _address_builder->should_advertise( *peer->get_remote_endpoint() ) ) { current_connection_data data_for_this_peer; data_for_this_peer.connection_duration = From b09e222df47e0ef1b063491172daa666f7e06912 Mon Sep 17 00:00:00 2001 From: John Jones Date: Sun, 25 Aug 2019 19:31:07 -0500 Subject: [PATCH 030/338] add missing include --- tests/tests/p2p_node_tests.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/tests/p2p_node_tests.cpp b/tests/tests/p2p_node_tests.cpp index 6b23240281..cb6c45c255 100644 --- a/tests/tests/p2p_node_tests.cpp +++ b/tests/tests/p2p_node_tests.cpp @@ -29,6 +29,7 @@ #include #include +#include #include #include From 4dcb4b2e263587261178563f2e059e71cb3895c1 Mon Sep 17 00:00:00 2001 From: John Jones Date: Mon, 26 Aug 2019 06:14:12 -0500 Subject: [PATCH 031/338] fix poor merge from rebase --- libraries/app/application.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/libraries/app/application.cpp b/libraries/app/application.cpp index fe5842c4a0..5650517246 100644 --- a/libraries/app/application.cpp +++ b/libraries/app/application.cpp @@ -1056,7 +1056,6 @@ void application::set_program_options(boost::program_options::options_descriptio "For database_api_impl::get_withdraw_permissions_by_giver to set max limit value") ("api-limit-get-withdraw-permissions-by-recipient",boost::program_options::value()->default_value(101), "For database_api_impl::get_withdraw_permissions_by_recipient to set max limit value") - "For database_api_impl::get_order_book to set its default limit value as 50") ("accept-incoming-connections", bpo::value()->implicit_value(true), "Accept incoming connections") ("connect-to-new-peers", bpo::value()->implicit_value(true), "Connect to new peers") ("advertise-peer-algorithm", bpo::value()->implicit_value("all"), From b900a77f6de4162e635180608ede254e69227893 Mon Sep 17 00:00:00 2001 From: John Jones Date: Fri, 30 Aug 2019 09:43:04 -0500 Subject: [PATCH 032/338] Remove firewall checks and get_current_connections --- .../include/graphene/net/peer_connection.hpp | 3 +- libraries/net/node.cpp | 265 ++---------------- libraries/net/node_impl.hxx | 3 - libraries/net/peer_connection.cpp | 1 - programs/network_mapper/network_mapper.cpp | 8 +- tests/tests/p2p_node_tests.cpp | 9 - 6 files changed, 20 insertions(+), 269 deletions(-) diff --git a/libraries/net/include/graphene/net/peer_connection.hpp b/libraries/net/include/graphene/net/peer_connection.hpp index 452bafecc0..6ea7744576 100644 --- a/libraries/net/include/graphene/net/peer_connection.hpp +++ b/libraries/net/include/graphene/net/peer_connection.hpp @@ -172,8 +172,7 @@ namespace graphene { namespace net fc::time_point connection_closed_time; fc::time_point connection_terminated_time; peer_connection_direction direction = peer_connection_direction::unknown; - //connection_state state; - firewalled_state is_firewalled = firewalled_state::unknown; + peer_connection_direction direction; fc::microseconds clock_offset; fc::microseconds round_trip_delay; diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 463fa88caa..0cf5946a10 100644 
--- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -303,7 +303,6 @@ namespace graphene { namespace net { namespace detail { _thread(std::make_shared("p2p")), #endif // P2P_IN_DEDICATED_THREAD _delegate(nullptr), - _is_firewalled(firewalled_state::unknown), _potential_peer_database_updated(false), _sync_items_to_fetch_updated(false), _suspend_fetching_sync_blocks(false), @@ -1231,14 +1230,11 @@ namespace graphene { namespace net { namespace detail { bool new_information_received = false; for (const address_info& address : addresses) { - if (address.firewalled == graphene::net::firewalled_state::not_firewalled) - { - potential_peer_record updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(address.remote_endpoint); - if (address.last_seen_time > updated_peer_record.last_seen_time) - new_information_received = true; - updated_peer_record.last_seen_time = std::max(address.last_seen_time, updated_peer_record.last_seen_time); - _potential_peer_db.update_entry(updated_peer_record); - } + potential_peer_record updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(address.remote_endpoint); + if (address.last_seen_time > updated_peer_record.last_seen_time) + new_information_received = true; + updated_peer_record.last_seen_time = std::max(address.last_seen_time, updated_peer_record.last_seen_time); + _potential_peer_db.update_entry(updated_peer_record); } return new_information_received; } @@ -1549,33 +1545,19 @@ namespace graphene { namespace net { namespace detail { // so add them to our database if they're not firewalled // in the hello message, the peer sent us the IP address and port it thought it was connecting from. - // If they match the IP and port we see, we assume that they're actually on the internet and they're not - // firewalled. + // If they match the IP and port we see, we assume that they're actually on the internet. fc::ip::endpoint peers_actual_outbound_endpoint = originating_peer->get_socket().remote_endpoint(); if( peers_actual_outbound_endpoint.get_address() == originating_peer->inbound_address && peers_actual_outbound_endpoint.port() == originating_peer->outbound_port ) { - if( originating_peer->inbound_port == 0 ) - { - dlog( "peer does not appear to be firewalled, but they did not give an inbound port so I'm treating them as if they are." 
); - originating_peer->is_firewalled = firewalled_state::firewalled; - } - else + if( originating_peer->inbound_port != 0 ) { - // peer is not firewalled, add it to our database + // add to the peer database fc::ip::endpoint peers_inbound_endpoint(originating_peer->inbound_address, originating_peer->inbound_port); potential_peer_record updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(peers_inbound_endpoint); _potential_peer_db.update_entry(updated_peer_record); - originating_peer->is_firewalled = firewalled_state::not_firewalled; } } - else - { - dlog("peer is firewalled: they think their outbound endpoint is ${reported_endpoint}, but I see it as ${actual_endpoint}", - ("reported_endpoint", fc::ip::endpoint(originating_peer->inbound_address, originating_peer->outbound_port)) - ("actual_endpoint", peers_actual_outbound_endpoint)); - originating_peer->is_firewalled = firewalled_state::firewalled; - } if (!is_accepting_new_connections()) { @@ -1622,18 +1604,6 @@ namespace graphene { namespace net { namespace detail { originating_peer->negotiation_status = peer_connection::connection_negotiation_status::peer_connection_accepted; originating_peer->our_state = peer_connection::our_connection_state::connection_accepted; originating_peer->send_message(address_request_message()); - fc::time_point now = fc::time_point::now(); - if (_is_firewalled == firewalled_state::unknown && - _last_firewall_check_message_sent < now - fc::minutes(5) && - originating_peer->core_protocol_version >= 106) - { - ilog("I don't know if I'm firewalled. Sending a firewall check message to peer ${peer}", - ("peer", originating_peer->get_remote_endpoint())); - originating_peer->firewall_check_state = new firewall_check_state_data; - - originating_peer->send_message(check_firewall_message()); - _last_firewall_check_message_sent = now; - } } void node_impl::on_connection_rejected_message(peer_connection* originating_peer, const connection_rejected_message& connection_rejected_message_received) @@ -1684,7 +1654,7 @@ namespace graphene { namespace net { namespace detail { } return address_info(*active_peer->get_remote_endpoint(), fc::time_point::now(), active_peer->round_trip_delay, - active_peer->node_id, active_peer->direction, active_peer->is_firewalled); + active_peer->node_id, active_peer->direction, firewalled_state::unknown); } void node_impl::on_address_request_message(peer_connection* originating_peer, const address_request_message& address_request_message_received) @@ -3248,218 +3218,28 @@ namespace graphene { namespace net { namespace detail { void node_impl::on_check_firewall_message(peer_connection* originating_peer, const check_firewall_message& check_firewall_message_received) { - VERIFY_CORRECT_THREAD(); - - const fc::ip::endpoint endpoint_to_check = get_endpoint_to_check(originating_peer, check_firewall_message_received ); - - if (check_firewall_message_received.node_id == node_id_t() && - check_firewall_message_received.endpoint_to_check == fc::ip::endpoint()) - { - // originating_peer is asking us to test whether it is firewalled - // do not bother if this peer should not be advertised. 
- if ( _address_builder != nullptr - && !_address_builder->should_advertise( endpoint_to_check )) - { - send_unable_to_check( originating_peer, check_firewall_message_received.node_id, endpoint_to_check ); - } - else - { - // we're not going to try to connect back to the originating peer directly, - // instead, we're going to coordinate requests by asking some of our peers - // to try to connect to the originating peer, and relay the results back - ilog("Peer ${peer} wants us to check whether it is firewalled", ("peer", originating_peer->get_remote_endpoint())); - firewall_check_state_data* firewall_check_state = new firewall_check_state_data; - firewall_check_state->endpoint_to_test = endpoint_to_check; - firewall_check_state->expected_node_id = originating_peer->node_id; - firewall_check_state->requesting_peer = originating_peer->node_id; - - forward_firewall_check_to_next_available_peer(firewall_check_state); - } - } - else - { - // we're being asked to check another node - // first, find out if we're currently connected to that node. If we are, we - // can't perform the test - if ( !_node_configuration.connect_to_new_peers || - ( is_already_connected_to_id(check_firewall_message_received.node_id) || - is_connection_to_endpoint_in_progress( check_firewall_message_received.endpoint_to_check ))) - { - send_unable_to_check( originating_peer, check_firewall_message_received.node_id, endpoint_to_check ); - } - else - { - if ( !_node_configuration.connect_to_new_peers ) - { - send_unable_to_check( originating_peer, check_firewall_message_received.node_id, endpoint_to_check ); - return; - } - // we're not connected to them, so we need to set up a connection to them - // to test. - peer_connection_ptr peer_for_testing(peer_connection::make_shared(this)); - peer_for_testing->firewall_check_state = new firewall_check_state_data; - peer_for_testing->firewall_check_state->endpoint_to_test = check_firewall_message_received.endpoint_to_check; - peer_for_testing->firewall_check_state->expected_node_id = check_firewall_message_received.node_id; - peer_for_testing->firewall_check_state->requesting_peer = originating_peer->node_id; - peer_for_testing->set_remote_endpoint( check_firewall_message_received.endpoint_to_check); - initiate_connect_to(peer_for_testing); - } - } + /* TODO: After next hardfork, remove this method and associated structures */ } void node_impl::on_check_firewall_reply_message(peer_connection* originating_peer, const check_firewall_reply_message& check_firewall_reply_message_received) { - VERIFY_CORRECT_THREAD(); - - if (originating_peer->firewall_check_state && - originating_peer->firewall_check_state->requesting_peer != node_id_t()) - { - // then this is a peer that is helping us check the firewalled state of one of our other peers - // and they're reporting back - // if they got a result, return it to the original peer. if they were unable to check, - // we'll try another peer. 
- ilog("Peer ${reporter} reports firewall check status ${status} for ${peer}", - ("reporter", originating_peer->get_remote_endpoint()) - ("status", check_firewall_reply_message_received.result) - ("peer", check_firewall_reply_message_received.endpoint_checked)); - - if (check_firewall_reply_message_received.result == firewall_check_result::unable_to_connect || - check_firewall_reply_message_received.result == firewall_check_result::connection_successful) - { - peer_connection_ptr original_peer = get_peer_by_node_id(originating_peer->firewall_check_state->requesting_peer); - if (original_peer) - { - if (check_firewall_reply_message_received.result == firewall_check_result::connection_successful) - { - // if we previously thought this peer was firewalled, mark them as not firewalled - if (original_peer->is_firewalled != firewalled_state::not_firewalled) - { - - original_peer->is_firewalled = firewalled_state::not_firewalled; - // there should be no old entry if we thought they were firewalled, so just create a new one - fc::optional inbound_endpoint = originating_peer->get_endpoint_for_connecting(); - if (inbound_endpoint) - { - potential_peer_record updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(*inbound_endpoint); - updated_peer_record.last_seen_time = fc::time_point::now(); - _potential_peer_db.update_entry(updated_peer_record); - } - } - } - original_peer->send_message(check_firewall_reply_message_received); - } - delete originating_peer->firewall_check_state; - originating_peer->firewall_check_state = nullptr; - } - else - { - // they were unable to check for us, ask another peer - firewall_check_state_data* firewall_check_state = originating_peer->firewall_check_state; - originating_peer->firewall_check_state = nullptr; - forward_firewall_check_to_next_available_peer(firewall_check_state); - } - } - else if (originating_peer->firewall_check_state) - { - // this is a reply to a firewall check we initiated. - ilog("Firewall check we initiated has returned with result: ${result}, endpoint = ${endpoint}", - ("result", check_firewall_reply_message_received.result) - ("endpoint", check_firewall_reply_message_received.endpoint_checked)); - if (check_firewall_reply_message_received.result == firewall_check_result::connection_successful) - { - _is_firewalled = firewalled_state::not_firewalled; - _publicly_visible_listening_endpoint = check_firewall_reply_message_received.endpoint_checked; - } - else if (check_firewall_reply_message_received.result == firewall_check_result::unable_to_connect) - { - _is_firewalled = firewalled_state::firewalled; - _publicly_visible_listening_endpoint = fc::optional(); - } - delete originating_peer->firewall_check_state; - originating_peer->firewall_check_state = nullptr; - } - else - { - wlog("Received a firewall check reply to a request I never sent"); - } - + /* This should no longer be called, as we are no longer asking for a firewall check + TODO: When we are assured no one will call it (i.e. 
after next hardfork, remove + this method and associated structures */ } void node_impl::on_get_current_connections_request_message(peer_connection* originating_peer, const get_current_connections_request_message& get_current_connections_request_message_received) { - VERIFY_CORRECT_THREAD(); - get_current_connections_reply_message reply; - - if (!_average_network_read_speed_minutes.empty()) - { - reply.upload_rate_one_minute = _average_network_write_speed_minutes.back(); - reply.download_rate_one_minute = _average_network_read_speed_minutes.back(); - - size_t minutes_to_average = std::min(_average_network_write_speed_minutes.size(), (size_t)15); - boost::circular_buffer::iterator start_iter = _average_network_write_speed_minutes.end() - minutes_to_average; - reply.upload_rate_fifteen_minutes = std::accumulate(start_iter, _average_network_write_speed_minutes.end(), 0) / (uint32_t)minutes_to_average; - start_iter = _average_network_read_speed_minutes.end() - minutes_to_average; - reply.download_rate_fifteen_minutes = std::accumulate(start_iter, _average_network_read_speed_minutes.end(), 0) / (uint32_t)minutes_to_average; - - minutes_to_average = std::min(_average_network_write_speed_minutes.size(), (size_t)60); - start_iter = _average_network_write_speed_minutes.end() - minutes_to_average; - reply.upload_rate_one_hour = std::accumulate(start_iter, _average_network_write_speed_minutes.end(), 0) / (uint32_t)minutes_to_average; - start_iter = _average_network_read_speed_minutes.end() - minutes_to_average; - reply.download_rate_one_hour = std::accumulate(start_iter, _average_network_read_speed_minutes.end(), 0) / (uint32_t)minutes_to_average; - } - - fc::time_point now = fc::time_point::now(); - if ( _address_builder != nullptr ) - { - for (const peer_connection_ptr& peer : _active_connections) - { - ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating over _active_connections - - if ( _address_builder->should_advertise( *peer->get_remote_endpoint() ) ) - { - current_connection_data data_for_this_peer; - data_for_this_peer.connection_duration = - now.sec_since_epoch() - peer->connection_initiation_time.sec_since_epoch(); - if (peer->get_remote_endpoint()) // should always be set for anyone we're actively connected to - data_for_this_peer.remote_endpoint = *peer->get_remote_endpoint(); - data_for_this_peer.clock_offset = peer->clock_offset; - data_for_this_peer.round_trip_delay = peer->round_trip_delay; - data_for_this_peer.node_id = peer->node_id; - data_for_this_peer.connection_direction = peer->direction; - data_for_this_peer.firewalled = peer->is_firewalled; - fc::mutable_variant_object user_data; - if (peer->graphene_git_revision_sha) - user_data["graphene_git_revision_sha"] = *peer->graphene_git_revision_sha; - if (peer->graphene_git_revision_unix_timestamp) - user_data["graphene_git_revision_unix_timestamp"] = *peer->graphene_git_revision_unix_timestamp; - if (peer->fc_git_revision_sha) - user_data["fc_git_revision_sha"] = *peer->fc_git_revision_sha; - if (peer->fc_git_revision_unix_timestamp) - user_data["fc_git_revision_unix_timestamp"] = *peer->fc_git_revision_unix_timestamp; - if (peer->platform) - user_data["platform"] = *peer->platform; - if (peer->bitness) - user_data["bitness"] = *peer->bitness; - user_data["user_agent"] = peer->user_agent; - - user_data["last_known_block_hash"] = fc::variant( peer->last_block_delegate_has_seen, 1 ); - user_data["last_known_block_number"] = _delegate->get_block_number(peer->last_block_delegate_has_seen); - user_data["last_known_block_time"] 
= peer->last_block_time_delegate_has_seen; - - data_for_this_peer.user_data = user_data; - reply.current_connections.emplace_back(data_for_this_peer); - } - } - } - originating_peer->send_message(reply); + /* TODO: When we are sure no one will call this, remove it and its associated structures */ } void node_impl::on_get_current_connections_reply_message(peer_connection* originating_peer, const get_current_connections_reply_message& get_current_connections_reply_message_received) { - VERIFY_CORRECT_THREAD(); + /* TODO: This never did anything. It should be removed, along with its associated structures + once we are sure that nodes have been upgraded */ } @@ -3934,13 +3714,6 @@ namespace graphene { namespace net { namespace detail { fc::ip::endpoint local_endpoint(peer->get_socket().local_endpoint()); uint16_t listening_port = _node_configuration.accept_incoming_connections ? _actual_listening_endpoint.port() : 0; - if (_is_firewalled == firewalled_state::not_firewalled && - _publicly_visible_listening_endpoint) - { - local_endpoint = *_publicly_visible_listening_endpoint; - listening_port = _publicly_visible_listening_endpoint->port(); - } - hello_message hello(_user_agent_string, core_protocol_version, local_endpoint.get_address(), @@ -3979,9 +3752,6 @@ namespace graphene { namespace net { namespace detail { { new_peer->connect_to(remote_endpoint, _actual_listening_endpoint); // blocks until the connection is established and secure connection is negotiated - // we connected to the peer. guess they're not firewalled.... - new_peer->is_firewalled = firewalled_state::not_firewalled; - // connection succeeded, we've started handshaking. record that in our database potential_peer_record updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(remote_endpoint); updated_peer_record.last_connection_disposition = last_connection_handshaking_failed; @@ -4553,7 +4323,7 @@ namespace graphene { namespace net { namespace detail { peer_details["version"] = ""; peer_details["subver"] = peer->user_agent; peer_details["inbound"] = peer->direction == peer_connection_direction::inbound; - peer_details["firewall_status"] = fc::variant( peer->is_firewalled, 1 ); + peer_details["firewall_status"] = (fc::enum_type)firewalled_state::unknown; peer_details["startingheight"] = ""; peer_details["banscore"] = ""; peer_details["syncnode"] = ""; @@ -4760,7 +4530,6 @@ namespace graphene { namespace net { namespace detail { info["listening_on"] = _actual_listening_endpoint; info["node_public_key"] = fc::variant( _node_public_key, 1 ); info["node_id"] = fc::variant( _node_id, 1 ); - info["firewalled"] = fc::variant( _is_firewalled, 1 ); return info; } fc::variant_object node_impl::network_get_usage_stats() const diff --git a/libraries/net/node_impl.hxx b/libraries/net/node_impl.hxx index 206f9c96d5..bd639e8de0 100644 --- a/libraries/net/node_impl.hxx +++ b/libraries/net/node_impl.hxx @@ -275,11 +275,8 @@ class node_impl : public peer_connection_delegate // in use. fc::ip::endpoint _actual_listening_endpoint; - /// we determine whether we're firewalled by asking other nodes. Store the result here: - firewalled_state _is_firewalled; /// if we're behind NAT, our listening endpoint address will appear different to the rest of the world. store it here. 
fc::optional _publicly_visible_listening_endpoint; - fc::time_point _last_firewall_check_message_sent; /// used by the task that manages connecting to peers // @{ diff --git a/libraries/net/peer_connection.cpp b/libraries/net/peer_connection.cpp index 12a0eccdb4..f2c27d194d 100644 --- a/libraries/net/peer_connection.cpp +++ b/libraries/net/peer_connection.cpp @@ -76,7 +76,6 @@ namespace graphene { namespace net _message_connection(this), _total_queued_messages_size(0), direction(peer_connection_direction::unknown), - is_firewalled(firewalled_state::unknown), our_state(our_connection_state::disconnected), they_have_requested_close(false), their_state(their_connection_state::disconnected), diff --git a/programs/network_mapper/network_mapper.cpp b/programs/network_mapper/network_mapper.cpp index 68b5f526f2..5b0d6cc4c7 100644 --- a/programs/network_mapper/network_mapper.cpp +++ b/programs/network_mapper/network_mapper.cpp @@ -241,7 +241,6 @@ int main(int argc, char** argv) { graphene::net::address_info this_node_info; this_node_info.direction = graphene::net::peer_connection_direction::outbound; - this_node_info.firewalled = graphene::net::firewalled_state::not_firewalled; this_node_info.remote_endpoint = probe->_remote; this_node_info.node_id = probe->_node_id; @@ -253,7 +252,6 @@ int main(int argc, char** argv) for (const graphene::net::address_info& info : probe->_peers) { if (nodes_already_visited.find(info.remote_endpoint) == nodes_already_visited.end() && - info.firewalled == graphene::net::firewalled_state::not_firewalled && nodes_to_visit_set.find(info.remote_endpoint) == nodes_to_visit_set.end()) { nodes_to_visit.push(info.remote_endpoint); @@ -276,8 +274,7 @@ int main(int argc, char** argv) { if (address_info_for_node.second.remote_endpoint == seed_node1) seed_node_id = address_info_for_node.first; - if (address_info_for_node.second.firewalled == graphene::net::firewalled_state::not_firewalled) - non_firewalled_nodes_set.insert(address_info_for_node.first); + non_firewalled_nodes_set.insert(address_info_for_node.first); } std::set seed_node_connections; for (const graphene::net::address_info& info : connections_by_node_id[seed_node_id]) @@ -304,8 +301,7 @@ int main(int argc, char** argv) for (const auto& address_info_for_node : address_info_by_node_id) { dot_stream << " \"" << fc::variant( address_info_for_node.first, 1 ).as_string() << "\"[label=\"" << (std::string)address_info_for_node.second.remote_endpoint << "\""; - if (address_info_for_node.second.firewalled != graphene::net::firewalled_state::not_firewalled) - dot_stream << ",shape=rectangle"; + dot_stream << ",shape=rectangle"; dot_stream << "];\n"; } for (auto& node_and_connections : connections_by_node_id) diff --git a/tests/tests/p2p_node_tests.cpp b/tests/tests/p2p_node_tests.cpp index cb6c45c255..fc6b49067b 100644 --- a/tests/tests/p2p_node_tests.cpp +++ b/tests/tests/p2p_node_tests.cpp @@ -299,15 +299,6 @@ BOOST_AUTO_TEST_CASE( set_nothing_advertise_algorithm ) std::shared_ptr msg = peer3_ptr->message_received; test_address_message( msg, 0 ); } - - // attempt a firewall check, which should return "unable_to_check" - { - graphene::net::check_firewall_message req; - req.endpoint_to_check = fc::ip::endpoint::from_string("127.0.0.1:8080"); - node1.on_message( peer3_ptr, req ); - std::shared_ptr msg = peer3_ptr->message_received; - test_firewall_message( msg ); - } } BOOST_AUTO_TEST_CASE( advertise_list_test ) From 48223b294a8df7246f6b57ca69c0c9d737d80cba Mon Sep 17 00:00:00 2001 From: John Jones Date: Thu, 10 Oct 2019 
14:08:12 -0500 Subject: [PATCH 033/338] rebase to fix conflict --- libraries/net/include/graphene/net/peer_connection.hpp | 1 - 1 file changed, 1 deletion(-) diff --git a/libraries/net/include/graphene/net/peer_connection.hpp b/libraries/net/include/graphene/net/peer_connection.hpp index 6ea7744576..5aca149691 100644 --- a/libraries/net/include/graphene/net/peer_connection.hpp +++ b/libraries/net/include/graphene/net/peer_connection.hpp @@ -172,7 +172,6 @@ namespace graphene { namespace net fc::time_point connection_closed_time; fc::time_point connection_terminated_time; peer_connection_direction direction = peer_connection_direction::unknown; - peer_connection_direction direction; fc::microseconds clock_offset; fc::microseconds round_trip_delay; From 9c443bd93924587872b3e005cadd5f84d02b55a6 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 12 Dec 2021 16:58:13 +0000 Subject: [PATCH 034/338] Update SonarScanner config for develop branch --- sonar-project.properties | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sonar-project.properties b/sonar-project.properties index 83767bec8a..9fe1699f22 100644 --- a/sonar-project.properties +++ b/sonar-project.properties @@ -3,7 +3,7 @@ sonar.organization=bitshares-on-github sonar.projectKey=bitshares_bitshares-core sonar.projectName=BitShares-Core sonar.projectDescription=BitShares Blockchain implementation and command-line interface -sonar.projectVersion=6.0.x +sonar.projectVersion=6.1.x sonar.host.url=https://sonarcloud.io @@ -26,4 +26,4 @@ sonar.cfamily.cache.path=sonar_cache # Decide which tree the current build belongs to in SonarCloud. # Managed by the `set_sonar_branch*` script(s) when building with CI. -sonar.branch.target=master +sonar.branch.target=develop From 9d99fce47a93269f9c811bae8a3657567c14d3a6 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 17 Dec 2021 12:10:21 +0000 Subject: [PATCH 035/338] Move some code from hpp file to cpp file --- .../elasticsearch/elasticsearch_plugin.cpp | 155 ++++++++++++++++++ .../elasticsearch/elasticsearch_plugin.hpp | 155 ------------------ 2 files changed, 155 insertions(+), 155 deletions(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index 3ae9009ba7..29e56fc6d6 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -245,6 +245,106 @@ void elasticsearch_plugin_impl::getOperationType(const optional op.which(); } +struct adaptor_struct { + variant adapt(const variant_object& op) + { + fc::mutable_variant_object o(op); + vector keys_to_rename; + for (auto i = o.begin(); i != o.end(); ++i) + { + auto& element = (*i).value(); + if (element.is_object()) + { + const string& name = (*i).key(); + auto& vo = element.get_object(); + if (vo.contains(name.c_str())) + keys_to_rename.emplace_back(name); + element = adapt(vo); + } + else if (element.is_array()) + adapt(element.get_array()); + } + for (const auto& i : keys_to_rename) + { + string new_name = i + "_"; + o[new_name] = variant(o[i]); + o.erase(i); + } + + if (o.find("memo") != o.end()) + { + auto& memo = o["memo"]; + if (memo.is_string()) + { + o["memo_"] = o["memo"]; + o.erase("memo"); + } + else if (memo.is_object()) + { + fc::mutable_variant_object tmp(memo.get_object()); + if (tmp.find("nonce") != tmp.end()) + { + tmp["nonce"] = tmp["nonce"].as_string(); + o["memo"] = tmp; + } + } + } + if (o.find("new_parameters") != o.end()) + { + auto& tmp = 
o["new_parameters"]; + if (tmp.is_object()) + { + fc::mutable_variant_object tmp2(tmp.get_object()); + if (tmp2.find("current_fees") != tmp2.end()) + { + tmp2.erase("current_fees"); + o["new_parameters"] = tmp2; + } + } + } + if (o.find("owner") != o.end() && o["owner"].is_string()) + { + o["owner_"] = o["owner"].as_string(); + o.erase("owner"); + } + + vector to_string_fields = { + "proposed_ops", + "initializer", + "policy", + "predicates", + "active_special_authority", + "owner_special_authority", + "acceptable_collateral", + "acceptable_borrowers" + }; + for( const auto& name : to_string_fields ) + { + if (o.find(name) != o.end()) + { + o[name] = fc::json::to_string(o[name]); + } + } + + variant v; + fc::to_variant(o, v, FC_PACK_MAX_DEPTH); + return v; + } + + void adapt(fc::variants& v) + { + for (auto& array_element : v) + { + if (array_element.is_object()) + array_element = adapt(array_element.get_object()); + else if (array_element.is_array()) + adapt(array_element.get_array()); + else + array_element = array_element.as_string(); + } + } +}; + void elasticsearch_plugin_impl::doOperationHistory(const optional & oho) { os.trx_in_block = oho->trx_in_block; @@ -271,6 +371,61 @@ void elasticsearch_plugin_impl::doBlock(uint32_t trx_in_block, const signed_bloc bs.trx_id = trx_id; } +struct operation_visitor +{ + typedef void result_type; + + share_type fee_amount; + asset_id_type fee_asset; + + asset_id_type transfer_asset_id; + share_type transfer_amount; + account_id_type transfer_from; + account_id_type transfer_to; + + void operator()( const graphene::chain::transfer_operation& o ) + { + fee_asset = o.fee.asset_id; + fee_amount = o.fee.amount; + + transfer_asset_id = o.amount.asset_id; + transfer_amount = o.amount.amount; + transfer_from = o.from; + transfer_to = o.to; + } + + object_id_type fill_order_id; + account_id_type fill_account_id; + asset_id_type fill_pays_asset_id; + share_type fill_pays_amount; + asset_id_type fill_receives_asset_id; + share_type fill_receives_amount; + double fill_fill_price; + bool fill_is_maker; + + void operator()( const graphene::chain::fill_order_operation& o ) + { + fee_asset = o.fee.asset_id; + fee_amount = o.fee.amount; + + fill_order_id = o.order_id; + fill_account_id = o.account_id; + fill_pays_asset_id = o.pays.asset_id; + fill_pays_amount = o.pays.amount; + fill_receives_asset_id = o.receives.asset_id; + fill_receives_amount = o.receives.amount; + fill_fill_price = o.fill_price.to_real(); + fill_is_maker = o.is_maker; + } + + template + void operator()( const T& o ) + { + fee_asset = o.fee.asset_id; + fee_amount = o.fee.amount; + } +}; + void elasticsearch_plugin_impl::doVisitor(const optional & oho) { graphene::chain::database& db = database(); diff --git a/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp b/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp index 05e24843f0..7aca292b33 100644 --- a/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp +++ b/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp @@ -80,61 +80,6 @@ class elasticsearch_plugin : public graphene::app::plugin }; -struct operation_visitor -{ - typedef void result_type; - - share_type fee_amount; - asset_id_type fee_asset; - - asset_id_type transfer_asset_id; - share_type transfer_amount; - account_id_type transfer_from; - account_id_type transfer_to; - - void operator()( const graphene::chain::transfer_operation& o ) - { - 
fee_asset = o.fee.asset_id; - fee_amount = o.fee.amount; - - transfer_asset_id = o.amount.asset_id; - transfer_amount = o.amount.amount; - transfer_from = o.from; - transfer_to = o.to; - } - - object_id_type fill_order_id; - account_id_type fill_account_id; - asset_id_type fill_pays_asset_id; - share_type fill_pays_amount; - asset_id_type fill_receives_asset_id; - share_type fill_receives_amount; - double fill_fill_price; - bool fill_is_maker; - - void operator()( const graphene::chain::fill_order_operation& o ) - { - fee_asset = o.fee.asset_id; - fee_amount = o.fee.amount; - - fill_order_id = o.order_id; - fill_account_id = o.account_id; - fill_pays_asset_id = o.pays.asset_id; - fill_pays_amount = o.pays.amount; - fill_receives_asset_id = o.receives.asset_id; - fill_receives_amount = o.receives.amount; - fill_fill_price = o.fill_price.to_real(); - fill_is_maker = o.is_maker; - } - - template - void operator()( const T& o ) - { - fee_asset = o.fee.asset_id; - fee_amount = o.fee.amount; - } -}; - struct operation_history_struct { int trx_in_block; int op_in_trx; @@ -197,106 +142,6 @@ struct bulk_struct { optional additional_data; }; -struct adaptor_struct { - variant adapt(const variant_object& op) - { - fc::mutable_variant_object o(op); - vector keys_to_rename; - for (auto i = o.begin(); i != o.end(); ++i) - { - auto& element = (*i).value(); - if (element.is_object()) - { - const string& name = (*i).key(); - auto& vo = element.get_object(); - if (vo.contains(name.c_str())) - keys_to_rename.emplace_back(name); - element = adapt(vo); - } - else if (element.is_array()) - adapt(element.get_array()); - } - for (const auto& i : keys_to_rename) - { - string new_name = i + "_"; - o[new_name] = variant(o[i]); - o.erase(i); - } - - if (o.find("memo") != o.end()) - { - auto& memo = o["memo"]; - if (memo.is_string()) - { - o["memo_"] = o["memo"]; - o.erase("memo"); - } - else if (memo.is_object()) - { - fc::mutable_variant_object tmp(memo.get_object()); - if (tmp.find("nonce") != tmp.end()) - { - tmp["nonce"] = tmp["nonce"].as_string(); - o["memo"] = tmp; - } - } - } - if (o.find("new_parameters") != o.end()) - { - auto& tmp = o["new_parameters"]; - if (tmp.is_object()) - { - fc::mutable_variant_object tmp2(tmp.get_object()); - if (tmp2.find("current_fees") != tmp2.end()) - { - tmp2.erase("current_fees"); - o["new_parameters"] = tmp2; - } - } - } - if (o.find("owner") != o.end() && o["owner"].is_string()) - { - o["owner_"] = o["owner"].as_string(); - o.erase("owner"); - } - - vector to_string_fields = { - "proposed_ops", - "initializer", - "policy", - "predicates", - "active_special_authority", - "owner_special_authority", - "acceptable_collateral", - "acceptable_borrowers" - }; - for( const auto& name : to_string_fields ) - { - if (o.find(name) != o.end()) - { - o[name] = fc::json::to_string(o[name]); - } - } - - variant v; - fc::to_variant(o, v, FC_PACK_MAX_DEPTH); - return v; - } - - void adapt(fc::variants& v) - { - for (auto& array_element : v) - { - if (array_element.is_object()) - array_element = adapt(array_element.get_object()); - else if (array_element.is_array()) - adapt(array_element.get_array()); - else - array_element = array_element.as_string(); - } - } -}; - } } //graphene::elasticsearch FC_REFLECT_ENUM( graphene::elasticsearch::mode, (only_save)(only_query)(all) ) From 91675d7271726158d421bae21b3454b5df250e2b Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 17 Dec 2021 14:49:58 +0000 Subject: [PATCH 036/338] Wrap long lines --- .../elasticsearch/elasticsearch_plugin.hpp | 14 
+++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp b/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp index 7aca292b33..fbc012acba 100644 --- a/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp +++ b/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp @@ -87,6 +87,7 @@ struct operation_history_struct { int virtual_op; std::string op; variant op_object; + variant operation_result_object; }; struct block_struct { @@ -145,12 +146,15 @@ struct bulk_struct { } } //graphene::elasticsearch FC_REFLECT_ENUM( graphene::elasticsearch::mode, (only_save)(only_query)(all) ) -FC_REFLECT( graphene::elasticsearch::operation_history_struct, (trx_in_block)(op_in_trx)(operation_result)(virtual_op)(op)(op_object) ) +FC_REFLECT( graphene::elasticsearch::operation_history_struct, + (trx_in_block)(op_in_trx)(operation_result)(virtual_op)(op)(op_object)(operation_result_object) ) FC_REFLECT( graphene::elasticsearch::block_struct, (block_num)(block_time)(trx_id) ) FC_REFLECT( graphene::elasticsearch::fee_struct, (asset)(asset_name)(amount)(amount_units) ) FC_REFLECT( graphene::elasticsearch::transfer_struct, (asset)(asset_name)(amount)(amount_units)(from)(to) ) -FC_REFLECT( graphene::elasticsearch::fill_struct, (order_id)(account_id)(pays_asset_id)(pays_asset_name)(pays_amount)(pays_amount_units) - (receives_asset_id)(receives_asset_name)(receives_amount)(receives_amount_units)(fill_price) - (fill_price_units)(is_maker)) +FC_REFLECT( graphene::elasticsearch::fill_struct, + (order_id)(account_id)(pays_asset_id)(pays_asset_name)(pays_amount)(pays_amount_units) + (receives_asset_id)(receives_asset_name)(receives_amount)(receives_amount_units)(fill_price) + (fill_price_units)(is_maker) ) FC_REFLECT( graphene::elasticsearch::visitor_struct, (fee_data)(transfer_data)(fill_data) ) -FC_REFLECT( graphene::elasticsearch::bulk_struct, (account_history)(operation_history)(operation_type)(operation_id_num)(block_data)(additional_data) ) +FC_REFLECT( graphene::elasticsearch::bulk_struct, + (account_history)(operation_history)(operation_type)(operation_id_num)(block_data)(additional_data) ) From 03b90574e307fc2343d1a6f212fcc9b500a25399 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 17 Dec 2021 20:28:55 +0000 Subject: [PATCH 037/338] Adapt static_variant and map for ElasticSearch --- .../elasticsearch/elasticsearch_plugin.cpp | 143 +++++++++++++----- 1 file changed, 107 insertions(+), 36 deletions(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index 29e56fc6d6..5eb34f2f5d 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -245,8 +245,13 @@ void elasticsearch_plugin_impl::getOperationType(const optional op.which(); } -struct adaptor_struct { - variant adapt(const variant_object& op) +struct es_data_adaptor { + enum class data_type { + map_type, + static_variant_type, + static_variant_array_type + }; + static variant adapt(const variant_object& op) { fc::mutable_variant_object o(op); vector keys_to_rename; @@ -257,26 +262,37 @@ struct adaptor_struct { { const string& name = (*i).key(); auto& vo = element.get_object(); - if (vo.contains(name.c_str())) + if (vo.contains(name.c_str())) // transfer_operation.amount.amount 
keys_to_rename.emplace_back(name); element = adapt(vo); } else if (element.is_array()) adapt(element.get_array()); } - for (const auto& i : keys_to_rename) + + for( const auto& i : keys_to_rename ) // transfer_operation.amount { string new_name = i + "_"; o[new_name] = variant(o[i]); o.erase(i); } - if (o.find("memo") != o.end()) + if( o.find("fee") != o.end() ) + { + auto& fee = o["fee"]; + if( fee.is_uint64() ) // fee schedule + { + o["fee_"] = fee; + o.erase("fee"); + } + } + + if( o.find("memo") != o.end() ) { auto& memo = o["memo"]; - if (memo.is_string()) + if (memo.is_string()) // seems unused. TODO remove { - o["memo_"] = o["memo"]; + o["memo_"] = memo; o.erase("memo"); } else if (memo.is_object()) @@ -289,40 +305,32 @@ struct adaptor_struct { } } } - if (o.find("new_parameters") != o.end()) - { - auto& tmp = o["new_parameters"]; - if (tmp.is_object()) - { - fc::mutable_variant_object tmp2(tmp.get_object()); - if (tmp2.find("current_fees") != tmp2.end()) - { - tmp2.erase("current_fees"); - o["new_parameters"] = tmp2; - } - } - } - if (o.find("owner") != o.end() && o["owner"].is_string()) + + if( o.find("owner") != o.end() && o["owner"].is_string() ) // vesting_balance_*_operation.owner { o["owner_"] = o["owner"].as_string(); o.erase("owner"); } - vector to_string_fields = { - "proposed_ops", - "initializer", - "policy", - "predicates", - "active_special_authority", - "owner_special_authority", - "acceptable_collateral", - "acceptable_borrowers" + map to_string_fields = { + { "current_fees", data_type::static_variant_array_type }, + { "proposed_ops", data_type::static_variant_array_type }, + { "initializer", data_type::static_variant_type }, + { "policy", data_type::static_variant_type }, + { "predicates", data_type::static_variant_array_type }, + { "active_special_authority", data_type::static_variant_type }, + { "owner_special_authority", data_type::static_variant_type }, + { "acceptable_collateral", data_type::map_type }, + { "acceptable_borrowers", data_type::map_type } }; - for( const auto& name : to_string_fields ) + for( const auto& pair : to_string_fields ) { - if (o.find(name) != o.end()) + const auto& name = pair.first; + if( o.find(name) != o.end() ) { - o[name] = fc::json::to_string(o[name]); + const auto& value = o[name]; + o[name + "_object"] = adapt( value.get_array(), pair.second ); + o[name] = fc::json::to_string(value); } } @@ -331,7 +339,66 @@ struct adaptor_struct { return v; } - void adapt(fc::variants& v) + static variant adapt( const fc::variants& v, data_type type ) + { + if( data_type::map_type == type ) + return adapt_map(v); + if( data_type::static_variant_type == type ) + return adapt_static_variant(v); + + // static_variant array + fc::variants vs; + vs.reserve( v.size() ); + for( const auto& item : v ) + { + vs.push_back( adapt_static_variant( item.get_array() ) ); + } + + variant nv; + fc::to_variant(vs, nv, FC_PACK_MAX_DEPTH); + return nv; + } + + static void extract_data_from_variant( const variant& v, fc::mutable_variant_object& mv, const string& prefix ) + { + if( v.is_object() ) + mv[prefix + "_object"] = adapt( v.get_object() ); + else if( v.is_int64() || v.is_uint64() ) + mv[prefix + "_int"] = v; + else if( v.is_bool() ) + mv[prefix + "_bool"] = v; + else + mv[prefix + "_string"] = fc::json::to_string( v ); + // Note: we don't use double or array here, and we convert null and blob to string + } + + static variant adapt_map( const fc::variants& v ) + { + FC_ASSERT( v.size() == 2, "Internal error" ); + fc::mutable_variant_object mv; + + 
extract_data_from_variant( v[0], mv, "key" ); + extract_data_from_variant( v[1], mv, "data" ); + + variant nv; + fc::to_variant( mv, nv, FC_PACK_MAX_DEPTH ); + return nv; + } + + static variant adapt_static_variant( const fc::variants& v ) + { + FC_ASSERT( v.size() == 2, "Internal error" ); + fc::mutable_variant_object mv; + + mv["which"] = v[0]; + extract_data_from_variant( v[1], mv, "data" ); + + variant nv; + fc::to_variant( mv, nv, FC_PACK_MAX_DEPTH ); + return nv; + } + + static void adapt(fc::variants& v) { for (auto& array_element : v) { @@ -353,9 +420,13 @@ void elasticsearch_plugin_impl::doOperationHistory(const optional virtual_op; if(_elasticsearch_operation_object) { + // op oho->op.visit(fc::from_static_variant(os.op_object, FC_PACK_MAX_DEPTH)); - adaptor_struct adaptor; - os.op_object = adaptor.adapt(os.op_object.get_object()); + os.op_object = es_data_adaptor::adapt(os.op_object.get_object()); + // operation_result + variant v; + fc::to_variant( oho->result, v, FC_PACK_MAX_DEPTH ); + os.operation_result_object = es_data_adaptor::adapt_static_variant( v.get_array() ); } if(_elasticsearch_operation_string) os.op = fc::json::to_string(oho->op); From 0947fed2e907da29121f2049542d4da78d4f5c83 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 17 Dec 2021 21:47:59 +0000 Subject: [PATCH 038/338] Add logging --- libraries/plugins/elasticsearch/elasticsearch_plugin.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index 5eb34f2f5d..d4e57b02b9 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -413,7 +413,7 @@ struct es_data_adaptor { }; void elasticsearch_plugin_impl::doOperationHistory(const optional & oho) -{ +{ try { os.trx_in_block = oho->trx_in_block; os.op_in_trx = oho->op_in_trx; os.operation_result = fc::json::to_string(oho->result); @@ -430,7 +430,7 @@ void elasticsearch_plugin_impl::doOperationHistory(const optional op); -} +} FC_CAPTURE_LOG_AND_RETHROW( (oho) ) } void elasticsearch_plugin_impl::doBlock(uint32_t trx_in_block, const signed_block& b) { From d03ef2def2543d96f05cdab0b30c9c45e87afb6c Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 17 Dec 2021 22:24:20 +0000 Subject: [PATCH 039/338] Fix adaption of proposed_ops for ElasticSearch --- .../elasticsearch/elasticsearch_plugin.cpp | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index d4e57b02b9..e70f8ee83b 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -249,7 +249,7 @@ struct es_data_adaptor { enum class data_type { map_type, static_variant_type, - static_variant_array_type + array_type }; static variant adapt(const variant_object& op) { @@ -313,11 +313,12 @@ struct es_data_adaptor { } map to_string_fields = { - { "current_fees", data_type::static_variant_array_type }, - { "proposed_ops", data_type::static_variant_array_type }, + { "current_fees", data_type::array_type }, + { "op", data_type::static_variant_type }, // proposal_create_op.proposed_ops[*].op + { "proposed_ops", data_type::array_type }, { "initializer", data_type::static_variant_type }, { "policy", data_type::static_variant_type }, - { "predicates", data_type::static_variant_array_type }, + { 
"predicates", data_type::array_type }, { "active_special_authority", data_type::static_variant_type }, { "owner_special_authority", data_type::static_variant_type }, { "acceptable_collateral", data_type::map_type }, @@ -346,12 +347,17 @@ struct es_data_adaptor { if( data_type::static_variant_type == type ) return adapt_static_variant(v); - // static_variant array + // array_type fc::variants vs; vs.reserve( v.size() ); for( const auto& item : v ) { - vs.push_back( adapt_static_variant( item.get_array() ) ); + if( item.is_array() ) // static_variant array + vs.push_back( adapt_static_variant( item.get_array() ) ); + else if( item.is_object() ) // object array + vs.push_back( adapt( item.get_object() ) ); + else + wlog( "Type of item is unexpected: ${item}", ("item", item) ); } variant nv; From 7152dbb948e890c3692c7511210ad265f2b9d7fa Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 17 Dec 2021 22:37:14 +0000 Subject: [PATCH 040/338] Fix adaption of fee schedule update op for ES --- libraries/plugins/elasticsearch/elasticsearch_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index e70f8ee83b..5d22beac3a 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -313,7 +313,7 @@ struct es_data_adaptor { } map to_string_fields = { - { "current_fees", data_type::array_type }, + { "parameters", data_type::array_type }, // in committee proposals, current_fees.parameters { "op", data_type::static_variant_type }, // proposal_create_op.proposed_ops[*].op { "proposed_ops", data_type::array_type }, { "initializer", data_type::static_variant_type }, From aff8e72a26d688c2de3e80cfe99b92d0c90d5e22 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 17 Dec 2021 23:06:47 +0000 Subject: [PATCH 041/338] Fix adaption of arrays for ES --- .../elasticsearch/elasticsearch_plugin.cpp | 51 +++++++++++-------- 1 file changed, 30 insertions(+), 21 deletions(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index 5d22beac3a..c437b55405 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -254,20 +254,44 @@ struct es_data_adaptor { static variant adapt(const variant_object& op) { fc::mutable_variant_object o(op); + + map to_string_fields = { + { "parameters", data_type::array_type }, // in committee proposals, current_fees.parameters + { "op", data_type::static_variant_type }, // proposal_create_op.proposed_ops[*].op + { "proposed_ops", data_type::array_type }, + { "initializer", data_type::static_variant_type }, + { "policy", data_type::static_variant_type }, + { "predicates", data_type::array_type }, + { "active_special_authority", data_type::static_variant_type }, + { "owner_special_authority", data_type::static_variant_type }, + { "acceptable_collateral", data_type::map_type }, + { "acceptable_borrowers", data_type::map_type } + }; + map original_arrays; vector keys_to_rename; for (auto i = o.begin(); i != o.end(); ++i) { + const string& name = (*i).key(); auto& element = (*i).value(); if (element.is_object()) { - const string& name = (*i).key(); auto& vo = element.get_object(); if (vo.contains(name.c_str())) // transfer_operation.amount.amount keys_to_rename.emplace_back(name); element = adapt(vo); } else if (element.is_array()) - 
adapt(element.get_array()); + { + auto& array = element.get_array(); + if( to_string_fields.find(name) != to_string_fields.end() ) + { + // make a backup and convert to string + original_arrays[name] = array; + element = fc::json::to_string(element); + } + else + adapt(array); + } } for( const auto& i : keys_to_rename ) // transfer_operation.amount @@ -312,27 +336,12 @@ struct es_data_adaptor { o.erase("owner"); } - map to_string_fields = { - { "parameters", data_type::array_type }, // in committee proposals, current_fees.parameters - { "op", data_type::static_variant_type }, // proposal_create_op.proposed_ops[*].op - { "proposed_ops", data_type::array_type }, - { "initializer", data_type::static_variant_type }, - { "policy", data_type::static_variant_type }, - { "predicates", data_type::array_type }, - { "active_special_authority", data_type::static_variant_type }, - { "owner_special_authority", data_type::static_variant_type }, - { "acceptable_collateral", data_type::map_type }, - { "acceptable_borrowers", data_type::map_type } - }; - for( const auto& pair : to_string_fields ) + for( const auto& pair : original_arrays ) { const auto& name = pair.first; - if( o.find(name) != o.end() ) - { - const auto& value = o[name]; - o[name + "_object"] = adapt( value.get_array(), pair.second ); - o[name] = fc::json::to_string(value); - } + auto& value = pair.second; + auto type = to_string_fields[name]; + o[name + "_object"] = adapt( value, type ); } variant v; From 345942b0732cc26dbe75addcfbf1e8955210ca90 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 18 Dec 2021 00:28:22 +0000 Subject: [PATCH 042/338] Fix adaption of maps for ES --- .../elasticsearch/elasticsearch_plugin.cpp | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index c437b55405..b9dba95ee5 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -247,9 +247,9 @@ void elasticsearch_plugin_impl::getOperationType(const optional Date: Sat, 18 Dec 2021 16:17:14 +0000 Subject: [PATCH 043/338] Remove unnecessary code --- .../elasticsearch/elasticsearch_plugin.cpp | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index b9dba95ee5..2e60966eed 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -301,25 +301,10 @@ struct es_data_adaptor { o.erase(i); } - if( o.find("fee") != o.end() ) - { - auto& fee = o["fee"]; - if( fee.is_uint64() ) // fee schedule - { - o["fee_"] = fee; - o.erase("fee"); - } - } - if( o.find("memo") != o.end() ) { auto& memo = o["memo"]; - if (memo.is_string()) // seems unused. 
TODO remove - { - o["memo_"] = memo; - o.erase("memo"); - } - else if (memo.is_object()) + if (memo.is_object()) { fc::mutable_variant_object tmp(memo.get_object()); if (tmp.find("nonce") != tmp.end()) From 601925b4feff4eb292d657aba0dbb9e76030f273 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 20 Dec 2021 17:07:59 +0000 Subject: [PATCH 044/338] Switch type from 'data' to default '_doc' for ES --- libraries/plugins/elasticsearch/elasticsearch_plugin.cpp | 2 +- libraries/plugins/es_objects/es_objects.cpp | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index e7f9b1bf89..5cf518ab66 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -399,7 +399,7 @@ void elasticsearch_plugin_impl::prepareBulk(const account_transaction_history_id fc::mutable_variant_object bulk_header; bulk_header["_index"] = index_name; if(!esge7) - bulk_header["_type"] = "data"; + bulk_header["_type"] = "_doc"; bulk_header["_id"] = fc::to_string(ath_id.space_id) + "." + fc::to_string(ath_id.type_id) + "." + fc::to_string(ath_id.instance.value); prepare = graphene::utilities::createBulk(bulk_header, std::move(bulk_line)); diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index 5821fe4538..a693f922e8 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -229,7 +229,7 @@ void es_objects_plugin_impl::remove_from_database( object_id_type id, std::strin delete_line["_id"] = string(id); delete_line["_index"] = _es_objects_index_prefix + index; if(!esge7) - delete_line["_type"] = "data"; + delete_line["_type"] = "_doc"; fc::mutable_variant_object final_delete_line; final_delete_line["delete"] = delete_line; prepare.push_back(fc::json::to_string(final_delete_line)); @@ -244,7 +244,7 @@ void es_objects_plugin_impl::prepareTemplate(T blockchain_object, string index_n fc::mutable_variant_object bulk_header; bulk_header["_index"] = _es_objects_index_prefix + index_name; if(!esge7) - bulk_header["_type"] = "data"; + bulk_header["_type"] = "_doc"; if(_es_objects_keep_only_current) { bulk_header["_id"] = string(blockchain_object.id); From 1402c4b679fbbd62295e734b335d4c13707bc6c1 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 20 Dec 2021 17:15:05 +0000 Subject: [PATCH 045/338] Move generateIndexName(...) 
from util to ES plugin --- .../plugins/elasticsearch/elasticsearch_plugin.cpp | 14 +++++++++++++- libraries/utilities/elasticsearch.cpp | 10 ---------- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index 5cf518ab66..9ec972a72a 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -28,6 +28,8 @@ #include #include +#include + namespace graphene { namespace elasticsearch { namespace detail @@ -105,10 +107,20 @@ elasticsearch_plugin_impl::~elasticsearch_plugin_impl() } } +static const std::string generateIndexName( const fc::time_point_sec& block_date, + const std::string& _elasticsearch_index_prefix ) +{ + auto block_date_string = block_date.to_iso_string(); + std::vector parts; + boost::split(parts, block_date_string, boost::is_any_of("-")); + std::string index_name = _elasticsearch_index_prefix + parts[0] + "-" + parts[1]; + return index_name; +} + bool elasticsearch_plugin_impl::update_account_histories( const signed_block& b ) { checkState(b.timestamp); - index_name = graphene::utilities::generateIndexName(b.timestamp, _elasticsearch_index_prefix); + index_name = generateIndexName(b.timestamp, _elasticsearch_index_prefix); graphene::chain::database& db = database(); const vector >& hist = db.get_applied_operations(); diff --git a/libraries/utilities/elasticsearch.cpp b/libraries/utilities/elasticsearch.cpp index b0c69b193d..97c93eb6a8 100644 --- a/libraries/utilities/elasticsearch.cpp +++ b/libraries/utilities/elasticsearch.cpp @@ -24,7 +24,6 @@ #include #include -#include #include #include @@ -169,15 +168,6 @@ const std::string getEndPoint(ES& es) return doCurl(curl_request); } -const std::string generateIndexName(const fc::time_point_sec& block_date, const std::string& _elasticsearch_index_prefix) -{ - auto block_date_string = block_date.to_iso_string(); - std::vector parts; - boost::split(parts, block_date_string, boost::is_any_of("-")); - std::string index_name = _elasticsearch_index_prefix + parts[0] + "-" + parts[1]; - return index_name; -} - const std::string doCurl(CurlRequest& curl) { std::string CurlReadBuffer; From 054992743c46839302604cca2299a184d62bde66 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 20 Dec 2021 17:18:09 +0000 Subject: [PATCH 046/338] Update ES tests to accommodate data type changes --- tests/elasticsearch/main.cpp | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/elasticsearch/main.cpp b/tests/elasticsearch/main.cpp index 179a90389b..bbce2cccd1 100644 --- a/tests/elasticsearch/main.cpp +++ b/tests/elasticsearch/main.cpp @@ -72,7 +72,7 @@ BOOST_AUTO_TEST_CASE(elasticsearch_account_history) { generate_block(); string query = "{ \"query\" : { \"bool\" : { \"must\" : [{\"match_all\": {}}] } } }"; - es.endpoint = es.index_prefix + "*/data/_count"; + es.endpoint = es.index_prefix + "*/_doc/_count"; es.query = query; string res; @@ -86,7 +86,7 @@ BOOST_AUTO_TEST_CASE(elasticsearch_account_history) { return (total == "5"); }); - es.endpoint = es.index_prefix + "*/data/_search"; + es.endpoint = es.index_prefix + "*/_doc/_search"; res = graphene::utilities::simpleQuery(es); j = fc::json::from_string(res); auto first_id = j["hits"]["hits"][size_t(0)]["_id"].as_string(); @@ -96,7 +96,7 @@ BOOST_AUTO_TEST_CASE(elasticsearch_account_history) { auto willie = create_account("willie"); generate_block(); - es.endpoint = 
es.index_prefix + "*/data/_count"; + es.endpoint = es.index_prefix + "*/_doc/_count"; fc::wait_for( ES_WAIT_TIME, [&]() { res = graphene::utilities::simpleQuery(es); @@ -123,7 +123,7 @@ BOOST_AUTO_TEST_CASE(elasticsearch_account_history) { auto block_date = db.head_block_time(); std::string index_name = graphene::utilities::generateIndexName(block_date, es_index_prefix); - es.endpoint = index_name + "/data/2.9.12"; // we know last op is a transfer of amount 300 + es.endpoint = index_name + "/_doc/2.9.12"; // we know last op is a transfer of amount 300 res = graphene::utilities::getEndPoint(es); j = fc::json::from_string(res); auto last_transfer_amount = j["_source"]["operation_history"]["op_object"]["amount_"]["amount"].as_string(); @@ -196,7 +196,7 @@ BOOST_AUTO_TEST_CASE(elasticsearch_account_history) { generate_block(); - es.endpoint = es.index_prefix + "*/data/_count"; + es.endpoint = es.index_prefix + "*/_doc/_count"; fc::wait_for( ES_WAIT_TIME, [&]() { res = graphene::utilities::simpleQuery(es); j = fc::json::from_string(res); @@ -237,7 +237,7 @@ BOOST_AUTO_TEST_CASE(elasticsearch_objects) { generate_block(); string query = "{ \"query\" : { \"bool\" : { \"must\" : [{\"match_all\": {}}] } } }"; - es.endpoint = es.index_prefix + "*/data/_count"; + es.endpoint = es.index_prefix + "*/_doc/_count"; es.query = query; string res; @@ -251,14 +251,14 @@ BOOST_AUTO_TEST_CASE(elasticsearch_objects) { return (total == "2"); }); - es.endpoint = es.index_prefix + "asset/data/_search"; + es.endpoint = es.index_prefix + "asset/_doc/_search"; res = graphene::utilities::simpleQuery(es); j = fc::json::from_string(res); auto first_id = j["hits"]["hits"][size_t(0)]["_source"]["symbol"].as_string(); BOOST_CHECK_EQUAL(first_id, "USD"); auto bitasset_data_id = j["hits"]["hits"][size_t(0)]["_source"]["bitasset_data_id"].as_string(); - es.endpoint = es.index_prefix + "bitasset/data/_search"; + es.endpoint = es.index_prefix + "bitasset/_doc/_search"; es.query = "{ \"query\" : { \"bool\": { \"must\" : [{ \"term\": { \"object_id\": \""+bitasset_data_id+"\"}}] } } }"; res = graphene::utilities::simpleQuery(es); j = fc::json::from_string(res); From b037edc47ee686a6a499833694410b91bf09ab96 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 20 Dec 2021 17:27:52 +0000 Subject: [PATCH 047/338] Rename variables for better readability --- libraries/plugins/elasticsearch/elasticsearch_plugin.cpp | 6 +++--- libraries/plugins/es_objects/es_objects.cpp | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index 9ec972a72a..63f3512aaf 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -80,7 +80,7 @@ class elasticsearch_plugin_impl std::string bulk_line; std::string index_name; bool is_sync = false; - bool esge7 = false; + bool is_es_version_7_or_above = false; private: bool add_elasticsearch( const account_id_type account_id, const optional& oho, const uint32_t block_number ); const account_transaction_history_object& addNewEntry(const account_statistics_object& stats_obj, @@ -410,7 +410,7 @@ void elasticsearch_plugin_impl::prepareBulk(const account_transaction_history_id const std::string _id = fc::json::to_string(ath_id); fc::mutable_variant_object bulk_header; bulk_header["_index"] = index_name; - if(!esge7) + if(!is_es_version_7_or_above) bulk_header["_type"] = "_doc"; bulk_header["_id"] = 
fc::to_string(ath_id.space_id) + "." + fc::to_string(ath_id.type_id) + "." + fc::to_string(ath_id.instance.value); @@ -574,7 +574,7 @@ void elasticsearch_plugin::plugin_startup() const auto es_version = graphene::utilities::getVersion(es); auto dot_pos = es_version.find('.'); if(std::stoi(es_version.substr(0,dot_pos)) >= 7) - my->esge7 = true; + my->is_es_version_7_or_above = true; ilog("elasticsearch ACCOUNT HISTORY: plugin_startup() begin"); } diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index a693f922e8..b74897f25c 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -74,7 +74,7 @@ class es_objects_plugin_impl uint32_t block_number; fc::time_point_sec block_time; - bool esge7 = false; + bool is_es_version_7_or_above = false; private: template @@ -228,7 +228,7 @@ void es_objects_plugin_impl::remove_from_database( object_id_type id, std::strin fc::mutable_variant_object delete_line; delete_line["_id"] = string(id); delete_line["_index"] = _es_objects_index_prefix + index; - if(!esge7) + if(!is_es_version_7_or_above) delete_line["_type"] = "_doc"; fc::mutable_variant_object final_delete_line; final_delete_line["delete"] = delete_line; @@ -243,7 +243,7 @@ void es_objects_plugin_impl::prepareTemplate(T blockchain_object, string index_n { fc::mutable_variant_object bulk_header; bulk_header["_index"] = _es_objects_index_prefix + index_name; - if(!esge7) + if(!is_es_version_7_or_above) bulk_header["_type"] = "_doc"; if(_es_objects_keep_only_current) { @@ -412,7 +412,7 @@ void es_objects_plugin::plugin_startup() const auto es_version = graphene::utilities::getVersion(es); auto dot_pos = es_version.find('.'); if(std::stoi(es_version.substr(0,dot_pos)) >= 7) - my->esge7 = true; + my->is_es_version_7_or_above = true; } } } From 8397d5dcdffa9fc0ffec1e9dd76225aec51bc8cd Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 20 Dec 2021 17:40:40 +0000 Subject: [PATCH 048/338] Assume ES version is 7 or above by default --- .../elasticsearch/elasticsearch_plugin.cpp | 16 +++++++++++----- libraries/plugins/es_objects/es_objects.cpp | 16 +++++++++++----- 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index 63f3512aaf..23c6dff116 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -80,7 +80,7 @@ class elasticsearch_plugin_impl std::string bulk_line; std::string index_name; bool is_sync = false; - bool is_es_version_7_or_above = false; + bool is_es_version_7_or_above = true; private: bool add_elasticsearch( const account_id_type account_id, const optional& oho, const uint32_t block_number ); const account_transaction_history_object& addNewEntry(const account_statistics_object& stats_obj, @@ -571,10 +571,16 @@ void elasticsearch_plugin::plugin_startup() if(!graphene::utilities::checkES(es)) FC_THROW_EXCEPTION(fc::exception, "ES database is not up in url ${url}", ("url", my->_elasticsearch_node_url)); - const auto es_version = graphene::utilities::getVersion(es); - auto dot_pos = es_version.find('.'); - if(std::stoi(es_version.substr(0,dot_pos)) >= 7) - my->is_es_version_7_or_above = true; + try { + const auto es_version = graphene::utilities::getVersion(es); + auto dot_pos = es_version.find('.'); + if( std::stoi(es_version.substr(0,dot_pos)) < 7 ) + my->is_es_version_7_or_above = false; 
+ } + catch( ... ) + { + wlog( "Unable to get ES version, assuming it is above 7" ); + } ilog("elasticsearch ACCOUNT HISTORY: plugin_startup() begin"); } diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index b74897f25c..6e41a4e2b8 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -74,7 +74,7 @@ class es_objects_plugin_impl uint32_t block_number; fc::time_point_sec block_time; - bool is_es_version_7_or_above = false; + bool is_es_version_7_or_above = true; private: template @@ -409,10 +409,16 @@ void es_objects_plugin::plugin_startup() FC_THROW_EXCEPTION(fc::exception, "ES database is not up in url ${url}", ("url", my->_es_objects_elasticsearch_url)); ilog("elasticsearch OBJECTS: plugin_startup() begin"); - const auto es_version = graphene::utilities::getVersion(es); - auto dot_pos = es_version.find('.'); - if(std::stoi(es_version.substr(0,dot_pos)) >= 7) - my->is_es_version_7_or_above = true; + try { + const auto es_version = graphene::utilities::getVersion(es); + auto dot_pos = es_version.find('.'); + if( std::stoi(es_version.substr(0,dot_pos)) < 7 ) + my->is_es_version_7_or_above = false; + } + catch( ... ) + { + wlog( "Unable to get ES version, assuming it is above 7" ); + } } } } From 87abc8b2f2706f3b65ed58ce9758ce38fde94f87 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 20 Dec 2021 17:42:04 +0000 Subject: [PATCH 049/338] Switch type from 'data' to '_doc' for search in ES --- libraries/plugins/elasticsearch/elasticsearch_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index 23c6dff116..fc2da78be3 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -699,7 +699,7 @@ graphene::utilities::ES elasticsearch_plugin::prepareHistoryQuery(string query) es.curl = curl; es.elasticsearch_url = my->_elasticsearch_node_url; es.index_prefix = my->_elasticsearch_index_prefix; - es.endpoint = es.index_prefix + "*/data/_search"; + es.endpoint = es.index_prefix + "*/_doc/_search"; es.query = query; return es; From ce7a639f5e1bfc8e4c2285f143d9f0338e2bdcaa Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 20 Dec 2021 17:47:33 +0000 Subject: [PATCH 050/338] Remove generateIndexName function from header file --- libraries/utilities/include/graphene/utilities/elasticsearch.hpp | 1 - 1 file changed, 1 deletion(-) diff --git a/libraries/utilities/include/graphene/utilities/elasticsearch.hpp b/libraries/utilities/include/graphene/utilities/elasticsearch.hpp index 4ff9ffe460..2cb671e19d 100644 --- a/libraries/utilities/include/graphene/utilities/elasticsearch.hpp +++ b/libraries/utilities/include/graphene/utilities/elasticsearch.hpp @@ -61,7 +61,6 @@ namespace graphene { namespace utilities { bool deleteAll(ES& es); bool handleBulkResponse(long http_code, const std::string& CurlReadBuffer); const std::string getEndPoint(ES& es); - const std::string generateIndexName(const fc::time_point_sec& block_date, const std::string& _elasticsearch_index_prefix); const std::string doCurl(CurlRequest& curl); const std::string joinBulkLines(const std::vector& bulk); long getResponseCode(CURL *handler); From e2d8baf63ce5031f4e7658ff7995552ea9affde5 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 20 Dec 2021 17:50:12 +0000 Subject: [PATCH 051/338] Update log messages --- 
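The message reworded below is the fallback branch of the version probe introduced a few commits earlier: graphene::utilities::getVersion() returns the cluster's version.number string, and when that lookup throws, both plugins keep the default of treating the cluster as 7.x or newer (so no "_type" field is added to bulk headers or delete lines). A minimal standalone sketch of that decision, assuming a version string such as "7.16.2"; the helper name below is invented for illustration and is not part of the patch:

    #include <string>

    // True for "7.0.0", "7.16.2", "8.1.0"; false for "6.8.23".
    // The plugins apply the same comparison, and simply keep the optimistic
    // default of true when the version string cannot be fetched or parsed.
    bool major_version_is_7_or_above( const std::string& version_number )
    {
       const auto dot_pos = version_number.find( '.' );
       return std::stoi( version_number.substr( 0, dot_pos ) ) >= 7;
    }
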
libraries/plugins/elasticsearch/elasticsearch_plugin.cpp | 2 +- libraries/plugins/es_objects/es_objects.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index fc2da78be3..87f24334ea 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -579,7 +579,7 @@ void elasticsearch_plugin::plugin_startup() } catch( ... ) { - wlog( "Unable to get ES version, assuming it is above 7" ); + wlog( "Unable to get ES version, assuming it is 7 or above" ); } ilog("elasticsearch ACCOUNT HISTORY: plugin_startup() begin"); diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index 6e41a4e2b8..73dcb1c321 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -417,7 +417,7 @@ void es_objects_plugin::plugin_startup() } catch( ... ) { - wlog( "Unable to get ES version, assuming it is above 7" ); + wlog( "Unable to get ES version, assuming it is 7 or above" ); } } From 7edb5ce7f54fbe375994438f645a795d7b13cb4a Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 20 Dec 2021 20:52:54 +0000 Subject: [PATCH 052/338] Fix ES tests for the generateIndexName change --- tests/elasticsearch/main.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/elasticsearch/main.cpp b/tests/elasticsearch/main.cpp index bbce2cccd1..1b6a671ee7 100644 --- a/tests/elasticsearch/main.cpp +++ b/tests/elasticsearch/main.cpp @@ -121,7 +121,7 @@ BOOST_AUTO_TEST_CASE(elasticsearch_account_history) { // check the visitor data auto block_date = db.head_block_time(); - std::string index_name = graphene::utilities::generateIndexName(block_date, es_index_prefix); + std::string index_name = es_index_prefix + block_date.to_iso_string().substr( 0, 7 ); // yyyy-mm es.endpoint = index_name + "/_doc/2.9.12"; // we know last op is a transfer of amount 300 res = graphene::utilities::getEndPoint(es); From 679e6459adf38641448cae0283d812296236c80e Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 20 Dec 2021 21:36:59 +0000 Subject: [PATCH 053/338] Check ES on plugin initialization --- libraries/plugins/elasticsearch/elasticsearch_plugin.cpp | 9 +++++---- libraries/plugins/es_objects/es_objects.cpp | 9 ++++++--- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index 87f24334ea..f3eac13e1f 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -559,10 +559,7 @@ void elasticsearch_plugin::plugin_initialize(const boost::program_options::varia "Error populating ES database, we are going to keep trying."); }); } -} -void elasticsearch_plugin::plugin_startup() -{ graphene::utilities::ES es; es.curl = my->curl; es.elasticsearch_url = my->_elasticsearch_node_url; @@ -582,7 +579,11 @@ void elasticsearch_plugin::plugin_startup() wlog( "Unable to get ES version, assuming it is 7 or above" ); } - ilog("elasticsearch ACCOUNT HISTORY: plugin_startup() begin"); +} + +void elasticsearch_plugin::plugin_startup() +{ + // Nothing to do } operation_history_object elasticsearch_plugin::get_operation_by_id(operation_history_id_type id) diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp 
index 73dcb1c321..182048e9fa 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -395,10 +395,7 @@ void es_objects_plugin::plugin_initialize(const boost::program_options::variable "Error deleting object from ES database, we are going to keep trying."); } }); -} -void es_objects_plugin::plugin_startup() -{ graphene::utilities::ES es; es.curl = my->curl; es.elasticsearch_url = my->_es_objects_elasticsearch_url; @@ -419,6 +416,12 @@ void es_objects_plugin::plugin_startup() { wlog( "Unable to get ES version, assuming it is 7 or above" ); } + +} + +void es_objects_plugin::plugin_startup() +{ + // Nothing to do } } } From 149b285c7bdb21ee3a884a336325fc6927795ee1 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 20 Dec 2021 21:42:23 +0000 Subject: [PATCH 054/338] Update a comment --- tests/elasticsearch/main.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/elasticsearch/main.cpp b/tests/elasticsearch/main.cpp index 1b6a671ee7..772ffe118b 100644 --- a/tests/elasticsearch/main.cpp +++ b/tests/elasticsearch/main.cpp @@ -121,7 +121,7 @@ BOOST_AUTO_TEST_CASE(elasticsearch_account_history) { // check the visitor data auto block_date = db.head_block_time(); - std::string index_name = es_index_prefix + block_date.to_iso_string().substr( 0, 7 ); // yyyy-mm + std::string index_name = es_index_prefix + block_date.to_iso_string().substr( 0, 7 ); // yyyy-MM es.endpoint = index_name + "/_doc/2.9.12"; // we know last op is a transfer of amount 300 res = graphene::utilities::getEndPoint(es); From eb1b5758d526777abe7c8d6e57b5d56aa272e193 Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 21 Dec 2021 17:40:17 +0000 Subject: [PATCH 055/338] Use ES 7.16.2 in Github Actions workflows --- .github/workflows/build-and-test.ubuntu-debug.yml | 2 +- .github/workflows/build-and-test.ubuntu-release.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-and-test.ubuntu-debug.yml b/.github/workflows/build-and-test.ubuntu-debug.yml index 960902bf3b..3013cf912b 100644 --- a/.github/workflows/build-and-test.ubuntu-debug.yml +++ b/.github/workflows/build-and-test.ubuntu-debug.yml @@ -12,7 +12,7 @@ jobs: runs-on: ${{ matrix.os }} services: elasticsearch: - image: docker://elasticsearch:7.10.1 + image: docker://elasticsearch:7.16.2 options: --env discovery.type=single-node --publish 9200:9200 --publish 9300:9300 steps: - name: Install dependencies diff --git a/.github/workflows/build-and-test.ubuntu-release.yml b/.github/workflows/build-and-test.ubuntu-release.yml index 69e25a21ae..ba01da96d6 100644 --- a/.github/workflows/build-and-test.ubuntu-release.yml +++ b/.github/workflows/build-and-test.ubuntu-release.yml @@ -12,7 +12,7 @@ jobs: runs-on: ${{ matrix.os }} services: elasticsearch: - image: docker://elasticsearch:7.10.1 + image: docker://elasticsearch:7.16.2 options: --env discovery.type=single-node --publish 9200:9200 --publish 9300:9300 steps: - name: Install dependencies From 3907840dc8849ff40eddb605e3a6a41c953da794 Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 21 Dec 2021 18:35:15 +0000 Subject: [PATCH 056/338] Refactor code to fix code smells --- .../elasticsearch/elasticsearch_plugin.cpp | 58 +++++++++---------- libraries/plugins/es_objects/es_objects.cpp | 54 +++++++++-------- libraries/utilities/elasticsearch.cpp | 27 +++++++-- .../graphene/utilities/elasticsearch.hpp | 13 +++-- 4 files changed, 83 insertions(+), 69 deletions(-) diff --git 
a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index f3eac13e1f..9a2eb1d333 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -53,6 +53,10 @@ class elasticsearch_plugin_impl return _self.database(); } + friend class graphene::elasticsearch::elasticsearch_plugin; + + private: + elasticsearch_plugin& _self; primary_index< operation_history_index >* _oho_index; @@ -81,7 +85,7 @@ class elasticsearch_plugin_impl std::string index_name; bool is_sync = false; bool is_es_version_7_or_above = true; - private: + bool add_elasticsearch( const account_id_type account_id, const optional& oho, const uint32_t block_number ); const account_transaction_history_object& addNewEntry(const account_statistics_object& stats_obj, const account_id_type& account_id, @@ -97,6 +101,7 @@ class elasticsearch_plugin_impl void createBulkLine(const account_transaction_history_object& ath); void prepareBulk(const account_transaction_history_id_type& ath_id); void populateESstruct(); + void init_program_options(const boost::program_options::variables_map& options); }; elasticsearch_plugin_impl::~elasticsearch_plugin_impl() @@ -107,8 +112,8 @@ elasticsearch_plugin_impl::~elasticsearch_plugin_impl() } } -static const std::string generateIndexName( const fc::time_point_sec& block_date, - const std::string& _elasticsearch_index_prefix ) +static std::string generateIndexName( const fc::time_point_sec& block_date, + const std::string& _elasticsearch_index_prefix ) { auto block_date_string = block_date.to_iso_string(); std::vector parts; @@ -509,44 +514,49 @@ void elasticsearch_plugin::plugin_set_program_options( cfg.add(cli); } -void elasticsearch_plugin::plugin_initialize(const boost::program_options::variables_map& options) +void detail::elasticsearch_plugin_impl::init_program_options(const boost::program_options::variables_map& options) { - my->_oho_index = database().add_index< primary_index< operation_history_index > >(); - database().add_index< primary_index< account_transaction_history_index > >(); - if (options.count("elasticsearch-node-url") > 0) { - my->_elasticsearch_node_url = options["elasticsearch-node-url"].as(); + _elasticsearch_node_url = options["elasticsearch-node-url"].as(); } if (options.count("elasticsearch-bulk-replay") > 0) { - my->_elasticsearch_bulk_replay = options["elasticsearch-bulk-replay"].as(); + _elasticsearch_bulk_replay = options["elasticsearch-bulk-replay"].as(); } if (options.count("elasticsearch-bulk-sync") > 0) { - my->_elasticsearch_bulk_sync = options["elasticsearch-bulk-sync"].as(); + _elasticsearch_bulk_sync = options["elasticsearch-bulk-sync"].as(); } if (options.count("elasticsearch-visitor") > 0) { - my->_elasticsearch_visitor = options["elasticsearch-visitor"].as(); + _elasticsearch_visitor = options["elasticsearch-visitor"].as(); } if (options.count("elasticsearch-basic-auth") > 0) { - my->_elasticsearch_basic_auth = options["elasticsearch-basic-auth"].as(); + _elasticsearch_basic_auth = options["elasticsearch-basic-auth"].as(); } if (options.count("elasticsearch-index-prefix") > 0) { - my->_elasticsearch_index_prefix = options["elasticsearch-index-prefix"].as(); + _elasticsearch_index_prefix = options["elasticsearch-index-prefix"].as(); } if (options.count("elasticsearch-operation-object") > 0) { - my->_elasticsearch_operation_object = options["elasticsearch-operation-object"].as(); + _elasticsearch_operation_object = 
options["elasticsearch-operation-object"].as(); } if (options.count("elasticsearch-start-es-after-block") > 0) { - my->_elasticsearch_start_es_after_block = options["elasticsearch-start-es-after-block"].as(); + _elasticsearch_start_es_after_block = options["elasticsearch-start-es-after-block"].as(); } if (options.count("elasticsearch-operation-string") > 0) { - my->_elasticsearch_operation_string = options["elasticsearch-operation-string"].as(); + _elasticsearch_operation_string = options["elasticsearch-operation-string"].as(); } if (options.count("elasticsearch-mode") > 0) { const auto option_number = options["elasticsearch-mode"].as(); if(option_number > mode::all) FC_THROW_EXCEPTION(graphene::chain::plugin_exception, "Elasticsearch mode not valid"); - my->_elasticsearch_mode = static_cast(options["elasticsearch-mode"].as()); + _elasticsearch_mode = static_cast(options["elasticsearch-mode"].as()); } +} + +void elasticsearch_plugin::plugin_initialize(const boost::program_options::variables_map& options) +{ + my->_oho_index = database().add_index< primary_index< operation_history_index > >(); + database().add_index< primary_index< account_transaction_history_index > >(); + + my->init_program_options( options ); if(my->_elasticsearch_mode != mode::only_query) { if (my->_elasticsearch_mode == mode::all && !my->_elasticsearch_operation_string) @@ -566,19 +576,9 @@ void elasticsearch_plugin::plugin_initialize(const boost::program_options::varia es.auth = my->_elasticsearch_basic_auth; if(!graphene::utilities::checkES(es)) - FC_THROW_EXCEPTION(fc::exception, "ES database is not up in url ${url}", ("url", my->_elasticsearch_node_url)); - - try { - const auto es_version = graphene::utilities::getVersion(es); - auto dot_pos = es_version.find('.'); - if( std::stoi(es_version.substr(0,dot_pos)) < 7 ) - my->is_es_version_7_or_above = false; - } - catch( ... 
) - { - wlog( "Unable to get ES version, assuming it is 7 or above" ); - } + FC_THROW( "ES database is not up in url ${url}", ("url", my->_elasticsearch_node_url) ); + graphene::utilities::checkESVersion7OrAbove(es, my->is_es_version_7_or_above); } void elasticsearch_plugin::plugin_startup() diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index 182048e9fa..93a4687f36 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -53,6 +53,9 @@ class es_objects_plugin_impl bool genesis(); void remove_from_database(object_id_type id, std::string index); + friend class graphene::es_objects::es_objects_plugin; + + private: es_objects_plugin& _self; std::string _es_objects_elasticsearch_url = "http://localhost:9200/"; std::string _es_objects_auth = ""; @@ -76,9 +79,10 @@ class es_objects_plugin_impl fc::time_point_sec block_time; bool is_es_version_7_or_above = true; - private: template void prepareTemplate(T blockchain_object, string index_name); + + void init_program_options(const boost::program_options::variables_map& options); }; bool es_objects_plugin_impl::genesis() @@ -323,47 +327,52 @@ void es_objects_plugin::plugin_set_program_options( cfg.add(cli); } -void es_objects_plugin::plugin_initialize(const boost::program_options::variables_map& options) +void detail::es_objects_plugin_impl::init_program_options(const boost::program_options::variables_map& options) { if (options.count("es-objects-elasticsearch-url") > 0) { - my->_es_objects_elasticsearch_url = options["es-objects-elasticsearch-url"].as(); + _es_objects_elasticsearch_url = options["es-objects-elasticsearch-url"].as(); } if (options.count("es-objects-auth") > 0) { - my->_es_objects_auth = options["es-objects-auth"].as(); + _es_objects_auth = options["es-objects-auth"].as(); } if (options.count("es-objects-bulk-replay") > 0) { - my->_es_objects_bulk_replay = options["es-objects-bulk-replay"].as(); + _es_objects_bulk_replay = options["es-objects-bulk-replay"].as(); } if (options.count("es-objects-bulk-sync") > 0) { - my->_es_objects_bulk_sync = options["es-objects-bulk-sync"].as(); + _es_objects_bulk_sync = options["es-objects-bulk-sync"].as(); } if (options.count("es-objects-proposals") > 0) { - my->_es_objects_proposals = options["es-objects-proposals"].as(); + _es_objects_proposals = options["es-objects-proposals"].as(); } if (options.count("es-objects-accounts") > 0) { - my->_es_objects_accounts = options["es-objects-accounts"].as(); + _es_objects_accounts = options["es-objects-accounts"].as(); } if (options.count("es-objects-assets") > 0) { - my->_es_objects_assets = options["es-objects-assets"].as(); + _es_objects_assets = options["es-objects-assets"].as(); } if (options.count("es-objects-balances") > 0) { - my->_es_objects_balances = options["es-objects-balances"].as(); + _es_objects_balances = options["es-objects-balances"].as(); } if (options.count("es-objects-limit-orders") > 0) { - my->_es_objects_limit_orders = options["es-objects-limit-orders"].as(); + _es_objects_limit_orders = options["es-objects-limit-orders"].as(); } if (options.count("es-objects-asset-bitasset") > 0) { - my->_es_objects_asset_bitasset = options["es-objects-asset-bitasset"].as(); + _es_objects_asset_bitasset = options["es-objects-asset-bitasset"].as(); } if (options.count("es-objects-index-prefix") > 0) { - my->_es_objects_index_prefix = options["es-objects-index-prefix"].as(); + _es_objects_index_prefix = options["es-objects-index-prefix"].as(); 
} if (options.count("es-objects-keep-only-current") > 0) { - my->_es_objects_keep_only_current = options["es-objects-keep-only-current"].as(); + _es_objects_keep_only_current = options["es-objects-keep-only-current"].as(); } if (options.count("es-objects-start-es-after-block") > 0) { - my->_es_objects_start_es_after_block = options["es-objects-start-es-after-block"].as(); + _es_objects_start_es_after_block = options["es-objects-start-es-after-block"].as(); } +} + +void es_objects_plugin::plugin_initialize(const boost::program_options::variables_map& options) +{ + my->init_program_options( options ); database().applied_block.connect([this](const signed_block &b) { if(b.block_num() == 1 && my->_es_objects_start_es_after_block == 0) { @@ -403,20 +412,9 @@ void es_objects_plugin::plugin_initialize(const boost::program_options::variable es.auth = my->_es_objects_index_prefix; if(!graphene::utilities::checkES(es)) - FC_THROW_EXCEPTION(fc::exception, "ES database is not up in url ${url}", ("url", my->_es_objects_elasticsearch_url)); - ilog("elasticsearch OBJECTS: plugin_startup() begin"); - - try { - const auto es_version = graphene::utilities::getVersion(es); - auto dot_pos = es_version.find('.'); - if( std::stoi(es_version.substr(0,dot_pos)) < 7 ) - my->is_es_version_7_or_above = false; - } - catch( ... ) - { - wlog( "Unable to get ES version, assuming it is 7 or above" ); - } + FC_THROW( "ES database is not up in url ${url}", ("url", my->_es_objects_elasticsearch_url) ); + graphene::utilities::checkESVersion7OrAbove(es, my->is_es_version_7_or_above); } void es_objects_plugin::plugin_startup() diff --git a/libraries/utilities/elasticsearch.cpp b/libraries/utilities/elasticsearch.cpp index 97c93eb6a8..cf66eb1ecb 100644 --- a/libraries/utilities/elasticsearch.cpp +++ b/libraries/utilities/elasticsearch.cpp @@ -49,7 +49,7 @@ bool checkES(ES& es) } -const std::string getVersion(ES& es) +std::string getESVersion(ES& es) { graphene::utilities::CurlRequest curl_request; curl_request.handler = es.curl; @@ -62,7 +62,22 @@ const std::string getVersion(ES& es) return response["version"]["number"].as_string(); } -const std::string simpleQuery(ES& es) +void checkESVersion7OrAbove(ES& es, bool& result) noexcept +{ + static const int64_t version_7 = 7; + try { + const auto es_version = graphene::utilities::getESVersion(es); + auto dot_pos = es_version.find('.'); + result = ( std::stoi(es_version.substr(0,dot_pos)) >= version_7 ); + } + catch( ... 
) + { + wlog( "Unable to get ES version, assuming it is 7 or above" ); + result = true; + } +} + +std::string simpleQuery(ES& es) { graphene::utilities::CurlRequest curl_request; curl_request.handler = es.curl; @@ -92,7 +107,7 @@ bool SendBulk(ES&& es) return false; } -const std::string joinBulkLines(const std::vector& bulk) +std::string joinBulkLines(const std::vector& bulk) { auto bulking = boost::algorithm::join(bulk, "\n"); bulking = bulking + "\n"; @@ -132,7 +147,7 @@ bool handleBulkResponse(long http_code, const std::string& CurlReadBuffer) return true; } -const std::vector createBulk(const fc::mutable_variant_object& bulk_header, std::string&& data) +std::vector createBulk(const fc::mutable_variant_object& bulk_header, std::string&& data) { std::vector bulk; fc::mutable_variant_object final_bulk_header; @@ -157,7 +172,7 @@ bool deleteAll(ES& es) else return true; } -const std::string getEndPoint(ES& es) +std::string getEndPoint(ES& es) { graphene::utilities::CurlRequest curl_request; curl_request.handler = es.curl; @@ -168,7 +183,7 @@ const std::string getEndPoint(ES& es) return doCurl(curl_request); } -const std::string doCurl(CurlRequest& curl) +std::string doCurl(CurlRequest& curl) { std::string CurlReadBuffer; struct curl_slist *headers = NULL; diff --git a/libraries/utilities/include/graphene/utilities/elasticsearch.hpp b/libraries/utilities/include/graphene/utilities/elasticsearch.hpp index 2cb671e19d..2fb29a0967 100644 --- a/libraries/utilities/include/graphene/utilities/elasticsearch.hpp +++ b/libraries/utilities/include/graphene/utilities/elasticsearch.hpp @@ -54,15 +54,16 @@ namespace graphene { namespace utilities { }; bool SendBulk(ES&& es); - const std::vector createBulk(const fc::mutable_variant_object& bulk_header, std::string&& data); + std::vector createBulk(const fc::mutable_variant_object& bulk_header, std::string&& data); bool checkES(ES& es); - const std::string getVersion(ES& es); - const std::string simpleQuery(ES& es); + std::string getESVersion(ES& es); + void checkESVersion7OrAbove(ES& es, bool& result) noexcept; + std::string simpleQuery(ES& es); bool deleteAll(ES& es); bool handleBulkResponse(long http_code, const std::string& CurlReadBuffer); - const std::string getEndPoint(ES& es); - const std::string doCurl(CurlRequest& curl); - const std::string joinBulkLines(const std::vector& bulk); + std::string getEndPoint(ES& es); + std::string doCurl(CurlRequest& curl); + std::string joinBulkLines(const std::vector& bulk); long getResponseCode(CURL *handler); } } // end namespace graphene::utilities From b4497c497ce66df0db6006a65d55c4fb5200beed Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 21 Dec 2021 18:58:56 +0000 Subject: [PATCH 057/338] Wrap long lines --- .../elasticsearch/elasticsearch_plugin.cpp | 32 +++++++++++-------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index 9a2eb1d333..2eb601fd65 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -56,7 +56,6 @@ class elasticsearch_plugin_impl friend class graphene::elasticsearch::elasticsearch_plugin; private: - elasticsearch_plugin& _self; primary_index< operation_history_index >* _oho_index; @@ -86,7 +85,8 @@ class elasticsearch_plugin_impl bool is_sync = false; bool is_es_version_7_or_above = true; - bool add_elasticsearch( const account_id_type account_id, const optional& oho, const 
uint32_t block_number ); + bool add_elasticsearch( const account_id_type account_id, const optional& oho, + const uint32_t block_number ); const account_transaction_history_object& addNewEntry(const account_statistics_object& stats_obj, const account_id_type& account_id, const optional & oho); @@ -306,7 +306,8 @@ void elasticsearch_plugin_impl::doVisitor(const optional & oho) +const account_transaction_history_object& elasticsearch_plugin_impl::addNewEntry( + const account_statistics_object& stats_obj, + const account_id_type& account_id, + const optional & oho) { graphene::chain::database& db = database(); const auto &ath = db.create([&](account_transaction_history_object &obj) { @@ -424,7 +430,8 @@ void elasticsearch_plugin_impl::prepareBulk(const account_transaction_history_id prepare.clear(); } -void elasticsearch_plugin_impl::cleanObjects(const account_transaction_history_id_type& ath_id, const account_id_type& account_id) +void elasticsearch_plugin_impl::cleanObjects( const account_transaction_history_id_type& ath_id, + const account_id_type& account_id ) { graphene::chain::database& db = database(); // remove everything except current object from ath @@ -652,7 +659,7 @@ vector elasticsearch_plugin::get_account_history( const auto response = graphene::utilities::simpleQuery(es); variant variant_response = fc::json::from_string(response); - + const auto hits = variant_response["hits"]["total"]; uint32_t size; if( hits.is_object() ) // ES-7 ? @@ -711,5 +718,4 @@ mode elasticsearch_plugin::get_running_mode() return my->_elasticsearch_mode; } - } } From 89e4276901dd64fce17d32f17c418f3147c340bd Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 21 Dec 2021 20:42:28 +0000 Subject: [PATCH 058/338] Fix Github Actions Ubuntu debug build Build tests in root volume --- .github/workflows/build-and-test.ubuntu-debug.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build-and-test.ubuntu-debug.yml b/.github/workflows/build-and-test.ubuntu-debug.yml index 3013cf912b..9b8f81fa3a 100644 --- a/.github/workflows/build-and-test.ubuntu-debug.yml +++ b/.github/workflows/build-and-test.ubuntu-debug.yml @@ -51,14 +51,14 @@ jobs: pwd df -h . 
mkdir -p _build - sudo mkdir -p /_build/libraries /_build/programs /mnt/_build/tests - sudo chmod a+rwx /_build/libraries /_build/programs /mnt/_build/tests + sudo mkdir -p /_build/libraries /_build/programs /_build/tests /mnt/_build + sudo chmod a+rwx /_build/libraries /_build/programs /_build/tests ln -s /_build/libraries _build/libraries ln -s /_build/programs _build/programs - ln -s /mnt/_build/tests _build/tests + ln -s /_build/tests _build/tests sudo ln -s /_build/libraries /mnt/_build/libraries sudo ln -s /_build/programs /mnt/_build/programs - sudo ln -s /mnt/_build/tests /_build/tests + sudo ln -s /_build/tests /mnt/_build/tests ls -al _build pushd _build export -n BOOST_ROOT BOOST_INCLUDEDIR BOOST_LIBRARYDIR From a1c6bbeb42f710e78d3638d92929dae1d892923d Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 21 Dec 2021 21:37:50 +0000 Subject: [PATCH 059/338] Fix a code smell --- libraries/utilities/elasticsearch.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/utilities/elasticsearch.cpp b/libraries/utilities/elasticsearch.cpp index cf66eb1ecb..9a268b766e 100644 --- a/libraries/utilities/elasticsearch.cpp +++ b/libraries/utilities/elasticsearch.cpp @@ -153,7 +153,7 @@ std::vector createBulk(const fc::mutable_variant_object& bulk_heade fc::mutable_variant_object final_bulk_header; final_bulk_header["index"] = bulk_header; bulk.push_back(fc::json::to_string(final_bulk_header)); - bulk.push_back(data); + bulk.emplace_back(std::move(data)); return bulk; } From 76f44380d99d45885d43487c81c9db2692f24e3c Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 21 Dec 2021 22:13:16 +0000 Subject: [PATCH 060/338] Make with -j1 in Github Actions Ubuntu debug build --- .github/workflows/build-and-test.ubuntu-debug.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build-and-test.ubuntu-debug.yml b/.github/workflows/build-and-test.ubuntu-debug.yml index 9b8f81fa3a..2615bf7d31 100644 --- a/.github/workflows/build-and-test.ubuntu-debug.yml +++ b/.github/workflows/build-and-test.ubuntu-debug.yml @@ -83,13 +83,13 @@ jobs: export CCACHE_DIR="$GITHUB_WORKSPACE/ccache" mkdir -p "$CCACHE_DIR" df -h - make -j 2 -C _build chain_test - make -j 2 -C _build cli_test - make -j 2 -C _build app_test - make -j 2 -C _build es_test - make -j 2 -C _build cli_wallet - make -j 2 -C _build witness_node - make -j 2 -C _build + make -j 1 -C _build chain_test + make -j 1 -C _build cli_test + make -j 1 -C _build app_test + make -j 1 -C _build es_test + make -j 1 -C _build cli_wallet + make -j 1 -C _build witness_node + make -j 1 -C _build df -h du -hs _build/libraries/* _build/programs/* _build/tests/* du -hs _build/* From ab64829fc6985ca5efc4b134be9c3420b49bb8d7 Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 22 Dec 2021 22:06:32 +0000 Subject: [PATCH 061/338] Fix code smells --- .../elasticsearch/elasticsearch_plugin.cpp | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index ec0bc9bd42..44c4ee3c77 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -273,7 +273,7 @@ struct es_data_adaptor { { fc::mutable_variant_object o(op); - map to_string_fields = { + static const map> to_string_fields = { { "parameters", data_type::array_type }, // in committee proposals, current_fees.parameters { "op", 
data_type::static_variant_type }, // proposal_create_op.proposed_ops[*].op { "proposed_ops", data_type::array_type }, @@ -285,20 +285,20 @@ struct es_data_adaptor { { "acceptable_collateral", data_type::map_type }, { "acceptable_borrowers", data_type::map_type } }; - map original_arrays; + map> original_arrays; vector keys_to_rename; - for (auto i = o.begin(); i != o.end(); ++i) + for( auto& i : o ) { - const string& name = (*i).key(); - auto& element = (*i).value(); - if (element.is_object()) + const string& name = i.key(); + auto& element = i.value(); + if( element.is_object() ) { - auto& vo = element.get_object(); - if (vo.contains(name.c_str())) // transfer_operation.amount.amount + const auto& vo = element.get_object(); + if( vo.contains(name.c_str()) ) // transfer_operation.amount.amount keys_to_rename.emplace_back(name); element = adapt(vo); } - else if (element.is_array()) + else if( element.is_array() ) { auto& array = element.get_array(); if( to_string_fields.find(name) != to_string_fields.end() ) @@ -343,7 +343,7 @@ struct es_data_adaptor { { const auto& name = pair.first; auto& value = pair.second; - auto type = to_string_fields[name]; + auto type = to_string_fields.at(name); o[name + "_object"] = adapt( value, type ); } @@ -465,7 +465,7 @@ void elasticsearch_plugin_impl::doBlock(uint32_t trx_in_block, const signed_bloc struct operation_visitor { - typedef void result_type; + using result_type = void; share_type fee_amount; asset_id_type fee_asset; From 38d5b285f11e5502531396cb0c192dd1b7d4f2e3 Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 22 Dec 2021 22:12:43 +0000 Subject: [PATCH 062/338] Simplify code about adaption of nonce for ES --- .../plugins/elasticsearch/elasticsearch_plugin.cpp | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index 44c4ee3c77..5bca4cf0c9 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -319,18 +319,9 @@ struct es_data_adaptor { o.erase(i); } - if( o.find("memo") != o.end() ) + if( o.find("nonce") != o.end() ) { - auto& memo = o["memo"]; - if (memo.is_object()) - { - fc::mutable_variant_object tmp(memo.get_object()); - if (tmp.find("nonce") != tmp.end()) - { - tmp["nonce"] = tmp["nonce"].as_string(); - o["memo"] = tmp; - } - } + o["nonce"] = o["nonce"].as_string(); } if( o.find("owner") != o.end() && o["owner"].is_string() ) // vesting_balance_*_operation.owner From 0165630d4d940bcf29a91a405b9371171ff6f043 Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 23 Dec 2021 19:12:56 +0000 Subject: [PATCH 063/338] Add a virtual destructor in base_operation --- libraries/protocol/include/graphene/protocol/base.hpp | 1 + 1 file changed, 1 insertion(+) diff --git a/libraries/protocol/include/graphene/protocol/base.hpp b/libraries/protocol/include/graphene/protocol/base.hpp index d7b533d0d3..46b127afa4 100644 --- a/libraries/protocol/include/graphene/protocol/base.hpp +++ b/libraries/protocol/include/graphene/protocol/base.hpp @@ -123,6 +123,7 @@ namespace graphene { namespace protocol { struct base_operation { + virtual ~base_operation() = default; template share_type calculate_fee(const T& params)const { From ba394221688fe9802e0271afc45f0ab37fa7b42f Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 24 Dec 2021 16:08:09 +0000 Subject: [PATCH 064/338] Adapt more maps as objects for ES --- 
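The "more maps" referred to here are the flattened authority maps — account_auths, address_auths and key_auths — which until now were only stored as raw nested arrays. With this change the original array is kept (its numeric weights are stringified like any other array element) and a parallel "<name>_object" field is added in which every [key, value] pair becomes a small object with typed members, produced by adapt_map_item() and extract_data_from_variant(). A rough illustration with made-up values (the public key string is invented, and the call is written as if made from within the plugin's translation unit where es_data_adaptor is visible):

    // Input fragment, as serialized by fc:
    //   "key_auths": [ [ "GPHExamplePublicKey", 1 ] ]
    //
    // Output fragment after es_data_adaptor::adapt():
    //   "key_auths":        [ [ "GPHExamplePublicKey", "1" ] ]
    //   "key_auths_object": [ { "key_string": "GPHExamplePublicKey", "data_int": 1 } ]
    fc::variant raw = fc::json::from_string(
          R"({"weight_threshold":1,"account_auths":[],"address_auths":[],)"
          R"("key_auths":[["GPHExamplePublicKey",1]]})" );
    fc::variant adapted = es_data_adaptor::adapt( raw.get_object() );
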
.../elasticsearch/elasticsearch_plugin.cpp | 21 ++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index 5bca4cf0c9..e54f64c054 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -273,6 +273,16 @@ struct es_data_adaptor { { fc::mutable_variant_object o(op); + // Note: these fields are maps, but were stored in ES as flattened arrays + static const map> flattened_fields = { + { "account_auths", data_type::map_type }, + { "address_auths", data_type::map_type }, + { "key_auths", data_type::map_type } + }; + // Note: + // object arrays listed in this map are stored redundantly in ES, with one instance as a nested object and + // the other as a string for backward compatibility, + // object arrays not listed in this map are stored as nested objects only. static const map> to_string_fields = { { "parameters", data_type::array_type }, // in committee proposals, current_fees.parameters { "op", data_type::static_variant_type }, // proposal_create_op.proposed_ops[*].op @@ -307,6 +317,13 @@ struct es_data_adaptor { original_arrays[name] = array; element = fc::json::to_string(element); } + else if( flattened_fields.find(name) != flattened_fields.end() ) + { + // make a backup and adapt the original + auto backup = array; + original_arrays[name] = backup; + adapt(array); + } else adapt(array); } @@ -334,7 +351,9 @@ struct es_data_adaptor { { const auto& name = pair.first; auto& value = pair.second; - auto type = to_string_fields.at(name); + auto type = data_type::map_type; + if( to_string_fields.find(name) != to_string_fields.end() ) + type = to_string_fields.at(name); o[name + "_object"] = adapt( value, type ); } From e28cb307ef252554f16f92eac0615c8c8b5c7f37 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 24 Dec 2021 18:51:12 +0000 Subject: [PATCH 065/338] Fix adaption of strings in maps or static variants --- libraries/plugins/elasticsearch/elasticsearch_plugin.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index e54f64c054..9521d185c3 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -398,6 +398,8 @@ struct es_data_adaptor { mv[prefix + "_int"] = v; else if( v.is_bool() ) mv[prefix + "_bool"] = v; + else if( v.is_string() ) + mv[prefix + "_string"] = v.get_string(); else mv[prefix + "_string"] = fc::json::to_string( v ); // Note: we don't use double or array here, and we convert null and blob to string From 79993e2e4035d9077ce50ada19d0c2d96fe60229 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 24 Dec 2021 20:13:26 +0000 Subject: [PATCH 066/338] Move es_data_adaptor into utilities namespace --- .../elasticsearch/elasticsearch_plugin.cpp | 186 +----------------- libraries/utilities/elasticsearch.cpp | 177 +++++++++++++++++ .../graphene/utilities/elasticsearch.hpp | 27 +++ 3 files changed, 206 insertions(+), 184 deletions(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index 9521d185c3..ba02c4d074 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -263,188 +263,6 @@ void 
elasticsearch_plugin_impl::getOperationType(const optional op.which(); } -struct es_data_adaptor { - enum class data_type { - static_variant_type, - map_type, - array_type // can be simple arrays, object arrays, static_variant arrays, or even nested arrays - }; - static variant adapt(const variant_object& op) - { - fc::mutable_variant_object o(op); - - // Note: these fields are maps, but were stored in ES as flattened arrays - static const map> flattened_fields = { - { "account_auths", data_type::map_type }, - { "address_auths", data_type::map_type }, - { "key_auths", data_type::map_type } - }; - // Note: - // object arrays listed in this map are stored redundantly in ES, with one instance as a nested object and - // the other as a string for backward compatibility, - // object arrays not listed in this map are stored as nested objects only. - static const map> to_string_fields = { - { "parameters", data_type::array_type }, // in committee proposals, current_fees.parameters - { "op", data_type::static_variant_type }, // proposal_create_op.proposed_ops[*].op - { "proposed_ops", data_type::array_type }, - { "initializer", data_type::static_variant_type }, - { "policy", data_type::static_variant_type }, - { "predicates", data_type::array_type }, - { "active_special_authority", data_type::static_variant_type }, - { "owner_special_authority", data_type::static_variant_type }, - { "acceptable_collateral", data_type::map_type }, - { "acceptable_borrowers", data_type::map_type } - }; - map> original_arrays; - vector keys_to_rename; - for( auto& i : o ) - { - const string& name = i.key(); - auto& element = i.value(); - if( element.is_object() ) - { - const auto& vo = element.get_object(); - if( vo.contains(name.c_str()) ) // transfer_operation.amount.amount - keys_to_rename.emplace_back(name); - element = adapt(vo); - } - else if( element.is_array() ) - { - auto& array = element.get_array(); - if( to_string_fields.find(name) != to_string_fields.end() ) - { - // make a backup and convert to string - original_arrays[name] = array; - element = fc::json::to_string(element); - } - else if( flattened_fields.find(name) != flattened_fields.end() ) - { - // make a backup and adapt the original - auto backup = array; - original_arrays[name] = backup; - adapt(array); - } - else - adapt(array); - } - } - - for( const auto& i : keys_to_rename ) // transfer_operation.amount - { - string new_name = i + "_"; - o[new_name] = variant(o[i]); - o.erase(i); - } - - if( o.find("nonce") != o.end() ) - { - o["nonce"] = o["nonce"].as_string(); - } - - if( o.find("owner") != o.end() && o["owner"].is_string() ) // vesting_balance_*_operation.owner - { - o["owner_"] = o["owner"].as_string(); - o.erase("owner"); - } - - for( const auto& pair : original_arrays ) - { - const auto& name = pair.first; - auto& value = pair.second; - auto type = data_type::map_type; - if( to_string_fields.find(name) != to_string_fields.end() ) - type = to_string_fields.at(name); - o[name + "_object"] = adapt( value, type ); - } - - variant v; - fc::to_variant(o, v, FC_PACK_MAX_DEPTH); - return v; - } - - static variant adapt( const fc::variants& v, data_type type ) - { - if( data_type::static_variant_type == type ) - return adapt_static_variant(v); - - // map_type or array_type - fc::variants vs; - vs.reserve( v.size() ); - for( const auto& item : v ) - { - if( item.is_array() ) - { - if( data_type::map_type == type ) - vs.push_back( adapt_map_item( item.get_array() ) ); - else // assume it is a static_variant array - vs.push_back( 
adapt_static_variant( item.get_array() ) ); - } - else if( item.is_object() ) // object array - vs.push_back( adapt( item.get_object() ) ); - else - wlog( "Type of item is unexpected: ${item}", ("item", item) ); - } - - variant nv; - fc::to_variant(vs, nv, FC_PACK_MAX_DEPTH); - return nv; - } - - static void extract_data_from_variant( const variant& v, fc::mutable_variant_object& mv, const string& prefix ) - { - if( v.is_object() ) - mv[prefix + "_object"] = adapt( v.get_object() ); - else if( v.is_int64() || v.is_uint64() ) - mv[prefix + "_int"] = v; - else if( v.is_bool() ) - mv[prefix + "_bool"] = v; - else if( v.is_string() ) - mv[prefix + "_string"] = v.get_string(); - else - mv[prefix + "_string"] = fc::json::to_string( v ); - // Note: we don't use double or array here, and we convert null and blob to string - } - - static variant adapt_map_item( const fc::variants& v ) - { - FC_ASSERT( v.size() == 2, "Internal error" ); - fc::mutable_variant_object mv; - - extract_data_from_variant( v[0], mv, "key" ); - extract_data_from_variant( v[1], mv, "data" ); - - variant nv; - fc::to_variant( mv, nv, FC_PACK_MAX_DEPTH ); - return nv; - } - - static variant adapt_static_variant( const fc::variants& v ) - { - FC_ASSERT( v.size() == 2, "Internal error" ); - fc::mutable_variant_object mv; - - mv["which"] = v[0]; - extract_data_from_variant( v[1], mv, "data" ); - - variant nv; - fc::to_variant( mv, nv, FC_PACK_MAX_DEPTH ); - return nv; - } - - static void adapt(fc::variants& v) - { - for (auto& array_element : v) - { - if (array_element.is_object()) - array_element = adapt(array_element.get_object()); - else if (array_element.is_array()) - adapt(array_element.get_array()); - else - array_element = array_element.as_string(); - } - } -}; - void elasticsearch_plugin_impl::doOperationHistory(const optional & oho) { try { os.trx_in_block = oho->trx_in_block; @@ -455,11 +273,11 @@ void elasticsearch_plugin_impl::doOperationHistory(const optional op.visit(fc::from_static_variant(os.op_object, FC_PACK_MAX_DEPTH)); - os.op_object = es_data_adaptor::adapt(os.op_object.get_object()); + os.op_object = graphene::utilities::es_data_adaptor::adapt( os.op_object.get_object() ); // operation_result variant v; fc::to_variant( oho->result, v, FC_PACK_MAX_DEPTH ); - os.operation_result_object = es_data_adaptor::adapt_static_variant( v.get_array() ); + os.operation_result_object = graphene::utilities::es_data_adaptor::adapt_static_variant( v.get_array() ); } if(_elasticsearch_operation_string) os.op = fc::json::to_string(oho->op); diff --git a/libraries/utilities/elasticsearch.cpp b/libraries/utilities/elasticsearch.cpp index 9a268b766e..b6b6182d37 100644 --- a/libraries/utilities/elasticsearch.cpp +++ b/libraries/utilities/elasticsearch.cpp @@ -26,6 +26,7 @@ #include #include #include +#include size_t WriteCallback(void *contents, size_t size, size_t nmemb, void *userp) { @@ -216,4 +217,180 @@ std::string doCurl(CurlRequest& curl) return CurlReadBuffer; } +fc::variant es_data_adaptor::adapt(const fc::variant_object& op) +{ + fc::mutable_variant_object o(op); + + // Note: these fields are maps, but were stored in ES as flattened arrays + static const std::map> flattened_fields = { + { "account_auths", data_type::map_type }, + { "address_auths", data_type::map_type }, + { "key_auths", data_type::map_type } + }; + // Note: + // object arrays listed in this map are stored redundantly in ES, with one instance as a nested object and + // the other as a string for backward compatibility, + // object arrays not listed in 
this map are stored as nested objects only. + static const std::map> to_string_fields = { + { "parameters", data_type::array_type }, // in committee proposals, current_fees.parameters + { "op", data_type::static_variant_type }, // proposal_create_op.proposed_ops[*].op + { "proposed_ops", data_type::array_type }, + { "initializer", data_type::static_variant_type }, + { "policy", data_type::static_variant_type }, + { "predicates", data_type::array_type }, + { "active_special_authority", data_type::static_variant_type }, + { "owner_special_authority", data_type::static_variant_type }, + { "acceptable_collateral", data_type::map_type }, + { "acceptable_borrowers", data_type::map_type } + }; + std::map> original_arrays; + std::vector keys_to_rename; + for( auto& i : o ) + { + const std::string& name = i.key(); + auto& element = i.value(); + if( element.is_object() ) + { + const auto& vo = element.get_object(); + if( vo.contains(name.c_str()) ) // transfer_operation.amount.amount + keys_to_rename.emplace_back(name); + element = adapt(vo); + } + else if( element.is_array() ) + { + auto& array = element.get_array(); + if( to_string_fields.find(name) != to_string_fields.end() ) + { + // make a backup and convert to string + original_arrays[name] = array; + element = fc::json::to_string(element); + } + else if( flattened_fields.find(name) != flattened_fields.end() ) + { + // make a backup and adapt the original + auto backup = array; + original_arrays[name] = backup; + adapt(array); + } + else + adapt(array); + } + } + + for( const auto& i : keys_to_rename ) // transfer_operation.amount + { + std::string new_name = i + "_"; + o[new_name] = fc::variant(o[i]); + o.erase(i); + } + + if( o.find("nonce") != o.end() ) + { + o["nonce"] = o["nonce"].as_string(); + } + + if( o.find("owner") != o.end() && o["owner"].is_string() ) // vesting_balance_*_operation.owner + { + o["owner_"] = o["owner"].as_string(); + o.erase("owner"); + } + + for( const auto& pair : original_arrays ) + { + const auto& name = pair.first; + auto& value = pair.second; + auto type = data_type::map_type; + if( to_string_fields.find(name) != to_string_fields.end() ) + type = to_string_fields.at(name); + o[name + "_object"] = adapt( value, type ); + } + + fc::variant v; + fc::to_variant(o, v, FC_PACK_MAX_DEPTH); + return v; +} + +fc::variant es_data_adaptor::adapt( const fc::variants& v, data_type type ) +{ + if( data_type::static_variant_type == type ) + return adapt_static_variant(v); + + // map_type or array_type + fc::variants vs; + vs.reserve( v.size() ); + for( const auto& item : v ) + { + if( item.is_array() ) + { + if( data_type::map_type == type ) + vs.push_back( adapt_map_item( item.get_array() ) ); + else // assume it is a static_variant array + vs.push_back( adapt_static_variant( item.get_array() ) ); + } + else if( item.is_object() ) // object array + vs.push_back( adapt( item.get_object() ) ); + else + wlog( "Type of item is unexpected: ${item}", ("item", item) ); + } + + fc::variant nv; + fc::to_variant(vs, nv, FC_PACK_MAX_DEPTH); + return nv; +} + +void es_data_adaptor::extract_data_from_variant( + const fc::variant& v, fc::mutable_variant_object& mv, const std::string& prefix ) +{ + if( v.is_object() ) + mv[prefix + "_object"] = adapt( v.get_object() ); + else if( v.is_int64() || v.is_uint64() ) + mv[prefix + "_int"] = v; + else if( v.is_bool() ) + mv[prefix + "_bool"] = v; + else if( v.is_string() ) + mv[prefix + "_string"] = v.get_string(); + else + mv[prefix + "_string"] = fc::json::to_string( v ); + // Note: we don't 
use double or array here, and we convert null and blob to string +} + +fc::variant es_data_adaptor::adapt_map_item( const fc::variants& v ) +{ + FC_ASSERT( v.size() == 2, "Internal error" ); + fc::mutable_variant_object mv; + + extract_data_from_variant( v[0], mv, "key" ); + extract_data_from_variant( v[1], mv, "data" ); + + fc::variant nv; + fc::to_variant( mv, nv, FC_PACK_MAX_DEPTH ); + return nv; +} + +fc::variant es_data_adaptor::adapt_static_variant( const fc::variants& v ) +{ + FC_ASSERT( v.size() == 2, "Internal error" ); + fc::mutable_variant_object mv; + + mv["which"] = v[0]; + extract_data_from_variant( v[1], mv, "data" ); + + fc::variant nv; + fc::to_variant( mv, nv, FC_PACK_MAX_DEPTH ); + return nv; +} + +void es_data_adaptor::adapt(fc::variants& v) +{ + for (auto& array_element : v) + { + if (array_element.is_object()) + array_element = adapt(array_element.get_object()); + else if (array_element.is_array()) + adapt(array_element.get_array()); + else + array_element = array_element.as_string(); + } +} + } } // end namespace graphene::utilities diff --git a/libraries/utilities/include/graphene/utilities/elasticsearch.hpp b/libraries/utilities/include/graphene/utilities/elasticsearch.hpp index 2fb29a0967..e64f59719d 100644 --- a/libraries/utilities/include/graphene/utilities/elasticsearch.hpp +++ b/libraries/utilities/include/graphene/utilities/elasticsearch.hpp @@ -66,4 +66,31 @@ namespace graphene { namespace utilities { std::string joinBulkLines(const std::vector& bulk); long getResponseCode(CURL *handler); +struct es_data_adaptor +{ + enum class data_type + { + static_variant_type, + map_type, + array_type // can be simple arrays, object arrays, static_variant arrays, or even nested arrays + }; + + static fc::variant adapt( const fc::variant_object& op ); + + static fc::variant adapt( const fc::variants& v, data_type type ); + + static fc::variant adapt_map_item( const fc::variants& v ); + + static fc::variant adapt_static_variant( const fc::variants& v ); + + /// In-place update + static void adapt( fc::variants& v ); + + /// Extract data from @p v into @p mv + static void extract_data_from_variant( const fc::variant& v, + fc::mutable_variant_object& mv, + const std::string& prefix ); + +}; + } } // end namespace graphene::utilities From 0eed9237c3a2f0d3ae351ea7c682792ef6656734 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 24 Dec 2021 22:39:54 +0000 Subject: [PATCH 067/338] Merge adaptor_struct in es_objects into utilities --- libraries/plugins/es_objects/es_objects.cpp | 3 +- .../graphene/es_objects/es_objects.hpp | 57 ------------------- libraries/utilities/elasticsearch.cpp | 2 + 3 files changed, 3 insertions(+), 59 deletions(-) diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index 93a4687f36..791df773df 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -254,10 +254,9 @@ void es_objects_plugin_impl::prepareTemplate(T blockchain_object, string index_n bulk_header["_id"] = string(blockchain_object.id); } - adaptor_struct adaptor; fc::variant blockchain_object_variant; fc::to_variant( blockchain_object, blockchain_object_variant, GRAPHENE_NET_MAX_NESTED_OBJECTS ); - fc::mutable_variant_object o = adaptor.adapt(blockchain_object_variant.get_object()); + fc::mutable_variant_object o( utilities::es_data_adaptor::adapt( blockchain_object_variant.get_object() ) ); o["object_id"] = string(blockchain_object.id); o["block_time"] = block_time; diff --git 
a/libraries/plugins/es_objects/include/graphene/es_objects/es_objects.hpp b/libraries/plugins/es_objects/include/graphene/es_objects/es_objects.hpp index dff4812498..cfae63d77b 100644 --- a/libraries/plugins/es_objects/include/graphene/es_objects/es_objects.hpp +++ b/libraries/plugins/es_objects/include/graphene/es_objects/es_objects.hpp @@ -53,61 +53,4 @@ class es_objects_plugin : public graphene::app::plugin std::unique_ptr my; }; -struct adaptor_struct { - fc::mutable_variant_object adapt(const variant_object &obj) { - fc::mutable_variant_object o(obj); - vector keys_to_rename; - for (auto i = o.begin(); i != o.end(); ++i) { - auto &element = (*i).value(); - if (element.is_object()) { - const string &name = (*i).key(); - auto &vo = element.get_object(); - if (vo.contains(name.c_str())) - keys_to_rename.emplace_back(name); - element = adapt(vo); - } else if (element.is_array()) - adapt(element.get_array()); - } - for (const auto &i : keys_to_rename) { - string new_name = i + "_"; - o[new_name] = variant(o[i]); - o.erase(i); - } - if (o.find("owner") != o.end() && o["owner"].is_string()) - { - o["owner_"] = o["owner"].as_string(); - o.erase("owner"); - } - if (o.find("active_special_authority") != o.end()) - { - o["active_special_authority"] = fc::json::to_string(o["active_special_authority"]); - } - if (o.find("owner_special_authority") != o.end()) - { - o["owner_special_authority"] = fc::json::to_string(o["owner_special_authority"]); - } - if (o.find("feeds") != o.end()) - { - o["feeds"] = fc::json::to_string(o["feeds"]); - } - if (o.find("operations") != o.end()) - { - o["operations"] = fc::json::to_string(o["operations"]); - } - - return o; - } - - void adapt(fc::variants &v) { - for (auto &array_element : v) { - if (array_element.is_object()) - array_element = adapt(array_element.get_object()); - else if (array_element.is_array()) - adapt(array_element.get_array()); - else - array_element = array_element.as_string(); - } - } -}; - } } //graphene::es_objects diff --git a/libraries/utilities/elasticsearch.cpp b/libraries/utilities/elasticsearch.cpp index b6b6182d37..756d7c0352 100644 --- a/libraries/utilities/elasticsearch.cpp +++ b/libraries/utilities/elasticsearch.cpp @@ -235,11 +235,13 @@ fc::variant es_data_adaptor::adapt(const fc::variant_object& op) { "parameters", data_type::array_type }, // in committee proposals, current_fees.parameters { "op", data_type::static_variant_type }, // proposal_create_op.proposed_ops[*].op { "proposed_ops", data_type::array_type }, + { "operations", data_type::array_type }, // proposal_object.operations { "initializer", data_type::static_variant_type }, { "policy", data_type::static_variant_type }, { "predicates", data_type::array_type }, { "active_special_authority", data_type::static_variant_type }, { "owner_special_authority", data_type::static_variant_type }, + { "feeds", data_type::map_type }, // asset_bitasset_data_object.feeds { "acceptable_collateral", data_type::map_type }, { "acceptable_borrowers", data_type::map_type } }; From 67e6733569b0c571fff441718fb4f9da193ae0e1 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 24 Dec 2021 23:59:35 +0000 Subject: [PATCH 068/338] Try to fix genesis data process in es_objects --- libraries/plugins/es_objects/es_objects.cpp | 63 ++++++++++----------- 1 file changed, 29 insertions(+), 34 deletions(-) diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index 791df773df..f7d4263b62 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ 
b/libraries/plugins/es_objects/es_objects.cpp @@ -54,6 +54,7 @@ class es_objects_plugin_impl void remove_from_database(object_id_type id, std::string index); friend class graphene::es_objects::es_objects_plugin; + friend struct genesis_inserter; private: es_objects_plugin& _self; @@ -85,6 +86,31 @@ class es_objects_plugin_impl void init_program_options(const boost::program_options::variables_map& options); }; +struct genesis_inserter +{ + template + static void insert_genesis_objects( bool b, const string& prefix, es_objects_plugin_impl* my, DB& db ) + { + if( !b ) + return; + + db.get_index( SpaceID, TypeID ).inspect_all_objects( [my, &db, &prefix](const graphene::db::object &o) { + auto a = static_cast(&o); + my->prepareTemplate(*a, prefix); + }); + + graphene::utilities::ES es; + es.curl = my->curl; + es.bulk_lines = my->bulk; + es.elasticsearch_url = my->_es_objects_elasticsearch_url; + es.auth = my->_es_objects_auth; + if (!graphene::utilities::SendBulk(std::move(es))) + FC_THROW_EXCEPTION(graphene::chain::plugin_exception, "Error inserting genesis data."); + else + my->bulk.clear(); + } +}; + bool es_objects_plugin_impl::genesis() { ilog("elasticsearch OBJECTS: inserting data from genesis"); @@ -94,40 +120,9 @@ bool es_objects_plugin_impl::genesis() block_number = db.head_block_num(); block_time = db.head_block_time(); - if (_es_objects_accounts) { - auto &index_accounts = db.get_index(1, 2); - index_accounts.inspect_all_objects([this, &db](const graphene::db::object &o) { - auto obj = db.find_object(o.id); - auto a = static_cast(obj); - prepareTemplate(*a, "account"); - }); - } - if (_es_objects_assets) { - auto &index_assets = db.get_index(1, 3); - index_assets.inspect_all_objects([this, &db](const graphene::db::object &o) { - auto obj = db.find_object(o.id); - auto a = static_cast(obj); - prepareTemplate(*a, "asset"); - }); - } - if (_es_objects_balances) { - auto &index_balances = db.get_index(2, 5); - index_balances.inspect_all_objects([this, &db](const graphene::db::object &o) { - auto obj = db.find_object(o.id); - auto b = static_cast(obj); - prepareTemplate(*b, "balance"); - }); - } - - graphene::utilities::ES es; - es.curl = curl; - es.bulk_lines = bulk; - es.elasticsearch_url = _es_objects_elasticsearch_url; - es.auth = _es_objects_auth; - if (!graphene::utilities::SendBulk(std::move(es))) - FC_THROW_EXCEPTION(graphene::chain::plugin_exception, "Error inserting genesis data."); - else - bulk.clear(); + genesis_inserter::insert_genesis_objects<1,2,account_object >( _es_objects_accounts, "account", this, db ); + genesis_inserter::insert_genesis_objects<1,3,asset_object >( _es_objects_assets, "asset", this, db ); + genesis_inserter::insert_genesis_objects<2,5,account_balance_object>( _es_objects_balances, "balance", this, db ); return true; } From 952a326895dff096e2ad8e91ab75264e275314b5 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 25 Dec 2021 00:31:52 +0000 Subject: [PATCH 069/338] Insert genesis objects into ES in smaller bulks --- libraries/plugins/es_objects/es_objects.cpp | 25 ++++++++++++--------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index f7d4263b62..833fb25761 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -62,6 +62,7 @@ class es_objects_plugin_impl std::string _es_objects_auth = ""; uint32_t _es_objects_bulk_replay = 10000; uint32_t _es_objects_bulk_sync = 100; + uint32_t 
limit_documents = _es_objects_bulk_replay; bool _es_objects_proposals = true; bool _es_objects_accounts = true; bool _es_objects_assets = true; @@ -98,16 +99,6 @@ struct genesis_inserter auto a = static_cast(&o); my->prepareTemplate(*a, prefix); }); - - graphene::utilities::ES es; - es.curl = my->curl; - es.bulk_lines = my->bulk; - es.elasticsearch_url = my->_es_objects_elasticsearch_url; - es.auth = my->_es_objects_auth; - if (!graphene::utilities::SendBulk(std::move(es))) - FC_THROW_EXCEPTION(graphene::chain::plugin_exception, "Error inserting genesis data."); - else - my->bulk.clear(); } }; @@ -137,7 +128,6 @@ bool es_objects_plugin_impl::index_database(const vector& ids, s if(block_number > _es_objects_start_es_after_block) { // check if we are in replay or in sync and change number of bulk documents accordingly - uint32_t limit_documents = 0; if ((fc::time_point::now() - block_time) < fc::seconds(30)) limit_documents = _es_objects_bulk_sync; else @@ -262,6 +252,19 @@ void es_objects_plugin_impl::prepareTemplate(T blockchain_object, string index_n prepare = graphene::utilities::createBulk(bulk_header, std::move(data)); std::move(prepare.begin(), prepare.end(), std::back_inserter(bulk)); prepare.clear(); + + if( curl && bulk.size() >= limit_documents ) // send data to elasticsearch when bulk is too large + { + graphene::utilities::ES es; + es.curl = curl; + es.bulk_lines = bulk; + es.elasticsearch_url = _es_objects_elasticsearch_url; + es.auth = _es_objects_auth; + if (!graphene::utilities::SendBulk(std::move(es))) + FC_THROW_EXCEPTION(graphene::chain::plugin_exception, "Error sending bulk data."); + else + bulk.clear(); + } } es_objects_plugin_impl::~es_objects_plugin_impl() From 4f4f8a41ada4e3de702ccf2fb2de63084cb3adb6 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 25 Dec 2021 00:45:21 +0000 Subject: [PATCH 070/338] Insert bitasset data in genesis into ES --- libraries/plugins/es_objects/es_objects.cpp | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index 833fb25761..339bef138c 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -90,7 +90,7 @@ class es_objects_plugin_impl struct genesis_inserter { template - static void insert_genesis_objects( bool b, const string& prefix, es_objects_plugin_impl* my, DB& db ) + static void insert( bool b, const string& prefix, es_objects_plugin_impl* my, DB& db ) { if( !b ) return; @@ -111,9 +111,10 @@ bool es_objects_plugin_impl::genesis() block_number = db.head_block_num(); block_time = db.head_block_time(); - genesis_inserter::insert_genesis_objects<1,2,account_object >( _es_objects_accounts, "account", this, db ); - genesis_inserter::insert_genesis_objects<1,3,asset_object >( _es_objects_assets, "asset", this, db ); - genesis_inserter::insert_genesis_objects<2,5,account_balance_object>( _es_objects_balances, "balance", this, db ); + genesis_inserter::insert<1,2,account_object >( _es_objects_accounts, "account", this, db ); + genesis_inserter::insert<1,3,asset_object >( _es_objects_assets, "asset", this, db ); + genesis_inserter::insert<2,4,asset_bitasset_data_object >( _es_objects_asset_bitasset, "bitasset", this, db ); + genesis_inserter::insert<2,5,account_balance_object >( _es_objects_balances, "balance", this, db ); return true; } From 695187c031f8afbb8045c60a3c9baa360421641a Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 25 Dec 2021 13:42:04 +0000 Subject: [PATCH 
071/338] Move program options of es_objects into a struct --- libraries/plugins/es_objects/es_objects.cpp | 113 ++++++++++++-------- 1 file changed, 69 insertions(+), 44 deletions(-) diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index 339bef138c..3e70e28688 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -57,25 +57,34 @@ class es_objects_plugin_impl friend struct genesis_inserter; private: + struct plugin_options + { + std::string _es_objects_elasticsearch_url = "http://localhost:9200/"; + std::string _es_objects_auth = ""; + uint32_t _es_objects_bulk_replay = 10000; + uint32_t _es_objects_bulk_sync = 100; + bool _es_objects_proposals = true; + bool _es_objects_accounts = true; + bool _es_objects_assets = true; + bool _es_objects_balances = true; + bool _es_objects_limit_orders = false; + bool _es_objects_asset_bitasset = true; + std::string _es_objects_index_prefix = "objects-"; + uint32_t _es_objects_start_es_after_block = 0; + bool _es_objects_keep_only_current = true; + + void init(const boost::program_options::variables_map& options); + }; + es_objects_plugin& _self; - std::string _es_objects_elasticsearch_url = "http://localhost:9200/"; - std::string _es_objects_auth = ""; - uint32_t _es_objects_bulk_replay = 10000; - uint32_t _es_objects_bulk_sync = 100; - uint32_t limit_documents = _es_objects_bulk_replay; - bool _es_objects_proposals = true; - bool _es_objects_accounts = true; - bool _es_objects_assets = true; - bool _es_objects_balances = true; - bool _es_objects_limit_orders = false; - bool _es_objects_asset_bitasset = true; - std::string _es_objects_index_prefix = "objects-"; - uint32_t _es_objects_start_es_after_block = 0; + plugin_options _options; + + uint32_t limit_documents = _options._es_objects_bulk_replay; + CURL *curl; // curl handler - vector bulk; + vector bulk; vector prepare; - bool _es_objects_keep_only_current = true; uint32_t block_number; fc::time_point_sec block_time; @@ -89,13 +98,22 @@ class es_objects_plugin_impl struct genesis_inserter { - template - static void insert( bool b, const string& prefix, es_objects_plugin_impl* my, DB& db ) + es_objects_plugin_impl* my; + graphene::chain::database &db; + + explicit genesis_inserter( es_objects_plugin_impl* _my ) + : my(_my), db( my->_self.database() ) + { // Nothing to do + } + + template + void insert( bool b, const string& prefix ) { if( !b ) return; - db.get_index( SpaceID, TypeID ).inspect_all_objects( [my, &db, &prefix](const graphene::db::object &o) { + db.get_index( ObjType::space_id, ObjType::type_id ).inspect_all_objects( + [this, &prefix](const graphene::db::object &o) { auto a = static_cast(&o); my->prepareTemplate(*a, prefix); }); @@ -111,10 +129,12 @@ bool es_objects_plugin_impl::genesis() block_number = db.head_block_num(); block_time = db.head_block_time(); - genesis_inserter::insert<1,2,account_object >( _es_objects_accounts, "account", this, db ); - genesis_inserter::insert<1,3,asset_object >( _es_objects_assets, "asset", this, db ); - genesis_inserter::insert<2,4,asset_bitasset_data_object >( _es_objects_asset_bitasset, "bitasset", this, db ); - genesis_inserter::insert<2,5,account_balance_object >( _es_objects_balances, "balance", this, db ); + genesis_inserter inserter( this ); + + inserter.insert( _options._es_objects_accounts, "account" ); + inserter.insert( _options._es_objects_assets, "asset" ); + inserter.insert( _options._es_objects_asset_bitasset, "bitasset" ); + 
inserter.insert( _options._es_objects_balances, "balance" ); return true; } @@ -126,17 +146,17 @@ bool es_objects_plugin_impl::index_database(const vector& ids, s block_time = db.head_block_time(); block_number = db.head_block_num(); - if(block_number > _es_objects_start_es_after_block) { + if(block_number > _options._es_objects_start_es_after_block) { // check if we are in replay or in sync and change number of bulk documents accordingly if ((fc::time_point::now() - block_time) < fc::seconds(30)) - limit_documents = _es_objects_bulk_sync; + limit_documents = _options._es_objects_bulk_sync; else - limit_documents = _es_objects_bulk_replay; + limit_documents = _options._es_objects_bulk_replay; for (auto const &value: ids) { - if (value.is() && _es_objects_proposals) { + if (value.is() && _options._es_objects_proposals) { auto obj = db.find_object(value); auto p = static_cast(obj); if (p != nullptr) { @@ -145,7 +165,7 @@ bool es_objects_plugin_impl::index_database(const vector& ids, s else prepareTemplate(*p, "proposal"); } - } else if (value.is() && _es_objects_accounts) { + } else if (value.is() && _options._es_objects_accounts) { auto obj = db.find_object(value); auto a = static_cast(obj); if (a != nullptr) { @@ -154,7 +174,7 @@ bool es_objects_plugin_impl::index_database(const vector& ids, s else prepareTemplate(*a, "account"); } - } else if (value.is() && _es_objects_assets) { + } else if (value.is() && _options._es_objects_assets) { auto obj = db.find_object(value); auto a = static_cast(obj); if (a != nullptr) { @@ -163,7 +183,7 @@ bool es_objects_plugin_impl::index_database(const vector& ids, s else prepareTemplate(*a, "asset"); } - } else if (value.is() && _es_objects_balances) { + } else if (value.is() && _options._es_objects_balances) { auto obj = db.find_object(value); auto b = static_cast(obj); if (b != nullptr) { @@ -172,7 +192,7 @@ bool es_objects_plugin_impl::index_database(const vector& ids, s else prepareTemplate(*b, "balance"); } - } else if (value.is() && _es_objects_limit_orders) { + } else if (value.is() && _options._es_objects_limit_orders) { auto obj = db.find_object(value); auto l = static_cast(obj); if (l != nullptr) { @@ -181,7 +201,7 @@ bool es_objects_plugin_impl::index_database(const vector& ids, s else prepareTemplate(*l, "limitorder"); } - } else if (value.is() && _es_objects_asset_bitasset) { + } else if (value.is() && _options._es_objects_asset_bitasset) { auto obj = db.find_object(value); auto ba = static_cast(obj); if (ba != nullptr) { @@ -198,8 +218,8 @@ bool es_objects_plugin_impl::index_database(const vector& ids, s graphene::utilities::ES es; es.curl = curl; es.bulk_lines = bulk; - es.elasticsearch_url = _es_objects_elasticsearch_url; - es.auth = _es_objects_auth; + es.elasticsearch_url = _options._es_objects_elasticsearch_url; + es.auth = _options._es_objects_auth; if (!graphene::utilities::SendBulk(std::move(es))) return false; @@ -213,11 +233,11 @@ bool es_objects_plugin_impl::index_database(const vector& ids, s void es_objects_plugin_impl::remove_from_database( object_id_type id, std::string index) { - if(_es_objects_keep_only_current) + if(_options._es_objects_keep_only_current) { fc::mutable_variant_object delete_line; delete_line["_id"] = string(id); - delete_line["_index"] = _es_objects_index_prefix + index; + delete_line["_index"] = _options._es_objects_index_prefix + index; if(!is_es_version_7_or_above) delete_line["_type"] = "_doc"; fc::mutable_variant_object final_delete_line; @@ -232,10 +252,10 @@ template void 
es_objects_plugin_impl::prepareTemplate(T blockchain_object, string index_name) { fc::mutable_variant_object bulk_header; - bulk_header["_index"] = _es_objects_index_prefix + index_name; + bulk_header["_index"] = _options._es_objects_index_prefix + index_name; if(!is_es_version_7_or_above) bulk_header["_type"] = "_doc"; - if(_es_objects_keep_only_current) + if(_options._es_objects_keep_only_current) { bulk_header["_id"] = string(blockchain_object.id); } @@ -259,8 +279,8 @@ void es_objects_plugin_impl::prepareTemplate(T blockchain_object, string index_n graphene::utilities::ES es; es.curl = curl; es.bulk_lines = bulk; - es.elasticsearch_url = _es_objects_elasticsearch_url; - es.auth = _es_objects_auth; + es.elasticsearch_url = _options._es_objects_elasticsearch_url; + es.auth = _options._es_objects_auth; if (!graphene::utilities::SendBulk(std::move(es))) FC_THROW_EXCEPTION(graphene::chain::plugin_exception, "Error sending bulk data."); else @@ -326,6 +346,11 @@ void es_objects_plugin::plugin_set_program_options( } void detail::es_objects_plugin_impl::init_program_options(const boost::program_options::variables_map& options) +{ + _options.init( options ); +} + +void detail::es_objects_plugin_impl::plugin_options::init(const boost::program_options::variables_map& options) { if (options.count("es-objects-elasticsearch-url") > 0) { _es_objects_elasticsearch_url = options["es-objects-elasticsearch-url"].as(); @@ -373,7 +398,7 @@ void es_objects_plugin::plugin_initialize(const boost::program_options::variable my->init_program_options( options ); database().applied_block.connect([this](const signed_block &b) { - if(b.block_num() == 1 && my->_es_objects_start_es_after_block == 0) { + if( 1U == b.block_num() && 0 == my->_options._es_objects_start_es_after_block ) { if (!my->genesis()) FC_THROW_EXCEPTION(graphene::chain::plugin_exception, "Error populating genesis data."); } @@ -405,12 +430,12 @@ void es_objects_plugin::plugin_initialize(const boost::program_options::variable graphene::utilities::ES es; es.curl = my->curl; - es.elasticsearch_url = my->_es_objects_elasticsearch_url; - es.auth = my->_es_objects_auth; - es.auth = my->_es_objects_index_prefix; + es.elasticsearch_url = my->_options._es_objects_elasticsearch_url; + es.auth = my->_options._es_objects_auth; + es.auth = my->_options._es_objects_index_prefix; if(!graphene::utilities::checkES(es)) - FC_THROW( "ES database is not up in url ${url}", ("url", my->_es_objects_elasticsearch_url) ); + FC_THROW( "ES database is not up in url ${url}", ("url", my->_options._es_objects_elasticsearch_url) ); graphene::utilities::checkESVersion7OrAbove(es, my->is_es_version_7_or_above); } From b02337e2b0dd3b5c2af13268bd0bd3bd3ea6e4dc Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 25 Dec 2021 22:15:31 +0000 Subject: [PATCH 072/338] Adapt htlc_preimage_hash as an object for ES --- libraries/utilities/elasticsearch.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/libraries/utilities/elasticsearch.cpp b/libraries/utilities/elasticsearch.cpp index 756d7c0352..75c4abe5cf 100644 --- a/libraries/utilities/elasticsearch.cpp +++ b/libraries/utilities/elasticsearch.cpp @@ -241,6 +241,7 @@ fc::variant es_data_adaptor::adapt(const fc::variant_object& op) { "predicates", data_type::array_type }, { "active_special_authority", data_type::static_variant_type }, { "owner_special_authority", data_type::static_variant_type }, + { "htlc_preimage_hash", data_type::static_variant_type }, { "feeds", data_type::map_type }, // 
asset_bitasset_data_object.feeds { "acceptable_collateral", data_type::map_type }, { "acceptable_borrowers", data_type::map_type } @@ -353,7 +354,8 @@ void es_data_adaptor::extract_data_from_variant( mv[prefix + "_string"] = v.get_string(); else mv[prefix + "_string"] = fc::json::to_string( v ); - // Note: we don't use double or array here, and we convert null and blob to string + // Note: we don't use double or array here, and we convert null and blob to string, + // and static_variants (i.e. in custom authorities) and maps (if any) are converted to strings too. } fc::variant es_data_adaptor::adapt_map_item( const fc::variants& v ) From 42be3bee44b4ddcca050f240682c9a3d370fb4b6 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 26 Dec 2021 17:30:46 +0000 Subject: [PATCH 073/338] Refactor es_object plugin code for performance --- libraries/plugins/es_objects/es_objects.cpp | 76 +++++++++++---------- 1 file changed, 39 insertions(+), 37 deletions(-) diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index 3e70e28688..134a7ec2eb 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -49,14 +49,30 @@ class es_objects_plugin_impl } virtual ~es_objects_plugin_impl(); - bool index_database(const vector& ids, std::string action); - bool genesis(); - void remove_from_database(object_id_type id, std::string index); - + private: friend class graphene::es_objects::es_objects_plugin; friend struct genesis_inserter; - private: + enum class action_type + { + insertion, + update, + deletion + }; + + bool on_objects_create(const vector& ids) + { return index_database( ids, action_type::insertion ); } + + bool on_objects_update(const vector& ids) + { return index_database( ids, action_type::update ); } + + bool on_objects_delete(const vector& ids) + { return index_database( ids, action_type::deletion ); } + + bool index_database(const vector& ids, action_type action); + bool genesis(); + void remove_from_database(object_id_type id, std::string index); + struct plugin_options { std::string _es_objects_elasticsearch_url = "http://localhost:9200/"; @@ -91,7 +107,7 @@ class es_objects_plugin_impl bool is_es_version_7_or_above = true; template - void prepareTemplate(T blockchain_object, string index_name); + void prepareTemplate(const T& blockchain_object, string index_name); void init_program_options(const boost::program_options::variables_map& options); }; @@ -114,8 +130,7 @@ struct genesis_inserter db.get_index( ObjType::space_id, ObjType::type_id ).inspect_all_objects( [this, &prefix](const graphene::db::object &o) { - auto a = static_cast(&o); - my->prepareTemplate(*a, prefix); + my->prepareTemplate( static_cast(o), prefix); }); } }; @@ -139,7 +154,7 @@ bool es_objects_plugin_impl::genesis() return true; } -bool es_objects_plugin_impl::index_database(const vector& ids, std::string action) +bool es_objects_plugin_impl::index_database(const vector& ids, action_type action) { graphene::chain::database &db = _self.database(); @@ -160,7 +175,7 @@ bool es_objects_plugin_impl::index_database(const vector& ids, s auto obj = db.find_object(value); auto p = static_cast(obj); if (p != nullptr) { - if (action == "delete") + if( action_type::deletion == action ) remove_from_database(p->id, "proposal"); else prepareTemplate(*p, "proposal"); @@ -169,7 +184,7 @@ bool es_objects_plugin_impl::index_database(const vector& ids, s auto obj = db.find_object(value); auto a = static_cast(obj); if (a != nullptr) { - if (action 
== "delete") + if( action_type::deletion == action ) remove_from_database(a->id, "account"); else prepareTemplate(*a, "account"); @@ -178,7 +193,7 @@ bool es_objects_plugin_impl::index_database(const vector& ids, s auto obj = db.find_object(value); auto a = static_cast(obj); if (a != nullptr) { - if (action == "delete") + if( action_type::deletion == action ) remove_from_database(a->id, "asset"); else prepareTemplate(*a, "asset"); @@ -187,7 +202,7 @@ bool es_objects_plugin_impl::index_database(const vector& ids, s auto obj = db.find_object(value); auto b = static_cast(obj); if (b != nullptr) { - if (action == "delete") + if( action_type::deletion == action ) remove_from_database(b->id, "balance"); else prepareTemplate(*b, "balance"); @@ -196,7 +211,7 @@ bool es_objects_plugin_impl::index_database(const vector& ids, s auto obj = db.find_object(value); auto l = static_cast(obj); if (l != nullptr) { - if (action == "delete") + if( action_type::deletion == action ) remove_from_database(l->id, "limitorder"); else prepareTemplate(*l, "limitorder"); @@ -205,7 +220,7 @@ bool es_objects_plugin_impl::index_database(const vector& ids, s auto obj = db.find_object(value); auto ba = static_cast(obj); if (ba != nullptr) { - if (action == "delete") + if( action_type::deletion == action ) remove_from_database(ba->id, "bitasset"); else prepareTemplate(*ba, "bitasset"); @@ -222,7 +237,7 @@ bool es_objects_plugin_impl::index_database(const vector& ids, s es.auth = _options._es_objects_auth; if (!graphene::utilities::SendBulk(std::move(es))) - return false; + FC_THROW_EXCEPTION(graphene::chain::plugin_exception, "Error sending bulk data."); else bulk.clear(); } @@ -249,7 +264,7 @@ void es_objects_plugin_impl::remove_from_database( object_id_type id, std::strin } template -void es_objects_plugin_impl::prepareTemplate(T blockchain_object, string index_name) +void es_objects_plugin_impl::prepareTemplate(const T& blockchain_object, string index_name) { fc::mutable_variant_object bulk_header; bulk_header["_index"] = _options._es_objects_index_prefix + index_name; @@ -399,33 +414,20 @@ void es_objects_plugin::plugin_initialize(const boost::program_options::variable database().applied_block.connect([this](const signed_block &b) { if( 1U == b.block_num() && 0 == my->_options._es_objects_start_es_after_block ) { - if (!my->genesis()) - FC_THROW_EXCEPTION(graphene::chain::plugin_exception, "Error populating genesis data."); + my->genesis(); } }); database().new_objects.connect([this]( const vector& ids, - const flat_set& impacted_accounts ) { - if(!my->index_database(ids, "create")) - { - FC_THROW_EXCEPTION(graphene::chain::plugin_exception, - "Error creating object from ES database, we are going to keep trying."); - } + const flat_set& ) { + my->on_objects_create( ids ); }); database().changed_objects.connect([this]( const vector& ids, - const flat_set& impacted_accounts ) { - if(!my->index_database(ids, "update")) - { - FC_THROW_EXCEPTION(graphene::chain::plugin_exception, - "Error updating object from ES database, we are going to keep trying."); - } + const flat_set& ) { + my->on_objects_update( ids ); }); database().removed_objects.connect([this](const vector& ids, - const vector& objs, const flat_set& impacted_accounts) { - if(!my->index_database(ids, "delete")) - { - FC_THROW_EXCEPTION(graphene::chain::plugin_exception, - "Error deleting object from ES database, we are going to keep trying."); - } + const vector&, const flat_set& ) { + my->on_objects_delete( ids ); }); graphene::utilities::ES es; From 
82fca305f2a4f2de8b60abdb0e46a7496edc5bd4 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 26 Dec 2021 17:49:04 +0000 Subject: [PATCH 074/338] Fix code smells --- libraries/chain/db_notify.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libraries/chain/db_notify.cpp b/libraries/chain/db_notify.cpp index 8f33f8a15d..37b5dd3873 100644 --- a/libraries/chain/db_notify.cpp +++ b/libraries/chain/db_notify.cpp @@ -599,7 +599,7 @@ void database::notify_changed_objects() MUST_IGNORE_CUSTOM_OP_REQD_AUTHS(chain_time)); } - if( new_ids.size() ) + if( !new_ids.empty() ) GRAPHENE_TRY_NOTIFY( new_objects, new_ids, new_accounts_impacted) } @@ -616,7 +616,7 @@ void database::notify_changed_objects() MUST_IGNORE_CUSTOM_OP_REQD_AUTHS(chain_time)); } - if( changed_ids.size() ) + if( !changed_ids.empty() ) GRAPHENE_TRY_NOTIFY( changed_objects, changed_ids, changed_accounts_impacted) } @@ -637,7 +637,7 @@ void database::notify_changed_objects() MUST_IGNORE_CUSTOM_OP_REQD_AUTHS(chain_time)); } - if( removed_ids.size() ) + if( !removed_ids.empty() ) GRAPHENE_TRY_NOTIFY( removed_objects, removed_ids, removed, removed_accounts_impacted ) } } From 7e969e382e6b9402fe1b35bd99521c7ff6d88cca Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 26 Dec 2021 19:06:10 +0000 Subject: [PATCH 075/338] Fix data deletion in es_objects plugin --- libraries/plugins/es_objects/es_objects.cpp | 147 ++++++++------------ 1 file changed, 58 insertions(+), 89 deletions(-) diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index 134a7ec2eb..33491792b3 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -60,17 +60,17 @@ class es_objects_plugin_impl deletion }; - bool on_objects_create(const vector& ids) - { return index_database( ids, action_type::insertion ); } + void on_objects_create(const vector& ids) + { index_database( ids, action_type::insertion ); } - bool on_objects_update(const vector& ids) - { return index_database( ids, action_type::update ); } + void on_objects_update(const vector& ids) + { index_database( ids, action_type::update ); } - bool on_objects_delete(const vector& ids) - { return index_database( ids, action_type::deletion ); } + void on_objects_delete(const vector& ids) + { index_database( ids, action_type::deletion ); } - bool index_database(const vector& ids, action_type action); - bool genesis(); + void index_database(const vector& ids, action_type action); + void genesis(); void remove_from_database(object_id_type id, std::string index); struct plugin_options @@ -110,6 +110,8 @@ class es_objects_plugin_impl void prepareTemplate(const T& blockchain_object, string index_name); void init_program_options(const boost::program_options::variables_map& options); + + void send_bulk_if_ready(); }; struct genesis_inserter @@ -135,7 +137,7 @@ struct genesis_inserter } }; -bool es_objects_plugin_impl::genesis() +void es_objects_plugin_impl::genesis() { ilog("elasticsearch OBJECTS: inserting data from genesis"); @@ -150,100 +152,60 @@ bool es_objects_plugin_impl::genesis() inserter.insert( _options._es_objects_assets, "asset" ); inserter.insert( _options._es_objects_asset_bitasset, "bitasset" ); inserter.insert( _options._es_objects_balances, "balance" ); - - return true; } -bool es_objects_plugin_impl::index_database(const vector& ids, action_type action) +void es_objects_plugin_impl::index_database(const vector& ids, action_type action) { graphene::chain::database &db = _self.database(); - 
block_time = db.head_block_time(); block_number = db.head_block_num(); - if(block_number > _options._es_objects_start_es_after_block) { + if( block_number <= _options._es_objects_start_es_after_block ) + return; - // check if we are in replay or in sync and change number of bulk documents accordingly - if ((fc::time_point::now() - block_time) < fc::seconds(30)) - limit_documents = _options._es_objects_bulk_sync; - else - limit_documents = _options._es_objects_bulk_replay; - - - for (auto const &value: ids) { - if (value.is() && _options._es_objects_proposals) { - auto obj = db.find_object(value); - auto p = static_cast(obj); - if (p != nullptr) { - if( action_type::deletion == action ) - remove_from_database(p->id, "proposal"); - else - prepareTemplate(*p, "proposal"); - } - } else if (value.is() && _options._es_objects_accounts) { - auto obj = db.find_object(value); - auto a = static_cast(obj); - if (a != nullptr) { - if( action_type::deletion == action ) - remove_from_database(a->id, "account"); - else - prepareTemplate(*a, "account"); - } - } else if (value.is() && _options._es_objects_assets) { - auto obj = db.find_object(value); - auto a = static_cast(obj); - if (a != nullptr) { - if( action_type::deletion == action ) - remove_from_database(a->id, "asset"); - else - prepareTemplate(*a, "asset"); - } - } else if (value.is() && _options._es_objects_balances) { - auto obj = db.find_object(value); - auto b = static_cast(obj); - if (b != nullptr) { - if( action_type::deletion == action ) - remove_from_database(b->id, "balance"); - else - prepareTemplate(*b, "balance"); - } - } else if (value.is() && _options._es_objects_limit_orders) { - auto obj = db.find_object(value); - auto l = static_cast(obj); - if (l != nullptr) { - if( action_type::deletion == action ) - remove_from_database(l->id, "limitorder"); - else - prepareTemplate(*l, "limitorder"); - } - } else if (value.is() && _options._es_objects_asset_bitasset) { - auto obj = db.find_object(value); - auto ba = static_cast(obj); - if (ba != nullptr) { - if( action_type::deletion == action ) - remove_from_database(ba->id, "bitasset"); - else - prepareTemplate(*ba, "bitasset"); - } - } - } - - if (curl && bulk.size() >= limit_documents) { // we are in bulk time, ready to add data to elasticsearech + block_time = db.head_block_time(); - graphene::utilities::ES es; - es.curl = curl; - es.bulk_lines = bulk; - es.elasticsearch_url = _options._es_objects_elasticsearch_url; - es.auth = _options._es_objects_auth; + // check if we are in replay or in sync and change number of bulk documents accordingly + if( (fc::time_point::now() - block_time) < fc::seconds(30) ) + limit_documents = _options._es_objects_bulk_sync; + else + limit_documents = _options._es_objects_bulk_replay; - if (!graphene::utilities::SendBulk(std::move(es))) - FC_THROW_EXCEPTION(graphene::chain::plugin_exception, "Error sending bulk data."); + for( auto const &value: ids ) + { + if( value.is() && _options._es_objects_balances ) { + if( action_type::deletion == action ) + remove_from_database( value, "balance" ); + else + prepareTemplate( account_balance_id_type(value)(db), "balance" ); + } else if( value.is() && _options._es_objects_limit_orders ) { + if( action_type::deletion == action ) + remove_from_database( value, "limitorder" ); else - bulk.clear(); + prepareTemplate( limit_order_id_type(value)(db), "limitorder" ); + } else if( value.is() && _options._es_objects_asset_bitasset) { + if( action_type::deletion == action ) + remove_from_database( value, "bitasset" ); + else + 
prepareTemplate( asset_bitasset_data_id_type(value)(db), "bitasset" ); + } else if( value.is() && _options._es_objects_assets ) { + if( action_type::deletion == action ) + remove_from_database( value, "asset" ); + else + prepareTemplate( asset_id_type(value)(db), "asset" ); + } else if( value.is() && _options._es_objects_accounts ) { + if( action_type::deletion == action ) + remove_from_database( value, "account" ); + else + prepareTemplate( account_id_type(value)(db), "account" ); + } else if( value.is() && _options._es_objects_proposals ) { + if( action_type::deletion == action ) + remove_from_database( value, "proposal" ); + else + prepareTemplate( proposal_id_type(value)(db), "proposal" ); } } - return true; } void es_objects_plugin_impl::remove_from_database( object_id_type id, std::string index) @@ -260,6 +222,8 @@ void es_objects_plugin_impl::remove_from_database( object_id_type id, std::strin prepare.push_back(fc::json::to_string(final_delete_line)); std::move(prepare.begin(), prepare.end(), std::back_inserter(bulk)); prepare.clear(); + + send_bulk_if_ready(); } } @@ -289,6 +253,11 @@ void es_objects_plugin_impl::prepareTemplate(const T& blockchain_object, string std::move(prepare.begin(), prepare.end(), std::back_inserter(bulk)); prepare.clear(); + send_bulk_if_ready(); +} + +void es_objects_plugin_impl::send_bulk_if_ready() +{ if( curl && bulk.size() >= limit_documents ) // send data to elasticsearch when bulk is too large { graphene::utilities::ES es; From b1e4ad277e17cde1d3f0b24388332ec4281bda41 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 27 Dec 2021 10:19:26 +0000 Subject: [PATCH 076/338] Fix code smells by passing objects by reference --- libraries/plugins/es_objects/es_objects.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index 33491792b3..cb3800760a 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -71,7 +71,7 @@ class es_objects_plugin_impl void index_database(const vector& ids, action_type action); void genesis(); - void remove_from_database(object_id_type id, std::string index); + void remove_from_database(const object_id_type& id, const std::string& index); struct plugin_options { @@ -107,7 +107,7 @@ class es_objects_plugin_impl bool is_es_version_7_or_above = true; template - void prepareTemplate(const T& blockchain_object, string index_name); + void prepareTemplate(const T& blockchain_object, const string& index_name); void init_program_options(const boost::program_options::variables_map& options); @@ -208,7 +208,7 @@ void es_objects_plugin_impl::index_database(const vector& ids, a } -void es_objects_plugin_impl::remove_from_database( object_id_type id, std::string index) +void es_objects_plugin_impl::remove_from_database( const object_id_type& id, const std::string& index) { if(_options._es_objects_keep_only_current) { @@ -228,7 +228,7 @@ void es_objects_plugin_impl::remove_from_database( object_id_type id, std::strin } template -void es_objects_plugin_impl::prepareTemplate(const T& blockchain_object, string index_name) +void es_objects_plugin_impl::prepareTemplate(const T& blockchain_object, const string& index_name) { fc::mutable_variant_object bulk_header; bulk_header["_index"] = _options._es_objects_index_prefix + index_name; From 0ab4dcd8cf52d320f205762e989de048f0d74850 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 27 Dec 2021 12:05:25 +0000 Subject: [PATCH 077/338] 
Slightly refactor code to improve performance --- libraries/utilities/elasticsearch.cpp | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/libraries/utilities/elasticsearch.cpp b/libraries/utilities/elasticsearch.cpp index 75c4abe5cf..2f47e719bf 100644 --- a/libraries/utilities/elasticsearch.cpp +++ b/libraries/utilities/elasticsearch.cpp @@ -222,16 +222,13 @@ fc::variant es_data_adaptor::adapt(const fc::variant_object& op) fc::mutable_variant_object o(op); // Note: these fields are maps, but were stored in ES as flattened arrays - static const std::map> flattened_fields = { - { "account_auths", data_type::map_type }, - { "address_auths", data_type::map_type }, - { "key_auths", data_type::map_type } - }; + static const std::unordered_set flattened_fields = { "account_auths", "address_auths", "key_auths" }; + // Note: // object arrays listed in this map are stored redundantly in ES, with one instance as a nested object and // the other as a string for backward compatibility, // object arrays not listed in this map are stored as nested objects only. - static const std::map> to_string_fields = { + static const std::unordered_map to_string_fields = { { "parameters", data_type::array_type }, // in committee proposals, current_fees.parameters { "op", data_type::static_variant_type }, // proposal_create_op.proposed_ops[*].op { "proposed_ops", data_type::array_type }, @@ -246,7 +243,7 @@ fc::variant es_data_adaptor::adapt(const fc::variant_object& op) { "acceptable_collateral", data_type::map_type }, { "acceptable_borrowers", data_type::map_type } }; - std::map> original_arrays; + std::vector> original_arrays; std::vector keys_to_rename; for( auto& i : o ) { @@ -265,14 +262,14 @@ fc::variant es_data_adaptor::adapt(const fc::variant_object& op) if( to_string_fields.find(name) != to_string_fields.end() ) { // make a backup and convert to string - original_arrays[name] = array; + original_arrays.emplace_back( std::make_pair( name, array ) ); element = fc::json::to_string(element); } else if( flattened_fields.find(name) != flattened_fields.end() ) { // make a backup and adapt the original auto backup = array; - original_arrays[name] = backup; + original_arrays.emplace_back( std::make_pair( name, backup ) ); adapt(array); } else From 9c8c927fc0f02500e495b37492e5d4c086831664 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 27 Dec 2021 14:50:53 +0000 Subject: [PATCH 078/338] Fix duplicate code --- libraries/plugins/es_objects/es_objects.cpp | 50 +++++++------------ .../include/graphene/protocol/object_id.hpp | 14 +++--- 2 files changed, 26 insertions(+), 38 deletions(-) diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index cb3800760a..65f5dec126 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -171,39 +171,25 @@ void es_objects_plugin_impl::index_database(const vector& ids, a else limit_documents = _options._es_objects_bulk_replay; - for( auto const &value: ids ) + static const unordered_map> data_type_map = { + { account_id_type::space_type, { "account", _options._es_objects_accounts } }, + { account_balance_id_type::space_type, { "balance", _options._es_objects_balances } }, + { asset_id_type::space_type, { "asset", _options._es_objects_assets } }, + { asset_bitasset_data_id_type::space_type, { "bitasset", _options._es_objects_asset_bitasset } }, + { limit_order_id_type::space_type, { "limitorder", _options._es_objects_limit_orders } }, + { 
proposal_id_type::space_type, { "proposal", _options._es_objects_proposals } } + }; + + for( const auto& value: ids ) { - if( value.is() && _options._es_objects_balances ) { - if( action_type::deletion == action ) - remove_from_database( value, "balance" ); - else - prepareTemplate( account_balance_id_type(value)(db), "balance" ); - } else if( value.is() && _options._es_objects_limit_orders ) { - if( action_type::deletion == action ) - remove_from_database( value, "limitorder" ); - else - prepareTemplate( limit_order_id_type(value)(db), "limitorder" ); - } else if( value.is() && _options._es_objects_asset_bitasset) { - if( action_type::deletion == action ) - remove_from_database( value, "bitasset" ); - else - prepareTemplate( asset_bitasset_data_id_type(value)(db), "bitasset" ); - } else if( value.is() && _options._es_objects_assets ) { - if( action_type::deletion == action ) - remove_from_database( value, "asset" ); - else - prepareTemplate( asset_id_type(value)(db), "asset" ); - } else if( value.is() && _options._es_objects_accounts ) { - if( action_type::deletion == action ) - remove_from_database( value, "account" ); - else - prepareTemplate( account_id_type(value)(db), "account" ); - } else if( value.is() && _options._es_objects_proposals ) { - if( action_type::deletion == action ) - remove_from_database( value, "proposal" ); - else - prepareTemplate( proposal_id_type(value)(db), "proposal" ); - } + const auto itr = data_type_map.find( value.space_type() ); + if( itr == data_type_map.end() || !(itr->second.second) ) + continue; + const string& prefix = itr->second.first; + if( action_type::deletion == action ) + remove_from_database( value, prefix ); + else + prepareTemplate( db.get_object(value), prefix ); } } diff --git a/libraries/protocol/include/graphene/protocol/object_id.hpp b/libraries/protocol/include/graphene/protocol/object_id.hpp index b44f208be9..84843348cc 100644 --- a/libraries/protocol/include/graphene/protocol/object_id.hpp +++ b/libraries/protocol/include/graphene/protocol/object_id.hpp @@ -57,11 +57,7 @@ namespace graphene { namespace db { friend bool operator > ( const object_id_type& a, const object_id_type& b ) { return a.number > b.number; } object_id_type& operator++(int) { ++number; return *this; } - object_id_type& operator++() { ++number; return *this; } - friend object_id_type operator+(const object_id_type& a, int delta ) { - return object_id_type( a.space(), a.type(), a.instance() + delta ); - } friend object_id_type operator+(const object_id_type& a, int64_t delta ) { return object_id_type( a.space(), a.type(), a.instance() + delta ); } @@ -82,7 +78,7 @@ namespace graphene { namespace db { explicit operator std::string() const { - return fc::to_string(space()) + "." + fc::to_string(type()) + "." + fc::to_string(instance()); + return fc::to_string(space()) + "." + fc::to_string(type()) + "." 
+ fc::to_string(instance()); } uint64_t number; @@ -110,6 +106,8 @@ namespace graphene { namespace db { static constexpr uint8_t space_id = SpaceID; static constexpr uint8_t type_id = TypeID; + static constexpr uint16_t space_type = (uint16_t(space_id) << 8) | (uint16_t(type_id)); + object_id() = default; object_id( unsigned_int i ):instance(i){} explicit object_id( uint64_t i ):instance(i) @@ -121,7 +119,6 @@ namespace graphene { namespace db { } friend object_id operator+(const object_id a, int64_t delta ) { return object_id( uint64_t(a.instance.value+delta) ); } - friend object_id operator+(const object_id a, int delta ) { return object_id( uint64_t(a.instance.value+delta) ); } operator object_id_type()const { return object_id_type( SpaceID, TypeID, instance.value ); } explicit operator uint64_t()const { return object_id_type( *this ).number; } @@ -145,6 +142,11 @@ namespace graphene { namespace db { friend size_t hash_value( object_id v ) { return std::hash()(v.instance.value); } + explicit operator std::string() const + { + return fc::to_string(space_id) + "." + fc::to_string(type_id) + "." + fc::to_string(instance.value); + } + unsigned_int instance; }; From d85c6f2932436faba439fbf3fd382e259ce18878 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 27 Dec 2021 15:29:29 +0000 Subject: [PATCH 079/338] Explicitly specify object types in es_objects --- libraries/plugins/es_objects/es_objects.cpp | 26 ++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index 65f5dec126..ef64856e81 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -189,7 +189,31 @@ void es_objects_plugin_impl::index_database(const vector& ids, a if( action_type::deletion == action ) remove_from_database( value, prefix ); else - prepareTemplate( db.get_object(value), prefix ); + { + switch( itr->first ) + { + case account_id_type::space_type: + prepareTemplate( db.get(value), prefix ); + break; + case account_balance_id_type::space_type: + prepareTemplate( db.get(value), prefix ); + break; + case asset_id_type::space_type: + prepareTemplate( db.get(value), prefix ); + break; + case asset_bitasset_data_id_type::space_type: + prepareTemplate( db.get(value), prefix ); + break; + case limit_order_id_type::space_type: + prepareTemplate( db.get(value), prefix ); + break; + case proposal_id_type::space_type: + prepareTemplate( db.get(value), prefix ); + break; + default: + break; + } + } } } From 69e4a1fd64a21474509eec0f20bfb2aaa2ecc04d Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 27 Dec 2021 19:25:23 +0000 Subject: [PATCH 080/338] Fix undefined reference linker error (Linux debug) --- libraries/plugins/es_objects/es_objects.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index ef64856e81..4d6b6cad78 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -33,6 +33,11 @@ #include +namespace graphene { namespace db { + template + constexpr uint16_t object_id::space_type; +} }; + namespace graphene { namespace es_objects { namespace detail From 034d3d6c3e54099f8facbc2ede46668d199bb313 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 27 Dec 2021 19:28:06 +0000 Subject: [PATCH 081/338] Simplify code --- libraries/protocol/include/graphene/protocol/object_id.hpp | 2 +- 1 file changed, 1 
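The space_type constant introduced above packs the 8-bit space id and the 8-bit type id of an object id into a single 16-bit key; this is the value the new data_type_map lookup and the switch dispatch in es_objects are keyed on. A minimal standalone sketch of the packing, assuming the usual Graphene numbering where account objects are instances of 1.2.x:

    // Illustrative only: mirrors "(space_id << 8) | type_id" from the patch above.
    #include <cstdint>

    int main()
    {
       constexpr uint8_t  space_id = 1;  // protocol space (assumed for the example)
       constexpr uint8_t  type_id  = 2;  // account object type (assumed for the example)
       constexpr uint16_t space_type = uint16_t( (uint16_t(space_id) << 8) | uint16_t(type_id) );
       static_assert( space_type == 0x0102, "1.2.x packs to 0x0102 (258)" );
       static_assert( (space_type >> 8) == space_id && (space_type & 0xFF) == type_id, "round-trips" );
       return 0;
    }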
insertion(+), 1 deletion(-) diff --git a/libraries/protocol/include/graphene/protocol/object_id.hpp b/libraries/protocol/include/graphene/protocol/object_id.hpp index 84843348cc..de590916dd 100644 --- a/libraries/protocol/include/graphene/protocol/object_id.hpp +++ b/libraries/protocol/include/graphene/protocol/object_id.hpp @@ -216,7 +216,7 @@ struct member_name, 0> { static constexpr const cha template void to_variant( const graphene::db::object_id& var, fc::variant& vo, uint32_t max_depth = 1 ) { - vo = fc::to_string(SpaceID) + "." + fc::to_string(TypeID) + "." + fc::to_string(var.instance.value); + vo = std::string( var ); } template void from_variant( const fc::variant& var, graphene::db::object_id& vo, uint32_t max_depth = 1 ) From 0218dd7b097655e70f9acb75255c3c02f1540f90 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 27 Dec 2021 21:55:26 +0000 Subject: [PATCH 082/338] Keep prefix increment operator of object_id_type and remove the postfix increment operator --- libraries/protocol/include/graphene/protocol/object_id.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/protocol/include/graphene/protocol/object_id.hpp b/libraries/protocol/include/graphene/protocol/object_id.hpp index de590916dd..56c0991e24 100644 --- a/libraries/protocol/include/graphene/protocol/object_id.hpp +++ b/libraries/protocol/include/graphene/protocol/object_id.hpp @@ -56,7 +56,7 @@ namespace graphene { namespace db { friend bool operator < ( const object_id_type& a, const object_id_type& b ) { return a.number < b.number; } friend bool operator > ( const object_id_type& a, const object_id_type& b ) { return a.number > b.number; } - object_id_type& operator++(int) { ++number; return *this; } + object_id_type& operator++() { ++number; return *this; } friend object_id_type operator+(const object_id_type& a, int64_t delta ) { return object_id_type( a.space(), a.type(), a.instance() + delta ); From e6818e1e5f589a5b7f6c031f503b789beb20b653 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 27 Dec 2021 22:55:34 +0000 Subject: [PATCH 083/338] Explicitly cast the result of the "<<" operator --- libraries/protocol/include/graphene/protocol/object_id.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/protocol/include/graphene/protocol/object_id.hpp b/libraries/protocol/include/graphene/protocol/object_id.hpp index 56c0991e24..585ddc96d0 100644 --- a/libraries/protocol/include/graphene/protocol/object_id.hpp +++ b/libraries/protocol/include/graphene/protocol/object_id.hpp @@ -106,7 +106,7 @@ namespace graphene { namespace db { static constexpr uint8_t space_id = SpaceID; static constexpr uint8_t type_id = TypeID; - static constexpr uint16_t space_type = (uint16_t(space_id) << 8) | (uint16_t(type_id)); + static constexpr uint16_t space_type = uint16_t(uint16_t(space_id) << 8) | uint16_t(type_id); object_id() = default; object_id( unsigned_int i ):instance(i){} From 0b233a575052a34dfd772570c42c4a1840c12d1a Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 28 Dec 2021 11:29:22 +0000 Subject: [PATCH 084/338] Fix code smells --- libraries/utilities/elasticsearch.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/utilities/elasticsearch.cpp b/libraries/utilities/elasticsearch.cpp index 2f47e719bf..d7ed539360 100644 --- a/libraries/utilities/elasticsearch.cpp +++ b/libraries/utilities/elasticsearch.cpp @@ -262,14 +262,14 @@ fc::variant es_data_adaptor::adapt(const fc::variant_object& op) if( to_string_fields.find(name) != 
to_string_fields.end() ) { // make a backup and convert to string - original_arrays.emplace_back( std::make_pair( name, array ) ); + original_arrays.emplace_back( name, array ); element = fc::json::to_string(element); } else if( flattened_fields.find(name) != flattened_fields.end() ) { // make a backup and adapt the original auto backup = array; - original_arrays.emplace_back( std::make_pair( name, backup ) ); + original_arrays.emplace_back( name, backup ); adapt(array); } else From 8cc9e4cac02de11cf2b631e2d39032a6e7053430 Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 28 Dec 2021 15:31:18 +0000 Subject: [PATCH 085/338] Deprecate keep-only-current, add dedicated options store-updates and no-delete for each applicable object --- libraries/plugins/es_objects/es_objects.cpp | 267 +++++++++++++------- 1 file changed, 169 insertions(+), 98 deletions(-) diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index 4d6b6cad78..c46b61fecd 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -58,6 +58,37 @@ class es_objects_plugin_impl friend class graphene::es_objects::es_objects_plugin; friend struct genesis_inserter; + struct plugin_options + { + struct object_options + { + object_options( bool e, bool su, bool nd, const string& in ) + : enabled(e), store_updates(su), no_delete(nd), index_name(in) + {} + + bool enabled = true; + bool store_updates = false; + bool no_delete = false; + string index_name = ""; + }; + std::string elasticsearch_url = "http://localhost:9200/"; + std::string auth = ""; + uint32_t bulk_replay = 10000; + uint32_t bulk_sync = 100; + + object_options proposals { true, false, true, "proposal" }; + object_options accounts { true, false, true, "account" }; + object_options assets { true, false, true, "asset" }; + object_options balances { true, false, true, "balance" }; + object_options limit_orders { true, false, false, "limitorder" }; + object_options asset_bitasset { true, false, true, "bitasset" }; + + std::string index_prefix = "objects-"; + uint32_t start_es_after_block = 0; + + void init(const boost::program_options::variables_map& options); + }; + enum class action_type { insertion, @@ -76,31 +107,12 @@ class es_objects_plugin_impl void index_database(const vector& ids, action_type action); void genesis(); - void remove_from_database(const object_id_type& id, const std::string& index); - - struct plugin_options - { - std::string _es_objects_elasticsearch_url = "http://localhost:9200/"; - std::string _es_objects_auth = ""; - uint32_t _es_objects_bulk_replay = 10000; - uint32_t _es_objects_bulk_sync = 100; - bool _es_objects_proposals = true; - bool _es_objects_accounts = true; - bool _es_objects_assets = true; - bool _es_objects_balances = true; - bool _es_objects_limit_orders = false; - bool _es_objects_asset_bitasset = true; - std::string _es_objects_index_prefix = "objects-"; - uint32_t _es_objects_start_es_after_block = 0; - bool _es_objects_keep_only_current = true; - - void init(const boost::program_options::variables_map& options); - }; + void remove_from_database( const object_id_type& id, const plugin_options::object_options& opt ); es_objects_plugin& _self; plugin_options _options; - uint32_t limit_documents = _options._es_objects_bulk_replay; + uint32_t limit_documents = _options.bulk_replay; CURL *curl; // curl handler vector bulk; @@ -112,7 +124,7 @@ class es_objects_plugin_impl bool is_es_version_7_or_above = true; template - void 
prepareTemplate(const T& blockchain_object, const string& index_name); + void prepareTemplate( const T& blockchain_object, const plugin_options::object_options& opt ); void init_program_options(const boost::program_options::variables_map& options); @@ -130,14 +142,14 @@ struct genesis_inserter } template - void insert( bool b, const string& prefix ) + void insert( const es_objects_plugin_impl::plugin_options::object_options& opt ) { - if( !b ) + if( !opt.enabled ) return; db.get_index( ObjType::space_id, ObjType::type_id ).inspect_all_objects( - [this, &prefix](const graphene::db::object &o) { - my->prepareTemplate( static_cast(o), prefix); + [this, &opt](const graphene::db::object &o) { + my->prepareTemplate( static_cast(o), opt ); }); } }; @@ -153,10 +165,10 @@ void es_objects_plugin_impl::genesis() genesis_inserter inserter( this ); - inserter.insert( _options._es_objects_accounts, "account" ); - inserter.insert( _options._es_objects_assets, "asset" ); - inserter.insert( _options._es_objects_asset_bitasset, "bitasset" ); - inserter.insert( _options._es_objects_balances, "balance" ); + inserter.insert( _options.accounts ); + inserter.insert( _options.assets ); + inserter.insert( _options.asset_bitasset ); + inserter.insert( _options.balances ); } void es_objects_plugin_impl::index_database(const vector& ids, action_type action) @@ -165,55 +177,55 @@ void es_objects_plugin_impl::index_database(const vector& ids, a block_number = db.head_block_num(); - if( block_number <= _options._es_objects_start_es_after_block ) + if( block_number <= _options.start_es_after_block ) return; block_time = db.head_block_time(); // check if we are in replay or in sync and change number of bulk documents accordingly if( (fc::time_point::now() - block_time) < fc::seconds(30) ) - limit_documents = _options._es_objects_bulk_sync; + limit_documents = _options.bulk_sync; else - limit_documents = _options._es_objects_bulk_replay; - - static const unordered_map> data_type_map = { - { account_id_type::space_type, { "account", _options._es_objects_accounts } }, - { account_balance_id_type::space_type, { "balance", _options._es_objects_balances } }, - { asset_id_type::space_type, { "asset", _options._es_objects_assets } }, - { asset_bitasset_data_id_type::space_type, { "bitasset", _options._es_objects_asset_bitasset } }, - { limit_order_id_type::space_type, { "limitorder", _options._es_objects_limit_orders } }, - { proposal_id_type::space_type, { "proposal", _options._es_objects_proposals } } + limit_documents = _options.bulk_replay; + + static const unordered_map data_type_map = { + { account_id_type::space_type, _options.accounts }, + { account_balance_id_type::space_type, _options.balances }, + { asset_id_type::space_type, _options.assets }, + { asset_bitasset_data_id_type::space_type, _options.asset_bitasset }, + { limit_order_id_type::space_type, _options.limit_orders }, + { proposal_id_type::space_type, _options.proposals } }; for( const auto& value: ids ) { const auto itr = data_type_map.find( value.space_type() ); - if( itr == data_type_map.end() || !(itr->second.second) ) + if( itr == data_type_map.end() || !(itr->second.enabled) ) continue; - const string& prefix = itr->second.first; + const auto& opt = itr->second; if( action_type::deletion == action ) - remove_from_database( value, prefix ); + remove_from_database( value, opt ); else { switch( itr->first ) { case account_id_type::space_type: - prepareTemplate( db.get(value), prefix ); + prepareTemplate( db.get(value), opt ); break; case 
account_balance_id_type::space_type: - prepareTemplate( db.get(value), prefix ); + prepareTemplate( db.get(value), opt ); break; case asset_id_type::space_type: - prepareTemplate( db.get(value), prefix ); + prepareTemplate( db.get(value), opt ); break; case asset_bitasset_data_id_type::space_type: - prepareTemplate( db.get(value), prefix ); + prepareTemplate( db.get(value), opt ); break; case limit_order_id_type::space_type: - prepareTemplate( db.get(value), prefix ); + prepareTemplate( db.get(value), opt ); break; case proposal_id_type::space_type: - prepareTemplate( db.get(value), prefix ); + prepareTemplate( db.get(value), opt ); break; default: break; @@ -223,33 +235,35 @@ void es_objects_plugin_impl::index_database(const vector& ids, a } -void es_objects_plugin_impl::remove_from_database( const object_id_type& id, const std::string& index) +void es_objects_plugin_impl::remove_from_database( + const object_id_type& id, const es_objects_plugin_impl::plugin_options::object_options& opt ) { - if(_options._es_objects_keep_only_current) - { - fc::mutable_variant_object delete_line; - delete_line["_id"] = string(id); - delete_line["_index"] = _options._es_objects_index_prefix + index; - if(!is_es_version_7_or_above) - delete_line["_type"] = "_doc"; - fc::mutable_variant_object final_delete_line; - final_delete_line["delete"] = delete_line; - prepare.push_back(fc::json::to_string(final_delete_line)); - std::move(prepare.begin(), prepare.end(), std::back_inserter(bulk)); - prepare.clear(); - - send_bulk_if_ready(); - } + if( opt.no_delete ) + return; + + fc::mutable_variant_object delete_line; + delete_line["_id"] = string(id); // Note: this does not work if `store_updates` is true + delete_line["_index"] = _options.index_prefix + opt.index_name; + if( !is_es_version_7_or_above ) + delete_line["_type"] = "_doc"; + fc::mutable_variant_object final_delete_line; + final_delete_line["delete"] = std::move( delete_line ); + prepare.push_back( fc::json::to_string(final_delete_line) ); + std::move( prepare.begin(), prepare.end(), std::back_inserter(bulk) ); + prepare.clear(); + + send_bulk_if_ready(); } template -void es_objects_plugin_impl::prepareTemplate(const T& blockchain_object, const string& index_name) +void es_objects_plugin_impl::prepareTemplate( + const T& blockchain_object, const es_objects_plugin_impl::plugin_options::object_options& opt ) { fc::mutable_variant_object bulk_header; - bulk_header["_index"] = _options._es_objects_index_prefix + index_name; - if(!is_es_version_7_or_above) + bulk_header["_index"] = _options.index_prefix + opt.index_name; + if( !is_es_version_7_or_above ) bulk_header["_type"] = "_doc"; - if(_options._es_objects_keep_only_current) + if( !opt.store_updates ) { bulk_header["_id"] = string(blockchain_object.id); } @@ -278,8 +292,8 @@ void es_objects_plugin_impl::send_bulk_if_ready() graphene::utilities::ES es; es.curl = curl; es.bulk_lines = bulk; - es.elasticsearch_url = _options._es_objects_elasticsearch_url; - es.auth = _options._es_objects_auth; + es.elasticsearch_url = _options.elasticsearch_url; + es.auth = _options.auth; if (!graphene::utilities::SendBulk(std::move(es))) FC_THROW_EXCEPTION(graphene::chain::plugin_exception, "Error sending bulk data."); else @@ -328,15 +342,44 @@ void es_objects_plugin::plugin_set_program_options( "Number of bulk documents to index on replay(10000)") ("es-objects-bulk-sync", boost::program_options::value(), "Number of bulk documents to index on a synchronized chain(100)") - ("es-objects-proposals", 
boost::program_options::value(), "Store proposal objects(true)") - ("es-objects-accounts", boost::program_options::value(), "Store account objects(true)") - ("es-objects-assets", boost::program_options::value(), "Store asset objects(true)") - ("es-objects-balances", boost::program_options::value(), "Store balances objects(true)") - ("es-objects-limit-orders", boost::program_options::value(), "Store limit order objects(false)") - ("es-objects-asset-bitasset", boost::program_options::value(), "Store feed data(true)") + + ("es-objects-proposals", boost::program_options::value(), "Store proposal objects (true)") + ("es-objects-proposals-store-updates", boost::program_options::value(), + "Store all updates to the proposal objects (false)") + ("es-objects-proposals-no-delete", boost::program_options::value(), + "Do not delete a proposal from ES even if it is deleted from chain state. " + "It is implicitly true and can not be set to false if es-objects-proposals-store-updates is true. " + "(true)") + + ("es-objects-accounts", boost::program_options::value(), "Store account objects (true)") + ("es-objects-accounts-store-updates", boost::program_options::value(), + "Store all updates to the account objects (false)") + + ("es-objects-assets", boost::program_options::value(), "Store asset objects (true)") + ("es-objects-assets-store-updates", boost::program_options::value(), + "Store all updates to the asset objects (false)") + + ("es-objects-balances", boost::program_options::value(), "Store account balances (true)") + ("es-objects-balances-store-updates", boost::program_options::value(), + "Store all updates to the account balances (false)") + + ("es-objects-limit-orders", boost::program_options::value(), "Store limit order objects (true)") + ("es-objects-limit-orders-store-updates", boost::program_options::value(), + "Store all updates to the limit orders (false)") + ("es-objects-limit-orders-no-delete", boost::program_options::value(), + "Do not delete a limit order object from ES even if it is deleted from chain state. " + "It is implicitly true and can not be set to false if es-objects-limit-orders-store-updates is true. " + "(false)") + + ("es-objects-asset-bitasset", boost::program_options::value(), + "Store bitasset data, including price feeds (true)") + ("es-objects-asset-bitasset-store-updates", boost::program_options::value(), + "Store all updates to the bitasset data (false)") + ("es-objects-index-prefix", boost::program_options::value(), "Add a prefix to the index(objects-)") ("es-objects-keep-only-current", boost::program_options::value(), + "Deprecated. Please use the store-updates or no-delete options. 
" "Keep only current state of the objects(true)") ("es-objects-start-es-after-block", boost::program_options::value(), "Start doing ES job after block(0)") @@ -352,43 +395,72 @@ void detail::es_objects_plugin_impl::init_program_options(const boost::program_o void detail::es_objects_plugin_impl::plugin_options::init(const boost::program_options::variables_map& options) { if (options.count("es-objects-elasticsearch-url") > 0) { - _es_objects_elasticsearch_url = options["es-objects-elasticsearch-url"].as(); + elasticsearch_url = options["es-objects-elasticsearch-url"].as(); } if (options.count("es-objects-auth") > 0) { - _es_objects_auth = options["es-objects-auth"].as(); + auth = options["es-objects-auth"].as(); } if (options.count("es-objects-bulk-replay") > 0) { - _es_objects_bulk_replay = options["es-objects-bulk-replay"].as(); + bulk_replay = options["es-objects-bulk-replay"].as(); } if (options.count("es-objects-bulk-sync") > 0) { - _es_objects_bulk_sync = options["es-objects-bulk-sync"].as(); + bulk_sync = options["es-objects-bulk-sync"].as(); } + if (options.count("es-objects-proposals") > 0) { - _es_objects_proposals = options["es-objects-proposals"].as(); + proposals.enabled = options["es-objects-proposals"].as(); + } + if (options.count("es-objects-proposals-store-updates") > 0) { + proposals.store_updates = options["es-objects-proposals-store-updates"].as(); + } + if (options.count("es-objects-proposals-no-delete") > 0) { + proposals.no_delete = options["es-objects-proposals-no-delete"].as(); } + + if (options.count("es-objects-accounts") > 0) { - _es_objects_accounts = options["es-objects-accounts"].as(); + accounts.enabled = options["es-objects-accounts"].as(); + } + if (options.count("es-objects-accounts-store-updates") > 0) { + accounts.store_updates = options["es-objects-accounts-store-updates"].as(); } + if (options.count("es-objects-assets") > 0) { - _es_objects_assets = options["es-objects-assets"].as(); + assets.enabled = options["es-objects-assets"].as(); + } + if (options.count("es-objects-assets-store-updates") > 0) { + assets.store_updates = options["es-objects-assets-store-updates"].as(); } + if (options.count("es-objects-balances") > 0) { - _es_objects_balances = options["es-objects-balances"].as(); + balances.enabled = options["es-objects-balances"].as(); + } + if (options.count("es-objects-balances-store-updates") > 0) { + balances.store_updates = options["es-objects-balances-store-updates"].as(); } + if (options.count("es-objects-limit-orders") > 0) { - _es_objects_limit_orders = options["es-objects-limit-orders"].as(); + limit_orders.enabled = options["es-objects-limit-orders"].as(); + } + if (options.count("es-objects-limit-orders-store-updates") > 0) { + limit_orders.store_updates = options["es-objects-limit-orders-store-updates"].as(); + } + if (options.count("es-objects-limit-orders-no-delete") > 0) { + limit_orders.no_delete = options["es-objects-limit-orders-no-delete"].as(); } + if (options.count("es-objects-asset-bitasset") > 0) { - _es_objects_asset_bitasset = options["es-objects-asset-bitasset"].as(); + asset_bitasset.enabled = options["es-objects-asset-bitasset"].as(); } - if (options.count("es-objects-index-prefix") > 0) { - _es_objects_index_prefix = options["es-objects-index-prefix"].as(); + if (options.count("es-objects-asset-bitasset-store-updates") > 0) { + asset_bitasset.store_updates = options["es-objects-asset-bitasset-store-updates"].as(); } - if (options.count("es-objects-keep-only-current") > 0) { - _es_objects_keep_only_current = 
options["es-objects-keep-only-current"].as(); + + if (options.count("es-objects-index-prefix") > 0) { + index_prefix = options["es-objects-index-prefix"].as(); } if (options.count("es-objects-start-es-after-block") > 0) { - _es_objects_start_es_after_block = options["es-objects-start-es-after-block"].as(); + start_es_after_block = options["es-objects-start-es-after-block"].as(); } } @@ -397,7 +469,7 @@ void es_objects_plugin::plugin_initialize(const boost::program_options::variable my->init_program_options( options ); database().applied_block.connect([this](const signed_block &b) { - if( 1U == b.block_num() && 0 == my->_options._es_objects_start_es_after_block ) { + if( 1U == b.block_num() && 0 == my->_options.start_es_after_block ) { my->genesis(); } }); @@ -416,12 +488,11 @@ void es_objects_plugin::plugin_initialize(const boost::program_options::variable graphene::utilities::ES es; es.curl = my->curl; - es.elasticsearch_url = my->_options._es_objects_elasticsearch_url; - es.auth = my->_options._es_objects_auth; - es.auth = my->_options._es_objects_index_prefix; + es.elasticsearch_url = my->_options.elasticsearch_url; + es.auth = my->_options.auth; if(!graphene::utilities::checkES(es)) - FC_THROW( "ES database is not up in url ${url}", ("url", my->_options._es_objects_elasticsearch_url) ); + FC_THROW( "ES database is not up in url ${url}", ("url", my->_options.elasticsearch_url) ); graphene::utilities::checkESVersion7OrAbove(es, my->is_es_version_7_or_above); } From dbd8901d9f4cb1349643c09072835b17ab320809 Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 28 Dec 2021 18:52:13 +0000 Subject: [PATCH 086/338] Add utilities::get_program_option(), refactor code --- libraries/plugins/es_objects/es_objects.cpp | 89 +++++-------------- .../utilities/boost_program_options.hpp | 38 ++++++++ 2 files changed, 59 insertions(+), 68 deletions(-) create mode 100644 libraries/utilities/include/graphene/utilities/boost_program_options.hpp diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index c46b61fecd..4646e81a30 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -32,6 +32,7 @@ #include #include +#include namespace graphene { namespace db { template @@ -394,74 +395,26 @@ void detail::es_objects_plugin_impl::init_program_options(const boost::program_o void detail::es_objects_plugin_impl::plugin_options::init(const boost::program_options::variables_map& options) { - if (options.count("es-objects-elasticsearch-url") > 0) { - elasticsearch_url = options["es-objects-elasticsearch-url"].as(); - } - if (options.count("es-objects-auth") > 0) { - auth = options["es-objects-auth"].as(); - } - if (options.count("es-objects-bulk-replay") > 0) { - bulk_replay = options["es-objects-bulk-replay"].as(); - } - if (options.count("es-objects-bulk-sync") > 0) { - bulk_sync = options["es-objects-bulk-sync"].as(); - } - - if (options.count("es-objects-proposals") > 0) { - proposals.enabled = options["es-objects-proposals"].as(); - } - if (options.count("es-objects-proposals-store-updates") > 0) { - proposals.store_updates = options["es-objects-proposals-store-updates"].as(); - } - if (options.count("es-objects-proposals-no-delete") > 0) { - proposals.no_delete = options["es-objects-proposals-no-delete"].as(); - } - - - if (options.count("es-objects-accounts") > 0) { - accounts.enabled = options["es-objects-accounts"].as(); - } - if (options.count("es-objects-accounts-store-updates") > 0) { - accounts.store_updates 
= options["es-objects-accounts-store-updates"].as(); - } - - if (options.count("es-objects-assets") > 0) { - assets.enabled = options["es-objects-assets"].as(); - } - if (options.count("es-objects-assets-store-updates") > 0) { - assets.store_updates = options["es-objects-assets-store-updates"].as(); - } - - if (options.count("es-objects-balances") > 0) { - balances.enabled = options["es-objects-balances"].as(); - } - if (options.count("es-objects-balances-store-updates") > 0) { - balances.store_updates = options["es-objects-balances-store-updates"].as(); - } - - if (options.count("es-objects-limit-orders") > 0) { - limit_orders.enabled = options["es-objects-limit-orders"].as(); - } - if (options.count("es-objects-limit-orders-store-updates") > 0) { - limit_orders.store_updates = options["es-objects-limit-orders-store-updates"].as(); - } - if (options.count("es-objects-limit-orders-no-delete") > 0) { - limit_orders.no_delete = options["es-objects-limit-orders-no-delete"].as(); - } - - if (options.count("es-objects-asset-bitasset") > 0) { - asset_bitasset.enabled = options["es-objects-asset-bitasset"].as(); - } - if (options.count("es-objects-asset-bitasset-store-updates") > 0) { - asset_bitasset.store_updates = options["es-objects-asset-bitasset-store-updates"].as(); - } - - if (options.count("es-objects-index-prefix") > 0) { - index_prefix = options["es-objects-index-prefix"].as(); - } - if (options.count("es-objects-start-es-after-block") > 0) { - start_es_after_block = options["es-objects-start-es-after-block"].as(); - } + utilities::get_program_option( options, "es-objects-elasticsearch-url", elasticsearch_url ); + utilities::get_program_option( options, "es-objects-auth", auth ); + utilities::get_program_option( options, "es-objects-bulk-replay", bulk_replay ); + utilities::get_program_option( options, "es-objects-bulk-sync", bulk_sync ); + utilities::get_program_option( options, "es-objects-proposals", proposals.enabled ); + utilities::get_program_option( options, "es-objects-proposals-store-updates", proposals.store_updates ); + utilities::get_program_option( options, "es-objects-proposals-no-delete", proposals.no_delete ); + utilities::get_program_option( options, "es-objects-accounts", accounts.enabled ); + utilities::get_program_option( options, "es-objects-accounts-store-updates", accounts.store_updates ); + utilities::get_program_option( options, "es-objects-assets", assets.enabled ); + utilities::get_program_option( options, "es-objects-assets-store-updates", assets.store_updates ); + utilities::get_program_option( options, "es-objects-balances", balances.enabled ); + utilities::get_program_option( options, "es-objects-balances-store-updates", balances.store_updates ); + utilities::get_program_option( options, "es-objects-limit-orders", limit_orders.enabled ); + utilities::get_program_option( options, "es-objects-limit-orders-store-updates", limit_orders.store_updates ); + utilities::get_program_option( options, "es-objects-limit-orders-no-delete", limit_orders.no_delete ); + utilities::get_program_option( options, "es-objects-asset-bitasset", asset_bitasset.enabled ); + utilities::get_program_option( options, "es-objects-asset-bitasset-store-updates", asset_bitasset.store_updates ); + utilities::get_program_option( options, "es-objects-index-prefix", index_prefix ); + utilities::get_program_option( options, "es-objects-start-es-after-block", start_es_after_block ); } void es_objects_plugin::plugin_initialize(const boost::program_options::variables_map& options) diff --git 
a/libraries/utilities/include/graphene/utilities/boost_program_options.hpp b/libraries/utilities/include/graphene/utilities/boost_program_options.hpp new file mode 100644 index 0000000000..31e3dd9eaa --- /dev/null +++ b/libraries/utilities/include/graphene/utilities/boost_program_options.hpp @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2021 Abit More, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#pragma once +#include + +namespace graphene { namespace utilities { + +template +void get_program_option( const boost::program_options::variables_map& from, const std::string& key, T& to ) +{ + if( from.count( key ) > 0 ) + { + to = from[key].as(); + } +} + +} } // end namespace graphene::utilities From fa90b824bdc0d7330aed04289230ed6b6232fb3f Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 28 Dec 2021 19:55:53 +0000 Subject: [PATCH 087/338] Add es-objects-sync-db-on-startup option --- libraries/plugins/es_objects/es_objects.cpp | 38 +++++++++++---------- 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index 4646e81a30..5b3e82facc 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -57,7 +57,7 @@ class es_objects_plugin_impl private: friend class graphene::es_objects::es_objects_plugin; - friend struct genesis_inserter; + friend struct data_loader; struct plugin_options { @@ -86,6 +86,7 @@ class es_objects_plugin_impl std::string index_prefix = "objects-"; uint32_t start_es_after_block = 0; + bool sync_db_on_startup = false; void init(const boost::program_options::variables_map& options); }; @@ -107,7 +108,7 @@ class es_objects_plugin_impl { index_database( ids, action_type::deletion ); } void index_database(const vector& ids, action_type action); - void genesis(); + void sync_db(); void remove_from_database( const object_id_type& id, const plugin_options::object_options& opt ); es_objects_plugin& _self; @@ -132,18 +133,18 @@ class es_objects_plugin_impl void send_bulk_if_ready(); }; -struct genesis_inserter +struct data_loader { es_objects_plugin_impl* my; graphene::chain::database &db; - explicit genesis_inserter( es_objects_plugin_impl* _my ) + explicit data_loader( es_objects_plugin_impl* _my ) : my(_my), db( my->_self.database() ) { // Nothing to do } template - void insert( const es_objects_plugin_impl::plugin_options::object_options& opt ) + void load( const 
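The get_program_option helper defined above just copies an option value out of a boost variables_map when the key is present, leaving the target untouched otherwise. A small usage sketch (the option name and default here are invented for the example):

    #include <boost/program_options.hpp>
    #include <graphene/utilities/boost_program_options.hpp>
    #include <cstdint>
    #include <iostream>

    int main( int argc, char** argv )
    {
       namespace bpo = boost::program_options;
       bpo::options_description desc( "example" );
       desc.add_options()( "bulk-replay", bpo::value<uint32_t>(), "hypothetical option" );
       bpo::variables_map vm;
       bpo::store( bpo::parse_command_line( argc, argv, desc ), vm );

       uint32_t bulk_replay = 10000; // the default survives if the option was not given
       graphene::utilities::get_program_option( vm, "bulk-replay", bulk_replay );
       std::cout << "bulk-replay = " << bulk_replay << "\n";
       return 0;
    }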
es_objects_plugin_impl::plugin_options::object_options& opt ) { if( !opt.enabled ) return; @@ -155,21 +156,23 @@ struct genesis_inserter } }; -void es_objects_plugin_impl::genesis() +void es_objects_plugin_impl::sync_db() { - ilog("elasticsearch OBJECTS: inserting data from genesis"); + ilog("elasticsearch OBJECTS: loading data from the object database (chain state)"); graphene::chain::database &db = _self.database(); block_number = db.head_block_num(); block_time = db.head_block_time(); - genesis_inserter inserter( this ); + data_loader loader( this ); - inserter.insert( _options.accounts ); - inserter.insert( _options.assets ); - inserter.insert( _options.asset_bitasset ); - inserter.insert( _options.balances ); + loader.load( _options.accounts ); + loader.load( _options.assets ); + loader.load( _options.asset_bitasset ); + loader.load( _options.balances ); + loader.load( _options.proposals ); + loader.load( _options.limit_orders ); } void es_objects_plugin_impl::index_database(const vector& ids, action_type action) @@ -384,6 +387,8 @@ void es_objects_plugin::plugin_set_program_options( "Keep only current state of the objects(true)") ("es-objects-start-es-after-block", boost::program_options::value(), "Start doing ES job after block(0)") + ("es-objects-sync-db-on-startup", boost::program_options::value(), + "Copy all applicable objects from the object database (chain state) to ES on program startup (false)") ; cfg.add(cli); } @@ -415,17 +420,13 @@ void detail::es_objects_plugin_impl::plugin_options::init(const boost::program_o utilities::get_program_option( options, "es-objects-asset-bitasset-store-updates", asset_bitasset.store_updates ); utilities::get_program_option( options, "es-objects-index-prefix", index_prefix ); utilities::get_program_option( options, "es-objects-start-es-after-block", start_es_after_block ); + utilities::get_program_option( options, "es-objects-sync-db-on-startup", sync_db_on_startup ); } void es_objects_plugin::plugin_initialize(const boost::program_options::variables_map& options) { my->init_program_options( options ); - database().applied_block.connect([this](const signed_block &b) { - if( 1U == b.block_num() && 0 == my->_options.start_es_after_block ) { - my->genesis(); - } - }); database().new_objects.connect([this]( const vector& ids, const flat_set& ) { my->on_objects_create( ids ); @@ -452,7 +453,8 @@ void es_objects_plugin::plugin_initialize(const boost::program_options::variable void es_objects_plugin::plugin_startup() { - // Nothing to do + if( my->_options.sync_db_on_startup || 0 == database().head_block_num() ) + my->sync_db(); } } } From bacd276e46822e00e3db6af229f4fa0c505d97b1 Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 28 Dec 2021 20:17:44 +0000 Subject: [PATCH 088/338] Send bulk on es_objects plugin shutdown --- libraries/plugins/es_objects/es_objects.cpp | 34 +++++++++++-------- .../graphene/es_objects/es_objects.hpp | 1 + 2 files changed, 21 insertions(+), 14 deletions(-) diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index 5b3e82facc..c7227ca5ac 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -130,7 +130,7 @@ class es_objects_plugin_impl void init_program_options(const boost::program_options::variables_map& options); - void send_bulk_if_ready(); + void send_bulk_if_ready( bool force = false ); }; struct data_loader @@ -289,20 +289,21 @@ void es_objects_plugin_impl::prepareTemplate( send_bulk_if_ready(); } -void 
es_objects_plugin_impl::send_bulk_if_ready() +void es_objects_plugin_impl::send_bulk_if_ready( bool force ) { - if( curl && bulk.size() >= limit_documents ) // send data to elasticsearch when bulk is too large - { - graphene::utilities::ES es; - es.curl = curl; - es.bulk_lines = bulk; - es.elasticsearch_url = _options.elasticsearch_url; - es.auth = _options.auth; - if (!graphene::utilities::SendBulk(std::move(es))) - FC_THROW_EXCEPTION(graphene::chain::plugin_exception, "Error sending bulk data."); - else - bulk.clear(); - } + if( !curl || bulk.empty() ) + return; + if( !force && bulk.size() < limit_documents ) + return; + // send data to elasticsearch when being forced or bulk is too large + graphene::utilities::ES es; + es.curl = curl; + es.bulk_lines = bulk; + es.elasticsearch_url = _options.elasticsearch_url; + es.auth = _options.auth; + if( !graphene::utilities::SendBulk(std::move(es)) ) + FC_THROW_EXCEPTION(graphene::chain::plugin_exception, "Error sending bulk data."); + bulk.clear(); } es_objects_plugin_impl::~es_objects_plugin_impl() @@ -457,4 +458,9 @@ void es_objects_plugin::plugin_startup() my->sync_db(); } +void es_objects_plugin::plugin_shutdown() +{ + my->send_bulk_if_ready(true); // flush +} + } } diff --git a/libraries/plugins/es_objects/include/graphene/es_objects/es_objects.hpp b/libraries/plugins/es_objects/include/graphene/es_objects/es_objects.hpp index cfae63d77b..9fbfdd6289 100644 --- a/libraries/plugins/es_objects/include/graphene/es_objects/es_objects.hpp +++ b/libraries/plugins/es_objects/include/graphene/es_objects/es_objects.hpp @@ -48,6 +48,7 @@ class es_objects_plugin : public graphene::app::plugin boost::program_options::options_description& cfg) override; void plugin_initialize(const boost::program_options::variables_map& options) override; void plugin_startup() override; + void plugin_shutdown() override; private: std::unique_ptr my; From 8cf414442f68eeaa91965e4a5452660ecd9dc866 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 2 Jan 2022 16:49:36 +0000 Subject: [PATCH 089/338] Add a missing quotation mark --- libraries/plugins/elasticsearch/elasticsearch_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index ba02c4d074..570e902674 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -661,7 +661,7 @@ operation_history_object elasticsearch_plugin::get_operation_by_id(operation_his "query": { "match": { - "account_history.operation_id": )" + operation_id_string + R"(" + "account_history.operation_id": ")" + operation_id_string + R"(" } } } From 827340abc5298dcf682685c1d8e58a3060d2bfa6 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 2 Jan 2022 17:05:04 +0000 Subject: [PATCH 090/338] Store budget records in ES --- libraries/plugins/es_objects/es_objects.cpp | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index c7227ca5ac..8814179225 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -83,6 +84,7 @@ class es_objects_plugin_impl object_options balances { true, false, true, "balance" }; object_options limit_orders { true, false, false, "limitorder" }; object_options asset_bitasset { true, 
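The force flag added to send_bulk_if_ready above, together with the new plugin_shutdown hook, implements a plain batching pattern: queue documents until the batch limit is hit, and flush whatever is left unconditionally when shutting down. A minimal, library-free sketch of that pattern (all names are invented for the example):

    #include <iostream>
    #include <string>
    #include <vector>

    struct bulk_sender
    {
       std::vector<std::string> bulk;
       std::size_t limit_documents = 100;

       void queue( std::string line )
       {
          bulk.push_back( std::move( line ) );
          send_if_ready( false );
       }

       void send_if_ready( bool force )
       {
          if( bulk.empty() )
             return;
          if( !force && bulk.size() < limit_documents )
             return;
          std::cout << "sending " << bulk.size() << " lines\n"; // stand-in for the HTTP _bulk call
          bulk.clear();
       }
    };

    int main()
    {
       bulk_sender sender;
       for( int i = 0; i < 5; ++i )
          sender.queue( "{}" );
       sender.send_if_ready( true ); // what plugin_shutdown does: flush the remainder
       return 0;
    }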
false, true, "bitasset" }; + object_options budget { true, false, true, "budget" }; std::string index_prefix = "objects-"; uint32_t start_es_after_block = 0; @@ -173,6 +175,7 @@ void es_objects_plugin_impl::sync_db() loader.load( _options.balances ); loader.load( _options.proposals ); loader.load( _options.limit_orders ); + loader.load( _options.budget ); } void es_objects_plugin_impl::index_database(const vector& ids, action_type action) @@ -198,7 +201,8 @@ void es_objects_plugin_impl::index_database(const vector& ids, a { asset_id_type::space_type, _options.assets }, { asset_bitasset_data_id_type::space_type, _options.asset_bitasset }, { limit_order_id_type::space_type, _options.limit_orders }, - { proposal_id_type::space_type, _options.proposals } + { proposal_id_type::space_type, _options.proposals }, + { budget_record_id_type::space_type, _options.budget } }; for( const auto& value: ids ) @@ -231,6 +235,9 @@ void es_objects_plugin_impl::index_database(const vector& ids, a case proposal_id_type::space_type: prepareTemplate( db.get(value), opt ); break; + case budget_record_id_type::space_type: + prepareTemplate( db.get(value), opt ); + break; default: break; } @@ -381,6 +388,8 @@ void es_objects_plugin::plugin_set_program_options( ("es-objects-asset-bitasset-store-updates", boost::program_options::value(), "Store all updates to the bitasset data (false)") + ("es-objects-budget-records", boost::program_options::value(), "Store budget records (true)") + ("es-objects-index-prefix", boost::program_options::value(), "Add a prefix to the index(objects-)") ("es-objects-keep-only-current", boost::program_options::value(), @@ -419,6 +428,7 @@ void detail::es_objects_plugin_impl::plugin_options::init(const boost::program_o utilities::get_program_option( options, "es-objects-limit-orders-no-delete", limit_orders.no_delete ); utilities::get_program_option( options, "es-objects-asset-bitasset", asset_bitasset.enabled ); utilities::get_program_option( options, "es-objects-asset-bitasset-store-updates", asset_bitasset.store_updates ); + utilities::get_program_option( options, "es-objects-budget-records", budget.enabled ); utilities::get_program_option( options, "es-objects-index-prefix", index_prefix ); utilities::get_program_option( options, "es-objects-start-es-after-block", start_es_after_block ); utilities::get_program_option( options, "es-objects-sync-db-on-startup", sync_db_on_startup ); From 7a30eded9173403174f040081f73870dd0ef672e Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 2 Jan 2022 18:22:49 +0000 Subject: [PATCH 091/338] Add tests for storing budget records in ES --- tests/elasticsearch/main.cpp | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/tests/elasticsearch/main.cpp b/tests/elasticsearch/main.cpp index 772ffe118b..6e82f27530 100644 --- a/tests/elasticsearch/main.cpp +++ b/tests/elasticsearch/main.cpp @@ -224,7 +224,10 @@ BOOST_AUTO_TEST_CASE(elasticsearch_objects) { es.elasticsearch_url = GRAPHENE_TESTING_ES_URL; es.index_prefix = es_obj_index_prefix; - // delete all first + // The head block number is 1 + BOOST_CHECK_EQUAL( db.head_block_num(), 1u ); + + // delete all first, this will delete genesis data and data inserted at block 1 auto delete_objects = graphene::utilities::deleteAll(es); BOOST_REQUIRE(delete_objects); // require successful deletion @@ -259,11 +262,30 @@ BOOST_AUTO_TEST_CASE(elasticsearch_objects) { auto bitasset_data_id = j["hits"]["hits"][size_t(0)]["_source"]["bitasset_data_id"].as_string(); es.endpoint = 
es.index_prefix + "bitasset/_doc/_search"; - es.query = "{ \"query\" : { \"bool\": { \"must\" : [{ \"term\": { \"object_id\": \""+bitasset_data_id+"\"}}] } } }"; + es.query = "{ \"query\" : { \"bool\": { \"must\" : [{ \"term\": { \"object_id\": \"" + + bitasset_data_id + "\"}}] } } }"; res = graphene::utilities::simpleQuery(es); j = fc::json::from_string(res); auto bitasset_object_id = j["hits"]["hits"][size_t(0)]["_source"]["object_id"].as_string(); BOOST_CHECK_EQUAL(bitasset_object_id, bitasset_data_id); + + // maintenance, for budget records + generate_blocks( db.get_dynamic_global_properties().next_maintenance_time ); + + es.endpoint = es.index_prefix + "budget/_doc/_count"; + es.query = ""; + fc::wait_for( ES_WAIT_TIME, [&]() { + res = graphene::utilities::getEndPoint(es); + j = fc::json::from_string(res); + if( !j.is_object() ) + return false; + const auto& obj = j.get_object(); + if( obj.find("count") == obj.end() ) + return false; + total = obj["count"].as_string(); + return (total == "1"); // new record inserted at the first maintenance block + }); + } } catch (fc::exception &e) { From 474b7b8128cfc5af8c9485c1cf546bc646b77cf5 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 3 Jan 2022 14:37:40 +0000 Subject: [PATCH 092/338] Refactor ES utilities code (add new code) --- libraries/utilities/elasticsearch.cpp | 219 ++++++++++++++---- .../graphene/utilities/elasticsearch.hpp | 96 +++++++- 2 files changed, 269 insertions(+), 46 deletions(-) diff --git a/libraries/utilities/elasticsearch.cpp b/libraries/utilities/elasticsearch.cpp index d7ed539360..38cbddaa09 100644 --- a/libraries/utilities/elasticsearch.cpp +++ b/libraries/utilities/elasticsearch.cpp @@ -28,7 +28,7 @@ #include #include -size_t WriteCallback(void *contents, size_t size, size_t nmemb, void *userp) +static size_t curl_write_function(void *contents, size_t size, size_t nmemb, void *userp) { ((std::string*)userp)->append((char*)contents, size * nmemb); return size * nmemb; @@ -90,62 +90,67 @@ std::string simpleQuery(ES& es) return doCurl(curl_request); } -bool SendBulk(ES&& es) +static bool handle_bulk_response( long http_code, const std::string& curl_read_buffer ) { - std::string bulking = joinBulkLines(es.bulk_lines); - - graphene::utilities::CurlRequest curl_request; - curl_request.handler = es.curl; - curl_request.url = es.elasticsearch_url + "_bulk"; - curl_request.auth = es.auth; - curl_request.type = "POST"; - curl_request.query = std::move(bulking); - - auto curlResponse = doCurl(curl_request); - - if(handleBulkResponse(getResponseCode(curl_request.handler), curlResponse)) + if( 200 == http_code ) + { + // all good, but check errors in response + fc::variant j = fc::json::from_string(curl_read_buffer); + bool errors = j["errors"].as_bool(); + if( errors ) + { + elog( "ES returned 200 but with errors: ${e}", ("e", curl_read_buffer) ); + return false; + } return true; + } + + if( 413 == http_code ) + { + elog( "413 error: Request too large. Can be low disk space. ${e}", ("e", curl_read_buffer) ); + } + else if( 401 == http_code ) + { + elog( "401 error: Unauthorized. 
${e}", ("e", curl_read_buffer) ); + } + else + { + elog( "${code} error: ${e}", ("code", std::to_string(http_code)) ("e", curl_read_buffer) ); + } return false; } -std::string joinBulkLines(const std::vector& bulk) +static std::string joinBulkLines(const std::vector& bulk) { auto bulking = boost::algorithm::join(bulk, "\n"); bulking = bulking + "\n"; return bulking; } -long getResponseCode(CURL *handler) + +static long getResponseCode(CURL *handler) { long http_code = 0; curl_easy_getinfo (handler, CURLINFO_RESPONSE_CODE, &http_code); return http_code; } -bool handleBulkResponse(long http_code, const std::string& CurlReadBuffer) +bool SendBulk(ES&& es) { - if(http_code == 200) { - // all good, but check errors in response - fc::variant j = fc::json::from_string(CurlReadBuffer); - bool errors = j["errors"].as_bool(); - if( errors ) { - elog( "ES returned 200 but with errors: ${e}", ("e", CurlReadBuffer) ); - return false; - } - } - else { - if(http_code == 413) { - elog( "413 error: Can be low disk space. ${e}", ("e", CurlReadBuffer) ); - } - else if(http_code == 401) { - elog( "401 error: Unauthorized. ${e}", ("e", CurlReadBuffer) ); - } - else { - elog( "${code} error: ${e}", ("code", std::to_string(http_code)) ("e", CurlReadBuffer) ); - } - return false; - } - return true; + std::string bulking = joinBulkLines(es.bulk_lines); + + graphene::utilities::CurlRequest curl_request; + curl_request.handler = es.curl; + curl_request.url = es.elasticsearch_url + "_bulk"; + curl_request.auth = es.auth; + curl_request.type = "POST"; + curl_request.query = std::move(bulking); + + auto curlResponse = doCurl(curl_request); + + if(handle_bulk_response(getResponseCode(curl_request.handler), curlResponse)) + return true; + return false; } std::vector createBulk(const fc::mutable_variant_object& bulk_header, std::string&& data) @@ -207,7 +212,7 @@ std::string doCurl(CurlRequest& curl) curl_easy_setopt(curl.handler, CURLOPT_POST, false); curl_easy_setopt(curl.handler, CURLOPT_HTTPGET, true); } - curl_easy_setopt(curl.handler, CURLOPT_WRITEFUNCTION, WriteCallback); + curl_easy_setopt(curl.handler, CURLOPT_WRITEFUNCTION, curl_write_function); curl_easy_setopt(curl.handler, CURLOPT_WRITEDATA, (void *)&CurlReadBuffer); curl_easy_setopt(curl.handler, CURLOPT_USERAGENT, "libcrp/0.1"); if(!curl.auth.empty()) @@ -217,6 +222,140 @@ std::string doCurl(CurlRequest& curl) return CurlReadBuffer; } +curl_wrapper::curl_wrapper() +{ + curl.reset( curl_easy_init() ); + if( !curl ) + FC_THROW( "Unable to init cURL" ); + + curl_easy_setopt( curl.get(), CURLOPT_SSLVERSION, CURL_SSLVERSION_TLSv1_2 ); + + request_headers.reset( curl_slist_append( NULL, "Content-Type: application/json" ) ); + if( !request_headers ) + FC_THROW( "Unable to init cURL request headers" ); + + curl_easy_setopt( curl.get(), CURLOPT_HTTPHEADER, request_headers.get() ); + curl_easy_setopt( curl.get(), CURLOPT_USERAGENT, "bitshares-core/6.1" ); +} + +curl_wrapper::response curl_wrapper::request( curl_wrapper::http_request_method method, + const std::string& url, + const std::string& auth, + const std::string& query ) const +{ + curl_wrapper::response resp; + + // Note: the variable curl has a long lifetime, it only gets initialized once, then be used many times, + // thus we need to clear old data + + // Note: host and auth are always the same in the program, ideally we don't need to set them every time + curl_easy_setopt( curl.get(), CURLOPT_URL, url.c_str() ); + if( !auth.empty() ) + curl_easy_setopt( curl.get(), CURLOPT_USERPWD, auth.c_str() ); + + // 
Empty for GET, POST or HEAD, non-empty for DELETE or PUT + static const std::vector http_request_method_custom_str = { + "", // GET + "", // POST + "", // HEAD + "PUT", + "DELETE", + "PATCH", + "OPTIONS" + }; + const auto& custom_request = http_request_method_custom_str[static_cast(method)]; + const auto* p_custom_request = custom_request.empty() ? NULL : custom_request.c_str(); + curl_easy_setopt( curl.get(), CURLOPT_CUSTOMREQUEST, p_custom_request ); + + if( curl_wrapper::http_request_method::POST == method + || curl_wrapper::http_request_method::PUT == method ) + { + curl_easy_setopt( curl.get(), CURLOPT_HTTPGET, false ); + curl_easy_setopt( curl.get(), CURLOPT_POST, true ); + curl_easy_setopt( curl.get(), CURLOPT_POSTFIELDS, query.c_str() ); + } + else // GET or DELETE (only these are used in this file) + { + curl_easy_setopt( curl.get(), CURLOPT_POSTFIELDS, NULL ); + curl_easy_setopt( curl.get(), CURLOPT_POST, false ); + curl_easy_setopt( curl.get(), CURLOPT_HTTPGET, true ); + } + + curl_easy_setopt( curl.get(), CURLOPT_WRITEFUNCTION, curl_write_function ); + curl_easy_setopt( curl.get(), CURLOPT_WRITEDATA, (void *)(&resp.content) ); + curl_easy_perform( curl.get() ); + + curl_easy_getinfo( curl.get(), CURLINFO_RESPONSE_CODE, &resp.code ); + + return resp; +} + +bool es_client::check_status() const +{ + const auto response = curl.get( base_url + "_nodes", auth ); + + // Note: response.code is ignored here + return !response.content.empty(); +} + +std::string es_client::get_version() const +{ try { + const auto response = curl.get( base_url, auth ); + if( response.code != 200 ) + FC_THROW( "Error on es_client::get_version(): code = ${code}, message = ${message} ", + ("code", response.code) ("message", response.content) ); + + fc::variant content = fc::json::from_string( response.content ); + return content["version"]["number"].as_string(); +} FC_CAPTURE_AND_RETHROW() } + +void es_client::check_version_7_or_above( bool& result ) const noexcept +{ + static const int64_t version_7 = 7; + try { + const auto es_version = get_version(); + auto dot_pos = es_version.find('.'); + result = ( std::stoi(es_version.substr(0,dot_pos)) >= version_7 ); + } + catch( ... 
) + { + wlog( "Unable to get ES version, assuming it is 7 or above" ); + result = true; + } +} + +bool es_client::send_bulk( const std::vector& bulk_lines ) const +{ + auto bulk_str = boost::algorithm::join( bulk_lines, "\n" ) + "\n"; + const auto response = curl.post( base_url + "_bulk", auth, bulk_str ); + + return handle_bulk_response( response.code, response.content ); +} + +bool es_client::del( const std::string& path ) const +{ + const auto response = curl.del( base_url + path, auth ); + + // Note: response.code is ignored here + return !response.content.empty(); +} + +std::string es_client::get( const std::string& path ) const +{ + const auto response = curl.get( base_url + path, auth ); + + // Note: response.code is ignored here + return response.content; +} + +std::string es_client::query( const std::string& path, const std::string& query ) const +{ + const auto response = curl.post( base_url + path, auth, query ); + + // Note: response.code is ignored here + return response.content; +} + fc::variant es_data_adaptor::adapt(const fc::variant_object& op) { fc::mutable_variant_object o(op); diff --git a/libraries/utilities/include/graphene/utilities/elasticsearch.hpp b/libraries/utilities/include/graphene/utilities/elasticsearch.hpp index e64f59719d..7eac6e6ee4 100644 --- a/libraries/utilities/include/graphene/utilities/elasticsearch.hpp +++ b/libraries/utilities/include/graphene/utilities/elasticsearch.hpp @@ -30,10 +30,95 @@ #include #include -size_t WriteCallback(void *contents, size_t size, size_t nmemb, void *userp); - namespace graphene { namespace utilities { +class curl_wrapper +{ +public: + curl_wrapper(); + + // Note: the numbers are used in the request() function. If we need to update or add, please check the function + enum class http_request_method + { + GET = 0, + POST = 1, + HEAD = 2, + PUT = 3, + DELETE = 4, + PATCH = 5, + OPTIONS = 6 + }; + + struct response + { + long code; + std::string content; + }; + + response request( http_request_method method, + const std::string& url, + const std::string& auth, + const std::string& query ) const; + + response get( const std::string& url, const std::string& auth ) const + { return request( http_request_method::GET, url, auth, "" ); } + + response del( const std::string& url, const std::string& auth ) const + { return request( http_request_method::DELETE, url, auth, "" ); } + + response post( const std::string& url, const std::string& auth, const std::string& query ) const + { return request( http_request_method::POST, url, auth, query ); } + + response put( const std::string& url, const std::string& auth, const std::string& query ) const + { return request( http_request_method::PUT, url, auth, query ); } + +private: + + struct curl_deleter + { + void operator()( CURL* curl ) const + { + if( !curl ) + curl_easy_cleanup( curl ); + } + }; + + struct curl_slist_deleter + { + void operator()( curl_slist* slist ) const + { + if( !slist ) + curl_slist_free_all( slist ); + } + }; + + std::unique_ptr curl; + std::unique_ptr request_headers; +}; + +class es_client +{ +public: + es_client( const std::string& p_base_url, const std::string& p_auth ) : base_url(p_base_url), auth(p_auth) {} + + bool check_status() const; + std::string get_version() const; + void check_version_7_or_above( bool& result ) const noexcept; + + bool send_bulk( const std::vector& bulk_lines ) const; + bool del( const std::string& path ) const; + std::string get( const std::string& path ) const; + std::string query( const std::string& path, const std::string& query 
) const; +private: + std::string base_url; + std::string auth; + curl_wrapper curl; + //std::string index_prefix; // bitshares-, objects- + //std::string endpoint; // index_prefix + "*/_doc/_search"; + //std::string query; // json + //std::vector bulk_lines; +}; + class ES { public: CURL *curl; @@ -54,17 +139,16 @@ namespace graphene { namespace utilities { }; bool SendBulk(ES&& es); - std::vector createBulk(const fc::mutable_variant_object& bulk_header, std::string&& data); bool checkES(ES& es); std::string getESVersion(ES& es); void checkESVersion7OrAbove(ES& es, bool& result) noexcept; std::string simpleQuery(ES& es); bool deleteAll(ES& es); - bool handleBulkResponse(long http_code, const std::string& CurlReadBuffer); std::string getEndPoint(ES& es); + std::string doCurl(CurlRequest& curl); - std::string joinBulkLines(const std::vector& bulk); - long getResponseCode(CURL *handler); + + std::vector createBulk(const fc::mutable_variant_object& bulk_header, std::string&& data); struct es_data_adaptor { From 71b792afb551c0a7a2c4003aa5e709060cc59089 Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 6 Jan 2022 17:01:41 +0000 Subject: [PATCH 093/338] Refactor program options code for ES op his plugin --- .../elasticsearch/elasticsearch_plugin.cpp | 155 +++++++++--------- .../elasticsearch/elasticsearch_plugin.hpp | 2 +- 2 files changed, 80 insertions(+), 77 deletions(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index 570e902674..d095d3fbd6 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -30,6 +30,8 @@ #include +#include + namespace graphene { namespace elasticsearch { namespace detail @@ -46,6 +48,29 @@ class elasticsearch_plugin_impl } virtual ~elasticsearch_plugin_impl(); + private: + friend class graphene::elasticsearch::elasticsearch_plugin; + + struct plugin_options + { + std::string elasticsearch_url = "http://localhost:9200/"; + std::string auth = ""; + uint32_t bulk_replay = 10000; + uint32_t bulk_sync = 100; + + std::string index_prefix = "bitshares-"; + + uint32_t start_es_after_block = 0; + + bool visitor = false; + bool operation_object = true; + bool operation_string = false; + + mode elasticsearch_mode = mode::only_save; + + void init(const boost::program_options::variables_map& options); + }; + bool update_account_histories( const signed_block& b ); graphene::chain::database& database() @@ -53,28 +78,18 @@ class elasticsearch_plugin_impl return _self.database(); } - friend class graphene::elasticsearch::elasticsearch_plugin; - - private: elasticsearch_plugin& _self; + plugin_options _options; + primary_index< operation_history_index >* _oho_index; - std::string _elasticsearch_node_url = "http://localhost:9200/"; - uint32_t _elasticsearch_bulk_replay = 10000; - uint32_t _elasticsearch_bulk_sync = 100; - bool _elasticsearch_visitor = false; - std::string _elasticsearch_basic_auth = ""; - std::string _elasticsearch_index_prefix = "bitshares-"; - bool _elasticsearch_operation_object = true; - uint32_t _elasticsearch_start_es_after_block = 0; - bool _elasticsearch_operation_string = false; - mode _elasticsearch_mode = mode::only_save; + uint32_t limit_documents = _options.bulk_replay; + CURL *curl; // curl handler vector bulk_lines; // vector of op lines vector prepare; graphene::utilities::ES es; - uint32_t limit_documents; int16_t op_type; operation_history_struct os; block_struct bs; @@ -113,19 +128,19 @@ 
elasticsearch_plugin_impl::~elasticsearch_plugin_impl() } static std::string generateIndexName( const fc::time_point_sec& block_date, - const std::string& _elasticsearch_index_prefix ) + const std::string& index_prefix ) { auto block_date_string = block_date.to_iso_string(); std::vector parts; boost::split(parts, block_date_string, boost::is_any_of("-")); - std::string index_name = _elasticsearch_index_prefix + parts[0] + "-" + parts[1]; + std::string index_name = index_prefix + parts[0] + "-" + parts[1]; return index_name; } bool elasticsearch_plugin_impl::update_account_histories( const signed_block& b ) { checkState(b.timestamp); - index_name = generateIndexName(b.timestamp, _elasticsearch_index_prefix); + index_name = generateIndexName(b.timestamp, _options.index_prefix); graphene::chain::database& db = database(); const vector >& hist = db.get_applied_operations(); @@ -168,7 +183,7 @@ bool elasticsearch_plugin_impl::update_account_histories( const signed_block& b getOperationType(oho); doOperationHistory(oho); doBlock(oho->trx_in_block, b); - if(_elasticsearch_visitor) + if(_options.visitor) doVisitor(oho); const operation_history_object& op = *o_op; @@ -247,12 +262,12 @@ void elasticsearch_plugin_impl::checkState(const fc::time_point_sec& block_time) { if((fc::time_point::now() - block_time) < fc::seconds(30)) { - limit_documents = _elasticsearch_bulk_sync; + limit_documents = _options.bulk_sync; is_sync = true; } else { - limit_documents = _elasticsearch_bulk_replay; + limit_documents = _options.bulk_replay; is_sync = false; } } @@ -270,7 +285,7 @@ void elasticsearch_plugin_impl::doOperationHistory(const optional result); os.virtual_op = oho->virtual_op; - if(_elasticsearch_operation_object) { + if(_options.operation_object) { // op oho->op.visit(fc::from_static_variant(os.op_object, FC_PACK_MAX_DEPTH)); os.op_object = graphene::utilities::es_data_adaptor::adapt( os.op_object.get_object() ); @@ -279,7 +294,7 @@ void elasticsearch_plugin_impl::doOperationHistory(const optional result, v, FC_PACK_MAX_DEPTH ); os.operation_result_object = graphene::utilities::es_data_adaptor::adapt_static_variant( v.get_array() ); } - if(_elasticsearch_operation_string) + if(_options.operation_string) os.op = fc::json::to_string(oho->op); } FC_CAPTURE_LOG_AND_RETHROW( (oho) ) } @@ -402,7 +417,7 @@ bool elasticsearch_plugin_impl::add_elasticsearch( const account_id_type account const auto &ath = addNewEntry(stats_obj, account_id, oho); growStats(stats_obj, ath); - if(block_number > _elasticsearch_start_es_after_block) { + if(block_number > _options.start_es_after_block) { createBulkLine(ath); prepareBulk(ath.id); } @@ -470,20 +485,18 @@ void elasticsearch_plugin_impl::createBulkLine(const account_transaction_history bulk_line_struct.operation_type = op_type; bulk_line_struct.operation_id_num = ath.operation_id.instance.value; bulk_line_struct.block_data = bs; - if(_elasticsearch_visitor) + if(_options.visitor) bulk_line_struct.additional_data = vs; bulk_line = fc::json::to_string(bulk_line_struct, fc::json::legacy_generator); } void elasticsearch_plugin_impl::prepareBulk(const account_transaction_history_id_type& ath_id) { - const std::string _id = fc::json::to_string(ath_id); fc::mutable_variant_object bulk_header; bulk_header["_index"] = index_name; - if(!is_es_version_7_or_above) + if( !is_es_version_7_or_above ) bulk_header["_type"] = "_doc"; - bulk_header["_id"] = fc::to_string(ath_id.space_id) + "." + fc::to_string(ath_id.type_id) + "." 
- + fc::to_string(ath_id.instance.value); + bulk_header["_id"] = std::string( ath_id ); prepare = graphene::utilities::createBulk(bulk_header, std::move(bulk_line)); std::move(prepare.begin(), prepare.end(), std::back_inserter(bulk_lines)); prepare.clear(); @@ -523,9 +536,9 @@ void elasticsearch_plugin_impl::populateESstruct() { es.curl = curl; es.bulk_lines = std::move(bulk_lines); - es.elasticsearch_url = _elasticsearch_node_url; - es.auth = _elasticsearch_basic_auth; - es.index_prefix = _elasticsearch_index_prefix; + es.elasticsearch_url = _options.elasticsearch_url; + es.auth = _options.auth; + es.index_prefix = _options.index_prefix; es.endpoint = ""; es.query = ""; } @@ -582,53 +595,43 @@ void elasticsearch_plugin::plugin_set_program_options( void detail::elasticsearch_plugin_impl::init_program_options(const boost::program_options::variables_map& options) { - if (options.count("elasticsearch-node-url") > 0) { - _elasticsearch_node_url = options["elasticsearch-node-url"].as(); - } - if (options.count("elasticsearch-bulk-replay") > 0) { - _elasticsearch_bulk_replay = options["elasticsearch-bulk-replay"].as(); - } - if (options.count("elasticsearch-bulk-sync") > 0) { - _elasticsearch_bulk_sync = options["elasticsearch-bulk-sync"].as(); - } - if (options.count("elasticsearch-visitor") > 0) { - _elasticsearch_visitor = options["elasticsearch-visitor"].as(); - } - if (options.count("elasticsearch-basic-auth") > 0) { - _elasticsearch_basic_auth = options["elasticsearch-basic-auth"].as(); - } - if (options.count("elasticsearch-index-prefix") > 0) { - _elasticsearch_index_prefix = options["elasticsearch-index-prefix"].as(); - } - if (options.count("elasticsearch-operation-object") > 0) { - _elasticsearch_operation_object = options["elasticsearch-operation-object"].as(); - } - if (options.count("elasticsearch-start-es-after-block") > 0) { - _elasticsearch_start_es_after_block = options["elasticsearch-start-es-after-block"].as(); - } - if (options.count("elasticsearch-operation-string") > 0) { - _elasticsearch_operation_string = options["elasticsearch-operation-string"].as(); - } - if (options.count("elasticsearch-mode") > 0) { - const auto option_number = options["elasticsearch-mode"].as(); - if(option_number > mode::all) - FC_THROW_EXCEPTION(graphene::chain::plugin_exception, "Elasticsearch mode not valid"); - _elasticsearch_mode = static_cast(options["elasticsearch-mode"].as()); + _options.init( options ); +} + +void detail::elasticsearch_plugin_impl::plugin_options::init(const boost::program_options::variables_map& options) +{ + utilities::get_program_option( options, "elasticsearch-node-url", elasticsearch_url ); + utilities::get_program_option( options, "elasticsearch-basic-auth", auth ); + utilities::get_program_option( options, "elasticsearch-bulk-replay", bulk_replay ); + utilities::get_program_option( options, "elasticsearch-bulk-sync", bulk_sync ); + utilities::get_program_option( options, "elasticsearch-index-prefix", index_prefix ); + utilities::get_program_option( options, "elasticsearch-start-es-after-block", start_es_after_block ); + utilities::get_program_option( options, "elasticsearch-visitor", visitor ); + utilities::get_program_option( options, "elasticsearch-operation-object", operation_object ); + utilities::get_program_option( options, "elasticsearch-operation-string", operation_string ); + + auto es_mode = static_cast( elasticsearch_mode ); + utilities::get_program_option( options, "elasticsearch-mode", es_mode ); + if( es_mode > static_cast( mode::all ) ) + 
FC_THROW_EXCEPTION( graphene::chain::plugin_exception, "Elasticsearch mode not valid" ); + elasticsearch_mode = static_cast( es_mode ); + + if( mode::all == elasticsearch_mode && !operation_string ) + { + FC_THROW_EXCEPTION( graphene::chain::plugin_exception, + "If elasticsearch-mode is set to all then elasticsearch-operation-string need to be true"); } } void elasticsearch_plugin::plugin_initialize(const boost::program_options::variables_map& options) { - my->_oho_index = database().add_index< primary_index< operation_history_index > >(); - database().add_index< primary_index< account_transaction_history_index > >(); - my->init_program_options( options ); - if(my->_elasticsearch_mode != mode::only_query) { - if (my->_elasticsearch_mode == mode::all && !my->_elasticsearch_operation_string) - FC_THROW_EXCEPTION(graphene::chain::plugin_exception, - "If elasticsearch-mode is set to all then elasticsearch-operation-string need to be true"); + my->_oho_index = database().add_index< primary_index< operation_history_index > >(); + database().add_index< primary_index< account_transaction_history_index > >(); + if( my->_options.elasticsearch_mode != mode::only_query ) + { database().applied_block.connect([this](const signed_block &b) { if (!my->update_account_histories(b)) FC_THROW_EXCEPTION(graphene::chain::plugin_exception, @@ -638,11 +641,11 @@ void elasticsearch_plugin::plugin_initialize(const boost::program_options::varia graphene::utilities::ES es; es.curl = my->curl; - es.elasticsearch_url = my->_elasticsearch_node_url; - es.auth = my->_elasticsearch_basic_auth; + es.elasticsearch_url = my->_options.elasticsearch_url; + es.auth = my->_options.auth; if(!graphene::utilities::checkES(es)) - FC_THROW( "ES database is not up in url ${url}", ("url", my->_elasticsearch_node_url) ); + FC_THROW( "ES database is not up in url ${url}", ("url", my->_options.elasticsearch_url) ); graphene::utilities::checkESVersion7OrAbove(es, my->is_es_version_7_or_above); } @@ -764,8 +767,8 @@ graphene::utilities::ES elasticsearch_plugin::prepareHistoryQuery(string query) graphene::utilities::ES es; es.curl = curl; - es.elasticsearch_url = my->_elasticsearch_node_url; - es.index_prefix = my->_elasticsearch_index_prefix; + es.elasticsearch_url = my->_options.elasticsearch_url; + es.index_prefix = my->_options.index_prefix; es.endpoint = es.index_prefix + "*/_doc/_search"; es.query = query; @@ -774,7 +777,7 @@ graphene::utilities::ES elasticsearch_plugin::prepareHistoryQuery(string query) mode elasticsearch_plugin::get_running_mode() { - return my->_elasticsearch_mode; + return my->_options.elasticsearch_mode; } } } diff --git a/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp b/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp index fbc012acba..3e6797e9f0 100644 --- a/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp +++ b/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp @@ -50,7 +50,7 @@ namespace detail class elasticsearch_plugin_impl; } -enum mode { only_save = 0 , only_query = 1, all = 2 }; +enum class mode { only_save = 0 , only_query = 1, all = 2 }; class elasticsearch_plugin : public graphene::app::plugin { From c9081889a12036258e29ba3d0d826976d8d0e031 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 10 Jan 2022 17:17:19 +0000 Subject: [PATCH 094/338] Refactor ES op his plugin code with new utilities --- .../plugins/elasticsearch/CMakeLists.txt | 12 +- 
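The plugin_options::init hunk above replaces the long chain of options.count(...) checks with calls to utilities::get_program_option. The helper itself is not shown in this patch, so the following is only an illustrative sketch of the idea (the header, namespace placement and exact signature are assumptions): a small templated reader that copies an option into the target variable only when the operator actually supplied it, so the defaults assigned inside plugin_options survive otherwise.

   #include <boost/program_options.hpp>
   #include <string>

   namespace graphene { namespace utilities {

   // Sketch only: overwrite 'to' with the parsed option value when present,
   // otherwise leave the caller's default untouched.
   template <typename T>
   void get_program_option( const boost::program_options::variables_map& from,
                            const std::string& key, T& to )
   {
      if( from.count( key ) > 0 )
         to = from[key].as<T>();
   }

   } } // namespace graphene::utilities

Used that way, elasticsearch_url keeps "http://localhost:9200/", bulk_replay keeps 10000, and so on, unless the command line or the config file says otherwise.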
.../elasticsearch/elasticsearch_plugin.cpp | 311 ++++++------------ .../elasticsearch/elasticsearch_plugin.hpp | 6 +- 3 files changed, 105 insertions(+), 224 deletions(-) diff --git a/libraries/plugins/elasticsearch/CMakeLists.txt b/libraries/plugins/elasticsearch/CMakeLists.txt index 0d00b84eeb..971de3a191 100644 --- a/libraries/plugins/elasticsearch/CMakeLists.txt +++ b/libraries/plugins/elasticsearch/CMakeLists.txt @@ -4,20 +4,12 @@ add_library( graphene_elasticsearch elasticsearch_plugin.cpp ) -find_curl() - -include_directories(${CURL_INCLUDE_DIRS}) if(MSVC) set_source_files_properties(elasticsearch_plugin.cpp PROPERTIES COMPILE_FLAGS "/bigobj" ) endif(MSVC) -if(CURL_STATICLIB) - SET_TARGET_PROPERTIES(graphene_elasticsearch PROPERTIES - COMPILE_DEFINITIONS "CURL_STATICLIB") -endif(CURL_STATICLIB) -target_link_libraries( graphene_elasticsearch graphene_chain graphene_app ${CURL_LIBRARIES} ) +target_link_libraries( graphene_elasticsearch graphene_chain graphene_app ) target_include_directories( graphene_elasticsearch - PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" - PUBLIC "${CURL_INCLUDE_DIR}" ) + PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) install( TARGETS diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index d095d3fbd6..746829d4eb 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -26,7 +26,6 @@ #include #include #include -#include #include @@ -42,11 +41,7 @@ class elasticsearch_plugin_impl public: explicit elasticsearch_plugin_impl(elasticsearch_plugin& _plugin) : _self( _plugin ) - { - curl = curl_easy_init(); - curl_easy_setopt(curl, CURLOPT_SSLVERSION, CURL_SSLVERSION_TLSv1_2); - } - virtual ~elasticsearch_plugin_impl(); + { } private: friend class graphene::elasticsearch::elasticsearch_plugin; @@ -71,7 +66,7 @@ class elasticsearch_plugin_impl void init(const boost::program_options::variables_map& options); }; - bool update_account_histories( const signed_block& b ); + void update_account_histories( const signed_block& b ); graphene::chain::database& database() { @@ -85,48 +80,32 @@ class elasticsearch_plugin_impl uint32_t limit_documents = _options.bulk_replay; - CURL *curl; // curl handler + std::unique_ptr es; + vector bulk_lines; // vector of op lines - vector prepare; - graphene::utilities::ES es; int16_t op_type; operation_history_struct os; block_struct bs; visitor_struct vs; - bulk_struct bulk_line_struct; - std::string bulk_line; std::string index_name; bool is_sync = false; bool is_es_version_7_or_above = true; - bool add_elasticsearch( const account_id_type account_id, const optional& oho, - const uint32_t block_number ); - const account_transaction_history_object& addNewEntry(const account_statistics_object& stats_obj, - const account_id_type& account_id, - const optional & oho); - const account_statistics_object& getStatsObject(const account_id_type& account_id); - void growStats(const account_statistics_object& stats_obj, const account_transaction_history_object& ath); + void add_elasticsearch( const account_id_type& account_id, const optional& oho, + uint32_t block_number ); + void send_bulk(); + void getOperationType(const optional & oho); void doOperationHistory(const optional & oho); void doBlock(uint32_t trx_in_block, const signed_block& b); void doVisitor(const optional & oho); void checkState(const fc::time_point_sec& block_time); void cleanObjects(const account_transaction_history_id_type& 
ath, const account_id_type& account_id); - void createBulkLine(const account_transaction_history_object& ath); - void prepareBulk(const account_transaction_history_id_type& ath_id); - void populateESstruct(); + void init_program_options(const boost::program_options::variables_map& options); }; -elasticsearch_plugin_impl::~elasticsearch_plugin_impl() -{ - if (curl) { - curl_easy_cleanup(curl); - curl = nullptr; - } -} - static std::string generateIndexName( const fc::time_point_sec& block_date, const std::string& index_prefix ) { @@ -137,7 +116,7 @@ static std::string generateIndexName( const fc::time_point_sec& block_date, return index_name; } -bool elasticsearch_plugin_impl::update_account_histories( const signed_block& b ) +void elasticsearch_plugin_impl::update_account_histories( const signed_block& b ) { checkState(b.timestamp); index_name = generateIndexName(b.timestamp, _options.index_prefix); @@ -221,41 +200,34 @@ bool elasticsearch_plugin_impl::update_account_histories( const signed_block& b for( auto& account_id : impacted ) { - if(!add_elasticsearch( account_id, oho, b.block_num() )) - { - elog( "Error adding data to Elastic Search: block num ${b}, account ${a}, data ${d}", - ("b",b.block_num()) ("a",account_id) ("d", oho) ); - return false; - } + // Note: we send bulk if there are too many items in bulk_lines + add_elasticsearch( account_id, oho, b.block_num() ); } + } + // we send bulk at end of block when we are in sync for better real time client experience - if(is_sync) + if( is_sync && !bulk_lines.empty() ) + send_bulk(); + +} + +void elasticsearch_plugin_impl::send_bulk() +{ + if( !es->send_bulk( bulk_lines ) ) { - populateESstruct(); - if(es.bulk_lines.size() > 0) + elog( "Error sending ${n} lines of bulk data to ElasticSearch, the first lines are:", + ("n",bulk_lines.size()) ); + const auto log_max = std::min( bulk_lines.size(), size_t(10) ); + for( size_t i = 0; i < log_max; ++i ) { - prepare.clear(); - if(!graphene::utilities::SendBulk(std::move(es))) - { - // Note: although called with `std::move()`, `es` is not updated in `SendBulk()` - elog( "Error sending ${n} lines of bulk data to Elastic Search, the first lines are:", - ("n",es.bulk_lines.size()) ); - for( size_t i = 0; i < es.bulk_lines.size() && i < 10; ++i ) - { - edump( (es.bulk_lines[i]) ); - } - return false; - } - else - bulk_lines.clear(); + edump( (bulk_lines[i]) ); } + FC_THROW_EXCEPTION( graphene::chain::plugin_exception, + "Error populating ES database, we are going to keep trying." 
); } - - if(bulk_lines.size() != limit_documents) - bulk_lines.reserve(limit_documents); - - return true; + bulk_lines.clear(); + bulk_lines.reserve(limit_documents); } void elasticsearch_plugin_impl::checkState(const fc::time_point_sec& block_time) @@ -270,6 +242,7 @@ void elasticsearch_plugin_impl::checkState(const fc::time_point_sec& block_time) limit_documents = _options.bulk_replay; is_sync = false; } + bulk_lines.reserve(limit_documents); } void elasticsearch_plugin_impl::getOperationType(const optional & oho) @@ -409,97 +382,51 @@ void elasticsearch_plugin_impl::doVisitor(const optional & oho, - const uint32_t block_number) -{ - const auto &stats_obj = getStatsObject(account_id); - const auto &ath = addNewEntry(stats_obj, account_id, oho); - growStats(stats_obj, ath); - - if(block_number > _options.start_es_after_block) { - createBulkLine(ath); - prepareBulk(ath.id); - } - cleanObjects(ath.id, account_id); - - if (curl && bulk_lines.size() >= limit_documents) { // we are in bulk time, ready to add data to elasticsearech - prepare.clear(); - populateESstruct(); - if(!graphene::utilities::SendBulk(std::move(es))) - { - // Note: although called with `std::move()`, `es` is not updated in `SendBulk()` - elog( "Error sending ${n} lines of bulk data to Elastic Search, the first lines are:", - ("n",es.bulk_lines.size()) ); - for( size_t i = 0; i < es.bulk_lines.size() && i < 10; ++i ) - { - edump( (es.bulk_lines[i]) ); - } - return false; - } - else - bulk_lines.clear(); - } - - return true; -} - -const account_statistics_object& elasticsearch_plugin_impl::getStatsObject(const account_id_type& account_id) +void elasticsearch_plugin_impl::add_elasticsearch( const account_id_type& account_id, + const optional& oho, + uint32_t block_number ) { graphene::chain::database& db = database(); - const auto &stats_obj = db.get_account_stats_by_owner(account_id); - return stats_obj; -} + const auto &stats_obj = db.get_account_stats_by_owner( account_id ); -const account_transaction_history_object& elasticsearch_plugin_impl::addNewEntry( - const account_statistics_object& stats_obj, - const account_id_type& account_id, - const optional & oho) -{ - graphene::chain::database& db = database(); - const auto &ath = db.create([&](account_transaction_history_object &obj) { + const auto &ath = db.create( + [&oho,&account_id,&stats_obj]( account_transaction_history_object &obj ) { obj.operation_id = oho->id; obj.account = account_id; obj.sequence = stats_obj.total_ops + 1; obj.next = stats_obj.most_recent_op; }); - return ath; -} - -void elasticsearch_plugin_impl::growStats(const account_statistics_object& stats_obj, - const account_transaction_history_object& ath) -{ - graphene::chain::database& db = database(); - db.modify(stats_obj, [&](account_statistics_object &obj) { + db.modify( stats_obj, [&ath]( account_statistics_object &obj ) { obj.most_recent_op = ath.id; obj.total_ops = ath.sequence; }); -} -void elasticsearch_plugin_impl::createBulkLine(const account_transaction_history_object& ath) -{ - bulk_line_struct.account_history = ath; - bulk_line_struct.operation_history = os; - bulk_line_struct.operation_type = op_type; - bulk_line_struct.operation_id_num = ath.operation_id.instance.value; - bulk_line_struct.block_data = bs; - if(_options.visitor) - bulk_line_struct.additional_data = vs; - bulk_line = fc::json::to_string(bulk_line_struct, fc::json::legacy_generator); -} - -void elasticsearch_plugin_impl::prepareBulk(const account_transaction_history_id_type& ath_id) -{ - fc::mutable_variant_object 
bulk_header; - bulk_header["_index"] = index_name; - if( !is_es_version_7_or_above ) - bulk_header["_type"] = "_doc"; - bulk_header["_id"] = std::string( ath_id ); - prepare = graphene::utilities::createBulk(bulk_header, std::move(bulk_line)); - std::move(prepare.begin(), prepare.end(), std::back_inserter(bulk_lines)); - prepare.clear(); + if( block_number > _options.start_es_after_block ) + { + bulk_struct bulk_line_struct; + bulk_line_struct.account_history = ath; + bulk_line_struct.operation_history = os; + bulk_line_struct.operation_type = op_type; + bulk_line_struct.operation_id_num = ath.operation_id.instance.value; + bulk_line_struct.block_data = bs; + if(_options.visitor) + bulk_line_struct.additional_data = vs; + auto bulk_line = fc::json::to_string(bulk_line_struct, fc::json::legacy_generator); + + fc::mutable_variant_object bulk_header; + bulk_header["_index"] = index_name; + if( !is_es_version_7_or_above ) + bulk_header["_type"] = "_doc"; + bulk_header["_id"] = std::string( ath.id ); + auto prepare = graphene::utilities::createBulk(bulk_header, std::move(bulk_line)); + std::move(prepare.begin(), prepare.end(), std::back_inserter(bulk_lines)); + + if( bulk_lines.size() >= limit_documents ) + send_bulk(); + } + cleanObjects(ath.id, account_id); } void elasticsearch_plugin_impl::cleanObjects( const account_transaction_history_id_type& ath_id, @@ -532,17 +459,6 @@ void elasticsearch_plugin_impl::cleanObjects( const account_transaction_history_ } } -void elasticsearch_plugin_impl::populateESstruct() -{ - es.curl = curl; - es.bulk_lines = std::move(bulk_lines); - es.elasticsearch_url = _options.elasticsearch_url; - es.auth = _options.auth; - es.index_prefix = _options.index_prefix; - es.endpoint = ""; - es.query = ""; -} - } // end namespace detail elasticsearch_plugin::elasticsearch_plugin(graphene::app::application& app) : @@ -596,6 +512,12 @@ void elasticsearch_plugin::plugin_set_program_options( void detail::elasticsearch_plugin_impl::init_program_options(const boost::program_options::variables_map& options) { _options.init( options ); + + es = std::make_unique( _options.elasticsearch_url, _options.auth ); + + FC_ASSERT( es->check_status(), "ES database is not up in url ${url}", ("url", _options.elasticsearch_url) ); + + es->check_version_7_or_above( is_es_version_7_or_above ); } void detail::elasticsearch_plugin_impl::plugin_options::init(const boost::program_options::variables_map& options) @@ -633,21 +555,9 @@ void elasticsearch_plugin::plugin_initialize(const boost::program_options::varia if( my->_options.elasticsearch_mode != mode::only_query ) { database().applied_block.connect([this](const signed_block &b) { - if (!my->update_account_histories(b)) - FC_THROW_EXCEPTION(graphene::chain::plugin_exception, - "Error populating ES database, we are going to keep trying."); + my->update_account_histories(b); }); } - - graphene::utilities::ES es; - es.curl = my->curl; - es.elasticsearch_url = my->_options.elasticsearch_url; - es.auth = my->_options.auth; - - if(!graphene::utilities::checkES(es)) - FC_THROW( "ES database is not up in url ${url}", ("url", my->_options.elasticsearch_url) ); - - graphene::utilities::checkESVersion7OrAbove(es, my->is_es_version_7_or_above); } void elasticsearch_plugin::plugin_startup() @@ -655,6 +565,27 @@ void elasticsearch_plugin::plugin_startup() // Nothing to do } +static operation_history_object fromEStoOperation(const variant& source) +{ + operation_history_object result; + + const auto operation_id = source["account_history"]["operation_id"]; + 
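With populateESstruct and the raw CURL handle removed, every ElasticSearch interaction in the plugin now goes through the es_client interface declared earlier (check_status, send_bulk, query, get, del). A self-contained usage sketch of that interface follows; the node URL, empty auth string, index name, document id and document bodies are placeholder assumptions rather than values taken from the plugin.

   #include <graphene/utilities/elasticsearch.hpp>

   #include <iostream>
   #include <string>
   #include <vector>

   int main()
   {
      graphene::utilities::es_client es( "http://localhost:9200/", "" );

      if( !es.check_status() )
      {
         std::cerr << "ES node is not reachable" << std::endl;
         return 1;
      }

      // _bulk takes newline-delimited JSON: one action/metadata line followed
      // by the document itself, repeated per document. send_bulk() joins the
      // lines with '\n' and POSTs them to <base_url>_bulk.
      std::vector<std::string> bulk_lines;
      bulk_lines.push_back( R"({ "index" : { "_index" : "bitshares-2022-01", "_id" : "2.9.12345" } })" );
      bulk_lines.push_back( R"({ "block_data" : { "block_num" : 1 }, "operation_type" : 0 })" );

      if( !es.send_bulk( bulk_lines ) )
         std::cerr << "bulk indexing failed" << std::endl;

      // Search the monthly indexes the plugin writes to (default prefix "bitshares-")
      const auto response = es.query( "bitshares-*/_doc/_search", R"({ "size": 1 })" );
      std::cout << response << std::endl;
      return 0;
   }

On ElasticSearch 6 the metadata line would additionally carry "_type": "_doc", which is exactly what the plugin does when is_es_version_7_or_above is false.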
fc::from_variant( operation_id, result.id, GRAPHENE_MAX_NESTED_OBJECTS ); + + const auto op = fc::json::from_string(source["operation_history"]["op"].as_string()); + fc::from_variant( op, result.op, GRAPHENE_MAX_NESTED_OBJECTS ); + + const auto operation_result = fc::json::from_string(source["operation_history"]["operation_result"].as_string()); + fc::from_variant( operation_result, result.result, GRAPHENE_MAX_NESTED_OBJECTS ); + + result.block_num = source["block_data"]["block_num"].as_uint64(); + result.trx_in_block = source["operation_history"]["trx_in_block"].as_uint64(); + result.op_in_trx = source["operation_history"]["op_in_trx"].as_uint64(); + result.trx_in_block = source["operation_history"]["virtual_op"].as_uint64(); + + return result; +} + operation_history_object elasticsearch_plugin::get_operation_by_id(operation_history_id_type id) { const string operation_id_string = std::string(object_id_type(id)); @@ -670,8 +601,7 @@ operation_history_object elasticsearch_plugin::get_operation_by_id(operation_his } )"; - auto es = prepareHistoryQuery(query); - const auto response = graphene::utilities::simpleQuery(es); + const auto response = my->es->query( my->_options.index_prefix + "*/_doc/_search", query ); variant variant_response = fc::json::from_string(response); const auto source = variant_response["hits"]["hits"][size_t(0)]["_source"]; return fromEStoOperation(source); @@ -712,69 +642,32 @@ vector elasticsearch_plugin::get_account_history( } )"; - auto es = prepareHistoryQuery(query); - vector result; - if(!graphene::utilities::checkES(es)) + if( !my->es->check_status() ) return result; - const auto response = graphene::utilities::simpleQuery(es); + const auto response = my->es->query( my->_options.index_prefix + "*/_doc/_search", query ); + variant variant_response = fc::json::from_string(response); const auto hits = variant_response["hits"]["total"]; - uint32_t size; + size_t size; if( hits.is_object() ) // ES-7 ? 
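fromEStoOperation and the history queries below both walk a standard search response. For orientation, here is a trimmed example of the JSON that variant_response may hold (every value is made up); only the fields read by the code matter, and note the shape of hits.total, which is an object on ElasticSearch 7 but a bare number on ElasticSearch 6.

   {
     "hits": {
       "total": { "value": 2, "relation": "eq" },
       "hits": [
         { "_index": "bitshares-2022-01",
           "_source": {
             "account_history":   { "operation_id": "1.11.500" },
             "operation_history": { "op": "[0,{}]", "operation_result": "[0,{}]",
                                    "trx_in_block": 0, "op_in_trx": 0, "virtual_op": 1 },
             "block_data":        { "block_num": 1000 }
           }
         }
       ]
     }
   }

On ElasticSearch 6 the same response would report "total": 2, which is what the hits.is_object() check further down distinguishes.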
- size = static_cast(hits["value"].as_uint64()); + size = static_cast(hits["value"].as_uint64()); else // probably ES-6 - size = static_cast(hits.as_uint64()); - size = std::min( size, limit ); + size = static_cast(hits.as_uint64()); + size = std::min( size, size_t(limit) ); - for(unsigned i=0; i_options.elasticsearch_url; - es.index_prefix = my->_options.index_prefix; - es.endpoint = es.index_prefix + "*/_doc/_search"; - es.query = query; - - return es; -} - mode elasticsearch_plugin::get_running_mode() { return my->_options.elasticsearch_mode; diff --git a/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp b/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp index 3e6797e9f0..63e50619e6 100644 --- a/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp +++ b/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp @@ -71,12 +71,8 @@ class elasticsearch_plugin : public graphene::app::plugin operation_history_id_type stop, unsigned limit, operation_history_id_type start); mode get_running_mode(); - friend class detail::elasticsearch_plugin_impl; - std::unique_ptr my; - private: - operation_history_object fromEStoOperation(variant source); - graphene::utilities::ES prepareHistoryQuery(string query); + std::unique_ptr my; }; From 7dd739d6817e0c4be75094245cfb7533b2a42095 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 10 Jan 2022 22:42:29 +0000 Subject: [PATCH 095/338] Refactor curl_wrapper constructor --- libraries/utilities/elasticsearch.cpp | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/libraries/utilities/elasticsearch.cpp b/libraries/utilities/elasticsearch.cpp index 38cbddaa09..80690eee7b 100644 --- a/libraries/utilities/elasticsearch.cpp +++ b/libraries/utilities/elasticsearch.cpp @@ -222,18 +222,24 @@ std::string doCurl(CurlRequest& curl) return CurlReadBuffer; } -curl_wrapper::curl_wrapper() +static CURL* init_curl() { - curl.reset( curl_easy_init() ); - if( !curl ) - FC_THROW( "Unable to init cURL" ); - - curl_easy_setopt( curl.get(), CURLOPT_SSLVERSION, CURL_SSLVERSION_TLSv1_2 ); + CURL* curl = curl_easy_init(); + FC_ASSERT( curl, "Unable to init cURL" ); + return curl; +} - request_headers.reset( curl_slist_append( NULL, "Content-Type: application/json" ) ); - if( !request_headers ) - FC_THROW( "Unable to init cURL request headers" ); +static curl_slist* init_request_headers() +{ + curl_slist* request_headers = curl_slist_append( NULL, "Content-Type: application/json" ); + FC_ASSERT( request_headers, "Unable to init cURL request headers" ); + return request_headers; +} +curl_wrapper::curl_wrapper() +: curl( init_curl() ), request_headers( init_request_headers() ) +{ + curl_easy_setopt( curl.get(), CURLOPT_SSLVERSION, CURL_SSLVERSION_TLSv1_2 ); curl_easy_setopt( curl.get(), CURLOPT_HTTPHEADER, request_headers.get() ); curl_easy_setopt( curl.get(), CURLOPT_USERAGENT, "bitshares-core/6.1" ); } From 690452d4249ef9243fd31b9c38b73e6054717375 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 10 Jan 2022 22:48:39 +0000 Subject: [PATCH 096/338] Fix macOS build by getting around the 'long' type --- libraries/utilities/elasticsearch.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/utilities/elasticsearch.cpp b/libraries/utilities/elasticsearch.cpp index 80690eee7b..a8d489127f 100644 --- a/libraries/utilities/elasticsearch.cpp +++ b/libraries/utilities/elasticsearch.cpp @@ 
-309,7 +309,7 @@ std::string es_client::get_version() const const auto response = curl.get( base_url, auth ); if( response.code != 200 ) FC_THROW( "Error on es_client::get_version(): code = ${code}, message = ${message} ", - ("code", response.code) ("message", response.content) ); + ("code", int64_t(response.code)) ("message", response.content) ); fc::variant content = fc::json::from_string( response.content ); return content["version"]["number"].as_string(); From 9b82b1e019af6c57036860d5ca54ea2825e3567e Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 10 Jan 2022 22:52:52 +0000 Subject: [PATCH 097/338] Fix MinGW build by avoiding the DELETE keyword --- libraries/utilities/elasticsearch.cpp | 4 ++-- .../graphene/utilities/elasticsearch.hpp | 22 +++++++++---------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/libraries/utilities/elasticsearch.cpp b/libraries/utilities/elasticsearch.cpp index a8d489127f..330e5b2d71 100644 --- a/libraries/utilities/elasticsearch.cpp +++ b/libraries/utilities/elasticsearch.cpp @@ -273,8 +273,8 @@ curl_wrapper::response curl_wrapper::request( curl_wrapper::http_request_method const auto* p_custom_request = custom_request.empty() ? NULL : custom_request.c_str(); curl_easy_setopt( curl.get(), CURLOPT_CUSTOMREQUEST, p_custom_request ); - if( curl_wrapper::http_request_method::POST == method - || curl_wrapper::http_request_method::PUT == method ) + if( curl_wrapper::http_request_method::_POST == method + || curl_wrapper::http_request_method::_PUT == method ) { curl_easy_setopt( curl.get(), CURLOPT_HTTPGET, false ); curl_easy_setopt( curl.get(), CURLOPT_POST, true ); diff --git a/libraries/utilities/include/graphene/utilities/elasticsearch.hpp b/libraries/utilities/include/graphene/utilities/elasticsearch.hpp index 7eac6e6ee4..7cf8e584ff 100644 --- a/libraries/utilities/include/graphene/utilities/elasticsearch.hpp +++ b/libraries/utilities/include/graphene/utilities/elasticsearch.hpp @@ -40,13 +40,13 @@ class curl_wrapper // Note: the numbers are used in the request() function. 
If we need to update or add, please check the function enum class http_request_method { - GET = 0, - POST = 1, - HEAD = 2, - PUT = 3, - DELETE = 4, - PATCH = 5, - OPTIONS = 6 + _GET = 0, + _POST = 1, + _HEAD = 2, + _PUT = 3, + _DELETE = 4, + _PATCH = 5, + _OPTIONS = 6 }; struct response @@ -61,16 +61,16 @@ class curl_wrapper const std::string& query ) const; response get( const std::string& url, const std::string& auth ) const - { return request( http_request_method::GET, url, auth, "" ); } + { return request( http_request_method::_GET, url, auth, "" ); } response del( const std::string& url, const std::string& auth ) const - { return request( http_request_method::DELETE, url, auth, "" ); } + { return request( http_request_method::_DELETE, url, auth, "" ); } response post( const std::string& url, const std::string& auth, const std::string& query ) const - { return request( http_request_method::POST, url, auth, query ); } + { return request( http_request_method::_POST, url, auth, query ); } response put( const std::string& url, const std::string& auth, const std::string& query ) const - { return request( http_request_method::PUT, url, auth, query ); } + { return request( http_request_method::_PUT, url, auth, query ); } private: From b8f06efb41d4c12c44366b5f4b82612149eac7ac Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 11 Jan 2022 11:44:57 +0000 Subject: [PATCH 098/338] Set TLS version to v1.2 right after curl_easy_init --- libraries/utilities/elasticsearch.cpp | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/libraries/utilities/elasticsearch.cpp b/libraries/utilities/elasticsearch.cpp index 330e5b2d71..16568df42e 100644 --- a/libraries/utilities/elasticsearch.cpp +++ b/libraries/utilities/elasticsearch.cpp @@ -225,8 +225,12 @@ std::string doCurl(CurlRequest& curl) static CURL* init_curl() { CURL* curl = curl_easy_init(); - FC_ASSERT( curl, "Unable to init cURL" ); - return curl; + if( curl ) + { + curl_easy_setopt( curl, CURLOPT_SSLVERSION, CURL_SSLVERSION_TLSv1_2 ); + return curl; + } + FC_THROW( "Unable to init cURL" ); } static curl_slist* init_request_headers() @@ -239,7 +243,6 @@ static curl_slist* init_request_headers() curl_wrapper::curl_wrapper() : curl( init_curl() ), request_headers( init_request_headers() ) { - curl_easy_setopt( curl.get(), CURLOPT_SSLVERSION, CURL_SSLVERSION_TLSv1_2 ); curl_easy_setopt( curl.get(), CURLOPT_HTTPHEADER, request_headers.get() ); curl_easy_setopt( curl.get(), CURLOPT_USERAGENT, "bitshares-core/6.1" ); } From 03f1fd5442682bb8298bb121a3291d6e05f686d2 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 15 Jan 2022 17:13:41 +0000 Subject: [PATCH 099/338] Refactor ES utilities code to fix code smells --- libraries/utilities/elasticsearch.cpp | 35 +++++------ .../graphene/utilities/elasticsearch.hpp | 61 +++++++++++-------- 2 files changed, 52 insertions(+), 44 deletions(-) diff --git a/libraries/utilities/elasticsearch.cpp b/libraries/utilities/elasticsearch.cpp index 16568df42e..e5329df0fe 100644 --- a/libraries/utilities/elasticsearch.cpp +++ b/libraries/utilities/elasticsearch.cpp @@ -90,9 +90,9 @@ std::string simpleQuery(ES& es) return doCurl(curl_request); } -static bool handle_bulk_response( long http_code, const std::string& curl_read_buffer ) +static bool handle_bulk_response( uint16_t http_code, const std::string& curl_read_buffer ) { - if( 200 == http_code ) + if( curl_wrapper::http_response_code::HTTP_200 == http_code ) { // all good, but check errors in response fc::variant j = 
fc::json::from_string(curl_read_buffer); @@ -105,11 +105,11 @@ static bool handle_bulk_response( long http_code, const std::string& curl_read_b return true; } - if( 413 == http_code ) + if( curl_wrapper::http_response_code::HTTP_413 == http_code ) { elog( "413 error: Request too large. Can be low disk space. ${e}", ("e", curl_read_buffer) ); } - else if( 401 == http_code ) + else if( curl_wrapper::http_response_code::HTTP_401 == http_code ) { elog( "401 error: Unauthorized. ${e}", ("e", curl_read_buffer) ); } @@ -222,7 +222,7 @@ std::string doCurl(CurlRequest& curl) return CurlReadBuffer; } -static CURL* init_curl() +CURL* curl_wrapper::init_curl() { CURL* curl = curl_easy_init(); if( curl ) @@ -233,7 +233,7 @@ static CURL* init_curl() FC_THROW( "Unable to init cURL" ); } -static curl_slist* init_request_headers() +curl_slist* curl_wrapper::init_request_headers() { curl_slist* request_headers = curl_slist_append( NULL, "Content-Type: application/json" ); FC_ASSERT( request_headers, "Unable to init cURL request headers" ); @@ -241,18 +241,17 @@ static curl_slist* init_request_headers() } curl_wrapper::curl_wrapper() -: curl( init_curl() ), request_headers( init_request_headers() ) { curl_easy_setopt( curl.get(), CURLOPT_HTTPHEADER, request_headers.get() ); curl_easy_setopt( curl.get(), CURLOPT_USERAGENT, "bitshares-core/6.1" ); } -curl_wrapper::response curl_wrapper::request( curl_wrapper::http_request_method method, - const std::string& url, - const std::string& auth, - const std::string& query ) const +curl_wrapper::http_response curl_wrapper::request( curl_wrapper::http_request_method method, + const std::string& url, + const std::string& auth, + const std::string& query ) const { - curl_wrapper::response resp; + curl_wrapper::http_response resp; // Note: the variable curl has a long lifetime, it only gets initialized once, then be used many times, // thus we need to clear old data @@ -276,8 +275,8 @@ curl_wrapper::response curl_wrapper::request( curl_wrapper::http_request_method const auto* p_custom_request = custom_request.empty() ? 
NULL : custom_request.c_str(); curl_easy_setopt( curl.get(), CURLOPT_CUSTOMREQUEST, p_custom_request ); - if( curl_wrapper::http_request_method::_POST == method - || curl_wrapper::http_request_method::_PUT == method ) + if( curl_wrapper::http_request_method::HTTP_POST == method + || curl_wrapper::http_request_method::HTTP_PUT == method ) { curl_easy_setopt( curl.get(), CURLOPT_HTTPGET, false ); curl_easy_setopt( curl.get(), CURLOPT_POST, true ); @@ -294,7 +293,9 @@ curl_wrapper::response curl_wrapper::request( curl_wrapper::http_request_method curl_easy_setopt( curl.get(), CURLOPT_WRITEDATA, (void *)(&resp.content) ); curl_easy_perform( curl.get() ); - curl_easy_getinfo( curl.get(), CURLINFO_RESPONSE_CODE, &resp.code ); + long code; + curl_easy_getinfo( curl.get(), CURLINFO_RESPONSE_CODE, &code ); + resp.code = static_cast( code ); return resp; } @@ -310,9 +311,9 @@ bool es_client::check_status() const std::string es_client::get_version() const { try { const auto response = curl.get( base_url, auth ); - if( response.code != 200 ) + if( !response.is_200() ) FC_THROW( "Error on es_client::get_version(): code = ${code}, message = ${message} ", - ("code", int64_t(response.code)) ("message", response.content) ); + ("code", response.code) ("message", response.content) ); fc::variant content = fc::json::from_string( response.content ); return content["version"]["number"].as_string(); diff --git a/libraries/utilities/include/graphene/utilities/elasticsearch.hpp b/libraries/utilities/include/graphene/utilities/elasticsearch.hpp index 7cf8e584ff..f4b7df523f 100644 --- a/libraries/utilities/include/graphene/utilities/elasticsearch.hpp +++ b/libraries/utilities/include/graphene/utilities/elasticsearch.hpp @@ -40,40 +40,51 @@ class curl_wrapper // Note: the numbers are used in the request() function. 
If we need to update or add, please check the function enum class http_request_method { - _GET = 0, - _POST = 1, - _HEAD = 2, - _PUT = 3, - _DELETE = 4, - _PATCH = 5, - _OPTIONS = 6 + HTTP_GET = 0, + HTTP_POST = 1, + HTTP_HEAD = 2, + HTTP_PUT = 3, + HTTP_DELETE = 4, + HTTP_PATCH = 5, + HTTP_OPTIONS = 6 }; - struct response + struct http_response_code { - long code; + static constexpr uint16_t HTTP_200 = 200; + static constexpr uint16_t HTTP_401 = 401; + static constexpr uint16_t HTTP_413 = 413; + }; + + struct http_response + { + uint16_t code; std::string content; + bool is_200() const { return ( http_response_code::HTTP_200 == code ); } }; - response request( http_request_method method, - const std::string& url, - const std::string& auth, - const std::string& query ) const; + http_response request( http_request_method method, + const std::string& url, + const std::string& auth, + const std::string& query ) const; - response get( const std::string& url, const std::string& auth ) const - { return request( http_request_method::_GET, url, auth, "" ); } + http_response get( const std::string& url, const std::string& auth ) const + { return request( http_request_method::HTTP_GET, url, auth, "" ); } - response del( const std::string& url, const std::string& auth ) const - { return request( http_request_method::_DELETE, url, auth, "" ); } + http_response del( const std::string& url, const std::string& auth ) const + { return request( http_request_method::HTTP_DELETE, url, auth, "" ); } - response post( const std::string& url, const std::string& auth, const std::string& query ) const - { return request( http_request_method::_POST, url, auth, query ); } + http_response post( const std::string& url, const std::string& auth, const std::string& query ) const + { return request( http_request_method::HTTP_POST, url, auth, query ); } - response put( const std::string& url, const std::string& auth, const std::string& query ) const - { return request( http_request_method::_PUT, url, auth, query ); } + http_response put( const std::string& url, const std::string& auth, const std::string& query ) const + { return request( http_request_method::HTTP_PUT, url, auth, query ); } private: + static CURL* init_curl(); + static curl_slist* init_request_headers(); + struct curl_deleter { void operator()( CURL* curl ) const @@ -92,8 +103,8 @@ class curl_wrapper } }; - std::unique_ptr curl; - std::unique_ptr request_headers; + std::unique_ptr curl { init_curl() }; + std::unique_ptr request_headers { init_request_headers() }; }; class es_client @@ -113,10 +124,6 @@ class es_client std::string base_url; std::string auth; curl_wrapper curl; - //std::string index_prefix; // bitshares-, objects- - //std::string endpoint; // index_prefix + "*/_doc/_search"; - //std::string query; // json - //std::vector bulk_lines; }; class ES { From 9b9d588134bf975a1f452d5b39c7c59068ed72ff Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 15 Jan 2022 17:31:48 +0000 Subject: [PATCH 100/338] Fix code smells in ES op his plugin --- .../elasticsearch/elasticsearch_plugin.cpp | 29 ++++++++++--------- .../elasticsearch/elasticsearch_plugin.hpp | 11 ++++--- 2 files changed, 22 insertions(+), 18 deletions(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index 746829d4eb..b3f3910e6c 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -194,11 +194,11 @@ void 
elasticsearch_plugin_impl::update_account_histories( const signed_block& b } } - for( auto& a : other ) - for( auto& item : a.account_auths ) + for( const auto& a : other ) + for( const auto& item : a.account_auths ) impacted.insert( item.first ); - for( auto& account_id : impacted ) + for( const auto& account_id : impacted ) { // Note: we send bulk if there are too many items in bulk_lines add_elasticsearch( account_id, oho, b.block_num() ); @@ -586,7 +586,7 @@ static operation_history_object fromEStoOperation(const variant& source) return result; } -operation_history_object elasticsearch_plugin::get_operation_by_id(operation_history_id_type id) +operation_history_object elasticsearch_plugin::get_operation_by_id( const operation_history_id_type& id ) const { const string operation_id_string = std::string(object_id_type(id)); @@ -608,12 +608,12 @@ operation_history_object elasticsearch_plugin::get_operation_by_id(operation_his } vector elasticsearch_plugin::get_account_history( - const account_id_type account_id, - operation_history_id_type stop = operation_history_id_type(), - unsigned limit = 100, - operation_history_id_type start = operation_history_id_type()) + const account_id_type& account_id, + const operation_history_id_type& stop, + uint64_t limit, + const operation_history_id_type& start ) const { - const string account_id_string = std::string(object_id_type(account_id)); + const string account_id_string = std::string( account_id ); const auto stop_number = stop.instance.value; const auto start_number = start.instance.value; @@ -623,6 +623,7 @@ vector elasticsearch_plugin::get_account_history( range = " AND operation_id_num: ["+fc::to_string(stop_number)+" TO "+fc::to_string(start_number)+"]"; else if(stop_number > 0) range = " AND operation_id_num: {"+fc::to_string(stop_number)+" TO "+fc::to_string(start_number)+"]"; + // FIXME the code above is either redundant or buggy const string query = R"( { @@ -654,21 +655,21 @@ vector elasticsearch_plugin::get_account_history( const auto hits = variant_response["hits"]["total"]; size_t size; if( hits.is_object() ) // ES-7 ? 
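The FIXME above refers to the operation_id_num clause appended to the query. In Lucene range syntax, which is what that clause is written in, a square bracket makes the bound inclusive while a curly brace makes it exclusive, so the two branches only differ in whether the stop operation itself can be returned. With made-up values stop_number = 100 and start_number = 500 the generated fragments are:

    AND operation_id_num: [100 TO 500]     <- '[' keeps operation 100 in the results
    AND operation_id_num: {100 TO 500]     <- '{' excludes operation 100

Since the inclusive branch is only taken when stop_number is zero, the practical difference is whether operation number 0 is matched, which is presumably why the comment calls the distinction redundant or buggy.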
- size = static_cast(hits["value"].as_uint64()); + size = hits["value"].as_uint64(); else // probably ES-6 - size = static_cast(hits.as_uint64()); + size = hits.as_uint64(); size = std::min( size, size_t(limit) ); const auto& data = variant_response["hits"]["hits"]; - for(size_t i=0; i_options.elasticsearch_mode; } diff --git a/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp b/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp index 63e50619e6..d82235755d 100644 --- a/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp +++ b/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp @@ -66,10 +66,13 @@ class elasticsearch_plugin : public graphene::app::plugin void plugin_initialize(const boost::program_options::variables_map& options) override; void plugin_startup() override; - operation_history_object get_operation_by_id(operation_history_id_type id); - vector get_account_history(const account_id_type account_id, - operation_history_id_type stop, unsigned limit, operation_history_id_type start); - mode get_running_mode(); + operation_history_object get_operation_by_id(const operation_history_id_type& id) const; + vector get_account_history( + const account_id_type& account_id, + const operation_history_id_type& stop = operation_history_id_type(), + uint64_t limit = 100, + const operation_history_id_type& start = operation_history_id_type() ) const; + mode get_running_mode() const; private: std::unique_ptr my; From 5473e075e89d8d62cd01f3be2611f8d8821e3504 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 15 Jan 2022 22:58:39 +0000 Subject: [PATCH 101/338] Move function implementation to cpp file --- libraries/utilities/elasticsearch.cpp | 39 +++++++++++++++++++ .../graphene/utilities/elasticsearch.hpp | 29 ++++---------- 2 files changed, 46 insertions(+), 22 deletions(-) diff --git a/libraries/utilities/elasticsearch.cpp b/libraries/utilities/elasticsearch.cpp index e5329df0fe..748fb78f20 100644 --- a/libraries/utilities/elasticsearch.cpp +++ b/libraries/utilities/elasticsearch.cpp @@ -222,6 +222,11 @@ std::string doCurl(CurlRequest& curl) return CurlReadBuffer; } +bool curl_wrapper::http_response::is_200() const +{ + return ( http_response_code::HTTP_200 == code ); +} + CURL* curl_wrapper::init_curl() { CURL* curl = curl_easy_init(); @@ -246,6 +251,18 @@ curl_wrapper::curl_wrapper() curl_easy_setopt( curl.get(), CURLOPT_USERAGENT, "bitshares-core/6.1" ); } +void curl_wrapper::curl_deleter::operator()( CURL* curl ) const +{ + if( !curl ) + curl_easy_cleanup( curl ); +} + +void curl_wrapper::curl_slist_deleter::operator()( curl_slist* slist ) const +{ + if( !slist ) + curl_slist_free_all( slist ); +} + curl_wrapper::http_response curl_wrapper::request( curl_wrapper::http_request_method method, const std::string& url, const std::string& auth, @@ -300,6 +317,28 @@ curl_wrapper::http_response curl_wrapper::request( curl_wrapper::http_request_me return resp; } +curl_wrapper::http_response curl_wrapper::get( const std::string& url, const std::string& auth ) const +{ + return request( http_request_method::HTTP_GET, url, auth, "" ); +} + +curl_wrapper::http_response curl_wrapper::del( const std::string& url, const std::string& auth ) const +{ + return request( http_request_method::HTTP_DELETE, url, auth, "" ); +} + +curl_wrapper::http_response curl_wrapper::post( const std::string& url, const std::string& auth, + const std::string& query ) const +{ + return 
request( http_request_method::HTTP_POST, url, auth, query ); +} + +curl_wrapper::http_response curl_wrapper::put( const std::string& url, const std::string& auth, + const std::string& query ) const +{ + return request( http_request_method::HTTP_PUT, url, auth, query ); +} + bool es_client::check_status() const { const auto response = curl.get( base_url + "_nodes", auth ); diff --git a/libraries/utilities/include/graphene/utilities/elasticsearch.hpp b/libraries/utilities/include/graphene/utilities/elasticsearch.hpp index f4b7df523f..80d3b8e486 100644 --- a/libraries/utilities/include/graphene/utilities/elasticsearch.hpp +++ b/libraries/utilities/include/graphene/utilities/elasticsearch.hpp @@ -60,7 +60,7 @@ class curl_wrapper { uint16_t code; std::string content; - bool is_200() const { return ( http_response_code::HTTP_200 == code ); } + bool is_200() const; ///< @return if @ref code is 200 }; http_response request( http_request_method method, @@ -68,17 +68,10 @@ class curl_wrapper const std::string& auth, const std::string& query ) const; - http_response get( const std::string& url, const std::string& auth ) const - { return request( http_request_method::HTTP_GET, url, auth, "" ); } - - http_response del( const std::string& url, const std::string& auth ) const - { return request( http_request_method::HTTP_DELETE, url, auth, "" ); } - - http_response post( const std::string& url, const std::string& auth, const std::string& query ) const - { return request( http_request_method::HTTP_POST, url, auth, query ); } - - http_response put( const std::string& url, const std::string& auth, const std::string& query ) const - { return request( http_request_method::HTTP_PUT, url, auth, query ); } + http_response get( const std::string& url, const std::string& auth ) const; + http_response del( const std::string& url, const std::string& auth ) const; + http_response post( const std::string& url, const std::string& auth, const std::string& query ) const; + http_response put( const std::string& url, const std::string& auth, const std::string& query ) const; private: @@ -87,20 +80,12 @@ class curl_wrapper struct curl_deleter { - void operator()( CURL* curl ) const - { - if( !curl ) - curl_easy_cleanup( curl ); - } + void operator()( CURL* curl ) const; }; struct curl_slist_deleter { - void operator()( curl_slist* slist ) const - { - if( !slist ) - curl_slist_free_all( slist ); - } + void operator()( curl_slist* slist ) const; }; std::unique_ptr curl { init_curl() }; From ea444a5511c7025890b5e92acaf2b427a8c2c068 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 15 Jan 2022 23:00:17 +0000 Subject: [PATCH 102/338] Fix code smells --- libraries/plugins/elasticsearch/elasticsearch_plugin.cpp | 2 +- libraries/utilities/elasticsearch.cpp | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index b3f3910e6c..27d9b19617 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -613,7 +613,7 @@ vector elasticsearch_plugin::get_account_history( uint64_t limit, const operation_history_id_type& start ) const { - const string account_id_string = std::string( account_id ); + const auto account_id_string = std::string( account_id ); const auto stop_number = stop.instance.value; const auto start_number = start.instance.value; diff --git a/libraries/utilities/elasticsearch.cpp 
b/libraries/utilities/elasticsearch.cpp index 748fb78f20..6d6388e2d0 100644 --- a/libraries/utilities/elasticsearch.cpp +++ b/libraries/utilities/elasticsearch.cpp @@ -128,11 +128,11 @@ static std::string joinBulkLines(const std::vector& bulk) return bulking; } -static long getResponseCode(CURL *handler) +static uint16_t getResponseCode(CURL *handler) { long http_code = 0; curl_easy_getinfo (handler, CURLINFO_RESPONSE_CODE, &http_code); - return http_code; + return static_cast(http_code); } bool SendBulk(ES&& es) From 5afb240907e1ce1a534ea96c8462faa29d940844 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 15 Jan 2022 23:40:40 +0000 Subject: [PATCH 103/338] Refactor ES objects plugin code with new utilities --- libraries/plugins/es_objects/CMakeLists.txt | 9 +-- libraries/plugins/es_objects/es_objects.cpp | 71 +++++++++------------ 2 files changed, 31 insertions(+), 49 deletions(-) diff --git a/libraries/plugins/es_objects/CMakeLists.txt b/libraries/plugins/es_objects/CMakeLists.txt index 2cae2ffde4..926e157f44 100644 --- a/libraries/plugins/es_objects/CMakeLists.txt +++ b/libraries/plugins/es_objects/CMakeLists.txt @@ -4,18 +4,11 @@ add_library( graphene_es_objects es_objects.cpp ) -find_curl() - -include_directories(${CURL_INCLUDE_DIRS}) -if(CURL_STATICLIB) - SET_TARGET_PROPERTIES(graphene_es_objects PROPERTIES - COMPILE_DEFINITIONS "CURL_STATICLIB") -endif(CURL_STATICLIB) if(MSVC) set_source_files_properties(es_objects.cpp PROPERTIES COMPILE_FLAGS "/bigobj" ) endif(MSVC) -target_link_libraries( graphene_es_objects graphene_chain graphene_app ${CURL_LIBRARIES} ) +target_link_libraries( graphene_es_objects graphene_chain graphene_app ) target_include_directories( graphene_es_objects PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index 8814179225..5aa6d8fa43 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -24,7 +24,6 @@ #include -#include #include #include #include @@ -50,11 +49,7 @@ class es_objects_plugin_impl public: explicit es_objects_plugin_impl(es_objects_plugin& _plugin) : _self( _plugin ) - { - curl = curl_easy_init(); - curl_easy_setopt(curl, CURLOPT_SSLVERSION, CURL_SSLVERSION_TLSv1_2); - } - virtual ~es_objects_plugin_impl(); + { } private: friend class graphene::es_objects::es_objects_plugin; @@ -118,10 +113,9 @@ class es_objects_plugin_impl uint32_t limit_documents = _options.bulk_replay; - CURL *curl; // curl handler - vector bulk; - vector prepare; + std::unique_ptr es; + vector bulk_lines; uint32_t block_number; fc::time_point_sec block_time; @@ -195,6 +189,8 @@ void es_objects_plugin_impl::index_database(const vector& ids, a else limit_documents = _options.bulk_replay; + bulk_lines.reserve(limit_documents); + static const unordered_map data_type_map = { { account_id_type::space_type, _options.accounts }, { account_balance_id_type::space_type, _options.balances }, @@ -259,9 +255,8 @@ void es_objects_plugin_impl::remove_from_database( delete_line["_type"] = "_doc"; fc::mutable_variant_object final_delete_line; final_delete_line["delete"] = std::move( delete_line ); - prepare.push_back( fc::json::to_string(final_delete_line) ); - std::move( prepare.begin(), prepare.end(), std::back_inserter(bulk) ); - prepare.clear(); + + bulk_lines.push_back( fc::json::to_string(final_delete_line) ); send_bulk_if_ready(); } @@ -289,36 +284,33 @@ void es_objects_plugin_impl::prepareTemplate( string data = fc::json::to_string(o, 
fc::json::legacy_generator); - prepare = graphene::utilities::createBulk(bulk_header, std::move(data)); - std::move(prepare.begin(), prepare.end(), std::back_inserter(bulk)); - prepare.clear(); + auto prepare = graphene::utilities::createBulk(bulk_header, std::move(data)); + std::move(prepare.begin(), prepare.end(), std::back_inserter(bulk_lines)); send_bulk_if_ready(); } void es_objects_plugin_impl::send_bulk_if_ready( bool force ) { - if( !curl || bulk.empty() ) + if( bulk_lines.empty() ) return; - if( !force && bulk.size() < limit_documents ) + if( !force && bulk_lines.size() < limit_documents ) return; // send data to elasticsearch when being forced or bulk is too large - graphene::utilities::ES es; - es.curl = curl; - es.bulk_lines = bulk; - es.elasticsearch_url = _options.elasticsearch_url; - es.auth = _options.auth; - if( !graphene::utilities::SendBulk(std::move(es)) ) - FC_THROW_EXCEPTION(graphene::chain::plugin_exception, "Error sending bulk data."); - bulk.clear(); -} - -es_objects_plugin_impl::~es_objects_plugin_impl() -{ - if (curl) { - curl_easy_cleanup(curl); - curl = nullptr; + if( !es->send_bulk( bulk_lines ) ) + { + elog( "Error sending ${n} lines of bulk data to ElasticSearch, the first lines are:", + ("n",bulk_lines.size()) ); + const auto log_max = std::min( bulk_lines.size(), size_t(10) ); + for( size_t i = 0; i < log_max; ++i ) + { + edump( (bulk_lines[i]) ); + } + FC_THROW_EXCEPTION( graphene::chain::plugin_exception, + "Error populating ES database, we are going to keep trying." ); } + bulk_lines.clear(); + bulk_lines.reserve(limit_documents); } } // end namespace detail @@ -406,6 +398,12 @@ void es_objects_plugin::plugin_set_program_options( void detail::es_objects_plugin_impl::init_program_options(const boost::program_options::variables_map& options) { _options.init( options ); + + es = std::make_unique( _options.elasticsearch_url, _options.auth ); + + FC_ASSERT( es->check_status(), "ES database is not up in url ${url}", ("url", _options.elasticsearch_url) ); + + es->check_version_7_or_above( is_es_version_7_or_above ); } void detail::es_objects_plugin_impl::plugin_options::init(const boost::program_options::variables_map& options) @@ -451,15 +449,6 @@ void es_objects_plugin::plugin_initialize(const boost::program_options::variable my->on_objects_delete( ids ); }); - graphene::utilities::ES es; - es.curl = my->curl; - es.elasticsearch_url = my->_options.elasticsearch_url; - es.auth = my->_options.auth; - - if(!graphene::utilities::checkES(es)) - FC_THROW( "ES database is not up in url ${url}", ("url", my->_options.elasticsearch_url) ); - - graphene::utilities::checkESVersion7OrAbove(es, my->is_es_version_7_or_above); } void es_objects_plugin::plugin_startup() From 981a429d0e5d778db55c95f92ba7c86ecb794cff Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 16 Jan 2022 14:03:32 +0000 Subject: [PATCH 104/338] Rename a parameter to avoid shadowing a member --- libraries/utilities/elasticsearch.cpp | 6 +++--- .../utilities/include/graphene/utilities/elasticsearch.hpp | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/libraries/utilities/elasticsearch.cpp b/libraries/utilities/elasticsearch.cpp index 6d6388e2d0..d3f8da32bc 100644 --- a/libraries/utilities/elasticsearch.cpp +++ b/libraries/utilities/elasticsearch.cpp @@ -251,10 +251,10 @@ curl_wrapper::curl_wrapper() curl_easy_setopt( curl.get(), CURLOPT_USERAGENT, "bitshares-core/6.1" ); } -void curl_wrapper::curl_deleter::operator()( CURL* curl ) const +void curl_wrapper::curl_deleter::operator()( 
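The refactor above swaps the plugin's raw CURL handle for the shared utility class. Below is a minimal sketch of how that interface appears to be driven, assuming the unique_ptr member holds a graphene::utilities::es_client constructed from the node URL and basic-auth string, and that check_status(), check_version_7_or_above() and send_bulk() behave as the calls in this diff suggest; the sample documents are invented for illustration.

    // Hedged usage sketch, not project code.
    #include <graphene/utilities/elasticsearch.hpp>

    #include <memory>
    #include <string>
    #include <vector>

    int main()
    {
       const std::string url  = "http://localhost:9200/"; // default from the plugin's help text
       const std::string auth = "";                       // "user:password" when basic auth is enabled

       auto es = std::make_unique<graphene::utilities::es_client>( url, auth );

       if( !es->check_status() )
          return 1; // node unreachable, mirroring the FC_ASSERT in init_program_options

       bool is_es_version_7_or_above = true;
       es->check_version_7_or_above( is_es_version_7_or_above );

       // Two NDJSON lines per document: an action line and the document itself
       std::vector<std::string> bulk_lines;
       bulk_lines.emplace_back( R"({ "index" : { "_index" : "objects-account" } })" );
       bulk_lines.emplace_back( R"({ "object_id" : "1.2.0" })" );

       return es->send_bulk( bulk_lines ) ? 0 : 1;
    }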
CURL* p_curl ) const { - if( !curl ) - curl_easy_cleanup( curl ); + if( !p_curl ) + curl_easy_cleanup( p_curl ); } void curl_wrapper::curl_slist_deleter::operator()( curl_slist* slist ) const diff --git a/libraries/utilities/include/graphene/utilities/elasticsearch.hpp b/libraries/utilities/include/graphene/utilities/elasticsearch.hpp index 80d3b8e486..4a8962c0de 100644 --- a/libraries/utilities/include/graphene/utilities/elasticsearch.hpp +++ b/libraries/utilities/include/graphene/utilities/elasticsearch.hpp @@ -80,7 +80,7 @@ class curl_wrapper struct curl_deleter { - void operator()( CURL* curl ) const; + void operator()( CURL* p_curl ) const; }; struct curl_slist_deleter From ab3264d5530c793a3cd8f96c5a26e129ba0a91a9 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 16 Jan 2022 15:29:40 +0000 Subject: [PATCH 105/338] Move old ES utilities code to tests Because some code is still used in tests. --- libraries/utilities/elasticsearch.cpp | 147 +---------------- .../graphene/utilities/elasticsearch.hpp | 33 +--- tests/common/database_fixture.cpp | 1 + tests/common/elasticsearch.cpp | 153 ++++++++++++++++++ tests/common/elasticsearch.hpp | 62 +++++++ tests/elasticsearch/main.cpp | 3 +- 6 files changed, 220 insertions(+), 179 deletions(-) create mode 100644 tests/common/elasticsearch.cpp create mode 100644 tests/common/elasticsearch.hpp diff --git a/libraries/utilities/elasticsearch.cpp b/libraries/utilities/elasticsearch.cpp index d3f8da32bc..a18959cdec 100644 --- a/libraries/utilities/elasticsearch.cpp +++ b/libraries/utilities/elasticsearch.cpp @@ -24,7 +24,7 @@ #include #include -#include + #include #include @@ -36,60 +36,6 @@ static size_t curl_write_function(void *contents, size_t size, size_t nmemb, voi namespace graphene { namespace utilities { -bool checkES(ES& es) -{ - graphene::utilities::CurlRequest curl_request; - curl_request.handler = es.curl; - curl_request.url = es.elasticsearch_url + "_nodes"; - curl_request.auth = es.auth; - curl_request.type = "GET"; - - if(doCurl(curl_request).empty()) - return false; - return true; - -} - -std::string getESVersion(ES& es) -{ - graphene::utilities::CurlRequest curl_request; - curl_request.handler = es.curl; - curl_request.url = es.elasticsearch_url; - curl_request.auth = es.auth; - curl_request.type = "GET"; - - fc::variant response = fc::json::from_string(doCurl(curl_request)); - - return response["version"]["number"].as_string(); -} - -void checkESVersion7OrAbove(ES& es, bool& result) noexcept -{ - static const int64_t version_7 = 7; - try { - const auto es_version = graphene::utilities::getESVersion(es); - auto dot_pos = es_version.find('.'); - result = ( std::stoi(es_version.substr(0,dot_pos)) >= version_7 ); - } - catch( ... 
) - { - wlog( "Unable to get ES version, assuming it is 7 or above" ); - result = true; - } -} - -std::string simpleQuery(ES& es) -{ - graphene::utilities::CurlRequest curl_request; - curl_request.handler = es.curl; - curl_request.url = es.elasticsearch_url + es.endpoint; - curl_request.auth = es.auth; - curl_request.type = "POST"; - curl_request.query = es.query; - - return doCurl(curl_request); -} - static bool handle_bulk_response( uint16_t http_code, const std::string& curl_read_buffer ) { if( curl_wrapper::http_response_code::HTTP_200 == http_code ) @@ -120,39 +66,6 @@ static bool handle_bulk_response( uint16_t http_code, const std::string& curl_re return false; } -static std::string joinBulkLines(const std::vector& bulk) -{ - auto bulking = boost::algorithm::join(bulk, "\n"); - bulking = bulking + "\n"; - - return bulking; -} - -static uint16_t getResponseCode(CURL *handler) -{ - long http_code = 0; - curl_easy_getinfo (handler, CURLINFO_RESPONSE_CODE, &http_code); - return static_cast(http_code); -} - -bool SendBulk(ES&& es) -{ - std::string bulking = joinBulkLines(es.bulk_lines); - - graphene::utilities::CurlRequest curl_request; - curl_request.handler = es.curl; - curl_request.url = es.elasticsearch_url + "_bulk"; - curl_request.auth = es.auth; - curl_request.type = "POST"; - curl_request.query = std::move(bulking); - - auto curlResponse = doCurl(curl_request); - - if(handle_bulk_response(getResponseCode(curl_request.handler), curlResponse)) - return true; - return false; -} - std::vector createBulk(const fc::mutable_variant_object& bulk_header, std::string&& data) { std::vector bulk; @@ -164,64 +77,6 @@ std::vector createBulk(const fc::mutable_variant_object& bulk_heade return bulk; } -bool deleteAll(ES& es) -{ - graphene::utilities::CurlRequest curl_request; - curl_request.handler = es.curl; - curl_request.url = es.elasticsearch_url + es.index_prefix + "*"; - curl_request.auth = es.auth; - curl_request.type = "DELETE"; - - auto curl_response = doCurl(curl_request); - if(curl_response.empty()) - return false; - else - return true; -} -std::string getEndPoint(ES& es) -{ - graphene::utilities::CurlRequest curl_request; - curl_request.handler = es.curl; - curl_request.url = es.elasticsearch_url + es.endpoint; - curl_request.auth = es.auth; - curl_request.type = "GET"; - - return doCurl(curl_request); -} - -std::string doCurl(CurlRequest& curl) -{ - std::string CurlReadBuffer; - struct curl_slist *headers = NULL; - headers = curl_slist_append(headers, "Content-Type: application/json"); - - // Note: the variable curl.handler has a long lifetime, it only gets initialized once, then be used many times, - // thus we need to clear old data - curl_easy_setopt(curl.handler, CURLOPT_HTTPHEADER, headers); - curl_easy_setopt(curl.handler, CURLOPT_URL, curl.url.c_str()); - curl_easy_setopt(curl.handler, CURLOPT_CUSTOMREQUEST, curl.type.c_str()); // this is OK - if(curl.type == "POST") - { - curl_easy_setopt(curl.handler, CURLOPT_HTTPGET, false); - curl_easy_setopt(curl.handler, CURLOPT_POST, true); - curl_easy_setopt(curl.handler, CURLOPT_POSTFIELDS, curl.query.c_str()); - } - else // GET or DELETE (only these are used in this file) - { - curl_easy_setopt(curl.handler, CURLOPT_POSTFIELDS, NULL); - curl_easy_setopt(curl.handler, CURLOPT_POST, false); - curl_easy_setopt(curl.handler, CURLOPT_HTTPGET, true); - } - curl_easy_setopt(curl.handler, CURLOPT_WRITEFUNCTION, curl_write_function); - curl_easy_setopt(curl.handler, CURLOPT_WRITEDATA, (void *)&CurlReadBuffer); - curl_easy_setopt(curl.handler, 
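For reference, the payload that createBulk() assembles and the removed joinBulkLines() used to flatten is plain NDJSON as expected by Elasticsearch's _bulk endpoint: one action/metadata line followed by one document line, with the body terminated by a newline. A small standalone illustration follows; the index name and document content are made up, not project defaults.

    // Standalone NDJSON illustration of the bulk body shape.
    #include <boost/algorithm/string/join.hpp>
    #include <iostream>
    #include <string>
    #include <vector>

    int main()
    {
       std::vector<std::string> bulk;
       bulk.push_back( R"({ "index" : { "_index" : "bitshares-sample" } })" ); // action/metadata line
       bulk.push_back( R"({ "account_history" : { "account" : "1.2.0" } })" ); // document line

       // Equivalent of the removed joinBulkLines(): join with '\n' and end with '\n'
       std::string body = boost::algorithm::join( bulk, "\n" ) + "\n";
       std::cout << body;
       return 0;
    }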
CURLOPT_USERAGENT, "libcrp/0.1"); - if(!curl.auth.empty()) - curl_easy_setopt(curl.handler, CURLOPT_USERPWD, curl.auth.c_str()); - curl_easy_perform(curl.handler); - - return CurlReadBuffer; -} - bool curl_wrapper::http_response::is_200() const { return ( http_response_code::HTTP_200 == code ); diff --git a/libraries/utilities/include/graphene/utilities/elasticsearch.hpp b/libraries/utilities/include/graphene/utilities/elasticsearch.hpp index 4a8962c0de..3b72d9b2aa 100644 --- a/libraries/utilities/include/graphene/utilities/elasticsearch.hpp +++ b/libraries/utilities/include/graphene/utilities/elasticsearch.hpp @@ -27,7 +27,7 @@ #include #include -#include + #include namespace graphene { namespace utilities { @@ -111,36 +111,7 @@ class es_client curl_wrapper curl; }; - class ES { - public: - CURL *curl; - std::vector bulk_lines; - std::string elasticsearch_url; - std::string index_prefix; - std::string auth; - std::string endpoint; - std::string query; - }; - class CurlRequest { - public: - CURL *handler; - std::string url; - std::string type; - std::string auth; - std::string query; - }; - - bool SendBulk(ES&& es); - bool checkES(ES& es); - std::string getESVersion(ES& es); - void checkESVersion7OrAbove(ES& es, bool& result) noexcept; - std::string simpleQuery(ES& es); - bool deleteAll(ES& es); - std::string getEndPoint(ES& es); - - std::string doCurl(CurlRequest& curl); - - std::vector createBulk(const fc::mutable_variant_object& bulk_header, std::string&& data); +std::vector createBulk(const fc::mutable_variant_object& bulk_header, std::string&& data); struct es_data_adaptor { diff --git a/tests/common/database_fixture.cpp b/tests/common/database_fixture.cpp index a46d273146..e689a78312 100644 --- a/tests/common/database_fixture.cpp +++ b/tests/common/database_fixture.cpp @@ -48,6 +48,7 @@ #include #include "database_fixture.hpp" +#include "elasticsearch.hpp" using namespace graphene::chain::test; diff --git a/tests/common/elasticsearch.cpp b/tests/common/elasticsearch.cpp new file mode 100644 index 0000000000..1393f35872 --- /dev/null +++ b/tests/common/elasticsearch.cpp @@ -0,0 +1,153 @@ +/* + * Copyright (c) 2018 oxarbitrage, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "elasticsearch.hpp" + +#include +#include +#include +#include + +static size_t curl_write_function(void *contents, size_t size, size_t nmemb, void *userp) +{ + ((std::string*)userp)->append((char*)contents, size * nmemb); + return size * nmemb; +} + +namespace graphene { namespace utilities { + +bool checkES(ES& es) +{ + graphene::utilities::CurlRequest curl_request; + curl_request.handler = es.curl; + curl_request.url = es.elasticsearch_url + "_nodes"; + curl_request.auth = es.auth; + curl_request.type = "GET"; + + if(doCurl(curl_request).empty()) + return false; + return true; + +} + +std::string getESVersion(ES& es) +{ + graphene::utilities::CurlRequest curl_request; + curl_request.handler = es.curl; + curl_request.url = es.elasticsearch_url; + curl_request.auth = es.auth; + curl_request.type = "GET"; + + fc::variant response = fc::json::from_string(doCurl(curl_request)); + + return response["version"]["number"].as_string(); +} + +void checkESVersion7OrAbove(ES& es, bool& result) noexcept +{ + static const int64_t version_7 = 7; + try { + const auto es_version = graphene::utilities::getESVersion(es); + auto dot_pos = es_version.find('.'); + result = ( std::stoi(es_version.substr(0,dot_pos)) >= version_7 ); + } + catch( ... ) + { + wlog( "Unable to get ES version, assuming it is 7 or above" ); + result = true; + } +} + +std::string simpleQuery(ES& es) +{ + graphene::utilities::CurlRequest curl_request; + curl_request.handler = es.curl; + curl_request.url = es.elasticsearch_url + es.endpoint; + curl_request.auth = es.auth; + curl_request.type = "POST"; + curl_request.query = es.query; + + return doCurl(curl_request); +} + +bool deleteAll(ES& es) +{ + graphene::utilities::CurlRequest curl_request; + curl_request.handler = es.curl; + curl_request.url = es.elasticsearch_url + es.index_prefix + "*"; + curl_request.auth = es.auth; + curl_request.type = "DELETE"; + + auto curl_response = doCurl(curl_request); + if(curl_response.empty()) + return false; + else + return true; +} + +std::string getEndPoint(ES& es) +{ + graphene::utilities::CurlRequest curl_request; + curl_request.handler = es.curl; + curl_request.url = es.elasticsearch_url + es.endpoint; + curl_request.auth = es.auth; + curl_request.type = "GET"; + + return doCurl(curl_request); +} + +std::string doCurl(CurlRequest& curl) +{ + std::string CurlReadBuffer; + struct curl_slist *headers = NULL; + headers = curl_slist_append(headers, "Content-Type: application/json"); + + // Note: the variable curl.handler has a long lifetime, it only gets initialized once, then be used many times, + // thus we need to clear old data + curl_easy_setopt(curl.handler, CURLOPT_HTTPHEADER, headers); + curl_easy_setopt(curl.handler, CURLOPT_URL, curl.url.c_str()); + curl_easy_setopt(curl.handler, CURLOPT_CUSTOMREQUEST, curl.type.c_str()); // this is OK + if(curl.type == "POST") + { + curl_easy_setopt(curl.handler, CURLOPT_HTTPGET, false); + curl_easy_setopt(curl.handler, CURLOPT_POST, true); + curl_easy_setopt(curl.handler, CURLOPT_POSTFIELDS, curl.query.c_str()); + } + else // GET or DELETE (only these are used in this file) + { + curl_easy_setopt(curl.handler, CURLOPT_POSTFIELDS, NULL); + curl_easy_setopt(curl.handler, CURLOPT_POST, false); + curl_easy_setopt(curl.handler, CURLOPT_HTTPGET, true); + } + curl_easy_setopt(curl.handler, CURLOPT_WRITEFUNCTION, curl_write_function); + curl_easy_setopt(curl.handler, CURLOPT_WRITEDATA, (void *)&CurlReadBuffer); + curl_easy_setopt(curl.handler, CURLOPT_USERAGENT, "libcrp/0.1"); + 
if(!curl.auth.empty()) + curl_easy_setopt(curl.handler, CURLOPT_USERPWD, curl.auth.c_str()); + curl_easy_perform(curl.handler); + + return CurlReadBuffer; +} + +} } // graphene::utilities diff --git a/tests/common/elasticsearch.hpp b/tests/common/elasticsearch.hpp new file mode 100644 index 0000000000..2fe2648975 --- /dev/null +++ b/tests/common/elasticsearch.hpp @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2018 oxarbitrage, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#pragma once + +#include +#include + +#include + +namespace graphene { namespace utilities { + + class ES { + public: + CURL *curl; + std::vector bulk_lines; + std::string elasticsearch_url; + std::string index_prefix; + std::string auth; + std::string endpoint; + std::string query; + }; + + class CurlRequest { + public: + CURL *handler; + std::string url; + std::string type; + std::string auth; + std::string query; + }; + + bool checkES(ES& es); + std::string getESVersion(ES& es); + void checkESVersion7OrAbove(ES& es, bool& result) noexcept; + std::string simpleQuery(ES& es); + bool deleteAll(ES& es); + std::string getEndPoint(ES& es); + + std::string doCurl(CurlRequest& curl); + +} } // graphene::utilities diff --git a/tests/elasticsearch/main.cpp b/tests/elasticsearch/main.cpp index 6e82f27530..1e60ed4de3 100644 --- a/tests/elasticsearch/main.cpp +++ b/tests/elasticsearch/main.cpp @@ -31,9 +31,8 @@ #include #include "../common/init_unit_test_suite.hpp" - #include "../common/database_fixture.hpp" - +#include "../common/elasticsearch.hpp" #include "../common/utils.hpp" #define ES_WAIT_TIME (fc::milliseconds(10000)) From bfa7f6c5d46780346689c38e10b1b74dd9290ae4 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 16 Jan 2022 23:06:39 +0000 Subject: [PATCH 106/338] Fix deleters --- libraries/utilities/elasticsearch.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/utilities/elasticsearch.cpp b/libraries/utilities/elasticsearch.cpp index a18959cdec..f49d1eef4c 100644 --- a/libraries/utilities/elasticsearch.cpp +++ b/libraries/utilities/elasticsearch.cpp @@ -108,13 +108,13 @@ curl_wrapper::curl_wrapper() void curl_wrapper::curl_deleter::operator()( CURL* p_curl ) const { - if( !p_curl ) + if( p_curl ) curl_easy_cleanup( p_curl ); } void curl_wrapper::curl_slist_deleter::operator()( curl_slist* slist ) const { - if( !slist ) + if( slist ) curl_slist_free_all( slist ); } From 5fdd7ea1cd990c91ec1e0bfc0261162ed01510d9 Mon Sep 17 
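The "Fix deleters" change above inverts a null check so that the handles are actually released. A self-contained sketch of the RAII pattern those deleters enable is shown below; it assumes curl_wrapper keeps its CURL and curl_slist handles in std::unique_ptr as the earlier hunks suggest, and the names used here are illustrative rather than the project's exact members.

    // RAII sketch: custom deleters let unique_ptr own libcurl handles.
    #include <curl/curl.h>
    #include <memory>

    struct curl_easy_deleter
    {
       void operator()( CURL* p_curl ) const
       {
          if( p_curl )                  // only clean up a valid handle
             curl_easy_cleanup( p_curl );
       }
    };

    struct curl_slist_deleter
    {
       void operator()( curl_slist* slist ) const
       {
          if( slist )
             curl_slist_free_all( slist );
       }
    };

    int main()
    {
       curl_global_init( CURL_GLOBAL_ALL );
       {
          // Released automatically, even on early return or exception
          std::unique_ptr<CURL, curl_easy_deleter> curl( curl_easy_init() );
          std::unique_ptr<curl_slist, curl_slist_deleter> headers(
                curl_slist_append( nullptr, "Content-Type: application/json" ) );
          if( curl )
             curl_easy_setopt( curl.get(), CURLOPT_USERAGENT, "example/0.1" );
       }
       curl_global_cleanup();
       return 0;
    }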
00:00:00 2001 From: abitmore Date: Mon, 17 Jan 2022 21:22:33 +0000 Subject: [PATCH 107/338] Reduce data copying and processing in ES plugin --- .../elasticsearch/elasticsearch_plugin.cpp | 51 ++++++++----------- 1 file changed, 22 insertions(+), 29 deletions(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index 27d9b19617..b72bc13f66 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -84,10 +84,8 @@ class elasticsearch_plugin_impl vector bulk_lines; // vector of op lines - int16_t op_type; - operation_history_struct os; - block_struct bs; - visitor_struct vs; + bulk_struct bulk_line_struct; + std::string index_name; bool is_sync = false; bool is_es_version_7_or_above = true; @@ -96,10 +94,9 @@ class elasticsearch_plugin_impl uint32_t block_number ); void send_bulk(); - void getOperationType(const optional & oho); - void doOperationHistory(const optional & oho); - void doBlock(uint32_t trx_in_block, const signed_block& b); - void doVisitor(const optional & oho); + void doOperationHistory(const optional & oho, operation_history_struct& os); + void doBlock(uint32_t trx_in_block, const signed_block& b, block_struct& bs); + void doVisitor(const optional & oho, visitor_struct& vs); void checkState(const fc::time_point_sec& block_time); void cleanObjects(const account_transaction_history_id_type& ath, const account_id_type& account_id); @@ -159,11 +156,15 @@ void elasticsearch_plugin_impl::update_account_histories( const signed_block& b oho = create_oho(); // populate what we can before impacted loop - getOperationType(oho); - doOperationHistory(oho); - doBlock(oho->trx_in_block, b); - if(_options.visitor) - doVisitor(oho); + if( o_op->block_num > _options.start_es_after_block ) + { + bulk_line_struct.operation_type = oho->op.which(); + bulk_line_struct.operation_id_num = oho->id.instance(); + doOperationHistory( oho, bulk_line_struct.operation_history ); + doBlock( oho->trx_in_block, b, bulk_line_struct.block_data ); + if( _options.visitor ) + doVisitor( oho, *bulk_line_struct.additional_data ); + } const operation_history_object& op = *o_op; @@ -245,13 +246,8 @@ void elasticsearch_plugin_impl::checkState(const fc::time_point_sec& block_time) bulk_lines.reserve(limit_documents); } -void elasticsearch_plugin_impl::getOperationType(const optional & oho) -{ - if (!oho->id.is_null()) - op_type = oho->op.which(); -} - -void elasticsearch_plugin_impl::doOperationHistory(const optional & oho) +void elasticsearch_plugin_impl::doOperationHistory( const optional & oho, + operation_history_struct& os ) { try { os.trx_in_block = oho->trx_in_block; os.op_in_trx = oho->op_in_trx; @@ -271,7 +267,7 @@ void elasticsearch_plugin_impl::doOperationHistory(const optional op); } FC_CAPTURE_LOG_AND_RETHROW( (oho) ) } -void elasticsearch_plugin_impl::doBlock(uint32_t trx_in_block, const signed_block& b) +void elasticsearch_plugin_impl::doBlock(uint32_t trx_in_block, const signed_block& b, block_struct& bs) { std::string trx_id = ""; if(trx_in_block < b.transactions.size()) @@ -336,7 +332,7 @@ struct operation_visitor } }; -void elasticsearch_plugin_impl::doVisitor(const optional & oho) +void elasticsearch_plugin_impl::doVisitor(const optional & oho, visitor_struct& vs) { graphene::chain::database& db = database(); @@ -405,14 +401,8 @@ void elasticsearch_plugin_impl::add_elasticsearch( const account_id_type& accoun if( block_number > 
_options.start_es_after_block ) { - bulk_struct bulk_line_struct; bulk_line_struct.account_history = ath; - bulk_line_struct.operation_history = os; - bulk_line_struct.operation_type = op_type; - bulk_line_struct.operation_id_num = ath.operation_id.instance.value; - bulk_line_struct.block_data = bs; - if(_options.visitor) - bulk_line_struct.additional_data = vs; + auto bulk_line = fc::json::to_string(bulk_line_struct, fc::json::legacy_generator); fc::mutable_variant_object bulk_header; @@ -513,6 +503,9 @@ void detail::elasticsearch_plugin_impl::init_program_options(const boost::progra { _options.init( options ); + if( _options.visitor ) + bulk_line_struct.additional_data = visitor_struct(); + es = std::make_unique( _options.elasticsearch_url, _options.auth ); FC_ASSERT( es->check_status(), "ES database is not up in url ${url}", ("url", _options.elasticsearch_url) ); From c3e53ddd0662c6922de0a01f815a136b205ec4a4 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 17 Jan 2022 23:50:06 +0000 Subject: [PATCH 108/338] Fix code smells --- .../plugins/elasticsearch/elasticsearch_plugin.cpp | 14 +++++++------- .../elasticsearch/elasticsearch_plugin.hpp | 12 ++++++------ 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index b72bc13f66..31b870a289 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -94,9 +94,9 @@ class elasticsearch_plugin_impl uint32_t block_number ); void send_bulk(); - void doOperationHistory(const optional & oho, operation_history_struct& os); - void doBlock(uint32_t trx_in_block, const signed_block& b, block_struct& bs); - void doVisitor(const optional & oho, visitor_struct& vs); + void doOperationHistory(const optional & oho, operation_history_struct& os) const; + void doBlock(uint32_t trx_in_block, const signed_block& b, block_struct& bs) const; + void doVisitor(const optional & oho, visitor_struct& vs) const; void checkState(const fc::time_point_sec& block_time); void cleanObjects(const account_transaction_history_id_type& ath, const account_id_type& account_id); @@ -247,7 +247,7 @@ void elasticsearch_plugin_impl::checkState(const fc::time_point_sec& block_time) } void elasticsearch_plugin_impl::doOperationHistory( const optional & oho, - operation_history_struct& os ) + operation_history_struct& os ) const { try { os.trx_in_block = oho->trx_in_block; os.op_in_trx = oho->op_in_trx; @@ -267,7 +267,7 @@ void elasticsearch_plugin_impl::doOperationHistory( const optional op); } FC_CAPTURE_LOG_AND_RETHROW( (oho) ) } -void elasticsearch_plugin_impl::doBlock(uint32_t trx_in_block, const signed_block& b, block_struct& bs) +void elasticsearch_plugin_impl::doBlock(uint32_t trx_in_block, const signed_block& b, block_struct& bs) const { std::string trx_id = ""; if(trx_in_block < b.transactions.size()) @@ -332,9 +332,9 @@ struct operation_visitor } }; -void elasticsearch_plugin_impl::doVisitor(const optional & oho, visitor_struct& vs) +void elasticsearch_plugin_impl::doVisitor(const optional & oho, visitor_struct& vs) const { - graphene::chain::database& db = database(); + const graphene::chain::database& db = _self.database(); operation_visitor o_v; oho->op.visit(o_v); diff --git a/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp b/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp index 
d82235755d..1b6c768a1b 100644 --- a/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp +++ b/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp @@ -80,17 +80,17 @@ class elasticsearch_plugin : public graphene::app::plugin struct operation_history_struct { - int trx_in_block; - int op_in_trx; + uint16_t trx_in_block; + uint16_t op_in_trx; std::string operation_result; - int virtual_op; + uint32_t virtual_op; std::string op; variant op_object; variant operation_result_object; }; struct block_struct { - int block_num; + uint32_t block_num; fc::time_point_sec block_time; std::string trx_id; }; @@ -136,8 +136,8 @@ struct visitor_struct { struct bulk_struct { account_transaction_history_object account_history; operation_history_struct operation_history; - int operation_type; - int operation_id_num; + int64_t operation_type; + uint64_t operation_id_num; block_struct block_data; optional additional_data; }; From 13439d1877136462371f30d6b00a0c82069080ce Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 20 Jan 2022 20:29:25 +0000 Subject: [PATCH 109/338] Bump FC --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index 02b7593a96..7db6fbaee4 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 02b7593a96b02d9966358c59d22f344d86fa9a19 +Subproject commit 7db6fbaee44e0f991a66bfd82f37f62c40a8073e From 831057c1293fd0626191b1eb000e694c0c7ae3a5 Mon Sep 17 00:00:00 2001 From: Abit Date: Wed, 9 Mar 2022 12:46:57 +0100 Subject: [PATCH 110/338] Remove a seed node --- libraries/egenesis/seed-nodes.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/libraries/egenesis/seed-nodes.txt b/libraries/egenesis/seed-nodes.txt index 88f4b5e7f9..cac4f4e412 100644 --- a/libraries/egenesis/seed-nodes.txt +++ b/libraries/egenesis/seed-nodes.txt @@ -8,5 +8,4 @@ "seed1.bitshares.im:1776", // clone (USA) "seed2.bitshares.im:1776", // clone (Japan) "seed.bitshares.org:666", // bitshares.org (France) -"seed.bitshares.rocks:1776", // bitshares.rocks (USA) "seeds.btsnodes.com:1776", // Community From 556bc94dcd2c52d59355568c8cfb96359878f25c Mon Sep 17 00:00:00 2001 From: Abit Date: Thu, 17 Mar 2022 12:49:47 +0100 Subject: [PATCH 111/338] Update seed nodes --- libraries/egenesis/seed-nodes.txt | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/libraries/egenesis/seed-nodes.txt b/libraries/egenesis/seed-nodes.txt index cac4f4e412..8757157186 100644 --- a/libraries/egenesis/seed-nodes.txt +++ b/libraries/egenesis/seed-nodes.txt @@ -1,11 +1,9 @@ -// https://bitsharestalk.org/index.php/topic,23715.0.html "seed01.liondani.com:1776", // liondani (Germany) -"bts-seed1.abit-more.com:62015", // abit (China) +"bts-seed1.abit-more.com:62015", // abit (Germany) "seed.roelandp.nl:1776", // roelandp (Canada) "seed1.xbts.io:1776", // xbts.io (Germany) "seed2.xbts.io:1776", // xbts.io (Germany) "seed4.xbts.io:1776", // xbts.io (Germany) "seed1.bitshares.im:1776", // clone (USA) "seed2.bitshares.im:1776", // clone (Japan) -"seed.bitshares.org:666", // bitshares.org (France) "seeds.btsnodes.com:1776", // Community From 2b66ec934062d89d13f5b9297ea22a2a40d8dec4 Mon Sep 17 00:00:00 2001 From: Abit Date: Mon, 4 Apr 2022 17:23:09 +0200 Subject: [PATCH 112/338] Upgrade zlib version from 1.2.11 to 1.2.12 --- .github/workflows/build-and-test.win.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-and-test.win.yml 
b/.github/workflows/build-and-test.win.yml index eeb942b79e..cc2fccc1a0 100644 --- a/.github/workflows/build-and-test.win.yml +++ b/.github/workflows/build-and-test.win.yml @@ -8,7 +8,7 @@ env: BOOST_DOTTED_VERSION: 1.69.0 CURL_VERSION: 7.67.0 OPENSSL_VERSION: 1.1.1d - ZLIB_VERSION: 1.2.11 + ZLIB_VERSION: 1.2.12 jobs: prepare-mingw64-libs: name: Build required 3rd-party libraries From 46529b8158c1b885b5c6c9464bcc5f0f6ba340e6 Mon Sep 17 00:00:00 2001 From: Abit Date: Fri, 6 May 2022 21:02:39 +0200 Subject: [PATCH 113/338] Fix SonarScanner version detection --- .github/workflows/sonar-scan.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/sonar-scan.yml b/.github/workflows/sonar-scan.yml index f42b2260b8..ffb5bfaf01 100644 --- a/.github/workflows/sonar-scan.yml +++ b/.github/workflows/sonar-scan.yml @@ -17,8 +17,9 @@ jobs: steps: - name: Download and install latest SonarScanner CLI tool run: | - SONAR_SCANNER_VERSION=`curl https://github.com/SonarSource/sonar-scanner-cli/releases/latest \ - 2>/dev/null | cut -f2 -d'"' | cut -f8 -d'/'` + SONAR_SCANNER_VERSION=`curl -w %{redirect_url} \ + https://github.com/SonarSource/sonar-scanner-cli/releases/latest \ + 2>/dev/null | cut -f8 -d'/'` SONAR_DOWNLOAD_PATH=https://binaries.sonarsource.com/Distribution/sonar-scanner-cli curl --create-dirs -sSLo $HOME/.sonar/sonar-scanner.zip \ $SONAR_DOWNLOAD_PATH/sonar-scanner-cli-$SONAR_SCANNER_VERSION-linux.zip From 6ec4859a2d2bacb06e3ef03b5004ace5911c7d53 Mon Sep 17 00:00:00 2001 From: Abit Date: Thu, 30 Jun 2022 16:04:15 +0200 Subject: [PATCH 114/338] Update connection_rejected_message member order To accommodate reflection order. --- libraries/net/include/graphene/net/core_messages.hpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libraries/net/include/graphene/net/core_messages.hpp b/libraries/net/include/graphene/net/core_messages.hpp index e270dbb249..bce3fd0522 100644 --- a/libraries/net/include/graphene/net/core_messages.hpp +++ b/libraries/net/include/graphene/net/core_messages.hpp @@ -245,8 +245,8 @@ namespace graphene { namespace net { std::string user_agent; uint32_t core_protocol_version; fc::ip::endpoint remote_endpoint; - std::string reason_string; fc::enum_type reason_code; + std::string reason_string; connection_rejected_message() {} connection_rejected_message(const std::string& user_agent, uint32_t core_protocol_version, @@ -255,8 +255,8 @@ namespace graphene { namespace net { user_agent(user_agent), core_protocol_version(core_protocol_version), remote_endpoint(remote_endpoint), - reason_string(reason_string), - reason_code(reason_code) + reason_code(reason_code), + reason_string(reason_string) {} }; From c847a3be873df1ad0231c54d94fdf07c6cd7083a Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 3 Jul 2022 17:51:12 +0000 Subject: [PATCH 115/338] Limit recursion depth when adapting objects for ES --- .../elasticsearch/elasticsearch_plugin.cpp | 27 ++++-- libraries/plugins/es_objects/es_objects.cpp | 10 ++- libraries/utilities/elasticsearch.cpp | 87 +++++++++++++------ .../graphene/utilities/elasticsearch.hpp | 15 ++-- 4 files changed, 98 insertions(+), 41 deletions(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index 31b870a289..14d20b6d20 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -55,6 +55,9 @@ class elasticsearch_plugin_impl std::string 
index_prefix = "bitshares-"; + /// For the "index.mapping.depth.limit" setting in ES. The default value is 20. + uint16_t max_mapping_depth = 20; + uint32_t start_es_after_block = 0; bool visitor = false; @@ -257,11 +260,13 @@ void elasticsearch_plugin_impl::doOperationHistory( const optional op.visit(fc::from_static_variant(os.op_object, FC_PACK_MAX_DEPTH)); - os.op_object = graphene::utilities::es_data_adaptor::adapt( os.op_object.get_object() ); + os.op_object = graphene::utilities::es_data_adaptor::adapt( os.op_object.get_object(), + _options.max_mapping_depth - 2 ); // operation_result variant v; fc::to_variant( oho->result, v, FC_PACK_MAX_DEPTH ); - os.operation_result_object = graphene::utilities::es_data_adaptor::adapt_static_variant( v.get_array() ); + os.operation_result_object = graphene::utilities::es_data_adaptor::adapt_static_variant( v.get_array(), + _options.max_mapping_depth - 2 ); } if(_options.operation_string) os.op = fc::json::to_string(oho->op); @@ -477,20 +482,23 @@ void elasticsearch_plugin::plugin_set_program_options( cli.add_options() ("elasticsearch-node-url", boost::program_options::value(), "Elastic Search database node url(http://localhost:9200/)") + ("elasticsearch-basic-auth", boost::program_options::value(), + "Pass basic auth to elasticsearch database('')") ("elasticsearch-bulk-replay", boost::program_options::value(), "Number of bulk documents to index on replay(10000)") ("elasticsearch-bulk-sync", boost::program_options::value(), "Number of bulk documents to index on a syncronied chain(100)") - ("elasticsearch-visitor", boost::program_options::value(), - "Use visitor to index additional data(slows down the replay(false))") - ("elasticsearch-basic-auth", boost::program_options::value(), - "Pass basic auth to elasticsearch database('')") ("elasticsearch-index-prefix", boost::program_options::value(), "Add a prefix to the index(bitshares-)") - ("elasticsearch-operation-object", boost::program_options::value(), - "Save operation as object(true)") + ("elasticsearch-max-mapping-depth", boost::program_options::value(), + "The maximum index mapping depth (index.mapping.depth.limit) setting in ES, " + "should be >=2. (20)") ("elasticsearch-start-es-after-block", boost::program_options::value(), "Start doing ES job after block(0)") + ("elasticsearch-visitor", boost::program_options::value(), + "Use visitor to index additional data(slows down the replay(false))") + ("elasticsearch-operation-object", boost::program_options::value(), + "Save operation as object(true)") ("elasticsearch-operation-string", boost::program_options::value(), "Save operation as string. 
Needed to serve history api calls(false)") ("elasticsearch-mode", boost::program_options::value(), @@ -520,11 +528,14 @@ void detail::elasticsearch_plugin_impl::plugin_options::init(const boost::progra utilities::get_program_option( options, "elasticsearch-bulk-replay", bulk_replay ); utilities::get_program_option( options, "elasticsearch-bulk-sync", bulk_sync ); utilities::get_program_option( options, "elasticsearch-index-prefix", index_prefix ); + utilities::get_program_option( options, "elasticsearch-max-mapping-depth", max_mapping_depth ); utilities::get_program_option( options, "elasticsearch-start-es-after-block", start_es_after_block ); utilities::get_program_option( options, "elasticsearch-visitor", visitor ); utilities::get_program_option( options, "elasticsearch-operation-object", operation_object ); utilities::get_program_option( options, "elasticsearch-operation-string", operation_string ); + FC_ASSERT( max_mapping_depth >= 2, "The minimum value of elasticsearch-max-mapping-depth is 2" ); + auto es_mode = static_cast( elasticsearch_mode ); utilities::get_program_option( options, "elasticsearch-mode", es_mode ); if( es_mode > static_cast( mode::all ) ) diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index 5aa6d8fa43..fb3fee5776 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -82,6 +82,10 @@ class es_objects_plugin_impl object_options budget { true, false, true, "budget" }; std::string index_prefix = "objects-"; + + /// For the "index.mapping.depth.limit" setting in ES. The default value is 20. + uint16_t max_mapping_depth = 20; + uint32_t start_es_after_block = 0; bool sync_db_on_startup = false; @@ -276,7 +280,8 @@ void es_objects_plugin_impl::prepareTemplate( fc::variant blockchain_object_variant; fc::to_variant( blockchain_object, blockchain_object_variant, GRAPHENE_NET_MAX_NESTED_OBJECTS ); - fc::mutable_variant_object o( utilities::es_data_adaptor::adapt( blockchain_object_variant.get_object() ) ); + fc::mutable_variant_object o( utilities::es_data_adaptor::adapt( blockchain_object_variant.get_object(), + _options.max_mapping_depth ) ); o["object_id"] = string(blockchain_object.id); o["block_time"] = block_time; @@ -384,6 +389,8 @@ void es_objects_plugin::plugin_set_program_options( ("es-objects-index-prefix", boost::program_options::value(), "Add a prefix to the index(objects-)") + ("es-objects-max-mapping-depth", boost::program_options::value(), + "The maximum index mapping depth (index.mapping.depth.limit) setting in ES (20)") ("es-objects-keep-only-current", boost::program_options::value(), "Deprecated. Please use the store-updates or no-delete options. 
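The new elasticsearch-max-mapping-depth option above is read through the get_program_option helper and rejected below 2. A standalone boost::program_options sketch of that pattern, outside the plugin framework, is shown below; the option text is copied from the diff, while the surrounding main() and validation wording are illustrative assumptions.

    // Standalone option-parsing sketch, not plugin code.
    #include <boost/program_options.hpp>
    #include <cstdint>
    #include <iostream>

    namespace bpo = boost::program_options;

    int main( int argc, char** argv )
    {
       uint16_t max_mapping_depth = 20; // mirrors the default index.mapping.depth.limit in ES

       bpo::options_description opts( "ES options" );
       opts.add_options()
          ( "elasticsearch-max-mapping-depth", bpo::value<uint16_t>(),
            "The maximum index mapping depth (index.mapping.depth.limit) setting in ES, "
            "should be >=2. (20)" );

       bpo::variables_map vm;
       bpo::store( bpo::parse_command_line( argc, argv, opts ), vm );
       bpo::notify( vm );

       if( vm.count( "elasticsearch-max-mapping-depth" ) > 0 )
          max_mapping_depth = vm["elasticsearch-max-mapping-depth"].as<uint16_t>();

       if( max_mapping_depth < 2 )
       {
          std::cerr << "The minimum value of elasticsearch-max-mapping-depth is 2\n";
          return 1;
       }
       std::cout << "max mapping depth: " << max_mapping_depth << "\n";
       return 0;
    }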
" "Keep only current state of the objects(true)") @@ -428,6 +435,7 @@ void detail::es_objects_plugin_impl::plugin_options::init(const boost::program_o utilities::get_program_option( options, "es-objects-asset-bitasset-store-updates", asset_bitasset.store_updates ); utilities::get_program_option( options, "es-objects-budget-records", budget.enabled ); utilities::get_program_option( options, "es-objects-index-prefix", index_prefix ); + utilities::get_program_option( options, "es-objects-max-mapping-depth", max_mapping_depth ); utilities::get_program_option( options, "es-objects-start-es-after-block", start_es_after_block ); utilities::get_program_option( options, "es-objects-sync-db-on-startup", sync_db_on_startup ); } diff --git a/libraries/utilities/elasticsearch.cpp b/libraries/utilities/elasticsearch.cpp index f49d1eef4c..053fc725f5 100644 --- a/libraries/utilities/elasticsearch.cpp +++ b/libraries/utilities/elasticsearch.cpp @@ -260,8 +260,15 @@ std::string es_client::query( const std::string& path, const std::string& query return response.content; } -fc::variant es_data_adaptor::adapt(const fc::variant_object& op) +fc::variant es_data_adaptor::adapt( const fc::variant_object& op, uint16_t max_depth ) { + if( 0 == max_depth ) + { + fc::variant v; + fc::to_variant(fc::json::to_string(op), v, FC_PACK_MAX_DEPTH); + return v; + } + fc::mutable_variant_object o(op); // Note: these fields are maps, but were stored in ES as flattened arrays @@ -297,12 +304,14 @@ fc::variant es_data_adaptor::adapt(const fc::variant_object& op) const auto& vo = element.get_object(); if( vo.contains(name.c_str()) ) // transfer_operation.amount.amount keys_to_rename.emplace_back(name); - element = adapt(vo); + element = adapt( vo, max_depth - 1 ); } else if( element.is_array() ) { auto& array = element.get_array(); - if( to_string_fields.find(name) != to_string_fields.end() ) + if( 1u == max_depth ) + element = fc::json::to_string(element); + else if( to_string_fields.find(name) != to_string_fields.end() ) { // make a backup and convert to string original_arrays.emplace_back( name, array ); @@ -313,10 +322,10 @@ fc::variant es_data_adaptor::adapt(const fc::variant_object& op) // make a backup and adapt the original auto backup = array; original_arrays.emplace_back( name, backup ); - adapt(array); + in_situ_adapt( array, max_depth - 1 ); } else - adapt(array); + in_situ_adapt( array, max_depth - 1 ); } } @@ -345,7 +354,7 @@ fc::variant es_data_adaptor::adapt(const fc::variant_object& op) auto type = data_type::map_type; if( to_string_fields.find(name) != to_string_fields.end() ) type = to_string_fields.at(name); - o[name + "_object"] = adapt( value, type ); + o[name + "_object"] = adapt( value, type, max_depth - 1 ); } fc::variant v; @@ -353,10 +362,17 @@ fc::variant es_data_adaptor::adapt(const fc::variant_object& op) return v; } -fc::variant es_data_adaptor::adapt( const fc::variants& v, data_type type ) +fc::variant es_data_adaptor::adapt( const fc::variants& v, data_type type, uint16_t max_depth ) { + if( 0 == max_depth ) + { + fc::variant nv; + fc::to_variant(fc::json::to_string(v), nv, FC_PACK_MAX_DEPTH); + return nv; + } + if( data_type::static_variant_type == type ) - return adapt_static_variant(v); + return adapt_static_variant( v, max_depth ); // map_type or array_type fc::variants vs; @@ -366,12 +382,12 @@ fc::variant es_data_adaptor::adapt( const fc::variants& v, data_type type ) if( item.is_array() ) { if( data_type::map_type == type ) - vs.push_back( adapt_map_item( item.get_array() ) ); + 
vs.push_back( adapt_map_item( item.get_array(), max_depth - 1 ) ); else // assume it is a static_variant array - vs.push_back( adapt_static_variant( item.get_array() ) ); + vs.push_back( adapt_static_variant( item.get_array(), max_depth - 1 ) ); } else if( item.is_object() ) // object array - vs.push_back( adapt( item.get_object() ) ); + vs.push_back( adapt( item.get_object(), max_depth - 1 ) ); else wlog( "Type of item is unexpected: ${item}", ("item", item) ); } @@ -382,10 +398,10 @@ fc::variant es_data_adaptor::adapt( const fc::variants& v, data_type type ) } void es_data_adaptor::extract_data_from_variant( - const fc::variant& v, fc::mutable_variant_object& mv, const std::string& prefix ) + const fc::variant& v, fc::mutable_variant_object& mv, const std::string& prefix, uint16_t max_depth ) { - if( v.is_object() ) - mv[prefix + "_object"] = adapt( v.get_object() ); + if( v.is_object() && 0 < max_depth ) + mv[prefix + "_object"] = adapt( v.get_object(), max_depth - 1 ); else if( v.is_int64() || v.is_uint64() ) mv[prefix + "_int"] = v; else if( v.is_bool() ) @@ -398,40 +414,61 @@ void es_data_adaptor::extract_data_from_variant( // and static_variants (i.e. in custom authorities) and maps (if any) are converted to strings too. } -fc::variant es_data_adaptor::adapt_map_item( const fc::variants& v ) +fc::variant es_data_adaptor::adapt_map_item( const fc::variants& v, uint16_t max_depth ) { + if( 0 == max_depth ) + { + fc::variant nv; + fc::to_variant(fc::json::to_string(v), nv, FC_PACK_MAX_DEPTH); + return nv; + } + FC_ASSERT( v.size() == 2, "Internal error" ); fc::mutable_variant_object mv; - extract_data_from_variant( v[0], mv, "key" ); - extract_data_from_variant( v[1], mv, "data" ); + extract_data_from_variant( v[0], mv, "key", max_depth - 1 ); + extract_data_from_variant( v[1], mv, "data", max_depth - 1 ); fc::variant nv; fc::to_variant( mv, nv, FC_PACK_MAX_DEPTH ); return nv; } -fc::variant es_data_adaptor::adapt_static_variant( const fc::variants& v ) +fc::variant es_data_adaptor::adapt_static_variant( const fc::variants& v, uint16_t max_depth ) { + if( 0 == max_depth ) + { + fc::variant nv; + fc::to_variant(fc::json::to_string(v), nv, FC_PACK_MAX_DEPTH); + return nv; + } + FC_ASSERT( v.size() == 2, "Internal error" ); fc::mutable_variant_object mv; mv["which"] = v[0]; - extract_data_from_variant( v[1], mv, "data" ); + extract_data_from_variant( v[1], mv, "data", max_depth - 1 ); fc::variant nv; fc::to_variant( mv, nv, FC_PACK_MAX_DEPTH ); return nv; } -void es_data_adaptor::adapt(fc::variants& v) +void es_data_adaptor::in_situ_adapt( fc::variants& v, uint16_t max_depth ) { - for (auto& array_element : v) + FC_ASSERT( max_depth > 0, "Internal error" ); + + for( auto& array_element : v ) { - if (array_element.is_object()) - array_element = adapt(array_element.get_object()); - else if (array_element.is_array()) - adapt(array_element.get_array()); + if( array_element.is_object() ) + array_element = adapt( array_element.get_object(), max_depth - 1 ); + else if( array_element.is_array() ) + { + if( 1u == max_depth ) + array_element = fc::json::to_string( array_element ); + else + in_situ_adapt( array_element.get_array(), max_depth - 1 ); + } else array_element = array_element.as_string(); } diff --git a/libraries/utilities/include/graphene/utilities/elasticsearch.hpp b/libraries/utilities/include/graphene/utilities/elasticsearch.hpp index 3b72d9b2aa..45d8982f62 100644 --- a/libraries/utilities/include/graphene/utilities/elasticsearch.hpp +++ 
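The max_depth parameter threaded through es_data_adaptor above caps how deeply nested the adapted document can become. The sketch below is a simplified model of that idea, assuming the fc library and deliberately omitting the adaptor's special handling of arrays, maps and static_variants: while recursing, a depth budget is decremented, and once it is spent the remaining subtree is stored as a single JSON string so the indexed document stays within index.mapping.depth.limit.

    // Simplified depth-limiting sketch, not the adaptor's exact code.
    #include <fc/io/json.hpp>
    #include <fc/variant.hpp>
    #include <fc/variant_object.hpp>

    #include <cstdint>

    static fc::variant adapt_sketch( const fc::variant_object& obj, uint16_t max_depth )
    {
       if( 0 == max_depth )
       {
          // Depth budget exhausted: collapse the whole subtree into one string value
          fc::variant collapsed;
          fc::to_variant( fc::json::to_string( obj ), collapsed, 1 );
          return collapsed;
       }

       fc::mutable_variant_object result;
       for( const auto& entry : obj )
       {
          if( entry.value().is_object() )
             result[entry.key()] = adapt_sketch( entry.value().get_object(), max_depth - 1 );
          else
             result[entry.key()] = entry.value(); // scalars and arrays left as-is in this sketch
       }

       fc::variant adapted;
       fc::to_variant( result, adapted, 20 ); // 20 matches the ES default depth limit
       return adapted;
    }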
b/libraries/utilities/include/graphene/utilities/elasticsearch.hpp @@ -122,21 +122,22 @@ struct es_data_adaptor array_type // can be simple arrays, object arrays, static_variant arrays, or even nested arrays }; - static fc::variant adapt( const fc::variant_object& op ); + static fc::variant adapt( const fc::variant_object& op, uint16_t max_depth ); - static fc::variant adapt( const fc::variants& v, data_type type ); + static fc::variant adapt( const fc::variants& v, data_type type, uint16_t max_depth ); - static fc::variant adapt_map_item( const fc::variants& v ); + static fc::variant adapt_map_item( const fc::variants& v, uint16_t max_depth ); - static fc::variant adapt_static_variant( const fc::variants& v ); + static fc::variant adapt_static_variant( const fc::variants& v, uint16_t max_depth ); - /// In-place update - static void adapt( fc::variants& v ); + /// Update directly, no return + static void in_situ_adapt( fc::variants& v, uint16_t max_depth ); /// Extract data from @p v into @p mv static void extract_data_from_variant( const fc::variant& v, fc::mutable_variant_object& mv, - const std::string& prefix ); + const std::string& prefix, + uint16_t max_depth ); }; From e529705d8addea8308d79463de6ff60f4bc7be04 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 3 Jul 2022 19:43:44 +0000 Subject: [PATCH 116/338] Fix code smells --- libraries/plugins/elasticsearch/elasticsearch_plugin.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index 14d20b6d20..4180ce6470 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -258,15 +258,16 @@ void elasticsearch_plugin_impl::doOperationHistory( const optional virtual_op; if(_options.operation_object) { + constexpr uint16_t current_depth = 2; // op oho->op.visit(fc::from_static_variant(os.op_object, FC_PACK_MAX_DEPTH)); os.op_object = graphene::utilities::es_data_adaptor::adapt( os.op_object.get_object(), - _options.max_mapping_depth - 2 ); + _options.max_mapping_depth - current_depth ); // operation_result variant v; fc::to_variant( oho->result, v, FC_PACK_MAX_DEPTH ); os.operation_result_object = graphene::utilities::es_data_adaptor::adapt_static_variant( v.get_array(), - _options.max_mapping_depth - 2 ); + _options.max_mapping_depth - current_depth ); } if(_options.operation_string) os.op = fc::json::to_string(oho->op); From 9a135e065799eccb31aceba1bac4879ab200ec7a Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 4 Jul 2022 15:52:09 +0000 Subject: [PATCH 117/338] Fix depth calculation when adapting data for ES --- libraries/utilities/elasticsearch.cpp | 53 +++++++++++---------------- 1 file changed, 21 insertions(+), 32 deletions(-) diff --git a/libraries/utilities/elasticsearch.cpp b/libraries/utilities/elasticsearch.cpp index 053fc725f5..a8e2581e73 100644 --- a/libraries/utilities/elasticsearch.cpp +++ b/libraries/utilities/elasticsearch.cpp @@ -309,19 +309,21 @@ fc::variant es_data_adaptor::adapt( const fc::variant_object& op, uint16_t max_d else if( element.is_array() ) { auto& array = element.get_array(); - if( 1u == max_depth ) - element = fc::json::to_string(element); - else if( to_string_fields.find(name) != to_string_fields.end() ) + if( to_string_fields.find(name) != to_string_fields.end() ) { - // make a backup and convert to string - original_arrays.emplace_back( name, array ); + // make a backup (only if depth 
is sufficient) and convert to string + if( max_depth > 1 ) + original_arrays.emplace_back( name, array ); element = fc::json::to_string(element); } else if( flattened_fields.find(name) != flattened_fields.end() ) { - // make a backup and adapt the original - auto backup = array; - original_arrays.emplace_back( name, backup ); + // make a backup (only if depth is sufficient) and adapt the original + if( max_depth > 1 ) + { + auto backup = array; + original_arrays.emplace_back( name, std::move( backup ) ); + } in_situ_adapt( array, max_depth - 1 ); } else @@ -353,7 +355,7 @@ fc::variant es_data_adaptor::adapt( const fc::variant_object& op, uint16_t max_d auto& value = pair.second; auto type = data_type::map_type; if( to_string_fields.find(name) != to_string_fields.end() ) - type = to_string_fields.at(name); + type = to_string_fields.at(name); o[name + "_object"] = adapt( value, type, max_depth - 1 ); } @@ -364,13 +366,6 @@ fc::variant es_data_adaptor::adapt( const fc::variant_object& op, uint16_t max_d fc::variant es_data_adaptor::adapt( const fc::variants& v, data_type type, uint16_t max_depth ) { - if( 0 == max_depth ) - { - fc::variant nv; - fc::to_variant(fc::json::to_string(v), nv, FC_PACK_MAX_DEPTH); - return nv; - } - if( data_type::static_variant_type == type ) return adapt_static_variant( v, max_depth ); @@ -382,12 +377,12 @@ fc::variant es_data_adaptor::adapt( const fc::variants& v, data_type type, uint1 if( item.is_array() ) { if( data_type::map_type == type ) - vs.push_back( adapt_map_item( item.get_array(), max_depth - 1 ) ); + vs.push_back( adapt_map_item( item.get_array(), max_depth ) ); else // assume it is a static_variant array - vs.push_back( adapt_static_variant( item.get_array(), max_depth - 1 ) ); + vs.push_back( adapt_static_variant( item.get_array(), max_depth ) ); } else if( item.is_object() ) // object array - vs.push_back( adapt( item.get_object(), max_depth - 1 ) ); + vs.push_back( adapt( item.get_object(), max_depth ) ); else wlog( "Type of item is unexpected: ${item}", ("item", item) ); } @@ -400,7 +395,8 @@ fc::variant es_data_adaptor::adapt( const fc::variants& v, data_type type, uint1 void es_data_adaptor::extract_data_from_variant( const fc::variant& v, fc::mutable_variant_object& mv, const std::string& prefix, uint16_t max_depth ) { - if( v.is_object() && 0 < max_depth ) + FC_ASSERT( max_depth > 0, "Internal error" ); + if( v.is_object() ) mv[prefix + "_object"] = adapt( v.get_object(), max_depth - 1 ); else if( v.is_int64() || v.is_uint64() ) mv[prefix + "_int"] = v; @@ -426,8 +422,8 @@ fc::variant es_data_adaptor::adapt_map_item( const fc::variants& v, uint16_t max FC_ASSERT( v.size() == 2, "Internal error" ); fc::mutable_variant_object mv; - extract_data_from_variant( v[0], mv, "key", max_depth - 1 ); - extract_data_from_variant( v[1], mv, "data", max_depth - 1 ); + extract_data_from_variant( v[0], mv, "key", max_depth ); + extract_data_from_variant( v[1], mv, "data", max_depth ); fc::variant nv; fc::to_variant( mv, nv, FC_PACK_MAX_DEPTH ); @@ -447,7 +443,7 @@ fc::variant es_data_adaptor::adapt_static_variant( const fc::variants& v, uint16 fc::mutable_variant_object mv; mv["which"] = v[0]; - extract_data_from_variant( v[1], mv, "data", max_depth - 1 ); + extract_data_from_variant( v[1], mv, "data", max_depth ); fc::variant nv; fc::to_variant( mv, nv, FC_PACK_MAX_DEPTH ); @@ -456,19 +452,12 @@ fc::variant es_data_adaptor::adapt_static_variant( const fc::variants& v, uint16 void es_data_adaptor::in_situ_adapt( fc::variants& v, uint16_t max_depth ) { - 
FC_ASSERT( max_depth > 0, "Internal error" ); - for( auto& array_element : v ) { if( array_element.is_object() ) - array_element = adapt( array_element.get_object(), max_depth - 1 ); + array_element = adapt( array_element.get_object(), max_depth ); else if( array_element.is_array() ) - { - if( 1u == max_depth ) - array_element = fc::json::to_string( array_element ); - else - in_situ_adapt( array_element.get_array(), max_depth - 1 ); - } + in_situ_adapt( array_element.get_array(), max_depth ); else array_element = array_element.as_string(); } From d0ba2cd49e4056482323f44e21735d1483e52987 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 4 Jul 2022 16:21:15 +0000 Subject: [PATCH 118/338] Simplify code --- libraries/utilities/elasticsearch.cpp | 40 ++++++++++++++------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/libraries/utilities/elasticsearch.cpp b/libraries/utilities/elasticsearch.cpp index a8e2581e73..213f3a1833 100644 --- a/libraries/utilities/elasticsearch.cpp +++ b/libraries/utilities/elasticsearch.cpp @@ -305,30 +305,32 @@ fc::variant es_data_adaptor::adapt( const fc::variant_object& op, uint16_t max_d if( vo.contains(name.c_str()) ) // transfer_operation.amount.amount keys_to_rename.emplace_back(name); element = adapt( vo, max_depth - 1 ); + continue; } - else if( element.is_array() ) + + if( !element.is_array() ) + continue; + + auto& array = element.get_array(); + if( to_string_fields.find(name) != to_string_fields.end() ) { - auto& array = element.get_array(); - if( to_string_fields.find(name) != to_string_fields.end() ) - { - // make a backup (only if depth is sufficient) and convert to string - if( max_depth > 1 ) - original_arrays.emplace_back( name, array ); - element = fc::json::to_string(element); - } - else if( flattened_fields.find(name) != flattened_fields.end() ) + // make a backup (only if depth is sufficient) and convert to string + if( max_depth > 1 ) + original_arrays.emplace_back( name, array ); + element = fc::json::to_string(element); + } + else if( flattened_fields.find(name) != flattened_fields.end() ) + { + // make a backup (only if depth is sufficient) and adapt the original + if( max_depth > 1 ) { - // make a backup (only if depth is sufficient) and adapt the original - if( max_depth > 1 ) - { - auto backup = array; - original_arrays.emplace_back( name, std::move( backup ) ); - } - in_situ_adapt( array, max_depth - 1 ); + auto backup = array; + original_arrays.emplace_back( name, std::move( backup ) ); } - else - in_situ_adapt( array, max_depth - 1 ); + in_situ_adapt( array, max_depth - 1 ); } + else + in_situ_adapt( array, max_depth - 1 ); } for( const auto& i : keys_to_rename ) // transfer_operation.amount From bb697cb18361f627590078e226a76ae490e482de Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 6 Jul 2022 16:22:55 +0000 Subject: [PATCH 119/338] Use ES 7.17.5 in Github Actions workflows --- .github/workflows/build-and-test.ubuntu-debug.yml | 2 +- .github/workflows/build-and-test.ubuntu-release.yml | 2 +- .github/workflows/sonar-scan.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build-and-test.ubuntu-debug.yml b/.github/workflows/build-and-test.ubuntu-debug.yml index 2615bf7d31..5e9f33a409 100644 --- a/.github/workflows/build-and-test.ubuntu-debug.yml +++ b/.github/workflows/build-and-test.ubuntu-debug.yml @@ -12,7 +12,7 @@ jobs: runs-on: ${{ matrix.os }} services: elasticsearch: - image: docker://elasticsearch:7.16.2 + image: docker://elasticsearch:7.17.5 options: --env 
discovery.type=single-node --publish 9200:9200 --publish 9300:9300 steps: - name: Install dependencies diff --git a/.github/workflows/build-and-test.ubuntu-release.yml b/.github/workflows/build-and-test.ubuntu-release.yml index ba01da96d6..3a26dcfcd1 100644 --- a/.github/workflows/build-and-test.ubuntu-release.yml +++ b/.github/workflows/build-and-test.ubuntu-release.yml @@ -12,7 +12,7 @@ jobs: runs-on: ${{ matrix.os }} services: elasticsearch: - image: docker://elasticsearch:7.16.2 + image: docker://elasticsearch:7.17.5 options: --env discovery.type=single-node --publish 9200:9200 --publish 9300:9300 steps: - name: Install dependencies diff --git a/.github/workflows/sonar-scan.yml b/.github/workflows/sonar-scan.yml index ffb5bfaf01..94aba4f6d5 100644 --- a/.github/workflows/sonar-scan.yml +++ b/.github/workflows/sonar-scan.yml @@ -12,7 +12,7 @@ jobs: runs-on: ${{ matrix.os }} services: elasticsearch: - image: docker://elasticsearch:7.10.1 + image: docker://elasticsearch:7.17.5 options: --env discovery.type=single-node --publish 9200:9200 --publish 9300:9300 steps: - name: Download and install latest SonarScanner CLI tool From 6a215f9fe2ae3f293629ebcc55531c545684828d Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 6 Jul 2022 18:49:33 +0000 Subject: [PATCH 120/338] Use make -j 1 in sonar-scan workflow --- .github/workflows/sonar-scan.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sonar-scan.yml b/.github/workflows/sonar-scan.yml index 94aba4f6d5..4aa774c40f 100644 --- a/.github/workflows/sonar-scan.yml +++ b/.github/workflows/sonar-scan.yml @@ -111,7 +111,7 @@ jobs: export CCACHE_DIR="$GITHUB_WORKSPACE/ccache" mkdir -p "$CCACHE_DIR" df -h - programs/build_helpers/make_with_sonar bw-output -j 2 -C _build \ + programs/build_helpers/make_with_sonar bw-output -j 1 -C _build \ witness_node cli_wallet js_operation_serializer get_dev_key network_mapper \ app_test chain_test cli_test es_test df -h From 848b80257c351f2c290441bd19b2c88c338f2782 Mon Sep 17 00:00:00 2001 From: Abit Date: Fri, 8 Jul 2022 18:42:03 +0200 Subject: [PATCH 121/338] Add a swap file in sonar-scan workflow --- .github/workflows/sonar-scan.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/sonar-scan.yml b/.github/workflows/sonar-scan.yml index 4aa774c40f..1d840963c3 100644 --- a/.github/workflows/sonar-scan.yml +++ b/.github/workflows/sonar-scan.yml @@ -69,6 +69,12 @@ jobs: run: | pwd df -h . 
+ free + sudo dd if=/dev/zero of=/swapfile bs=1024 count=4M + sudo chmod 600 /swapfile + sudo mkswap /swapfile + sudo swapon /swapfile + free mkdir -p _build sudo mkdir -p /_build/libraries /_build/programs /mnt/_build/tests sudo chmod a+rwx /_build/libraries /_build/programs /mnt/_build/tests From 5d4d8af136e78f283c0fe49eadc50e82994aec37 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 8 Jul 2022 17:01:51 +0000 Subject: [PATCH 122/338] Fix code smells --- libraries/app/include/graphene/app/api.hpp | 6 +++--- .../graphene/chain/operation_history_object.hpp | 14 +++++++------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/libraries/app/include/graphene/app/api.hpp b/libraries/app/include/graphene/app/api.hpp index 91c0ba0c94..56b4736b48 100644 --- a/libraries/app/include/graphene/app/api.hpp +++ b/libraries/app/include/graphene/app/api.hpp @@ -194,9 +194,9 @@ namespace graphene { namespace app { /** * @brief Get all operations inside a block or a transaction, including virtual operations * @param block_num the number (height) of the block to fetch - * @param trx_in_block the sequence of a transaction in the block, starts from @a 0, optional, - * if specified, will return only operations of that transaction; - * if omitted, will return all operations in the specified block + * @param trx_in_block the sequence of a transaction in the block, starts from @a 0, optional. + * If specified, will return only operations of that transaction. + * If omitted, will return all operations in the specified block. * @return a list of @a operation_history objects ordered by ID * * @note the data is fetched from @a account_history plugin, thus the result is possible to diff --git a/libraries/chain/include/graphene/chain/operation_history_object.hpp b/libraries/chain/include/graphene/chain/operation_history_object.hpp index 52957d388e..61cba9010d 100644 --- a/libraries/chain/include/graphene/chain/operation_history_object.hpp +++ b/libraries/chain/include/graphene/chain/operation_history_object.hpp @@ -100,7 +100,7 @@ namespace graphene { namespace chain { struct by_block; - typedef multi_index_container< + using operation_history_multi_index_type = multi_index_container< operation_history_object, indexed_by< ordered_unique< tag, member< object, object_id_type, &object::id > >, @@ -113,15 +113,15 @@ namespace graphene { namespace chain { > > > - > operation_history_multi_index_type; + >; - typedef generic_index operation_history_index; + using operation_history_index = generic_index< operation_history_object, operation_history_multi_index_type >; struct by_seq; struct by_op; struct by_opid; - typedef multi_index_container< + using account_transaction_history_multi_index_type = multi_index_container< account_transaction_history_object, indexed_by< ordered_unique< tag, member< object, object_id_type, &object::id > >, @@ -145,10 +145,10 @@ namespace graphene { namespace chain { &account_transaction_history_object::operation_id> > > - > account_transaction_history_multi_index_type; + >; - typedef generic_index account_transaction_history_index; + using account_transaction_history_index = generic_index< account_transaction_history_object, + account_transaction_history_multi_index_type >; } } // graphene::chain From 7250b79515057d43bd0735c874dfb4de2ee3ba90 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 8 Jul 2022 17:09:33 +0000 Subject: [PATCH 123/338] Add tests for get_block_operation_history API --- tests/tests/history_api_tests.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git 
a/tests/tests/history_api_tests.cpp b/tests/tests/history_api_tests.cpp index deab20400d..5f06e85afb 100644 --- a/tests/tests/history_api_tests.cpp +++ b/tests/tests/history_api_tests.cpp @@ -87,6 +87,13 @@ BOOST_AUTO_TEST_CASE(get_account_history) { 100, operation_history_id_type()); BOOST_CHECK_EQUAL(histories.size(), 0u); + // get_block_operation_history + auto head_block_num = db.head_block_num(); + histories = hist_api.get_block_operation_history(head_block_num); + BOOST_CHECK_EQUAL(histories.size(), 3u); + histories = hist_api.get_block_operation_history(head_block_num, 1u); + BOOST_CHECK_EQUAL(histories.size(), 1u); + } catch (fc::exception &e) { edump((e.to_detail_string())); throw; From a84dc87de52152b5aeea77dc03ff2b3e461e08b5 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 10 Jul 2022 12:05:32 +0000 Subject: [PATCH 124/338] Rename account_transaction_hist* to account_hist* --- libraries/app/api.cpp | 14 +++---- libraries/app/database_api.cpp | 2 +- .../app/include/graphene/app/database_api.hpp | 2 +- libraries/chain/db_notify.cpp | 4 +- .../include/graphene/chain/account_object.hpp | 2 +- .../chain/operation_history_object.hpp | 41 ++++++++----------- .../chain/include/graphene/chain/types.hpp | 2 +- libraries/chain/small_objects.cpp | 4 +- .../account_history_plugin.cpp | 10 ++--- .../elasticsearch/elasticsearch_plugin.cpp | 16 ++++---- .../elasticsearch/elasticsearch_plugin.hpp | 2 +- libraries/wallet/wallet.cpp | 2 +- tests/common/database_fixture.cpp | 6 +-- 13 files changed, 51 insertions(+), 56 deletions(-) diff --git a/libraries/app/api.cpp b/libraries/app/api.cpp index 9b6cde9b32..7064835c36 100644 --- a/libraries/app/api.cpp +++ b/libraries/app/api.cpp @@ -363,7 +363,7 @@ namespace graphene { namespace app { account_id_type account; try { account = database_api.get_account_id_from_string(account_id_or_name); - const account_transaction_history_object& node = account(db).statistics(db).most_recent_op(db); + const account_history_object& node = account(db).statistics(db).most_recent_op(db); if(start == operation_history_id_type() || start.instance.value > node.operation_id.instance.value) start = node.operation_id; } catch(...) { return result; } @@ -380,7 +380,7 @@ namespace graphene { namespace app { } } - const auto& hist_idx = db.get_index_type(); + const auto& hist_idx = db.get_index_type(); const auto& by_op_idx = hist_idx.indices().get(); auto index_start = by_op_idx.begin(); auto itr = by_op_idx.lower_bound(boost::make_tuple(account, start)); @@ -418,8 +418,8 @@ namespace graphene { namespace app { account = database_api.get_account_id_from_string(account_id_or_name); } catch(...) 
{ return result; } const auto& stats = account(db).statistics(db); - if( stats.most_recent_op == account_transaction_history_id_type() ) return result; - const account_transaction_history_object* node = &stats.most_recent_op(db); + if( stats.most_recent_op == account_history_id_type() ) return result; + const account_history_object* node = &stats.most_recent_op(db); if( start == operation_history_id_type() ) start = node->operation_id; @@ -430,12 +430,12 @@ namespace graphene { namespace app { if(node->operation_id(db).op.which() == operation_type) result.push_back( node->operation_id(db) ); } - if( node->next == account_transaction_history_id_type() ) + if( node->next == account_history_id_type() ) node = nullptr; else node = &node->next(db); } if( stop.instance.value == 0 && result.size() < limit ) { - auto head = db.find(account_transaction_history_id_type()); + auto head = db.find(account_history_id_type()); if (head != nullptr && head->account == account && head->operation_id(db).op.which() == operation_type) result.push_back(head->operation_id(db)); } @@ -469,7 +469,7 @@ namespace graphene { namespace app { if( start >= stop && start > stats.removed_ops && limit > 0 ) { - const auto& hist_idx = db.get_index_type(); + const auto& hist_idx = db.get_index_type(); const auto& by_seq_idx = hist_idx.indices().get(); auto itr = by_seq_idx.upper_bound( boost::make_tuple( account, start ) ); diff --git a/libraries/app/database_api.cpp b/libraries/app/database_api.cpp index 6dfed13bb7..93b0aa44dc 100644 --- a/libraries/app/database_api.cpp +++ b/libraries/app/database_api.cpp @@ -123,7 +123,7 @@ fc::variants database_api_impl::get_objects( const vector& ids, [this,to_subscribe](object_id_type id) -> fc::variant { if(auto obj = _db.find_object(id)) { - if( to_subscribe && !id.is() && !id.is() ) + if( to_subscribe && !id.is() && !id.is() ) this->subscribe_to_item( id ); return obj->to_variant(); } diff --git a/libraries/app/include/graphene/app/database_api.hpp b/libraries/app/include/graphene/app/database_api.hpp index d62e631184..6db77eb7e9 100644 --- a/libraries/app/include/graphene/app/database_api.hpp +++ b/libraries/app/include/graphene/app/database_api.hpp @@ -84,7 +84,7 @@ class database_api * @a null to subscribe or not subscribe according to current auto-subscription setting * (see @ref set_auto_subscription) * @return The objects retrieved, in the order they are mentioned in ids - * @note operation_history_object (1.11.x) and account_transaction_history_object (2.9.x) + * @note operation_history_object (1.11.x) and account_history_object (2.9.x) * can not be subscribed. * * If any of the provided IDs does not map to an object, a null variant is returned in its position. 
diff --git a/libraries/chain/db_notify.cpp b/libraries/chain/db_notify.cpp index 37b5dd3873..47958336a7 100644 --- a/libraries/chain/db_notify.cpp +++ b/libraries/chain/db_notify.cpp @@ -536,8 +536,8 @@ static void get_relevant_accounts( const object* obj, flat_set& break; } case impl_block_summary_object_type: break; - case impl_account_transaction_history_object_type: { - const auto* aobj = dynamic_cast(obj); + case impl_account_history_object_type: { + const auto* aobj = dynamic_cast(obj); accounts.insert( aobj->account ); break; } case impl_chain_property_object_type: diff --git a/libraries/chain/include/graphene/chain/account_object.hpp b/libraries/chain/include/graphene/chain/account_object.hpp index 9bb6fceaf7..8b1982cf49 100644 --- a/libraries/chain/include/graphene/chain/account_object.hpp +++ b/libraries/chain/include/graphene/chain/account_object.hpp @@ -56,7 +56,7 @@ namespace graphene { namespace chain { /** * Keep the most recent operation as a root pointer to a linked list of the transaction history. */ - account_transaction_history_id_type most_recent_op; + account_history_id_type most_recent_op; /** Total operations related to this account. */ uint64_t total_ops = 0; /** Total operations related to this account that has been removed from the database. */ diff --git a/libraries/chain/include/graphene/chain/operation_history_object.hpp b/libraries/chain/include/graphene/chain/operation_history_object.hpp index 61cba9010d..086e3e440f 100644 --- a/libraries/chain/include/graphene/chain/operation_history_object.hpp +++ b/libraries/chain/include/graphene/chain/operation_history_object.hpp @@ -87,20 +87,20 @@ namespace graphene { namespace chain { * linked list can be traversed with relatively effecient disk access because * of the use of a memory mapped stack. 
*/ - class account_transaction_history_object : public abstract_object + class account_history_object : public abstract_object { public: static constexpr uint8_t space_id = implementation_ids; - static constexpr uint8_t type_id = impl_account_transaction_history_object_type; + static constexpr uint8_t type_id = impl_account_history_object_type; account_id_type account; /// the account this operation applies to operation_history_id_type operation_id; uint64_t sequence = 0; /// the operation position within the given account - account_transaction_history_id_type next; + account_history_id_type next; }; struct by_block; - using operation_history_multi_index_type = multi_index_container< + using operation_history_mlti_idx_type = multi_index_container< operation_history_object, indexed_by< ordered_unique< tag, member< object, object_id_type, &object::id > >, @@ -115,49 +115,44 @@ namespace graphene { namespace chain { > >; - using operation_history_index = generic_index< operation_history_object, operation_history_multi_index_type >; + using operation_history_index = generic_index< operation_history_object, operation_history_mlti_idx_type >; struct by_seq; struct by_op; struct by_opid; - using account_transaction_history_multi_index_type = multi_index_container< - account_transaction_history_object, + using account_history_multi_idx_type = multi_index_container< + account_history_object, indexed_by< ordered_unique< tag, member< object, object_id_type, &object::id > >, ordered_unique< tag, - composite_key< account_transaction_history_object, - member< account_transaction_history_object, account_id_type, - &account_transaction_history_object::account>, - member< account_transaction_history_object, uint64_t, &account_transaction_history_object::sequence> + composite_key< account_history_object, + member< account_history_object, account_id_type, &account_history_object::account>, + member< account_history_object, uint64_t, &account_history_object::sequence> > >, ordered_unique< tag, - composite_key< account_transaction_history_object, - member< account_transaction_history_object, account_id_type, - &account_transaction_history_object::account>, - member< account_transaction_history_object, operation_history_id_type, - &account_transaction_history_object::operation_id> + composite_key< account_history_object, + member< account_history_object, account_id_type, &account_history_object::account>, + member< account_history_object, operation_history_id_type, &account_history_object::operation_id> > >, ordered_non_unique< tag, - member< account_transaction_history_object, operation_history_id_type, - &account_transaction_history_object::operation_id> + member< account_history_object, operation_history_id_type, &account_history_object::operation_id> > > >; - using account_transaction_history_index = generic_index< account_transaction_history_object, - account_transaction_history_multi_index_type >; + using account_history_index = generic_index< account_history_object, account_history_multi_idx_type >; } } // graphene::chain MAP_OBJECT_ID_TO_TYPE(graphene::chain::operation_history_object) -MAP_OBJECT_ID_TO_TYPE(graphene::chain::account_transaction_history_object) +MAP_OBJECT_ID_TO_TYPE(graphene::chain::account_history_object) FC_REFLECT_TYPENAME( graphene::chain::operation_history_object ) -FC_REFLECT_TYPENAME( graphene::chain::account_transaction_history_object ) +FC_REFLECT_TYPENAME( graphene::chain::account_history_object ) GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( 
graphene::chain::operation_history_object ) -GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::chain::account_transaction_history_object ) +GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::chain::account_history_object ) diff --git a/libraries/chain/include/graphene/chain/types.hpp b/libraries/chain/include/graphene/chain/types.hpp index 3c7b46eeb3..00e3cf9ebb 100644 --- a/libraries/chain/include/graphene/chain/types.hpp +++ b/libraries/chain/include/graphene/chain/types.hpp @@ -38,7 +38,7 @@ GRAPHENE_DEFINE_IDS(chain, implementation_ids, impl_, /* 2.6.x */ (account_statistics) /* 2.7.x */ (transaction_history) /* 2.8.x */ (block_summary) - /* 2.9.x */ (account_transaction_history) + /* 2.9.x */ (account_history) /* 2.10.x */ (blinded_balance) /* 2.11.x */ (chain_property) /* 2.12.x */ (witness_schedule) diff --git a/libraries/chain/small_objects.cpp b/libraries/chain/small_objects.cpp index d2aa0664aa..87e58e2586 100644 --- a/libraries/chain/small_objects.cpp +++ b/libraries/chain/small_objects.cpp @@ -141,7 +141,7 @@ FC_REFLECT_DERIVED_NO_TYPENAME( graphene::chain::htlc_object, (graphene::db::obj FC_REFLECT_DERIVED_NO_TYPENAME( graphene::chain::operation_history_object, (graphene::chain::object), (op)(result)(block_num)(trx_in_block)(op_in_trx)(virtual_op) ) -FC_REFLECT_DERIVED_NO_TYPENAME( graphene::chain::account_transaction_history_object, (graphene::chain::object), +FC_REFLECT_DERIVED_NO_TYPENAME( graphene::chain::account_history_object, (graphene::chain::object), (account)(operation_id)(sequence)(next) ) FC_REFLECT_DERIVED_NO_TYPENAME( @@ -258,7 +258,7 @@ GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::chain::dynamic_global_prope GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::chain::global_property_object ) GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::chain::htlc_object ) GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::chain::operation_history_object ) -GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::chain::account_transaction_history_object ) +GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::chain::account_history_object ) GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::chain::special_authority_object ) GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::chain::transaction_history_object ) GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::chain::withdraw_permission_object ) diff --git a/libraries/plugins/account_history/account_history_plugin.cpp b/libraries/plugins/account_history/account_history_plugin.cpp index 9561315ebf..8270af3f31 100644 --- a/libraries/plugins/account_history/account_history_plugin.cpp +++ b/libraries/plugins/account_history/account_history_plugin.cpp @@ -216,7 +216,7 @@ void account_history_plugin_impl::add_account_history( const account_id_type acc graphene::chain::database& db = database(); const auto& stats_obj = account_id(db).statistics(db); // add new entry - const auto& ath = db.create( [&]( account_transaction_history_object& obj ){ + const auto& ath = db.create( [&]( account_history_object& obj ){ obj.operation_id = op_id; obj.account = account_id; obj.sequence = stats_obj.total_ops + 1; @@ -242,7 +242,7 @@ void account_history_plugin_impl::add_account_history( const account_id_type acc if( stats_obj.total_ops - stats_obj.removed_ops > max_ops_to_keep ) { // look for the earliest entry - const auto& his_idx = db.get_index_type(); + const auto& his_idx = db.get_index_type(); const auto& by_seq_idx = his_idx.indices().get(); auto itr = by_seq_idx.lower_bound( boost::make_tuple( account_id, 0 ) ); // make 
sure don't remove the one just added @@ -260,8 +260,8 @@ void account_history_plugin_impl::add_account_history( const account_id_type acc // this should be always true, but just have a check here if( itr != by_seq_idx.end() && itr->account == account_id ) { - db.modify( *itr, [&]( account_transaction_history_object& obj ){ - obj.next = account_transaction_history_id_type(); + db.modify( *itr, [&]( account_history_object& obj ){ + obj.next = account_history_id_type(); }); } // else need to modify the head pointer, but it shouldn't be true @@ -330,7 +330,7 @@ void account_history_plugin::plugin_initialize(const boost::program_options::var { database().applied_block.connect( [&]( const signed_block& b){ my->update_account_histories(b); } ); my->_oho_index = database().add_index< primary_index< operation_history_index > >(); - database().add_index< primary_index< account_transaction_history_index > >(); + database().add_index< primary_index< account_history_index > >(); LOAD_VALUE_SET(options, "track-account", my->_tracked_accounts, graphene::chain::account_id_type); if (options.count("partial-operations") > 0) { diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index 4180ce6470..465b497b3e 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -101,7 +101,7 @@ class elasticsearch_plugin_impl void doBlock(uint32_t trx_in_block, const signed_block& b, block_struct& bs) const; void doVisitor(const optional & oho, visitor_struct& vs) const; void checkState(const fc::time_point_sec& block_time); - void cleanObjects(const account_transaction_history_id_type& ath, const account_id_type& account_id); + void cleanObjects(const account_history_id_type& ath, const account_id_type& account_id); void init_program_options(const boost::program_options::variables_map& options); }; @@ -392,8 +392,8 @@ void elasticsearch_plugin_impl::add_elasticsearch( const account_id_type& accoun const auto &stats_obj = db.get_account_stats_by_owner( account_id ); - const auto &ath = db.create( - [&oho,&account_id,&stats_obj]( account_transaction_history_object &obj ) { + const auto &ath = db.create( + [&oho,&account_id,&stats_obj]( account_history_object &obj ) { obj.operation_id = oho->id; obj.account = account_id; obj.sequence = stats_obj.total_ops + 1; @@ -425,12 +425,12 @@ void elasticsearch_plugin_impl::add_elasticsearch( const account_id_type& accoun cleanObjects(ath.id, account_id); } -void elasticsearch_plugin_impl::cleanObjects( const account_transaction_history_id_type& ath_id, +void elasticsearch_plugin_impl::cleanObjects( const account_history_id_type& ath_id, const account_id_type& account_id ) { graphene::chain::database& db = database(); // remove everything except current object from ath - const auto &his_idx = db.get_index_type(); + const auto &his_idx = db.get_index_type(); const auto &by_seq_idx = his_idx.indices().get(); auto itr = by_seq_idx.lower_bound(boost::make_tuple(account_id, 0)); if (itr != by_seq_idx.end() && itr->account == account_id && itr->id != ath_id) { @@ -443,8 +443,8 @@ void elasticsearch_plugin_impl::cleanObjects( const account_transaction_history_ // this should be always true, but just have a check here if( itr != by_seq_idx.end() && itr->account == account_id ) { - db.modify( *itr, [&]( account_transaction_history_object& obj ){ - obj.next = account_transaction_history_id_type(); + db.modify( *itr, [&]( 
account_history_object& obj ){ + obj.next = account_history_id_type(); }); } // do the same on oho @@ -555,7 +555,7 @@ void elasticsearch_plugin::plugin_initialize(const boost::program_options::varia my->init_program_options( options ); my->_oho_index = database().add_index< primary_index< operation_history_index > >(); - database().add_index< primary_index< account_transaction_history_index > >(); + database().add_index< primary_index< account_history_index > >(); if( my->_options.elasticsearch_mode != mode::only_query ) { diff --git a/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp b/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp index 1b6c768a1b..d25770b017 100644 --- a/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp +++ b/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp @@ -134,7 +134,7 @@ struct visitor_struct { }; struct bulk_struct { - account_transaction_history_object account_history; + account_history_object account_history; operation_history_struct operation_history; int64_t operation_type; uint64_t operation_id_num; diff --git a/libraries/wallet/wallet.cpp b/libraries/wallet/wallet.cpp index 5662592264..4c79a4c167 100644 --- a/libraries/wallet/wallet.cpp +++ b/libraries/wallet/wallet.cpp @@ -385,7 +385,7 @@ account_history_operation_detail wallet_api::get_account_history_by_operations( const auto& account = my->get_account(name); const auto& stats = my->get_object(account.statistics); - // sequence of account_transaction_history_object start with 1 + // sequence of account_history_object start with 1 start = start == 0 ? 1 : start; if (start <= stats.removed_ops) { diff --git a/tests/common/database_fixture.cpp b/tests/common/database_fixture.cpp index e689a78312..32bba5d53d 100644 --- a/tests/common/database_fixture.cpp +++ b/tests/common/database_fixture.cpp @@ -2047,14 +2047,14 @@ vector< operation_history_object > database_fixture_base::get_operation_history( { vector< operation_history_object > result; const auto& stats = account_id(db).statistics(db); - if(stats.most_recent_op == account_transaction_history_id_type()) + if(stats.most_recent_op == account_history_id_type()) return result; - const account_transaction_history_object* node = &stats.most_recent_op(db); + const account_history_object* node = &stats.most_recent_op(db); while( true ) { result.push_back( node->operation_id(db) ); - if(node->next == account_transaction_history_id_type()) + if(node->next == account_history_id_type()) break; node = db.find(node->next); } From 6ac5655dbc3bf4e56e597cf61724e204968052e1 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 10 Jul 2022 22:11:37 +0000 Subject: [PATCH 125/338] Refactor API classes Avoid wrapping heavy database_api objects in other API classes, and fix some code smells by the way. 
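(Illustrative sketch, not part of the patch: the diff below removes the heavyweight database_api members previously held by history_api, asset_api, orders_api and custom_operations_api and instead builds a database_api_helper on demand. Using only the names introduced in that diff, a typical lookup then reads roughly as follows; the surrounding graphene types are assumed from the codebase.)

   // Sketch only: resolve an account name or ID via the lightweight helper
   // instead of keeping a full database_api instance alive inside the API class.
   account_id_type resolve_account( graphene::app::application& app,
                                    const std::string& account_id_or_name )
   {
      graphene::app::database_api_helper db_api_helper( app );                  // stores references only
      return db_api_helper.get_account_from_string( account_id_or_name )->id;   // throws if not found
   }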
--- libraries/app/api.cpp | 111 +++++++++++------- libraries/app/application.cpp | 2 +- libraries/app/database_api.cpp | 24 +++- libraries/app/database_api_helper.hxx | 99 ++++++++++++++++ libraries/app/database_api_impl.hxx | 73 +----------- libraries/app/include/graphene/app/api.hpp | 65 +++++----- .../app/include/graphene/app/application.hpp | 2 +- 7 files changed, 222 insertions(+), 154 deletions(-) create mode 100644 libraries/app/database_api_helper.hxx diff --git a/libraries/app/api.cpp b/libraries/app/api.cpp index 7064835c36..c7a887e796 100644 --- a/libraries/app/api.cpp +++ b/libraries/app/api.cpp @@ -36,6 +36,8 @@ #include #include +#include "database_api_helper.hxx" + #include #include #include @@ -58,10 +60,7 @@ namespace graphene { namespace app { login_api::login_api(application& a) :_app(a) { - } - - login_api::~login_api() - { + // Nothing to do } bool login_api::login(const string& user, const string& password) @@ -90,7 +89,8 @@ namespace graphene { namespace app { { if( api_name == "database_api" ) { - _database_api = std::make_shared< database_api >( std::ref( *_app.chain_database() ), &( _app.get_options() ) ); + _database_api = std::make_shared< database_api >( std::ref( *_app.chain_database() ), + &( _app.get_options() ) ); } else if( api_name == "block_api" ) { @@ -135,8 +135,7 @@ namespace graphene { namespace app { } // block_api - block_api::block_api(graphene::chain::database& db) : _db(db) { } - block_api::~block_api() { } + block_api::block_api(const graphene::chain::database& db) : _db(db) { /* Nothing to do */ } vector> block_api::get_blocks(uint32_t block_num_from, uint32_t block_num_to)const { @@ -150,7 +149,8 @@ namespace graphene { namespace app { network_broadcast_api::network_broadcast_api(application& a):_app(a) { - _applied_block_connection = _app.chain_database()->applied_block.connect([this](const signed_block& b){ on_applied_block(b); }); + _applied_block_connection = _app.chain_database()->applied_block.connect( + [this](const signed_block& b){ on_applied_block(b); }); } void network_broadcast_api::on_applied_block( const signed_block& b ) @@ -168,7 +168,8 @@ namespace graphene { namespace app { { auto block_num = b.block_num(); auto& callback = _callbacks.find(id)->second; - auto v = fc::variant( transaction_confirmation{ id, block_num, trx_num, trx }, GRAPHENE_MAX_NESTED_OBJECTS ); + auto v = fc::variant( transaction_confirmation{ id, block_num, trx_num, trx }, + GRAPHENE_MAX_NESTED_OBJECTS ); fc::async( [capture_this,v,callback]() { callback(v); } ); @@ -214,6 +215,7 @@ namespace graphene { namespace app { network_node_api::network_node_api( application& a ) : _app( a ) { + // Nothing to do } fc::variant_object network_node_api::get_info() const @@ -316,6 +318,11 @@ namespace graphene { namespace app { return *_custom_operations_api; } + history_api::history_api(application& app) + : _app(app) + { // Nothing else to do + } + vector history_api::get_fill_order_history( std::string asset_a, std::string asset_b, uint32_t limit )const { @@ -323,8 +330,9 @@ namespace graphene { namespace app { FC_ASSERT( market_hist_plugin, "Market history plugin is not enabled" ); FC_ASSERT(_app.chain_database()); const auto& db = *_app.chain_database(); - asset_id_type a = database_api.get_asset_id_from_string( asset_a ); - asset_id_type b = database_api.get_asset_id_from_string( asset_b ); + database_api_helper db_api_helper( _app ); + asset_id_type a = db_api_helper.get_asset_from_string( asset_a )->id; + asset_id_type b = db_api_helper.get_asset_from_string( 
asset_b )->id; if( a > b ) std::swap(a,b); const auto& history_idx = db.get_index_type().indices().get(); history_key hkey; @@ -362,7 +370,8 @@ namespace graphene { namespace app { vector result; account_id_type account; try { - account = database_api.get_account_id_from_string(account_id_or_name); + database_api_helper db_api_helper( _app ); + account = db_api_helper.get_account_from_string(account_id_or_name)->id; const account_history_object& node = account(db).statistics(db).most_recent_op(db); if(start == operation_history_id_type() || start.instance.value > node.operation_id.instance.value) start = node.operation_id; @@ -374,7 +383,7 @@ namespace graphene { namespace app { if(!_app.elasticsearch_thread) _app.elasticsearch_thread= std::make_shared("elasticsearch"); - return _app.elasticsearch_thread->async([&es, &account, &stop, &limit, &start]() { + return _app.elasticsearch_thread->async([&es, account, stop, limit, start]() { return es->get_account_history(account, stop, limit, start); }, "thread invoke for method " BOOST_PP_STRINGIZE(method_name)).wait(); } @@ -385,7 +394,8 @@ namespace graphene { namespace app { auto index_start = by_op_idx.begin(); auto itr = by_op_idx.lower_bound(boost::make_tuple(account, start)); - while(itr != index_start && itr->account == account && itr->operation_id.instance.value > stop.instance.value && result.size() < limit) + while(itr != index_start && itr->account == account && itr->operation_id.instance.value > stop.instance.value + && result.size() < limit) { if(itr->operation_id.instance.value <= start.instance.value) result.push_back(itr->operation_id(db)); @@ -398,11 +408,12 @@ namespace graphene { namespace app { return result; } - vector history_api::get_account_history_operations( const std::string account_id_or_name, - int64_t operation_type, - operation_history_id_type start, - operation_history_id_type stop, - uint32_t limit ) const + vector history_api::get_account_history_operations( + const std::string account_id_or_name, + int64_t operation_type, + operation_history_id_type start, + operation_history_id_type stop, + uint32_t limit ) const { FC_ASSERT( _app.chain_database() ); const auto& db = *_app.chain_database(); @@ -415,7 +426,8 @@ namespace graphene { namespace app { vector result; account_id_type account; try { - account = database_api.get_account_id_from_string(account_id_or_name); + database_api_helper db_api_helper( _app ); + account = db_api_helper.get_account_from_string(account_id_or_name)->id; } catch(...) { return result; } const auto& stats = account(db).statistics(db); if( stats.most_recent_op == account_history_id_type() ) return result; @@ -459,7 +471,8 @@ namespace graphene { namespace app { vector result; account_id_type account; try { - account = database_api.get_account_id_from_string(account_id_or_name); + database_api_helper db_api_helper( _app ); + account = db_api_helper.get_account_from_string(account_id_or_name)->id; } catch(...) 
{ return result; } const auto& stats = account(db).statistics(db); if( start == 0 ) @@ -545,8 +558,9 @@ namespace graphene { namespace app { FC_ASSERT(_app.chain_database()); const auto& db = *_app.chain_database(); - asset_id_type a = database_api.get_asset_id_from_string( asset_a ); - asset_id_type b = database_api.get_asset_id_from_string( asset_b ); + database_api_helper db_api_helper( _app ); + asset_id_type a = db_api_helper.get_asset_from_string( asset_a )->id; + asset_id_type b = db_api_helper.get_asset_from_string( asset_b )->id; vector result; result.reserve(200); @@ -697,8 +711,6 @@ namespace graphene { namespace app { } FC_CAPTURE_AND_RETHROW( (pool_id)(start)(stop)(olimit)(operation_type) ) } - crypto_api::crypto_api(){}; - commitment_type crypto_api::blind( const blind_factor_type& blind, uint64_t value ) { return fc::ecc::blind( blind, value ); @@ -754,21 +766,22 @@ namespace graphene { namespace app { } // asset_api - asset_api::asset_api(graphene::app::application& app) : - _app(app), - _db( *app.chain_database()), - database_api( std::ref(*app.chain_database()), &(app.get_options()) - ) { } - asset_api::~asset_api() { } + asset_api::asset_api(graphene::app::application& app) + : _app(app), + _db( *app.chain_database() ) + { // Nothing else to do + } - vector asset_api::get_asset_holders( std::string asset, uint32_t start, uint32_t limit ) const + vector asset_api::get_asset_holders( std::string asset, uint32_t start, + uint32_t limit ) const { const auto configured_limit = _app.get_options().api_limit_get_asset_holders; FC_ASSERT( limit <= configured_limit, "limit can not be greater than ${configured_limit}", ("configured_limit", configured_limit) ); - asset_id_type asset_id = database_api.get_asset_id_from_string( asset ); + database_api_helper db_api_helper( _app ); + asset_id_type asset_id = db_api_helper.get_asset_from_string( asset )->id; const auto& bal_idx = _db.get_index_type< account_balance_index >().indices().get< by_asset_balance >(); auto range = bal_idx.equal_range( boost::make_tuple( asset_id ) ); @@ -799,12 +812,13 @@ namespace graphene { namespace app { return result; } // get number of asset holders. 
- int asset_api::get_asset_holders_count( std::string asset ) const { + int64_t asset_api::get_asset_holders_count( std::string asset ) const { const auto& bal_idx = _db.get_index_type< account_balance_index >().indices().get< by_asset_balance >(); - asset_id_type asset_id = database_api.get_asset_id_from_string( asset ); + database_api_helper db_api_helper( _app ); + asset_id_type asset_id = db_api_helper.get_asset_from_string( asset )->id; auto range = bal_idx.equal_range( boost::make_tuple( asset_id ) ); - int count = boost::distance(range) - 1; + int64_t count = boost::distance(range) - 1; return count; } @@ -822,7 +836,7 @@ namespace graphene { namespace app { const auto& bal_idx = _db.get_index_type< account_balance_index >().indices().get< by_asset_balance >(); auto range = bal_idx.equal_range( boost::make_tuple( asset_id ) ); - int count = boost::distance(range) - 1; + int64_t count = boost::distance(range) - 1; asset_holders ah; ah.asset_id = asset_id; @@ -835,6 +849,11 @@ namespace graphene { namespace app { } // orders_api + orders_api::orders_api(application& app) + : _app(app) + { // Nothing else to do + } + flat_set orders_api::get_tracked_groups()const { auto plugin = _app.get_plugin( "grouped_orders" ); @@ -858,8 +877,9 @@ namespace graphene { namespace app { const auto& limit_groups = plugin->limit_order_groups(); vector< limit_order_group > result; - asset_id_type base_asset_id = database_api.get_asset_id_from_string( base_asset ); - asset_id_type quote_asset_id = database_api.get_asset_id_from_string( quote_asset ); + database_api_helper db_api_helper( _app ); + asset_id_type base_asset_id = db_api_helper.get_asset_from_string( base_asset )->id; + asset_id_type quote_asset_id = db_api_helper.get_asset_from_string( quote_asset )->id; price max_price = price::max( base_asset_id, quote_asset_id ); price min_price = price::min( base_asset_id, quote_asset_id ); @@ -878,13 +898,20 @@ namespace graphene { namespace app { } // custom operations api - vector custom_operations_api::get_storage_info(std::string account_id_or_name, - std::string catalog)const + custom_operations_api::custom_operations_api(application& app) + : _app(app) + { // Nothing else to do + } + + vector custom_operations_api::get_storage_info( + const std::string& account_id_or_name, + const std::string& catalog)const { auto plugin = _app.get_plugin("custom_operations"); FC_ASSERT( plugin ); - const auto account_id = database_api.get_account_id_from_string(account_id_or_name); + database_api_helper db_api_helper( _app ); + const account_id_type account_id = db_api_helper.get_account_from_string(account_id_or_name)->id; vector results; const auto& storage_index = _app.chain_database()->get_index_type(); const auto& by_account_catalog_idx = storage_index.indices().get(); diff --git a/libraries/app/application.cpp b/libraries/app/application.cpp index 6c2a57ecec..4cd9c885b9 100644 --- a/libraries/app/application.cpp +++ b/libraries/app/application.cpp @@ -1367,7 +1367,7 @@ void application::add_available_plugin(std::shared_ptradd_available_plugin(p); } -const application_options& application::get_options() +const application_options& application::get_options() const { return my->_app_options; } diff --git a/libraries/app/database_api.cpp b/libraries/app/database_api.cpp index 93b0aa44dc..e786cafdf5 100644 --- a/libraries/app/database_api.cpp +++ b/libraries/app/database_api.cpp @@ -22,6 +22,8 @@ * THE SOFTWARE. 
*/ +#include + #include "database_api_impl.hxx" #include @@ -48,12 +50,24 @@ namespace graphene { namespace app { ////////////////////////////////////////////////////////////////////// database_api::database_api( graphene::chain::database& db, const application_options* app_options ) - : my( std::make_unique( db, app_options ) ) {} +: my( std::make_shared( db, app_options ) ) +{ // Nothing else to do +} -database_api::~database_api() {} +database_api::~database_api() = default; -database_api_impl::database_api_impl( graphene::chain::database& db, const application_options* app_options ) +database_api_helper::database_api_helper( graphene::chain::database& db, const application_options* app_options ) :_db(db), _app_options(app_options) +{ // Nothing else to do +} + +database_api_helper::database_api_helper( graphene::app::application& app ) +:_db( *app.chain_database() ), _app_options( &app.get_options() ) +{ // Nothing else to do +} + +database_api_impl::database_api_impl( graphene::chain::database& db, const application_options* app_options ) +:database_api_helper( db, app_options ) { dlog("creating database api ${x}", ("x",int64_t(this)) ); _new_connection = _db.new_objects.connect([this](const vector& ids, @@ -3160,7 +3174,7 @@ vector database_api_impl::get_tickets_by_account( // // ////////////////////////////////////////////////////////////////////// -const account_object* database_api_impl::get_account_from_string( const std::string& name_or_id, +const account_object* database_api_helper::get_account_from_string( const std::string& name_or_id, bool throw_if_not_found ) const { // TODO cache the result to avoid repeatly fetching from db @@ -3186,7 +3200,7 @@ const account_object* database_api_impl::get_account_from_string( const std::str return account_ptr; } -const asset_object* database_api_impl::get_asset_from_string( const std::string& symbol_or_id, +const asset_object* database_api_helper::get_asset_from_string( const std::string& symbol_or_id, bool throw_if_not_found ) const { // TODO cache the result to avoid repeatly fetching from db diff --git a/libraries/app/database_api_helper.hxx b/libraries/app/database_api_helper.hxx new file mode 100644 index 0000000000..2df0180ab4 --- /dev/null +++ b/libraries/app/database_api_helper.hxx @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2017 Cryptonomex, Inc., and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#pragma once + +namespace graphene { namespace app { + +class database_api_helper +{ +public: + database_api_helper( graphene::chain::database& db, const application_options* app_options ); + explicit database_api_helper( graphene::app::application& app ); + + // Member variables + graphene::chain::database& _db; + const application_options* _app_options = nullptr; + + // Accounts + const account_object* get_account_from_string( const std::string& name_or_id, + bool throw_if_not_found = true ) const; + + // Assets + const asset_object* get_asset_from_string( const std::string& symbol_or_id, + bool throw_if_not_found = true ) const; + + /// Template functions for simple list_X and get_X_by_T APIs, to reduce duplicate code + /// @{ + template + auto make_tuple_if_multiple(X x) const + { return x; } + + template + auto make_tuple_if_multiple(X... x) const + { return std::make_tuple( x... ); } + + template + auto call_end_or_upper_bound( const T& t ) const + { return std::end( t ); } + + template + auto call_end_or_upper_bound( const T& t, X... x ) const + { return t.upper_bound( make_tuple_if_multiple( x... ) ); } + + template + vector get_objects_by_x( + T application_options::* app_opt_member_ptr, + const INDEX_TYPE& idx, + const optional& olimit, + const optional& ostart_id, + X... x ) const + { + uint64_t limit = olimit.valid() ? *olimit : ( application_options::get_default().*app_opt_member_ptr ); + + FC_ASSERT( _app_options, "Internal error" ); + const auto configured_limit = _app_options->*app_opt_member_ptr; + FC_ASSERT( limit <= configured_limit, + "limit can not be greater than ${configured_limit}", + ("configured_limit", configured_limit) ); + + vector results; + + OBJ_ID_TYPE start_id = ostart_id.valid() ? *ostart_id : OBJ_ID_TYPE(); + + auto lower_itr = idx.lower_bound( make_tuple_if_multiple( x..., start_id ) ); + auto upper_itr = call_end_or_upper_bound( idx, x... ); + + results.reserve( limit ); + for ( ; lower_itr != upper_itr && results.size() < limit; ++lower_itr ) + { + results.emplace_back( *lower_itr ); + } + + return results; + } + /// @} + +}; + +} } // graphene::app diff --git a/libraries/app/database_api_impl.hxx b/libraries/app/database_api_impl.hxx index 8bee269c5b..634e41219e 100644 --- a/libraries/app/database_api_impl.hxx +++ b/libraries/app/database_api_impl.hxx @@ -21,22 +21,22 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ - -#include +#pragma once #include +#include "database_api_helper.hxx" #define GET_REQUIRED_FEES_MAX_RECURSION 4 namespace graphene { namespace app { -typedef std::map< std::pair, - std::vector > market_queue_type; +using market_queue_type = std::map< std::pair, + std::vector >; -class database_api_impl : public std::enable_shared_from_this +class database_api_impl : public std::enable_shared_from_this, public database_api_helper { public: - explicit database_api_impl( graphene::chain::database& db, const application_options* app_options ); + database_api_impl( graphene::chain::database& db, const application_options* app_options ); virtual ~database_api_impl(); // Objects @@ -67,7 +67,6 @@ class database_api_impl : public std::enable_shared_from_this bool is_public_key_registered(string public_key) const; // Accounts - account_id_type get_account_id_from_string(const std::string& name_or_id)const; vector> get_accounts( const vector& account_names_or_ids, optional subscribe )const; std::map get_full_accounts( const vector& names_or_ids, @@ -91,7 +90,6 @@ class database_api_impl : public std::enable_shared_from_this // Assets uint64_t get_asset_count()const; - asset_id_type get_asset_id_from_string(const std::string& symbol_or_id)const; vector> get_assets( const vector& asset_symbols_or_ids, optional subscribe )const; vector list_assets(const string& lower_bound_symbol, uint32_t limit)const; @@ -253,9 +251,6 @@ class database_api_impl : public std::enable_shared_from_this // Accounts //////////////////////////////////////////////// - const account_object* get_account_from_string( const std::string& name_or_id, - bool throw_if_not_found = true ) const; - //////////////////////////////////////////////// // Assets //////////////////////////////////////////////// @@ -274,8 +269,6 @@ class database_api_impl : public std::enable_shared_from_this return result; } - const asset_object* get_asset_from_string( const std::string& symbol_or_id, - bool throw_if_not_found = true ) const; // helper function vector> get_assets( const vector& asset_ids, optional subscribe = optional() )const; @@ -344,57 +337,6 @@ class database_api_impl : public std::enable_shared_from_this return results; } - /// Template functions for simple list_X and get_X_by_T APIs, to reduce duplicate code - /// @{ - template - auto make_tuple_if_multiple(X x) const - { return x; } - - template - auto make_tuple_if_multiple(X... x) const - { return std::make_tuple( x... ); } - - template - auto call_end_or_upper_bound( const T& t ) const - { return std::end( t ); } - - template - auto call_end_or_upper_bound( const T& t, X... x ) const - { return t.upper_bound( make_tuple_if_multiple( x... ) ); } - - template - vector get_objects_by_x( - T application_options::* app_opt_member_ptr, - const INDEX_TYPE& idx, - const optional& olimit, - const optional& ostart_id, - X... x ) const - { - uint64_t limit = olimit.valid() ? *olimit : ( application_options::get_default().*app_opt_member_ptr ); - - FC_ASSERT( _app_options, "Internal error" ); - const auto configured_limit = _app_options->*app_opt_member_ptr; - FC_ASSERT( limit <= configured_limit, - "limit can not be greater than ${configured_limit}", - ("configured_limit", configured_limit) ); - - vector results; - - OBJ_ID_TYPE start_id = ostart_id.valid() ? *ostart_id : OBJ_ID_TYPE(); - - auto lower_itr = idx.lower_bound( make_tuple_if_multiple( x..., start_id ) ); - auto upper_itr = call_end_or_upper_bound( idx, x... 
); - - results.reserve( limit ); - for ( ; lower_itr != upper_itr && results.size() < limit; ++lower_itr ) - { - results.emplace_back( *lower_itr ); - } - - return results; - } - /// @} - //////////////////////////////////////////////// // Subscription //////////////////////////////////////////////// @@ -517,9 +459,6 @@ class database_api_impl : public std::enable_shared_from_this map< pair, std::function > _market_subscriptions; - graphene::chain::database& _db; - const application_options* _app_options = nullptr; - const graphene::api_helper_indexes::amount_in_collateral_index* amount_in_collateral_index; const graphene::api_helper_indexes::asset_in_liquidity_pools_index* asset_in_liquidity_pools_index; }; diff --git a/libraries/app/include/graphene/app/api.hpp b/libraries/app/include/graphene/app/api.hpp index 56b4736b48..350a7618fe 100644 --- a/libraries/app/include/graphene/app/api.hpp +++ b/libraries/app/include/graphene/app/api.hpp @@ -89,7 +89,7 @@ namespace graphene { namespace app { struct asset_holders { asset_id_type asset_id; - int count; + int64_t count; }; struct history_operation_detail { @@ -102,12 +102,12 @@ namespace graphene { namespace app { */ struct limit_order_group { - limit_order_group( const std::pair& p ) + explicit limit_order_group( const std::pair& p ) : min_price( p.first.min_price ), max_price( p.second.max_price ), total_for_sale( p.second.total_for_sale ) {} - limit_order_group() {} + limit_order_group() = default; price min_price; ///< possible lowest price in the group price max_price; ///< possible highest price in the group @@ -122,8 +122,7 @@ namespace graphene { namespace app { class history_api { public: - history_api(application& app) - :_app(app), database_api( std::ref(*app.chain_database()), &(app.get_options())) {} + explicit history_api(application& app); /** * @brief Get operations relevant to the specificed account @@ -298,7 +297,6 @@ namespace graphene { namespace app { private: application& _app; - graphene::app::database_api database_api; }; /** @@ -307,8 +305,7 @@ namespace graphene { namespace app { class block_api { public: - block_api(graphene::chain::database& db); - ~block_api(); + explicit block_api(const graphene::chain::database& db); /** * @brief Get signed blocks @@ -319,7 +316,7 @@ namespace graphene { namespace app { vector> get_blocks(uint32_t block_num_from, uint32_t block_num_to)const; private: - graphene::chain::database& _db; + const graphene::chain::database& _db; }; @@ -329,7 +326,7 @@ namespace graphene { namespace app { class network_broadcast_api : public std::enable_shared_from_this { public: - network_broadcast_api(application& a); + explicit network_broadcast_api(application& a); struct transaction_confirmation { @@ -339,7 +336,7 @@ namespace graphene { namespace app { processed_transaction trx; }; - typedef std::function confirmation_callback; + using confirmation_callback = std::function; /** * @brief Broadcast a transaction to the network @@ -392,7 +389,7 @@ namespace graphene { namespace app { class network_node_api { public: - network_node_api(application& a); + explicit network_node_api(application& a); /** * @brief Return general network information, such as p2p port @@ -438,8 +435,6 @@ namespace graphene { namespace app { class crypto_api { public: - crypto_api(); - /** * @brief Generates a pedersen commitment: *commit = blind * G + value * G2. * The commitment is 33 bytes, the blinding factor is 32 bytes. 
@@ -529,8 +524,7 @@ namespace graphene { namespace app { class asset_api { public: - asset_api(graphene::app::application& app); - ~asset_api(); + explicit asset_api(graphene::app::application& app); /** * @brief Get asset holders for a specific asset @@ -546,7 +540,7 @@ namespace graphene { namespace app { * @param asset The specific asset id or symbol * @return Holders count for the specified asset */ - int get_asset_holders_count( std::string asset )const; + int64_t get_asset_holders_count( std::string asset )const; /** * @brief Get all asset holders @@ -557,7 +551,6 @@ namespace graphene { namespace app { private: graphene::app::application& _app; graphene::chain::database& _db; - graphene::app::database_api database_api; }; /** @@ -566,9 +559,7 @@ namespace graphene { namespace app { class orders_api { public: - orders_api(application& app) - :_app(app), database_api( std::ref(*app.chain_database()), &(app.get_options()) ){} - //virtual ~orders_api() {} + explicit orders_api(application& app); /** * @brief Get tracked groups configured by the server. @@ -594,7 +585,6 @@ namespace graphene { namespace app { private: application& _app; - graphene::app::database_api database_api; }; /** @@ -603,23 +593,23 @@ namespace graphene { namespace app { */ class custom_operations_api { - public: - custom_operations_api(application& app):_app(app), database_api( *app.chain_database(), - &(app.get_options()) ){} + public: + explicit custom_operations_api(application& app); - /** - * @brief Get all stored objects of an account in a particular catalog - * - * @param account_name_or_id The account name or ID to get info from - * @param catalog Category classification. Each account can store multiple catalogs. - * - * @return The vector of objects of the account or empty - */ - vector get_storage_info(std::string account_name_or_id, std::string catalog)const; + /** + * @brief Get all stored objects of an account in a particular catalog + * + * @param account_name_or_id The account name or ID to get info from + * @param catalog Category classification. Each account can store multiple catalogs. 
+ * + * @return The vector of objects of the account or empty + */ + vector get_storage_info( + const std::string& account_name_or_id, + const std::string& catalog )const; private: - application& _app; - graphene::app::database_api database_api; + application& _app; }; } } // graphene::app @@ -642,8 +632,7 @@ namespace graphene { namespace app { class login_api { public: - login_api(application& a); - ~login_api(); + explicit login_api(application& a); /** * @brief Authenticate to the RPC server diff --git a/libraries/app/include/graphene/app/application.hpp b/libraries/app/include/graphene/app/application.hpp index 32f1b88da3..ff6e2aee1c 100644 --- a/libraries/app/include/graphene/app/application.hpp +++ b/libraries/app/include/graphene/app/application.hpp @@ -146,7 +146,7 @@ namespace graphene { namespace app { /// Emitted when syncing finishes (is_finished_syncing will return true) boost::signals2::signal syncing_finished; - const application_options& get_options(); + const application_options& get_options() const; void enable_plugin( const string& name ) const; From dee3a1ec7804b4b5c215fde9c8a7e64f94068d21 Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 13 Jul 2022 12:51:24 +0000 Subject: [PATCH 126/338] Avoid magic numbers in code / doc about API limits Add api_limit_get_market_history to app_options --- libraries/app/api.cpp | 17 ++--- libraries/app/application.cpp | 7 +++ libraries/app/include/graphene/app/api.hpp | 63 +++++++++++-------- .../app/include/graphene/app/application.hpp | 5 +- 4 files changed, 55 insertions(+), 37 deletions(-) diff --git a/libraries/app/api.cpp b/libraries/app/api.cpp index c7a887e796..eab9d5a25c 100644 --- a/libraries/app/api.cpp +++ b/libraries/app/api.cpp @@ -340,15 +340,13 @@ namespace graphene { namespace app { hkey.quote = b; hkey.sequence = std::numeric_limits::min(); - uint32_t count = 0; auto itr = history_idx.lower_bound( hkey ); vector result; - while( itr != history_idx.end() && count < limit) + while( itr != history_idx.end() && result.size() < limit ) { if( itr->key.base != a || itr->key.quote != b ) break; result.push_back( *itr ); ++itr; - ++count; } return result; @@ -455,7 +453,7 @@ namespace graphene { namespace app { } - vector history_api::get_relative_account_history( const std::string account_id_or_name, + vector history_api::get_relative_account_history( const std::string& account_id_or_name, uint64_t stop, uint32_t limit, uint64_t start ) const @@ -562,7 +560,8 @@ namespace graphene { namespace app { asset_id_type a = db_api_helper.get_asset_from_string( asset_a )->id; asset_id_type b = db_api_helper.get_asset_from_string( asset_b )->id; vector result; - result.reserve(200); + const auto configured_limit = _app.get_options().api_limit_get_market_history; + result.reserve( configured_limit ); if( a > b ) std::swap(a,b); @@ -570,7 +569,7 @@ namespace graphene { namespace app { const auto& by_key_idx = bidx.indices().get(); auto itr = by_key_idx.lower_bound( bucket_key( a, b, bucket_seconds, start ) ); - while( itr != by_key_idx.end() && itr->key.open <= end && result.size() < 200 ) + while( itr != by_key_idx.end() && itr->key.open <= end && result.size() < configured_limit ) { if( !(itr->key.base == a && itr->key.quote == b && itr->key.seconds == bucket_seconds) ) { @@ -591,7 +590,8 @@ namespace graphene { namespace app { { try { FC_ASSERT( _app.get_options().has_market_history_plugin, "Market history plugin is not enabled." ); - uint32_t limit = olimit.valid() ? *olimit : 101; + uint32_t limit = olimit.valid() ? 
*olimit + : application_options::get_default().api_limit_get_liquidity_pool_history; const auto configured_limit = _app.get_options().api_limit_get_liquidity_pool_history; FC_ASSERT( limit <= configured_limit, @@ -649,7 +649,8 @@ namespace graphene { namespace app { { try { FC_ASSERT( _app.get_options().has_market_history_plugin, "Market history plugin is not enabled." ); - uint32_t limit = olimit.valid() ? *olimit : 101; + uint32_t limit = olimit.valid() ? *olimit + : application_options::get_default().api_limit_get_liquidity_pool_history; const auto configured_limit = _app.get_options().api_limit_get_liquidity_pool_history; FC_ASSERT( limit <= configured_limit, diff --git a/libraries/app/application.cpp b/libraries/app/application.cpp index 4cd9c885b9..064aead8f9 100644 --- a/libraries/app/application.cpp +++ b/libraries/app/application.cpp @@ -310,6 +310,10 @@ void application_impl::set_api_limit() { _app_options.api_limit_get_grouped_limit_orders = _options->at("api-limit-get-grouped-limit-orders").as(); } + if(_options->count("api-limit-get-market-history") > 0){ + _app_options.api_limit_get_market_history = + _options->at("api-limit-get-market-history").as(); + } if(_options->count("api-limit-get-relative-account-history") > 0){ _app_options.api_limit_get_relative_account_history = _options->at("api-limit-get-relative-account-history").as(); @@ -1175,6 +1179,9 @@ void application::set_program_options(boost::program_options::options_descriptio ("api-limit-get-grouped-limit-orders", bpo::value()->default_value(default_opts.api_limit_get_grouped_limit_orders), "For orders_api::get_grouped_limit_orders to set max limit value") + ("api-limit-get-market-history", + bpo::value()->default_value(default_opts.api_limit_get_market_history), + "Maximum number of records to return for the history_api::get_market_history API") ("api-limit-get-relative-account-history", bpo::value()->default_value(default_opts.api_limit_get_relative_account_history), "For history_api::get_relative_account_history to set max limit value") diff --git a/libraries/app/include/graphene/app/api.hpp b/libraries/app/include/graphene/app/api.hpp index 350a7618fe..009fa37e9c 100644 --- a/libraries/app/include/graphene/app/api.hpp +++ b/libraries/app/include/graphene/app/api.hpp @@ -128,14 +128,15 @@ namespace graphene { namespace app { * @brief Get operations relevant to the specificed account * @param account_name_or_id The account name or ID whose history should be queried * @param stop ID of the earliest operation to retrieve - * @param limit Maximum number of operations to retrieve (must not exceed 100) + * @param limit Maximum number of operations to retrieve, must not exceed the configured value of + * @a api_limit_get_account_history * @param start ID of the most recent operation to retrieve * @return A list of operations performed by account, ordered from most recent to oldest. */ vector get_account_history( const std::string account_name_or_id, operation_history_id_type stop = operation_history_id_type(), - uint32_t limit = 100, + uint32_t limit = application_options::get_default().api_limit_get_account_history, operation_history_id_type start = operation_history_id_type() )const; @@ -145,7 +146,8 @@ namespace graphene { namespace app { * @param operation_types The IDs of the operation we want to get operations in the account * ( 0 = transfer , 1 = limit order create, ...) 
* @param start the sequence number where to start looping back through the history - * @param limit the max number of entries to return (from start number) + * @param limit the max number of entries to return (from start number), must not exceed the configured + * value of @a api_limit_get_account_history_by_operations * @return history_operation_detail */ history_operation_detail get_account_history_by_operations( @@ -161,7 +163,8 @@ namespace graphene { namespace app { * @param operation_type The type of the operation we want to get operations in the account * ( 0 = transfer , 1 = limit order create, ...) * @param stop ID of the earliest operation to retrieve - * @param limit Maximum number of operations to retrieve (must not exceed 100) + * @param limit Maximum number of operations to retrieve, must not exceed the configured value of + * @a api_limit_get_account_history_operations * @param start ID of the most recent operation to retrieve * @return A list of operations performed by account, ordered from most recent to oldest. */ @@ -170,7 +173,7 @@ namespace graphene { namespace app { int64_t operation_type, operation_history_id_type start = operation_history_id_type(), operation_history_id_type stop = operation_history_id_type(), - uint32_t limit = 100 + uint32_t limit = application_options::get_default().api_limit_get_account_history_operations )const; /** @@ -179,16 +182,18 @@ namespace graphene { namespace app { * for the account can be found in the account statistics (or use 0 for start). * @param account_name_or_id The account name or ID whose history should be queried * @param stop Sequence number of earliest operation. 0 is default and will - * query 'limit' number of operations. + * query 'limit' number of operations. - * @param limit Maximum number of operations to retrieve (must not exceed 100) + * @param limit Maximum number of operations to retrieve, must not exceed the configured value of + * @a api_limit_get_relative_account_history * @param start Sequence number of the most recent operation to retrieve. - * 0 is default, which will start querying from the most recent operation. + * 0 is default, which will start querying from the most recent operation. * @return A list of operations performed by account, ordered from most recent to oldest. */ - vector get_relative_account_history( const std::string account_name_or_id, - uint64_t stop = 0, - uint32_t limit = 100, - uint64_t start = 0) const; + vector get_relative_account_history( + const std::string& account_name_or_id, + uint64_t stop = 0, + uint32_t limit = application_options::get_default().api_limit_get_relative_account_history, + uint64_t start = 0) const; /** * @brief Get all operations inside a block or a transaction, including virtual operations @@ -225,7 +230,8 @@ namespace graphene { namespace app { * @param start The start of a time range, E.G. "2018-01-01T00:00:00" * @param end The end of the time range * @return A list of OHLCV data, in "least recent first" order. - * If there are more than 200 records in the specified time range, the first 200 records will be returned. + * If there are more records in the specified time range than the configured value of + * @a api_limit_get_market_history, only the earliest records up to that limit will be returned.
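Each of the signature changes above swaps a literal default (100, 200) for a value pulled from application_options, and the implementation then checks the caller's value against the limit configured on the node. The following is a reduced sketch of that pattern, not the actual graphene code; options_sketch and resolve_limit are invented names, and only the api_limit_get_market_history field from this patch is mirrored.

   // Illustrative sketch only; options_sketch / resolve_limit are hypothetical names.
   #include <cstdint>
   #include <optional>
   #include <stdexcept>

   struct options_sketch
   {
      uint32_t api_limit_get_market_history = 200;   // compiled-in default, as added in this patch

      static constexpr options_sketch get_default() { return options_sketch(); }
   };

   // Resolve an optional caller-supplied limit: fall back to the named default
   // instead of a magic number, then reject values above the node's configured cap.
   static uint32_t resolve_limit( const std::optional<uint32_t>& requested,
                                  const options_sketch& configured )
   {
      const uint32_t limit = requested.value_or(
            options_sketch::get_default().api_limit_get_market_history );
      if( limit > configured.api_limit_get_market_history )
         throw std::invalid_argument( "limit can not be greater than the configured limit" );
      return limit;
   }

One benefit of routing every default through a single options struct is that the documented default, the command-line default and the runtime check can no longer drift apart.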
*/ vector get_market_history( std::string a, std::string b, uint32_t bucket_seconds, fc::time_point_sec start, fc::time_point_sec end )const; @@ -244,15 +250,17 @@ namespace graphene { namespace app { * If specified, only the operations occurred not later than this time will be returned. * @param stop A UNIX timestamp. Optional. * If specified, only the operations occurred later than this time will be returned. - * @param limit Maximum quantity of operations in the history to retrieve. - * Optional. If not specified, at most 101 records will be returned. + * @param limit Maximum quantity of operations in the history to retrieve. Optional. + * If not specified, the default value of + * @ref application_options::api_limit_get_liquidity_pool_history will be used. + * If specified, it must not exceed the configured limit. * @param operation_type Optional. If specified, only the operations whose type is the specified type * will be returned. Otherwise all operations will be returned. * @return operation history of the liquidity pool, ordered by time, most recent first. * * @note * 1. The time must be UTC. The range is (stop, start]. - * 2. In case when there are more than 100 operations occurred in the same second, this API only returns + * 2. In case when there are more operations than @p limit occurred in the same second, this API only returns * the most recent records, the rest records can be retrieved with the * @ref get_liquidity_pool_history_by_sequence API. * 3. List of operation type code: 59-creation, 60-deletion, 61-deposit, 62-withdrawal, 63-exchange. @@ -263,7 +271,7 @@ namespace graphene { namespace app { liquidity_pool_id_type pool_id, optional start = optional(), optional stop = optional(), - optional limit = 101, + optional limit = application_options::get_default().api_limit_get_liquidity_pool_history, optional operation_type = optional() )const; /** @@ -273,26 +281,25 @@ namespace graphene { namespace app { * If specified, only the operations whose sequences are not greater than this will be returned. * @param stop A UNIX timestamp. Optional. * If specified, only operations occurred later than this time will be returned. - * @param limit Maximum quantity of operations in the history to retrieve. - * Optional. If not specified, at most 101 records will be returned. + * @param limit Maximum quantity of operations in the history to retrieve. Optional. + * If not specified, the default value of + * @ref application_options::api_limit_get_liquidity_pool_history will be used. + * If specified, it must not exceed the configured limit. * @param operation_type Optional. If specified, only the operations whose type is the specified type * will be returned. Otherwise all operations will be returned. * @return operation history of the liquidity pool, ordered by time, most recent first. * * @note * 1. The time must be UTC. The range is (stop, start]. - * 2. In case when there are more than 100 operations occurred in the same second, this API only returns - * the most recent records, the rest records can be retrieved with the - * @ref get_liquidity_pool_history_by_sequence API. - * 3. List of operation type code: 59-creation, 60-deletion, 61-deposit, 62-withdrawal, 63-exchange. - * 4. Can only omit one or more arguments in the end of the list, but not one or more in the middle. + * 2. List of operation type code: 59-creation, 60-deletion, 61-deposit, 62-withdrawal, 63-exchange. + * 3. Can only omit one or more arguments in the end of the list, but not one or more in the middle. 
* If need to not specify an individual argument, can specify \c null in the place. */ vector get_liquidity_pool_history_by_sequence( liquidity_pool_id_type pool_id, optional start = optional(), optional stop = optional(), - optional limit = 101, + optional limit = application_options::get_default().api_limit_get_liquidity_pool_history, optional operation_type = optional() )const; private: @@ -530,7 +537,8 @@ namespace graphene { namespace app { * @brief Get asset holders for a specific asset * @param asset The specific asset id or symbol * @param start The start index - * @param limit Maximum limit must not exceed 100 + * @param limit Maximum number of accounts to retrieve, must not exceed the configured value of + * @a api_limit_get_asset_holders * @return A list of asset holders for the specified asset */ vector get_asset_holders( std::string asset, uint32_t start, uint32_t limit )const; @@ -574,7 +582,8 @@ namespace graphene { namespace app { * @param quote_asset symbol or ID of asset being purchased * @param group Maximum price diff within each order group, have to be one of configured values * @param start Optional price to indicate the first order group to retrieve - * @param limit Maximum number of order groups to retrieve (must not exceed 101) + * @param limit Maximum number of order groups to retrieve, must not exceed the configured value of + * @a api_limit_get_grouped_limit_orders * @return The grouped limit orders, ordered from best offered price to worst */ vector< limit_order_group > get_grouped_limit_orders( std::string base_asset, diff --git a/libraries/app/include/graphene/app/application.hpp b/libraries/app/include/graphene/app/application.hpp index ff6e2aee1c..73914a6fdb 100644 --- a/libraries/app/include/graphene/app/application.hpp +++ b/libraries/app/include/graphene/app/application.hpp @@ -46,6 +46,7 @@ namespace graphene { namespace app { uint64_t api_limit_get_account_history_operations = 100; uint64_t api_limit_get_account_history = 100; uint64_t api_limit_get_grouped_limit_orders = 101; + uint32_t api_limit_get_market_history = 200; uint64_t api_limit_get_relative_account_history = 100; uint64_t api_limit_get_account_history_by_operations = 100; uint64_t api_limit_get_asset_holders = 100; @@ -78,9 +79,9 @@ namespace graphene { namespace app { uint64_t api_limit_get_samet_funds = 101; uint64_t api_limit_get_credit_offers = 101; - static const application_options& get_default() + static constexpr application_options get_default() { - static const application_options default_options; + constexpr application_options default_options; return default_options; } }; From fd503a9e78879ee4caa02640a17ef921436cfe70 Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 21 Jul 2022 07:13:29 +0000 Subject: [PATCH 127/338] Avoid magic numbers about limits in database_api Add api_limit_get_full_accounts_subscribe to app_options --- libraries/app/application.cpp | 143 ++++--- libraries/app/database_api.cpp | 254 ++++------- libraries/app/database_api_impl.hxx | 80 ++-- .../app/include/graphene/app/application.hpp | 69 +-- .../app/include/graphene/app/database_api.hpp | 394 +++++++++++------- tests/common/database_fixture.cpp | 54 +-- 6 files changed, 499 insertions(+), 495 deletions(-) diff --git a/libraries/app/application.cpp b/libraries/app/application.cpp index 064aead8f9..714d1d3a77 100644 --- a/libraries/app/application.cpp +++ b/libraries/app/application.cpp @@ -300,15 +300,15 @@ void application_impl::initialize(const fc::path& data_dir, 
shared_ptrcount("api-limit-get-account-history-operations") > 0) { _app_options.api_limit_get_account_history_operations = - _options->at("api-limit-get-account-history-operations").as(); + _options->at("api-limit-get-account-history-operations").as(); } if(_options->count("api-limit-get-account-history") > 0){ _app_options.api_limit_get_account_history = - _options->at("api-limit-get-account-history").as(); + _options->at("api-limit-get-account-history").as(); } if(_options->count("api-limit-get-grouped-limit-orders") > 0){ _app_options.api_limit_get_grouped_limit_orders = - _options->at("api-limit-get-grouped-limit-orders").as(); + _options->at("api-limit-get-grouped-limit-orders").as(); } if(_options->count("api-limit-get-market-history") > 0){ _app_options.api_limit_get_market_history = @@ -316,127 +316,131 @@ void application_impl::set_api_limit() { } if(_options->count("api-limit-get-relative-account-history") > 0){ _app_options.api_limit_get_relative_account_history = - _options->at("api-limit-get-relative-account-history").as(); + _options->at("api-limit-get-relative-account-history").as(); } if(_options->count("api-limit-get-account-history-by-operations") > 0){ _app_options.api_limit_get_account_history_by_operations = - _options->at("api-limit-get-account-history-by-operations").as(); + _options->at("api-limit-get-account-history-by-operations").as(); } if(_options->count("api-limit-get-asset-holders") > 0){ _app_options.api_limit_get_asset_holders = - _options->at("api-limit-get-asset-holders").as(); + _options->at("api-limit-get-asset-holders").as(); } if(_options->count("api-limit-get-key-references") > 0){ _app_options.api_limit_get_key_references = - _options->at("api-limit-get-key-references").as(); + _options->at("api-limit-get-key-references").as(); } if(_options->count("api-limit-get-htlc-by") > 0) { _app_options.api_limit_get_htlc_by = - _options->at("api-limit-get-htlc-by").as(); + _options->at("api-limit-get-htlc-by").as(); } if(_options->count("api-limit-get-full-accounts") > 0) { _app_options.api_limit_get_full_accounts = - _options->at("api-limit-get-full-accounts").as(); + _options->at("api-limit-get-full-accounts").as(); } if(_options->count("api-limit-get-full-accounts-lists") > 0) { _app_options.api_limit_get_full_accounts_lists = - _options->at("api-limit-get-full-accounts-lists").as(); + _options->at("api-limit-get-full-accounts-lists").as(); + } + if(_options->count("api-limit-get-full-accounts-subscribe") > 0) { + _app_options.api_limit_get_full_accounts_subscribe = + _options->at("api-limit-get-full-accounts-subscribe").as(); } if(_options->count("api-limit-get-top-voters") > 0) { _app_options.api_limit_get_top_voters = - _options->at("api-limit-get-top-voters").as(); + _options->at("api-limit-get-top-voters").as(); } if(_options->count("api-limit-get-call-orders") > 0) { _app_options.api_limit_get_call_orders = - _options->at("api-limit-get-call-orders").as(); + _options->at("api-limit-get-call-orders").as(); } if(_options->count("api-limit-get-settle-orders") > 0) { _app_options.api_limit_get_settle_orders = - _options->at("api-limit-get-settle-orders").as(); + _options->at("api-limit-get-settle-orders").as(); } if(_options->count("api-limit-get-assets") > 0) { _app_options.api_limit_get_assets = - _options->at("api-limit-get-assets").as(); + _options->at("api-limit-get-assets").as(); } if(_options->count("api-limit-get-limit-orders") > 0){ _app_options.api_limit_get_limit_orders = - _options->at("api-limit-get-limit-orders").as(); + 
_options->at("api-limit-get-limit-orders").as(); } if(_options->count("api-limit-get-limit-orders-by-account") > 0){ _app_options.api_limit_get_limit_orders_by_account = - _options->at("api-limit-get-limit-orders-by-account").as(); + _options->at("api-limit-get-limit-orders-by-account").as(); } if(_options->count("api-limit-get-order-book") > 0){ _app_options.api_limit_get_order_book = - _options->at("api-limit-get-order-book").as(); + _options->at("api-limit-get-order-book").as(); } if(_options->count("api-limit-list-htlcs") > 0){ _app_options.api_limit_list_htlcs = - _options->at("api-limit-list-htlcs").as(); + _options->at("api-limit-list-htlcs").as(); } if(_options->count("api-limit-lookup-accounts") > 0) { _app_options.api_limit_lookup_accounts = - _options->at("api-limit-lookup-accounts").as(); + _options->at("api-limit-lookup-accounts").as(); } if(_options->count("api-limit-lookup-witness-accounts") > 0) { _app_options.api_limit_lookup_witness_accounts = - _options->at("api-limit-lookup-witness-accounts").as(); + _options->at("api-limit-lookup-witness-accounts").as(); } if(_options->count("api-limit-lookup-committee-member-accounts") > 0) { _app_options.api_limit_lookup_committee_member_accounts = - _options->at("api-limit-lookup-committee-member-accounts").as(); + _options->at("api-limit-lookup-committee-member-accounts").as(); } if(_options->count("api-limit-lookup-vote-ids") > 0) { _app_options.api_limit_lookup_vote_ids = - _options->at("api-limit-lookup-vote-ids").as(); + _options->at("api-limit-lookup-vote-ids").as(); } if(_options->count("api-limit-get-account-limit-orders") > 0) { _app_options.api_limit_get_account_limit_orders = - _options->at("api-limit-get-account-limit-orders").as(); + _options->at("api-limit-get-account-limit-orders").as(); } if(_options->count("api-limit-get-collateral-bids") > 0) { _app_options.api_limit_get_collateral_bids = - _options->at("api-limit-get-collateral-bids").as(); + _options->at("api-limit-get-collateral-bids").as(); } if(_options->count("api-limit-get-top-markets") > 0) { _app_options.api_limit_get_top_markets = - _options->at("api-limit-get-top-markets").as(); + _options->at("api-limit-get-top-markets").as(); } if(_options->count("api-limit-get-trade-history") > 0) { _app_options.api_limit_get_trade_history = - _options->at("api-limit-get-trade-history").as(); + _options->at("api-limit-get-trade-history").as(); } if(_options->count("api-limit-get-trade-history-by-sequence") > 0) { _app_options.api_limit_get_trade_history_by_sequence = - _options->at("api-limit-get-trade-history-by-sequence").as(); + _options->at("api-limit-get-trade-history-by-sequence").as(); } if(_options->count("api-limit-get-withdraw-permissions-by-giver") > 0) { _app_options.api_limit_get_withdraw_permissions_by_giver = - _options->at("api-limit-get-withdraw-permissions-by-giver").as(); + _options->at("api-limit-get-withdraw-permissions-by-giver").as(); } if(_options->count("api-limit-get-withdraw-permissions-by-recipient") > 0) { _app_options.api_limit_get_withdraw_permissions_by_recipient = - _options->at("api-limit-get-withdraw-permissions-by-recipient").as(); + _options->at("api-limit-get-withdraw-permissions-by-recipient").as(); } if(_options->count("api-limit-get-tickets") > 0) { _app_options.api_limit_get_tickets = - _options->at("api-limit-get-tickets").as(); + _options->at("api-limit-get-tickets").as(); } if(_options->count("api-limit-get-liquidity-pools") > 0) { _app_options.api_limit_get_liquidity_pools = - 
_options->at("api-limit-get-liquidity-pools").as(); + _options->at("api-limit-get-liquidity-pools").as(); } if(_options->count("api-limit-get-liquidity-pool-history") > 0) { _app_options.api_limit_get_liquidity_pool_history = - _options->at("api-limit-get-liquidity-pool-history").as(); + _options->at("api-limit-get-liquidity-pool-history").as(); } if(_options->count("api-limit-get-samet-funds") > 0) { _app_options.api_limit_get_samet_funds = - _options->at("api-limit-get-samet-funds").as(); + _options->at("api-limit-get-samet-funds").as(); } if(_options->count("api-limit-get-credit-offers") > 0) { _app_options.api_limit_get_credit_offers = - _options->at("api-limit-get-credit-offers").as(); + _options->at("api-limit-get-credit-offers").as(); } } @@ -1171,109 +1175,112 @@ void application::set_program_options(boost::program_options::options_descriptio "Whether to enable tracking of votes of standby witnesses and committee members. " "Set it to true to provide accurate data to API clients, set to false for slightly better performance.") ("api-limit-get-account-history-operations", - bpo::value()->default_value(default_opts.api_limit_get_account_history_operations), + bpo::value()->default_value(default_opts.api_limit_get_account_history_operations), "For history_api::get_account_history_operations to set max limit value") ("api-limit-get-account-history", - bpo::value()->default_value(default_opts.api_limit_get_account_history), + bpo::value()->default_value(default_opts.api_limit_get_account_history), "For history_api::get_account_history to set max limit value") ("api-limit-get-grouped-limit-orders", - bpo::value()->default_value(default_opts.api_limit_get_grouped_limit_orders), + bpo::value()->default_value(default_opts.api_limit_get_grouped_limit_orders), "For orders_api::get_grouped_limit_orders to set max limit value") ("api-limit-get-market-history", bpo::value()->default_value(default_opts.api_limit_get_market_history), "Maximum number of records to return for the history_api::get_market_history API") ("api-limit-get-relative-account-history", - bpo::value()->default_value(default_opts.api_limit_get_relative_account_history), + bpo::value()->default_value(default_opts.api_limit_get_relative_account_history), "For history_api::get_relative_account_history to set max limit value") ("api-limit-get-account-history-by-operations", - bpo::value()->default_value(default_opts.api_limit_get_account_history_by_operations), + bpo::value()->default_value(default_opts.api_limit_get_account_history_by_operations), "For history_api::get_account_history_by_operations to set max limit value") ("api-limit-get-asset-holders", - bpo::value()->default_value(default_opts.api_limit_get_asset_holders), + bpo::value()->default_value(default_opts.api_limit_get_asset_holders), "For asset_api::get_asset_holders to set max limit value") ("api-limit-get-key-references", - bpo::value()->default_value(default_opts.api_limit_get_key_references), + bpo::value()->default_value(default_opts.api_limit_get_key_references), "For database_api_impl::get_key_references to set max limit value") ("api-limit-get-htlc-by", - bpo::value()->default_value(default_opts.api_limit_get_htlc_by), + bpo::value()->default_value(default_opts.api_limit_get_htlc_by), "For database_api_impl::get_htlc_by_from and get_htlc_by_to to set max limit value") ("api-limit-get-full-accounts", - bpo::value()->default_value(default_opts.api_limit_get_full_accounts), + bpo::value()->default_value(default_opts.api_limit_get_full_accounts), "For 
database_api_impl::get_full_accounts to set max accounts to query at once") ("api-limit-get-full-accounts-lists", - bpo::value()->default_value(default_opts.api_limit_get_full_accounts_lists), + bpo::value()->default_value(default_opts.api_limit_get_full_accounts_lists), "For database_api_impl::get_full_accounts to set max items to return in the lists") + ("api-limit-get-full-accounts-subscribe", + bpo::value()->default_value(default_opts.api_limit_get_full_accounts_subscribe), + "Maximum number of accounts allowed to subscribe per connection with the get_full_accounts API") ("api-limit-get-top-voters", - bpo::value()->default_value(default_opts.api_limit_get_top_voters), + bpo::value()->default_value(default_opts.api_limit_get_top_voters), "For database_api_impl::get_top_voters to set max limit value") ("api-limit-get-call-orders", - bpo::value()->default_value(default_opts.api_limit_get_call_orders), + bpo::value()->default_value(default_opts.api_limit_get_call_orders), "For database_api_impl::get_call_orders and get_call_orders_by_account to set max limit value") ("api-limit-get-settle-orders", - bpo::value()->default_value(default_opts.api_limit_get_settle_orders), + bpo::value()->default_value(default_opts.api_limit_get_settle_orders), "For database_api_impl::get_settle_orders and get_settle_orders_by_account to set max limit value") ("api-limit-get-assets", - bpo::value()->default_value(default_opts.api_limit_get_assets), + bpo::value()->default_value(default_opts.api_limit_get_assets), "For database_api_impl::list_assets and get_assets_by_issuer to set max limit value") ("api-limit-get-limit-orders", - bpo::value()->default_value(default_opts.api_limit_get_limit_orders), + bpo::value()->default_value(default_opts.api_limit_get_limit_orders), "For database_api_impl::get_limit_orders to set max limit value") ("api-limit-get-limit-orders-by-account", - bpo::value()->default_value(default_opts.api_limit_get_limit_orders_by_account), + bpo::value()->default_value(default_opts.api_limit_get_limit_orders_by_account), "For database_api_impl::get_limit_orders_by_account to set max limit value") ("api-limit-get-order-book", - bpo::value()->default_value(default_opts.api_limit_get_order_book), + bpo::value()->default_value(default_opts.api_limit_get_order_book), "For database_api_impl::get_order_book to set max limit value") ("api-limit-list-htlcs", - bpo::value()->default_value(default_opts.api_limit_list_htlcs), + bpo::value()->default_value(default_opts.api_limit_list_htlcs), "For database_api_impl::list_htlcs to set max limit value") ("api-limit-lookup-accounts", - bpo::value()->default_value(default_opts.api_limit_lookup_accounts), + bpo::value()->default_value(default_opts.api_limit_lookup_accounts), "For database_api_impl::lookup_accounts to set max limit value") ("api-limit-lookup-witness-accounts", - bpo::value()->default_value(default_opts.api_limit_lookup_witness_accounts), + bpo::value()->default_value(default_opts.api_limit_lookup_witness_accounts), "For database_api_impl::lookup_witness_accounts to set max limit value") ("api-limit-lookup-committee-member-accounts", - bpo::value()->default_value(default_opts.api_limit_lookup_committee_member_accounts), + bpo::value()->default_value(default_opts.api_limit_lookup_committee_member_accounts), "For database_api_impl::lookup_committee_member_accounts to set max limit value") ("api-limit-lookup-vote-ids", - bpo::value()->default_value(default_opts.api_limit_lookup_vote_ids), + 
bpo::value()->default_value(default_opts.api_limit_lookup_vote_ids), "For database_api_impl::lookup_vote_ids to set max limit value") ("api-limit-get-account-limit-orders", - bpo::value()->default_value(default_opts.api_limit_get_account_limit_orders), + bpo::value()->default_value(default_opts.api_limit_get_account_limit_orders), "For database_api_impl::get_account_limit_orders to set max limit value") ("api-limit-get-collateral-bids", - bpo::value()->default_value(default_opts.api_limit_get_collateral_bids), + bpo::value()->default_value(default_opts.api_limit_get_collateral_bids), "For database_api_impl::get_collateral_bids to set max limit value") ("api-limit-get-top-markets", - bpo::value()->default_value(default_opts.api_limit_get_top_markets), + bpo::value()->default_value(default_opts.api_limit_get_top_markets), "For database_api_impl::get_top_markets to set max limit value") ("api-limit-get-trade-history", - bpo::value()->default_value(default_opts.api_limit_get_trade_history), + bpo::value()->default_value(default_opts.api_limit_get_trade_history), "For database_api_impl::get_trade_history to set max limit value") ("api-limit-get-trade-history-by-sequence", - bpo::value()->default_value(default_opts.api_limit_get_trade_history_by_sequence), + bpo::value()->default_value(default_opts.api_limit_get_trade_history_by_sequence), "For database_api_impl::get_trade_history_by_sequence to set max limit value") ("api-limit-get-withdraw-permissions-by-giver", - bpo::value()->default_value(default_opts.api_limit_get_withdraw_permissions_by_giver), + bpo::value()->default_value(default_opts.api_limit_get_withdraw_permissions_by_giver), "For database_api_impl::get_withdraw_permissions_by_giver to set max limit value") ("api-limit-get-withdraw-permissions-by-recipient", - bpo::value()->default_value(default_opts.api_limit_get_withdraw_permissions_by_recipient), + bpo::value()->default_value(default_opts.api_limit_get_withdraw_permissions_by_recipient), "For database_api_impl::get_withdraw_permissions_by_recipient to set max limit value") ("api-limit-get-tickets", - bpo::value()->default_value(default_opts.api_limit_get_tickets), + bpo::value()->default_value(default_opts.api_limit_get_tickets), "Set maximum limit value for database APIs which query for tickets") ("api-limit-get-liquidity-pools", - bpo::value()->default_value(default_opts.api_limit_get_liquidity_pools), + bpo::value()->default_value(default_opts.api_limit_get_liquidity_pools), "Set maximum limit value for database APIs which query for liquidity pools") ("api-limit-get-liquidity-pool-history", - bpo::value()->default_value(default_opts.api_limit_get_liquidity_pool_history), + bpo::value()->default_value(default_opts.api_limit_get_liquidity_pool_history), "Set maximum limit value for APIs which query for history of liquidity pools") ("api-limit-get-samet-funds", - bpo::value()->default_value(default_opts.api_limit_get_samet_funds), + bpo::value()->default_value(default_opts.api_limit_get_samet_funds), "Set maximum limit value for database APIs which query for SameT Funds") ("api-limit-get-credit-offers", - bpo::value()->default_value(default_opts.api_limit_get_credit_offers), + bpo::value()->default_value(default_opts.api_limit_get_credit_offers), "Set maximum limit value for database APIs which query for credit offers or credit deals") ; command_line_options.add(configuration_file_options); diff --git a/libraries/app/database_api.cpp b/libraries/app/database_api.cpp index e786cafdf5..296d9e1d75 100644 --- 
a/libraries/app/database_api.cpp +++ b/libraries/app/database_api.cpp @@ -511,7 +511,7 @@ std::map database_api_impl::get_full_accounts( const if( to_subscribe ) { - if(_subscribed_accounts.size() < 100) { + if(_subscribed_accounts.size() < _app_options->api_limit_get_full_accounts_subscribe) { _subscribed_accounts.insert( account->get_id() ); subscribe_to_item( account->id ); } @@ -1285,28 +1285,11 @@ vector database_api_impl::get_settle_orders_by_account( } -vector database_api::get_margin_positions( const std::string account_id_or_name )const +vector database_api::get_margin_positions( const std::string& account_name_or_id )const { - return my->get_margin_positions( account_id_or_name ); -} - -vector database_api_impl::get_margin_positions( const std::string account_id_or_name )const -{ - try - { - const auto& idx = _db.get_index_type(); - const auto& aidx = idx.indices().get(); - const account_id_type id = get_account_from_string(account_id_or_name)->id; - auto start = aidx.lower_bound( boost::make_tuple( id, asset_id_type(0) ) ); - auto end = aidx.lower_bound( boost::make_tuple( id+1, asset_id_type(0) ) ); - vector result; - while( start != end ) - { - result.push_back(*start); - ++start; - } - return result; - } FC_CAPTURE_AND_RETHROW( (account_id_or_name) ) + FC_ASSERT( my->_app_options, "Internal error" ); + return my->get_call_orders_by_account( account_name_or_id, asset_id_type(), + my->_app_options->api_limit_get_call_orders ); } vector database_api::get_collateral_bids( const std::string& asset, @@ -1424,12 +1407,12 @@ market_volume database_api_impl::get_24_volume( const string& base, const string return result; } -order_book database_api::get_order_book( const string& base, const string& quote, unsigned limit )const +order_book database_api::get_order_book( const string& base, const string& quote, uint32_t limit )const { - return my->get_order_book( base, quote, limit); + return my->get_order_book( base, quote, limit ); } -order_book database_api_impl::get_order_book( const string& base, const string& quote, unsigned limit )const +order_book database_api_impl::get_order_book( const string& base, const string& quote, uint32_t limit )const { FC_ASSERT( _app_options, "Internal error" ); const auto configured_limit = _app_options->api_limit_get_order_book; @@ -1513,7 +1496,7 @@ vector database_api::get_trade_history( const string& base, const string& quote, fc::time_point_sec start, fc::time_point_sec stop, - unsigned limit )const + uint32_t limit )const { return my->get_trade_history( base, quote, start, stop, limit ); } @@ -1522,7 +1505,7 @@ vector database_api_impl::get_trade_history( const string& base, const string& quote, fc::time_point_sec start, fc::time_point_sec stop, - unsigned limit )const + uint32_t limit )const { FC_ASSERT( _app_options && _app_options->has_market_history_plugin, "Market history plugin is not enabled." ); @@ -1615,7 +1598,7 @@ vector database_api::get_trade_history_by_sequence( const string& quote, int64_t start, fc::time_point_sec stop, - unsigned limit )const + uint32_t limit )const { return my->get_trade_history_by_sequence( base, quote, start, stop, limit ); } @@ -1625,7 +1608,7 @@ vector database_api_impl::get_trade_history_by_sequence( const string& quote, int64_t start, fc::time_point_sec stop, - unsigned limit )const + uint32_t limit )const { FC_ASSERT( _app_options && _app_options->has_market_history_plugin, "Market history plugin is not enabled." 
); @@ -1735,9 +1718,9 @@ vector database_api_impl::get_trade_history_by_sequence( ////////////////////////////////////////////////////////////////////// vector database_api::list_liquidity_pools( - optional limit, - optional start_id, - optional with_statistics )const + const optional& limit, + const optional& start_id, + const optional& with_statistics )const { return my->list_liquidity_pools( limit, @@ -1746,11 +1729,11 @@ vector database_api::list_liquidity_pools( } vector database_api_impl::list_liquidity_pools( - optional olimit, - optional ostart_id, - optional with_statistics )const + const optional& olimit, + const optional& ostart_id, + const optional& with_statistics )const { - uint32_t limit = olimit.valid() ? *olimit : 101; + uint32_t limit = olimit.valid() ? *olimit : application_options::get_default().api_limit_get_liquidity_pools; FC_ASSERT( _app_options, "Internal error" ); const auto configured_limit = _app_options->api_limit_get_liquidity_pools; @@ -1778,10 +1761,10 @@ vector database_api_impl::list_liquidity_pools( } vector database_api::get_liquidity_pools_by_asset_a( - std::string asset_symbol_or_id, - optional limit, - optional start_id, - optional with_statistics )const + const std::string& asset_symbol_or_id, + const optional& limit, + const optional& start_id, + const optional& with_statistics )const { return my->get_liquidity_pools_by_asset_a( asset_symbol_or_id, @@ -1791,10 +1774,10 @@ vector database_api::get_liquidity_pools_by_asse } vector database_api_impl::get_liquidity_pools_by_asset_a( - std::string asset_symbol_or_id, - optional limit, - optional start_id, - optional with_statistics )const + const std::string& asset_symbol_or_id, + const optional& limit, + const optional& start_id, + const optional& with_statistics )const { return get_liquidity_pools_by_asset_x( asset_symbol_or_id, @@ -1804,10 +1787,10 @@ vector database_api_impl::get_liquidity_pools_by } vector database_api::get_liquidity_pools_by_asset_b( - std::string asset_symbol_or_id, - optional limit, - optional start_id, - optional with_statistics )const + const std::string& asset_symbol_or_id, + const optional& limit, + const optional& start_id, + const optional& with_statistics )const { return my->get_liquidity_pools_by_asset_b( asset_symbol_or_id, @@ -1817,10 +1800,10 @@ vector database_api::get_liquidity_pools_by_asse } vector database_api_impl::get_liquidity_pools_by_asset_b( - std::string asset_symbol_or_id, - optional limit, - optional start_id, - optional with_statistics )const + const std::string& asset_symbol_or_id, + const optional& limit, + const optional& start_id, + const optional& with_statistics )const { return get_liquidity_pools_by_asset_x( asset_symbol_or_id, @@ -1852,7 +1835,7 @@ vector database_api_impl::get_liquidity_pools_by FC_ASSERT( _app_options && _app_options->has_api_helper_indexes_plugin, "api_helper_indexes plugin is not enabled on this server." ); - uint32_t limit = olimit.valid() ? *olimit : 101; + uint32_t limit = olimit.valid() ? 
*olimit : application_options::get_default().api_limit_get_liquidity_pools; const auto configured_limit = _app_options->api_limit_get_liquidity_pools; FC_ASSERT( limit <= configured_limit, "limit can not be greater than ${configured_limit}", @@ -1882,11 +1865,11 @@ vector database_api_impl::get_liquidity_pools_by } vector database_api::get_liquidity_pools_by_both_assets( - std::string asset_symbol_or_id_a, - std::string asset_symbol_or_id_b, - optional limit, - optional start_id, - optional with_statistics )const + const std::string& asset_symbol_or_id_a, + const std::string& asset_symbol_or_id_b, + const optional& limit, + const optional& start_id, + const optional& with_statistics )const { return my->get_liquidity_pools_by_both_assets( asset_symbol_or_id_a, @@ -1897,13 +1880,13 @@ vector database_api::get_liquidity_pools_by_both } vector database_api_impl::get_liquidity_pools_by_both_assets( - std::string asset_symbol_or_id_a, - std::string asset_symbol_or_id_b, - optional olimit, - optional ostart_id, - optional with_statistics )const + const std::string& asset_symbol_or_id_a, + const std::string& asset_symbol_or_id_b, + const optional& olimit, + const optional& ostart_id, + const optional& with_statistics )const { - uint32_t limit = olimit.valid() ? *olimit : 101; + uint32_t limit = olimit.valid() ? *olimit : application_options::get_default().api_limit_get_liquidity_pools; FC_ASSERT( _app_options, "Internal error" ); const auto configured_limit = _app_options->api_limit_get_liquidity_pools; @@ -1937,8 +1920,8 @@ vector database_api_impl::get_liquidity_pools_by vector> database_api::get_liquidity_pools( const vector& ids, - optional subscribe, - optional with_statistics )const + const optional& subscribe, + const optional& with_statistics )const { return my->get_liquidity_pools( ids, @@ -1948,8 +1931,8 @@ vector> database_api::get_liquidity_poo vector> database_api_impl::get_liquidity_pools( const vector& ids, - optional subscribe, - optional with_statistics )const + const optional& subscribe, + const optional& with_statistics )const { FC_ASSERT( _app_options, "Internal error" ); const auto configured_limit = _app_options->api_limit_get_liquidity_pools; @@ -1983,8 +1966,8 @@ vector> database_api_impl::get_liquidit vector> database_api::get_liquidity_pools_by_share_asset( const vector& asset_symbols_or_ids, - optional subscribe, - optional with_statistics )const + const optional& subscribe, + const optional& with_statistics )const { return my->get_liquidity_pools_by_share_asset( asset_symbols_or_ids, @@ -1994,8 +1977,8 @@ vector> database_api::get_liquidity_poo vector> database_api_impl::get_liquidity_pools_by_share_asset( const vector& asset_symbols_or_ids, - optional subscribe, - optional with_statistics )const + const optional& subscribe, + const optional& with_statistics )const { FC_ASSERT( _app_options, "Internal error" ); const auto configured_limit = _app_options->api_limit_get_liquidity_pools; @@ -2027,10 +2010,10 @@ vector> database_api_impl::get_liquidit } vector database_api::get_liquidity_pools_by_owner( - std::string account_name_or_id, - optional limit, - optional start_id, - optional with_statistics )const + const std::string& account_name_or_id, + const optional& limit, + const optional& start_id, + const optional& with_statistics )const { return my->get_liquidity_pools_by_owner( account_name_or_id, @@ -2040,12 +2023,12 @@ vector database_api::get_liquidity_pools_by_owne } vector database_api_impl::get_liquidity_pools_by_owner( - std::string account_name_or_id, - 
optional olimit, - optional ostart_id, - optional with_statistics )const + const std::string& account_name_or_id, + const optional& olimit, + const optional& ostart_id, + const optional& with_statistics )const { - uint32_t limit = olimit.valid() ? *olimit : 101; + uint32_t limit = olimit.valid() ? *olimit : application_options::get_default().api_limit_get_liquidity_pools; FC_ASSERT( _app_options, "Internal error" ); const auto configured_limit = _app_options->api_limit_get_liquidity_pools; @@ -2264,12 +2247,12 @@ vector> database_api_impl::get_witnesses(const vector database_api::get_witness_by_account(const std::string account_id_or_name)const +fc::optional database_api::get_witness_by_account(const std::string& account_id_or_name)const { return my->get_witness_by_account( account_id_or_name ); } -fc::optional database_api_impl::get_witness_by_account(const std::string account_id_or_name) const +fc::optional database_api_impl::get_witness_by_account(const std::string& account_id_or_name) const { const auto& idx = _db.get_index_type().indices().get(); const account_id_type account = get_account_from_string(account_id_or_name)->id; @@ -2354,13 +2337,13 @@ vector> database_api_impl::get_committee_membe } fc::optional database_api::get_committee_member_by_account( - const std::string account_id_or_name )const + const std::string& account_id_or_name )const { return my->get_committee_member_by_account( account_id_or_name ); } fc::optional database_api_impl::get_committee_member_by_account( - const std::string account_id_or_name )const + const std::string& account_id_or_name )const { const auto& idx = _db.get_index_type().indices().get(); const account_id_type account = get_account_from_string(account_id_or_name)->id; @@ -2426,12 +2409,12 @@ uint64_t database_api_impl::get_committee_count()const // // ////////////////////////////////////////////////////////////////////// -vector database_api::get_all_workers( const optional is_expired )const +vector database_api::get_all_workers( const optional& is_expired )const { return my->get_all_workers( is_expired ); } -vector database_api_impl::get_all_workers( const optional is_expired )const +vector database_api_impl::get_all_workers( const optional& is_expired )const { vector result; @@ -2459,12 +2442,12 @@ vector database_api_impl::get_all_workers( const optional i return result; } -vector database_api::get_workers_by_account(const std::string account_id_or_name)const +vector database_api::get_workers_by_account(const std::string& account_id_or_name)const { return my->get_workers_by_account( account_id_or_name ); } -vector database_api_impl::get_workers_by_account(const std::string account_id_or_name)const +vector database_api_impl::get_workers_by_account(const std::string& account_id_or_name)const { vector result; const auto& workers_idx = _db.get_index_type().indices().get(); @@ -3086,86 +3069,27 @@ vector database_api_impl::list_htlcs(const htlc_id_type start, uint ////////////////////////////////////////////////////////////////////// vector database_api::list_tickets( - optional limit, - optional start_id )const -{ - return my->list_tickets( - limit, - start_id ); -} - -vector database_api_impl::list_tickets( - optional olimit, - optional ostart_id )const + const optional& limit, + const optional& start_id )const { - uint32_t limit = olimit.valid() ? 
*olimit : 101; - - FC_ASSERT( _app_options, "Internal error" ); - const auto configured_limit = _app_options->api_limit_get_tickets; - FC_ASSERT( limit <= configured_limit, - "limit can not be greater than ${configured_limit}", - ("configured_limit", configured_limit) ); - - vector results; - - ticket_id_type start_id = ostart_id.valid() ? *ostart_id : ticket_id_type(); - - const auto& idx = _db.get_index_type().indices().get(); - auto lower_itr = idx.lower_bound( start_id ); - auto upper_itr = idx.end(); - - results.reserve( limit ); - uint32_t count = 0; - for ( ; lower_itr != upper_itr && count < limit; ++lower_itr, ++count) - { - results.emplace_back( *lower_itr ); - } - - return results; + const auto& idx = my->_db.get_index_type().indices().get(); + return my->get_objects_by_x< ticket_object, + ticket_id_type + >( &application_options::api_limit_get_tickets, + idx, limit, start_id ); } vector database_api::get_tickets_by_account( - std::string account_name_or_id, - optional limit, - optional start_id )const -{ - return my->get_tickets_by_account( - account_name_or_id, - limit, - start_id ); -} - -vector database_api_impl::get_tickets_by_account( - std::string account_name_or_id, - optional olimit, - optional ostart_id )const -{ - uint32_t limit = olimit.valid() ? *olimit : 101; - - FC_ASSERT( _app_options, "Internal error" ); - const auto configured_limit = _app_options->api_limit_get_tickets; - FC_ASSERT( limit <= configured_limit, - "limit can not be greater than ${configured_limit}", - ("configured_limit", configured_limit) ); - - vector results; - - account_id_type account = get_account_from_string(account_name_or_id)->id; - - ticket_id_type start_id = ostart_id.valid() ? *ostart_id : ticket_id_type(); - - const auto& idx = _db.get_index_type().indices().get(); - auto lower_itr = idx.lower_bound( std::make_tuple( account, start_id ) ); - auto upper_itr = idx.upper_bound( account ); - - results.reserve( limit ); - uint32_t count = 0; - for ( ; lower_itr != upper_itr && count < limit; ++lower_itr, ++count) - { - results.emplace_back( *lower_itr ); - } - - return results; + const std::string& account_name_or_id, + const optional& limit, + const optional& start_id )const +{ + account_id_type account = my->get_account_from_string(account_name_or_id)->id; + const auto& idx = my->_db.get_index_type().indices().get(); + return my->get_objects_by_x< ticket_object, + ticket_id_type + >( &application_options::api_limit_get_tickets, + idx, limit, start_id, account ); } ////////////////////////////////////////////////////////////////////// diff --git a/libraries/app/database_api_impl.hxx b/libraries/app/database_api_impl.hxx index 634e41219e..06ddc05ffb 100644 --- a/libraries/app/database_api_impl.hxx +++ b/libraries/app/database_api_impl.hxx @@ -115,7 +115,6 @@ class database_api_impl : public std::enable_shared_from_this vector get_settle_orders_by_account(const std::string& account_name_or_id, force_settlement_id_type start, uint32_t limit)const; - vector get_margin_positions( const std::string account_id_or_name )const; vector get_collateral_bids( const std::string& asset, uint32_t limit, uint32_t start)const; @@ -127,58 +126,58 @@ class database_api_impl : public std::enable_shared_from_this bool skip_order_book = false )const; market_volume get_24_volume( const string& base, const string& quote )const; order_book get_order_book( const string& base, const string& quote, - unsigned limit = 50 )const; + uint32_t limit )const; vector get_top_markets( uint32_t limit )const; vector 
get_trade_history( const string& base, const string& quote, fc::time_point_sec start, fc::time_point_sec stop, - unsigned limit = 100 )const; + uint32_t limit )const; vector get_trade_history_by_sequence( const string& base, const string& quote, int64_t start, fc::time_point_sec stop, - unsigned limit = 100 )const; + uint32_t limit )const; // Liquidity pools vector list_liquidity_pools( - optional limit = 101, - optional start_id = optional(), - optional with_statistics = false )const; + const optional& limit, + const optional& start_id, + const optional& with_statistics )const; vector get_liquidity_pools_by_asset_a( - std::string asset_symbol_or_id, - optional limit = 101, - optional start_id = optional(), - optional with_statistics = false )const; + const std::string& asset_symbol_or_id, + const optional& limit, + const optional& start_id, + const optional& with_statistics )const; vector get_liquidity_pools_by_asset_b( - std::string asset_symbol_or_id, - optional limit = 101, - optional start_id = optional(), - optional with_statistics = false )const; + const std::string& asset_symbol_or_id, + const optional& limit, + const optional& start_id, + const optional& with_statistics )const; vector get_liquidity_pools_by_one_asset( const std::string& asset_symbol_or_id, - const optional& limit = 101, - const optional& start_id = optional(), - const optional& with_statistics = false )const; + const optional& limit, + const optional& start_id, + const optional& with_statistics )const; vector get_liquidity_pools_by_both_assets( - std::string asset_symbol_or_id_a, - std::string asset_symbol_or_id_b, - optional limit = 101, - optional start_id = optional(), - optional with_statistics = false )const; + const std::string& asset_symbol_or_id_a, + const std::string& asset_symbol_or_id_b, + const optional& limit, + const optional& start_id, + const optional& with_statistics )const; vector> get_liquidity_pools( const vector& ids, - optional subscribe = optional(), - optional with_statistics = false )const; + const optional& subscribe, + const optional& with_statistics )const; vector> get_liquidity_pools_by_share_asset( const vector& asset_symbols_or_ids, - optional subscribe = optional(), - optional with_statistics = false )const; + const optional& subscribe, + const optional& with_statistics )const; vector get_liquidity_pools_by_owner( - std::string account_name_or_id, - optional limit = 101, - optional start_id = optional(), - optional with_statistics = false )const; + const std::string& account_name_or_id, + const optional& limit, + const optional& start_id, + const optional& with_statistics )const; // Witnesses vector> get_witnesses(const vector& witness_ids)const; - fc::optional get_witness_by_account(const std::string account_id_or_name)const; + fc::optional get_witness_by_account(const std::string& account_id_or_name)const; map lookup_witness_accounts(const string& lower_bound_name, uint32_t limit)const; uint64_t get_witness_count()const; @@ -186,14 +185,14 @@ class database_api_impl : public std::enable_shared_from_this vector> get_committee_members( const vector& committee_member_ids )const; fc::optional get_committee_member_by_account( - const std::string account_id_or_name )const; + const std::string& account_id_or_name )const; map lookup_committee_member_accounts( const string& lower_bound_name, uint32_t limit )const; uint64_t get_committee_count()const; // Workers - vector get_all_workers( const optional is_expired = optional() )const; - vector get_workers_by_account(const std::string 
account_id_or_name)const; + vector get_all_workers( const optional& is_expired )const; + vector get_workers_by_account(const std::string& account_id_or_name)const; uint64_t get_worker_count()const; // Votes @@ -236,15 +235,6 @@ class database_api_impl : public std::enable_shared_from_this htlc_id_type start, uint32_t limit) const; vector list_htlcs(const htlc_id_type lower_bound_id, uint32_t limit) const; - // Tickets - vector list_tickets( - optional limit = 101, - optional start_id = optional() )const; - vector get_tickets_by_account( - std::string account_name_or_id, - optional limit = 101, - optional start_id = optional() )const; - //private: //////////////////////////////////////////////// @@ -308,7 +298,7 @@ class database_api_impl : public std::enable_shared_from_this optional ostart_id, optional with_statistics )const { - uint32_t limit = olimit.valid() ? *olimit : 101; + uint32_t limit = olimit.valid() ? *olimit : application_options::get_default().api_limit_get_liquidity_pools; FC_ASSERT( _app_options, "Internal error" ); const auto configured_limit = _app_options->api_limit_get_liquidity_pools; diff --git a/libraries/app/include/graphene/app/application.hpp b/libraries/app/include/graphene/app/application.hpp index 73914a6fdb..fdec652ca4 100644 --- a/libraries/app/include/graphene/app/application.hpp +++ b/libraries/app/include/graphene/app/application.hpp @@ -43,41 +43,42 @@ namespace graphene { namespace app { bool has_api_helper_indexes_plugin = false; bool has_market_history_plugin = false; - uint64_t api_limit_get_account_history_operations = 100; - uint64_t api_limit_get_account_history = 100; - uint64_t api_limit_get_grouped_limit_orders = 101; + uint32_t api_limit_get_account_history_operations = 100; + uint32_t api_limit_get_account_history = 100; + uint32_t api_limit_get_grouped_limit_orders = 101; uint32_t api_limit_get_market_history = 200; - uint64_t api_limit_get_relative_account_history = 100; - uint64_t api_limit_get_account_history_by_operations = 100; - uint64_t api_limit_get_asset_holders = 100; - uint64_t api_limit_get_key_references = 100; - uint64_t api_limit_get_htlc_by = 100; - uint64_t api_limit_get_full_accounts = 50; - uint64_t api_limit_get_full_accounts_lists = 500; - uint64_t api_limit_get_top_voters = 200; - uint64_t api_limit_get_call_orders = 300; - uint64_t api_limit_get_settle_orders = 300; - uint64_t api_limit_get_assets = 101; - uint64_t api_limit_get_limit_orders = 300; - uint64_t api_limit_get_limit_orders_by_account = 101; - uint64_t api_limit_get_order_book = 50; - uint64_t api_limit_list_htlcs = 100; - uint64_t api_limit_lookup_accounts = 1000; - uint64_t api_limit_lookup_witness_accounts = 1000; - uint64_t api_limit_lookup_committee_member_accounts = 1000; - uint64_t api_limit_lookup_vote_ids = 1000; - uint64_t api_limit_get_account_limit_orders = 101; - uint64_t api_limit_get_collateral_bids = 100; - uint64_t api_limit_get_top_markets = 100; - uint64_t api_limit_get_trade_history = 100; - uint64_t api_limit_get_trade_history_by_sequence = 100; - uint64_t api_limit_get_withdraw_permissions_by_giver = 101; - uint64_t api_limit_get_withdraw_permissions_by_recipient = 101; - uint64_t api_limit_get_tickets = 101; - uint64_t api_limit_get_liquidity_pools = 101; - uint64_t api_limit_get_liquidity_pool_history = 101; - uint64_t api_limit_get_samet_funds = 101; - uint64_t api_limit_get_credit_offers = 101; + uint32_t api_limit_get_relative_account_history = 100; + uint32_t api_limit_get_account_history_by_operations = 100; + uint32_t 
api_limit_get_asset_holders = 100; + uint32_t api_limit_get_key_references = 100; + uint32_t api_limit_get_htlc_by = 100; + uint32_t api_limit_get_full_accounts = 50; + uint32_t api_limit_get_full_accounts_lists = 500; + uint32_t api_limit_get_full_accounts_subscribe = 100; + uint32_t api_limit_get_top_voters = 200; + uint32_t api_limit_get_call_orders = 300; + uint32_t api_limit_get_settle_orders = 300; + uint32_t api_limit_get_assets = 101; + uint32_t api_limit_get_limit_orders = 300; + uint32_t api_limit_get_limit_orders_by_account = 101; + uint32_t api_limit_get_order_book = 50; + uint32_t api_limit_list_htlcs = 100; + uint32_t api_limit_lookup_accounts = 1000; + uint32_t api_limit_lookup_witness_accounts = 1000; + uint32_t api_limit_lookup_committee_member_accounts = 1000; + uint32_t api_limit_lookup_vote_ids = 1000; + uint32_t api_limit_get_account_limit_orders = 101; + uint32_t api_limit_get_collateral_bids = 100; + uint32_t api_limit_get_top_markets = 100; + uint32_t api_limit_get_trade_history = 100; + uint32_t api_limit_get_trade_history_by_sequence = 100; + uint32_t api_limit_get_withdraw_permissions_by_giver = 101; + uint32_t api_limit_get_withdraw_permissions_by_recipient = 101; + uint32_t api_limit_get_tickets = 101; + uint32_t api_limit_get_liquidity_pools = 101; + uint32_t api_limit_get_liquidity_pool_history = 101; + uint32_t api_limit_get_samet_funds = 101; + uint32_t api_limit_get_credit_offers = 101; static constexpr application_options get_default() { diff --git a/libraries/app/include/graphene/app/database_api.hpp b/libraries/app/include/graphene/app/database_api.hpp index 6db77eb7e9..a8ffdf12d1 100644 --- a/libraries/app/include/graphene/app/database_api.hpp +++ b/libraries/app/include/graphene/app/database_api.hpp @@ -227,7 +227,9 @@ class database_api /** * @brief Get all accounts that refer to the specified public keys in their owner authority, active authorities * or memo key - * @param keys a list of public keys to query + * @param keys a list of public keys to query, + * the quantity should not be greater than the configured value of + * @a api_limit_get_key_references * @return ID of all accounts that refer to the specified keys */ vector> get_key_references( vector keys )const; @@ -267,16 +269,22 @@ class database_api optional subscribe = optional() )const; /** - * @brief Fetch all objects relevant to the specified accounts and optionally subscribe to updates - * @param names_or_ids Each item must be the name or ID of an account to retrieve + * @brief Fetch objects relevant to the specified accounts and optionally subscribe to updates + * @param names_or_ids Each item must be the name or ID of an account to retrieve, + * the quantity should not be greater than the configured value of + * @a api_limit_get_full_accounts * @param subscribe @a true to subscribe to the queried full account objects; @a false to not subscribe; * @a null to subscribe or not subscribe according to current auto-subscription setting * (see @ref set_auto_subscription) * @return Map of string from @p names_or_ids to the corresponding account * - * This function fetches all relevant objects for the given accounts, and subscribes to updates to the given + * This function fetches relevant objects for the given accounts, and subscribes to updates to the given * accounts. If any of the strings in @p names_or_ids cannot be tied to an account, that input will be - * ignored. All other accounts will be retrieved and subscribed. + * ignored. Other accounts will be retrieved and subscribed. 
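The two @note lines that follow document behaviour introduced in the database_api.cpp hunk above, where a subscription is only recorded while _subscribed_accounts.size() is below api_limit_get_full_accounts_subscribe. The stand-alone sketch below shows the same cap-and-ignore idea in isolation; subscription_tracker is an invented name, not a graphene class.

   // Hypothetical helper, not graphene code: enforce a per-connection cap on how many
   // accounts may be subscribed; requests beyond the cap are silently ignored.
   #include <cstdint>
   #include <set>

   struct subscription_tracker
   {
      uint32_t           cap;         // e.g. the configured api_limit_get_full_accounts_subscribe
      std::set<uint64_t> subscribed;  // account instances already subscribed on this connection

      // Returns true if the account is subscribed after the call, false if the request was ignored.
      bool try_subscribe( uint64_t account_instance )
      {
         if( subscribed.count( account_instance ) != 0 )
            return true;                    // already subscribed, nothing to do
         if( subscribed.size() >= cap )
            return false;                   // cap reached: ignore the new subscription
         subscribed.insert( account_instance );
         return true;
      }
   };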
+ * @note the maximum number of accounts allowed to subscribe per connection is configured by the + * @a api_limit_get_full_accounts_subscribe option. Exceeded subscriptions will be ignored. + * @note for each object type, the maximum number of objects to return is configured by the + * @a api_limit_get_full_accounts_lists option. Exceeded objects need to be queried with other APIs. * */ std::map get_full_accounts( const vector& names_or_ids, @@ -284,7 +292,8 @@ class database_api /** * @brief Returns vector of voting power sorted by reverse vp_active - * @param limit Max number of results + * @param limit Maximum number of accounts to retrieve, must not exceed the configured value of + * @a api_limit_get_top_voters * @return Desc Sorted voting power vector */ vector get_top_voters(uint32_t limit)const; @@ -315,7 +324,8 @@ class database_api /** * @brief Get names and IDs for registered accounts * @param lower_bound_name Lower bound of the first name to return - * @param limit Maximum number of results to return -- must not exceed 1000 + * @param limit Maximum number of results to return, must not exceed the configured value of + * @a api_limit_lookup_accounts * @param subscribe @a true to subscribe to the queried account objects; @a false to not subscribe; * @a null to subscribe or not subscribe according to current auto-subscription setting * (see @ref set_auto_subscription) @@ -398,7 +408,8 @@ class database_api /** * @brief Get assets alphabetically by symbol name * @param lower_bound_symbol Lower bound of symbol names to retrieve - * @param limit Maximum number of assets to fetch (must not exceed 101) + * @param limit Maximum number of assets to fetch, must not exceed the configured value of + * @a api_limit_get_assets * @return The assets found */ vector list_assets(const string& lower_bound_symbol, uint32_t limit)const; @@ -422,7 +433,8 @@ class database_api * @brief Get assets issued (owned) by a given account * @param issuer_name_or_id Account name or ID to get objects from * @param start Asset objects(1.3.X) before this ID will be skipped in results. Pagination purposes. 
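(The notes just added for get_key_references and get_full_accounts describe a cap on the size of the input list rather than on the result set. Below is a minimal sketch of that kind of precondition check, with std::length_error standing in for the FC_ASSERT used by the real implementation and all names invented for the example.)

    #include <cstdint>
    #include <stdexcept>
    #include <string>
    #include <vector>

    // Reject a query whose input list is larger than the operator-configured cap,
    // mirroring the "quantity should not be greater than the configured value" notes.
    inline void check_input_size( const std::vector<std::string>& items, uint32_t configured_limit )
    {
       if( items.size() > configured_limit )
          throw std::length_error( "number of querying items can not be greater than the configured limit" );
    }

(The same shape of check applies to the other list-taking getters documented in this patch, such as get_liquidity_pools and lookup_vote_ids.)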
- * @param limit Maximum number of orders to retrieve + * @param limit Maximum number of assets to retrieve, must not exceed the configured value of + * @a api_limit_get_assets * @return The assets issued (owned) by the account */ vector get_assets_by_issuer(const std::string& issuer_name_or_id, @@ -436,7 +448,8 @@ class database_api * @brief Get limit orders in a given market * @param a symbol or ID of asset being sold * @param b symbol or ID of asset being purchased - * @param limit Maximum number of orders to retrieve + * @param limit Maximum number of orders to retrieve, must not exceed the configured value of + * @a api_limit_get_limit_orders * @return The limit orders, ordered from least price to greatest */ vector get_limit_orders(std::string a, std::string b, uint32_t limit)const; @@ -445,19 +459,21 @@ class database_api * @brief Fetch open limit orders in all markets relevant to the specified account, ordered by ID * * @param account_name_or_id The name or ID of an account to retrieve - * @param limit The limitation of items each query can fetch, not greater than a configured value + * @param limit The limitation of items each query can fetch, not greater than the configured value of + * @a api_limit_get_limit_orders_by_account * @param start_id Start order id, fetch orders whose IDs are greater than or equal to this order * * @return List of limit orders of the specified account * * @note * 1. if @p account_name_or_id cannot be tied to an account, an error will be returned - * 2. @p limit can be omitted or be null, if so the default value 101 will be used + * 2. @p limit can be omitted or be null, if so the default value of + * @ref application_options::api_limit_get_limit_orders_by_account will be used * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of orders * 4. 
can only omit one or more arguments in the end of the list, but not one or more in the middle */ vector get_limit_orders_by_account( const string& account_name_or_id, - optional limit = 101, + optional limit = application_options::get_default().api_limit_get_limit_orders_by_account, optional start_id = optional() ); /** @@ -467,7 +483,8 @@ class database_api * @param account_name_or_id The name or ID of an account to retrieve * @param base Base asset * @param quote Quote asset - * @param limit The limitation of items each query can fetch, not greater than 101 + * @param limit The limitation of items each query can fetch, not greater than the configured value of + * @a api_limit_get_account_limit_orders * @param ostart_id Start order id, fetch orders which price lower than this order, * or price equal to this order but order ID greater than this order * @param ostart_price Fetch orders with price lower than or equal to this price @@ -484,16 +501,17 @@ class database_api * @p ostart_price, but orders' id greater than @p ostart_id */ vector get_account_limit_orders( const string& account_name_or_id, - const string &base, - const string "e, - uint32_t limit = 101, - optional ostart_id = optional(), - optional ostart_price = optional()); + const string &base, + const string "e, + uint32_t limit = application_options::get_default().api_limit_get_account_limit_orders, + optional ostart_id = optional(), + optional ostart_price = optional()); /** * @brief Get call orders (aka margin positions) for a given asset * @param a symbol name or ID of the debt asset - * @param limit Maximum number of orders to retrieve + * @param limit Maximum number of orders to retrieve, must not exceed the configured value of + * @a api_limit_get_call_orders * @return The call orders, ordered from earliest to be called to latest */ vector get_call_orders(const std::string& a, uint32_t limit)const; @@ -502,7 +520,8 @@ class database_api * @brief Get call orders (aka margin positions) of a given account * @param account_name_or_id Account name or ID to get objects from * @param start Asset objects(1.3.X) before this ID will be skipped in results. Pagination purposes. - * @param limit Maximum number of objects to retrieve + * @param limit Maximum number of orders to retrieve, must not exceed the configured value of + * @a api_limit_get_call_orders * @return The call orders of the account */ vector get_call_orders_by_account(const std::string& account_name_or_id, @@ -511,7 +530,8 @@ class database_api /** * @brief Get forced settlement orders in a given asset * @param a Symbol or ID of asset being settled - * @param limit Maximum number of orders to retrieve + * @param limit Maximum number of orders to retrieve, must not exceed the configured value of + * @a api_limit_get_settle_orders * @return The settle orders, ordered from earliest settlement date to latest */ vector get_settle_orders(const std::string& a, uint32_t limit)const; @@ -520,7 +540,9 @@ class database_api * @brief Get forced settlement orders of a given account * @param account_name_or_id Account name or ID to get objects from * @param start Force settlement objects(1.4.X) before this ID will be skipped in results. Pagination purposes. 
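(Several getters documented here accept a nullable limit whose default now comes from the configured options; the resolution side of that pattern appears earlier in this patch in database_api_impl. In isolation it can be sketched as follows, with std::optional standing in for fc::optional and illustrative names.)

    #include <cstdint>
    #include <optional>
    #include <stdexcept>

    // Resolve a client-supplied optional limit:
    //   * omitted or null -> fall back to the configured default,
    //   * supplied        -> accept it only if it does not exceed the configured maximum.
    inline uint32_t resolve_limit( const std::optional<uint32_t>& requested,
                                   uint32_t configured_default,
                                   uint32_t configured_maximum )
    {
       const uint32_t limit = requested.value_or( configured_default );
       if( limit > configured_maximum )
          throw std::invalid_argument( "limit can not be greater than the configured maximum" );
       return limit;
    }

(Note that in the hunk near the top of this section the fallback comes from the compile-time get_default() value while the cap comes from the node's runtime configuration, so the two can differ when an operator lowers a limit below its default.)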
- * @param limit Maximum number of orders to retrieve + * @param limit Maximum number of orders to retrieve, must not exceed the configured value of + * @a api_limit_get_settle_orders + * @return The settle orders, ordered from earliest settlement date to latest * @return The settle orders of the account */ vector get_settle_orders_by_account( const std::string& account_name_or_id, @@ -530,20 +552,22 @@ class database_api /** * @brief Get collateral_bid_objects for a given asset * @param a Symbol or ID of asset - * @param limit Maximum number of objects to retrieve + * @param limit Maximum number of objects to retrieve, must not exceed the configured value of + * @a api_limit_get_collateral_bids * @param start skip that many results * @return The settle orders, ordered from earliest settlement date to latest */ vector get_collateral_bids(const std::string& a, uint32_t limit, uint32_t start)const; /** - * @brief Get all open margin positions of a given account + * @brief Get open margin positions of a given account * @param account_name_or_id name or ID of an account - * @return all open margin positions of the account + * @return open margin positions of the account * - * Similar to @ref get_call_orders_by_account, but without pagination. + * Similar to @ref get_call_orders_by_account, but only the first page will be returned, the page size is + * the configured value of @a api_limit_get_call_orders. */ - vector get_margin_positions( const std::string account_name_or_id )const; + vector get_margin_positions( const std::string& account_name_or_id )const; /** * @brief Request notification when the active orders in the market between two assets changes @@ -584,15 +608,18 @@ class database_api * @brief Returns the order book for the market base:quote * @param base symbol name or ID of the base asset * @param quote symbol name or ID of the quote asset - * @param limit depth of the order book to retrieve, for bids and asks each, capped at 50 + * @param limit depth of the order book to retrieve, for bids and asks each, capped at the configured value of + * @a api_limit_get_order_book and @a api_limit_get_limit_orders * @return Order book of the market */ - order_book get_order_book( const string& base, const string& quote, unsigned limit = 50 )const; + order_book get_order_book( const string& base, const string& quote, + uint32_t limit = application_options::get_default().api_limit_get_order_book )const; /** * @brief Returns vector of tickers sorted by reverse base_volume - * Note: this API is experimental and subject to change in next releases - * @param limit Max number of results + * @note this API is experimental and subject to change in next releases + * @param limit Max number of results, must not exceed the configured value of + * @a api_limit_get_top_markets * @return Desc Sorted ticker vector */ vector get_top_markets(uint32_t limit)const; @@ -603,16 +630,17 @@ class database_api * @param quote symbol or ID of the quote asset * @param start Start time as a UNIX timestamp, the latest transactions to retrieve * @param stop Stop time as a UNIX timestamp, the earliest transactions to retrieve - * @param limit Maximum quantity of transactions to retrieve, capped at 100. + * @param limit Maximum quantity of transactions to retrieve, capped at the configured value of + * @a api_limit_get_trade_history * @return Transactions in the market * @note The time must be UTC, timezone offsets are not supported. The range is [stop, start]. 
- * In case when there are more than 100 transactions occurred in the same second, - * this API only returns the most recent 100 records, the rest records can be retrieved + * In case when there are more transactions than @p limit occurred in the same second, + * this API only returns the most recent records, the rest records can be retrieved * with the @ref get_trade_history_by_sequence API. */ vector get_trade_history( const string& base, const string& quote, - fc::time_point_sec start, fc::time_point_sec stop, - unsigned limit = 100 )const; + fc::time_point_sec start, fc::time_point_sec stop, + uint32_t limit = application_options::get_default().api_limit_get_trade_history )const; /** * @brief Get market transactions occurred in the market base:quote, ordered by time, most recent first. @@ -620,13 +648,14 @@ class database_api * @param quote symbol or ID of the quote asset * @param start Start sequence as an Integer, the latest transaction to retrieve * @param stop Stop time as a UNIX timestamp, the earliest transactions to retrieve - * @param limit Maximum quantity of transactions to retrieve, capped at 100 + * @param limit Maximum quantity of transactions to retrieve, capped at the configured value of + * @a api_limit_get_trade_history_by_sequence * @return Transactions in the market * @note The time must be UTC, timezone offsets are not supported. The range is [stop, start]. */ vector get_trade_history_by_sequence( const string& base, const string& quote, - int64_t start, fc::time_point_sec stop, - unsigned limit = 100 )const; + int64_t start, fc::time_point_sec stop, + uint32_t limit = application_options::get_default().api_limit_get_trade_history_by_sequence )const; ///////////////////// @@ -635,78 +664,86 @@ class database_api /** * @brief Get a list of liquidity pools - * @param limit The limitation of items each query can fetch, not greater than a configured value - * @param start_id Start liquidity pool id, fetch pools whose IDs are greater than or equal to this ID + * @param limit The limitation of items each query can fetch, not greater than the configured value of + * @a api_limit_get_liquidity_pools + * @param start_id Start liquidity pool id, fetch pools whose IDs are greater than or equal to this ID * @param with_statistics Whether to return statistics * @return The liquidity pools * * @note - * 1. @p limit can be omitted or be null, if so the default value 101 will be used + * 1. @p limit can be omitted or be null, if so the default value of + * @ref application_options::api_limit_get_liquidity_pools will be used * 2. @p start_id can be omitted or be null, if so the api will return the "first page" of pools * 3. 
can only omit one or more arguments in the end of the list, but not one or more in the middle */ vector list_liquidity_pools( - optional limit = 101, - optional start_id = optional(), - optional with_statistics = false )const; + const optional& limit = application_options::get_default().api_limit_get_liquidity_pools, + const optional& start_id = optional(), + const optional& with_statistics = false )const; /** * @brief Get a list of liquidity pools by the symbol or ID of the first asset in the pool * @param asset_symbol_or_id symbol name or ID of the asset - * @param limit The limitation of items each query can fetch, not greater than a configured value - * @param start_id Start liquidity pool id, fetch pools whose IDs are greater than or equal to this ID + * @param limit The limitation of items each query can fetch, not greater than the configured value of + * @a api_limit_get_liquidity_pools + * @param start_id Start liquidity pool id, fetch pools whose IDs are greater than or equal to this ID * @param with_statistics Whether to return statistics * @return The liquidity pools * * @note * 1. if @p asset_symbol_or_id cannot be tied to an asset, an error will be returned - * 2. @p limit can be omitted or be null, if so the default value 101 will be used + * 2. @p limit can be omitted or be null, if so the default value of + * @ref application_options::api_limit_get_liquidity_pools will be used * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of pools * 4. can only omit one or more arguments in the end of the list, but not one or more in the middle */ vector get_liquidity_pools_by_asset_a( - std::string asset_symbol_or_id, - optional limit = 101, - optional start_id = optional(), - optional with_statistics = false )const; + const std::string& asset_symbol_or_id, + const optional& limit = application_options::get_default().api_limit_get_liquidity_pools, + const optional& start_id = optional(), + const optional& with_statistics = false )const; /** * @brief Get a list of liquidity pools by the symbol or ID of the second asset in the pool * @param asset_symbol_or_id symbol name or ID of the asset - * @param limit The limitation of items each query can fetch, not greater than a configured value - * @param start_id Start liquidity pool id, fetch pools whose IDs are greater than or equal to this ID + * @param limit The limitation of items each query can fetch, not greater than the configured value of + * @a api_limit_get_liquidity_pools + * @param start_id Start liquidity pool id, fetch pools whose IDs are greater than or equal to this ID * @param with_statistics Whether to return statistics * @return The liquidity pools * * @note * 1. if @p asset_symbol_or_id cannot be tied to an asset, an error will be returned - * 2. @p limit can be omitted or be null, if so the default value 101 will be used + * 2. @p limit can be omitted or be null, if so the default value of + * @ref application_options::api_limit_get_liquidity_pools will be used * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of pools * 4. 
can only omit one or more arguments in the end of the list, but not one or more in the middle */ vector get_liquidity_pools_by_asset_b( - std::string asset_symbol_or_id, - optional limit = 101, - optional start_id = optional(), - optional with_statistics = false )const; + const std::string& asset_symbol_or_id, + const optional& limit = application_options::get_default().api_limit_get_liquidity_pools, + const optional& start_id = optional(), + const optional& with_statistics = false )const; /** * @brief Get a list of liquidity pools by the symbol or ID of one asset in the pool * @param asset_symbol_or_id symbol name or ID of the asset - * @param limit The limitation of items each query can fetch, not greater than a configured value - * @param start_id Start liquidity pool id, fetch pools whose IDs are greater than or equal to this ID + * @param limit The limitation of items each query can fetch, not greater than the configured value of + * @a api_limit_get_liquidity_pools + * @param start_id Start liquidity pool id, fetch pools whose IDs are greater than or equal to this ID * @param with_statistics Whether to return statistics * @return The liquidity pools * * @note * 1. if @p asset_symbol_or_id cannot be tied to an asset, an error will be returned - * 2. @p limit can be omitted or be null, if so the default value 101 will be used + * 2. @p limit can be omitted or be null, if so the default value of + * @ref application_options::api_limit_get_liquidity_pools will be used * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of pools * 4. can only omit one or more arguments in the end of the list, but not one or more in the middle */ vector get_liquidity_pools_by_one_asset( const std::string& asset_symbol_or_id, - const optional& limit = 101, + const optional& limit = application_options::get_default().api_limit_get_liquidity_pools, const optional& start_id = optional(), const optional& with_statistics = false )const; @@ -714,28 +751,32 @@ class database_api * @brief Get a list of liquidity pools by the symbols or IDs of the two assets in the pool * @param asset_symbol_or_id_a symbol name or ID of one asset * @param asset_symbol_or_id_b symbol name or ID of the other asset - * @param limit The limitation of items each query can fetch, not greater than a configured value - * @param start_id Start liquidity pool id, fetch pools whose IDs are greater than or equal to this ID + * @param limit The limitation of items each query can fetch, not greater than the configured value of + * @a api_limit_get_liquidity_pools + * @param start_id Start liquidity pool id, fetch pools whose IDs are greater than or equal to this ID * @param with_statistics Whether to return statistics * @return The liquidity pools * * @note * 1. if @p asset_symbol_or_id_a or @p asset_symbol_or_id_b cannot be tied to an asset, * an error will be returned - * 2. @p limit can be omitted or be null, if so the default value 101 will be used + * 2. @p limit can be omitted or be null, if so the default value of + * @ref application_options::api_limit_get_liquidity_pools will be used * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of pools * 4. 
can only omit one or more arguments in the end of the list, but not one or more in the middle */ vector get_liquidity_pools_by_both_assets( - std::string asset_symbol_or_id_a, - std::string asset_symbol_or_id_b, - optional limit = 101, - optional start_id = optional(), - optional with_statistics = false )const; + const std::string& asset_symbol_or_id_a, + const std::string& asset_symbol_or_id_b, + const optional& limit = application_options::get_default().api_limit_get_liquidity_pools, + const optional& start_id = optional(), + const optional& with_statistics = false )const; /** * @brief Get a list of liquidity pools by their IDs - * @param ids IDs of the liquidity pools + * @param ids IDs of the liquidity pools, + * the quantity should not be greater than the configured value of + * @a api_limit_get_liquidity_pools * @param subscribe @a true to subscribe to the queried objects; @a false to not subscribe; * @a null to subscribe or not subscribe according to current auto-subscription setting * (see @ref set_auto_subscription) @@ -747,12 +788,14 @@ class database_api */ vector> get_liquidity_pools( const vector& ids, - optional subscribe = optional(), - optional with_statistics = false )const; + const optional& subscribe = optional(), + const optional& with_statistics = false )const; /** * @brief Get a list of liquidity pools by their share asset symbols or IDs - * @param asset_symbols_or_ids symbol names or IDs of the share assets + * @param asset_symbols_or_ids symbol names or IDs of the share assets, + * the quantity should not be greater than the configured value of + * @a api_limit_get_liquidity_pools * @param subscribe @a true to subscribe to the queried objects; @a false to not subscribe; * @a null to subscribe or not subscribe according to current auto-subscription setting * (see @ref set_auto_subscription) @@ -764,28 +807,30 @@ class database_api */ vector> get_liquidity_pools_by_share_asset( const vector& asset_symbols_or_ids, - optional subscribe = optional(), - optional with_statistics = false )const; + const optional& subscribe = optional(), + const optional& with_statistics = false )const; /** * @brief Get a list of liquidity pools by the name or ID of the owner account * @param account_name_or_id name or ID of the owner account - * @param limit The limitation of items each query can fetch, not greater than a configured value - * @param start_id Start share asset id, fetch pools whose share asset IDs are greater than or equal to this ID + * @param limit The limitation of items each query can fetch, not greater than the configured value of + * @a api_limit_get_liquidity_pools + * @param start_id Start share asset id, fetch pools whose share asset IDs are greater than or equal to this ID * @param with_statistics Whether to return statistics * @return The liquidity pools * * @note * 1. if @p account_name_or_id cannot be tied to an account, an error will be returned - * 2. @p limit can be omitted or be null, if so the default value 101 will be used + * 2. @p limit can be omitted or be null, if so the default value of + * @ref application_options::api_limit_get_liquidity_pools will be used * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of pools * 4. 
can only omit one or more arguments in the end of the list, but not one or more in the middle */ vector get_liquidity_pools_by_owner( - std::string account_name_or_id, - optional limit = 101, - optional start_id = optional(), - optional with_statistics = false )const; + const std::string& account_name_or_id, + const optional& limit = application_options::get_default().api_limit_get_liquidity_pools, + const optional& start_id = optional(), + const optional& with_statistics = false )const; ///////////////////// @@ -794,53 +839,59 @@ class database_api /** * @brief Get a list of SameT Funds - * @param limit The limitation of items each query can fetch, not greater than a configured value - * @param start_id Start SameT Fund id, fetch items whose IDs are greater than or equal to this ID + * @param limit The limitation of items each query can fetch, not greater than the configured value of + * @a api_limit_get_samet_funds + * @param start_id Start SameT Fund id, fetch items whose IDs are greater than or equal to this ID * @return The SameT Funds * * @note - * 1. @p limit can be omitted or be null, if so the default value 101 will be used + * 1. @p limit can be omitted or be null, if so the default value of + * @ref application_options::api_limit_get_samet_funds will be used * 2. @p start_id can be omitted or be null, if so the api will return the "first page" of data * 3. can only omit one or more arguments in the end of the list, but not one or more in the middle */ vector list_samet_funds( - const optional& limit = 101, + const optional& limit = application_options::get_default().api_limit_get_samet_funds, const optional& start_id = optional() )const; /** * @brief Get a list of SameT Funds by the name or ID of the owner account * @param account_name_or_id name or ID of the owner account - * @param limit The limitation of items each query can fetch, not greater than a configured value - * @param start_id Start SameT Fund id, fetch items whose IDs are greater than or equal to this ID + * @param limit The limitation of items each query can fetch, not greater than the configured value of + * @a api_limit_get_samet_funds + * @param start_id Start SameT Fund id, fetch items whose IDs are greater than or equal to this ID * @return The SameT Funds * * @note * 1. if @p account_name_or_id cannot be tied to an account, an error will be returned - * 2. @p limit can be omitted or be null, if so the default value 101 will be used + * 2. @p limit can be omitted or be null, if so the default value of + * @ref application_options::api_limit_get_samet_funds will be used * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of data * 4. 
can only omit one or more arguments in the end of the list, but not one or more in the middle */ vector get_samet_funds_by_owner( const std::string& account_name_or_id, - const optional& limit = 101, + const optional& limit = application_options::get_default().api_limit_get_samet_funds, const optional& start_id = optional() )const; /** * @brief Get a list of SameT Funds by the symbol or ID of the asset type * @param asset_symbol_or_id symbol or ID of the asset type - * @param limit The limitation of items each query can fetch, not greater than a configured value - * @param start_id Start SameT Fund id, fetch items whose IDs are greater than or equal to this ID + * @param limit The limitation of items each query can fetch, not greater than the configured value of + * @a api_limit_get_samet_funds + * @param start_id Start SameT Fund id, fetch items whose IDs are greater than or equal to this ID * @return The SameT Funds * * @note * 1. if @p asset_symbol_or_id cannot be tied to an asset, an error will be returned - * 2. @p limit can be omitted or be null, if so the default value 101 will be used + * 2. @p limit can be omitted or be null, if so the default value of + * @ref application_options::api_limit_get_samet_funds will be used * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of data * 4. can only omit one or more arguments in the end of the list, but not one or more in the middle */ vector get_samet_funds_by_asset( const std::string& asset_symbol_or_id, - const optional& limit = 101, + const optional& limit = application_options::get_default().api_limit_get_samet_funds, const optional& start_id = optional() )const; /// @} @@ -851,158 +902,176 @@ class database_api /** * @brief Get a list of credit offers - * @param limit The limitation of items each query can fetch, not greater than a configured value - * @param start_id Start credit offer id, fetch items whose IDs are greater than or equal to this ID + * @param limit The limitation of items each query can fetch, not greater than the configured value of + * @a api_limit_get_credit_offers + * @param start_id Start credit offer id, fetch items whose IDs are greater than or equal to this ID * @return The credit offers * * @note - * 1. @p limit can be omitted or be null, if so the default value 101 will be used + * 1. @p limit can be omitted or be null, if so the default value of + * @ref application_options::api_limit_get_credit_offers will be used * 2. @p start_id can be omitted or be null, if so the api will return the "first page" of data * 3. can only omit one or more arguments in the end of the list, but not one or more in the middle */ vector list_credit_offers( - const optional& limit = 101, + const optional& limit = application_options::get_default().api_limit_get_credit_offers, const optional& start_id = optional() )const; /** * @brief Get a list of credit offers by the name or ID of the owner account * @param account_name_or_id name or ID of the owner account - * @param limit The limitation of items each query can fetch, not greater than a configured value - * @param start_id Start credit offer id, fetch items whose IDs are greater than or equal to this ID + * @param limit The limitation of items each query can fetch, not greater than the configured value of + * @a api_limit_get_credit_offers + * @param start_id Start credit offer id, fetch items whose IDs are greater than or equal to this ID * @return The credit offers * * @note * 1. 
if @p account_name_or_id cannot be tied to an account, an error will be returned - * 2. @p limit can be omitted or be null, if so the default value 101 will be used + * 2. @p limit can be omitted or be null, if so the default value of + * @ref application_options::api_limit_get_credit_offers will be used * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of data * 4. can only omit one or more arguments in the end of the list, but not one or more in the middle */ vector get_credit_offers_by_owner( const std::string& account_name_or_id, - const optional& limit = 101, + const optional& limit = application_options::get_default().api_limit_get_credit_offers, const optional& start_id = optional() )const; /** * @brief Get a list of credit offers by the symbol or ID of the asset type * @param asset_symbol_or_id symbol or ID of the asset type - * @param limit The limitation of items each query can fetch, not greater than a configured value - * @param start_id Start credit offer id, fetch items whose IDs are greater than or equal to this ID + * @param limit The limitation of items each query can fetch, not greater than the configured value of + * @a api_limit_get_credit_offers + * @param start_id Start credit offer id, fetch items whose IDs are greater than or equal to this ID * @return The credit offers * * @note * 1. if @p asset_symbol_or_id cannot be tied to an asset, an error will be returned - * 2. @p limit can be omitted or be null, if so the default value 101 will be used + * 2. @p limit can be omitted or be null, if so the default value of + * @ref application_options::api_limit_get_credit_offers will be used * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of data * 4. can only omit one or more arguments in the end of the list, but not one or more in the middle */ vector get_credit_offers_by_asset( const std::string& asset_symbol_or_id, - const optional& limit = 101, + const optional& limit = application_options::get_default().api_limit_get_credit_offers, const optional& start_id = optional() )const; /** * @brief Get a list of credit deals - * @param limit The limitation of items each query can fetch, not greater than a configured value - * @param start_id Start credit deal id, fetch items whose IDs are greater than or equal to this ID + * @param limit The limitation of items each query can fetch, not greater than the configured value of + * @a api_limit_get_credit_offers + * @param start_id Start credit deal id, fetch items whose IDs are greater than or equal to this ID * @return The credit deals * * @note - * 1. @p limit can be omitted or be null, if so the default value 101 will be used + * 1. @p limit can be omitted or be null, if so the default value of + * @ref application_options::api_limit_get_credit_offers will be used * 2. @p start_id can be omitted or be null, if so the api will return the "first page" of data * 3. 
can only omit one or more arguments in the end of the list, but not one or more in the middle */ vector list_credit_deals( - const optional& limit = 101, + const optional& limit = application_options::get_default().api_limit_get_credit_offers, const optional& start_id = optional() )const; /** * @brief Get a list of credit deals by the ID of a credit offer * @param offer_id ID of the credit offer - * @param limit The limitation of items each query can fetch, not greater than a configured value - * @param start_id Start credit deal id, fetch items whose IDs are greater than or equal to this ID + * @param limit The limitation of items each query can fetch, not greater than the configured value of + * @a api_limit_get_credit_offers + * @param start_id Start credit deal id, fetch items whose IDs are greater than or equal to this ID * @return The credit deals * * @note * 1. if @p offer_id cannot be tied to a credit offer, an empty list will be returned - * 2. @p limit can be omitted or be null, if so the default value 101 will be used + * 2. @p limit can be omitted or be null, if so the default value of + * @ref application_options::api_limit_get_credit_offers will be used * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of data * 4. can only omit one or more arguments in the end of the list, but not one or more in the middle */ vector get_credit_deals_by_offer_id( const credit_offer_id_type& offer_id, - const optional& limit = 101, + const optional& limit = application_options::get_default().api_limit_get_credit_offers, const optional& start_id = optional() )const; /** * @brief Get a list of credit deals by the name or ID of a credit offer owner account * @param account_name_or_id name or ID of the credit offer owner account - * @param limit The limitation of items each query can fetch, not greater than a configured value - * @param start_id Start credit deal id, fetch items whose IDs are greater than or equal to this ID + * @param limit The limitation of items each query can fetch, not greater than the configured value of + * @a api_limit_get_credit_offers + * @param start_id Start credit deal id, fetch items whose IDs are greater than or equal to this ID * @return The credit deals * * @note * 1. if @p account_name_or_id cannot be tied to an account, an error will be returned - * 2. @p limit can be omitted or be null, if so the default value 101 will be used + * 2. @p limit can be omitted or be null, if so the default value of + * @ref application_options::api_limit_get_credit_offers will be used * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of data * 4. 
can only omit one or more arguments in the end of the list, but not one or more in the middle */ vector get_credit_deals_by_offer_owner( const std::string& account_name_or_id, - const optional& limit = 101, + const optional& limit = application_options::get_default().api_limit_get_credit_offers, const optional& start_id = optional() )const; /** * @brief Get a list of credit deals by the name or ID of a borrower account * @param account_name_or_id name or ID of the borrower account - * @param limit The limitation of items each query can fetch, not greater than a configured value - * @param start_id Start credit deal id, fetch items whose IDs are greater than or equal to this ID + * @param limit The limitation of items each query can fetch, not greater than the configured value of + * @a api_limit_get_credit_offers + * @param start_id Start credit deal id, fetch items whose IDs are greater than or equal to this ID * @return The credit deals * * @note * 1. if @p account_name_or_id cannot be tied to an account, an error will be returned - * 2. @p limit can be omitted or be null, if so the default value 101 will be used + * 2. @p limit can be omitted or be null, if so the default value of + * @ref application_options::api_limit_get_credit_offers will be used * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of data * 4. can only omit one or more arguments in the end of the list, but not one or more in the middle */ vector get_credit_deals_by_borrower( const std::string& account_name_or_id, - const optional& limit = 101, + const optional& limit = application_options::get_default().api_limit_get_credit_offers, const optional& start_id = optional() )const; /** * @brief Get a list of credit deals by the symbol or ID of the debt asset type * @param asset_symbol_or_id symbol or ID of the debt asset type - * @param limit The limitation of items each query can fetch, not greater than a configured value - * @param start_id Start credit deal id, fetch items whose IDs are greater than or equal to this ID + * @param limit The limitation of items each query can fetch, not greater than the configured value of + * @a api_limit_get_credit_offers + * @param start_id Start credit deal id, fetch items whose IDs are greater than or equal to this ID * @return The credit deals * * @note * 1. if @p asset_symbol_or_id cannot be tied to an asset, an error will be returned - * 2. @p limit can be omitted or be null, if so the default value 101 will be used + * 2. @p limit can be omitted or be null, if so the default value of + * @ref application_options::api_limit_get_credit_offers will be used * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of data * 4. 
can only omit one or more arguments in the end of the list, but not one or more in the middle */ vector get_credit_deals_by_debt_asset( const std::string& asset_symbol_or_id, - const optional& limit = 101, + const optional& limit = application_options::get_default().api_limit_get_credit_offers, const optional& start_id = optional() )const; /** * @brief Get a list of credit deals by the symbol or ID of the collateral asset type * @param asset_symbol_or_id symbol or ID of the collateral asset type - * @param limit The limitation of items each query can fetch, not greater than a configured value - * @param start_id Start credit deal id, fetch items whose IDs are greater than or equal to this ID + * @param limit The limitation of items each query can fetch, not greater than the configured value of + * @a api_limit_get_credit_offers + * @param start_id Start credit deal id, fetch items whose IDs are greater than or equal to this ID * @return The credit deals * * @note * 1. if @p asset_symbol_or_id cannot be tied to an asset, an error will be returned - * 2. @p limit can be omitted or be null, if so the default value 101 will be used + * 2. @p limit can be omitted or be null, if so the default value of + * @ref application_options::api_limit_get_credit_offers will be used * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of data * 4. can only omit one or more arguments in the end of the list, but not one or more in the middle */ vector get_credit_deals_by_collateral_asset( const std::string& asset_symbol_or_id, - const optional& limit = 101, + const optional& limit = application_options::get_default().api_limit_get_credit_offers, const optional& start_id = optional() )const; /// @} @@ -1025,12 +1094,13 @@ class database_api * @param account_name_or_id The name or ID of the account whose witness should be retrieved * @return The witness object, or null if the account does not have a witness */ - fc::optional get_witness_by_account(const std::string account_name_or_id)const; + fc::optional get_witness_by_account(const std::string& account_name_or_id)const; /** * @brief Get names and IDs for registered witnesses * @param lower_bound_name Lower bound of the first name to return - * @param limit Maximum number of results to return -- must not exceed 1000 + * @param limit Maximum number of results to return, must not exceed the configured value of + * @a api_limit_lookup_witness_accounts * @return Map of witness names to corresponding IDs */ map lookup_witness_accounts(const string& lower_bound_name, uint32_t limit)const; @@ -1059,12 +1129,13 @@ class database_api * @param account_name_or_id The name or ID of the account whose committee_member should be retrieved * @return The committee_member object, or null if the account does not have a committee_member */ - fc::optional get_committee_member_by_account( const string account_name_or_id )const; + fc::optional get_committee_member_by_account( const string& account_name_or_id )const; /** * @brief Get names and IDs for registered committee_members * @param lower_bound_name Lower bound of the first name to return - * @param limit Maximum number of results to return -- must not exceed 1000 + * @param limit Maximum number of results to return, must not exceed the configured value of + * @a api_limit_lookup_committee_member_accounts * @return Map of committee_member names to corresponding IDs */ map lookup_committee_member_accounts( const string& lower_bound_name, @@ -1086,14 +1157,14 @@ class database_api * @return A 
list of worker objects * */ - vector get_all_workers( const optional is_expired = optional() )const; + vector get_all_workers( const optional& is_expired = optional() )const; /** * @brief Get the workers owned by a given account * @param account_name_or_id The name or ID of the account whose worker should be retrieved * @return A list of worker objects owned by the account */ - vector get_workers_by_account(const std::string account_name_or_id)const; + vector get_workers_by_account(const std::string& account_name_or_id)const; /** * @brief Get the total number of workers registered with the blockchain @@ -1108,7 +1179,9 @@ class database_api /** * @brief Given a set of votes, return the objects they are voting for - * @param votes a list of vote IDs + * @param votes a list of vote IDs, + * the quantity should not be greater than the configured value of + * @a api_limit_lookup_vote_ids * @return the referenced objects * * This will be a mixture of committee_member_objects, witness_objects, and worker_objects @@ -1231,7 +1304,8 @@ class database_api * @param account_name_or_id Account name or ID to get objects from * @param start Withdraw permission objects(1.12.X) before this ID will be skipped in results. * Pagination purposes. - * @param limit Maximum number of objects to retrieve + * @param limit Maximum number of objects to retrieve, must not exceed the configured value of + * @a api_limit_get_withdraw_permissions_by_giver * @return Withdraw permission objects for the account */ vector get_withdraw_permissions_by_giver( const std::string account_name_or_id, @@ -1243,7 +1317,8 @@ class database_api * @param account_name_or_id Account name or ID to get objects from * @param start Withdraw permission objects(1.12.X) before this ID will be skipped in results. * Pagination purposes. - * @param limit Maximum number of objects to retrieve + * @param limit Maximum number of objects to retrieve, must not exceed the configured value of + * @a api_limit_get_withdraw_permissions_by_recipient * @return Withdraw permission objects for the account */ vector get_withdraw_permissions_by_recipient( const std::string account_name_or_id, @@ -1268,7 +1343,8 @@ class database_api * @brief Get non expired HTLC objects using the sender account * @param account_name_or_id Account name or ID to get objects from * @param start htlc objects before this ID will be skipped in results. Pagination purposes. - * @param limit Maximum number of objects to retrieve + * @param limit Maximum number of objects to retrieve, must not exceed the configured value of + * @a api_limit_get_htlc_by * @return HTLC objects for the account */ vector get_htlc_by_from( const std::string account_name_or_id, @@ -1279,7 +1355,8 @@ class database_api * @brief Get non expired HTLC objects using the receiver account * @param account_name_or_id Account name or ID to get objects from * @param start htlc objects before this ID will be skipped in results. Pagination purposes. 
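(Most of the pagination described above is keyed on an object ID: items before start are skipped and at most limit items are returned, so the next page is requested with the last returned ID plus one. A self-contained sketch of that contract, with an ordinary std::map standing in for a chain index; this is not graphene code.)

    #include <cstdint>
    #include <map>
    #include <string>
    #include <vector>

    // Return up to `limit` records whose IDs are >= start_id, in ascending ID order.
    std::vector<std::string> page_by_id( const std::map<uint64_t, std::string>& index,
                                         uint64_t start_id, uint32_t limit )
    {
       std::vector<std::string> page;
       for( auto itr = index.lower_bound( start_id );
            itr != index.end() && page.size() < limit; ++itr )
          page.push_back( itr->second );
       return page;
    }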
- * @param limit Maximum number of objects to retrieve + * @param limit Maximum number of objects to retrieve, must not exceed the configured value of + * @a api_limit_get_htlc_by * @return HTLC objects for the account */ vector get_htlc_by_to( const std::string account_name_or_id, @@ -1289,7 +1366,8 @@ class database_api /** * @brief Get all HTLCs * @param start Lower bound of htlc id to start getting results - * @param limit Maximum number of htlc objects to fetch + * @param limit Maximum number of htlc objects to fetch, must not exceed the configured value of + * @a api_limit_list_htlcs * @return The htlc object list */ vector list_htlcs(const htlc_id_type start, uint32_t limit) const; @@ -1301,36 +1379,40 @@ class database_api /** * @brief Get a list of tickets - * @param limit The limitation of items each query can fetch, not greater than a configured value - * @param start_id Start ticket id, fetch tickets whose IDs are greater than or equal to this ID + * @param limit The limitation of items each query can fetch, not greater than the configured value of + * @a api_limit_get_tickets + * @param start_id Start ticket id, fetch tickets whose IDs are greater than or equal to this ID * @return The tickets * * @note - * 1. @p limit can be omitted or be null, if so the default value 101 will be used + * 1. @p limit can be omitted or be null, if so the default value of + * @ref application_options::api_limit_get_tickets will be used * 2. @p start_id can be omitted or be null, if so the api will return the "first page" of tickets * 3. can only omit one or more arguments in the end of the list, but not one or more in the middle */ vector list_tickets( - optional limit = 101, - optional start_id = optional() )const; + const optional& limit = application_options::get_default().api_limit_get_tickets, + const optional& start_id = optional() )const; /** * @brief Get a list of tickets by the name or ID of the owner account * @param account_name_or_id name or ID of the owner account - * @param limit The limitation of items each query can fetch, not greater than a configured value - * @param start_id Start ticket id, fetch tickets whose IDs are greater than or equal to this ID + * @param limit The limitation of items each query can fetch, not greater than the configured value of + * @a api_limit_get_tickets + * @param start_id Start ticket id, fetch tickets whose IDs are greater than or equal to this ID * @return The tickets * * @note * 1. if @p account_name_or_id cannot be tied to an account, an error will be returned - * 2. @p limit can be omitted or be null, if so the default value 101 will be used + * 2. @p limit can be omitted or be null, if so the default value of + * @ref application_options::api_limit_get_tickets will be used * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of tickets * 4. 
can only omit one or more arguments in the end of the list, but not one or more in the middle */ vector get_tickets_by_account( - std::string account_name_or_id, - optional limit = 101, - optional start_id = optional() )const; + const std::string& account_name_or_id, + const optional& limit = application_options::get_default().api_limit_get_tickets, + const optional& start_id = optional() )const; private: std::shared_ptr< database_api_impl > my; diff --git a/tests/common/database_fixture.cpp b/tests/common/database_fixture.cpp index 32bba5d53d..ac7a74cd4a 100644 --- a/tests/common/database_fixture.cpp +++ b/tests/common/database_fixture.cpp @@ -204,107 +204,107 @@ std::shared_ptr database_fixture_base::in if (fixture.current_test_name == "api_limit_get_account_history_operations") { fc::set_option( options, "max-ops-per-account", (uint64_t)125 ); - fc::set_option( options, "api-limit-get-account-history-operations", (uint64_t)300 ); + fc::set_option( options, "api-limit-get-account-history-operations", (uint32_t)300 ); } if(fixture.current_test_name =="api_limit_get_account_history") { fc::set_option( options, "max-ops-per-account", (uint64_t)125 ); - fc::set_option( options, "api-limit-get-account-history", (uint64_t)250 ); + fc::set_option( options, "api-limit-get-account-history", (uint32_t)250 ); } if(fixture.current_test_name =="api_limit_get_grouped_limit_orders") { - fc::set_option( options, "api-limit-get-grouped-limit-orders", (uint64_t)250 ); + fc::set_option( options, "api-limit-get-grouped-limit-orders", (uint32_t)250 ); } if(fixture.current_test_name =="api_limit_get_relative_account_history") { fc::set_option( options, "max-ops-per-account", (uint64_t)125 ); - fc::set_option( options, "api-limit-get-relative-account-history", (uint64_t)250 ); + fc::set_option( options, "api-limit-get-relative-account-history", (uint32_t)250 ); } if(fixture.current_test_name =="api_limit_get_account_history_by_operations") { - fc::set_option( options, "api-limit-get-account-history-by-operations", (uint64_t)250 ); - fc::set_option( options, "api-limit-get-relative-account-history", (uint64_t)250 ); + fc::set_option( options, "api-limit-get-account-history-by-operations", (uint32_t)250 ); + fc::set_option( options, "api-limit-get-relative-account-history", (uint32_t)250 ); } if(fixture.current_test_name =="api_limit_get_asset_holders") { - fc::set_option( options, "api-limit-get-asset-holders", (uint64_t)250 ); + fc::set_option( options, "api-limit-get-asset-holders", (uint32_t)250 ); } if(fixture.current_test_name =="api_limit_get_key_references") { - fc::set_option( options, "api-limit-get-key-references", (uint64_t)200 ); + fc::set_option( options, "api-limit-get-key-references", (uint32_t)200 ); } if(fixture.current_test_name =="api_limit_get_limit_orders") { - fc::set_option( options, "api-limit-get-limit-orders", (uint64_t)350 ); + fc::set_option( options, "api-limit-get-limit-orders", (uint32_t)350 ); } if(fixture.current_test_name =="api_limit_get_limit_orders_by_account") { - fc::set_option( options, "api-limit-get-limit-orders-by-account", (uint64_t)150 ); + fc::set_option( options, "api-limit-get-limit-orders-by-account", (uint32_t)150 ); } if(fixture.current_test_name =="api_limit_get_call_orders") { - fc::set_option( options, "api-limit-get-call-orders", (uint64_t)350 ); + fc::set_option( options, "api-limit-get-call-orders", (uint32_t)350 ); } if(fixture.current_test_name =="api_limit_get_settle_orders") { - fc::set_option( options, "api-limit-get-settle-orders", (uint64_t)350 ); 
+ fc::set_option( options, "api-limit-get-settle-orders", (uint32_t)350 ); } if(fixture.current_test_name =="api_limit_get_order_book") { - fc::set_option( options, "api-limit-get-order-book", (uint64_t)80 ); + fc::set_option( options, "api-limit-get-order-book", (uint32_t)80 ); } if(fixture.current_test_name =="api_limit_lookup_accounts") { - fc::set_option( options, "api-limit-lookup-accounts", (uint64_t)200 ); + fc::set_option( options, "api-limit-lookup-accounts", (uint32_t)200 ); } if(fixture.current_test_name =="api_limit_lookup_witness_accounts") { - fc::set_option( options, "api-limit-lookup-witness-accounts", (uint64_t)200 ); + fc::set_option( options, "api-limit-lookup-witness-accounts", (uint32_t)200 ); } if(fixture.current_test_name =="api_limit_lookup_committee_member_accounts") { - fc::set_option( options, "api-limit-lookup-committee-member-accounts", (uint64_t)200 ); + fc::set_option( options, "api-limit-lookup-committee-member-accounts", (uint32_t)200 ); } if(fixture.current_test_name =="api_limit_lookup_committee_member_accounts") { - fc::set_option( options, "api-limit-lookup-committee-member-accounts", (uint64_t)200 ); + fc::set_option( options, "api-limit-lookup-committee-member-accounts", (uint32_t)200 ); } if(fixture.current_test_name =="api_limit_lookup_vote_ids") { - fc::set_option( options, "api-limit-lookup-vote-ids", (uint64_t)2 ); + fc::set_option( options, "api-limit-lookup-vote-ids", (uint32_t)2 ); } if(fixture.current_test_name =="api_limit_get_account_limit_orders") { - fc::set_option( options, "api-limit-get-account-limit-orders", (uint64_t)250 ); + fc::set_option( options, "api-limit-get-account-limit-orders", (uint32_t)250 ); } if(fixture.current_test_name =="api_limit_get_collateral_bids") { - fc::set_option( options, "api-limit-get-collateral-bids", (uint64_t)250 ); + fc::set_option( options, "api-limit-get-collateral-bids", (uint32_t)250 ); } if(fixture.current_test_name =="api_limit_get_top_markets") { - fc::set_option( options, "api-limit-get-top-markets", (uint64_t)250 ); + fc::set_option( options, "api-limit-get-top-markets", (uint32_t)250 ); } if(fixture.current_test_name =="api_limit_get_trade_history") { - fc::set_option( options, "api-limit-get-trade-history", (uint64_t)250 ); + fc::set_option( options, "api-limit-get-trade-history", (uint32_t)250 ); } if(fixture.current_test_name =="api_limit_get_trade_history_by_sequence") { - fc::set_option( options, "api-limit-get-trade-history-by-sequence", (uint64_t)250 ); + fc::set_option( options, "api-limit-get-trade-history-by-sequence", (uint32_t)250 ); } if(fixture.current_test_name =="api_limit_get_withdraw_permissions_by_giver") { - fc::set_option( options, "api-limit-get-withdraw-permissions-by-giver", (uint64_t)250 ); + fc::set_option( options, "api-limit-get-withdraw-permissions-by-giver", (uint32_t)250 ); } if(fixture.current_test_name =="api_limit_get_withdraw_permissions_by_recipient") { - fc::set_option( options, "api-limit-get-withdraw-permissions-by-recipient", (uint64_t)250 ); + fc::set_option( options, "api-limit-get-withdraw-permissions-by-recipient", (uint32_t)250 ); } if(fixture.current_test_name =="api_limit_get_full_accounts2") { - fc::set_option( options, "api-limit-get-full-accounts", (uint64_t)200 ); - fc::set_option( options, "api-limit-get-full-accounts-lists", (uint64_t)120 ); + fc::set_option( options, "api-limit-get-full-accounts", (uint32_t)200 ); + fc::set_option( options, "api-limit-get-full-accounts-lists", (uint32_t)120 ); } // add account tracking for ahplugin for 
special test case with track-account enabled From 5ac5b9fb9ffe8fca42ca0a3228eeea8e75d12533 Mon Sep 17 00:00:00 2001 From: Abit Date: Thu, 21 Jul 2022 09:52:02 +0200 Subject: [PATCH 128/338] Use make -j 1 in ubuntu-release workflow --- .github/workflows/build-and-test.ubuntu-release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-and-test.ubuntu-release.yml b/.github/workflows/build-and-test.ubuntu-release.yml index 3a26dcfcd1..42a9886de0 100644 --- a/.github/workflows/build-and-test.ubuntu-release.yml +++ b/.github/workflows/build-and-test.ubuntu-release.yml @@ -68,7 +68,7 @@ jobs: run: | export CCACHE_DIR="$GITHUB_WORKSPACE/ccache" mkdir -p "$CCACHE_DIR" - make -j 2 -C _build + make -j 1 -C _build df -h - name: Unit-Tests run: | From 9a9f5b6d30b6748dcebdc59c769f34ae5dd7ac04 Mon Sep 17 00:00:00 2001 From: Abit Date: Thu, 21 Jul 2022 15:07:07 +0200 Subject: [PATCH 129/338] Add a swap file in ubuntu-debug workflow --- .github/workflows/build-and-test.ubuntu-debug.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/build-and-test.ubuntu-debug.yml b/.github/workflows/build-and-test.ubuntu-debug.yml index 5e9f33a409..5b45178185 100644 --- a/.github/workflows/build-and-test.ubuntu-debug.yml +++ b/.github/workflows/build-and-test.ubuntu-debug.yml @@ -50,6 +50,12 @@ jobs: run: | pwd df -h . + free + sudo dd if=/dev/zero of=/swapfile bs=1024 count=4M + sudo chmod 600 /swapfile + sudo mkswap /swapfile + sudo swapon /swapfile + free mkdir -p _build sudo mkdir -p /_build/libraries /_build/programs /_build/tests /mnt/_build sudo chmod a+rwx /_build/libraries /_build/programs /_build/tests From 2e758291e76263cd8598a23cd7104d33c0d28a0a Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 21 Jul 2022 13:34:07 +0000 Subject: [PATCH 130/338] Fix code smells --- libraries/app/api.cpp | 45 ++++++++++---------- libraries/app/database_api.cpp | 10 ++--- libraries/app/include/graphene/app/api.hpp | 48 +++++++++++++--------- 3 files changed, 56 insertions(+), 47 deletions(-) diff --git a/libraries/app/api.cpp b/libraries/app/api.cpp index eab9d5a25c..0be9b84c13 100644 --- a/libraries/app/api.cpp +++ b/libraries/app/api.cpp @@ -323,7 +323,8 @@ namespace graphene { namespace app { { // Nothing else to do } - vector history_api::get_fill_order_history( std::string asset_a, std::string asset_b, + vector history_api::get_fill_order_history( const std::string& asset_a, + const std::string& asset_b, uint32_t limit )const { auto market_hist_plugin = _app.get_plugin( "market_history" ); @@ -352,7 +353,7 @@ namespace graphene { namespace app { return result; } - vector history_api::get_account_history( const std::string account_id_or_name, + vector history_api::get_account_history( const std::string& account_id_or_name, operation_history_id_type stop, uint32_t limit, operation_history_id_type start ) const @@ -407,7 +408,7 @@ namespace graphene { namespace app { } vector history_api::get_account_history_operations( - const std::string account_id_or_name, + const std::string& account_id_or_name, int64_t operation_type, operation_history_id_type start, operation_history_id_type stop, @@ -498,7 +499,7 @@ namespace graphene { namespace app { vector history_api::get_block_operation_history( uint32_t block_num, - optional trx_in_block ) const + const optional& trx_in_block ) const { FC_ASSERT(_app.chain_database()); const auto& db = *_app.chain_database(); @@ -517,9 +518,10 @@ namespace graphene { namespace app { return market_hist_plugin->tracked_buckets(); } 
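(The api.cpp changes in the "Fix code smells" commit are largely about passing read-only strings and containers by const reference instead of by value, plus renaming the ambiguously named asset parameter to asset_symbol_or_id. The by-value versus by-const-reference difference in a two-function sketch, with invented names:)

    #include <cstddef>
    #include <string>

    // Pass by value: the caller's string is copied on every call.
    std::size_t length_by_value( std::string account_name ) { return account_name.size(); }

    // Pass by const reference: the same read-only access without the copy,
    // which is what the history and asset API signatures were changed to.
    std::size_t length_by_cref( const std::string& account_name ) { return account_name.size(); }

(Small trivially copyable arguments such as object IDs and integer limits stay by value in the commit, since copying them costs no more than passing a reference.)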
- history_operation_detail history_api::get_account_history_by_operations( const std::string account_id_or_name, - flat_set operation_types, - uint32_t start, uint32_t limit )const + history_operation_detail history_api::get_account_history_by_operations( + const std::string& account_id_or_name, + const flat_set& operation_types, + uint32_t start, uint32_t limit )const { const auto configured_limit = _app.get_options().api_limit_get_account_history_by_operations; FC_ASSERT( limit <= configured_limit, @@ -546,9 +548,10 @@ namespace graphene { namespace app { return result; } - vector history_api::get_market_history( std::string asset_a, std::string asset_b, + vector history_api::get_market_history( const std::string& asset_a, const std::string& asset_b, uint32_t bucket_seconds, - fc::time_point_sec start, fc::time_point_sec end )const + const fc::time_point_sec& start, + const fc::time_point_sec& end )const { try { auto market_hist_plugin = _app.get_plugin( "market_history" ); @@ -583,10 +586,10 @@ namespace graphene { namespace app { vector history_api::get_liquidity_pool_history( liquidity_pool_id_type pool_id, - optional start, - optional stop, - optional olimit, - optional operation_type )const + const optional& start, + const optional& stop, + const optional& olimit, + const optional& operation_type )const { try { FC_ASSERT( _app.get_options().has_market_history_plugin, "Market history plugin is not enabled." ); @@ -642,10 +645,10 @@ namespace graphene { namespace app { vector history_api::get_liquidity_pool_history_by_sequence( liquidity_pool_id_type pool_id, - optional start, - optional stop, - optional olimit, - optional operation_type )const + const optional& start, + const optional& stop, + const optional& olimit, + const optional& operation_type )const { try { FC_ASSERT( _app.get_options().has_market_history_plugin, "Market history plugin is not enabled." ); @@ -773,7 +776,7 @@ namespace graphene { namespace app { { // Nothing else to do } - vector asset_api::get_asset_holders( std::string asset, uint32_t start, + vector asset_api::get_asset_holders( const std::string& asset_symbol_or_id, uint32_t start, uint32_t limit ) const { const auto configured_limit = _app.get_options().api_limit_get_asset_holders; @@ -782,7 +785,7 @@ namespace graphene { namespace app { ("configured_limit", configured_limit) ); database_api_helper db_api_helper( _app ); - asset_id_type asset_id = db_api_helper.get_asset_from_string( asset )->id; + asset_id_type asset_id = db_api_helper.get_asset_from_string( asset_symbol_or_id )->id; const auto& bal_idx = _db.get_index_type< account_balance_index >().indices().get< by_asset_balance >(); auto range = bal_idx.equal_range( boost::make_tuple( asset_id ) ); @@ -813,10 +816,10 @@ namespace graphene { namespace app { return result; } // get number of asset holders. 
- int64_t asset_api::get_asset_holders_count( std::string asset ) const { + int64_t asset_api::get_asset_holders_count( const std::string& asset_symbol_or_id ) const { const auto& bal_idx = _db.get_index_type< account_balance_index >().indices().get< by_asset_balance >(); database_api_helper db_api_helper( _app ); - asset_id_type asset_id = db_api_helper.get_asset_from_string( asset )->id; + asset_id_type asset_id = db_api_helper.get_asset_from_string( asset_symbol_or_id )->id; auto range = bal_idx.equal_range( boost::make_tuple( asset_id ) ); int64_t count = boost::distance(range) - 1; diff --git a/libraries/app/database_api.cpp b/libraries/app/database_api.cpp index 296d9e1d75..dce24a718c 100644 --- a/libraries/app/database_api.cpp +++ b/libraries/app/database_api.cpp @@ -506,15 +506,13 @@ std::map database_api_impl::get_full_accounts( const for (const std::string& account_name_or_id : names_or_ids) { const account_object* account = get_account_from_string(account_name_or_id, false); - if (account == nullptr) + if( !account ) continue; - if( to_subscribe ) + if( to_subscribe && _subscribed_accounts.size() < _app_options->api_limit_get_full_accounts_subscribe ) { - if(_subscribed_accounts.size() < _app_options->api_limit_get_full_accounts_subscribe) { - _subscribed_accounts.insert( account->get_id() ); - subscribe_to_item( account->id ); - } + _subscribed_accounts.insert( account->get_id() ); + subscribe_to_item( account->id ); } full_account acnt; diff --git a/libraries/app/include/graphene/app/api.hpp b/libraries/app/include/graphene/app/api.hpp index 009fa37e9c..35612a3f18 100644 --- a/libraries/app/include/graphene/app/api.hpp +++ b/libraries/app/include/graphene/app/api.hpp @@ -134,7 +134,7 @@ namespace graphene { namespace app { * @return A list of operations performed by account, ordered from most recent to oldest. */ vector get_account_history( - const std::string account_name_or_id, + const std::string& account_name_or_id, operation_history_id_type stop = operation_history_id_type(), uint32_t limit = application_options::get_default().api_limit_get_account_history, operation_history_id_type start = operation_history_id_type() @@ -151,8 +151,8 @@ namespace graphene { namespace app { * @return history_operation_detail */ history_operation_detail get_account_history_by_operations( - const std::string account_name_or_id, - flat_set operation_types, + const std::string& account_name_or_id, + const flat_set& operation_types, uint32_t start, uint32_t limit )const; @@ -169,7 +169,7 @@ namespace graphene { namespace app { * @return A list of operations performed by account, ordered from most recent to oldest. 
*/ vector get_account_history_operations( - const std::string account_name_or_id, + const std::string& account_name_or_id, int64_t operation_type, operation_history_id_type start = operation_history_id_type(), operation_history_id_type stop = operation_history_id_type(), @@ -210,7 +210,7 @@ namespace graphene { namespace app { */ vector get_block_operation_history( uint32_t block_num, - optional trx_in_block = {} ) const; + const optional& trx_in_block = {} ) const; /** * @brief Get details of order executions occurred most recently in a trading pair @@ -219,7 +219,10 @@ namespace graphene { namespace app { * @param limit Maximum records to return * @return a list of order_history objects, in "most recent first" order */ - vector get_fill_order_history( std::string a, std::string b, uint32_t limit )const; + vector get_fill_order_history( + const std::string& a, + const std::string& b, + uint32_t limit )const; /** * @brief Get OHLCV data of a trading pair in a time range @@ -233,8 +236,10 @@ namespace graphene { namespace app { * If there are more records in the specified time range than the configured value of * @a api_limit_get_market_history, only the first records will be returned. */ - vector get_market_history( std::string a, std::string b, uint32_t bucket_seconds, - fc::time_point_sec start, fc::time_point_sec end )const; + vector get_market_history( const std::string& a, const std::string& b, + uint32_t bucket_seconds, + const fc::time_point_sec& start, + const fc::time_point_sec& end )const; /** * @brief Get OHLCV time bucket lengths supported (configured) by this API server @@ -269,10 +274,11 @@ namespace graphene { namespace app { */ vector get_liquidity_pool_history( liquidity_pool_id_type pool_id, - optional start = optional(), - optional stop = optional(), - optional limit = application_options::get_default().api_limit_get_liquidity_pool_history, - optional operation_type = optional() )const; + const optional& start = optional(), + const optional& stop = optional(), + const optional& limit = application_options::get_default() + .api_limit_get_liquidity_pool_history, + const optional& operation_type = optional() )const; /** * @brief Get history of a liquidity pool @@ -297,10 +303,11 @@ namespace graphene { namespace app { */ vector get_liquidity_pool_history_by_sequence( liquidity_pool_id_type pool_id, - optional start = optional(), - optional stop = optional(), - optional limit = application_options::get_default().api_limit_get_liquidity_pool_history, - optional operation_type = optional() )const; + const optional& start = optional(), + const optional& stop = optional(), + const optional& limit = application_options::get_default() + .api_limit_get_liquidity_pool_history, + const optional& operation_type = optional() )const; private: application& _app; @@ -535,20 +542,21 @@ namespace graphene { namespace app { /** * @brief Get asset holders for a specific asset - * @param asset The specific asset id or symbol + * @param asset_symbol_or_id The specific asset symbol or ID * @param start The start index * @param limit Maximum number of accounts to retrieve, must not exceed the configured value of * @a api_limit_get_asset_holders * @return A list of asset holders for the specified asset */ - vector get_asset_holders( std::string asset, uint32_t start, uint32_t limit )const; + vector get_asset_holders( const std::string& asset_symbol_or_id, + uint32_t start, uint32_t limit )const; /** * @brief Get asset holders count for a specific asset - * @param asset The specific asset id or 
symbol + * @param asset_symbol_or_id The specific asset symbol or id * @return Holders count for the specified asset */ - int64_t get_asset_holders_count( std::string asset )const; + int64_t get_asset_holders_count( const std::string& asset_symbol_or_id )const; /** * @brief Get all asset holders From cca1a2b68a941e5196c8619401b46f1d1a794461 Mon Sep 17 00:00:00 2001 From: Abit Date: Thu, 21 Jul 2022 17:49:40 +0200 Subject: [PATCH 131/338] Remove macOS 10.15 from Github actions workflow --- .github/workflows/build-and-test.mac.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-and-test.mac.yml b/.github/workflows/build-and-test.mac.yml index 021e73783c..8817ab1e9f 100644 --- a/.github/workflows/build-and-test.mac.yml +++ b/.github/workflows/build-and-test.mac.yml @@ -8,7 +8,7 @@ jobs: name: Build and test in macOS strategy: matrix: - os: [macos-10.15, macos-11] + os: [macos-11] runs-on: ${{ matrix.os }} steps: - name: Install dependencies From 42d87b0c34747a5ac6b80d0149cfbbd6f4cd3936 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 22 Jul 2022 17:47:00 +0000 Subject: [PATCH 132/338] Move API-related data structures into API classes --- libraries/app/api.cpp | 39 +++--- libraries/app/include/graphene/app/api.hpp | 130 +++++++++--------- .../wallet/include/graphene/wallet/wallet.hpp | 3 +- libraries/wallet/wallet.cpp | 3 +- libraries/wallet/wallet_api_impl.hpp | 4 +- libraries/wallet/wallet_sign.cpp | 6 +- tests/tests/asset_api_tests.cpp | 6 +- tests/tests/custom_operations.cpp | 2 +- tests/tests/grouped_orders_api_tests.cpp | 8 +- tests/tests/history_api_tests.cpp | 2 +- 10 files changed, 106 insertions(+), 97 deletions(-) diff --git a/libraries/app/api.cpp b/libraries/app/api.cpp index 0be9b84c13..6d9d64af17 100644 --- a/libraries/app/api.cpp +++ b/libraries/app/api.cpp @@ -518,7 +518,7 @@ namespace graphene { namespace app { return market_hist_plugin->tracked_buckets(); } - history_operation_detail history_api::get_account_history_by_operations( + history_api::history_operation_detail history_api::get_account_history_by_operations( const std::string& account_id_or_name, const flat_set& operation_types, uint32_t start, uint32_t limit )const @@ -715,22 +715,26 @@ namespace graphene { namespace app { } FC_CAPTURE_AND_RETHROW( (pool_id)(start)(stop)(olimit)(operation_type) ) } - commitment_type crypto_api::blind( const blind_factor_type& blind, uint64_t value ) + fc::ecc::commitment_type crypto_api::blind( const blind_factor_type& blind, uint64_t value ) { return fc::ecc::blind( blind, value ); } - blind_factor_type crypto_api::blind_sum( const std::vector& blinds_in, uint32_t non_neg ) + fc::ecc::blind_factor_type crypto_api::blind_sum( const std::vector& blinds_in, + uint32_t non_neg ) { return fc::ecc::blind_sum( blinds_in, non_neg ); } - bool crypto_api::verify_sum( const std::vector& commits_in, const std::vector& neg_commits_in, int64_t excess ) + bool crypto_api::verify_sum( const std::vector& commits_in, + const std::vector& neg_commits_in, + int64_t excess ) { return fc::ecc::verify_sum( commits_in, neg_commits_in, excess ); } - verify_range_result crypto_api::verify_range( const commitment_type& commit, const std::vector& proof ) + crypto_api::verify_range_result crypto_api::verify_range( const commitment_type& commit, + const std::vector& proof ) { verify_range_result result; result.success = fc::ecc::verify_range( result.min_val, result.max_val, commit, proof ); @@ -748,9 +752,10 @@ namespace graphene { namespace app { return 
fc::ecc::range_proof_sign( min_value, commit, commit_blind, nonce, base10_exp, min_bits, actual_value ); } - verify_range_proof_rewind_result crypto_api::verify_range_proof_rewind( const blind_factor_type& nonce, - const commitment_type& commit, - const std::vector& proof ) + crypto_api::verify_range_proof_rewind_result crypto_api::verify_range_proof_rewind( + const blind_factor_type& nonce, + const commitment_type& commit, + const std::vector& proof ) { verify_range_proof_rewind_result result; result.success = fc::ecc::verify_range_proof_rewind( result.blind_out, @@ -764,7 +769,7 @@ namespace graphene { namespace app { return result; } - range_proof_info crypto_api::range_get_info( const std::vector& proof ) + fc::ecc::range_proof_info crypto_api::range_get_info( const std::vector& proof ) { return fc::ecc::range_get_info( proof ); } @@ -776,8 +781,8 @@ namespace graphene { namespace app { { // Nothing else to do } - vector asset_api::get_asset_holders( const std::string& asset_symbol_or_id, uint32_t start, - uint32_t limit ) const + vector asset_api::get_asset_holders( const std::string& asset_symbol_or_id, + uint32_t start, uint32_t limit ) const { const auto configured_limit = _app.get_options().api_limit_get_asset_holders; FC_ASSERT( limit <= configured_limit, @@ -827,7 +832,7 @@ namespace graphene { namespace app { return count; } // function to get vector of system assets with holders count. - vector asset_api::get_all_asset_holders() const { + vector asset_api::get_all_asset_holders() const { vector result; vector total_assets; for( const asset_object& asset_obj : _db.get_index_type().indices() ) @@ -865,11 +870,11 @@ namespace graphene { namespace app { return plugin->tracked_groups(); } - vector< limit_order_group > orders_api::get_grouped_limit_orders( std::string base_asset, - std::string quote_asset, - uint16_t group, - optional start, - uint32_t limit )const + vector< orders_api::limit_order_group > orders_api::get_grouped_limit_orders( const std::string& base_asset, + const std::string& quote_asset, + uint16_t group, + const optional& start, + uint32_t limit )const { const auto configured_limit = _app.get_options().api_limit_get_grouped_limit_orders; FC_ASSERT( limit <= configured_limit, diff --git a/libraries/app/include/graphene/app/api.hpp b/libraries/app/include/graphene/app/api.hpp index 35612a3f18..f19bfe8c60 100644 --- a/libraries/app/include/graphene/app/api.hpp +++ b/libraries/app/include/graphene/app/api.hpp @@ -56,64 +56,12 @@ namespace graphene { namespace app { using namespace graphene::grouped_orders; using namespace graphene::custom_operations; - using namespace fc::ecc; using std::string; using std::vector; using std::map; class application; - struct verify_range_result - { - bool success; - uint64_t min_val; - uint64_t max_val; - }; - - struct verify_range_proof_rewind_result - { - bool success; - uint64_t min_val; - uint64_t max_val; - uint64_t value_out; - fc::ecc::blind_factor_type blind_out; - string message_out; - }; - - struct account_asset_balance - { - string name; - account_id_type account_id; - share_type amount; - }; - struct asset_holders - { - asset_id_type asset_id; - int64_t count; - }; - - struct history_operation_detail { - uint32_t total_count = 0; - vector operation_history_objs; - }; - - /** - * @brief summary data of a group of limit orders - */ - struct limit_order_group - { - explicit limit_order_group( const std::pair& p ) - : min_price( p.first.min_price ), - max_price( p.second.max_price ), - total_for_sale( p.second.total_for_sale 
) - {} - limit_order_group() = default; - - price min_price; ///< possible lowest price in the group - price max_price; ///< possible highest price in the group - share_type total_for_sale; ///< total amount of asset for sale, asset id is min_price.base.asset_id - }; - /** * @brief The history_api class implements the RPC API for account history * @@ -124,6 +72,12 @@ namespace graphene { namespace app { public: explicit history_api(application& app); + struct history_operation_detail + { + uint32_t total_count = 0; + vector operation_history_objs; + }; + /** * @brief Get operations relevant to the specificed account * @param account_name_or_id The account name or ID whose history should be queried @@ -449,6 +403,24 @@ namespace graphene { namespace app { class crypto_api { public: + + struct verify_range_result + { + bool success; + uint64_t min_val; + uint64_t max_val; + }; + + struct verify_range_proof_rewind_result + { + bool success; + uint64_t min_val; + uint64_t max_val; + uint64_t value_out; + fc::ecc::blind_factor_type blind_out; + string message_out; + }; + /** * @brief Generates a pedersen commitment: *commit = blind * G + value * G2. * The commitment is 33 bytes, the blinding factor is 32 bytes. @@ -529,7 +501,7 @@ namespace graphene { namespace app { * @param proof List of proof's characters * @return A range proof info structure with exponent, mantissa, min and max values */ - range_proof_info range_get_info( const std::vector& proof ); + fc::ecc::range_proof_info range_get_info( const std::vector& proof ); }; /** @@ -540,6 +512,18 @@ namespace graphene { namespace app { public: explicit asset_api(graphene::app::application& app); + struct account_asset_balance + { + string name; + account_id_type account_id; + share_type amount; + }; + struct asset_holders + { + asset_id_type asset_id; + int64_t count; + }; + /** * @brief Get asset holders for a specific asset * @param asset_symbol_or_id The specific asset symbol or ID @@ -577,6 +561,23 @@ namespace graphene { namespace app { public: explicit orders_api(application& app); + /** + * @brief summary data of a group of limit orders + */ + struct limit_order_group + { + explicit limit_order_group( const std::pair& p ) + : min_price( p.first.min_price ), + max_price( p.second.max_price ), + total_for_sale( p.second.total_for_sale ) + {} + limit_order_group() = default; + + price min_price; ///< possible lowest price in the group + price max_price; ///< possible highest price in the group + share_type total_for_sale; ///< total amount of asset for sale, asset id is min_price.base.asset_id + }; + /** * @brief Get tracked groups configured by the server. * @return A list of numbers which indicate configured groups, of those, 1 means 0.01% diff on price. 
@@ -594,10 +595,10 @@ namespace graphene { namespace app { * @a api_limit_get_grouped_limit_orders * @return The grouped limit orders, ordered from best offered price to worst */ - vector< limit_order_group > get_grouped_limit_orders( std::string base_asset, - std::string quote_asset, + vector< limit_order_group > get_grouped_limit_orders( const std::string& base_asset, + const std::string& quote_asset, uint16_t group, - optional start, + const optional& start, uint32_t limit )const; private: @@ -705,19 +706,20 @@ extern template class fc::api; FC_REFLECT( graphene::app::network_broadcast_api::transaction_confirmation, (id)(block_num)(trx_num)(trx) ) -FC_REFLECT( graphene::app::verify_range_result, + +FC_REFLECT( graphene::app::crypto_api::verify_range_result, (success)(min_val)(max_val) ) -FC_REFLECT( graphene::app::verify_range_proof_rewind_result, +FC_REFLECT( graphene::app::crypto_api::verify_range_proof_rewind_result, (success)(min_val)(max_val)(value_out)(blind_out)(message_out) ) -FC_REFLECT( graphene::app::history_operation_detail, + +FC_REFLECT( graphene::app::history_api::history_operation_detail, (total_count)(operation_history_objs) ) -FC_REFLECT( graphene::app::limit_order_group, + +FC_REFLECT( graphene::app::orders_api::limit_order_group, (min_price)(max_price)(total_for_sale) ) -//FC_REFLECT_TYPENAME( fc::ecc::compact_signature ) -//FC_REFLECT_TYPENAME( fc::ecc::commitment_type ) -FC_REFLECT( graphene::app::account_asset_balance, (name)(account_id)(amount) ) -FC_REFLECT( graphene::app::asset_holders, (asset_id)(count) ) +FC_REFLECT( graphene::app::asset_api::account_asset_balance, (name)(account_id)(amount) ) +FC_REFLECT( graphene::app::asset_api::asset_holders, (asset_id)(count) ) FC_API(graphene::app::history_api, (get_account_history) diff --git a/libraries/wallet/include/graphene/wallet/wallet.hpp b/libraries/wallet/include/graphene/wallet/wallet.hpp index 9bb26c686b..1bddc7a2d3 100644 --- a/libraries/wallet/include/graphene/wallet/wallet.hpp +++ b/libraries/wallet/include/graphene/wallet/wallet.hpp @@ -835,7 +835,8 @@ class wallet_api * @param sig the message signature * @return true if signature matches */ - bool verify_message( string message, string account, int block, const string& time, compact_signature sig ); + bool verify_message( const string& message, const string& account, int32_t block, const string& time, + const fc::ecc::compact_signature& sig ); /** Verify a message signed with sign_message * diff --git a/libraries/wallet/wallet.cpp b/libraries/wallet/wallet.cpp index 4c79a4c167..3365628d53 100644 --- a/libraries/wallet/wallet.cpp +++ b/libraries/wallet/wallet.cpp @@ -1351,7 +1351,8 @@ signed_message wallet_api::sign_message(string signer, string message) return my->sign_message(signer, message); } -bool wallet_api::verify_message( string message, string account, int block, const string& time, compact_signature sig ) +bool wallet_api::verify_message( const string& message, const string& account, int32_t block, const string& time, + const fc::ecc::compact_signature& sig ) { return my->verify_message( message, account, block, time, sig ); } diff --git a/libraries/wallet/wallet_api_impl.hpp b/libraries/wallet/wallet_api_impl.hpp index 663efccaaf..fd228df2e2 100644 --- a/libraries/wallet/wallet_api_impl.hpp +++ b/libraries/wallet/wallet_api_impl.hpp @@ -342,8 +342,8 @@ class wallet_api_impl signed_message sign_message(string signer, string message); - bool verify_message( const string& message, const string& account, int block, const string& time, - const 
compact_signature& sig ); + bool verify_message( const string& message, const string& account, int32_t block, const string& time, + const fc::ecc::compact_signature& sig ); bool verify_signed_message( const signed_message& message ); diff --git a/libraries/wallet/wallet_sign.cpp b/libraries/wallet/wallet_sign.cpp index f4893a10c6..477af5e1b7 100644 --- a/libraries/wallet/wallet_sign.cpp +++ b/libraries/wallet/wallet_sign.cpp @@ -214,8 +214,8 @@ namespace graphene { namespace wallet { namespace detail { return msg; } - bool wallet_api_impl::verify_message( const string& message, const string& account, int block, const string& time, - const compact_signature& sig ) + bool wallet_api_impl::verify_message( const string& message, const string& account, int32_t block, + const string& time, const fc::ecc::compact_signature& sig ) { const account_object from_account = get_account( account ); @@ -236,7 +236,7 @@ namespace graphene { namespace wallet { namespace detail { const account_object from_account = get_account( message.meta.account ); - const public_key signer( *message.signature, message.digest() ); + const fc::ecc::public_key signer( *message.signature, message.digest() ); if( !( message.meta.memo_key == signer ) ) return false; FC_ASSERT( from_account.options.memo_key == signer, "Message was signed by contained key, but it doesn't belong to the contained account!" ); diff --git a/tests/tests/asset_api_tests.cpp b/tests/tests/asset_api_tests.cpp index 7536b3529a..ef78315d84 100644 --- a/tests/tests/asset_api_tests.cpp +++ b/tests/tests/asset_api_tests.cpp @@ -51,7 +51,7 @@ BOOST_AUTO_TEST_CASE( asset_holders ) transfer(account_id_type()(db), bob, asset(300)); // make call - vector holders = asset_api.get_asset_holders( std::string( static_cast(asset_id_type())), 0, 100); + auto holders = asset_api.get_asset_holders( std::string( asset_id_type() ), 0, 100); BOOST_CHECK_EQUAL(holders.size(), 4u); // by now we can guarantee the order @@ -76,8 +76,8 @@ BOOST_AUTO_TEST_CASE( api_limit_get_asset_holders ) transfer(account_id_type()(db), bob, asset(300)); // make call - GRAPHENE_CHECK_THROW(asset_api.get_asset_holders(std::string( static_cast(asset_id_type())), 0, 260), fc::exception); - vector holders = asset_api.get_asset_holders(std::string( static_cast(asset_id_type())), 0, 210); + GRAPHENE_CHECK_THROW(asset_api.get_asset_holders(std::string( asset_id_type() ), 0, 260), fc::exception); + auto holders = asset_api.get_asset_holders(std::string( asset_id_type() ), 0, 210); BOOST_REQUIRE_EQUAL( holders.size(), 4u ); } diff --git a/tests/tests/custom_operations.cpp b/tests/tests/custom_operations.cpp index 9829a8b55f..8ff2cd1f7a 100644 --- a/tests/tests/custom_operations.cpp +++ b/tests/tests/custom_operations.cpp @@ -42,7 +42,7 @@ using namespace graphene::custom_operations; BOOST_FIXTURE_TEST_SUITE( custom_operation_tests, database_fixture ) void map_operation(flat_map>& pairs, bool remove, string& catalog, account_id_type& account, - private_key& pk, database& db) + fc::ecc::private_key& pk, database& db) { signed_transaction trx; set_expiration(db, trx); diff --git a/tests/tests/grouped_orders_api_tests.cpp b/tests/tests/grouped_orders_api_tests.cpp index e08c7d5d0e..3d8b308055 100644 --- a/tests/tests/grouped_orders_api_tests.cpp +++ b/tests/tests/grouped_orders_api_tests.cpp @@ -39,18 +39,18 @@ BOOST_AUTO_TEST_CASE(api_limit_get_grouped_limit_orders) { { app.enable_plugin("grouped_orders"); graphene::app::orders_api orders_api(app); - optional< api_access_info > acc; optional start; - 
//account_id_type() do 3 ops + //account_id_type() do 3 ops create_bitasset("USD", account_id_type()); create_account("dan"); create_account("bob"); asset_id_type bit_jmj_id = create_bitasset("JMJBIT").id; generate_block(); fc::usleep(fc::milliseconds(100)); - GRAPHENE_CHECK_THROW(orders_api.get_grouped_limit_orders(std::string( static_cast(asset_id_type())), std::string( static_cast(asset_id_type())),10, start,260), fc::exception); - vector< limit_order_group > orders =orders_api.get_grouped_limit_orders(std::string( static_cast(asset_id_type())), std::string( static_cast(bit_jmj_id)), 10,start,240); + auto core = std::string( asset_id_type() ); + GRAPHENE_CHECK_THROW(orders_api.get_grouped_limit_orders(core, core, 10, start, 260), fc::exception); + auto orders =orders_api.get_grouped_limit_orders(core, std::string( bit_jmj_id ), 10,start,240); BOOST_REQUIRE_EQUAL( orders.size(), 0u); }catch (fc::exception &e) { diff --git a/tests/tests/history_api_tests.cpp b/tests/tests/history_api_tests.cpp index 5f06e85afb..7b0a98d8b4 100644 --- a/tests/tests/history_api_tests.cpp +++ b/tests/tests/history_api_tests.cpp @@ -831,7 +831,7 @@ BOOST_AUTO_TEST_CASE(api_limit_get_account_history_by_operations) { generate_block(); fc::usleep(fc::milliseconds(100)); GRAPHENE_CHECK_THROW(hist_api.get_account_history_by_operations("1.2.0", operation_types, 0, 260), fc::exception); - history_operation_detail histories = hist_api.get_account_history_by_operations("1.2.0", operation_types, 0, 210); + auto histories = hist_api.get_account_history_by_operations("1.2.0", operation_types, 0, 210); BOOST_REQUIRE_EQUAL( histories.total_count, 3u ); } catch (fc::exception &e) { From 6b869d811516b65caf26c3cf1378bdd4623188fe Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 22 Jul 2022 20:29:05 +0000 Subject: [PATCH 133/338] Fix code smells --- libraries/app/api.cpp | 14 +++++++------- libraries/app/include/graphene/app/api.hpp | 16 +++++++++------- 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/libraries/app/api.cpp b/libraries/app/api.cpp index 6d9d64af17..10fefa2c7f 100644 --- a/libraries/app/api.cpp +++ b/libraries/app/api.cpp @@ -715,26 +715,26 @@ namespace graphene { namespace app { } FC_CAPTURE_AND_RETHROW( (pool_id)(start)(stop)(olimit)(operation_type) ) } - fc::ecc::commitment_type crypto_api::blind( const blind_factor_type& blind, uint64_t value ) + fc::ecc::commitment_type crypto_api::blind( const blind_factor_type& blind, uint64_t value ) const { return fc::ecc::blind( blind, value ); } fc::ecc::blind_factor_type crypto_api::blind_sum( const std::vector& blinds_in, - uint32_t non_neg ) + uint32_t non_neg ) const { return fc::ecc::blind_sum( blinds_in, non_neg ); } bool crypto_api::verify_sum( const std::vector& commits_in, const std::vector& neg_commits_in, - int64_t excess ) + int64_t excess ) const { return fc::ecc::verify_sum( commits_in, neg_commits_in, excess ); } crypto_api::verify_range_result crypto_api::verify_range( const commitment_type& commit, - const std::vector& proof ) + const std::vector& proof ) const { verify_range_result result; result.success = fc::ecc::verify_range( result.min_val, result.max_val, commit, proof ); @@ -747,7 +747,7 @@ namespace graphene { namespace app { const blind_factor_type& nonce, int8_t base10_exp, uint8_t min_bits, - uint64_t actual_value ) + uint64_t actual_value ) const { return fc::ecc::range_proof_sign( min_value, commit, commit_blind, nonce, base10_exp, min_bits, actual_value ); } @@ -755,7 +755,7 @@ namespace graphene { namespace app { 
crypto_api::verify_range_proof_rewind_result crypto_api::verify_range_proof_rewind( const blind_factor_type& nonce, const commitment_type& commit, - const std::vector& proof ) + const std::vector& proof ) const { verify_range_proof_rewind_result result; result.success = fc::ecc::verify_range_proof_rewind( result.blind_out, @@ -769,7 +769,7 @@ namespace graphene { namespace app { return result; } - fc::ecc::range_proof_info crypto_api::range_get_info( const std::vector& proof ) + fc::ecc::range_proof_info crypto_api::range_get_info( const std::vector& proof ) const { return fc::ecc::range_get_info( proof ); } diff --git a/libraries/app/include/graphene/app/api.hpp b/libraries/app/include/graphene/app/api.hpp index f19bfe8c60..527721df55 100644 --- a/libraries/app/include/graphene/app/api.hpp +++ b/libraries/app/include/graphene/app/api.hpp @@ -429,7 +429,7 @@ namespace graphene { namespace app { * @param value Positive 64-bit integer value * @return A 33-byte pedersen commitment: *commit = blind * G + value * G2 */ - fc::ecc::commitment_type blind( const fc::ecc::blind_factor_type& blind, uint64_t value ); + fc::ecc::commitment_type blind( const fc::ecc::blind_factor_type& blind, uint64_t value ) const; /** * @brief Get sha-256 blind factor type @@ -437,7 +437,8 @@ namespace graphene { namespace app { * @param non_neg 32-bit integer value * @return A blind factor type */ - fc::ecc::blind_factor_type blind_sum( const std::vector& blinds_in, uint32_t non_neg ); + fc::ecc::blind_factor_type blind_sum( const std::vector& blinds_in, + uint32_t non_neg ) const; /** * @brief Verifies that commits + neg_commits + excess == 0 @@ -451,7 +452,7 @@ namespace graphene { namespace app { const std::vector& commits_in, const std::vector& neg_commits_in, int64_t excess - ); + ) const; /** * @brief Verifies range proof for 33-byte pedersen commitment @@ -459,7 +460,8 @@ namespace graphene { namespace app { * @param proof List of characters * @return A structure with success, min and max values */ - verify_range_result verify_range( const fc::ecc::commitment_type& commit, const std::vector& proof ); + verify_range_result verify_range( const fc::ecc::commitment_type& commit, + const std::vector& proof ) const; /** * @brief Proves with respect to min_value the range for pedersen @@ -479,7 +481,7 @@ namespace graphene { namespace app { const blind_factor_type& nonce, int8_t base10_exp, uint8_t min_bits, - uint64_t actual_value ); + uint64_t actual_value ) const; /** * @brief Verifies range proof rewind for 33-byte pedersen commitment @@ -490,7 +492,7 @@ namespace graphene { namespace app { */ verify_range_proof_rewind_result verify_range_proof_rewind( const blind_factor_type& nonce, const fc::ecc::commitment_type& commit, - const std::vector& proof ); + const std::vector& proof ) const; /** * @brief Gets "range proof" info. 
The cli_wallet includes functionality for sending blind transfers @@ -501,7 +503,7 @@ namespace graphene { namespace app { * @param proof List of proof's characters * @return A range proof info structure with exponent, mantissa, min and max values */ - fc::ecc::range_proof_info range_get_info( const std::vector& proof ); + fc::ecc::range_proof_info range_get_info( const std::vector& proof ) const; }; /** From 15f5a077a3293a69432db6cf73270559d9f0bb26 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 22 Jul 2022 20:37:02 +0000 Subject: [PATCH 134/338] Fix duplicate code --- libraries/app/api.cpp | 43 ++++++++++++++++++++----------------------- 1 file changed, 20 insertions(+), 23 deletions(-) diff --git a/libraries/app/api.cpp b/libraries/app/api.cpp index 10fefa2c7f..11eb8b9d87 100644 --- a/libraries/app/api.cpp +++ b/libraries/app/api.cpp @@ -584,13 +584,8 @@ namespace graphene { namespace app { return result; } FC_CAPTURE_AND_RETHROW( (asset_a)(asset_b)(bucket_seconds)(start)(end) ) } - vector history_api::get_liquidity_pool_history( - liquidity_pool_id_type pool_id, - const optional& start, - const optional& stop, - const optional& olimit, - const optional& operation_type )const - { try { + static uint32_t validate_get_lp_history_params( application& _app, const optional& olimit ) + { FC_ASSERT( _app.get_options().has_market_history_plugin, "Market history plugin is not enabled." ); uint32_t limit = olimit.valid() ? *olimit @@ -603,13 +598,25 @@ namespace graphene { namespace app { FC_ASSERT( _app.chain_database(), "Internal error: the chain database is not availalbe" ); - const auto& db = *_app.chain_database(); + return limit; + } + + vector history_api::get_liquidity_pool_history( + liquidity_pool_id_type pool_id, + const optional& start, + const optional& stop, + const optional& olimit, + const optional& operation_type )const + { try { + uint32_t limit = validate_get_lp_history_params( _app, olimit ); vector result; - if( limit == 0 || ( start.valid() && stop.valid() && *start <= *stop ) ) // empty result + if( 0 == limit || ( start.valid() && stop.valid() && *start <= *stop ) ) // empty result return result; + const auto& db = *_app.chain_database(); + const auto& hist_idx = db.get_index_type(); if( operation_type.valid() ) // one operation type @@ -650,25 +657,15 @@ namespace graphene { namespace app { const optional& olimit, const optional& operation_type )const { try { - FC_ASSERT( _app.get_options().has_market_history_plugin, "Market history plugin is not enabled." ); - - uint32_t limit = olimit.valid() ? 
*olimit - : application_options::get_default().api_limit_get_liquidity_pool_history; - - const auto configured_limit = _app.get_options().api_limit_get_liquidity_pool_history; - FC_ASSERT( limit <= configured_limit, - "limit can not be greater than ${configured_limit}", - ("configured_limit", configured_limit) ); - - FC_ASSERT( _app.chain_database(), "Internal error: the chain database is not availalbe" ); - - const auto& db = *_app.chain_database(); + uint32_t limit = validate_get_lp_history_params( _app, olimit ); vector result; - if( limit == 0 ) // empty result + if( 0 == limit ) // empty result return result; + const auto& db = *_app.chain_database(); + const auto& hist_idx = db.get_index_type(); if( operation_type.valid() ) // one operation type From f24f498c40d8a9f516649bd7023df83ddfa42bed Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 22 Jul 2022 20:45:20 +0000 Subject: [PATCH 135/338] Change reserved name "time" to a non-reserved one --- libraries/wallet/include/graphene/wallet/wallet.hpp | 4 ++-- libraries/wallet/wallet.cpp | 4 ++-- libraries/wallet/wallet_api_impl.hpp | 2 +- libraries/wallet/wallet_sign.cpp | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/libraries/wallet/include/graphene/wallet/wallet.hpp b/libraries/wallet/include/graphene/wallet/wallet.hpp index 1bddc7a2d3..f19a0ef4d9 100644 --- a/libraries/wallet/include/graphene/wallet/wallet.hpp +++ b/libraries/wallet/include/graphene/wallet/wallet.hpp @@ -831,11 +831,11 @@ class wallet_api * @param message the message text * @param account the account name of the message * @param block the block number of the message - * @param time the timestamp of the message + * @param msg_time the timestamp of the message * @param sig the message signature * @return true if signature matches */ - bool verify_message( const string& message, const string& account, int32_t block, const string& time, + bool verify_message( const string& message, const string& account, int32_t block, const string& msg_time, const fc::ecc::compact_signature& sig ); /** Verify a message signed with sign_message diff --git a/libraries/wallet/wallet.cpp b/libraries/wallet/wallet.cpp index 3365628d53..a3a128d2b5 100644 --- a/libraries/wallet/wallet.cpp +++ b/libraries/wallet/wallet.cpp @@ -1351,10 +1351,10 @@ signed_message wallet_api::sign_message(string signer, string message) return my->sign_message(signer, message); } -bool wallet_api::verify_message( const string& message, const string& account, int32_t block, const string& time, +bool wallet_api::verify_message( const string& message, const string& account, int32_t block, const string& msg_time, const fc::ecc::compact_signature& sig ) { - return my->verify_message( message, account, block, time, sig ); + return my->verify_message( message, account, block, msg_time, sig ); } /** Verify a message signed with sign_message diff --git a/libraries/wallet/wallet_api_impl.hpp b/libraries/wallet/wallet_api_impl.hpp index fd228df2e2..8964c9576e 100644 --- a/libraries/wallet/wallet_api_impl.hpp +++ b/libraries/wallet/wallet_api_impl.hpp @@ -342,7 +342,7 @@ class wallet_api_impl signed_message sign_message(string signer, string message); - bool verify_message( const string& message, const string& account, int32_t block, const string& time, + bool verify_message( const string& message, const string& account, int32_t block, const string& msg_time, const fc::ecc::compact_signature& sig ); bool verify_signed_message( const signed_message& message ); diff --git a/libraries/wallet/wallet_sign.cpp 
b/libraries/wallet/wallet_sign.cpp index 477af5e1b7..7ce982e622 100644 --- a/libraries/wallet/wallet_sign.cpp +++ b/libraries/wallet/wallet_sign.cpp @@ -215,7 +215,7 @@ namespace graphene { namespace wallet { namespace detail { } bool wallet_api_impl::verify_message( const string& message, const string& account, int32_t block, - const string& time, const fc::ecc::compact_signature& sig ) + const string& msg_time, const fc::ecc::compact_signature& sig ) { const account_object from_account = get_account( account ); @@ -224,7 +224,7 @@ namespace graphene { namespace wallet { namespace detail { msg.meta.account = from_account.name; msg.meta.memo_key = from_account.options.memo_key; msg.meta.block = block; - msg.meta.time = time; + msg.meta.time = msg_time; msg.signature = sig; return verify_signed_message( msg ); From be88e1d074aa483a936bbd2f6c69350d6409f2f9 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 22 Jul 2022 20:55:30 +0000 Subject: [PATCH 136/338] Update API docs --- libraries/app/include/graphene/app/api.hpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/libraries/app/include/graphene/app/api.hpp b/libraries/app/include/graphene/app/api.hpp index 527721df55..7ebb43a224 100644 --- a/libraries/app/include/graphene/app/api.hpp +++ b/libraries/app/include/graphene/app/api.hpp @@ -212,7 +212,8 @@ namespace graphene { namespace app { * @param limit Maximum quantity of operations in the history to retrieve. Optional. * If not specified, the default value of * @ref application_options::api_limit_get_liquidity_pool_history will be used. - * If specified, it must not exceed the configured limit. + * If specified, it must not exceed the configured value of + * @a api_limit_get_liquidity_pool_history. * @param operation_type Optional. If specified, only the operations whose type is the specified type * will be returned. Otherwise all operations will be returned. * @return operation history of the liquidity pool, ordered by time, most recent first. @@ -244,7 +245,8 @@ namespace graphene { namespace app { * @param limit Maximum quantity of operations in the history to retrieve. Optional. * If not specified, the default value of * @ref application_options::api_limit_get_liquidity_pool_history will be used. - * If specified, it must not exceed the configured limit. + * If specified, it must not exceed the configured value of + * @a api_limit_get_liquidity_pool_history. * @param operation_type Optional. If specified, only the operations whose type is the specified type * will be returned. Otherwise all operations will be returned. * @return operation history of the liquidity pool, ordered by time, most recent first. From b464717c5ba74052a474d3e224528215e7a7bd47 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 22 Jul 2022 21:35:29 +0000 Subject: [PATCH 137/338] Make the type of a parameter a reference-to-const --- libraries/app/api.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/app/api.cpp b/libraries/app/api.cpp index 11eb8b9d87..4b6ed994c4 100644 --- a/libraries/app/api.cpp +++ b/libraries/app/api.cpp @@ -584,7 +584,7 @@ namespace graphene { namespace app { return result; } FC_CAPTURE_AND_RETHROW( (asset_a)(asset_b)(bucket_seconds)(start)(end) ) } - static uint32_t validate_get_lp_history_params( application& _app, const optional& olimit ) + static uint32_t validate_get_lp_history_params( const application& _app, const optional& olimit ) { FC_ASSERT( _app.get_options().has_market_history_plugin, "Market history plugin is not enabled." 
); From 2497cb1065d4ac9942dd76b1768cff62c90658c0 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 23 Jul 2022 20:07:11 +0000 Subject: [PATCH 138/338] Fix an error in database API docs --- libraries/app/include/graphene/app/database_api.hpp | 1 - 1 file changed, 1 deletion(-) diff --git a/libraries/app/include/graphene/app/database_api.hpp b/libraries/app/include/graphene/app/database_api.hpp index a8ffdf12d1..4a6d4e8a5a 100644 --- a/libraries/app/include/graphene/app/database_api.hpp +++ b/libraries/app/include/graphene/app/database_api.hpp @@ -450,7 +450,6 @@ class database_api * @param b symbol or ID of asset being purchased * @param limit Maximum number of orders to retrieve, must not exceed the configured value of * @a api_limit_get_limit_orders - * @return The assets issued (owned) by the account * @return The limit orders, ordered from least price to greatest */ vector get_limit_orders(std::string a, std::string b, uint32_t limit)const; From 3857d030eebc0eece90347d09cc81c2c89ca761c Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 23 Jul 2022 20:21:26 +0000 Subject: [PATCH 139/338] Update database API docs --- libraries/app/include/graphene/app/database_api.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/app/include/graphene/app/database_api.hpp b/libraries/app/include/graphene/app/database_api.hpp index 4a6d4e8a5a..85e10d50c7 100644 --- a/libraries/app/include/graphene/app/database_api.hpp +++ b/libraries/app/include/graphene/app/database_api.hpp @@ -281,9 +281,9 @@ class database_api * This function fetches relevant objects for the given accounts, and subscribes to updates to the given * accounts. If any of the strings in @p names_or_ids cannot be tied to an account, that input will be * ignored. Other accounts will be retrieved and subscribed. - * @note the maximum number of accounts allowed to subscribe per connection is configured by the + * @note The maximum number of accounts allowed to subscribe per connection is configured by the * @a api_limit_get_full_accounts_subscribe option. Exceeded subscriptions will be ignored. - * @note for each object type, the maximum number of objects to return is configured by the + * @note For each object type, the maximum number of objects to return is configured by the * @a api_limit_get_full_accounts_lists option. Exceeded objects need to be queried with other APIs. * */ From 604cb2966c08b075591b9c1d63c7a45fa2e4e0d9 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 23 Jul 2022 20:53:25 +0000 Subject: [PATCH 140/338] Simplify code for LP-related database APIs --- libraries/app/database_api.cpp | 125 ++++------------------------ libraries/app/database_api_impl.hxx | 39 ++------- 2 files changed, 26 insertions(+), 138 deletions(-) diff --git a/libraries/app/database_api.cpp b/libraries/app/database_api.cpp index dce24a718c..d6c59a17fa 100644 --- a/libraries/app/database_api.cpp +++ b/libraries/app/database_api.cpp @@ -1720,68 +1720,24 @@ vector database_api::list_liquidity_pools( const optional& start_id, const optional& with_statistics )const { - return my->list_liquidity_pools( + return my->get_liquidity_pools_by_asset_x( limit, start_id, with_statistics ); } -vector database_api_impl::list_liquidity_pools( - const optional& olimit, - const optional& ostart_id, - const optional& with_statistics )const -{ - uint32_t limit = olimit.valid() ? 
*olimit : application_options::get_default().api_limit_get_liquidity_pools; - - FC_ASSERT( _app_options, "Internal error" ); - const auto configured_limit = _app_options->api_limit_get_liquidity_pools; - FC_ASSERT( limit <= configured_limit, - "limit can not be greater than ${configured_limit}", - ("configured_limit", configured_limit) ); - - bool with_stats = ( with_statistics.valid() && *with_statistics ); - - vector results; - - liquidity_pool_id_type start_id = ostart_id.valid() ? *ostart_id : liquidity_pool_id_type(); - - const auto& idx = _db.get_index_type().indices().get(); - auto lower_itr = idx.lower_bound( start_id ); - auto upper_itr = idx.end(); - - results.reserve( limit ); - for ( ; lower_itr != upper_itr && results.size() < limit; ++lower_itr ) - { - results.emplace_back( extend_liquidity_pool( *lower_itr, with_stats ) ); - } - - return results; -} - vector database_api::get_liquidity_pools_by_asset_a( const std::string& asset_symbol_or_id, const optional& limit, const optional& start_id, const optional& with_statistics )const { - return my->get_liquidity_pools_by_asset_a( - asset_symbol_or_id, - limit, - start_id, - with_statistics ); -} - -vector database_api_impl::get_liquidity_pools_by_asset_a( - const std::string& asset_symbol_or_id, - const optional& limit, - const optional& start_id, - const optional& with_statistics )const -{ - return get_liquidity_pools_by_asset_x( - asset_symbol_or_id, + asset_id_type asset_id = my->get_asset_from_string(asset_symbol_or_id)->id; + return my->get_liquidity_pools_by_asset_x( limit, start_id, - with_statistics ); + with_statistics, + asset_id ); } vector database_api::get_liquidity_pools_by_asset_b( @@ -1790,24 +1746,12 @@ vector database_api::get_liquidity_pools_by_asse const optional& start_id, const optional& with_statistics )const { - return my->get_liquidity_pools_by_asset_b( - asset_symbol_or_id, - limit, - start_id, - with_statistics ); -} - -vector database_api_impl::get_liquidity_pools_by_asset_b( - const std::string& asset_symbol_or_id, - const optional& limit, - const optional& start_id, - const optional& with_statistics )const -{ - return get_liquidity_pools_by_asset_x( - asset_symbol_or_id, + asset_id_type asset_id = my->get_asset_from_string(asset_symbol_or_id)->id; + return my->get_liquidity_pools_by_asset_x( limit, start_id, - with_statistics ); + with_statistics, + asset_id ); } vector database_api::get_liquidity_pools_by_one_asset( @@ -1869,51 +1813,16 @@ vector database_api::get_liquidity_pools_by_both const optional& start_id, const optional& with_statistics )const { - return my->get_liquidity_pools_by_both_assets( - asset_symbol_or_id_a, - asset_symbol_or_id_b, - limit, - start_id, - with_statistics ); -} - -vector database_api_impl::get_liquidity_pools_by_both_assets( - const std::string& asset_symbol_or_id_a, - const std::string& asset_symbol_or_id_b, - const optional& olimit, - const optional& ostart_id, - const optional& with_statistics )const -{ - uint32_t limit = olimit.valid() ? 
*olimit : application_options::get_default().api_limit_get_liquidity_pools; - - FC_ASSERT( _app_options, "Internal error" ); - const auto configured_limit = _app_options->api_limit_get_liquidity_pools; - FC_ASSERT( limit <= configured_limit, - "limit can not be greater than ${configured_limit}", - ("configured_limit", configured_limit) ); - - bool with_stats = ( with_statistics.valid() && *with_statistics ); - - vector results; - - asset_id_type asset_id_a = get_asset_from_string(asset_symbol_or_id_a)->id; - asset_id_type asset_id_b = get_asset_from_string(asset_symbol_or_id_b)->id; + asset_id_type asset_id_a = my->get_asset_from_string(asset_symbol_or_id_a)->id; + asset_id_type asset_id_b = my->get_asset_from_string(asset_symbol_or_id_b)->id; if( asset_id_a > asset_id_b ) std::swap( asset_id_a, asset_id_b ); - - liquidity_pool_id_type start_id = ostart_id.valid() ? *ostart_id : liquidity_pool_id_type(); - - const auto& idx = _db.get_index_type().indices().get(); - auto lower_itr = idx.lower_bound( std::make_tuple( asset_id_a, asset_id_b, start_id ) ); - auto upper_itr = idx.upper_bound( std::make_tuple( asset_id_a, asset_id_b ) ); - - results.reserve( limit ); - for ( ; lower_itr != upper_itr && results.size() < limit; ++lower_itr ) - { - results.emplace_back( extend_liquidity_pool( *lower_itr, with_stats ) ); - } - - return results; + return my->get_liquidity_pools_by_asset_x( + limit, + start_id, + with_statistics, + asset_id_a, + asset_id_b ); } vector> database_api::get_liquidity_pools( diff --git a/libraries/app/database_api_impl.hxx b/libraries/app/database_api_impl.hxx index 06ddc05ffb..ed119b7107 100644 --- a/libraries/app/database_api_impl.hxx +++ b/libraries/app/database_api_impl.hxx @@ -136,31 +136,11 @@ class database_api_impl : public std::enable_shared_from_this uint32_t limit )const; // Liquidity pools - vector list_liquidity_pools( - const optional& limit, - const optional& start_id, - const optional& with_statistics )const; - vector get_liquidity_pools_by_asset_a( - const std::string& asset_symbol_or_id, - const optional& limit, - const optional& start_id, - const optional& with_statistics )const; - vector get_liquidity_pools_by_asset_b( - const std::string& asset_symbol_or_id, - const optional& limit, - const optional& start_id, - const optional& with_statistics )const; vector get_liquidity_pools_by_one_asset( const std::string& asset_symbol_or_id, const optional& limit, const optional& start_id, const optional& with_statistics )const; - vector get_liquidity_pools_by_both_assets( - const std::string& asset_symbol_or_id_a, - const std::string& asset_symbol_or_id_b, - const optional& limit, - const optional& start_id, - const optional& with_statistics )const; vector> get_liquidity_pools( const vector& ids, const optional& subscribe, @@ -291,12 +271,12 @@ class database_api_impl : public std::enable_shared_from_this } // template function to reduce duplicate code - template + template vector get_liquidity_pools_by_asset_x( - std::string asset_symbol_or_id, - optional olimit, - optional ostart_id, - optional with_statistics )const + const optional& olimit, + const optional& ostart_id, + const optional& with_statistics, + X... x )const { uint32_t limit = olimit.valid() ? 
*olimit : application_options::get_default().api_limit_get_liquidity_pools; @@ -310,13 +290,12 @@ class database_api_impl : public std::enable_shared_from_this vector results; - const asset_id_type asset_id = get_asset_from_string(asset_symbol_or_id)->id; - liquidity_pool_id_type start_id = ostart_id.valid() ? *ostart_id : liquidity_pool_id_type(); - const auto& idx = _db.get_index_type().indices().get(); - auto lower_itr = idx.lower_bound( std::make_tuple( asset_id, start_id ) ); - auto upper_itr = idx.upper_bound( asset_id ); + const auto& idx = _db.get_index_type().indices().get(); + + auto lower_itr = idx.lower_bound( make_tuple_if_multiple( x..., start_id ) ); + auto upper_itr = call_end_or_upper_bound( idx, x... ); results.reserve( limit ); for ( ; lower_itr != upper_itr && results.size() < limit; ++lower_itr ) From 34295d1690fa0a556cef9251e42cb9207823ffc9 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 25 Jul 2022 11:14:59 +0000 Subject: [PATCH 141/338] Update order of members of application_options --- .../app/include/graphene/app/application.hpp | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/libraries/app/include/graphene/app/application.hpp b/libraries/app/include/graphene/app/application.hpp index fdec652ca4..ea11966d7e 100644 --- a/libraries/app/include/graphene/app/application.hpp +++ b/libraries/app/include/graphene/app/application.hpp @@ -43,40 +43,40 @@ namespace graphene { namespace app { bool has_api_helper_indexes_plugin = false; bool has_market_history_plugin = false; - uint32_t api_limit_get_account_history_operations = 100; uint32_t api_limit_get_account_history = 100; - uint32_t api_limit_get_grouped_limit_orders = 101; - uint32_t api_limit_get_market_history = 200; - uint32_t api_limit_get_relative_account_history = 100; + uint32_t api_limit_get_account_history_operations = 100; uint32_t api_limit_get_account_history_by_operations = 100; + uint32_t api_limit_get_relative_account_history = 100; + uint32_t api_limit_get_market_history = 200; + uint32_t api_limit_get_trade_history = 100; + uint32_t api_limit_get_trade_history_by_sequence = 100; + uint32_t api_limit_get_liquidity_pool_history = 101; + uint32_t api_limit_get_top_markets = 100; + uint32_t api_limit_get_assets = 101; uint32_t api_limit_get_asset_holders = 100; uint32_t api_limit_get_key_references = 100; - uint32_t api_limit_get_htlc_by = 100; uint32_t api_limit_get_full_accounts = 50; uint32_t api_limit_get_full_accounts_lists = 500; uint32_t api_limit_get_full_accounts_subscribe = 100; uint32_t api_limit_get_top_voters = 200; - uint32_t api_limit_get_call_orders = 300; - uint32_t api_limit_get_settle_orders = 300; - uint32_t api_limit_get_assets = 101; uint32_t api_limit_get_limit_orders = 300; uint32_t api_limit_get_limit_orders_by_account = 101; + uint32_t api_limit_get_account_limit_orders = 101; + uint32_t api_limit_get_grouped_limit_orders = 101; uint32_t api_limit_get_order_book = 50; - uint32_t api_limit_list_htlcs = 100; + uint32_t api_limit_get_call_orders = 300; + uint32_t api_limit_get_settle_orders = 300; + uint32_t api_limit_get_collateral_bids = 100; uint32_t api_limit_lookup_accounts = 1000; uint32_t api_limit_lookup_witness_accounts = 1000; uint32_t api_limit_lookup_committee_member_accounts = 1000; uint32_t api_limit_lookup_vote_ids = 1000; - uint32_t api_limit_get_account_limit_orders = 101; - uint32_t api_limit_get_collateral_bids = 100; - uint32_t api_limit_get_top_markets = 100; - uint32_t api_limit_get_trade_history = 100; - uint32_t 
api_limit_get_trade_history_by_sequence = 100; + uint32_t api_limit_list_htlcs = 100; + uint32_t api_limit_get_htlc_by = 100; uint32_t api_limit_get_withdraw_permissions_by_giver = 101; uint32_t api_limit_get_withdraw_permissions_by_recipient = 101; uint32_t api_limit_get_tickets = 101; uint32_t api_limit_get_liquidity_pools = 101; - uint32_t api_limit_get_liquidity_pool_history = 101; uint32_t api_limit_get_samet_funds = 101; uint32_t api_limit_get_credit_offers = 101; From 6172efd62e14909282c05a467a109feee75ae140 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 25 Jul 2022 20:13:27 +0000 Subject: [PATCH 142/338] Add login_api::get_config API --- libraries/app/api.cpp | 16 ++---- libraries/app/application.cpp | 2 + libraries/app/include/graphene/app/api.hpp | 6 ++- .../app/include/graphene/app/application.hpp | 44 +++++++++++++++ tests/common/database_fixture.cpp | 5 ++ tests/tests/login_api_tests.cpp | 53 +++++++++++++++++++ 6 files changed, 114 insertions(+), 12 deletions(-) create mode 100644 tests/tests/login_api_tests.cpp diff --git a/libraries/app/api.cpp b/libraries/app/api.cpp index 4b6ed994c4..1125330f35 100644 --- a/libraries/app/api.cpp +++ b/libraries/app/api.cpp @@ -25,21 +25,10 @@ #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include "database_api_helper.hxx" #include -#include #include #include @@ -85,6 +74,11 @@ namespace graphene { namespace app { return true; } + application_options login_api::get_config() const + { + return _app.get_options(); + } + void login_api::enable_api( const std::string& api_name ) { if( api_name == "database_api" ) diff --git a/libraries/app/application.cpp b/libraries/app/application.cpp index 714d1d3a77..a6432b7919 100644 --- a/libraries/app/application.cpp +++ b/libraries/app/application.cpp @@ -1388,3 +1388,5 @@ const application_options& application::get_options() const // namespace detail } } + +GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::app::application_options ) diff --git a/libraries/app/include/graphene/app/api.hpp b/libraries/app/include/graphene/app/api.hpp index 7ebb43a224..7ffb52fa67 100644 --- a/libraries/app/include/graphene/app/api.hpp +++ b/libraries/app/include/graphene/app/api.hpp @@ -26,7 +26,6 @@ #include #include -#include #include #include @@ -666,6 +665,10 @@ namespace graphene { namespace app { * Other APIs may not be accessible until the client has sucessfully authenticated. 
*/ bool login(const string& user, const string& password); + + /// @brief Retrieve configured application options + application_options get_config() const; + /// @brief Retrieve the network block API fc::api block()const; /// @brief Retrieve the network broadcast API @@ -777,6 +780,7 @@ FC_API(graphene::app::custom_operations_api, ) FC_API(graphene::app::login_api, (login) + (get_config) (block) (network_broadcast) (database) diff --git a/libraries/app/include/graphene/app/application.hpp b/libraries/app/include/graphene/app/application.hpp index ea11966d7e..a99860356f 100644 --- a/libraries/app/include/graphene/app/application.hpp +++ b/libraries/app/include/graphene/app/application.hpp @@ -167,3 +167,47 @@ namespace graphene { namespace app { }; } } + +FC_REFLECT( graphene::app::application_options, + ( enable_subscribe_to_all ) + ( has_api_helper_indexes_plugin ) + ( has_market_history_plugin ) + ( api_limit_get_account_history ) + ( api_limit_get_account_history_operations ) + ( api_limit_get_account_history_by_operations ) + ( api_limit_get_relative_account_history ) + ( api_limit_get_market_history ) + ( api_limit_get_trade_history ) + ( api_limit_get_trade_history_by_sequence ) + ( api_limit_get_liquidity_pool_history ) + ( api_limit_get_top_markets ) + ( api_limit_get_assets ) + ( api_limit_get_asset_holders ) + ( api_limit_get_key_references ) + ( api_limit_get_full_accounts ) + ( api_limit_get_full_accounts_lists ) + ( api_limit_get_full_accounts_subscribe ) + ( api_limit_get_top_voters ) + ( api_limit_get_limit_orders ) + ( api_limit_get_limit_orders_by_account ) + ( api_limit_get_account_limit_orders ) + ( api_limit_get_grouped_limit_orders ) + ( api_limit_get_order_book ) + ( api_limit_get_call_orders ) + ( api_limit_get_settle_orders ) + ( api_limit_get_collateral_bids ) + ( api_limit_lookup_accounts ) + ( api_limit_lookup_witness_accounts ) + ( api_limit_lookup_committee_member_accounts ) + ( api_limit_lookup_vote_ids ) + ( api_limit_list_htlcs ) + ( api_limit_get_htlc_by ) + ( api_limit_get_withdraw_permissions_by_giver ) + ( api_limit_get_withdraw_permissions_by_recipient ) + ( api_limit_get_tickets ) + ( api_limit_get_liquidity_pools ) + ( api_limit_get_samet_funds ) + ( api_limit_get_credit_offers ) + ) + +GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::app::application_options ) diff --git a/tests/common/database_fixture.cpp b/tests/common/database_fixture.cpp index ac7a74cd4a..436f538c32 100644 --- a/tests/common/database_fixture.cpp +++ b/tests/common/database_fixture.cpp @@ -306,6 +306,11 @@ std::shared_ptr database_fixture_base::in fc::set_option( options, "api-limit-get-full-accounts", (uint32_t)200 ); fc::set_option( options, "api-limit-get-full-accounts-lists", (uint32_t)120 ); } + if( fixture.current_suite_name == "login_api_tests" + && fixture.current_test_name =="get_config_test" ) + { + fc::set_option( options, "api-limit-get-full-accounts-subscribe", (uint32_t)120 ); + } // add account tracking for ahplugin for special test case with track-account enabled if( !options.count("track-account") && fixture.current_test_name == "track_account") { diff --git a/tests/tests/login_api_tests.cpp b/tests/tests/login_api_tests.cpp new file mode 100644 index 0000000000..558452c8f9 --- /dev/null +++ b/tests/tests/login_api_tests.cpp @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2022 Abit More, and contributors. 
+ * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include + +#include + +#include "../common/database_fixture.hpp" + +using namespace graphene::chain; +using namespace graphene::chain::test; + +BOOST_FIXTURE_TEST_SUITE(login_api_tests, database_fixture) + +BOOST_AUTO_TEST_CASE( get_config_test ) +{ try { + auto default_opt = graphene::app::application_options::get_default(); + auto opt = app.get_options(); + + graphene::app::login_api login_api1( app ); + auto config = login_api1.get_config(); + + BOOST_CHECK_EQUAL( default_opt.api_limit_get_call_orders, config.api_limit_get_call_orders ); + BOOST_CHECK_EQUAL( opt.api_limit_get_call_orders, config.api_limit_get_call_orders ); + + BOOST_CHECK_EQUAL( default_opt.api_limit_get_full_accounts_subscribe, uint32_t(100) ); + BOOST_CHECK_EQUAL( opt.api_limit_get_full_accounts_subscribe, uint32_t(120) ); + BOOST_CHECK_EQUAL( config.api_limit_get_full_accounts_subscribe, uint32_t(120) ); + +} FC_CAPTURE_LOG_AND_RETHROW( (0) ) } + +BOOST_AUTO_TEST_SUITE_END() From dbef8ef5faec001c9769a82328364dd4ab839213 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 25 Jul 2022 20:20:07 +0000 Subject: [PATCH 143/338] Fix a code smell --- libraries/protocol/include/graphene/protocol/object_id.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/protocol/include/graphene/protocol/object_id.hpp b/libraries/protocol/include/graphene/protocol/object_id.hpp index 585ddc96d0..e414f6ddae 100644 --- a/libraries/protocol/include/graphene/protocol/object_id.hpp +++ b/libraries/protocol/include/graphene/protocol/object_id.hpp @@ -66,7 +66,7 @@ namespace graphene { namespace db { template< typename T > bool is() const { - return (number >> 48) == ((uint64_t)(T::space_id << 8) | (T::type_id)); + return (number >> 48) == ( ((uint64_t)(((uint64_t)T::space_id) << 8)) | ((uint64_t)(T::type_id)) ); } template< typename T > From 0b8a270328ea82393b8394253ce4f626d53de7f9 Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 26 Jul 2022 17:42:44 +0000 Subject: [PATCH 144/338] Fix code smells --- .../custom_operations/custom_objects.hpp | 20 +++++++++---------- tests/tests/custom_operations.cpp | 2 +- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/libraries/plugins/custom_operations/include/graphene/custom_operations/custom_objects.hpp b/libraries/plugins/custom_operations/include/graphene/custom_operations/custom_objects.hpp index b5f6684c89..6f32d8ed31 100644 --- 
a/libraries/plugins/custom_operations/include/graphene/custom_operations/custom_objects.hpp +++ b/libraries/plugins/custom_operations/include/graphene/custom_operations/custom_objects.hpp @@ -30,20 +30,18 @@ namespace graphene { namespace custom_operations { using namespace chain; -#ifndef CUSTOM_OPERATIONS_SPACE_ID -#define CUSTOM_OPERATIONS_SPACE_ID 7 -#endif +constexpr uint8_t CUSTOM_OPERATIONS_SPACE_ID = 7; -#define CUSTOM_OPERATIONS_MAX_KEY_SIZE (200) +constexpr uint16_t CUSTOM_OPERATIONS_MAX_KEY_SIZE = 200; -enum types { +enum class custom_operations_object_types { account_map = 0 }; struct account_storage_object : public abstract_object { static constexpr uint8_t space_id = CUSTOM_OPERATIONS_SPACE_ID; - static constexpr uint8_t type_id = account_map; + static constexpr uint8_t type_id = static_cast( custom_operations_object_types::account_map ); account_id_type account; string catalog; @@ -53,7 +51,7 @@ struct account_storage_object : public abstract_object struct by_account_catalog_key; -typedef multi_index_container< +using account_storage_multi_idx_type = multi_index_container< account_storage_object, indexed_by< ordered_unique< tag, member< object, object_id_type, &object::id > >, @@ -65,14 +63,14 @@ typedef multi_index_container< > > > -> account_storage_multi_index_type; +>; -typedef generic_index account_storage_index; +using account_storage_index = generic_index; -using account_storage_id_type = object_id; +using account_storage_id_type = object_id; } } //graphene::custom_operations FC_REFLECT_DERIVED( graphene::custom_operations::account_storage_object, (graphene::db::object), (account)(catalog)(key)(value)) -FC_REFLECT_ENUM( graphene::custom_operations::types, (account_map)) +FC_REFLECT_ENUM( graphene::custom_operations::custom_operations_object_types, (account_map)) diff --git a/tests/tests/custom_operations.cpp b/tests/tests/custom_operations.cpp index 8ff2cd1f7a..473d3edd01 100644 --- a/tests/tests/custom_operations.cpp +++ b/tests/tests/custom_operations.cpp @@ -55,7 +55,7 @@ void map_operation(flat_map>& pairs, bool remove, strin store.catalog = catalog; auto packed = fc::raw::pack(store); - packed.insert(packed.begin(), types::account_map); + packed.insert(packed.begin(), account_storage_object::type_id); op.payer = account; op.data = packed; From e1ef9d0651e3485fa9ed0c4ce2601ca4399d8d92 Mon Sep 17 00:00:00 2001 From: Abit Date: Wed, 27 Jul 2022 00:35:38 +0200 Subject: [PATCH 145/338] Update wording about API in README --- README.md | 52 +++++++++++++++++++++++++++------------------------- 1 file changed, 27 insertions(+), 25 deletions(-) diff --git a/README.md b/README.md index 77aa799931..9acb5b57c9 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ Visit [Awesome BitShares](https://github.com/bitshares/awesome-bitshares) to fin * [Getting Started](#getting-started) * [Support](#support) * [Using the API](#using-the-api) -* [Accessing restrictable node API's](#accessing-restrictable-node-apis) +* [Accessing restrictable node API sets](#accessing-restrictable-node-api-sets) * [FAQ](#faq) * [License](#license) @@ -205,12 +205,12 @@ Using the API ### Node API -The `witness_node` software provides several different API's, known as *node API*. +The `witness_node` software provides several different API sets, known as *node API*. -Each API has its own ID and a name. 
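As an aside to the two object-ID patches above (the corrected `is<T>()` comparison and the new `custom_operations` constants): both rely on the packed 64-bit ID layout that the comparison implies, with the space ID in the top 8 bits, the type ID in the next 8 bits and the instance number in the lower 48 bits. The standalone sketch below only illustrates that assumed layout with plain integers; it does not use the real `object_id_type` class, and the space/type values are simply the ones quoted from the patches above.

    // Sketch of the packed object ID layout implied by is<T>():
    // bits 56-63 = space_id, bits 48-55 = type_id, bits 0-47 = instance.
    #include <cstdint>
    #include <iostream>

    int main()
    {
       const uint64_t space_id = 7;   // CUSTOM_OPERATIONS_SPACE_ID in the patch above
       const uint64_t type_id  = 0;   // custom_operations_object_types::account_map
       const uint64_t instance = 12;  // arbitrary example instance

       const uint64_t number = ( space_id << 56 ) | ( type_id << 48 ) | instance;

       // Mirrors the fixed comparison: shift the 16-bit space/type prefix down and compare
       // it as a whole, with every operand already widened to uint64_t.
       const bool is_account_storage = ( number >> 48 ) == ( ( space_id << 8 ) | type_id );

       std::cout << space_id << "." << type_id << "." << ( number & 0xFFFFFFFFFFFFULL )
                 << ( is_account_storage ? " matches" : " does not match" ) << std::endl;
       return 0;
    }
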
-When running `witness_node` with RPC connection enabled, initially two API's are available: -* API 0 has name *"database"*, it provides read-only access to the database, -* API 1 has name *"login"*, it is used to login and gain access to additional, restrictable API's. +Each API set has its own ID and a name. +When running `witness_node` with RPC connection enabled, initially two API sets are available: +* API set with ID `0` has name *"database"*, it provides read-only access to the database, +* API set with ID `1` has name *"login"*, it is used to login and gain access to additional, restrictable API sets. Here is an example using `wscat` package from `npm` for websockets: @@ -219,26 +219,26 @@ Here is an example using `wscat` package from `npm` for websockets: > {"id":1, "method":"call", "params":[0,"get_accounts",[["1.2.0"]]]} < {"id":1,"result":[{"id":"1.2.0","annotations":[],"membership_expiration_date":"1969-12-31T23:59:59","registrar":"1.2.0","referrer":"1.2.0","lifetime_referrer":"1.2.0","network_fee_percentage":2000,"lifetime_referrer_fee_percentage":8000,"referrer_rewards_percentage":0,"name":"committee-account","owner":{"weight_threshold":1,"account_auths":[],"key_auths":[],"address_auths":[]},"active":{"weight_threshold":6,"account_auths":[["1.2.5",1],["1.2.6",1],["1.2.7",1],["1.2.8",1],["1.2.9",1],["1.2.10",1],["1.2.11",1],["1.2.12",1],["1.2.13",1],["1.2.14",1]],"key_auths":[],"address_auths":[]},"options":{"memo_key":"GPH1111111111111111111111111111111114T1Anm","voting_account":"1.2.0","num_witness":0,"num_committee":0,"votes":[],"extensions":[]},"statistics":"2.7.0","whitelisting_accounts":[],"blacklisting_accounts":[]}]} -We can do the same thing using an HTTP client such as `curl` for API's which do not require login or other session state: +We can do the same thing using an HTTP client such as `curl` for APIs which do not require login or other session state: $ curl --data '{"jsonrpc": "2.0", "method": "call", "params": [0, "get_accounts", [["1.2.0"]]], "id": 1}' http://127.0.0.1:8090/ {"id":1,"result":[{"id":"1.2.0","annotations":[],"membership_expiration_date":"1969-12-31T23:59:59","registrar":"1.2.0","referrer":"1.2.0","lifetime_referrer":"1.2.0","network_fee_percentage":2000,"lifetime_referrer_fee_percentage":8000,"referrer_rewards_percentage":0,"name":"committee-account","owner":{"weight_threshold":1,"account_auths":[],"key_auths":[],"address_auths":[]},"active":{"weight_threshold":6,"account_auths":[["1.2.5",1],["1.2.6",1],["1.2.7",1],["1.2.8",1],["1.2.9",1],["1.2.10",1],["1.2.11",1],["1.2.12",1],["1.2.13",1],["1.2.14",1]],"key_auths":[],"address_auths":[]},"options":{"memo_key":"GPH1111111111111111111111111111111114T1Anm","voting_account":"1.2.0","num_witness":0,"num_committee":0,"votes":[],"extensions":[]},"statistics":"2.7.0","whitelisting_accounts":[],"blacklisting_accounts":[]}]} -When using an HTTP client, the API ID can be replaced by the API name, E.G. +When using an HTTP client, the API set ID can be replaced by the API set name, E.G. 
$ curl --data '{"jsonrpc": "2.0", "method": "call", "params": ["database", "get_accounts", [["1.2.0"]]], "id": 1}' http://127.0.0.1:8090/ -The definition of all node API's is available in the source code files including +The definition of all node APIs is available in the source code files including [database_api.hpp](https://github.com/bitshares/bitshares-core/blob/master/libraries/app/include/graphene/app/database_api.hpp) and [api.hpp](https://github.com/bitshares/bitshares-core/blob/master/libraries/app/include/graphene/app/api.hpp). Corresponding documentation can be found in Doxygen: * [database API](https://doxygen.bitshares.org/classgraphene_1_1app_1_1database__api.html) -* [other API's](https://doxygen.bitshares.org/namespacegraphene_1_1app.html) +* [other APIs](https://doxygen.bitshares.org/namespacegraphene_1_1app.html) ### Wallet API -The `cli_wallet` program can also be configured to serve **all of its commands** as API's, known as *wallet API*. +The `cli_wallet` program can also be configured to serve **all of its commands** as APIs, known as *wallet API*. Start `cli_wallet` with RPC connection enabled: @@ -246,8 +246,8 @@ Start `cli_wallet` with RPC connection enabled: Access the wallet API using an HTTP client: - $ curl --data '{"jsonrpc": "2.0", "method": "info", "params": [], "id": 1}' http://127.0.0.1:8091/rpc - $ curl --data '{"jsonrpc": "2.0", "method": "get_account", "params": ["1.2.0"], "id": 1}' http://127.0.0.1:8091/rpc + $ curl --data '{"jsonrpc": "2.0", "method": "info", "params": [], "id": 1}' http://127.0.0.1:8091/ + $ curl --data '{"jsonrpc": "2.0", "method": "get_account", "params": ["1.2.0"], "id": 1}' http://127.0.0.1:8091/ Note: The syntax to access wallet API is a bit different than accessing node API. @@ -256,12 +256,12 @@ Note: The syntax to access wallet API is a bit different than accessing node API * When using wallet API, sensitive data E.G. the wallet password and private keys is transmitted as plain text, thus may be vulnerable to network sniffing. It is recommended that only use wallet API with localhost, or in a clean network, and / or use `--rpc-tls-endpoint` parameter to only serve wallet API via secure connections. -Accessing restrictable node API's ---------------------------------- +Accessing restrictable node API sets +------------------------------------ -You can restrict node API's to particular users by specifying an `api-access` file in `config.ini` -or by using the `--api-access /full/path/to/api-access.json` startup node command. Here is an example `api-access` file which allows -user `bytemaster` with password `supersecret` to access four different API's, while allowing any other user to access the three public API's +You can restrict node API sets to particular users by specifying an `api-access` file in `config.ini` +or by using the `--api-access /full/path/to/api-access.json` command line option on node startup. Here is an example `api-access` file which allows +user `bytemaster` with password `supersecret` to access four different API sets, while allowing any other user to access the three public API sets necessary to use the node: { @@ -286,25 +286,27 @@ necessary to use the node: ] } +Note: the `login` API set is always accessible. + Passwords are stored in `base64` as salted `sha256` hashes. A simple Python script, [`saltpass.py`](https://github.com/bitshares/bitshares-core/blob/master/programs/witness_node/saltpass.py) is avaliable to obtain hash and salt values from a password. 
A single asterisk `"*"` may be specified as username or password hash to accept any value. -With the above configuration, here is an example of how to call `add_node` from the `network_node` API: +With the above configuration, here is an example of how to call the `add_node` API from the `network_node` API set: {"id":1, "method":"call", "params":[1,"login",["bytemaster", "supersecret"]]} {"id":2, "method":"call", "params":[1,"network_node",[]]} {"id":3, "method":"call", "params":[2,"add_node",["127.0.0.1:9090"]]} -Note, the call to `network_node` is necessary to obtain the correct API identifier for the network API. It is not guaranteed that the network API identifier will always be `2`. +Note, the call to `network_node` is necessary to obtain the correct API set ID for the `network_node` API set. It is not guaranteed that the API set ID for the `network_node` API set will always be `2`. -The restricted API's are accessible via HTTP too using *basic access authentication*. E.G. +The restricted API sets are accessible via HTTP too using *basic access authentication*. E.G. $ curl --data '{"jsonrpc": "2.0", "method": "call", "params": ["network_node", "add_node", ["127.0.0.1:9090"]], "id": 1}' http://bytemaster:supersecret@127.0.0.1:8090/ Our `doxygen` documentation contains the most up-to-date information -about API's for the [node](https://doxygen.bitshares.org/namespacegraphene_1_1app.html) and the +about APIs for the [node](https://doxygen.bitshares.org/namespacegraphene_1_1app.html) and the [wallet](https://doxygen.bitshares.org/classgraphene_1_1wallet_1_1wallet__api.html). @@ -332,7 +334,7 @@ FAQ - Is there a way to access methods which require login over HTTP? - Yes. Most of the methods can be accessed by specifying the API name instead of an API ID. If an API is protected by a username and a password, it can be accessed by using *basic access authentication*. Please check the ["Accessing restrictable node API's"](#accessing-restrictable-node-apis) section for more info. + Yes. Most of the methods can be accessed by specifying the API name instead of an API ID. If an API is protected by a username and a password, it can be accessed by using *basic access authentication*. Please check the ["Accessing restrictable node API sets"](#accessing-restrictable-node-api-sets) section for more info. However, HTTP is not really designed for "server push" notifications, and we would have to figure out a way to queue notifications for a polling client. Websockets solves this problem. If you need to access the stateful methods, use Websockets. @@ -366,8 +368,8 @@ FAQ - How do I get the `network_add_nodes` command to work? Why is it so complicated? - You need to follow the instructions in the ["Accessing restrictable node API's"](#accessing-restrictable-node-apis) section to - allow a username/password access to the `network_node` API. Then you need + You need to follow the instructions in the ["Accessing restrictable node API sets"](#accessing-restrictable-node-api-sets) section to + allow a username/password access to the `network_node` API set. Then you need to pass the username/password to the `cli_wallet` on the command line. 
It's set up this way so that the default configuration is secure even if the RPC port is From 92956890a59bb8d609b502fde28ccc40481a8038 Mon Sep 17 00:00:00 2001 From: Abit Date: Wed, 27 Jul 2022 14:46:40 +0200 Subject: [PATCH 146/338] Rename the build-docker workflow --- .github/workflows/build-docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-docker.yml b/.github/workflows/build-docker.yml index 8a451332dd..7af0459012 100644 --- a/.github/workflows/build-docker.yml +++ b/.github/workflows/build-docker.yml @@ -1,4 +1,4 @@ -name: Build and push to DockerHub +name: Docker on: [ push, pull_request ] jobs: docker: From 46c7a1b0360de21e5293a13711ba8c04986772cc Mon Sep 17 00:00:00 2001 From: Abit Date: Wed, 27 Jul 2022 14:51:17 +0200 Subject: [PATCH 147/338] Add Docker build status badges to README --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 9acb5b57c9..a582191ff9 100644 --- a/README.md +++ b/README.md @@ -19,10 +19,10 @@ Visit [Awesome BitShares](https://github.com/bitshares/awesome-bitshares) to fin |Branch|Build Status| |---|---| -|`master`|[![](https://github.com/bitshares/bitshares-core/workflows/macOS/badge.svg?branch=master)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"macOS"+branch%3Amaster) [![](https://github.com/bitshares/bitshares-core/workflows/Ubuntu%20Debug/badge.svg?branch=master)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Ubuntu+Debug"+branch%3Amaster) [![](https://github.com/bitshares/bitshares-core/workflows/Ubuntu%20Release/badge.svg?branch=master)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Ubuntu+Release"+branch%3Amaster) [![](https://github.com/bitshares/bitshares-core/workflows/Windows%20MinGW64/badge.svg?branch=master)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Windows+MinGW64"+branch%3Amaster)| -|`develop`|[![](https://github.com/bitshares/bitshares-core/workflows/macOS/badge.svg?branch=develop)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"macOS"+branch%3Adevelop) [![](https://github.com/bitshares/bitshares-core/workflows/Ubuntu%20Debug/badge.svg?branch=develop)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Ubuntu+Debug"+branch%3Adevelop) [![](https://github.com/bitshares/bitshares-core/workflows/Ubuntu%20Release/badge.svg?branch=develop)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Ubuntu+Release"+branch%3Adevelop) [![](https://github.com/bitshares/bitshares-core/workflows/Windows%20MinGW64/badge.svg?branch=develop)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Windows+MinGW64"+branch%3Adevelop)| -|`hardfork`|[![](https://github.com/bitshares/bitshares-core/workflows/macOS/badge.svg?branch=hardfork)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"macOS"+branch%3Ahardfork) [![](https://github.com/bitshares/bitshares-core/workflows/Ubuntu%20Debug/badge.svg?branch=hardfork)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Ubuntu+Debug"+branch%3Ahardfork) [![](https://github.com/bitshares/bitshares-core/workflows/Ubuntu%20Release/badge.svg?branch=hardfork)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Ubuntu+Release"+branch%3Ahardfork) 
[![](https://github.com/bitshares/bitshares-core/workflows/Windows%20MinGW64/badge.svg?branch=hardfork)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Windows+MinGW64"+branch%3Ahardfork)| -|`testnet`|[![](https://github.com/bitshares/bitshares-core/workflows/macOS/badge.svg?branch=testnet)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"macOS"+branch%3Atestnet) [![](https://github.com/bitshares/bitshares-core/workflows/Ubuntu%20Debug/badge.svg?branch=testnet)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Ubuntu+Debug"+branch%3Atestnet) [![](https://github.com/bitshares/bitshares-core/workflows/Ubuntu%20Release/badge.svg?branch=testnet)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Ubuntu+Release"+branch%3Atestnet) [![](https://github.com/bitshares/bitshares-core/workflows/Windows%20MinGW64/badge.svg?branch=testnet)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Windows+MinGW64"+branch%3Atestnet)| +|`master`|[![](https://github.com/bitshares/bitshares-core/workflows/macOS/badge.svg?branch=master)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"macOS"+branch%3Amaster) [![](https://github.com/bitshares/bitshares-core/workflows/Ubuntu%20Debug/badge.svg?branch=master)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Ubuntu+Debug"+branch%3Amaster) [![](https://github.com/bitshares/bitshares-core/workflows/Ubuntu%20Release/badge.svg?branch=master)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Ubuntu+Release"+branch%3Amaster) [![](https://github.com/bitshares/bitshares-core/workflows/Windows%20MinGW64/badge.svg?branch=master)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Windows+MinGW64"+branch%3Amaster) [![](https://github.com/bitshares/bitshares-core/workflows/Docker/badge.svg?branch=master)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A%22Docker%22+branch%3Amaster)| +|`develop`|[![](https://github.com/bitshares/bitshares-core/workflows/macOS/badge.svg?branch=develop)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"macOS"+branch%3Adevelop) [![](https://github.com/bitshares/bitshares-core/workflows/Ubuntu%20Debug/badge.svg?branch=develop)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Ubuntu+Debug"+branch%3Adevelop) [![](https://github.com/bitshares/bitshares-core/workflows/Ubuntu%20Release/badge.svg?branch=develop)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Ubuntu+Release"+branch%3Adevelop) [![](https://github.com/bitshares/bitshares-core/workflows/Windows%20MinGW64/badge.svg?branch=develop)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Windows+MinGW64"+branch%3Adevelop) [![](https://github.com/bitshares/bitshares-core/workflows/Docker/badge.svg?branch=develop)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A%22Docker%22+branch%3Adevelop)| +|`hardfork`|[![](https://github.com/bitshares/bitshares-core/workflows/macOS/badge.svg?branch=hardfork)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"macOS"+branch%3Ahardfork) [![](https://github.com/bitshares/bitshares-core/workflows/Ubuntu%20Debug/badge.svg?branch=hardfork)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Ubuntu+Debug"+branch%3Ahardfork) 
[![](https://github.com/bitshares/bitshares-core/workflows/Ubuntu%20Release/badge.svg?branch=hardfork)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Ubuntu+Release"+branch%3Ahardfork) [![](https://github.com/bitshares/bitshares-core/workflows/Windows%20MinGW64/badge.svg?branch=hardfork)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Windows+MinGW64"+branch%3Ahardfork) [![](https://github.com/bitshares/bitshares-core/workflows/Docker/badge.svg?branch=hardfork)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A%22Docker%22+branch%3Ahardfork)| +|`testnet`|[![](https://github.com/bitshares/bitshares-core/workflows/macOS/badge.svg?branch=testnet)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"macOS"+branch%3Atestnet) [![](https://github.com/bitshares/bitshares-core/workflows/Ubuntu%20Debug/badge.svg?branch=testnet)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Ubuntu+Debug"+branch%3Atestnet) [![](https://github.com/bitshares/bitshares-core/workflows/Ubuntu%20Release/badge.svg?branch=testnet)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Ubuntu+Release"+branch%3Atestnet) [![](https://github.com/bitshares/bitshares-core/workflows/Windows%20MinGW64/badge.svg?branch=testnet)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Windows+MinGW64"+branch%3Atestnet) [![](https://github.com/bitshares/bitshares-core/workflows/Docker/badge.svg?branch=testnet)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A%22Docker%22+branch%3Atestnet)| |`master` of `bitshares-fc`|[![](https://github.com/bitshares/bitshares-fc/workflows/macOS/badge.svg?branch=master)](https://github.com/bitshares/bitshares-fc/actions?query=workflow%3A"macOS"+branch%3Amaster) [![](https://github.com/bitshares/bitshares-fc/workflows/Ubuntu%20Debug/badge.svg?branch=master)](https://github.com/bitshares/bitshares-fc/actions?query=workflow%3A"Ubuntu+Debug"+branch%3Amaster) [![](https://github.com/bitshares/bitshares-fc/workflows/Ubuntu%20Release/badge.svg?branch=master)](https://github.com/bitshares/bitshares-fc/actions?query=workflow%3A"Ubuntu+Release"+branch%3Amaster)| Getting Started From 8105a3f52cf2f84b30539b4488f3552d5244e801 Mon Sep 17 00:00:00 2001 From: Abit Date: Wed, 27 Jul 2022 18:14:30 +0200 Subject: [PATCH 148/338] Update default es-objects-max-mapping-depth to 10 --- libraries/plugins/es_objects/es_objects.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index fb3fee5776..0e7994867a 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -83,8 +83,9 @@ class es_objects_plugin_impl std::string index_prefix = "objects-"; - /// For the "index.mapping.depth.limit" setting in ES. The default value is 20. 
- uint16_t max_mapping_depth = 20; + /// For the "index.mapping.depth.limit" setting in ES whose default value is 20, + /// and need to be even smaller to not trigger the index.mapping.total_fields.limit error + uint16_t max_mapping_depth = 10; uint32_t start_es_after_block = 0; bool sync_db_on_startup = false; @@ -390,7 +391,8 @@ void es_objects_plugin::plugin_set_program_options( ("es-objects-index-prefix", boost::program_options::value(), "Add a prefix to the index(objects-)") ("es-objects-max-mapping-depth", boost::program_options::value(), - "The maximum index mapping depth (index.mapping.depth.limit) setting in ES (20)") + "Can not exceed the maximum index mapping depth (index.mapping.depth.limit) setting in ES, " + "and need to be even smaller to not trigger the index.mapping.total_fields.limit error (10)") ("es-objects-keep-only-current", boost::program_options::value(), "Deprecated. Please use the store-updates or no-delete options. " "Keep only current state of the objects(true)") From 8a3cb46b2fc57d7945e7dbca83b92f6ae026c284 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 29 Jul 2022 01:26:12 +0000 Subject: [PATCH 149/338] Refactor code and fix issues related to login_api --- libraries/app/api.cpp | 188 +++++++++++------- libraries/app/application.cpp | 38 ++-- libraries/app/include/graphene/app/api.hpp | 113 +++++++---- .../app/include/graphene/app/api_access.hpp | 9 +- 4 files changed, 216 insertions(+), 132 deletions(-) diff --git a/libraries/app/api.cpp b/libraries/app/api.cpp index 1125330f35..1174a5e9e8 100644 --- a/libraries/app/api.cpp +++ b/libraries/app/api.cpp @@ -41,6 +41,7 @@ template class fc::api; template class fc::api; template class fc::api; template class fc::api; +template class fc::api; template class fc::api; @@ -52,80 +53,57 @@ namespace graphene { namespace app { // Nothing to do } - bool login_api::login(const string& user, const string& password) + variant login_api::login(const optional& o_user, const optional& o_password) { + if( !o_user && !o_password ) + return uint32_t(1); // Note: hard code it here for backward compatibility + + FC_ASSERT( o_user.valid() && o_password.valid(), "Must provide both user and password" ); + string user = *o_user; + optional< api_access_info > acc = _app.get_api_access_info( user ); if( !acc.valid() ) - return false; + return logout(); if( acc->password_hash_b64 != "*" ) { std::string password_salt = fc::base64_decode( acc->password_salt_b64 ); std::string acc_password_hash = fc::base64_decode( acc->password_hash_b64 ); + string password = *o_password; fc::sha256 hash_obj = fc::sha256::hash( password + password_salt ); if( hash_obj.data_size() != acc_password_hash.length() ) - return false; + return logout(); if( memcmp( hash_obj.data(), acc_password_hash.c_str(), hash_obj.data_size() ) != 0 ) - return false; + return logout(); } - for( const std::string& api_name : acc->allowed_apis ) - enable_api( api_name ); + // Ideally, we should clean up the API sets that the previous user registered but the new user + // no longer has access to. + // However, the shared pointers to these objects are already saved elsewhere (in FC), + // so we are unable to clean up, so it does not make sense to reset the optional fields here. + + _allowed_apis = acc->allowed_apis; return true; } + bool login_api::logout() + { + // Ideally, we should clean up the API sets that the previous user registered. 
+ // However, the shared pointers to these objects are already saved elsewhere (in FC), + // so we are unable to clean up, so it does not make sense to reset the optional fields here. + _allowed_apis.clear(); + return false; + } + application_options login_api::get_config() const { return _app.get_options(); } - void login_api::enable_api( const std::string& api_name ) + bool login_api::is_database_api_allowed() const { - if( api_name == "database_api" ) - { - _database_api = std::make_shared< database_api >( std::ref( *_app.chain_database() ), - &( _app.get_options() ) ); - } - else if( api_name == "block_api" ) - { - _block_api = std::make_shared< block_api >( std::ref( *_app.chain_database() ) ); - } - else if( api_name == "network_broadcast_api" ) - { - _network_broadcast_api = std::make_shared< network_broadcast_api >( std::ref( _app ) ); - } - else if( api_name == "history_api" ) - { - _history_api = std::make_shared< history_api >( _app ); - } - else if( api_name == "network_node_api" ) - { - _network_node_api = std::make_shared< network_node_api >( std::ref(_app) ); - } - else if( api_name == "crypto_api" ) - { - _crypto_api = std::make_shared< crypto_api >(); - } - else if( api_name == "asset_api" ) - { - _asset_api = std::make_shared< asset_api >( _app ); - } - else if( api_name == "orders_api" ) - { - _orders_api = std::make_shared< orders_api >( std::ref( _app ) ); - } - else if( api_name == "custom_operations_api" ) - { - if( _app.get_plugin( "custom_operations" ) ) - _custom_operations_api = std::make_shared< custom_operations_api >( std::ref( _app ) ); - } - else if( api_name == "debug_api" ) - { - // can only enable this API if the plugin was loaded - if( _app.get_plugin( "debug_witness" ) ) - _debug_api = std::make_shared< graphene::debug_witness::debug_api >( std::ref(_app) ); - } - return; + bool allowed = ( _allowed_apis.find("database_api") != _allowed_apis.end() ); + return allowed; } // block_api @@ -252,66 +230,132 @@ namespace graphene { namespace app { return _app.p2p_node()->set_advanced_node_parameters(params); } - fc::api login_api::network_broadcast()const + fc::api login_api::network_broadcast() { - FC_ASSERT(_network_broadcast_api); + bool allowed = ( _allowed_apis.find("network_broadcast_api") != _allowed_apis.end() ); + FC_ASSERT( allowed, "Access denied" ); + if( !_network_broadcast_api ) + { + _network_broadcast_api = std::make_shared< network_broadcast_api >( std::ref( _app ) ); + } return *_network_broadcast_api; } - fc::api login_api::block()const + fc::api login_api::block() { - FC_ASSERT(_block_api); + bool allowed = ( _allowed_apis.find("block_api") != _allowed_apis.end() ); + FC_ASSERT( allowed, "Access denied" ); + if( !_block_api ) + { + _block_api = std::make_shared< block_api >( std::ref( *_app.chain_database() ) ); + } return *_block_api; } - fc::api login_api::network_node()const + fc::api login_api::network_node() { - FC_ASSERT(_network_node_api); + bool allowed = ( _allowed_apis.find("network_node_api") != _allowed_apis.end() ); + FC_ASSERT( allowed, "Access denied" ); + if( !_network_node_api ) + { + _network_node_api = std::make_shared< network_node_api >( std::ref(_app) ); + } return *_network_node_api; } - fc::api login_api::database()const + fc::api login_api::database() { - FC_ASSERT(_database_api); + bool allowed = ( _allowed_apis.find("database_api") != _allowed_apis.end() ); + FC_ASSERT( allowed, "Access denied" ); + if( !_database_api ) + { + _database_api = std::make_shared< database_api >( std::ref( *_app.chain_database() ), + 
&( _app.get_options() ) ); + } return *_database_api; } - fc::api login_api::history() const + fc::api login_api::history() { - FC_ASSERT(_history_api); + bool allowed = ( _allowed_apis.find("history_api") != _allowed_apis.end() ); + FC_ASSERT( allowed, "Access denied" ); + if( !_history_api ) + { + _history_api = std::make_shared< history_api >( _app ); + } return *_history_api; } - fc::api login_api::crypto() const + fc::api login_api::crypto() { - FC_ASSERT(_crypto_api); + bool allowed = ( _allowed_apis.find("crypto_api") != _allowed_apis.end() ); + FC_ASSERT( allowed, "Access denied" ); + if( !_crypto_api ) + { + _crypto_api = std::make_shared< crypto_api >(); + } return *_crypto_api; } - fc::api login_api::asset() const + fc::api login_api::asset() { - FC_ASSERT(_asset_api); + bool allowed = ( _allowed_apis.find("asset_api") != _allowed_apis.end() ); + FC_ASSERT( allowed, "Access denied" ); + if( !_asset_api ) + { + _asset_api = std::make_shared< asset_api >( _app ); + } return *_asset_api; } - fc::api login_api::orders() const + fc::api login_api::orders() { - FC_ASSERT(_orders_api); + bool allowed = ( _allowed_apis.find("orders_api") != _allowed_apis.end() ); + FC_ASSERT( allowed, "Access denied" ); + if( !_orders_api ) + { + _orders_api = std::make_shared< orders_api >( std::ref( _app ) ); + } return *_orders_api; } - fc::api login_api::debug() const + fc::api login_api::debug() { - FC_ASSERT(_debug_api); + bool allowed = ( _allowed_apis.find("debug_api") != _allowed_apis.end() ); + FC_ASSERT( allowed, "Access denied" ); + // can only use this API set if the plugin was loaded + bool plugin_enabled = !!_app.get_plugin( "debug_witness" ); + FC_ASSERT( plugin_enabled, "The debug_witness plugin is not enabled" ); + if( ! _debug_api ) + { + _debug_api = std::make_shared< graphene::debug_witness::debug_api >( std::ref(_app) ); + } return *_debug_api; } - fc::api login_api::custom_operations() const + fc::api login_api::custom_operations() { - FC_ASSERT(_custom_operations_api); + bool allowed = ( _allowed_apis.find("custom_operations_api") != _allowed_apis.end() ); + FC_ASSERT( allowed, "Access denied" ); + // can only use this API set if the plugin was loaded + bool plugin_enabled = !!_app.get_plugin( "custom_operations" ); + FC_ASSERT( plugin_enabled, "The custom_operations plugin is not enabled" ); + if( !_custom_operations_api ) + { + _custom_operations_api = std::make_shared< custom_operations_api >( std::ref( _app ) ); + } return *_custom_operations_api; } + fc::api login_api::dummy() + { + if( !_dummy_api ) + { + _dummy_api = std::make_shared< dummy_api >(); + } + return *_dummy_api; + } + history_api::history_api(application& app) : _app(app) { // Nothing else to do diff --git a/libraries/app/application.cpp b/libraries/app/application.cpp index a6432b7919..427eddbb21 100644 --- a/libraries/app/application.cpp +++ b/libraries/app/application.cpp @@ -164,14 +164,6 @@ void application_impl::new_connection( const fc::http::websocket_connection_ptr& { auto wsc = std::make_shared(c, GRAPHENE_NET_MAX_NESTED_OBJECTS); auto login = std::make_shared( _self ); - login->enable_api("database_api"); - - wsc->register_api(login->database()); - wsc->register_api(fc::api(login)); - c->set_session_data( wsc ); - - std::string username = "*"; - std::string password = "*"; // Try to extract login information from "Authorization" header if present std::string auth = c->get_request_header("Authorization"); @@ -185,11 +177,21 @@ void application_impl::new_connection( const 
fc::http::websocket_connection_ptr& FC_ASSERT(parts.size() == 2); - username = parts[0]; - password = parts[1]; + const string& username = parts[0]; + const string& password = parts[1]; + login->login(username, password); } + else + login->login("", ""); - login->login(username, password); + // API set ID 0. Note: changing it may break client applications + if( login->is_database_api_allowed() ) + wsc->register_api(login->database()); + else + wsc->register_api(login->dummy()); + // API set ID 1. Note: changing it may break client applications + wsc->register_api(fc::api(login)); + c->set_session_data( wsc ); } void application_impl::reset_websocket_server() @@ -283,14 +285,12 @@ void application_impl::initialize(const fc::path& data_dir, shared_ptr; @@ -643,6 +651,7 @@ extern template class fc::api; extern template class fc::api; extern template class fc::api; extern template class fc::api; +extern template class fc::api; namespace graphene { namespace app { /** @@ -656,55 +665,75 @@ namespace graphene { namespace app { explicit login_api(application& a); /** - * @brief Authenticate to the RPC server - * @param user Username to login with - * @param password Password to login with - * @return True if logged in successfully; false otherwise + * @brief Authenticate to the RPC server, or retrieve the API set ID of the @a login API set + * @param user Username to login with, optional + * @param password Password to login with, optional + * @return @a true if to authenticate and logged in successfully, + * @a false if to authenticate and failed to log in, + * or the API set ID if to retrieve it * - * @note This must be called prior to requesting other APIs. - * Other APIs may not be accessible until the client has sucessfully authenticated. + * @note Provide both @p user and @p password to authenticate, + * or provide none of them (or @a null without quotes) to retrieve the API set ID + * of the @a login API set. + * @note This is called automatically for authentication when a HTTP or WebSocket connection is established, + * assuming credentials are provided with HTTP Basic authentication headers. + * @note When trying to authenticate again, even if failed to log in, already allocated API set IDs are + * still accessible. */ - bool login(const string& user, const string& password); + variant login(const optional& user, const optional& password); + + /// @brief Log out + /// @return @a false + /// @note Already allocated API set IDs are still accessible after calling this. + bool logout(); /// @brief Retrieve configured application options application_options get_config() const; - /// @brief Retrieve the network block API - fc::api block()const; - /// @brief Retrieve the network broadcast API - fc::api network_broadcast()const; - /// @brief Retrieve the database API - fc::api database()const; - /// @brief Retrieve the history API - fc::api history()const; - /// @brief Retrieve the network node API - fc::api network_node()const; - /// @brief Retrieve the cryptography API - fc::api crypto()const; - /// @brief Retrieve the asset API - fc::api asset()const; - /// @brief Retrieve the orders API - fc::api orders()const; - /// @brief Retrieve the debug API (if available) - fc::api debug()const; - /// @brief Retrieve the custom operations API - fc::api custom_operations()const; - - /// @brief Called to enable an API, not reflected. 
- void enable_api( const string& api_name ); - private: + /// @brief Retrieve the network block API set + fc::api block(); + /// @brief Retrieve the network broadcast API set + fc::api network_broadcast(); + /// @brief Retrieve the database API set + fc::api database(); + /// @brief Retrieve the history API set + fc::api history(); + /// @brief Retrieve the network node API set + fc::api network_node(); + /// @brief Retrieve the cryptography API set + fc::api crypto(); + /// @brief Retrieve the asset API set + fc::api asset(); + /// @brief Retrieve the orders API set + fc::api orders(); + /// @brief Retrieve the debug API set + fc::api debug(); + /// @brief Retrieve the custom operations API set + fc::api custom_operations(); + + /// @brief Retrieve a dummy API set, not reflected + fc::api dummy(); + + /// @brief Check whether database_api is allowed, not reflected + /// @return @a true if database_api is allowed, @a false otherwise + bool is_database_api_allowed() const; + private: application& _app; - optional< fc::api > _block_api; - optional< fc::api > _database_api; - optional< fc::api > _network_broadcast_api; - optional< fc::api > _network_node_api; - optional< fc::api > _history_api; - optional< fc::api > _crypto_api; - optional< fc::api > _asset_api; - optional< fc::api > _orders_api; + + flat_set< string > _allowed_apis; + + optional< fc::api > _block_api; + optional< fc::api > _database_api; + optional< fc::api > _network_broadcast_api; + optional< fc::api > _network_node_api; + optional< fc::api > _history_api; + optional< fc::api > _crypto_api; + optional< fc::api > _asset_api; + optional< fc::api > _orders_api; optional< fc::api > _debug_api; - optional< fc::api > _custom_operations_api; + optional< fc::api > _custom_operations_api; + optional< fc::api > _dummy_api; }; }} // graphene::app @@ -778,8 +807,12 @@ FC_API(graphene::app::orders_api, FC_API(graphene::app::custom_operations_api, (get_storage_info) ) +FC_API(graphene::app::dummy_api, + (dummy) + ) FC_API(graphene::app::login_api, (login) + (logout) (get_config) (block) (network_broadcast) diff --git a/libraries/app/include/graphene/app/api_access.hpp b/libraries/app/include/graphene/app/api_access.hpp index 8dbad1a1d9..46dc4925a2 100644 --- a/libraries/app/include/graphene/app/api_access.hpp +++ b/libraries/app/include/graphene/app/api_access.hpp @@ -25,6 +25,8 @@ #include +#include + #include #include #include @@ -33,9 +35,14 @@ namespace graphene { namespace app { struct api_access_info { + api_access_info() = default; + api_access_info( const std::string& hash, const std::string& salt ) + : password_hash_b64(hash), password_salt_b64(salt) + { /* Nothing else to do */ } + std::string password_hash_b64; std::string password_salt_b64; - std::vector< std::string > allowed_apis; + boost::container::flat_set< std::string > allowed_apis; }; struct api_access From a666963097c9ba81161e8e20f970b9fc69a488c5 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 29 Jul 2022 05:35:48 +0000 Subject: [PATCH 150/338] Add login_api::get_available_api_sets API --- libraries/app/api.cpp | 5 +++++ libraries/app/include/graphene/app/api.hpp | 4 ++++ 2 files changed, 9 insertions(+) diff --git a/libraries/app/api.cpp b/libraries/app/api.cpp index 1174a5e9e8..7dc09058c2 100644 --- a/libraries/app/api.cpp +++ b/libraries/app/api.cpp @@ -100,6 +100,11 @@ namespace graphene { namespace app { return _app.get_options(); } + flat_set login_api::get_available_api_sets() const + { + return _allowed_apis; + } + bool 
login_api::is_database_api_allowed() const { bool allowed = ( _allowed_apis.find("database_api") != _allowed_apis.end() ); diff --git a/libraries/app/include/graphene/app/api.hpp b/libraries/app/include/graphene/app/api.hpp index d6ff3cd52c..1abb1eeda1 100644 --- a/libraries/app/include/graphene/app/api.hpp +++ b/libraries/app/include/graphene/app/api.hpp @@ -690,6 +690,9 @@ namespace graphene { namespace app { /// @brief Retrieve configured application options application_options get_config() const; + /// @brief Retrieve a list of API sets that the user has access to + flat_set get_available_api_sets() const; + /// @brief Retrieve the network block API set fc::api block(); /// @brief Retrieve the network broadcast API set @@ -814,6 +817,7 @@ FC_API(graphene::app::login_api, (login) (logout) (get_config) + (get_available_api_sets) (block) (network_broadcast) (database) From 531c6119e90c542e0997e8ab902908c412325ece Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 29 Jul 2022 16:38:20 +0000 Subject: [PATCH 151/338] Add tests for login_api --- tests/common/database_fixture.cpp | 54 +++++++++++++++++++++++++-- tests/tests/login_api_tests.cpp | 62 +++++++++++++++++++++++++++++++ 2 files changed, 113 insertions(+), 3 deletions(-) diff --git a/tests/common/database_fixture.cpp b/tests/common/database_fixture.cpp index 436f538c32..2f4970b1e6 100644 --- a/tests/common/database_fixture.cpp +++ b/tests/common/database_fixture.cpp @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -306,10 +307,57 @@ std::shared_ptr database_fixture_base::in fc::set_option( options, "api-limit-get-full-accounts", (uint32_t)200 ); fc::set_option( options, "api-limit-get-full-accounts-lists", (uint32_t)120 ); } - if( fixture.current_suite_name == "login_api_tests" - && fixture.current_test_name =="get_config_test" ) + + if( fixture.current_suite_name == "login_api_tests" ) { - fc::set_option( options, "api-limit-get-full-accounts-subscribe", (uint32_t)120 ); + if( fixture.current_test_name =="get_config_test" ) + fc::set_option( options, "api-limit-get-full-accounts-subscribe", (uint32_t)120 ); + if( fixture.current_test_name =="login_test" ) + { + // bytemaster/supersecret, user2/superpassword2 + string api_access_config = R"( + { + "permission_map" : + [ + [ + "bytemaster", + { + "password_hash_b64" : "9e9GF7ooXVb9k4BoSfNIPTelXeGOZ5DrgOYMj94elaY=", + "password_salt_b64" : "INDdM6iCi/8=", + "allowed_apis" : ["database_api", "network_broadcast_api", "history_api", "network_node_api", + "asset_api", "crypto_api", "block_api", "orders_api", "custom_operations_api" + "debug_api"] + } + ], + [ + "user2", + { + "password_hash_b64" : "myadjRISnFOWn2TTd91zqbY50q0w2j/oJGlcdQkUB0Y=", + "password_salt_b64" : "Zb8JrQDKNIQ=", + "allowed_apis" : ["history_api"] + } + ], + [ + "*", + { + "password_hash_b64" : "*", + "password_salt_b64" : "*", + "allowed_apis" : ["database_api", "network_broadcast_api", "history_api"] + } + ] + ] + } + )"; + + fc::json::save_to_file( fc::json::from_string( api_access_config ), + fixture.data_dir.path() / "api-access.json" ); + fc::set_option( options, "api-access", + boost::filesystem::path(fixture.data_dir.path() / "api-access.json") ); + + fixture.app.register_plugin(true); + fixture.app.register_plugin(true); + fc::set_option( options, "custom-operations-start-block", uint32_t(1) ); + } } // add account tracking for ahplugin for special test case with track-account enabled diff --git a/tests/tests/login_api_tests.cpp b/tests/tests/login_api_tests.cpp index 
558452c8f9..a997349778 100644 --- a/tests/tests/login_api_tests.cpp +++ b/tests/tests/login_api_tests.cpp @@ -50,4 +50,66 @@ BOOST_AUTO_TEST_CASE( get_config_test ) } FC_CAPTURE_LOG_AND_RETHROW( (0) ) } +BOOST_AUTO_TEST_CASE( login_test ) +{ try { + graphene::app::login_api login_api1( app ); + BOOST_CHECK_EQUAL( login_api1.get_available_api_sets().size(), 0u ); + BOOST_CHECK_THROW( login_api1.network_node(), fc::exception ); + + login_api1.login("",""); // */* + BOOST_CHECK_EQUAL( login_api1.get_available_api_sets().size(), 3u ); + BOOST_CHECK_THROW( login_api1.network_node(), fc::exception ); + auto db_api1 = login_api1.database(); + auto his_api1 = login_api1.history(); + auto nb_api1 = login_api1.network_broadcast(); + + login_api1.login("user2","superpassword2"); + BOOST_CHECK_EQUAL( login_api1.get_available_api_sets().size(), 1u ); + BOOST_CHECK_THROW( login_api1.network_node(), fc::exception ); + BOOST_CHECK_THROW( login_api1.database(), fc::exception ); + auto his_api2 = login_api1.history(); + BOOST_CHECK( his_api1 == his_api2 ); + + login_api1.login("user2","superpassword3"); // wrong password + BOOST_CHECK_EQUAL( login_api1.get_available_api_sets().size(), 0u ); + BOOST_CHECK_THROW( login_api1.network_node(), fc::exception ); + BOOST_CHECK_THROW( login_api1.database(), fc::exception ); + BOOST_CHECK_THROW( login_api1.history(), fc::exception ); + + login_api1.login("bytemaster","supersecret4"); // wrong password + BOOST_CHECK_EQUAL( login_api1.get_available_api_sets().size(), 0u ); + BOOST_CHECK_THROW( login_api1.network_node(), fc::exception ); + BOOST_CHECK_THROW( login_api1.database(), fc::exception ); + BOOST_CHECK_THROW( login_api1.history(), fc::exception ); + + login_api1.login("bytemaster","supersecret"); + BOOST_CHECK_EQUAL( login_api1.get_available_api_sets().size(), 10u ); + auto nn_api3 = login_api1.network_node(); + auto db_api3 = login_api1.database(); + auto his_api3 = login_api1.history(); + auto ord_api3 = login_api1.orders(); + auto nb_api3 = login_api1.network_broadcast(); + auto as_api3 = login_api1.asset(); + auto cr_api3 = login_api1.crypto(); + auto blk_api3 = login_api1.block(); + auto co_api3 = login_api1.custom_operations(); + auto dbg_api3 = login_api1.debug(); + BOOST_CHECK( his_api1 == his_api3 ); + + login_api1.logout(); + BOOST_CHECK_EQUAL( login_api1.get_available_api_sets().size(), 0u ); + BOOST_CHECK_THROW( login_api1.network_node(), fc::exception ); + BOOST_CHECK_THROW( login_api1.database(), fc::exception ); + BOOST_CHECK_THROW( login_api1.history(), fc::exception ); + + login_api1.login("bytemaster2","randompassword"); // */* + BOOST_CHECK_EQUAL( login_api1.get_available_api_sets().size(), 3u ); + BOOST_CHECK_THROW( login_api1.network_node(), fc::exception ); + auto db_api4 = login_api1.database(); + auto his_api4 = login_api1.history(); + auto nb_api4 = login_api1.network_broadcast(); + BOOST_CHECK( his_api1 == his_api4 ); + +} FC_CAPTURE_LOG_AND_RETHROW( (0) ) } + BOOST_AUTO_TEST_SUITE_END() From 8c93d58d38db8debd8c39467169fa01ea147e1d3 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 29 Jul 2022 17:15:15 +0000 Subject: [PATCH 152/338] Update tests --- tests/tests/login_api_tests.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/tests/login_api_tests.cpp b/tests/tests/login_api_tests.cpp index a997349778..513d92a2d2 100644 --- a/tests/tests/login_api_tests.cpp +++ b/tests/tests/login_api_tests.cpp @@ -76,7 +76,7 @@ BOOST_AUTO_TEST_CASE( login_test ) BOOST_CHECK_THROW( login_api1.database(), fc::exception ); 
BOOST_CHECK_THROW( login_api1.history(), fc::exception ); - login_api1.login("bytemaster","supersecret4"); // wrong password + login_api1.login("bytemaster","looooooooooooooooongpassword"); // wrong password BOOST_CHECK_EQUAL( login_api1.get_available_api_sets().size(), 0u ); BOOST_CHECK_THROW( login_api1.network_node(), fc::exception ); BOOST_CHECK_THROW( login_api1.database(), fc::exception ); From 717f8927aad1b2d5a7a6b013c32f493323bb1bdb Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 29 Jul 2022 19:33:16 +0000 Subject: [PATCH 153/338] Update login_api::get_config API to require login --- libraries/app/api.cpp | 2 ++ tests/tests/login_api_tests.cpp | 3 +++ 2 files changed, 5 insertions(+) diff --git a/libraries/app/api.cpp b/libraries/app/api.cpp index 7dc09058c2..7ced0281c0 100644 --- a/libraries/app/api.cpp +++ b/libraries/app/api.cpp @@ -97,6 +97,8 @@ namespace graphene { namespace app { application_options login_api::get_config() const { + bool allowed = !_allowed_apis.empty(); + FC_ASSERT( allowed, "Access denied, please login" ); return _app.get_options(); } diff --git a/tests/tests/login_api_tests.cpp b/tests/tests/login_api_tests.cpp index 513d92a2d2..5c66087659 100644 --- a/tests/tests/login_api_tests.cpp +++ b/tests/tests/login_api_tests.cpp @@ -39,6 +39,9 @@ BOOST_AUTO_TEST_CASE( get_config_test ) auto opt = app.get_options(); graphene::app::login_api login_api1( app ); + BOOST_CHECK_THROW( login_api1.get_config(), fc::exception ); + + login_api1.login("",""); // */* auto config = login_api1.get_config(); BOOST_CHECK_EQUAL( default_opt.api_limit_get_call_orders, config.api_limit_get_call_orders ); From 0faac5e2d00375600138956b7ea0d493d41a0765 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 29 Jul 2022 19:44:10 +0000 Subject: [PATCH 154/338] Update docs for login_api::get_config API --- libraries/app/include/graphene/app/api.hpp | 1 + 1 file changed, 1 insertion(+) diff --git a/libraries/app/include/graphene/app/api.hpp b/libraries/app/include/graphene/app/api.hpp index 1abb1eeda1..521fe3e8d0 100644 --- a/libraries/app/include/graphene/app/api.hpp +++ b/libraries/app/include/graphene/app/api.hpp @@ -688,6 +688,7 @@ namespace graphene { namespace app { bool logout(); /// @brief Retrieve configured application options + /// @note It requires the user to be logged in and have access to at least one API set other than login_api. 
application_options get_config() const; /// @brief Retrieve a list of API sets that the user has access to From 351010b6a34a20864a872ffd5a5a2dc2995d5e2c Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 29 Jul 2022 20:10:54 +0000 Subject: [PATCH 155/338] Add api-node-info startup option and get_info API --- libraries/app/api.cpp | 5 +++++ libraries/app/application.cpp | 10 ++++++++++ libraries/app/application_impl.hxx | 3 +++ libraries/app/include/graphene/app/api.hpp | 4 ++++ libraries/app/include/graphene/app/application.hpp | 2 ++ tests/common/database_fixture.cpp | 3 +++ tests/tests/login_api_tests.cpp | 3 +++ 7 files changed, 30 insertions(+) diff --git a/libraries/app/api.cpp b/libraries/app/api.cpp index 7ced0281c0..657b4f424e 100644 --- a/libraries/app/api.cpp +++ b/libraries/app/api.cpp @@ -95,6 +95,11 @@ namespace graphene { namespace app { return false; } + string login_api::get_info() const + { + return _app.get_node_info(); + } + application_options login_api::get_config() const { bool allowed = !_allowed_apis.empty(); diff --git a/libraries/app/application.cpp b/libraries/app/application.cpp index 427eddbb21..5628f6f475 100644 --- a/libraries/app/application.cpp +++ b/libraries/app/application.cpp @@ -268,6 +268,9 @@ void application_impl::initialize(const fc::path& data_dir, shared_ptrcount("api-node-info") > 0) + _node_info = _options->at("api-node-info").as(); + if( _options->count("api-access") > 0 ) { @@ -1166,6 +1169,8 @@ void application::set_program_options(boost::program_options::options_descriptio ("genesis-json", bpo::value(), "File to read Genesis State from") ("dbg-init-key", bpo::value(), "Block signing key to use for init witnesses, overrides genesis file, for debug") + ("api-node-info", bpo::value(), + "A string defined by the node operator, which can be retrieved via the login_api::get_info API") ("api-access", bpo::value(), "JSON file specifying API permissions") ("io-threads", bpo::value()->implicit_value(0), "Number of IO threads, default to 0 for auto-configuration") @@ -1386,6 +1391,11 @@ const application_options& application::get_options() const return my->_app_options; } +const string& application::get_node_info() const +{ + return my->_node_info; +} + // namespace detail } } diff --git a/libraries/app/application_impl.hxx b/libraries/app/application_impl.hxx index 4774dedc89..ec6d03db89 100644 --- a/libraries/app/application_impl.hxx +++ b/libraries/app/application_impl.hxx @@ -224,6 +224,9 @@ class application_impl : public net::node_delegate, public std::enable_shared_fr bool _is_finished_syncing = false; + /// A string defined by the node operator, which can be retrieved via the login_api::get_info API + string _node_info; + fc::serial_valve valve; }; diff --git a/libraries/app/include/graphene/app/api.hpp b/libraries/app/include/graphene/app/api.hpp index 521fe3e8d0..1647d5e78f 100644 --- a/libraries/app/include/graphene/app/api.hpp +++ b/libraries/app/include/graphene/app/api.hpp @@ -687,6 +687,9 @@ namespace graphene { namespace app { /// @note Already allocated API set IDs are still accessible after calling this. bool logout(); + /// @brief Retrive the node info string configured by the node operator + string get_info() const; + /// @brief Retrieve configured application options /// @note It requires the user to be logged in and have access to at least one API set other than login_api. 
application_options get_config() const; @@ -817,6 +820,7 @@ FC_API(graphene::app::dummy_api, FC_API(graphene::app::login_api, (login) (logout) + (get_info) (get_config) (get_available_api_sets) (block) diff --git a/libraries/app/include/graphene/app/application.hpp b/libraries/app/include/graphene/app/application.hpp index a99860356f..8d6ff2a672 100644 --- a/libraries/app/include/graphene/app/application.hpp +++ b/libraries/app/include/graphene/app/application.hpp @@ -156,6 +156,8 @@ namespace graphene { namespace app { std::shared_ptr elasticsearch_thread; + const string& get_node_info() const; + private: /// Add an available plugin void add_available_plugin( std::shared_ptr p ) const; diff --git a/tests/common/database_fixture.cpp b/tests/common/database_fixture.cpp index 2f4970b1e6..33c4396814 100644 --- a/tests/common/database_fixture.cpp +++ b/tests/common/database_fixture.cpp @@ -311,7 +311,10 @@ std::shared_ptr database_fixture_base::in if( fixture.current_suite_name == "login_api_tests" ) { if( fixture.current_test_name =="get_config_test" ) + { + fc::set_option( options, "api-node-info", string("Test API node") ); fc::set_option( options, "api-limit-get-full-accounts-subscribe", (uint32_t)120 ); + } if( fixture.current_test_name =="login_test" ) { // bytemaster/supersecret, user2/superpassword2 diff --git a/tests/tests/login_api_tests.cpp b/tests/tests/login_api_tests.cpp index 5c66087659..b052ee7345 100644 --- a/tests/tests/login_api_tests.cpp +++ b/tests/tests/login_api_tests.cpp @@ -39,6 +39,9 @@ BOOST_AUTO_TEST_CASE( get_config_test ) auto opt = app.get_options(); graphene::app::login_api login_api1( app ); + + BOOST_CHECK( login_api1.get_info() == "Test API node" ); + BOOST_CHECK_THROW( login_api1.get_config(), fc::exception ); login_api1.login("",""); // */* From 1103a6ba696ca2e9234b3e1ec704e217cf157526 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 5 Aug 2022 23:24:39 +0000 Subject: [PATCH 156/338] Extend custom_operations_api::get_storage_info API --- libraries/app/api.cpp | 73 ++++++++++++++++--- libraries/app/application.cpp | 7 ++ libraries/app/include/graphene/app/api.hpp | 32 ++++++-- .../app/include/graphene/app/application.hpp | 2 + .../custom_operations/custom_objects.hpp | 30 ++++++++ 5 files changed, 127 insertions(+), 17 deletions(-) diff --git a/libraries/app/api.cpp b/libraries/app/api.cpp index 657b4f424e..70d0d04331 100644 --- a/libraries/app/api.cpp +++ b/libraries/app/api.cpp @@ -960,21 +960,72 @@ namespace graphene { namespace app { } vector custom_operations_api::get_storage_info( - const std::string& account_id_or_name, - const std::string& catalog)const + const optional& o_account_name_or_id, + const optional& catalog, + const optional& key, + const optional& limit, + const optional& start_id )const { auto plugin = _app.get_plugin("custom_operations"); - FC_ASSERT( plugin ); + FC_ASSERT( plugin, "The custom_operations plugin is not enabled" ); database_api_helper db_api_helper( _app ); - const account_id_type account_id = db_api_helper.get_account_from_string(account_id_or_name)->id; - vector results; - const auto& storage_index = _app.chain_database()->get_index_type(); - const auto& by_account_catalog_idx = storage_index.indices().get(); - auto range = by_account_catalog_idx.equal_range(make_tuple(account_id, catalog)); - for( const account_storage_object& aso : boost::make_iterator_range( range.first, range.second ) ) - results.push_back(aso); - return results; + const auto& storage_index = _app.chain_database()->get_index_type().indices(); + + 
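The remainder of the function (continued below) is a dispatch on which optional filters were supplied: each combination of account, catalog and key is forwarded to the shared get_objects_by_x helper together with an index sorted by exactly that prefix. The template arguments of the storage_index.get() calls are not shown here; by the tags introduced in custom_objects.hpp later in this patch they presumably select by_account_catalog_key, by_account_catalog, by_account, by_catalog_key and by_catalog respectively, with the unfiltered case falling back to an id-ordered index. One branch with the assumed tag spelled out:

    // account and catalog given, no key: walk the by_account_catalog index from start_id
    return db_api_helper.get_objects_by_x< account_storage_object, account_storage_id_type >(
          &application_options::api_limit_get_storage_info,
          storage_index.get< by_account_catalog >(),   // tag assumed, namespace qualification omitted
          limit, start_id, account_id, *catalog );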
if( o_account_name_or_id.valid() ) + { + const string& account_name_or_id = *o_account_name_or_id; + const account_id_type account_id = db_api_helper.get_account_from_string(account_name_or_id)->id; + if( catalog.valid() ) + { + if( key.valid() ) + return db_api_helper.get_objects_by_x< account_storage_object, + account_storage_id_type + >( &application_options::api_limit_get_storage_info, + storage_index.get(), + limit, start_id, account_id, *catalog, *key ); + else + return db_api_helper.get_objects_by_x< account_storage_object, + account_storage_id_type + >( &application_options::api_limit_get_storage_info, + storage_index.get(), + limit, start_id, account_id, *catalog ); + } + else + { + FC_ASSERT( !key.valid(), "Can not specify key if catalog is not specified" ); + return db_api_helper.get_objects_by_x< account_storage_object, + account_storage_id_type + >( &application_options::api_limit_get_storage_info, + storage_index.get(), + limit, start_id, account_id ); + } + } + else if( catalog.valid() ) + { + if( key.valid() ) + return db_api_helper.get_objects_by_x< account_storage_object, + account_storage_id_type + >( &application_options::api_limit_get_storage_info, + storage_index.get(), + limit, start_id, *catalog, *key ); + else + return db_api_helper.get_objects_by_x< account_storage_object, + account_storage_id_type + >( &application_options::api_limit_get_storage_info, + storage_index.get(), + limit, start_id, *catalog ); + } + else + { + FC_ASSERT( !key.valid(), "Can not specify key if catalog is not specified" ); + return db_api_helper.get_objects_by_x< account_storage_object, + account_storage_id_type + >( &application_options::api_limit_get_storage_info, + storage_index.get(), + limit, start_id ); + } + } } } // graphene::app diff --git a/libraries/app/application.cpp b/libraries/app/application.cpp index 5628f6f475..50e10cfb1a 100644 --- a/libraries/app/application.cpp +++ b/libraries/app/application.cpp @@ -445,6 +445,10 @@ void application_impl::set_api_limit() { _app_options.api_limit_get_credit_offers = _options->at("api-limit-get-credit-offers").as(); } + if(_options->count("api-limit-get-storage-info") > 0) { + _app_options.api_limit_get_storage_info = + _options->at("api-limit-get-storage-info").as(); + } } graphene::chain::genesis_state_type application_impl::initialize_genesis_state() const @@ -1287,6 +1291,9 @@ void application::set_program_options(boost::program_options::options_descriptio ("api-limit-get-credit-offers", bpo::value()->default_value(default_opts.api_limit_get_credit_offers), "Set maximum limit value for database APIs which query for credit offers or credit deals") + ("api-limit-get-storage-info", + bpo::value()->default_value(default_opts.api_limit_get_storage_info), + "Set maximum limit value for APIs which query for account storage info") ; command_line_options.add(configuration_file_options); command_line_options.add_options() diff --git a/libraries/app/include/graphene/app/api.hpp b/libraries/app/include/graphene/app/api.hpp index 1647d5e78f..6aa4181ed1 100644 --- a/libraries/app/include/graphene/app/api.hpp +++ b/libraries/app/include/graphene/app/api.hpp @@ -618,16 +618,36 @@ namespace graphene { namespace app { explicit custom_operations_api(application& app); /** - * @brief Get all stored objects of an account in a particular catalog + * @brief Get stored objects * - * @param account_name_or_id The account name or ID to get info from - * @param catalog Category classification. Each account can store multiple catalogs. 
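Taken together, the branches above and the @note list added to the header just below admit six filter combinations, and a key is only accepted together with a catalog. A usage sketch, assuming a connected custom_operations_api handle named capi (an empty {} stands for an omitted optional, as in the tests later in this series):

    capi.get_storage_info( "alice", "settings", "language" );  // a) account + catalog + key
    capi.get_storage_info( "alice", "settings" );              // b) account + catalog
    capi.get_storage_info( "alice" );                          // c) account only
    capi.get_storage_info( {}, "settings", "language" );       // d) catalog + key
    capi.get_storage_info( {}, "settings" );                   // e) catalog only
    capi.get_storage_info();                                   // f) no filter: every stored object
    // rejected by the FC_ASSERT above: a key without a catalog
    // capi.get_storage_info( "alice", {}, "language" );       // throws fc::exception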
+ * @param account_name_or_id The account name or ID to get info from. Optional. + * @param catalog Category classification. Each account can store multiple catalogs. Optional. + * @param key Key classification. Each catalog can contain multiple keys. Optional. + * @param limit The limitation of items each query can fetch, not greater than the configured value of + * @a api_limit_get_storage_info + * @param start_id Start ID of stored object, fetch objects whose IDs are greater than or equal to this ID + * @return The stored objects found, ordered by their IDs * - * @return The vector of objects of the account or empty + * @note + * 1. By passing @a null to various optional arguments, this API can be used to query stored objects by + * a) account, catalog and key, or + * b) account and catalog, or + * c) account, or + * d) catalog and key, or + * e) catalog, or + * f) no condition. + * 2. If @p account_name_or_id is specified but cannot be tied to an account, an error will be returned. + * 3. @p limit can be omitted or be @a null, if so the default value of + * @ref application_options::api_limit_get_tickets will be used. + * 4. @p start_id can be omitted or be null, if so the api will return the "first page" of objects. + * 5. Can only omit one or more arguments in the end of the list, but not one or more in the middle. */ vector get_storage_info( - const std::string& account_name_or_id, - const std::string& catalog )const; + const optional& account_name_or_id = optional(), + const optional& catalog = optional(), + const optional& key = optional(), + const optional& limit = application_options::get_default().api_limit_get_storage_info, + const optional& start_id = optional() )const; private: application& _app; diff --git a/libraries/app/include/graphene/app/application.hpp b/libraries/app/include/graphene/app/application.hpp index 8d6ff2a672..594610e614 100644 --- a/libraries/app/include/graphene/app/application.hpp +++ b/libraries/app/include/graphene/app/application.hpp @@ -79,6 +79,7 @@ namespace graphene { namespace app { uint32_t api_limit_get_liquidity_pools = 101; uint32_t api_limit_get_samet_funds = 101; uint32_t api_limit_get_credit_offers = 101; + uint32_t api_limit_get_storage_info = 101; static constexpr application_options get_default() { @@ -210,6 +211,7 @@ FC_REFLECT( graphene::app::application_options, ( api_limit_get_liquidity_pools ) ( api_limit_get_samet_funds ) ( api_limit_get_credit_offers ) + ( api_limit_get_storage_info ) ) GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::app::application_options ) diff --git a/libraries/plugins/custom_operations/include/graphene/custom_operations/custom_objects.hpp b/libraries/plugins/custom_operations/include/graphene/custom_operations/custom_objects.hpp index 6f32d8ed31..2a550ac46d 100644 --- a/libraries/plugins/custom_operations/include/graphene/custom_operations/custom_objects.hpp +++ b/libraries/plugins/custom_operations/include/graphene/custom_operations/custom_objects.hpp @@ -50,6 +50,10 @@ struct account_storage_object : public abstract_object }; struct by_account_catalog_key; +struct by_account_catalog; +struct by_account; +struct by_catalog_key; +struct by_catalog; using account_storage_multi_idx_type = multi_index_container< account_storage_object, @@ -61,6 +65,32 @@ using account_storage_multi_idx_type = multi_index_container< member< account_storage_object, string, &account_storage_object::catalog >, member< account_storage_object, string, &account_storage_object::key > > + >, + ordered_unique< tag, + composite_key< 
account_storage_object, + member< account_storage_object, account_id_type, &account_storage_object::account >, + member< account_storage_object, string, &account_storage_object::catalog >, + member< object, object_id_type, &object::id > + > + >, + ordered_unique< tag, + composite_key< account_storage_object, + member< account_storage_object, account_id_type, &account_storage_object::account >, + member< object, object_id_type, &object::id > + > + >, + ordered_unique< tag, + composite_key< account_storage_object, + member< account_storage_object, string, &account_storage_object::catalog >, + member< account_storage_object, string, &account_storage_object::key >, + member< object, object_id_type, &object::id > + > + >, + ordered_unique< tag, + composite_key< account_storage_object, + member< account_storage_object, string, &account_storage_object::catalog >, + member< object, object_id_type, &object::id > + > > > >; From 5ee1c9a7bc7e9d68a06f551e74bda16c7bd56c0f Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 6 Aug 2022 00:26:24 +0000 Subject: [PATCH 157/338] Add tests for extended get_storage_info API --- tests/common/database_fixture.cpp | 2 + tests/tests/custom_operations.cpp | 173 ++++++++++++++++++++++++++++-- 2 files changed, 164 insertions(+), 11 deletions(-) diff --git a/tests/common/database_fixture.cpp b/tests/common/database_fixture.cpp index 33c4396814..5eb7b4d545 100644 --- a/tests/common/database_fixture.cpp +++ b/tests/common/database_fixture.cpp @@ -440,6 +440,8 @@ std::shared_ptr database_fixture_base::in fixture.current_test_name == "custom_operations_account_storage_list_test") { fixture.app.register_plugin(true); fc::set_option( options, "custom-operations-start-block", uint32_t(1) ); + if( fixture.current_test_name == "custom_operations_account_storage_map_test" ) + fc::set_option( options, "api-limit-get-storage-info", uint32_t(120) ); } fc::set_option( options, "bucket-size", string("[15]") ); diff --git a/tests/tests/custom_operations.cpp b/tests/tests/custom_operations.cpp index 473d3edd01..1099778c45 100644 --- a/tests/tests/custom_operations.cpp +++ b/tests/tests/custom_operations.cpp @@ -123,7 +123,7 @@ try { // check nathan stored data with the api storage_results_nathan = custom_operations_api.get_storage_info("nathan", "settings"); - BOOST_CHECK_EQUAL(storage_results_nathan.size(), 2 ); + BOOST_REQUIRE_EQUAL(storage_results_nathan.size(), 2U ); BOOST_CHECK_EQUAL(storage_results_nathan[0].account.instance.value, 16 ); BOOST_CHECK_EQUAL(storage_results_nathan[0].key, "image_url"); BOOST_CHECK_EQUAL(storage_results_nathan[0].value->as_string(), "http://some.image.url/img.jpg"); @@ -140,7 +140,7 @@ try { // check old and new stuff storage_results_nathan = custom_operations_api.get_storage_info("nathan", "settings"); - BOOST_CHECK_EQUAL(storage_results_nathan.size(), 3 ); + BOOST_REQUIRE_EQUAL(storage_results_nathan.size(), 3U ); BOOST_CHECK_EQUAL(storage_results_nathan[0].account.instance.value, 16 ); BOOST_CHECK_EQUAL(storage_results_nathan[0].key, "image_url"); BOOST_CHECK_EQUAL(storage_results_nathan[0].value->as_string(), "http://new.image.url/newimg.jpg"); @@ -158,7 +158,7 @@ try { // theme is removed from the storage storage_results_nathan = custom_operations_api.get_storage_info("nathan", "settings"); - BOOST_CHECK_EQUAL(storage_results_nathan.size(), 2 ); + BOOST_REQUIRE_EQUAL(storage_results_nathan.size(), 2U ); BOOST_CHECK_EQUAL(storage_results_nathan[0].account.instance.value, 16 ); BOOST_CHECK_EQUAL(storage_results_nathan[0].key, "image_url"); 
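Each of the new indices ends with object::id as its last key component, so rows sharing an account and/or catalog prefix come back in a stable, id-ascending order, which is what makes start_id paging well defined. A rough sketch of the lookup the generic helper presumably performs for the account-plus-catalog case (simplified, and assuming the by_account_catalog tag; the real get_objects_by_x is parameterised over index and key types):

    const auto& idx = storage_index.get< by_account_catalog >();
    auto itr = start_id.valid()
             ? idx.lower_bound( boost::make_tuple( account_id, catalog, *start_id ) )
             : idx.lower_bound( boost::make_tuple( account_id, catalog ) );
    vector< account_storage_object > results;
    while( itr != idx.end() && itr->account == account_id && itr->catalog == catalog
           && results.size() < limit )
       results.push_back( *itr++ );
    return results;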
BOOST_CHECK_EQUAL(storage_results_nathan[0].value->as_string(), "http://new.image.url/newimg.jpg"); @@ -174,7 +174,7 @@ try { // nothing changes storage_results_nathan = custom_operations_api.get_storage_info("nathan", "settings"); - BOOST_CHECK_EQUAL(storage_results_nathan.size(), 2 ); + BOOST_REQUIRE_EQUAL(storage_results_nathan.size(), 2U ); BOOST_CHECK_EQUAL(storage_results_nathan[0].account.instance.value, 16 ); BOOST_CHECK_EQUAL(storage_results_nathan[0].key, "image_url"); BOOST_CHECK_EQUAL(storage_results_nathan[0].value->as_string(), "http://new.image.url/newimg.jpg"); @@ -191,7 +191,7 @@ try { generate_block(); vector storage_results_alice = custom_operations_api.get_storage_info("alice", "random"); - BOOST_CHECK_EQUAL(storage_results_alice.size(), 1 ); + BOOST_REQUIRE_EQUAL(storage_results_alice.size(), 1U ); BOOST_CHECK_EQUAL(storage_results_alice[0].account.instance.value, 17 ); BOOST_CHECK_EQUAL(storage_results_alice[0].key, "key1"); BOOST_CHECK_EQUAL(storage_results_alice[0].value->as_string(), "value2"); @@ -204,7 +204,7 @@ try { generate_block(); storage_results_alice = custom_operations_api.get_storage_info("alice", "account_object"); - BOOST_CHECK_EQUAL(storage_results_alice.size(), 1); + BOOST_REQUIRE_EQUAL(storage_results_alice.size(), 1U); BOOST_CHECK_EQUAL(storage_results_alice[0].account.instance.value, 17); BOOST_CHECK_EQUAL(storage_results_alice[0].key, "nathan"); BOOST_CHECK_EQUAL(storage_results_alice[0].value->as(20).name, "nathan"); @@ -218,7 +218,7 @@ try { generate_block(); storage_results_alice = custom_operations_api.get_storage_info("alice", "account_object"); - BOOST_CHECK_EQUAL(storage_results_alice.size(), 3); + BOOST_REQUIRE_EQUAL(storage_results_alice.size(), 3U); BOOST_CHECK_EQUAL(storage_results_alice[0].account.instance.value, 17); BOOST_CHECK_EQUAL(storage_results_alice[0].key, "nathan"); BOOST_CHECK_EQUAL(storage_results_alice[0].value->as(20).name, "nathan"); @@ -227,6 +227,154 @@ try { BOOST_CHECK_EQUAL(storage_results_alice[1].value->as(20).name, "patty"); BOOST_CHECK_EQUAL(storage_results_alice[2].key, "robert"); BOOST_CHECK_EQUAL(storage_results_alice[2].value->as(20).name, "robert"); + + // alice adds key-value data via custom operation to a settings catalog + catalog = "settings"; + pairs.clear(); + pairs["image_url"] = fc::json::to_string("http://some.other.image.url/img.jpg"); + map_operation(pairs, false, catalog, alice_id, alice_private_key, db); + generate_block(); + + // test API limit config + BOOST_CHECK_THROW( custom_operations_api.get_storage_info("alice", "account_object", {}, 121), fc::exception ); + + // This does not throw + storage_results_alice = custom_operations_api.get_storage_info("alice", "account_object", {}, 120); + BOOST_REQUIRE_EQUAL(storage_results_alice.size(), 3U); + BOOST_CHECK_EQUAL(storage_results_alice[0].account.instance.value, 17); + BOOST_CHECK_EQUAL(storage_results_alice[0].key, "nathan"); + BOOST_CHECK_EQUAL(storage_results_alice[0].value->as(20).name, "nathan"); + BOOST_CHECK_EQUAL(storage_results_alice[1].account.instance.value, 17); + BOOST_CHECK_EQUAL(storage_results_alice[1].key, "patty"); + BOOST_CHECK_EQUAL(storage_results_alice[1].value->as(20).name, "patty"); + BOOST_CHECK_EQUAL(storage_results_alice[2].key, "robert"); + BOOST_CHECK_EQUAL(storage_results_alice[2].value->as(20).name, "robert"); + + // query by a wrong account + BOOST_CHECK_THROW( custom_operations_api.get_storage_info("alice1", "account_object" ), fc::exception ); + + // query by account and key + BOOST_CHECK_THROW( 
custom_operations_api.get_storage_info("alice", {}, "patty" ), fc::exception ); + + // query by key only + BOOST_CHECK_THROW( custom_operations_api.get_storage_info({}, {}, "patty" ), fc::exception ); + + // query by account, catalog and key + storage_results_alice = custom_operations_api.get_storage_info("alice", "account_object", "alice1"); + BOOST_CHECK_EQUAL(storage_results_alice.size(), 0 ); + + storage_results_alice = custom_operations_api.get_storage_info("alice", "account_object1", "patty"); + BOOST_CHECK_EQUAL(storage_results_alice.size(), 0 ); + + storage_results_alice = custom_operations_api.get_storage_info("alice", "account_object", "patty"); + BOOST_REQUIRE_EQUAL(storage_results_alice.size(), 1U ); + BOOST_CHECK_EQUAL(storage_results_alice[0].key, "patty"); + BOOST_CHECK_EQUAL(storage_results_alice[0].value->as(20).name, "patty"); + + // query by account only + storage_results_alice = custom_operations_api.get_storage_info("alice"); + BOOST_REQUIRE_EQUAL(storage_results_alice.size(), 5U ); + BOOST_CHECK_EQUAL(storage_results_alice[0].catalog, "random"); + BOOST_CHECK_EQUAL(storage_results_alice[0].key, "key1"); + BOOST_CHECK_EQUAL(storage_results_alice[0].value->as_string(), "value2"); + BOOST_CHECK_EQUAL(storage_results_alice[1].catalog, "account_object"); + BOOST_CHECK_EQUAL(storage_results_alice[1].key, "nathan"); + BOOST_CHECK_EQUAL(storage_results_alice[1].value->as(20).name, "nathan"); + BOOST_CHECK_EQUAL(storage_results_alice[2].catalog, "account_object"); + BOOST_CHECK_EQUAL(storage_results_alice[2].key, "patty"); + BOOST_CHECK_EQUAL(storage_results_alice[2].value->as(20).name, "patty"); + BOOST_CHECK_EQUAL(storage_results_alice[3].catalog, "account_object"); + BOOST_CHECK_EQUAL(storage_results_alice[3].key, "robert"); + BOOST_CHECK_EQUAL(storage_results_alice[3].value->as(20).name, "robert"); + BOOST_CHECK_EQUAL(storage_results_alice[4].catalog, "settings"); + BOOST_CHECK_EQUAL(storage_results_alice[4].key, "image_url"); + BOOST_CHECK_EQUAL(storage_results_alice[4].value->as_string(), "http://some.other.image.url/img.jpg"); + + // query by catalog only + auto storage_results = custom_operations_api.get_storage_info({}, "settings1"); + BOOST_REQUIRE_EQUAL(storage_results.size(), 0 ); + + storage_results = custom_operations_api.get_storage_info({}, "settings"); + BOOST_REQUIRE_EQUAL(storage_results.size(), 3U ); + BOOST_CHECK_EQUAL(storage_results[0].account.instance.value, 16 ); + BOOST_CHECK_EQUAL(storage_results[0].key, "image_url"); + BOOST_CHECK_EQUAL(storage_results[0].value->as_string(), "http://new.image.url/newimg.jpg"); + BOOST_CHECK_EQUAL(storage_results[1].account.instance.value, 16 ); + BOOST_CHECK_EQUAL(storage_results[1].key, "language"); + BOOST_CHECK_EQUAL(storage_results[1].value->as_string(), "en"); + BOOST_CHECK_EQUAL(storage_results[2].account.instance.value, 17 ); + BOOST_CHECK_EQUAL(storage_results[2].key, "image_url"); + BOOST_CHECK_EQUAL(storage_results[2].value->as_string(), "http://some.other.image.url/img.jpg"); + + // Pagination + storage_results = custom_operations_api.get_storage_info({}, "settings", {}, 2); + BOOST_REQUIRE_EQUAL(storage_results.size(), 2U ); + BOOST_CHECK_EQUAL(storage_results[0].account.instance.value, 16 ); + BOOST_CHECK_EQUAL(storage_results[0].key, "image_url"); + BOOST_CHECK_EQUAL(storage_results[0].value->as_string(), "http://new.image.url/newimg.jpg"); + BOOST_CHECK_EQUAL(storage_results[1].account.instance.value, 16 ); + BOOST_CHECK_EQUAL(storage_results[1].key, "language"); + 
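The start_id parameter is inclusive ("greater than or equal to"), which the pagination checks here rely on: passing the id of the last row of one page returns that same row again as the first element of the next page, giving the caller a cheap continuity check. A client that wants non-overlapping pages can simply drop that first element, for example:

    auto page = capi.get_storage_info( {}, "settings", {}, 2 );            // first two rows
    account_storage_id_type last_id = page.back().id;
    auto next = capi.get_storage_info( {}, "settings", {}, 2, last_id );   // starts at last_id again
    // next.front() duplicates page.back(); skip it when concatenating pages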
BOOST_CHECK_EQUAL(storage_results[1].value->as_string(), "en"); + + account_storage_id_type storage_id = storage_results[1].id; + + storage_results = custom_operations_api.get_storage_info({}, "settings", {}, 2, storage_id); + BOOST_REQUIRE_EQUAL(storage_results.size(), 2U ); + BOOST_CHECK_EQUAL(storage_results[0].account.instance.value, 16 ); + BOOST_CHECK_EQUAL(storage_results[0].key, "language"); + BOOST_CHECK_EQUAL(storage_results[0].value->as_string(), "en"); + BOOST_CHECK_EQUAL(storage_results[1].account.instance.value, 17 ); + BOOST_CHECK_EQUAL(storage_results[1].key, "image_url"); + BOOST_CHECK_EQUAL(storage_results[1].value->as_string(), "http://some.other.image.url/img.jpg"); + + // query by catalog and key + storage_results = custom_operations_api.get_storage_info({}, "settings", "test"); + BOOST_REQUIRE_EQUAL(storage_results.size(), 0 ); + + storage_results = custom_operations_api.get_storage_info({}, "settings1", "image_url"); + BOOST_REQUIRE_EQUAL(storage_results.size(), 0 ); + + storage_results = custom_operations_api.get_storage_info({}, "settings", "image_url"); + BOOST_REQUIRE_EQUAL(storage_results.size(), 2U ); + BOOST_CHECK_EQUAL(storage_results[0].account.instance.value, 16 ); + BOOST_CHECK_EQUAL(storage_results[0].key, "image_url"); + BOOST_CHECK_EQUAL(storage_results[0].value->as_string(), "http://new.image.url/newimg.jpg"); + BOOST_CHECK_EQUAL(storage_results[1].account.instance.value, 17 ); + BOOST_CHECK_EQUAL(storage_results[1].key, "image_url"); + BOOST_CHECK_EQUAL(storage_results[1].value->as_string(), "http://some.other.image.url/img.jpg"); + + // query all + storage_results = custom_operations_api.get_storage_info(); + BOOST_REQUIRE_EQUAL(storage_results.size(), 7U ); + BOOST_CHECK_EQUAL(storage_results[0].account.instance.value, 16 ); + BOOST_CHECK_EQUAL(storage_results[0].catalog, "settings"); + BOOST_CHECK_EQUAL(storage_results[0].key, "image_url"); + BOOST_CHECK_EQUAL(storage_results[0].value->as_string(), "http://new.image.url/newimg.jpg"); + BOOST_CHECK_EQUAL(storage_results[1].account.instance.value, 16 ); + BOOST_CHECK_EQUAL(storage_results[1].catalog, "settings"); + BOOST_CHECK_EQUAL(storage_results[1].key, "language"); + BOOST_CHECK_EQUAL(storage_results[1].value->as_string(), "en"); + BOOST_CHECK_EQUAL(storage_results[2].account.instance.value, 17 ); + BOOST_CHECK_EQUAL(storage_results[2].catalog, "random"); + BOOST_CHECK_EQUAL(storage_results[2].key, "key1"); + BOOST_CHECK_EQUAL(storage_results[2].value->as_string(), "value2"); + BOOST_CHECK_EQUAL(storage_results[3].account.instance.value, 17 ); + BOOST_CHECK_EQUAL(storage_results[3].catalog, "account_object"); + BOOST_CHECK_EQUAL(storage_results[3].key, "nathan"); + BOOST_CHECK_EQUAL(storage_results[3].value->as(20).name, "nathan"); + BOOST_CHECK_EQUAL(storage_results[4].account.instance.value, 17 ); + BOOST_CHECK_EQUAL(storage_results[4].catalog, "account_object"); + BOOST_CHECK_EQUAL(storage_results[4].key, "patty"); + BOOST_CHECK_EQUAL(storage_results[4].value->as(20).name, "patty"); + BOOST_CHECK_EQUAL(storage_results[5].account.instance.value, 17 ); + BOOST_CHECK_EQUAL(storage_results[5].catalog, "account_object"); + BOOST_CHECK_EQUAL(storage_results[5].key, "robert"); + BOOST_CHECK_EQUAL(storage_results[5].value->as(20).name, "robert"); + BOOST_CHECK_EQUAL(storage_results[6].account.instance.value, 17 ); + BOOST_CHECK_EQUAL(storage_results[6].catalog, "settings"); + BOOST_CHECK_EQUAL(storage_results[6].key, "image_url"); + BOOST_CHECK_EQUAL(storage_results[6].value->as_string(), 
"http://some.other.image.url/img.jpg"); + } catch (fc::exception &e) { edump((e.to_detail_string())); @@ -259,6 +407,9 @@ try { auto storage_results_nathan = custom_operations_api.get_storage_info("nathan", catalog); BOOST_CHECK_EQUAL(storage_results_nathan.size(), 0 ); + // This throws due to API limit + BOOST_CHECK_THROW( custom_operations_api.get_storage_info("nathan", catalog, {}, 120), fc::exception ); + // keys are indexed so they cant be too big(greater than CUSTOM_OPERATIONS_MAX_KEY_SIZE(200) is not allowed) catalog = "whatever"; std::string value(201, 'a'); @@ -280,7 +431,7 @@ try { // get the account list for nathan, check alice and robert are there storage_results_nathan = custom_operations_api.get_storage_info("nathan", "contact_list"); - BOOST_CHECK_EQUAL(storage_results_nathan.size(), 2 ); + BOOST_REQUIRE_EQUAL(storage_results_nathan.size(), 2U ); BOOST_CHECK_EQUAL(storage_results_nathan[0].account.instance.value, 16 ); BOOST_CHECK_EQUAL(storage_results_nathan[0].key, alice.name); BOOST_CHECK_EQUAL(storage_results_nathan[1].account.instance.value, 16 ); @@ -294,7 +445,7 @@ try { // nothing changes storage_results_nathan = custom_operations_api.get_storage_info("nathan", "contact_list"); - BOOST_CHECK_EQUAL(storage_results_nathan.size(), 2 ); + BOOST_REQUIRE_EQUAL(storage_results_nathan.size(), 2U ); BOOST_CHECK_EQUAL(storage_results_nathan[0].account.instance.value, 16 ); BOOST_CHECK_EQUAL(storage_results_nathan[0].key, alice.name); BOOST_CHECK_EQUAL(storage_results_nathan[1].account.instance.value, 16 ); @@ -308,7 +459,7 @@ try { // alice gone storage_results_nathan = custom_operations_api.get_storage_info("nathan", "contact_list"); - BOOST_CHECK_EQUAL(storage_results_nathan.size(), 1 ); + BOOST_CHECK_EQUAL(storage_results_nathan.size(), 1U ); BOOST_CHECK_EQUAL(storage_results_nathan[0].account.instance.value, 16 ); BOOST_CHECK_EQUAL(storage_results_nathan[0].key, robert.name); @@ -320,7 +471,7 @@ try { generate_block(); auto storage_results_alice = custom_operations_api.get_storage_info("alice", "contact_list"); - BOOST_CHECK_EQUAL(storage_results_alice.size(), 1 ); + BOOST_CHECK_EQUAL(storage_results_alice.size(), 1U ); BOOST_CHECK_EQUAL(storage_results_alice[0].account.instance.value, 17 ); BOOST_CHECK_EQUAL(storage_results_alice[0].key, robert.name); } From 4a82499a9919c41694c7ddb10181b3b8d99b0587 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 6 Aug 2022 08:00:57 +0000 Subject: [PATCH 158/338] Update docs for get_storage_info API --- libraries/app/include/graphene/app/api.hpp | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/libraries/app/include/graphene/app/api.hpp b/libraries/app/include/graphene/app/api.hpp index 6aa4181ed1..2d9c6ee24e 100644 --- a/libraries/app/include/graphene/app/api.hpp +++ b/libraries/app/include/graphene/app/api.hpp @@ -621,26 +621,29 @@ namespace graphene { namespace app { * @brief Get stored objects * * @param account_name_or_id The account name or ID to get info from. Optional. - * @param catalog Category classification. Each account can store multiple catalogs. Optional. - * @param key Key classification. Each catalog can contain multiple keys. Optional. + * @param catalog The catalog to get info from. Each account can store data in multiple catalogs. Optional. + * @param key The key to get info from. Each catalog can contain multiple keys. Optional. 
* @param limit The limitation of items each query can fetch, not greater than the configured value of * @a api_limit_get_storage_info * @param start_id Start ID of stored object, fetch objects whose IDs are greater than or equal to this ID - * @return The stored objects found, ordered by their IDs + * @return The stored objects found, sorted by their ID * * @note - * 1. By passing @a null to various optional arguments, this API can be used to query stored objects by + * 1. By passing @a null to various optional parameters, or omitting where applicable, this API can be used to + * query stored objects by * a) account, catalog and key, or * b) account and catalog, or * c) account, or * d) catalog and key, or * e) catalog, or - * f) no condition. + * f) unconditionally. + * Queries with keys without a catalog are not allowed. * 2. If @p account_name_or_id is specified but cannot be tied to an account, an error will be returned. * 3. @p limit can be omitted or be @a null, if so the default value of * @ref application_options::api_limit_get_tickets will be used. - * 4. @p start_id can be omitted or be null, if so the api will return the "first page" of objects. - * 5. Can only omit one or more arguments in the end of the list, but not one or more in the middle. + * 4. @p start_id can be omitted or be @a null, if so the API will return the "first page" of objects. + * 5. One or more parameters can be omitted from the end of the parameter list, and the parameters in the + * middle cannot be omitted (but can be @a null). */ vector get_storage_info( const optional& account_name_or_id = optional(), From 9651d7aa97a98398e93bfe09c0b3da0567f41f24 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 6 Aug 2022 12:00:19 +0000 Subject: [PATCH 159/338] Use configured limit as API page size if omitted --- libraries/app/api.cpp | 4 +- libraries/app/database_api.cpp | 7 +- libraries/app/database_api_helper.hxx | 3 +- libraries/app/database_api_impl.hxx | 7 +- libraries/app/include/graphene/app/api.hpp | 38 ++- .../app/include/graphene/app/database_api.hpp | 268 ++++++++++-------- 6 files changed, 171 insertions(+), 156 deletions(-) diff --git a/libraries/app/api.cpp b/libraries/app/api.cpp index 70d0d04331..868486fdd1 100644 --- a/libraries/app/api.cpp +++ b/libraries/app/api.cpp @@ -638,10 +638,8 @@ namespace graphene { namespace app { { FC_ASSERT( _app.get_options().has_market_history_plugin, "Market history plugin is not enabled." ); - uint32_t limit = olimit.valid() ? *olimit - : application_options::get_default().api_limit_get_liquidity_pool_history; - const auto configured_limit = _app.get_options().api_limit_get_liquidity_pool_history; + uint32_t limit = olimit.valid() ? 
*olimit : configured_limit; FC_ASSERT( limit <= configured_limit, "limit can not be greater than ${configured_limit}", ("configured_limit", configured_limit) ); diff --git a/libraries/app/database_api.cpp b/libraries/app/database_api.cpp index d6c59a17fa..6ad18d1d29 100644 --- a/libraries/app/database_api.cpp +++ b/libraries/app/database_api.cpp @@ -1035,18 +1035,17 @@ vector database_api_impl::get_limit_orders( const std::strin } vector database_api::get_limit_orders_by_account( const string& account_name_or_id, - optional limit, optional start_id ) + const optional& limit, const optional& start_id ) { return my->get_limit_orders_by_account( account_name_or_id, limit, start_id ); } vector database_api_impl::get_limit_orders_by_account( const string& account_name_or_id, - optional olimit, optional ostart_id ) + const optional& olimit, const optional& ostart_id ) { - uint32_t limit = olimit.valid() ? *olimit : 101; - FC_ASSERT( _app_options, "Internal error" ); const auto configured_limit = _app_options->api_limit_get_limit_orders_by_account; + uint32_t limit = olimit.valid() ? *olimit : configured_limit; FC_ASSERT( limit <= configured_limit, "limit can not be greater than ${configured_limit}", ("configured_limit", configured_limit) ); diff --git a/libraries/app/database_api_helper.hxx b/libraries/app/database_api_helper.hxx index 2df0180ab4..1c2898b42f 100644 --- a/libraries/app/database_api_helper.hxx +++ b/libraries/app/database_api_helper.hxx @@ -69,10 +69,9 @@ public: const optional& ostart_id, X... x ) const { - uint64_t limit = olimit.valid() ? *olimit : ( application_options::get_default().*app_opt_member_ptr ); - FC_ASSERT( _app_options, "Internal error" ); const auto configured_limit = _app_options->*app_opt_member_ptr; + uint64_t limit = olimit.valid() ? *olimit : configured_limit; FC_ASSERT( limit <= configured_limit, "limit can not be greater than ${configured_limit}", ("configured_limit", configured_limit) ); diff --git a/libraries/app/database_api_impl.hxx b/libraries/app/database_api_impl.hxx index ed119b7107..f6b4cad206 100644 --- a/libraries/app/database_api_impl.hxx +++ b/libraries/app/database_api_impl.hxx @@ -101,8 +101,8 @@ class database_api_impl : public std::enable_shared_from_this vector get_limit_orders( const std::string& a, const std::string& b, uint32_t limit)const; vector get_limit_orders_by_account( const string& account_name_or_id, - optional limit, - optional start_id ); + const optional& limit, + const optional& start_id ); vector get_account_limit_orders( const string& account_name_or_id, const string &base, const string "e, uint32_t limit, @@ -278,10 +278,9 @@ class database_api_impl : public std::enable_shared_from_this const optional& with_statistics, X... x )const { - uint32_t limit = olimit.valid() ? *olimit : application_options::get_default().api_limit_get_liquidity_pools; - FC_ASSERT( _app_options, "Internal error" ); const auto configured_limit = _app_options->api_limit_get_liquidity_pools; + uint32_t limit = olimit.valid() ? 
*olimit : configured_limit; FC_ASSERT( limit <= configured_limit, "limit can not be greater than ${configured_limit}", ("configured_limit", configured_limit) ); diff --git a/libraries/app/include/graphene/app/api.hpp b/libraries/app/include/graphene/app/api.hpp index 2d9c6ee24e..e1049c8289 100644 --- a/libraries/app/include/graphene/app/api.hpp +++ b/libraries/app/include/graphene/app/api.hpp @@ -208,9 +208,9 @@ namespace graphene { namespace app { * If specified, only the operations occurred not later than this time will be returned. * @param stop A UNIX timestamp. Optional. * If specified, only the operations occurred later than this time will be returned. - * @param limit Maximum quantity of operations in the history to retrieve. Optional. - * If not specified, the default value of - * @ref application_options::api_limit_get_liquidity_pool_history will be used. + * @param limit Maximum quantity of operations in the history to retrieve. Optional. + * If not specified, the configured value of + * @a api_limit_get_liquidity_pool_history will be used. * If specified, it must not exceed the configured value of * @a api_limit_get_liquidity_pool_history. * @param operation_type Optional. If specified, only the operations whose type is the specified type @@ -223,15 +223,14 @@ namespace graphene { namespace app { * the most recent records, the rest records can be retrieved with the * @ref get_liquidity_pool_history_by_sequence API. * 3. List of operation type code: 59-creation, 60-deletion, 61-deposit, 62-withdrawal, 63-exchange. - * 4. Can only omit one or more arguments in the end of the list, but not one or more in the middle. - * If need to not specify an individual argument, can specify \c null in the place. + * 4. One or more optional parameters can be omitted from the end of the parameter list, and the optional + * parameters in the middle cannot be omitted (but can be @a null). */ vector get_liquidity_pool_history( liquidity_pool_id_type pool_id, const optional& start = optional(), const optional& stop = optional(), - const optional& limit = application_options::get_default() - .api_limit_get_liquidity_pool_history, + const optional& limit = optional(), const optional& operation_type = optional() )const; /** @@ -241,9 +240,9 @@ namespace graphene { namespace app { * If specified, only the operations whose sequences are not greater than this will be returned. * @param stop A UNIX timestamp. Optional. * If specified, only operations occurred later than this time will be returned. - * @param limit Maximum quantity of operations in the history to retrieve. Optional. - * If not specified, the default value of - * @ref application_options::api_limit_get_liquidity_pool_history will be used. + * @param limit Maximum quantity of operations in the history to retrieve. Optional. + * If not specified, the configured value of + * @a api_limit_get_liquidity_pool_history will be used. * If specified, it must not exceed the configured value of * @a api_limit_get_liquidity_pool_history. * @param operation_type Optional. If specified, only the operations whose type is the specified type @@ -253,15 +252,14 @@ namespace graphene { namespace app { * @note * 1. The time must be UTC. The range is (stop, start]. * 2. List of operation type code: 59-creation, 60-deletion, 61-deposit, 62-withdrawal, 63-exchange. - * 3. Can only omit one or more arguments in the end of the list, but not one or more in the middle. - * If need to not specify an individual argument, can specify \c null in the place. + * 3. 
One or more optional parameters can be omitted from the end of the parameter list, and the optional + * parameters in the middle cannot be omitted (but can be @a null). */ vector get_liquidity_pool_history_by_sequence( liquidity_pool_id_type pool_id, const optional& start = optional(), const optional& stop = optional(), - const optional& limit = application_options::get_default() - .api_limit_get_liquidity_pool_history, + const optional& limit = optional(), const optional& operation_type = optional() )const; private: @@ -624,7 +622,7 @@ namespace graphene { namespace app { * @param catalog The catalog to get info from. Each account can store data in multiple catalogs. Optional. * @param key The key to get info from. Each catalog can contain multiple keys. Optional. * @param limit The limitation of items each query can fetch, not greater than the configured value of - * @a api_limit_get_storage_info + * @a api_limit_get_storage_info. Optional. * @param start_id Start ID of stored object, fetch objects whose IDs are greater than or equal to this ID * @return The stored objects found, sorted by their ID * @@ -639,17 +637,17 @@ namespace graphene { namespace app { * f) unconditionally. * Queries with keys without a catalog are not allowed. * 2. If @p account_name_or_id is specified but cannot be tied to an account, an error will be returned. - * 3. @p limit can be omitted or be @a null, if so the default value of - * @ref application_options::api_limit_get_tickets will be used. + * 3. @p limit can be omitted or be @a null, if so the configured value of + * @a api_limit_get_storage_info will be used. * 4. @p start_id can be omitted or be @a null, if so the API will return the "first page" of objects. - * 5. One or more parameters can be omitted from the end of the parameter list, and the parameters in the - * middle cannot be omitted (but can be @a null). + * 5. One or more optional parameters can be omitted from the end of the parameter list, and the optional + * parameters in the middle cannot be omitted (but can be @a null). */ vector get_storage_info( const optional& account_name_or_id = optional(), const optional& catalog = optional(), const optional& key = optional(), - const optional& limit = application_options::get_default().api_limit_get_storage_info, + const optional& limit = optional(), const optional& start_id = optional() )const; private: diff --git a/libraries/app/include/graphene/app/database_api.hpp b/libraries/app/include/graphene/app/database_api.hpp index 85e10d50c7..4d9cf12c53 100644 --- a/libraries/app/include/graphene/app/database_api.hpp +++ b/libraries/app/include/graphene/app/database_api.hpp @@ -465,15 +465,17 @@ class database_api * @return List of limit orders of the specified account * * @note - * 1. if @p account_name_or_id cannot be tied to an account, an error will be returned - * 2. @p limit can be omitted or be null, if so the default value of - * @ref application_options::api_limit_get_limit_orders_by_account will be used + * 1. If @p account_name_or_id cannot be tied to an account, an error will be returned + * 2. @p limit can be omitted or be @a null, if so the configured value of + * @a api_limit_get_limit_orders_by_account will be used * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of orders - * 4. can only omit one or more arguments in the end of the list, but not one or more in the middle + * 4. 
One or more optional parameters can be omitted from the end of the parameter list, and the optional + * parameters in the middle cannot be omitted (but can be @a null). */ - vector get_limit_orders_by_account( const string& account_name_or_id, - optional limit = application_options::get_default().api_limit_get_limit_orders_by_account, - optional start_id = optional() ); + vector get_limit_orders_by_account( + const string& account_name_or_id, + const optional& limit = optional(), + const optional& start_id = optional() ); /** * @brief Fetch all orders relevant to the specified account and specified market, result orders @@ -491,7 +493,7 @@ class database_api * @return List of orders from @p account_name_or_id to the corresponding account * * @note - * 1. if @p account_name_or_id cannot be tied to an account, an error will be returned + * 1. If @p account_name_or_id cannot be tied to an account, an error will be returned * 2. @p ostart_id and @p ostart_price can be empty, if so the api will return the "first page" of orders; * if @p ostart_id is specified, its price will be used to do page query preferentially, * otherwise the @p ostart_price will be used; @@ -670,13 +672,14 @@ class database_api * @return The liquidity pools * * @note - * 1. @p limit can be omitted or be null, if so the default value of - * @ref application_options::api_limit_get_liquidity_pools will be used - * 2. @p start_id can be omitted or be null, if so the api will return the "first page" of pools - * 3. can only omit one or more arguments in the end of the list, but not one or more in the middle + * 1. @p limit can be omitted or be @a null, if so the configured value of + * @a api_limit_get_liquidity_pools will be used + * 2. @p start_id can be omitted or be @a null, if so the api will return the "first page" of pools + * 3. One or more optional parameters can be omitted from the end of the parameter list, and the optional + * parameters in the middle cannot be omitted (but can be @a null). */ vector list_liquidity_pools( - const optional& limit = application_options::get_default().api_limit_get_liquidity_pools, + const optional& limit = optional(), const optional& start_id = optional(), const optional& with_statistics = false )const; @@ -690,15 +693,16 @@ class database_api * @return The liquidity pools * * @note - * 1. if @p asset_symbol_or_id cannot be tied to an asset, an error will be returned - * 2. @p limit can be omitted or be null, if so the default value of - * @ref application_options::api_limit_get_liquidity_pools will be used - * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of pools - * 4. can only omit one or more arguments in the end of the list, but not one or more in the middle + * 1. If @p asset_symbol_or_id cannot be tied to an asset, an error will be returned + * 2. @p limit can be omitted or be @a null, if so the configured value of + * @a api_limit_get_liquidity_pools will be used + * 3. @p start_id can be omitted or be @a null, if so the api will return the "first page" of pools + * 4. One or more optional parameters can be omitted from the end of the parameter list, and the optional + * parameters in the middle cannot be omitted (but can be @a null). 
*/ vector get_liquidity_pools_by_asset_a( const std::string& asset_symbol_or_id, - const optional& limit = application_options::get_default().api_limit_get_liquidity_pools, + const optional& limit = optional(), const optional& start_id = optional(), const optional& with_statistics = false )const; @@ -712,15 +716,16 @@ class database_api * @return The liquidity pools * * @note - * 1. if @p asset_symbol_or_id cannot be tied to an asset, an error will be returned - * 2. @p limit can be omitted or be null, if so the default value of - * @ref application_options::api_limit_get_liquidity_pools will be used - * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of pools - * 4. can only omit one or more arguments in the end of the list, but not one or more in the middle + * 1. If @p asset_symbol_or_id cannot be tied to an asset, an error will be returned + * 2. @p limit can be omitted or be @a null, if so the configured value of + * @a api_limit_get_liquidity_pools will be used + * 3. @p start_id can be omitted or be @a null, if so the api will return the "first page" of pools + * 4. One or more optional parameters can be omitted from the end of the parameter list, and the optional + * parameters in the middle cannot be omitted (but can be @a null). */ vector get_liquidity_pools_by_asset_b( const std::string& asset_symbol_or_id, - const optional& limit = application_options::get_default().api_limit_get_liquidity_pools, + const optional& limit = optional(), const optional& start_id = optional(), const optional& with_statistics = false )const; @@ -734,15 +739,16 @@ class database_api * @return The liquidity pools * * @note - * 1. if @p asset_symbol_or_id cannot be tied to an asset, an error will be returned - * 2. @p limit can be omitted or be null, if so the default value of - * @ref application_options::api_limit_get_liquidity_pools will be used - * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of pools - * 4. can only omit one or more arguments in the end of the list, but not one or more in the middle + * 1. If @p asset_symbol_or_id cannot be tied to an asset, an error will be returned + * 2. @p limit can be omitted or be @a null, if so the configured value of + * @a api_limit_get_liquidity_pools will be used + * 3. @p start_id can be omitted or be @a null, if so the api will return the "first page" of pools + * 4. One or more optional parameters can be omitted from the end of the parameter list, and the optional + * parameters in the middle cannot be omitted (but can be @a null). */ vector get_liquidity_pools_by_one_asset( const std::string& asset_symbol_or_id, - const optional& limit = application_options::get_default().api_limit_get_liquidity_pools, + const optional& limit = optional(), const optional& start_id = optional(), const optional& with_statistics = false )const; @@ -757,17 +763,18 @@ class database_api * @return The liquidity pools * * @note - * 1. if @p asset_symbol_or_id_a or @p asset_symbol_or_id_b cannot be tied to an asset, + * 1. If @p asset_symbol_or_id_a or @p asset_symbol_or_id_b cannot be tied to an asset, * an error will be returned - * 2. @p limit can be omitted or be null, if so the default value of - * @ref application_options::api_limit_get_liquidity_pools will be used - * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of pools - * 4. can only omit one or more arguments in the end of the list, but not one or more in the middle + * 2. 
@p limit can be omitted or be @a null, if so the configured value of + * @a api_limit_get_liquidity_pools will be used + * 3. @p start_id can be omitted or be @a null, if so the api will return the "first page" of pools + * 4. One or more optional parameters can be omitted from the end of the parameter list, and the optional + * parameters in the middle cannot be omitted (but can be @a null). */ vector get_liquidity_pools_by_both_assets( const std::string& asset_symbol_or_id_a, const std::string& asset_symbol_or_id_b, - const optional& limit = application_options::get_default().api_limit_get_liquidity_pools, + const optional& limit = optional(), const optional& start_id = optional(), const optional& with_statistics = false )const; @@ -819,15 +826,16 @@ class database_api * @return The liquidity pools * * @note - * 1. if @p account_name_or_id cannot be tied to an account, an error will be returned - * 2. @p limit can be omitted or be null, if so the default value of - * @ref application_options::api_limit_get_liquidity_pools will be used - * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of pools - * 4. can only omit one or more arguments in the end of the list, but not one or more in the middle + * 1. If @p account_name_or_id cannot be tied to an account, an error will be returned + * 2. @p limit can be omitted or be @a null, if so the configured value of + * @a api_limit_get_liquidity_pools will be used + * 3. @p start_id can be omitted or be @a null, if so the api will return the "first page" of pools + * 4. One or more optional parameters can be omitted from the end of the parameter list, and the optional + * parameters in the middle cannot be omitted (but can be @a null). */ vector get_liquidity_pools_by_owner( const std::string& account_name_or_id, - const optional& limit = application_options::get_default().api_limit_get_liquidity_pools, + const optional& limit = optional(), const optional& start_id = optional(), const optional& with_statistics = false )const; @@ -844,13 +852,14 @@ class database_api * @return The SameT Funds * * @note - * 1. @p limit can be omitted or be null, if so the default value of - * @ref application_options::api_limit_get_samet_funds will be used - * 2. @p start_id can be omitted or be null, if so the api will return the "first page" of data - * 3. can only omit one or more arguments in the end of the list, but not one or more in the middle + * 1. @p limit can be omitted or be @a null, if so the configured value of + * @a api_limit_get_samet_funds will be used + * 2. @p start_id can be omitted or be @a null, if so the api will return the "first page" of data + * 3. One or more optional parameters can be omitted from the end of the parameter list, and the optional + * parameters in the middle cannot be omitted (but can be @a null). */ vector list_samet_funds( - const optional& limit = application_options::get_default().api_limit_get_samet_funds, + const optional& limit = optional(), const optional& start_id = optional() )const; /** @@ -862,15 +871,16 @@ class database_api * @return The SameT Funds * * @note - * 1. if @p account_name_or_id cannot be tied to an account, an error will be returned - * 2. @p limit can be omitted or be null, if so the default value of - * @ref application_options::api_limit_get_samet_funds will be used - * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of data - * 4. 
can only omit one or more arguments in the end of the list, but not one or more in the middle + * 1. If @p account_name_or_id cannot be tied to an account, an error will be returned + * 2. @p limit can be omitted or be @a null, if so the configured value of + * @a api_limit_get_samet_funds will be used + * 3. @p start_id can be omitted or be @a null, if so the api will return the "first page" of data + * 4. One or more optional parameters can be omitted from the end of the parameter list, and the optional + * parameters in the middle cannot be omitted (but can be @a null). */ vector get_samet_funds_by_owner( const std::string& account_name_or_id, - const optional& limit = application_options::get_default().api_limit_get_samet_funds, + const optional& limit = optional(), const optional& start_id = optional() )const; /** @@ -882,15 +892,16 @@ class database_api * @return The SameT Funds * * @note - * 1. if @p asset_symbol_or_id cannot be tied to an asset, an error will be returned - * 2. @p limit can be omitted or be null, if so the default value of - * @ref application_options::api_limit_get_samet_funds will be used - * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of data - * 4. can only omit one or more arguments in the end of the list, but not one or more in the middle + * 1. If @p asset_symbol_or_id cannot be tied to an asset, an error will be returned + * 2. @p limit can be omitted or be @a null, if so the configured value of + * @a api_limit_get_samet_funds will be used + * 3. @p start_id can be omitted or be @a null, if so the api will return the "first page" of data + * 4. One or more optional parameters can be omitted from the end of the parameter list, and the optional + * parameters in the middle cannot be omitted (but can be @a null). */ vector get_samet_funds_by_asset( const std::string& asset_symbol_or_id, - const optional& limit = application_options::get_default().api_limit_get_samet_funds, + const optional& limit = optional(), const optional& start_id = optional() )const; /// @} @@ -907,13 +918,14 @@ class database_api * @return The credit offers * * @note - * 1. @p limit can be omitted or be null, if so the default value of - * @ref application_options::api_limit_get_credit_offers will be used - * 2. @p start_id can be omitted or be null, if so the api will return the "first page" of data - * 3. can only omit one or more arguments in the end of the list, but not one or more in the middle + * 1. @p limit can be omitted or be @a null, if so the configured value of + * @a api_limit_get_credit_offers will be used + * 2. @p start_id can be omitted or be @a null, if so the api will return the "first page" of data + * 3. One or more optional parameters can be omitted from the end of the parameter list, and the optional + * parameters in the middle cannot be omitted (but can be @a null). */ vector list_credit_offers( - const optional& limit = application_options::get_default().api_limit_get_credit_offers, + const optional& limit = optional(), const optional& start_id = optional() )const; /** @@ -925,15 +937,16 @@ class database_api * @return The credit offers * * @note - * 1. if @p account_name_or_id cannot be tied to an account, an error will be returned - * 2. @p limit can be omitted or be null, if so the default value of - * @ref application_options::api_limit_get_credit_offers will be used - * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of data - * 4. 
can only omit one or more arguments in the end of the list, but not one or more in the middle + * 1. If @p account_name_or_id cannot be tied to an account, an error will be returned + * 2. @p limit can be omitted or be @a null, if so the configured value of + * @a api_limit_get_credit_offers will be used + * 3. @p start_id can be omitted or be @a null, if so the api will return the "first page" of data + * 4. One or more optional parameters can be omitted from the end of the parameter list, and the optional + * parameters in the middle cannot be omitted (but can be @a null). */ vector get_credit_offers_by_owner( const std::string& account_name_or_id, - const optional& limit = application_options::get_default().api_limit_get_credit_offers, + const optional& limit = optional(), const optional& start_id = optional() )const; /** @@ -945,15 +958,16 @@ class database_api * @return The credit offers * * @note - * 1. if @p asset_symbol_or_id cannot be tied to an asset, an error will be returned - * 2. @p limit can be omitted or be null, if so the default value of - * @ref application_options::api_limit_get_credit_offers will be used - * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of data - * 4. can only omit one or more arguments in the end of the list, but not one or more in the middle + * 1. If @p asset_symbol_or_id cannot be tied to an asset, an error will be returned + * 2. @p limit can be omitted or be @a null, if so the configured value of + * @a api_limit_get_credit_offers will be used + * 3. @p start_id can be omitted or be @a null, if so the api will return the "first page" of data + * 4. One or more optional parameters can be omitted from the end of the parameter list, and the optional + * parameters in the middle cannot be omitted (but can be @a null). */ vector get_credit_offers_by_asset( const std::string& asset_symbol_or_id, - const optional& limit = application_options::get_default().api_limit_get_credit_offers, + const optional& limit = optional(), const optional& start_id = optional() )const; /** @@ -964,13 +978,14 @@ class database_api * @return The credit deals * * @note - * 1. @p limit can be omitted or be null, if so the default value of - * @ref application_options::api_limit_get_credit_offers will be used - * 2. @p start_id can be omitted or be null, if so the api will return the "first page" of data - * 3. can only omit one or more arguments in the end of the list, but not one or more in the middle + * 1. @p limit can be omitted or be @a null, if so the configured value of + * @a api_limit_get_credit_offers will be used + * 2. @p start_id can be omitted or be @a null, if so the api will return the "first page" of data + * 3. One or more optional parameters can be omitted from the end of the parameter list, and the optional + * parameters in the middle cannot be omitted (but can be @a null). */ vector list_credit_deals( - const optional& limit = application_options::get_default().api_limit_get_credit_offers, + const optional& limit = optional(), const optional& start_id = optional() )const; /** @@ -982,15 +997,16 @@ class database_api * @return The credit deals * * @note - * 1. if @p offer_id cannot be tied to a credit offer, an empty list will be returned - * 2. @p limit can be omitted or be null, if so the default value of - * @ref application_options::api_limit_get_credit_offers will be used - * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of data - * 4. 
can only omit one or more arguments in the end of the list, but not one or more in the middle + * 1. If @p offer_id cannot be tied to a credit offer, an empty list will be returned + * 2. @p limit can be omitted or be @a null, if so the configured value of + * @a api_limit_get_credit_offers will be used + * 3. @p start_id can be omitted or be @a null, if so the api will return the "first page" of data + * 4. One or more optional parameters can be omitted from the end of the parameter list, and the optional + * parameters in the middle cannot be omitted (but can be @a null). */ vector get_credit_deals_by_offer_id( const credit_offer_id_type& offer_id, - const optional& limit = application_options::get_default().api_limit_get_credit_offers, + const optional& limit = optional(), const optional& start_id = optional() )const; /** @@ -1002,15 +1018,16 @@ class database_api * @return The credit deals * * @note - * 1. if @p account_name_or_id cannot be tied to an account, an error will be returned - * 2. @p limit can be omitted or be null, if so the default value of - * @ref application_options::api_limit_get_credit_offers will be used - * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of data - * 4. can only omit one or more arguments in the end of the list, but not one or more in the middle + * 1. If @p account_name_or_id cannot be tied to an account, an error will be returned + * 2. @p limit can be omitted or be @a null, if so the configured value of + * @a api_limit_get_credit_offers will be used + * 3. @p start_id can be omitted or be @a null, if so the api will return the "first page" of data + * 4. One or more optional parameters can be omitted from the end of the parameter list, and the optional + * parameters in the middle cannot be omitted (but can be @a null). */ vector get_credit_deals_by_offer_owner( const std::string& account_name_or_id, - const optional& limit = application_options::get_default().api_limit_get_credit_offers, + const optional& limit = optional(), const optional& start_id = optional() )const; /** @@ -1022,15 +1039,16 @@ class database_api * @return The credit deals * * @note - * 1. if @p account_name_or_id cannot be tied to an account, an error will be returned - * 2. @p limit can be omitted or be null, if so the default value of - * @ref application_options::api_limit_get_credit_offers will be used - * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of data - * 4. can only omit one or more arguments in the end of the list, but not one or more in the middle + * 1. If @p account_name_or_id cannot be tied to an account, an error will be returned + * 2. @p limit can be omitted or be @a null, if so the configured value of + * @a api_limit_get_credit_offers will be used + * 3. @p start_id can be omitted or be @a null, if so the api will return the "first page" of data + * 4. One or more optional parameters can be omitted from the end of the parameter list, and the optional + * parameters in the middle cannot be omitted (but can be @a null). */ vector get_credit_deals_by_borrower( const std::string& account_name_or_id, - const optional& limit = application_options::get_default().api_limit_get_credit_offers, + const optional& limit = optional(), const optional& start_id = optional() )const; /** @@ -1042,15 +1060,16 @@ class database_api * @return The credit deals * * @note - * 1. if @p asset_symbol_or_id cannot be tied to an asset, an error will be returned - * 2. 
@p limit can be omitted or be null, if so the default value of - * @ref application_options::api_limit_get_credit_offers will be used - * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of data - * 4. can only omit one or more arguments in the end of the list, but not one or more in the middle + * 1. If @p asset_symbol_or_id cannot be tied to an asset, an error will be returned + * 2. @p limit can be omitted or be @a null, if so the configured value of + * @a api_limit_get_credit_offers will be used + * 3. @p start_id can be omitted or be @a null, if so the api will return the "first page" of data + * 4. One or more optional parameters can be omitted from the end of the parameter list, and the optional + * parameters in the middle cannot be omitted (but can be @a null). */ vector get_credit_deals_by_debt_asset( const std::string& asset_symbol_or_id, - const optional& limit = application_options::get_default().api_limit_get_credit_offers, + const optional& limit = optional(), const optional& start_id = optional() )const; /** @@ -1062,15 +1081,16 @@ class database_api * @return The credit deals * * @note - * 1. if @p asset_symbol_or_id cannot be tied to an asset, an error will be returned - * 2. @p limit can be omitted or be null, if so the default value of - * @ref application_options::api_limit_get_credit_offers will be used - * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of data - * 4. can only omit one or more arguments in the end of the list, but not one or more in the middle + * 1. If @p asset_symbol_or_id cannot be tied to an asset, an error will be returned + * 2. @p limit can be omitted or be @a null, if so the configured value of + * @a api_limit_get_credit_offers will be used + * 3. @p start_id can be omitted or be @a null, if so the api will return the "first page" of data + * 4. One or more optional parameters can be omitted from the end of the parameter list, and the optional + * parameters in the middle cannot be omitted (but can be @a null). */ vector get_credit_deals_by_collateral_asset( const std::string& asset_symbol_or_id, - const optional& limit = application_options::get_default().api_limit_get_credit_offers, + const optional& limit = optional(), const optional& start_id = optional() )const; /// @} @@ -1384,13 +1404,14 @@ class database_api * @return The tickets * * @note - * 1. @p limit can be omitted or be null, if so the default value of - * @ref application_options::api_limit_get_tickets will be used - * 2. @p start_id can be omitted or be null, if so the api will return the "first page" of tickets - * 3. can only omit one or more arguments in the end of the list, but not one or more in the middle + * 1. @p limit can be omitted or be @a null, if so the configured value of + * @a api_limit_get_tickets will be used + * 2. @p start_id can be omitted or be @a null, if so the api will return the "first page" of tickets + * 3. One or more optional parameters can be omitted from the end of the parameter list, and the optional + * parameters in the middle cannot be omitted (but can be @a null). */ vector list_tickets( - const optional& limit = application_options::get_default().api_limit_get_tickets, + const optional& limit = optional(), const optional& start_id = optional() )const; /** @@ -1402,15 +1423,16 @@ class database_api * @return The tickets * * @note - * 1. if @p account_name_or_id cannot be tied to an account, an error will be returned - * 2. 
@p limit can be omitted or be null, if so the default value of - * @ref application_options::api_limit_get_tickets will be used - * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of tickets - * 4. can only omit one or more arguments in the end of the list, but not one or more in the middle + * 1. If @p account_name_or_id cannot be tied to an account, an error will be returned + * 2. @p limit can be omitted or be @a null, if so the configured value of + * @a api_limit_get_tickets will be used + * 3. @p start_id can be omitted or be @a null, if so the api will return the "first page" of tickets + * 4. One or more optional parameters can be omitted from the end of the parameter list, and the optional + * parameters in the middle cannot be omitted (but can be @a null). */ vector get_tickets_by_account( const std::string& account_name_or_id, - const optional& limit = application_options::get_default().api_limit_get_tickets, + const optional& limit = optional(), const optional& start_id = optional() )const; private: From d5de492e0d07c9abe33b6afedc2dd3999c51faf2 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 6 Aug 2022 12:41:55 +0000 Subject: [PATCH 160/338] Use configured limit as API page size if omitted --- libraries/app/database_api.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/libraries/app/database_api.cpp b/libraries/app/database_api.cpp index 6ad18d1d29..e474075fef 100644 --- a/libraries/app/database_api.cpp +++ b/libraries/app/database_api.cpp @@ -1776,8 +1776,8 @@ vector database_api_impl::get_liquidity_pools_by FC_ASSERT( _app_options && _app_options->has_api_helper_indexes_plugin, "api_helper_indexes plugin is not enabled on this server." ); - uint32_t limit = olimit.valid() ? *olimit : application_options::get_default().api_limit_get_liquidity_pools; const auto configured_limit = _app_options->api_limit_get_liquidity_pools; + uint32_t limit = olimit.valid() ? *olimit : configured_limit; FC_ASSERT( limit <= configured_limit, "limit can not be greater than ${configured_limit}", ("configured_limit", configured_limit) ); @@ -1934,10 +1934,9 @@ vector database_api_impl::get_liquidity_pools_by const optional& ostart_id, const optional& with_statistics )const { - uint32_t limit = olimit.valid() ? *olimit : application_options::get_default().api_limit_get_liquidity_pools; - FC_ASSERT( _app_options, "Internal error" ); const auto configured_limit = _app_options->api_limit_get_liquidity_pools; + uint32_t limit = olimit.valid() ? 
*olimit : configured_limit; FC_ASSERT( limit <= configured_limit, "limit can not be greater than ${configured_limit}", ("configured_limit", configured_limit) ); From 9c4fb6527fbe5b037093d8c8d277860225550315 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 6 Aug 2022 21:06:26 +0000 Subject: [PATCH 161/338] Add tests for small API limits --- tests/common/database_fixture.cpp | 3 ++- tests/tests/custom_operations.cpp | 23 ++++++++++++++++------- 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/tests/common/database_fixture.cpp b/tests/common/database_fixture.cpp index 5eb7b4d545..0240ef86f7 100644 --- a/tests/common/database_fixture.cpp +++ b/tests/common/database_fixture.cpp @@ -441,7 +441,8 @@ std::shared_ptr database_fixture_base::in fixture.app.register_plugin(true); fc::set_option( options, "custom-operations-start-block", uint32_t(1) ); if( fixture.current_test_name == "custom_operations_account_storage_map_test" ) - fc::set_option( options, "api-limit-get-storage-info", uint32_t(120) ); + // Set a small limit + fc::set_option( options, "api-limit-get-storage-info", uint32_t(6) ); } fc::set_option( options, "bucket-size", string("[15]") ); diff --git a/tests/tests/custom_operations.cpp b/tests/tests/custom_operations.cpp index 1099778c45..37dfacc18d 100644 --- a/tests/tests/custom_operations.cpp +++ b/tests/tests/custom_operations.cpp @@ -236,10 +236,10 @@ try { generate_block(); // test API limit config - BOOST_CHECK_THROW( custom_operations_api.get_storage_info("alice", "account_object", {}, 121), fc::exception ); + BOOST_CHECK_THROW( custom_operations_api.get_storage_info("alice", "account_object", {}, 7), fc::exception ); // This does not throw - storage_results_alice = custom_operations_api.get_storage_info("alice", "account_object", {}, 120); + storage_results_alice = custom_operations_api.get_storage_info("alice", "account_object", {}, 6); BOOST_REQUIRE_EQUAL(storage_results_alice.size(), 3U); BOOST_CHECK_EQUAL(storage_results_alice[0].account.instance.value, 17); BOOST_CHECK_EQUAL(storage_results_alice[0].key, "nathan"); @@ -345,7 +345,7 @@ try { // query all storage_results = custom_operations_api.get_storage_info(); - BOOST_REQUIRE_EQUAL(storage_results.size(), 7U ); + BOOST_REQUIRE_EQUAL(storage_results.size(), 6U ); // the configured limit, the first page BOOST_CHECK_EQUAL(storage_results[0].account.instance.value, 16 ); BOOST_CHECK_EQUAL(storage_results[0].catalog, "settings"); BOOST_CHECK_EQUAL(storage_results[0].key, "image_url"); @@ -370,10 +370,19 @@ try { BOOST_CHECK_EQUAL(storage_results[5].catalog, "account_object"); BOOST_CHECK_EQUAL(storage_results[5].key, "robert"); BOOST_CHECK_EQUAL(storage_results[5].value->as(20).name, "robert"); - BOOST_CHECK_EQUAL(storage_results[6].account.instance.value, 17 ); - BOOST_CHECK_EQUAL(storage_results[6].catalog, "settings"); - BOOST_CHECK_EQUAL(storage_results[6].key, "image_url"); - BOOST_CHECK_EQUAL(storage_results[6].value->as_string(), "http://some.other.image.url/img.jpg"); + + storage_id = storage_results[5].id; + + storage_results = custom_operations_api.get_storage_info({},{},{},{},storage_id); + BOOST_REQUIRE_EQUAL(storage_results.size(), 2U ); // the 2nd page + BOOST_CHECK_EQUAL(storage_results[0].account.instance.value, 17 ); + BOOST_CHECK_EQUAL(storage_results[0].catalog, "account_object"); + BOOST_CHECK_EQUAL(storage_results[0].key, "robert"); + BOOST_CHECK_EQUAL(storage_results[0].value->as(20).name, "robert"); + BOOST_CHECK_EQUAL(storage_results[1].account.instance.value, 17 ); + 
BOOST_CHECK_EQUAL(storage_results[1].catalog, "settings"); + BOOST_CHECK_EQUAL(storage_results[1].key, "image_url"); + BOOST_CHECK_EQUAL(storage_results[1].value->as_string(), "http://some.other.image.url/img.jpg"); } catch (fc::exception &e) { From 9056a2427986b52ab8358d1b0b775d65d2fb9def Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 8 Aug 2022 08:51:19 +0000 Subject: [PATCH 162/338] Delete objects from ES before loading from DB --- libraries/plugins/es_objects/es_objects.cpp | 27 ++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index 0e7994867a..719edb90fb 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -110,8 +110,12 @@ class es_objects_plugin_impl { index_database( ids, action_type::deletion ); } void index_database(const vector& ids, action_type action); + /// Load all data from the object database into ES void sync_db(); - void remove_from_database( const object_id_type& id, const plugin_options::object_options& opt ); + /// Delete one object from ES + void delete_from_database( const object_id_type& id, const plugin_options::object_options& opt ); + /// Delete all objects of the specified type from ES + void delete_all_from_database( const plugin_options::object_options& opt ); es_objects_plugin& _self; plugin_options _options; @@ -150,6 +154,10 @@ struct data_loader if( !opt.enabled ) return; + // If no_delete or store_updates is true, do not delete + if( !( opt.no_delete || opt.store_updates ) ) + my->delete_all_from_database( opt ); + db.get_index( ObjType::space_id, ObjType::type_id ).inspect_all_objects( [this, &opt](const graphene::db::object &o) { my->prepareTemplate( static_cast(o), opt ); @@ -213,7 +221,7 @@ void es_objects_plugin_impl::index_database(const vector& ids, a continue; const auto& opt = itr->second; if( action_type::deletion == action ) - remove_from_database( value, opt ); + delete_from_database( value, opt ); else { switch( itr->first ) @@ -247,7 +255,7 @@ void es_objects_plugin_impl::index_database(const vector& ids, a } -void es_objects_plugin_impl::remove_from_database( +void es_objects_plugin_impl::delete_from_database( const object_id_type& id, const es_objects_plugin_impl::plugin_options::object_options& opt ) { if( opt.no_delete ) @@ -266,6 +274,19 @@ void es_objects_plugin_impl::remove_from_database( send_bulk_if_ready(); } +void es_objects_plugin_impl::delete_all_from_database( const plugin_options::object_options& opt ) +{ + if( opt.no_delete ) + return; + // Note: + // 1. The _delete_by_query API deletes the data but keeps the index mapping, so the function is OK. + // Simply deleting the index is probably faster, but it requires the "delete_index" permission, and + // may probably mess up the index mapping and other existing settings. + // Don't know if there is a good way to only delete objects that do not exist in the object database. + // 2. 
We don't check the return value here, it's probably OK + es->query( _options.index_prefix + opt.index_name + "/_delete_by_query", "{\"query\":{\"match_all\":{}}}" ); +} + template void es_objects_plugin_impl::prepareTemplate( const T& blockchain_object, const es_objects_plugin_impl::plugin_options::object_options& opt ) From adccebfd45f243fecc32629e0618f7b4ef8bd144 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 8 Aug 2022 08:57:57 +0000 Subject: [PATCH 163/338] Remove a unnecessary check --- libraries/plugins/es_objects/es_objects.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index 719edb90fb..0952a586a5 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -276,8 +276,6 @@ void es_objects_plugin_impl::delete_from_database( void es_objects_plugin_impl::delete_all_from_database( const plugin_options::object_options& opt ) { - if( opt.no_delete ) - return; // Note: // 1. The _delete_by_query API deletes the data but keeps the index mapping, so the function is OK. // Simply deleting the index is probably faster, but it requires the "delete_index" permission, and From 1758c796c01adb165c7c03e66b4bdb4be0e4f468 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 8 Aug 2022 09:59:55 +0000 Subject: [PATCH 164/338] Always delete objects from ES first on resync --- libraries/plugins/es_objects/es_objects.cpp | 27 ++++++++++++--------- 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index 0952a586a5..ae6b69ccb3 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -111,7 +111,7 @@ class es_objects_plugin_impl void index_database(const vector& ids, action_type action); /// Load all data from the object database into ES - void sync_db(); + void sync_db( bool delete_before_load = false ); /// Delete one object from ES void delete_from_database( const object_id_type& id, const plugin_options::object_options& opt ); /// Delete all objects of the specified type from ES @@ -149,13 +149,14 @@ struct data_loader } template - void load( const es_objects_plugin_impl::plugin_options::object_options& opt ) + void load( const es_objects_plugin_impl::plugin_options::object_options& opt, + bool force_delete = false ) { if( !opt.enabled ) return; // If no_delete or store_updates is true, do not delete - if( !( opt.no_delete || opt.store_updates ) ) + if( force_delete || !( opt.no_delete || opt.store_updates ) ) my->delete_all_from_database( opt ); db.get_index( ObjType::space_id, ObjType::type_id ).inspect_all_objects( @@ -165,7 +166,7 @@ struct data_loader } }; -void es_objects_plugin_impl::sync_db() +void es_objects_plugin_impl::sync_db( bool delete_before_load ) { ilog("elasticsearch OBJECTS: loading data from the object database (chain state)"); @@ -176,13 +177,13 @@ void es_objects_plugin_impl::sync_db() data_loader loader( this ); - loader.load( _options.accounts ); - loader.load( _options.assets ); - loader.load( _options.asset_bitasset ); - loader.load( _options.balances ); - loader.load( _options.proposals ); - loader.load( _options.limit_orders ); - loader.load( _options.budget ); + loader.load( _options.accounts, delete_before_load ); + loader.load( _options.assets, delete_before_load ); + loader.load( _options.asset_bitasset, delete_before_load ); + loader.load( _options.balances, 
delete_before_load ); + loader.load( _options.proposals, delete_before_load ); + loader.load( _options.limit_orders, delete_before_load ); + loader.load( _options.budget, delete_before_load ); } void es_objects_plugin_impl::index_database(const vector& ids, action_type action) @@ -482,7 +483,9 @@ void es_objects_plugin::plugin_initialize(const boost::program_options::variable void es_objects_plugin::plugin_startup() { - if( my->_options.sync_db_on_startup || 0 == database().head_block_num() ) + if( 0 == database().head_block_num() ) + my->sync_db( true ); + else if( my->_options.sync_db_on_startup ) my->sync_db(); } From 0ea89c6a41a1b67069a1fd46b05ae0dbc62d831c Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 8 Aug 2022 10:25:28 +0000 Subject: [PATCH 165/338] Add logging --- libraries/plugins/es_objects/es_objects.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index ae6b69ccb3..50c5de941a 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -157,8 +157,12 @@ struct data_loader // If no_delete or store_updates is true, do not delete if( force_delete || !( opt.no_delete || opt.store_updates ) ) + { + ilog( "Deleting all data in index " + my->_options.index_prefix + opt.index_name ); my->delete_all_from_database( opt ); + } + ilog( "Loading data into index " + my->_options.index_prefix + opt.index_name ); db.get_index( ObjType::space_id, ObjType::type_id ).inspect_all_objects( [this, &opt](const graphene::db::object &o) { my->prepareTemplate( static_cast(o), opt ); @@ -184,6 +188,8 @@ void es_objects_plugin_impl::sync_db( bool delete_before_load ) loader.load( _options.proposals, delete_before_load ); loader.load( _options.limit_orders, delete_before_load ); loader.load( _options.budget, delete_before_load ); + + ilog("elasticsearch OBJECTS: done loading data from the object database (chain state)"); } void es_objects_plugin_impl::index_database(const vector& ids, action_type action) From d23a6e1c0c39e62d752c86e95407110f8278ef9f Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 8 Aug 2022 10:42:00 +0000 Subject: [PATCH 166/338] Fix code smells --- libraries/plugins/es_objects/es_objects.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index 50c5de941a..f7a2f4181e 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -115,7 +115,7 @@ class es_objects_plugin_impl /// Delete one object from ES void delete_from_database( const object_id_type& id, const plugin_options::object_options& opt ); /// Delete all objects of the specified type from ES - void delete_all_from_database( const plugin_options::object_options& opt ); + void delete_all_from_database( const plugin_options::object_options& opt ) const; es_objects_plugin& _self; plugin_options _options; @@ -281,7 +281,7 @@ void es_objects_plugin_impl::delete_from_database( send_bulk_if_ready(); } -void es_objects_plugin_impl::delete_all_from_database( const plugin_options::object_options& opt ) +void es_objects_plugin_impl::delete_all_from_database( const plugin_options::object_options& opt ) const { // Note: // 1. The _delete_by_query API deletes the data but keeps the index mapping, so the function is OK. 
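 // For illustration only (assumptions: the plugin is running with its default index prefix,
 // e.g. "objects-", and the object type in question is the limit order index "limitorder"):
 // the es->query() call touched in the next hunk boils down to an HTTP request such as
 //
 //   POST /objects-limitorder/_delete_by_query
 //   { "query": { "match_all": {} } }
 //
 // which removes every document in that per-object-type index while keeping the index
 // itself and its mapping/settings intact, as the note above describes.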
@@ -289,7 +289,7 @@ void es_objects_plugin_impl::delete_all_from_database( const plugin_options::obj // may probably mess up the index mapping and other existing settings. // Don't know if there is a good way to only delete objects that do not exist in the object database. // 2. We don't check the return value here, it's probably OK - es->query( _options.index_prefix + opt.index_name + "/_delete_by_query", "{\"query\":{\"match_all\":{}}}" ); + es->query( _options.index_prefix + opt.index_name + "/_delete_by_query", R"({"query":{"match_all":{}}})" ); } template From d54da7ec886550bd7769f122d49c52061741e583 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 8 Aug 2022 11:07:35 +0000 Subject: [PATCH 167/338] Add tests for limit order object processing in ES --- tests/elasticsearch/main.cpp | 32 +++++++++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/tests/elasticsearch/main.cpp b/tests/elasticsearch/main.cpp index 1e60ed4de3..779c887406 100644 --- a/tests/elasticsearch/main.cpp +++ b/tests/elasticsearch/main.cpp @@ -235,7 +235,7 @@ BOOST_AUTO_TEST_CASE(elasticsearch_objects) { if(delete_objects) { // all records deleted // asset and bitasset - create_bitasset("USD", account_id_type()); + asset_id_type usd_id = create_bitasset("USD", account_id_type()).id; generate_block(); string query = "{ \"query\" : { \"bool\" : { \"must\" : [{\"match_all\": {}}] } } }"; @@ -268,8 +268,28 @@ BOOST_AUTO_TEST_CASE(elasticsearch_objects) { auto bitasset_object_id = j["hits"]["hits"][size_t(0)]["_source"]["object_id"].as_string(); BOOST_CHECK_EQUAL(bitasset_object_id, bitasset_data_id); + // create a limit order that expires at the next maintenance time + create_sell_order( account_id_type(), asset(1), asset(1, usd_id), + db.get_dynamic_global_properties().next_maintenance_time ); + generate_block(); + + es.endpoint = es.index_prefix + "limitorder/_doc/_count"; + es.query = ""; + fc::wait_for( ES_WAIT_TIME, [&]() { + res = graphene::utilities::getEndPoint(es); + j = fc::json::from_string(res); + if( !j.is_object() ) + return false; + const auto& obj = j.get_object(); + if( obj.find("count") == obj.end() ) + return false; + total = obj["count"].as_string(); + return (total == "1"); + }); + // maintenance, for budget records generate_blocks( db.get_dynamic_global_properties().next_maintenance_time ); + generate_block(); es.endpoint = es.index_prefix + "budget/_doc/_count"; es.query = ""; @@ -285,6 +305,16 @@ BOOST_AUTO_TEST_CASE(elasticsearch_objects) { return (total == "1"); // new record inserted at the first maintenance block }); + es.endpoint = es.index_prefix + "limitorder/_doc/_count"; + es.query = ""; + res = graphene::utilities::getEndPoint(es); + j = fc::json::from_string(res); + BOOST_REQUIRE( j.is_object() ); + const auto& obj = j.get_object(); + BOOST_REQUIRE( obj.find("count") != obj.end() ); + total = obj["count"].as_string(); + BOOST_CHECK( total == "0" ); // the limit order expired, so the object is removed + } } catch (fc::exception &e) { From d0ead7893cfc794dd3adb4e1ddb46fac74145d5f Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 8 Aug 2022 11:57:07 +0000 Subject: [PATCH 168/338] Fix es_objects plugin limit order processing tests --- tests/common/database_fixture.cpp | 4 ++-- tests/elasticsearch/main.cpp | 18 +++++++++++------- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/tests/common/database_fixture.cpp b/tests/common/database_fixture.cpp index 0240ef86f7..67683e9421 100644 --- a/tests/common/database_fixture.cpp +++ 
b/tests/common/database_fixture.cpp @@ -413,8 +413,8 @@ std::shared_ptr database_fixture_base::in fixture.app.register_plugin(true); fc::set_option( options, "es-objects-elasticsearch-url", GRAPHENE_TESTING_ES_URL ); - fc::set_option( options, "es-objects-bulk-replay", uint32_t(2) ); - fc::set_option( options, "es-objects-bulk-sync", uint32_t(2) ); + fc::set_option( options, "es-objects-bulk-replay", uint32_t(1) ); + fc::set_option( options, "es-objects-bulk-sync", uint32_t(1) ); fc::set_option( options, "es-objects-proposals", true ); fc::set_option( options, "es-objects-accounts", true ); fc::set_option( options, "es-objects-assets", true ); diff --git a/tests/elasticsearch/main.cpp b/tests/elasticsearch/main.cpp index 779c887406..eb7f5d3795 100644 --- a/tests/elasticsearch/main.cpp +++ b/tests/elasticsearch/main.cpp @@ -307,13 +307,17 @@ BOOST_AUTO_TEST_CASE(elasticsearch_objects) { es.endpoint = es.index_prefix + "limitorder/_doc/_count"; es.query = ""; - res = graphene::utilities::getEndPoint(es); - j = fc::json::from_string(res); - BOOST_REQUIRE( j.is_object() ); - const auto& obj = j.get_object(); - BOOST_REQUIRE( obj.find("count") != obj.end() ); - total = obj["count"].as_string(); - BOOST_CHECK( total == "0" ); // the limit order expired, so the object is removed + fc::wait_for( ES_WAIT_TIME, [&]() { + res = graphene::utilities::getEndPoint(es); + j = fc::json::from_string(res); + if( !j.is_object() ) + return false; + const auto& obj = j.get_object(); + if( obj.find("count") == obj.end() ) + return false; + total = obj["count"].as_string(); + return (total == "0"); // the limit order expired, so the object is removed + }); } } From ab61d2b82b90c02a406c06571bf0344c8a836706 Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 9 Aug 2022 00:14:36 +0000 Subject: [PATCH 169/338] Store fee payer in account history in ES --- .../elasticsearch/elasticsearch_plugin.cpp | 20 ++++++++++++++++--- .../elasticsearch/elasticsearch_plugin.hpp | 6 ++++-- 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index 465b497b3e..298e459a34 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -249,13 +249,29 @@ void elasticsearch_plugin_impl::checkState(const fc::time_point_sec& block_time) bulk_lines.reserve(limit_documents); } +struct get_fee_payer_visitor +{ + using result_type = account_id_type; + + template + account_id_type operator()(const OpType& op) const + { + return op.fee_payer(); + } +}; + void elasticsearch_plugin_impl::doOperationHistory( const optional & oho, operation_history_struct& os ) const { try { os.trx_in_block = oho->trx_in_block; os.op_in_trx = oho->op_in_trx; - os.operation_result = fc::json::to_string(oho->result); os.virtual_op = oho->virtual_op; + os.fee_payer = oho->op.visit( get_fee_payer_visitor() ); + + if(_options.operation_string) + os.op = fc::json::to_string(oho->op); + + os.operation_result = fc::json::to_string(oho->result); if(_options.operation_object) { constexpr uint16_t current_depth = 2; @@ -269,8 +285,6 @@ void elasticsearch_plugin_impl::doOperationHistory( const optional op); } FC_CAPTURE_LOG_AND_RETHROW( (oho) ) } void elasticsearch_plugin_impl::doBlock(uint32_t trx_in_block, const signed_block& b, block_struct& bs) const diff --git a/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp 
b/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp index d25770b017..797fe3b8c0 100644 --- a/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp +++ b/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp @@ -82,9 +82,10 @@ class elasticsearch_plugin : public graphene::app::plugin struct operation_history_struct { uint16_t trx_in_block; uint16_t op_in_trx; - std::string operation_result; uint32_t virtual_op; + account_id_type fee_payer; std::string op; + std::string operation_result; variant op_object; variant operation_result_object; }; @@ -146,7 +147,8 @@ struct bulk_struct { FC_REFLECT_ENUM( graphene::elasticsearch::mode, (only_save)(only_query)(all) ) FC_REFLECT( graphene::elasticsearch::operation_history_struct, - (trx_in_block)(op_in_trx)(operation_result)(virtual_op)(op)(op_object)(operation_result_object) ) + (trx_in_block)(op_in_trx)(virtual_op)(fee_payer) + (op)(operation_result)(op_object)(operation_result_object) ) FC_REFLECT( graphene::elasticsearch::block_struct, (block_num)(block_time)(trx_id) ) FC_REFLECT( graphene::elasticsearch::fee_struct, (asset)(asset_name)(amount)(amount_units) ) FC_REFLECT( graphene::elasticsearch::transfer_struct, (asset)(asset_name)(amount)(amount_units)(from)(to) ) From 4dac52cb6c081634df44fdf064f553943b89a5ae Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 9 Aug 2022 00:22:45 +0000 Subject: [PATCH 170/338] Add tests for fee payer in ES --- tests/elasticsearch/main.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/elasticsearch/main.cpp b/tests/elasticsearch/main.cpp index eb7f5d3795..5589d9a8c7 100644 --- a/tests/elasticsearch/main.cpp +++ b/tests/elasticsearch/main.cpp @@ -127,6 +127,8 @@ BOOST_AUTO_TEST_CASE(elasticsearch_account_history) { j = fc::json::from_string(res); auto last_transfer_amount = j["_source"]["operation_history"]["op_object"]["amount_"]["amount"].as_string(); BOOST_CHECK_EQUAL(last_transfer_amount, "300"); + auto last_transfer_payer = j["_source"]["operation_history"]["fee_payer"].as_string(); + BOOST_CHECK_EQUAL(last_transfer_payer, "1.2.0"); // To test credit offers generate_blocks( HARDFORK_CORE_2362_TIME ); From d2a56ee199a09295bc5b3a540bd6b7f1fc57ab7f Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 9 Aug 2022 20:31:54 +0000 Subject: [PATCH 171/338] Lower log level of an exception in LP_exchange_op Specifically, the unfillable_price exception, usually due to competition among trading bots. 
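
How the pieces below fit together, in rough outline: the GRAPHENE_DECLARE_OP_EVALUATE_EXCEPTION /
GRAPHENE_IMPLEMENT_OP_EVALUATE_EXCEPTION macros define a dedicated exception type with its own
code_enum, GRAPHENE_ASSERT in the evaluator throws that type instead of a generic assertion
failure, and the p2p message handler can then match on the code and log the rejection at debug
level instead of warning level. A minimal sketch of the catch-side logic (evaluate_trx() is a
stand-in name; the real handler in libraries/net/node.cpp has more cases and more context):

    try
    {
       evaluate_trx( trx_message ); // may throw liquidity_pool_exchange_unfillable_price
    }
    catch( const fc::exception& e )
    {
       switch( e.code() )
       {
       // an expected rejection, e.g. losing a race against other trading bots: debug log only
       case graphene::chain::liquidity_pool_exchange_unfillable_price::code_enum::code_value :
          dlog( "client rejected message sent by peer: ${e}", ("e", e) );
          break;
       default: // anything else is still worth a warning
          wlog( "client rejected message sent by peer: ${e}", ("e", e) );
       }
    }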
--- libraries/chain/exceptions.cpp | 4 ++++ libraries/chain/include/graphene/chain/exceptions.hpp | 3 +++ libraries/chain/liquidity_pool_evaluator.cpp | 4 +++- libraries/net/node.cpp | 1 + 4 files changed, 11 insertions(+), 1 deletion(-) diff --git a/libraries/chain/exceptions.cpp b/libraries/chain/exceptions.cpp index 37f72607dd..05b3712583 100644 --- a/libraries/chain/exceptions.cpp +++ b/libraries/chain/exceptions.cpp @@ -164,6 +164,10 @@ namespace graphene { namespace chain { //GRAPHENE_IMPLEMENT_OP_BASE_EXCEPTIONS( htlc_redeem_operation ) //GRAPHENE_IMPLEMENT_OP_BASE_EXCEPTIONS( htlc_extend_operation ) + GRAPHENE_IMPLEMENT_OP_BASE_EXCEPTIONS( liquidity_pool_exchange ); + GRAPHENE_IMPLEMENT_OP_EVALUATE_EXCEPTION( unfillable_price, liquidity_pool_exchange, 1, + "Unable to exchange at expected price" ); + #define GRAPHENE_RECODE_EXC( cause_type, effect_type ) \ catch( const cause_type& e ) \ { throw( effect_type( e.what(), e.get_log() ) ); } diff --git a/libraries/chain/include/graphene/chain/exceptions.hpp b/libraries/chain/include/graphene/chain/exceptions.hpp index e2100bdfe8..07d2968c1f 100644 --- a/libraries/chain/include/graphene/chain/exceptions.hpp +++ b/libraries/chain/include/graphene/chain/exceptions.hpp @@ -213,6 +213,9 @@ namespace graphene { namespace chain { //GRAPHENE_DECLARE_OP_BASE_EXCEPTIONS( htlc_redeem_operation ) //GRAPHENE_DECLARE_OP_BASE_EXCEPTIONS( htlc_extend_operation ) + GRAPHENE_DECLARE_OP_BASE_EXCEPTIONS( liquidity_pool_exchange ); + GRAPHENE_DECLARE_OP_EVALUATE_EXCEPTION( unfillable_price, liquidity_pool_exchange, 1 ) + #define GRAPHENE_RECODE_EXC( cause_type, effect_type ) \ catch( const cause_type& e ) \ { throw( effect_type( e.what(), e.get_log() ) ); } diff --git a/libraries/chain/liquidity_pool_evaluator.cpp b/libraries/chain/liquidity_pool_evaluator.cpp index edc8979d47..d0c6d14e9d 100644 --- a/libraries/chain/liquidity_pool_evaluator.cpp +++ b/libraries/chain/liquidity_pool_evaluator.cpp @@ -391,7 +391,9 @@ void_result liquidity_pool_exchange_evaluator::do_evaluate(const liquidity_pool_ FC_ASSERT( _taker_market_fee <= _pool_pays, "Market fee should not be greater than the amount to receive" ); _account_receives = _pool_pays - _taker_market_fee; - FC_ASSERT( _account_receives.amount >= op.min_to_receive.amount, "Unable to exchange at expected price" ); + GRAPHENE_ASSERT( _account_receives.amount >= op.min_to_receive.amount, + liquidity_pool_exchange_unfillable_price, + "Unable to exchange at expected price" ); _pool_taker_fee = asset( static_cast( pool_taker_fee ), op.min_to_receive.asset_id ); diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index ca84fbaed3..73e3f34498 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -3436,6 +3436,7 @@ namespace graphene { namespace net { namespace detail { case graphene::chain::limit_order_create_insufficient_balance::code_enum::code_value : case graphene::chain::limit_order_cancel_nonexist_order::code_enum::code_value : case graphene::chain::limit_order_cancel_owner_mismatch::code_enum::code_value : + case graphene::chain::liquidity_pool_exchange_unfillable_price::code_enum::code_value : dlog( "client rejected message sent by peer ${peer}, ${e}", ("peer", originating_peer->get_remote_endpoint() )("e", e) ); break; From 38225a1422c8018c2762672103e73f0b8974355d Mon Sep 17 00:00:00 2001 From: Abit Date: Wed, 10 Aug 2022 11:14:53 +0200 Subject: [PATCH 172/338] Update license year to 2022 --- LICENSE.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE.txt 
b/LICENSE.txt index 973952b911..dab0004a09 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -1,4 +1,4 @@ -Copyright (c) 2015-2021 Cryptonomex Inc. and +Copyright (c) 2015-2022 Cryptonomex Inc. and contributors (see CONTRIBUTORS.txt) The MIT License From 94053f940e2e1be3ebe83fb7d17f18142be184da Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 12 Aug 2022 13:24:26 +0000 Subject: [PATCH 173/338] Do not listen to network if not accept connections --- libraries/app/application.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/libraries/app/application.cpp b/libraries/app/application.cpp index 5a2ea4864a..ea76f483a5 100644 --- a/libraries/app/application.cpp +++ b/libraries/app/application.cpp @@ -167,8 +167,6 @@ void application_impl::reset_p2p_node(const fc::path& data_dir) true ); else _p2p_network->set_listen_port(0, false); - _p2p_network->listen_to_p2p_network(); - ilog("Configured p2p node to listen on ${ip}", ("ip", _p2p_network->get_actual_listening_endpoint())); if ( _options->count("accept-incoming-connections") > 0 ) _p2p_network->set_accept_incoming_connections( _options->at("accept-incoming-connections").as() ); @@ -176,6 +174,9 @@ void application_impl::reset_p2p_node(const fc::path& data_dir) if ( _options->count("connect-to-new-peers") > 0 ) _p2p_network->set_connect_to_new_peers( _options->at( "connect-to-new-peers" ).as() ); + _p2p_network->listen_to_p2p_network(); + ilog("Configured p2p node to listen on ${ip}", ("ip", _p2p_network->get_actual_listening_endpoint())); + _p2p_network->connect_to_p2p_network(); _p2p_network->sync_from(net::item_id(net::core_message_type_enum::block_message_type, _chain_db->head_block_id()), From ad71e5c1b7310a48664c5db4b96d81a9bb4d9b74 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 12 Aug 2022 13:55:48 +0000 Subject: [PATCH 174/338] Remove unnecessary code --- libraries/net/node.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 3f93ee0ee7..10067c173f 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -3950,8 +3950,6 @@ namespace graphene { namespace net { namespace detail { uint32_t port = GRAPHENE_NET_DEFAULT_P2P_PORT; #endif _node_configuration.listen_endpoint.set_port( port ); - _node_configuration.accept_incoming_connections = true; - _node_configuration.wait_if_endpoint_is_busy = false; ilog( "generating new private key for this node" ); _node_configuration.private_key = fc::ecc::private_key::generate(); From 7d63ebc087346a7f6334e1bf4e8045e2d10f30d3 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 12 Aug 2022 14:02:11 +0000 Subject: [PATCH 175/338] Remove trailing whitespaces --- libraries/net/include/graphene/net/node.hpp | 6 ++-- libraries/net/node.cpp | 37 ++++++++++----------- libraries/net/node_impl.hxx | 6 ++-- 3 files changed, 24 insertions(+), 25 deletions(-) diff --git a/libraries/net/include/graphene/net/node.hpp b/libraries/net/include/graphene/net/node.hpp index 51e5881348..a2617214a5 100644 --- a/libraries/net/include/graphene/net/node.hpp +++ b/libraries/net/include/graphene/net/node.hpp @@ -76,7 +76,7 @@ namespace graphene { namespace net { */ virtual bool handle_block( const graphene::net::block_message& blk_msg, bool sync_mode, std::vector& contained_transaction_msg_ids ) = 0; - + /** * @brief Called when a new transaction comes in from the network * @@ -87,7 +87,7 @@ namespace graphene { namespace net { /** * @brief Called when a new message comes in from the network other than a - * block or a transaction. 
Currently there are no other possible + * block or a transaction. Currently there are no other possible * messages, so this should never be called. * * @throws exception if error validating the item, otherwise the item is @@ -197,7 +197,7 @@ namespace graphene { namespace net { void set_node_delegate( std::shared_ptr del ) const; void load_configuration( const fc::path& configuration_directory ); - + virtual void listen_to_p2p_network(); virtual void connect_to_p2p_network(); diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 10067c173f..8d30d813f3 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -195,10 +195,10 @@ namespace graphene { namespace net { namespace detail { list.insert(tmp); } } - catch(const fc::exception& ) + catch(const fc::exception& ) { wlog( "Address ${addr} invalid.", ("addr", str) ); - } + } } ); } @@ -208,7 +208,7 @@ namespace graphene { namespace net { namespace detail { // only pass those that are in the list AND we are connected to for(auto& it : advertise_list) { - graphene::net::peer_connection_ptr peer_conn + graphene::net::peer_connection_ptr peer_conn = impl->get_active_connection_for_endpoint( it.remote_endpoint ); if ( peer_conn != peer_connection_ptr() ) ret_val.push_back( it ); @@ -247,7 +247,7 @@ namespace graphene { namespace net { namespace detail { // filter out those in the exclude list for(const peer_connection_ptr& active_peer : impl->_active_connections) { - if (exclude_list.find( *active_peer->get_remote_endpoint() ) == exclude_list.end()) + if (exclude_list.find( *active_peer->get_remote_endpoint() ) == exclude_list.end()) reply.addresses.emplace_back(update_address_record(impl, active_peer)); } reply.addresses.shrink_to_fit(); @@ -700,7 +700,7 @@ namespace graphene { namespace net { namespace detail { }); break; } - } + } } if (!item_fetched) ++item_iter; @@ -711,7 +711,7 @@ namespace graphene { namespace net { namespace detail { for (const peer_and_items_to_fetch& peer_and_items : items_by_peer) { // the item lists are heterogenous and - // the fetch_items_message can only deal with one item type at a time. + // the fetch_items_message can only deal with one item type at a time. std::map > items_to_fetch_by_type; for (const item_id& item : peer_and_items.item_ids) items_to_fetch_by_type[item.item_type].push_back(item.item_hash); @@ -866,7 +866,7 @@ namespace graphene { namespace net { namespace detail { // This might not be so bad because it could make us initiate more connections and // reconnect with the rest of the network, or it might just futher isolate us. // As usual, the first step is to walk through all our peers and figure out which - // peers need action (disconneting, sending keepalives, etc), then we walk through + // peers need action (disconneting, sending keepalives, etc), then we walk through // those lists yielding at our leisure later. 
uint32_t handshaking_timeout = _peer_inactivity_timeout; @@ -967,7 +967,7 @@ namespace graphene { namespace net { namespace detail { wlog( "Sending a keepalive message to peer ${peer} who hasn't sent us any messages in the last ${timeout} seconds", ( "peer", active_peer->get_remote_endpoint() )("timeout", active_send_keepalive_timeout ) ); peers_to_send_keep_alive.push_back(active_peer); - } + } else if (active_peer->we_need_sync_items_from_peer && !active_peer->is_currently_handling_message() && !active_peer->item_ids_requested_from_peer && @@ -1084,7 +1084,6 @@ namespace graphene { namespace net { namespace detail { void node_impl::fetch_updated_peer_lists_loop() { VERIFY_CORRECT_THREAD(); - { fc::scoped_lock lock(_active_connections.get_mutex()); // JMJ 2018-10-22 Unsure why we're making a copy here, but this is probably unnecessary @@ -1107,7 +1106,7 @@ namespace graphene { namespace net { namespace detail { } } - // this has nothing to do with updating the peer list, but we need to prune this list + // this has nothing to do with updating the peer list, but we need to prune this list // at regular intervals, this is a fine place to do it. fc::time_point_sec oldest_failed_ids_to_keep(fc::time_point::now() - fc::minutes(15)); auto oldest_failed_ids_to_keep_iter = _recently_failed_items.get() @@ -1867,7 +1866,7 @@ namespace graphene { namespace net { namespace detail { catch (const peer_is_on_an_unreachable_fork&) { dlog("Peer is on a fork and there's no set of blocks we can provide to switch them to our fork"); - // we reply with an empty list as if we had an empty blockchain; + // we reply with an empty list as if we had an empty blockchain; // we don't want to disconnect because they may be able to provide // us with blocks on their chain } @@ -1991,7 +1990,7 @@ namespace graphene { namespace net { namespace detail { synopsis = _delegate->get_blockchain_synopsis(reference_point, number_of_blocks_after_reference_point); // TODO: it's possible that the returned synopsis is empty if the blockchain is empty (that's fine) - // or if the reference point is now past our undo history (that's not). + // or if the reference point is now past our undo history (that's not). // in the second case, we should mark this peer as one we're unable to sync with and // disconnect them. if (reference_point != item_hash_t() && synopsis.empty()) @@ -2004,10 +2003,10 @@ namespace graphene { namespace net { namespace detail { uint32_t first_block_num_in_ids_to_get = _delegate->get_block_number(original_ids_of_items_to_get.front()); uint32_t true_high_block_num = first_block_num_in_ids_to_get + original_ids_of_items_to_get.size() - 1; - // in order to generate a seamless synopsis, we need to be using the same low_block_num as the + // in order to generate a seamless synopsis, we need to be using the same low_block_num as the // backend code; the first block in the synopsis will be the low block number it used uint32_t low_block_num = synopsis.empty() ? 1 : _delegate->get_block_number(synopsis.front()); - + do { if( low_block_num >= first_block_num_in_ids_to_get ) @@ -2033,7 +2032,7 @@ namespace graphene { namespace net { namespace detail { try { std::vector blockchain_synopsis = create_blockchain_synopsis_for_peer( peer ); - + item_hash_t last_item_seen = blockchain_synopsis.empty() ? 
item_hash_t() : blockchain_synopsis.back(); dlog( "sync: sending a request for the next items after ${last_item_seen} to peer ${peer}, " "(full request is ${blockchain_synopsis})", @@ -2059,7 +2058,7 @@ namespace graphene { namespace net { namespace detail { if( originating_peer->item_ids_requested_from_peer ) { // verify that the peer's the block ids the peer sent is a valid response to our request; - // It should either be an empty list of blocks, or a list of blocks that builds off of one of + // It should either be an empty list of blocks, or a list of blocks that builds off of one of // the blocks in the synopsis we sent if (!blockchain_item_ids_inventory_message_received.item_hashes_available.empty()) { @@ -2283,7 +2282,7 @@ namespace graphene { namespace net { namespace detail { // append the remaining items to the peer's list boost::push_back(originating_peer->ids_of_items_to_get, item_hashes_received); - + uint32_t new_number_of_unfetched_items = calculate_unsynced_block_count_from_all_peers(); if (new_number_of_unfetched_items != _total_num_of_unfetched_items) _delegate->sync_status(blockchain_item_ids_inventory_message_received.item_type, @@ -2539,7 +2538,7 @@ namespace graphene { namespace net { namespace detail { // too, we can expect it to be around in this peer's cache for longer, so update its timestamp _items_to_fetch.get().modify(items_to_fetch_iter, [](prioritized_item_id& item) { item.timestamp = fc::time_point::now(); }); - } + } } } } @@ -3122,7 +3121,7 @@ namespace graphene { namespace net { namespace detail { { throw; } - catch (const unlinkable_block_exception& e) + catch (const unlinkable_block_exception& e) { restart_sync_exception = e; } diff --git a/libraries/net/node_impl.hxx b/libraries/net/node_impl.hxx index 1ec7242bd7..86410f72ac 100644 --- a/libraries/net/node_impl.hxx +++ b/libraries/net/node_impl.hxx @@ -174,7 +174,7 @@ public: fc::scoped_lock lock(mux); return std::unordered_set::find(key); } -}; +}; class blockchain_tied_message_cache { @@ -782,7 +782,7 @@ public: void listen_to_p2p_network(); void connect_to_p2p_network(node_impl_ptr self); void add_node( const fc::ip::endpoint& ep ); - void set_advertise_algorithm( const std::string& algo, + void set_advertise_algorithm( const std::string& algo, const std::vector& advertise_or_exclude_list ); void add_seed_node( const std::string& seed_string ); void resolve_seed_node_and_add( const std::string& seed_string ); @@ -830,7 +830,7 @@ public: }}} // end of namespace graphene::net::detail -FC_REFLECT( graphene::net::detail::node_configuration, +FC_REFLECT( graphene::net::detail::node_configuration, (listen_endpoint) (accept_incoming_connections) (connect_to_new_peers) From baf04681e72effa71c9ed40d70550d23291c2ba7 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 12 Aug 2022 14:08:25 +0000 Subject: [PATCH 176/338] Update docs --- libraries/net/include/graphene/net/node.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/net/include/graphene/net/node.hpp b/libraries/net/include/graphene/net/node.hpp index a2617214a5..ed6b75161d 100644 --- a/libraries/net/include/graphene/net/node.hpp +++ b/libraries/net/include/graphene/net/node.hpp @@ -277,7 +277,7 @@ namespace graphene { namespace net { * Allows the caller to determine how to respond to requests for peers * @param algo the algorithm to use ("exclude_list", "list", "nothing", "all") * @param advertise_or_exclude_list a list of nodes to - * advertise (if algo = "list") or exclude (if algo is "exclude") + * advertise (if algo is 
"list") or exclude (if algo is "exclude_list") */ void set_advertise_algorithm( const std::string& algo, const std::vector& advertise_or_exclude_list = std::vector() ); From d484ce8953ff43139b1da351b043a52f3a9a9ab4 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 12 Aug 2022 14:13:19 +0000 Subject: [PATCH 177/338] Add virtual destructor to address_builder class --- libraries/net/node_impl.hxx | 1 + 1 file changed, 1 insertion(+) diff --git a/libraries/net/node_impl.hxx b/libraries/net/node_impl.hxx index 86410f72ac..45f45341f8 100644 --- a/libraries/net/node_impl.hxx +++ b/libraries/net/node_impl.hxx @@ -415,6 +415,7 @@ public: static std::shared_ptr create_default_address_builder(); virtual void build( node_impl* impl, address_message& ) const = 0; virtual bool should_advertise(const fc::ip::endpoint& in ) const = 0; + virtual ~address_builder() = default; }; #ifdef P2P_IN_DEDICATED_THREAD From 0db53adf74238a1f5584723d95d6af678e2c1cfd Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 12 Aug 2022 14:27:25 +0000 Subject: [PATCH 178/338] Remove trailing whitespaces --- libraries/net/node.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 8d30d813f3..58fe7581ab 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -1031,8 +1031,8 @@ namespace graphene { namespace net { namespace detail { } // scoped_lock peers_to_terminate.clear(); - // if we're going to abruptly disconnect anyone, do it here - // (it doesn't yield). I don't think there would be any harm if this were + // if we're going to abruptly disconnect anyone, do it here + // (it doesn't yield). I don't think there would be any harm if this were // moved to the yielding section for( const peer_connection_ptr& peer : peers_to_disconnect_forcibly ) { From 73fb7a002406b24ee6aebc8671dce868b9942e82 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 13 Aug 2022 03:50:49 +0000 Subject: [PATCH 179/338] Fix network_mapper --- programs/network_mapper/network_mapper.cpp | 212 +++++++++++++-------- 1 file changed, 135 insertions(+), 77 deletions(-) diff --git a/programs/network_mapper/network_mapper.cpp b/programs/network_mapper/network_mapper.cpp index f228cff5ca..f26b08e352 100644 --- a/programs/network_mapper/network_mapper.cpp +++ b/programs/network_mapper/network_mapper.cpp @@ -16,65 +16,54 @@ class peer_probe : public graphene::net::peer_connection_delegate { public: - bool _peer_closed_connection; - bool _we_closed_connection; - graphene::net::peer_connection_ptr _connection; - std::vector _peers; - fc::ecc::public_key _node_id; + bool _connection_was_rejected = false; + bool _peer_closed_connection = false; + bool _we_closed_connection = false; + bool _done = false; + graphene::net::peer_connection_ptr _connection = graphene::net::peer_connection::make_shared(this); + fc::promise::ptr _probe_complete_promise = fc::promise::create("probe_complete"); + fc::ip::endpoint _remote; - bool _connection_was_rejected; - bool _done; - fc::promise::ptr _probe_complete_promise; + graphene::net::node_id_t _node_id; + std::vector _peers; public: - peer_probe() : - _peer_closed_connection(false), - _we_closed_connection(false), - _connection(graphene::net::peer_connection::make_shared(this)), - _connection_was_rejected(false), - _done(false), - _probe_complete_promise(fc::promise::create("probe_complete")) - {} - - void start(const fc::ip::endpoint& endpoint_to_probe, - const fc::ecc::private_key& my_node_id, - const graphene::chain::chain_id_type& chain_id) - { 
- _remote = endpoint_to_probe; - fc::future connect_task = fc::async([this](){ _connection->connect_to(_remote); }, "connect_task"); - try - { - connect_task.wait(fc::seconds(10)); - } - catch (const fc::timeout_exception&) - { - ilog("timeout connecting to node ${endpoint}", ("endpoint", endpoint_to_probe)); - connect_task.cancel(__FUNCTION__); - throw; - } + explicit peer_probe( const fc::ip::endpoint& remote ) : _remote(remote) { /* Nothing else to do */ } + + void start( const fc::ecc::public_key& my_node_id, + const fc::ecc::private_key& my_node_key, + const graphene::chain::chain_id_type& chain_id ) + { try { + // This blocks + _connection->connect_to(_remote); fc::sha256::encoder shared_secret_encoder; fc::sha512 shared_secret = _connection->get_shared_secret(); shared_secret_encoder.write(shared_secret.data(), sizeof(shared_secret)); - fc::ecc::compact_signature signature = my_node_id.sign_compact(shared_secret_encoder.result()); + fc::ecc::compact_signature signature = my_node_key.sign_compact(shared_secret_encoder.result()); graphene::net::hello_message hello("network_mapper", GRAPHENE_NET_PROTOCOL_VERSION, fc::ip::address(), 0, 0, - my_node_id.get_public_key(), + my_node_id, signature, - chain_id, + chain_id, fc::variant_object()); _connection->send_message(hello); - } + } catch( const fc::exception& e ) { + ilog( "Got exception when connecting to peer ${endpoint} ${e}", + ("endpoint", _remote) ("e", e.to_detail_string()) ); + _probe_complete_promise->set_exception( std::make_shared(e) ); + } } void on_message(graphene::net::peer_connection* originating_peer, const graphene::net::message& received_message) override { graphene::net::message_hash_type message_hash = received_message.id(); dlog( "handling message ${type} ${hash} size ${size} from peer ${endpoint}", - ( "type", graphene::net::core_message_type_enum(received_message.msg_type.value() ) )("hash", message_hash ) + ("type", graphene::net::core_message_type_enum(received_message.msg_type.value() ) ) + ("hash", message_hash ) ("size", received_message.size )("endpoint", originating_peer->get_remote_endpoint() ) ); switch ( received_message.msg_type.value() ) { @@ -82,19 +71,24 @@ class peer_probe : public graphene::net::peer_connection_delegate on_hello_message( originating_peer, received_message.as() ); break; case graphene::net::core_message_type_enum::connection_accepted_message_type: - on_connection_accepted_message( originating_peer, received_message.as() ); + on_connection_accepted_message( originating_peer, + received_message.as() ); break; case graphene::net::core_message_type_enum::connection_rejected_message_type: - on_connection_rejected_message( originating_peer, received_message.as() ); + on_connection_rejected_message( originating_peer, + received_message.as() ); break; case graphene::net::core_message_type_enum::address_request_message_type: - on_address_request_message( originating_peer, received_message.as() ); + on_address_request_message( originating_peer, + received_message.as() ); break; case graphene::net::core_message_type_enum::address_message_type: - on_address_message( originating_peer, received_message.as() ); + on_address_message( originating_peer, + received_message.as() ); break; case graphene::net::core_message_type_enum::closing_connection_message_type: - on_closing_connection_message( originating_peer, received_message.as() ); + on_closing_connection_message( originating_peer, + received_message.as() ); break; default: break; @@ -102,7 +96,7 @@ class peer_probe : public 
graphene::net::peer_connection_delegate } void on_hello_message(graphene::net::peer_connection* originating_peer, - const graphene::net::hello_message& hello_message_received) + const graphene::net::hello_message& hello_message_received) { _node_id = hello_message_received.node_public_key; if (hello_message_received.user_data.contains("node_id")) @@ -111,28 +105,33 @@ class peer_probe : public graphene::net::peer_connection_delegate } void on_connection_accepted_message(graphene::net::peer_connection* originating_peer, - const graphene::net::connection_accepted_message& connection_accepted_message_received) + const graphene::net::connection_accepted_message& connection_accepted_message_received) { _connection_was_rejected = false; originating_peer->send_message(graphene::net::address_request_message()); } void on_connection_rejected_message( graphene::net::peer_connection* originating_peer, - const graphene::net::connection_rejected_message& connection_rejected_message_received ) + const graphene::net::connection_rejected_message& connection_rejected_message_received ) { + // Note: We will be rejected and disconnected if our chain_id is not the same as the peer's . + // If we aren't be disconnected, it is OK to send an address request message. _connection_was_rejected = true; + wlog( "peer ${endpoint} rejected our connection with reason ${reason}", + ("endpoint", originating_peer->get_remote_endpoint() ) + ("reason", connection_rejected_message_received.reason_code ) ); originating_peer->send_message(graphene::net::address_request_message()); } void on_address_request_message(graphene::net::peer_connection* originating_peer, - const graphene::net::address_request_message& address_request_message_received) + const graphene::net::address_request_message& address_request_message_received) { originating_peer->send_message(graphene::net::address_message()); } void on_address_message(graphene::net::peer_connection* originating_peer, - const graphene::net::address_message& address_message_received) + const graphene::net::address_message& address_message_received) { _peers = address_message_received.addresses; originating_peer->send_message(graphene::net::closing_connection_message("Thanks for the info")); @@ -140,7 +139,7 @@ class peer_probe : public graphene::net::peer_connection_delegate } void on_closing_connection_message(graphene::net::peer_connection* originating_peer, - const graphene::net::closing_connection_message& closing_connection_message_received) + const graphene::net::closing_connection_message& closing_connection_message_received) { if (_we_closed_connection) _connection->close_connection(); @@ -195,40 +194,37 @@ int main(int argc, char** argv) fc::ip::endpoint seed_node1 = nodes_to_visit.front(); - fc::ecc::private_key my_node_id = fc::ecc::private_key::generate(); + fc::ecc::private_key my_node_key = fc::ecc::private_key::generate(); + auto my_node_id = my_node_key.get_public_key(); std::map address_info_by_node_id; std::map > connections_by_node_id; + std::map node_id_by_endpoint; + std::set outdated_nodes; std::vector> probes; + constexpr size_t max_concurrent_probes = 200; while (!nodes_to_visit.empty() || !probes.empty()) { - while (!nodes_to_visit.empty()) + while (!nodes_to_visit.empty() && probes.size() < max_concurrent_probes ) { fc::ip::endpoint remote = nodes_to_visit.front(); nodes_to_visit.pop(); nodes_to_visit_set.erase( remote ); nodes_already_visited.insert( remote ); - try - { - auto probe = std::make_shared(); - probe->start(remote, my_node_id, chain_id); - 
probes.emplace_back( std::move( probe ) ); - } - catch (const fc::exception&) - { - std::cerr << "Failed to connect " << fc::string(remote) << " - skipping!" << std::endl; - } + probes.emplace_back( std::make_shared(remote) ); + auto& probe = *probes.back(); + fc::async( [&probe, &my_node_id, &my_node_key, &chain_id](){ + probe.start(my_node_id, my_node_key, chain_id); + }); } if (!probes.empty()) { - fc::yield(); std::vector> running; for ( auto& probe : probes ) { if (probe->_probe_complete_promise->error()) { - std::cerr << fc::string(probe->_remote) << " ran into an error!\n"; continue; } if (!probe->_probe_complete_promise->ready()) @@ -237,20 +233,31 @@ int main(int argc, char** argv) continue; } - if( probe->_node_id.valid() ) + idump( (probe->_node_id)(probe->_remote)(probe->_peers.size()) ); + { graphene::net::address_info this_node_info; this_node_info.direction = graphene::net::peer_connection_direction::outbound; + this_node_info.firewalled = graphene::net::firewalled_state::not_firewalled; this_node_info.remote_endpoint = probe->_remote; this_node_info.node_id = probe->_node_id; connections_by_node_id[this_node_info.node_id] = probe->_peers; - if (address_info_by_node_id.find(this_node_info.node_id) == address_info_by_node_id.end()) - address_info_by_node_id[this_node_info.node_id] = this_node_info; + // Note: Update if already exists + address_info_by_node_id[this_node_info.node_id] = this_node_info; + node_id_by_endpoint[probe->_remote] = probe->_node_id; + + for( const auto& info: address_info_by_node_id ) + { + if( info.second.remote_endpoint == probe->_remote && info.first != probe->_node_id ) + outdated_nodes.insert( info.first ); + } } for (const graphene::net::address_info& info : probe->_peers) { + if (info.node_id == my_node_id) + continue; if (nodes_already_visited.find(info.remote_endpoint) == nodes_already_visited.end() && nodes_to_visit_set.find(info.remote_endpoint) == nodes_to_visit_set.end()) { @@ -258,27 +265,68 @@ int main(int argc, char** argv) nodes_to_visit_set.insert(info.remote_endpoint); } if (address_info_by_node_id.find(info.node_id) == address_info_by_node_id.end()) + { address_info_by_node_id[info.node_id] = info; + // Set it to unknown here, we will check later + address_info_by_node_id[info.node_id].firewalled = graphene::net::firewalled_state::unknown; + } } } - probes = std::move( running ); - std::cout << address_info_by_node_id.size() << " checked, " - << probes.size() << " active, " - << nodes_to_visit.size() << " to do\n"; + constexpr uint32_t five = 5; + if( running.size() == probes.size() ) + fc::usleep( fc::seconds( five ) ); + else + probes = std::move( running ); + } + ilog( "${total} nodes detected, ${outdated} outdated, ${tried} endpoints tried, " + "${reachable} reachable, ${trying} trying, ${todo} to do", + ( "total", address_info_by_node_id.size() ) + ( "outdated", outdated_nodes.size() ) + ( "tried", nodes_already_visited.size() ) + ( "reachable", node_id_by_endpoint.size() ) + ( "trying", probes.size() ) + ( "todo", nodes_to_visit.size() ) ); + } + + // Remove outdated nodes + for( const auto& node : outdated_nodes ) + { + address_info_by_node_id.erase(node); + connections_by_node_id.erase(node); + } + for( auto& connection_by_id : connections_by_node_id ) + { + std::vector updated_connections; + for( const auto& connection : connection_by_id.second ) + { + if( outdated_nodes.find( connection.node_id ) == outdated_nodes.end() ) + updated_connections.push_back( connection ); } + if( updated_connections.size() != 
connection_by_id.second.size() ) + std::swap( updated_connections, connection_by_id.second ); } + ilog( "${total} nodes, ${reachable} reachable", + ( "total", address_info_by_node_id.size() ) + ( "reachable", node_id_by_endpoint.size() ) ); + graphene::net::node_id_t seed_node_id; std::set non_firewalled_nodes_set; for (const auto& address_info_for_node : address_info_by_node_id) { if (address_info_for_node.second.remote_endpoint == seed_node1) seed_node_id = address_info_for_node.first; - non_firewalled_nodes_set.insert(address_info_for_node.first); + if (address_info_for_node.second.firewalled == graphene::net::firewalled_state::not_firewalled) + non_firewalled_nodes_set.insert(address_info_for_node.first); } std::set seed_node_connections; + std::set seed_node_non_fw_connections; for (const graphene::net::address_info& info : connections_by_node_id[seed_node_id]) + { seed_node_connections.insert(info.node_id); + if( non_firewalled_nodes_set.find(info.node_id) != non_firewalled_nodes_set.end() ) + seed_node_non_fw_connections.insert(info.node_id); + } std::set seed_node_missing_connections; std::set_difference(non_firewalled_nodes_set.begin(), non_firewalled_nodes_set.end(), seed_node_connections.begin(), seed_node_connections.end(), @@ -288,11 +336,18 @@ int main(int argc, char** argv) std::ofstream dot_stream((data_dir / "network_graph.dot").string().c_str()); dot_stream << "graph G {\n"; - dot_stream << " // Total " << address_info_by_node_id.size() << " nodes, firewalled: " << (address_info_by_node_id.size() - non_firewalled_nodes_set.size()) + dot_stream << " // Total " << address_info_by_node_id.size() << " nodes, firewalled: " + << (address_info_by_node_id.size() - non_firewalled_nodes_set.size()) << ", non-firewalled: " << non_firewalled_nodes_set.size() << "\n"; - dot_stream << " // Seed node is " << (std::string)address_info_by_node_id[seed_node_id].remote_endpoint << " id: " << fc::variant( seed_node_id, 1 ).as_string() << "\n"; + dot_stream << " // Seed node is " << (std::string)address_info_by_node_id[seed_node_id].remote_endpoint + << " id: " << fc::variant( seed_node_id, 1 ).as_string() << "\n"; dot_stream << " // Seed node is connected to " << connections_by_node_id[seed_node_id].size() << " nodes\n"; - dot_stream << " // Seed node is missing connections to " << seed_node_missing_connections.size() << " non-firewalled nodes:\n"; + dot_stream << " // Seed node is connected to " << seed_node_non_fw_connections.size() + << " non-firewalled nodes:\n"; + for (const graphene::net::node_id_t& id : seed_node_non_fw_connections) + dot_stream << " // " << (std::string)address_info_by_node_id[id].remote_endpoint << "\n"; + dot_stream << " // Seed node is missing connections to " << seed_node_missing_connections.size() + << " non-firewalled nodes:\n"; for (const graphene::net::node_id_t& id : seed_node_missing_connections) dot_stream << " // " << (std::string)address_info_by_node_id[id].remote_endpoint << "\n"; @@ -300,13 +355,16 @@ int main(int argc, char** argv) for (const auto& address_info_for_node : address_info_by_node_id) { - dot_stream << " \"" << fc::variant( address_info_for_node.first, 1 ).as_string() << "\"[label=\"" << (std::string)address_info_for_node.second.remote_endpoint << "\""; - dot_stream << ",shape=rectangle"; + dot_stream << " \"" << fc::variant( address_info_for_node.first, 1 ).as_string() + << "\"[label=\"" << (std::string)address_info_for_node.second.remote_endpoint << "\""; + if (address_info_for_node.second.firewalled != 
graphene::net::firewalled_state::not_firewalled) + dot_stream << ",shape=rectangle"; dot_stream << "];\n"; } for (auto& node_and_connections : connections_by_node_id) for (const graphene::net::address_info& this_connection : node_and_connections.second) - dot_stream << " \"" << fc::variant( node_and_connections.first, 2 ).as_string() << "\" -- \"" << fc::variant( this_connection.node_id, 1 ).as_string() << "\";\n"; + dot_stream << " \"" << fc::variant( node_and_connections.first, 2 ).as_string() + << "\" -- \"" << fc::variant( this_connection.node_id, 1 ).as_string() << "\";\n"; dot_stream << "}\n"; From 896049df985b9e41a61a8facd6a2ffcf71d07730 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 13 Aug 2022 08:16:51 +0000 Subject: [PATCH 180/338] Deal with nodes that have changed ID --- programs/network_mapper/network_mapper.cpp | 26 ++++++++++------------ 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/programs/network_mapper/network_mapper.cpp b/programs/network_mapper/network_mapper.cpp index f26b08e352..d7fb0fb084 100644 --- a/programs/network_mapper/network_mapper.cpp +++ b/programs/network_mapper/network_mapper.cpp @@ -199,7 +199,7 @@ int main(int argc, char** argv) std::map address_info_by_node_id; std::map > connections_by_node_id; std::map node_id_by_endpoint; - std::set outdated_nodes; + std::map outdated_nodes; std::vector> probes; constexpr size_t max_concurrent_probes = 200; @@ -242,15 +242,18 @@ int main(int argc, char** argv) this_node_info.remote_endpoint = probe->_remote; this_node_info.node_id = probe->_node_id; + // Note: Update if already exists. + // Some nodes may have the same node_id, E.G. created by copying the whole data directory of + // another node. In this case data here could be overwritten. connections_by_node_id[this_node_info.node_id] = probe->_peers; - // Note: Update if already exists address_info_by_node_id[this_node_info.node_id] = this_node_info; + node_id_by_endpoint[probe->_remote] = probe->_node_id; for( const auto& info: address_info_by_node_id ) { if( info.second.remote_endpoint == probe->_remote && info.first != probe->_node_id ) - outdated_nodes.insert( info.first ); + outdated_nodes[info.first] = probe->_node_id; } } @@ -289,21 +292,16 @@ int main(int argc, char** argv) } // Remove outdated nodes - for( const auto& node : outdated_nodes ) - { - address_info_by_node_id.erase(node); - connections_by_node_id.erase(node); - } + for( const auto& node_pair : outdated_nodes ) + address_info_by_node_id.erase(node_pair.first); + // Update connection info, replace outdated node_id with new node_id for( auto& connection_by_id : connections_by_node_id ) { - std::vector updated_connections; - for( const auto& connection : connection_by_id.second ) + for( auto& connection : connection_by_id.second ) { - if( outdated_nodes.find( connection.node_id ) == outdated_nodes.end() ) - updated_connections.push_back( connection ); + if( outdated_nodes.find( connection.node_id ) != outdated_nodes.end() ) + connection.node_id = outdated_nodes[connection.node_id]; } - if( updated_connections.size() != connection_by_id.second.size() ) - std::swap( updated_connections, connection_by_id.second ); } ilog( "${total} nodes, ${reachable} reachable", From 9fa8e97a3711379c0360536bdc01adab107403f9 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 13 Aug 2022 09:20:26 +0000 Subject: [PATCH 181/338] Fix code smells --- programs/network_mapper/network_mapper.cpp | 57 ++++++++++++---------- 1 file changed, 30 insertions(+), 27 deletions(-) diff --git 
a/programs/network_mapper/network_mapper.cpp b/programs/network_mapper/network_mapper.cpp index d7fb0fb084..c729d037c7 100644 --- a/programs/network_mapper/network_mapper.cpp +++ b/programs/network_mapper/network_mapper.cpp @@ -219,10 +219,12 @@ int main(int argc, char** argv) }); } - if (!probes.empty()) + if( probes.empty() ) + continue; + + std::vector> running; + for ( auto& probe : probes ) { - std::vector> running; - for ( auto& probe : probes ) { if (probe->_probe_complete_promise->error()) { continue; @@ -235,26 +237,24 @@ int main(int argc, char** argv) idump( (probe->_node_id)(probe->_remote)(probe->_peers.size()) ); - { - graphene::net::address_info this_node_info; - this_node_info.direction = graphene::net::peer_connection_direction::outbound; - this_node_info.firewalled = graphene::net::firewalled_state::not_firewalled; - this_node_info.remote_endpoint = probe->_remote; - this_node_info.node_id = probe->_node_id; + graphene::net::address_info this_node_info; + this_node_info.direction = graphene::net::peer_connection_direction::outbound; + this_node_info.firewalled = graphene::net::firewalled_state::not_firewalled; + this_node_info.remote_endpoint = probe->_remote; + this_node_info.node_id = probe->_node_id; - // Note: Update if already exists. - // Some nodes may have the same node_id, E.G. created by copying the whole data directory of - // another node. In this case data here could be overwritten. - connections_by_node_id[this_node_info.node_id] = probe->_peers; - address_info_by_node_id[this_node_info.node_id] = this_node_info; + // Note: Update if already exists. + // Some nodes may have the same node_id, E.G. created by copying the whole data directory of + // another node. In this case data here could be overwritten. + connections_by_node_id[this_node_info.node_id] = probe->_peers; + address_info_by_node_id[this_node_info.node_id] = this_node_info; - node_id_by_endpoint[probe->_remote] = probe->_node_id; + node_id_by_endpoint[probe->_remote] = probe->_node_id; - for( const auto& info: address_info_by_node_id ) - { - if( info.second.remote_endpoint == probe->_remote && info.first != probe->_node_id ) - outdated_nodes[info.first] = probe->_node_id; - } + for( const auto& info: address_info_by_node_id ) + { + if( info.second.remote_endpoint == probe->_remote && info.first != probe->_node_id ) + outdated_nodes[info.first] = probe->_node_id; } for (const graphene::net::address_info& info : probe->_peers) @@ -274,13 +274,14 @@ int main(int argc, char** argv) address_info_by_node_id[info.node_id].firewalled = graphene::net::firewalled_state::unknown; } } - } - constexpr uint32_t five = 5; - if( running.size() == probes.size() ) - fc::usleep( fc::seconds( five ) ); - else - probes = std::move( running ); } + + constexpr uint32_t five = 5; + if( running.size() == probes.size() ) + fc::usleep( fc::seconds( five ) ); + else + probes = std::move( running ); + ilog( "${total} nodes detected, ${outdated} outdated, ${tried} endpoints tried, " "${reachable} reachable, ${trying} trying, ${todo} to do", ( "total", address_info_by_node_id.size() ) @@ -289,6 +290,7 @@ int main(int argc, char** argv) ( "reachable", node_id_by_endpoint.size() ) ( "trying", probes.size() ) ( "todo", nodes_to_visit.size() ) ); + } // Remove outdated nodes @@ -359,9 +361,10 @@ int main(int argc, char** argv) dot_stream << ",shape=rectangle"; dot_stream << "];\n"; } + constexpr uint16_t pair_depth = 2; for (auto& node_and_connections : connections_by_node_id) for (const graphene::net::address_info& 
this_connection : node_and_connections.second) - dot_stream << " \"" << fc::variant( node_and_connections.first, 2 ).as_string() + dot_stream << " \"" << fc::variant( node_and_connections.first, pair_depth ).as_string() << "\" -- \"" << fc::variant( this_connection.node_id, 1 ).as_string() << "\";\n"; dot_stream << "}\n"; From 50bfefc4609623c0b6f88497d4d441317465082f Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 14 Aug 2022 03:20:14 +0000 Subject: [PATCH 182/338] Fix code smells --- programs/network_mapper/network_mapper.cpp | 71 ++++++++++++---------- 1 file changed, 39 insertions(+), 32 deletions(-) diff --git a/programs/network_mapper/network_mapper.cpp b/programs/network_mapper/network_mapper.cpp index c729d037c7..5207c4165c 100644 --- a/programs/network_mapper/network_mapper.cpp +++ b/programs/network_mapper/network_mapper.cpp @@ -202,39 +202,11 @@ int main(int argc, char** argv) std::map outdated_nodes; std::vector> probes; - constexpr size_t max_concurrent_probes = 200; - while (!nodes_to_visit.empty() || !probes.empty()) + const auto& update_info_by_probe = [ &connections_by_node_id, &address_info_by_node_id, + &node_id_by_endpoint, &outdated_nodes, &my_node_id, + &nodes_already_visited, &nodes_to_visit_set, &nodes_to_visit ] + ( const std::shared_ptr& probe ) { - while (!nodes_to_visit.empty() && probes.size() < max_concurrent_probes ) - { - fc::ip::endpoint remote = nodes_to_visit.front(); - nodes_to_visit.pop(); - nodes_to_visit_set.erase( remote ); - nodes_already_visited.insert( remote ); - - probes.emplace_back( std::make_shared(remote) ); - auto& probe = *probes.back(); - fc::async( [&probe, &my_node_id, &my_node_key, &chain_id](){ - probe.start(my_node_id, my_node_key, chain_id); - }); - } - - if( probes.empty() ) - continue; - - std::vector> running; - for ( auto& probe : probes ) - { - if (probe->_probe_complete_promise->error()) - { - continue; - } - if (!probe->_probe_complete_promise->ready()) - { - running.push_back( probe ); - continue; - } - idump( (probe->_node_id)(probe->_remote)(probe->_peers.size()) ); graphene::net::address_info this_node_info; @@ -274,6 +246,41 @@ int main(int argc, char** argv) address_info_by_node_id[info.node_id].firewalled = graphene::net::firewalled_state::unknown; } } + }; + + constexpr size_t max_concurrent_probes = 200; + while (!nodes_to_visit.empty() || !probes.empty()) + { + while (!nodes_to_visit.empty() && probes.size() < max_concurrent_probes ) + { + fc::ip::endpoint remote = nodes_to_visit.front(); + nodes_to_visit.pop(); + nodes_to_visit_set.erase( remote ); + nodes_already_visited.insert( remote ); + + probes.emplace_back( std::make_shared(remote) ); + auto& probe = *probes.back(); + fc::async( [&probe, &my_node_id, &my_node_key, &chain_id](){ + probe.start(my_node_id, my_node_key, chain_id); + }); + } + + if( probes.empty() ) + continue; + + std::vector> running; + for ( auto& probe : probes ) + { + if (probe->_probe_complete_promise->error()) + { + continue; + } + if (!probe->_probe_complete_promise->ready()) + { + running.push_back( probe ); + continue; + } + update_info_by_probe(probe); } constexpr uint32_t five = 5; From a08ddd725f52a5a90a553cc732b68443084ac1b5 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 14 Aug 2022 03:44:22 +0000 Subject: [PATCH 183/338] Replace non-public addresses with public addresses --- programs/network_mapper/network_mapper.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/programs/network_mapper/network_mapper.cpp b/programs/network_mapper/network_mapper.cpp index 
5207c4165c..a18a868a4c 100644 --- a/programs/network_mapper/network_mapper.cpp +++ b/programs/network_mapper/network_mapper.cpp @@ -245,6 +245,12 @@ int main(int argc, char** argv) // Set it to unknown here, we will check later address_info_by_node_id[info.node_id].firewalled = graphene::net::firewalled_state::unknown; } + else if ( !address_info_by_node_id[info.node_id].remote_endpoint.get_address().is_public_address() + && info.remote_endpoint.get_address().is_public_address() ) + { + // Replace private or local addresses with public addresses when possible + address_info_by_node_id[info.node_id].remote_endpoint = info.remote_endpoint; + } } }; From 52c6649f26e88420c8dccc9be70b3ab1c35bb43b Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 14 Aug 2022 04:02:49 +0000 Subject: [PATCH 184/338] Add comments --- programs/network_mapper/network_mapper.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/programs/network_mapper/network_mapper.cpp b/programs/network_mapper/network_mapper.cpp index a18a868a4c..fc1f354f4d 100644 --- a/programs/network_mapper/network_mapper.cpp +++ b/programs/network_mapper/network_mapper.cpp @@ -149,6 +149,9 @@ class peer_probe : public graphene::net::peer_connection_delegate void on_connection_closed(graphene::net::peer_connection* originating_peer) override { + // Note: In rare cases, the peer may neither send us an address_message nor close the connection, + // causing us to wait forever. + // We tolerate it, because this program (network_mapper) is not critical. _done = true; _probe_complete_promise->set_value(); } From 812b93538b38f1b63826b4231c98113e202811d4 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 14 Aug 2022 04:51:47 +0000 Subject: [PATCH 185/338] Fix a code smell --- programs/network_mapper/network_mapper.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/network_mapper/network_mapper.cpp b/programs/network_mapper/network_mapper.cpp index fc1f354f4d..951dbddcb6 100644 --- a/programs/network_mapper/network_mapper.cpp +++ b/programs/network_mapper/network_mapper.cpp @@ -278,7 +278,7 @@ int main(int argc, char** argv) continue; std::vector> running; - for ( auto& probe : probes ) + for ( const auto& probe : probes ) { if (probe->_probe_complete_promise->error()) { From b4f632944d4d4bc0a895f194951d48c2c545084a Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 14 Aug 2022 11:06:13 +0000 Subject: [PATCH 186/338] Update address records when building by list --- libraries/net/node.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 58fe7581ab..8e2f4bfc86 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -210,8 +210,8 @@ namespace graphene { namespace net { namespace detail { { graphene::net::peer_connection_ptr peer_conn = impl->get_active_connection_for_endpoint( it.remote_endpoint ); - if ( peer_conn != peer_connection_ptr() ) - ret_val.push_back( it ); + if( peer_conn != peer_connection_ptr() ) + ret_val.emplace_back( update_address_record( impl, peer_conn ) ); } reply.addresses = std::move(ret_val); } From a0a7410c11440da9c82d168e8dfd7bf64746a2f6 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 14 Aug 2022 12:08:48 +0000 Subject: [PATCH 187/338] Listen to default but not random port by default And * add locking for address builders * refactor some code to fix code smells --- libraries/app/application.cpp | 3 +- .../include/graphene/net/core_messages.hpp | 17 --- libraries/net/include/graphene/net/node.hpp | 11 -- 
libraries/net/node.cpp | 120 ++++++++++-------- libraries/net/node_impl.hxx | 2 +- 5 files changed, 66 insertions(+), 87 deletions(-) diff --git a/libraries/app/application.cpp b/libraries/app/application.cpp index ea76f483a5..e2f45a9b73 100644 --- a/libraries/app/application.cpp +++ b/libraries/app/application.cpp @@ -165,8 +165,7 @@ void application_impl::reset_p2p_node(const fc::path& data_dir) if( _options->count("p2p-endpoint") > 0 ) _p2p_network->set_listen_endpoint( fc::ip::endpoint::from_string(_options->at("p2p-endpoint").as()), true ); - else - _p2p_network->set_listen_port(0, false); + // else try to listen on the default port first, if failed, use a random port if ( _options->count("accept-incoming-connections") > 0 ) _p2p_network->set_accept_incoming_connections( _options->at("accept-incoming-connections").as() ); diff --git a/libraries/net/include/graphene/net/core_messages.hpp b/libraries/net/include/graphene/net/core_messages.hpp index 04a08ba94f..bce3fd0522 100644 --- a/libraries/net/include/graphene/net/core_messages.hpp +++ b/libraries/net/include/graphene/net/core_messages.hpp @@ -295,23 +295,6 @@ namespace graphene { namespace net { {} }; - struct address_endpoint_comparator - { - using is_transparent = void; - bool operator()(const address_info& lhs, const address_info& rhs) const - { - return lhs.remote_endpoint < rhs.remote_endpoint; - } - bool operator()(const fc::ip::endpoint in, const address_info& addr) const - { - return in < addr.remote_endpoint; - } - bool operator()(const address_info& addr, const fc::ip::endpoint& in) const - { - return addr.remote_endpoint < in; - } - }; - struct address_message { static const core_message_type_enum type; diff --git a/libraries/net/include/graphene/net/node.hpp b/libraries/net/include/graphene/net/node.hpp index ed6b75161d..065b4d130d 100644 --- a/libraries/net/include/graphene/net/node.hpp +++ b/libraries/net/include/graphene/net/node.hpp @@ -225,17 +225,6 @@ namespace graphene { namespace net { */ virtual void connect_to_endpoint( const fc::ip::endpoint& ep ); - /** - * @brief Helper to convert a string to a collection of endpoints - * - * This converts a string (i.e. "bitshares.eu:665535") to a collection of endpoints. - * NOTE: Throws an exception if not in correct format or was unable to resolve URL. - * - * @param in the incoming string - * @returns a vector of endpoints - */ - static std::vector resolve_string_to_ip_endpoints( const std::string& in ); - /** * Specifies the network interface and port upon which incoming * connections should be accepted. 
diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 8e2f4bfc86..fc67dbcfb5 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -182,18 +182,7 @@ namespace graphene { namespace net { namespace detail { // ignore fc exceptions (like poorly formatted endpoints) try { - fc::ip::endpoint ep = fc::ip::endpoint::from_string(str); - if (list.find(ep) == list.end() ) - { - graphene::net::address_info tmp( - ep, - fc::time_point_sec(), - fc::microseconds(0), - node_id_t(), - peer_connection_direction::unknown, - firewalled_state::unknown ); - list.insert(tmp); - } + list.insert( fc::ip::endpoint::from_string(str) ); } catch(const fc::exception& ) { @@ -205,11 +194,12 @@ namespace graphene { namespace net { namespace detail { void build(node_impl* impl, address_message& reply) const override { std::vector ret_val; + ret_val.reserve( advertise_list.size() ); // only pass those that are in the list AND we are connected to - for(auto& it : advertise_list) + for( const auto& it : advertise_list ) { - graphene::net::peer_connection_ptr peer_conn - = impl->get_active_connection_for_endpoint( it.remote_endpoint ); + fc::scoped_lock lock(impl->_active_connections.get_mutex()); + graphene::net::peer_connection_ptr peer_conn = impl->get_active_connection_for_endpoint( it ); if( peer_conn != peer_connection_ptr() ) ret_val.emplace_back( update_address_record( impl, peer_conn ) ); } @@ -222,7 +212,7 @@ namespace graphene { namespace net { namespace detail { } private: - std::set advertise_list; + fc::flat_set advertise_list; }; /**** @@ -235,29 +225,37 @@ namespace graphene { namespace net { namespace detail { { FC_ASSERT( !address_list.empty(), "The exclude peer node list must not be empty" ); std::for_each( address_list.begin(), address_list.end(), - [&exclude_list = exclude_list](const std::string& input) + [&exclude_list = exclude_list](const std::string& str) { - exclude_list.insert(input); + // ignore fc exceptions (like poorly formatted endpoints) + try + { + exclude_list.insert( fc::ip::endpoint::from_string(str) ); + } + catch(const fc::exception& ) + { + wlog( "Address ${addr} invalid", ("addr", str) ); + } }); } void build(node_impl* impl, address_message& reply) const override { reply.addresses.clear(); reply.addresses.reserve(impl->_active_connections.size()); + fc::scoped_lock lock(impl->_active_connections.get_mutex()); // filter out those in the exclude list - for(const peer_connection_ptr& active_peer : impl->_active_connections) + for( const peer_connection_ptr& active_peer : impl->_active_connections ) { - if (exclude_list.find( *active_peer->get_remote_endpoint() ) == exclude_list.end()) - reply.addresses.emplace_back(update_address_record(impl, active_peer)); + if( exclude_list.find( *active_peer->get_remote_endpoint() ) == exclude_list.end() ) + reply.addresses.emplace_back( update_address_record( impl, active_peer ) ); } - reply.addresses.shrink_to_fit(); } bool should_advertise( const fc::ip::endpoint& in ) const override { return ( exclude_list.find( in ) == exclude_list.end() ); } private: - fc::flat_set exclude_list; + fc::flat_set exclude_list; }; /*** @@ -269,9 +267,10 @@ namespace graphene { namespace net { namespace detail { { reply.addresses.clear(); reply.addresses.reserve(impl->_active_connections.size()); - for (const peer_connection_ptr& active_peer : impl->_active_connections) + fc::scoped_lock lock(impl->_active_connections.get_mutex()); + for( const peer_connection_ptr& active_peer : impl->_active_connections ) { - 
reply.addresses.emplace_back(update_address_record(impl, active_peer)); + reply.addresses.emplace_back( update_address_record( impl, active_peer ) ); } } bool should_advertise( const fc::ip::endpoint& in ) const override @@ -3952,6 +3951,7 @@ namespace graphene { namespace net { namespace detail { ilog( "generating new private key for this node" ); _node_configuration.private_key = fc::ecc::private_key::generate(); + save_node_configuration(); } _node_public_key = _node_configuration.private_key.get_public_key().serialize(); @@ -4131,6 +4131,44 @@ namespace graphene { namespace net { namespace detail { resolve_seed_node_and_add( endpoint_string ); } + /** + * @brief Helper to convert a string to a collection of endpoints + * + * This converts a string (i.e. "bitshares.eu:665535") to a collection of endpoints. + * NOTE: Throws an exception if not in correct format or was unable to resolve URL. + * + * @param in the incoming string + * @returns a vector of endpoints + */ + static std::vector resolve_string_to_ip_endpoints(const std::string& in) + { + try + { + std::string::size_type colon_pos = in.find(':'); + if (colon_pos == std::string::npos) + FC_THROW("Missing required port number in endpoint string \"${endpoint_string}\"", + ("endpoint_string", in)); + std::string port_string = in.substr(colon_pos + 1); + try + { + uint16_t port = boost::lexical_cast(port_string); + + std::string hostname = in.substr(0, colon_pos); + std::vector endpoints = fc::resolve(hostname, port); + if (endpoints.empty()) + FC_THROW_EXCEPTION( fc::unknown_host_exception, + "The host name can not be resolved: ${hostname}", + ("hostname", hostname) ); + return endpoints; + } + catch (const boost::bad_lexical_cast&) + { + FC_THROW("Bad port: ${port}", ("port", port_string)); + } + } + FC_CAPTURE_AND_RETHROW((in)) + } + void node_impl::resolve_seed_node_and_add(const std::string& endpoint_string) { VERIFY_CORRECT_THREAD(); @@ -4138,7 +4176,7 @@ namespace graphene { namespace net { namespace detail { ilog("Resolving seed node ${endpoint}", ("endpoint", endpoint_string)); try { - endpoints = graphene::net::node::resolve_string_to_ip_endpoints(endpoint_string); + endpoints = resolve_string_to_ip_endpoints(endpoint_string); } catch(...) 
{ @@ -5037,36 +5075,6 @@ namespace graphene { namespace net { namespace detail { } // end namespace detail - // TODO move this function to impl class - std::vector node::resolve_string_to_ip_endpoints(const std::string& in) - { - try - { - std::string::size_type colon_pos = in.find(':'); - if (colon_pos == std::string::npos) - FC_THROW("Missing required port number in endpoint string \"${endpoint_string}\"", - ("endpoint_string", in)); - std::string port_string = in.substr(colon_pos + 1); - try - { - uint16_t port = boost::lexical_cast(port_string); - - std::string hostname = in.substr(0, colon_pos); - std::vector endpoints = fc::resolve(hostname, port); - if (endpoints.empty()) - FC_THROW_EXCEPTION( fc::unknown_host_exception, - "The host name can not be resolved: ${hostname}", - ("hostname", hostname) ); - return endpoints; - } - catch (const boost::bad_lexical_cast&) - { - FC_THROW("Bad port: ${port}", ("port", port_string)); - } - } - FC_CAPTURE_AND_RETHROW((in)) - } - void node::add_seed_nodes(std::vector seeds) { for(const std::string& endpoint_string : seeds ) diff --git a/libraries/net/node_impl.hxx b/libraries/net/node_impl.hxx index 45f45341f8..825823526b 100644 --- a/libraries/net/node_impl.hxx +++ b/libraries/net/node_impl.hxx @@ -396,7 +396,7 @@ struct node_configuration fc::ip::endpoint listen_endpoint; bool accept_incoming_connections = true; bool connect_to_new_peers = true; - bool wait_if_endpoint_is_busy = true; + bool wait_if_endpoint_is_busy = false; /** * Originally, our p2p code just had a 'node-id' that was a random number identifying this node * on the network. This is now a private key/public key pair, where the public key is used From 53ab60acb52a05ee7934f8eb1a8e2d5966a4602b Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 14 Aug 2022 13:11:01 +0000 Subject: [PATCH 188/338] Simplify code --- libraries/net/node.cpp | 101 ++++++++++++------------------------ libraries/net/node_impl.hxx | 2 +- 2 files changed, 35 insertions(+), 68 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index fc67dbcfb5..48e36797b1 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -167,17 +167,17 @@ namespace graphene { namespace net { namespace detail { active_peer->node_id, active_peer->direction, firewalled_state::unknown); } - /****** - * Use information passed from command line or config file to advertise nodes - */ - class list_address_builder : public node_impl::address_builder + /// Base class for list address builder and exclude_list address builder + class generic_list_address_builder : public node_impl::address_builder { public: - explicit list_address_builder(const std::vector& address_list) + fc::flat_set list; + + explicit generic_list_address_builder(const std::vector& address_list) { - FC_ASSERT( !address_list.empty(), "The advertise peer node list must not be empty" ); + FC_ASSERT( !address_list.empty(), "The peer node list must not be empty" ); - std::for_each( address_list.begin(), address_list.end(), [&list = advertise_list]( const std::string& str ) + std::for_each( address_list.begin(), address_list.end(), [&list = list]( const std::string& str ) { // ignore fc exceptions (like poorly formatted endpoints) try @@ -190,72 +190,36 @@ namespace graphene { namespace net { namespace detail { } } ); } + }; - void build(node_impl* impl, address_message& reply) const override - { - std::vector ret_val; - ret_val.reserve( advertise_list.size() ); - // only pass those that are in the list AND we are connected to - for( const auto& it : 
advertise_list ) - { - fc::scoped_lock lock(impl->_active_connections.get_mutex()); - graphene::net::peer_connection_ptr peer_conn = impl->get_active_connection_for_endpoint( it ); - if( peer_conn != peer_connection_ptr() ) - ret_val.emplace_back( update_address_record( impl, peer_conn ) ); - } - reply.addresses = std::move(ret_val); - } + /****** + * Use information passed from command line or config file to advertise nodes + */ + class list_address_builder : public generic_list_address_builder + { + public: + explicit list_address_builder(const std::vector& address_list) + : generic_list_address_builder( address_list ) { /* Nothing to do */ } bool should_advertise( const fc::ip::endpoint& in ) const override { - return !( advertise_list.find(in) == advertise_list.end() ); + return !( list.find(in) == list.end() ); } - - private: - fc::flat_set advertise_list; }; /**** * Advertise all nodes except a predefined list */ - class exclude_address_builder : public node_impl::address_builder + class exclude_address_builder : public generic_list_address_builder { public: explicit exclude_address_builder(const std::vector& address_list) - { - FC_ASSERT( !address_list.empty(), "The exclude peer node list must not be empty" ); - std::for_each( address_list.begin(), address_list.end(), - [&exclude_list = exclude_list](const std::string& str) - { - // ignore fc exceptions (like poorly formatted endpoints) - try - { - exclude_list.insert( fc::ip::endpoint::from_string(str) ); - } - catch(const fc::exception& ) - { - wlog( "Address ${addr} invalid", ("addr", str) ); - } - }); - } - void build(node_impl* impl, address_message& reply) const override - { - reply.addresses.clear(); - reply.addresses.reserve(impl->_active_connections.size()); - fc::scoped_lock lock(impl->_active_connections.get_mutex()); - // filter out those in the exclude list - for( const peer_connection_ptr& active_peer : impl->_active_connections ) - { - if( exclude_list.find( *active_peer->get_remote_endpoint() ) == exclude_list.end() ) - reply.addresses.emplace_back( update_address_record( impl, active_peer ) ); - } - } + : generic_list_address_builder( address_list ) { /* Nothing to do */ } + bool should_advertise( const fc::ip::endpoint& in ) const override { - return ( exclude_list.find( in ) == exclude_list.end() ); + return ( list.find( in ) == list.end() ); } - private: - fc::flat_set exclude_list; }; /*** @@ -263,16 +227,6 @@ namespace graphene { namespace net { namespace detail { */ class all_address_builder : public node_impl::address_builder { - void build( node_impl* impl, address_message& reply ) const override - { - reply.addresses.clear(); - reply.addresses.reserve(impl->_active_connections.size()); - fc::scoped_lock lock(impl->_active_connections.get_mutex()); - for( const peer_connection_ptr& active_peer : impl->_active_connections ) - { - reply.addresses.emplace_back( update_address_record( impl, active_peer ) ); - } - } bool should_advertise( const fc::ip::endpoint& in ) const override { return true; @@ -284,6 +238,19 @@ namespace graphene { namespace net { namespace detail { return std::make_shared(); } + void node_impl::address_builder::build(node_impl* impl, address_message& reply) const + { + reply.addresses.clear(); + reply.addresses.reserve( impl->_active_connections.size() ); + fc::scoped_lock lock(impl->_active_connections.get_mutex()); + // only pass those that are allowed to advertise AND we are connected to + for( const peer_connection_ptr& active_peer : impl->_active_connections ) + { + if( 
should_advertise( *active_peer->get_remote_endpoint() ) ) + reply.addresses.emplace_back( update_address_record( impl, active_peer ) ); + } + } + node_impl::node_impl(const std::string& user_agent) : _user_agent_string(user_agent) { diff --git a/libraries/net/node_impl.hxx b/libraries/net/node_impl.hxx index 825823526b..d07b73e3aa 100644 --- a/libraries/net/node_impl.hxx +++ b/libraries/net/node_impl.hxx @@ -413,7 +413,7 @@ public: { public: static std::shared_ptr create_default_address_builder(); - virtual void build( node_impl* impl, address_message& ) const = 0; + void build( node_impl* impl, address_message& ) const; virtual bool should_advertise(const fc::ip::endpoint& in ) const = 0; virtual ~address_builder() = default; }; From c56ed0675b4be58729a4bebba3eae30edd6a57ef Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 14 Aug 2022 13:51:28 +0000 Subject: [PATCH 189/338] Only save new or updated address to peer database --- libraries/net/node.cpp | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 48e36797b1..06a3c16999 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -1295,11 +1295,17 @@ namespace graphene { namespace net { namespace detail { bool new_information_received = false; for (const address_info& address : addresses) { - potential_peer_record updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(address.remote_endpoint); + // Note: if found, a copy is returned + auto updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(address.remote_endpoint); + // Note: + // 1. node_id of that peer may have changed, but we don't check or update + // 2. we don't check by node_id either, in case when a peer's IP address has changed, we don't handle it if (address.last_seen_time > updated_peer_record.last_seen_time) - new_information_received = true; - updated_peer_record.last_seen_time = std::max(address.last_seen_time, updated_peer_record.last_seen_time); - _potential_peer_db.update_entry(updated_peer_record); + { + new_information_received = true; + updated_peer_record.last_seen_time = std::max(address.last_seen_time, updated_peer_record.last_seen_time); + _potential_peer_db.update_entry(updated_peer_record); + } } return new_information_received; } From c2c21cdf8379ed49b828de8a05ed6ce915131a1e Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 14 Aug 2022 19:38:44 +0000 Subject: [PATCH 190/338] Fix code smells --- libraries/net/include/graphene/net/node.hpp | 50 ++++++------- libraries/net/node.cpp | 77 ++++++++++----------- libraries/net/node_impl.hxx | 8 +-- tests/common/simulated_network.cpp | 4 +- tests/common/simulated_network.hpp | 13 ++-- 5 files changed, 76 insertions(+), 76 deletions(-) diff --git a/libraries/net/include/graphene/net/node.hpp b/libraries/net/include/graphene/net/node.hpp index 065b4d130d..12ae9dfddd 100644 --- a/libraries/net/include/graphene/net/node.hpp +++ b/libraries/net/include/graphene/net/node.hpp @@ -192,38 +192,38 @@ namespace graphene { namespace net { explicit node(const std::string& user_agent); virtual ~node(); - void close(); + void close() const; void set_node_delegate( std::shared_ptr del ) const; - void load_configuration( const fc::path& configuration_directory ); + void load_configuration( const fc::path& configuration_directory ) const; - virtual void listen_to_p2p_network(); - virtual void connect_to_p2p_network(); + virtual void listen_to_p2p_network() const; + virtual void 
connect_to_p2p_network() const; /** * Add endpoint to internal level_map database of potential nodes * to attempt to connect to. This database is consulted any time * the number connected peers falls below the target. */ - void add_node( const fc::ip::endpoint& ep ); + void add_node( const fc::ip::endpoint& ep ) const; /***** * @brief add a list of nodes to seed the p2p network * @param seeds a vector of url strings */ - void add_seed_nodes( std::vector seeds ); + void add_seed_nodes( std::vector seeds ) const; /**** * @brief add a node to seed the p2p network * @param in the url as a string */ - void add_seed_node( const std::string& in); + void add_seed_node( const std::string& in) const; /** * Attempt to connect to the specified endpoint immediately. */ - virtual void connect_to_endpoint( const fc::ip::endpoint& ep ); + virtual void connect_to_endpoint( const fc::ip::endpoint& ep ) const; /** * Specifies the network interface and port upon which incoming @@ -231,7 +231,7 @@ namespace graphene { namespace net { * @param ep the endpoint (network interface and port) * @param wait_if_not_available keep retrying if port is not available */ - void set_listen_endpoint( const fc::ip::endpoint& ep, bool wait_if_not_available ); + void set_listen_endpoint( const fc::ip::endpoint& ep, bool wait_if_not_available ) const; /** * Specifies the port upon which incoming connections should be accepted. @@ -241,19 +241,19 @@ namespace graphene { namespace net { * available. If false and the port is not available, * just choose a random available port */ - void set_listen_port( uint16_t port, bool wait_if_not_available ); + void set_listen_port( uint16_t port, bool wait_if_not_available ) const; /** * Enable or disable listening for incoming connections * @param accept set to true to listen for incoming connections, false otherwise */ - void set_accept_incoming_connections( bool accept ); + void set_accept_incoming_connections( bool accept ) const; /*** * Enable or disable connection attempts when new connections are advertised * @param connect true to attempt new connections, false otherwise */ - void set_connect_to_new_peers( bool connect ); + void set_connect_to_new_peers( bool connect ) const; /** * Returns the endpoint the node is listening on. This is usually the same @@ -269,7 +269,7 @@ namespace graphene { namespace net { * advertise (if algo is "list") or exclude (if algo is "exclude_list") */ void set_advertise_algorithm( const std::string& algo, - const std::vector& advertise_or_exclude_list = std::vector() ); + const std::vector& advertise_or_exclude_list = std::vector() ) const; /** * @return a list of peers that are currently connected. @@ -283,8 +283,8 @@ namespace graphene { namespace net { * Add message to outgoing inventory list, notify peers that * I have a message ready. */ - virtual void broadcast( const message& item_to_broadcast ); - virtual void broadcast_transaction( const signed_transaction& trx ) + virtual void broadcast( const message& item_to_broadcast ) const; + virtual void broadcast_transaction( const signed_transaction& trx ) const { broadcast( trx_message(trx) ); } @@ -293,31 +293,33 @@ namespace graphene { namespace net { * Node starts the process of fetching all items after item_id of the * given item_type. During this process messages are not broadcast. 
*/ - virtual void sync_from(const item_id& current_head_block, const std::vector& hard_fork_block_numbers); + virtual void sync_from( const item_id& current_head_block, + const std::vector& hard_fork_block_numbers ) const; bool is_connected() const; - void set_advanced_node_parameters(const fc::variant_object& params); - fc::variant_object get_advanced_node_parameters(); - message_propagation_data get_transaction_propagation_data(const graphene::protocol::transaction_id_type& transaction_id); - message_propagation_data get_block_propagation_data(const graphene::protocol::block_id_type& block_id); + void set_advanced_node_parameters(const fc::variant_object& params) const; + fc::variant_object get_advanced_node_parameters() const; + message_propagation_data get_transaction_propagation_data( + const graphene::protocol::transaction_id_type& transaction_id) const; + message_propagation_data get_block_propagation_data(const graphene::protocol::block_id_type& block_id) const; node_id_t get_node_id() const; - void set_allowed_peers(const std::vector& allowed_peers); + void set_allowed_peers(const std::vector& allowed_peers) const; /** * Instructs the node to forget everything in its peer database, mostly for debugging * problems where nodes are failing to connect to the network */ - void clear_peer_database(); + void clear_peer_database() const; - void set_total_bandwidth_limit(uint32_t upload_bytes_per_second, uint32_t download_bytes_per_second); + void set_total_bandwidth_limit(uint32_t upload_bytes_per_second, uint32_t download_bytes_per_second) const; fc::variant_object network_get_info() const; fc::variant_object network_get_usage_stats() const; std::vector get_potential_peers() const; - void disable_peer_advertising(); + void disable_peer_advertising() const; fc::variant_object get_call_statistics() const; protected: node_impl_ptr my; diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 06a3c16999..eee96cb8ef 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -198,8 +198,7 @@ namespace graphene { namespace net { namespace detail { class list_address_builder : public generic_list_address_builder { public: - explicit list_address_builder(const std::vector& address_list) - : generic_list_address_builder( address_list ) { /* Nothing to do */ } + using generic_list_address_builder::generic_list_address_builder; bool should_advertise( const fc::ip::endpoint& in ) const override { @@ -213,8 +212,7 @@ namespace graphene { namespace net { namespace detail { class exclude_address_builder : public generic_list_address_builder { public: - explicit exclude_address_builder(const std::vector& address_list) - : generic_list_address_builder( address_list ) { /* Nothing to do */ } + using generic_list_address_builder::generic_list_address_builder; bool should_advertise( const fc::ip::endpoint& in ) const override { @@ -361,7 +359,7 @@ namespace graphene { namespace net { namespace detail { fc::microseconds delay_until_retry = fc::seconds( (iter->number_of_failed_connection_attempts + 1) * _peer_connection_retry_timeout ); - if (!is_connection_to_endpoint_in_progress(iter->endpoint) && + if (!is_connected_to_endpoint(iter->endpoint) && ((iter->last_connection_disposition != last_connection_failed && iter->last_connection_disposition != last_connection_rejected && iter->last_connection_disposition != last_connection_handshaking_failed) || @@ -1627,15 +1625,13 @@ namespace graphene { namespace net { namespace detail { // If they match the IP and port we see, we assume that they're 
actually on the internet. fc::ip::endpoint peers_actual_outbound_endpoint = originating_peer->get_socket().remote_endpoint(); if( peers_actual_outbound_endpoint.get_address() == originating_peer->inbound_address && - peers_actual_outbound_endpoint.port() == originating_peer->outbound_port ) + peers_actual_outbound_endpoint.port() == originating_peer->outbound_port && + originating_peer->inbound_port != 0 ) { - if( originating_peer->inbound_port != 0 ) - { // add to the peer database fc::ip::endpoint peers_inbound_endpoint(originating_peer->inbound_address, originating_peer->inbound_port); potential_peer_record updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(peers_inbound_endpoint); _potential_peer_db.update_entry(updated_peer_record); - } } if (!is_accepting_new_connections()) @@ -1767,10 +1763,10 @@ namespace graphene { namespace net { namespace detail { std::vector updated_addresses = address_message_received.addresses; for (address_info& address : updated_addresses) address.last_seen_time = fc::time_point_sec(fc::time_point::now()); - if ( _node_configuration.connect_to_new_peers ) + if ( _node_configuration.connect_to_new_peers + && merge_address_info_with_potential_peer_database(updated_addresses) ) { - if ( merge_address_info_with_potential_peer_database(updated_addresses) ) - trigger_p2p_network_connect_loop(); + trigger_p2p_network_connect_loop(); } if (_handshaking_connections.find(originating_peer->shared_from_this()) != _handshaking_connections.end()) { @@ -4186,7 +4182,7 @@ namespace graphene { namespace net { namespace detail { void node_impl::connect_to_endpoint(const fc::ip::endpoint& remote_endpoint) { VERIFY_CORRECT_THREAD(); - if (is_connection_to_endpoint_in_progress(remote_endpoint)) + if( is_connected_to_endpoint(remote_endpoint) ) FC_THROW_EXCEPTION(already_connected_to_requested_peer, "already connected to requested endpoint ${endpoint}", ("endpoint", remote_endpoint)); @@ -4196,7 +4192,7 @@ namespace graphene { namespace net { namespace detail { initiate_connect_to(new_peer); } - peer_connection_ptr node_impl::get_active_connection_for_endpoint( const fc::ip::endpoint& remote_endpoint) + peer_connection_ptr node_impl::get_active_conn_for_endpoint( const fc::ip::endpoint& remote_endpoint ) const { VERIFY_CORRECT_THREAD(); fc::scoped_lock lock(_active_connections.get_mutex()); @@ -4209,10 +4205,10 @@ namespace graphene { namespace net { namespace detail { return peer_connection_ptr(); } - peer_connection_ptr node_impl::get_connection_for_endpoint( const fc::ip::endpoint& remote_endpoint ) + peer_connection_ptr node_impl::get_connection_for_endpoint( const fc::ip::endpoint& remote_endpoint ) const { VERIFY_CORRECT_THREAD(); - peer_connection_ptr active_ptr = get_active_connection_for_endpoint( remote_endpoint ); + peer_connection_ptr active_ptr = get_active_conn_for_endpoint( remote_endpoint ); if ( active_ptr != peer_connection_ptr() ) return active_ptr; fc::scoped_lock lock(_handshaking_connections.get_mutex()); @@ -4225,7 +4221,7 @@ namespace graphene { namespace net { namespace detail { return peer_connection_ptr(); } - bool node_impl::is_connection_to_endpoint_in_progress( const fc::ip::endpoint& remote_endpoint ) + bool node_impl::is_connected_to_endpoint( const fc::ip::endpoint& remote_endpoint ) const { VERIFY_CORRECT_THREAD(); return get_connection_for_endpoint( remote_endpoint ) != peer_connection_ptr(); @@ -4710,47 +4706,47 @@ namespace graphene { namespace net { namespace detail { INVOKE_IN_IMPL(set_node_delegate, del, 
delegate_thread); } - void node::load_configuration( const fc::path& configuration_directory ) + void node::load_configuration( const fc::path& configuration_directory ) const { INVOKE_IN_IMPL(load_configuration, configuration_directory); } - void node::listen_to_p2p_network() + void node::listen_to_p2p_network() const { INVOKE_IN_IMPL(listen_to_p2p_network); } - void node::connect_to_p2p_network() + void node::connect_to_p2p_network() const { INVOKE_IN_IMPL(connect_to_p2p_network, my); } - void node::add_node( const fc::ip::endpoint& ep ) + void node::add_node( const fc::ip::endpoint& ep ) const { INVOKE_IN_IMPL(add_node, ep); } - void node::connect_to_endpoint( const fc::ip::endpoint& remote_endpoint ) + void node::connect_to_endpoint( const fc::ip::endpoint& remote_endpoint ) const { INVOKE_IN_IMPL(connect_to_endpoint, remote_endpoint); } - void node::set_listen_endpoint(const fc::ip::endpoint& ep , bool wait_if_not_available) + void node::set_listen_endpoint(const fc::ip::endpoint& ep , bool wait_if_not_available) const { INVOKE_IN_IMPL(set_listen_endpoint, ep, wait_if_not_available); } - void node::set_accept_incoming_connections(bool accept) + void node::set_accept_incoming_connections(bool accept) const { INVOKE_IN_IMPL(set_accept_incoming_connections, accept); } - void node::set_connect_to_new_peers( bool connect ) + void node::set_connect_to_new_peers( bool connect ) const { INVOKE_IN_IMPL( set_connect_to_new_peers, connect ); } - void node::set_listen_port( uint16_t port, bool wait_if_not_available ) + void node::set_listen_port( uint16_t port, bool wait_if_not_available ) const { INVOKE_IN_IMPL(set_listen_port, port, wait_if_not_available); } @@ -4770,12 +4766,12 @@ namespace graphene { namespace net { namespace detail { INVOKE_IN_IMPL(get_connection_count); } - void node::broadcast( const message& msg ) + void node::broadcast( const message& msg ) const { INVOKE_IN_IMPL(broadcast, msg); } - void node::sync_from(const item_id& current_head_block, const std::vector& hard_fork_block_numbers) + void node::sync_from(const item_id& current_head_block, const std::vector& hard_fork_block_numbers) const { INVOKE_IN_IMPL(sync_from, current_head_block, hard_fork_block_numbers); } @@ -4790,22 +4786,23 @@ namespace graphene { namespace net { namespace detail { INVOKE_IN_IMPL(get_potential_peers); } - void node::set_advanced_node_parameters( const fc::variant_object& params ) + void node::set_advanced_node_parameters( const fc::variant_object& params ) const { INVOKE_IN_IMPL(set_advanced_node_parameters, params); } - fc::variant_object node::get_advanced_node_parameters() + fc::variant_object node::get_advanced_node_parameters() const { INVOKE_IN_IMPL(get_advanced_node_parameters); } - message_propagation_data node::get_transaction_propagation_data( const graphene::net::transaction_id_type& transaction_id ) + message_propagation_data node::get_transaction_propagation_data( + const graphene::net::transaction_id_type& transaction_id ) const { INVOKE_IN_IMPL(get_transaction_propagation_data, transaction_id); } - message_propagation_data node::get_block_propagation_data( const graphene::net::block_id_type& block_id ) + message_propagation_data node::get_block_propagation_data( const graphene::net::block_id_type& block_id ) const { INVOKE_IN_IMPL(get_block_propagation_data, block_id); } @@ -4815,23 +4812,23 @@ namespace graphene { namespace net { namespace detail { INVOKE_IN_IMPL(get_node_id); } - void node::set_allowed_peers( const std::vector& allowed_peers ) + void node::set_allowed_peers( 
const std::vector& allowed_peers ) const { INVOKE_IN_IMPL(set_allowed_peers, allowed_peers); } - void node::clear_peer_database() + void node::clear_peer_database() const { INVOKE_IN_IMPL(clear_peer_database); } void node::set_total_bandwidth_limit(uint32_t upload_bytes_per_second, - uint32_t download_bytes_per_second) + uint32_t download_bytes_per_second) const { INVOKE_IN_IMPL(set_total_bandwidth_limit, upload_bytes_per_second, download_bytes_per_second); } - void node::disable_peer_advertising() + void node::disable_peer_advertising() const { INVOKE_IN_IMPL(disable_peer_advertising); } @@ -4851,7 +4848,7 @@ namespace graphene { namespace net { namespace detail { INVOKE_IN_IMPL(network_get_usage_stats); } - void node::close() + void node::close() const { INVOKE_IN_IMPL(close); } @@ -5048,7 +5045,7 @@ namespace graphene { namespace net { namespace detail { } // end namespace detail - void node::add_seed_nodes(std::vector seeds) + void node::add_seed_nodes(std::vector seeds) const { for(const std::string& endpoint_string : seeds ) { @@ -5061,13 +5058,13 @@ namespace graphene { namespace net { namespace detail { } } - void node::add_seed_node(const std::string& in) + void node::add_seed_node(const std::string& in) const { INVOKE_IN_IMPL(add_seed_node, in); } void node::set_advertise_algorithm( const std::string& algo, - const std::vector& advertise_or_exclude_list ) + const std::vector& advertise_or_exclude_list ) const { INVOKE_IN_IMPL( set_advertise_algorithm, algo, advertise_or_exclude_list ); } diff --git a/libraries/net/node_impl.hxx b/libraries/net/node_impl.hxx index d07b73e3aa..73ef103cbc 100644 --- a/libraries/net/node_impl.hxx +++ b/libraries/net/node_impl.hxx @@ -420,7 +420,7 @@ public: #ifdef P2P_IN_DEDICATED_THREAD std::shared_ptr _thread = std::make_shared("p2p"); - std::shared_ptr get_thread() { return _thread; } + std::shared_ptr get_thread() const { return _thread; } #endif // P2P_IN_DEDICATED_THREAD std::unique_ptr _delegate; fc::sha256 _chain_id; @@ -748,7 +748,7 @@ public: void accept_loop(); void send_hello_message(const peer_connection_ptr& peer); void connect_to_task(peer_connection_ptr new_peer, const fc::ip::endpoint& remote_endpoint); - bool is_connection_to_endpoint_in_progress(const fc::ip::endpoint& remote_endpoint); + bool is_connected_to_endpoint(const fc::ip::endpoint& remote_endpoint) const; void move_peer_to_active_list(const peer_connection_ptr& peer); void move_peer_to_closing_list(const peer_connection_ptr& peer); @@ -759,13 +759,13 @@ public: * @param remote_endpoint the address we are interested in * @returns the connection, or peer_connection_ptr() if not found */ - peer_connection_ptr get_active_connection_for_endpoint( const fc::ip::endpoint& remote_endpoint ); + peer_connection_ptr get_active_conn_for_endpoint( const fc::ip::endpoint& remote_endpoint ) const; /*** * Look for a connection that is either active or currently in the handshaking process * @param remote_endpoint the address we are interested in * @returns the connection, or peer_connection_ptr() if not found */ - peer_connection_ptr get_connection_for_endpoint( const fc::ip::endpoint& remote_endpoint ); + peer_connection_ptr get_connection_for_endpoint( const fc::ip::endpoint& remote_endpoint ) const; void dump_node_status(); diff --git a/tests/common/simulated_network.cpp b/tests/common/simulated_network.cpp index 91557b1f25..084381dd91 100644 --- a/tests/common/simulated_network.cpp +++ b/tests/common/simulated_network.cpp @@ -48,7 +48,7 @@ namespace graphene { namespace net { } } - 
void simulated_network::message_sender(std::shared_ptr destination_node) + void simulated_network::message_sender(std::shared_ptr destination_node) const { while (!destination_node->messages_to_deliver.empty()) { @@ -74,7 +74,7 @@ namespace graphene { namespace net { } } - void simulated_network::broadcast( const message& item_to_broadcast ) + void simulated_network::broadcast( const message& item_to_broadcast ) const { for (auto network_node_info : network_nodes) { diff --git a/tests/common/simulated_network.hpp b/tests/common/simulated_network.hpp index 85f59e213a..f093c1d41a 100644 --- a/tests/common/simulated_network.hpp +++ b/tests/common/simulated_network.hpp @@ -34,20 +34,21 @@ class simulated_network : public node public: ~simulated_network() override; explicit simulated_network(const std::string& user_agent) : node(user_agent) {} - void listen_to_p2p_network() override {} - void connect_to_p2p_network() override {} - void connect_to_endpoint(const fc::ip::endpoint& ep) override {} + void listen_to_p2p_network() const override {} + void connect_to_p2p_network() const override {} + void connect_to_endpoint(const fc::ip::endpoint& ep) const override {} fc::ip::endpoint get_actual_listening_endpoint() const override { return fc::ip::endpoint(); } - void sync_from(const item_id& current_head_block, const std::vector& hard_fork_block_numbers) override {} - void broadcast(const message& item_to_broadcast) override; + void sync_from(const item_id& current_head_block, + const std::vector& hard_fork_block_numbers) const override {} + void broadcast(const message& item_to_broadcast) const override; void add_node_delegate(std::shared_ptr node_delegate_to_add); uint32_t get_connection_count() const override { return 8; } private: struct node_info; - void message_sender(std::shared_ptr destination_node); + void message_sender(std::shared_ptr destination_node) const; std::list> network_nodes; }; From 17bb77a5794b004af77d9867fc81e24a40d09055 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 15 Aug 2022 07:09:33 +0000 Subject: [PATCH 191/338] Fix code smells --- libraries/net/include/graphene/net/node.hpp | 7 ++++--- libraries/net/node.cpp | 14 ++++++++------ libraries/net/node_impl.hxx | 11 +++++++---- 3 files changed, 19 insertions(+), 13 deletions(-) diff --git a/libraries/net/include/graphene/net/node.hpp b/libraries/net/include/graphene/net/node.hpp index 12ae9dfddd..b71b5bb4d4 100644 --- a/libraries/net/include/graphene/net/node.hpp +++ b/libraries/net/include/graphene/net/node.hpp @@ -212,7 +212,7 @@ namespace graphene { namespace net { * @brief add a list of nodes to seed the p2p network * @param seeds a vector of url strings */ - void add_seed_nodes( std::vector seeds ) const; + void add_seed_nodes( const std::vector& seeds ) const; /**** * @brief add a node to seed the p2p network @@ -300,9 +300,10 @@ namespace graphene { namespace net { void set_advanced_node_parameters(const fc::variant_object& params) const; fc::variant_object get_advanced_node_parameters() const; - message_propagation_data get_transaction_propagation_data( + message_propagation_data get_tx_propagation_data( const graphene::protocol::transaction_id_type& transaction_id) const; - message_propagation_data get_block_propagation_data(const graphene::protocol::block_id_type& block_id) const; + message_propagation_data get_block_propagation_data( + const graphene::protocol::block_id_type& block_id) const; node_id_t get_node_id() const; void set_allowed_peers(const std::vector& allowed_peers) const; diff --git 
a/libraries/net/node.cpp b/libraries/net/node.cpp index eee96cb8ef..23270ad3e7 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -1673,7 +1673,7 @@ namespace graphene { namespace net { namespace detail { } void node_impl::on_connection_accepted_message( peer_connection* originating_peer, - const connection_accepted_message& ) + const connection_accepted_message& ) const { VERIFY_CORRECT_THREAD(); dlog("Received a connection_accepted in response to my \"hello\" from ${peer}", @@ -4563,13 +4563,15 @@ namespace graphene { namespace net { namespace detail { return result; } - message_propagation_data node_impl::get_transaction_propagation_data( const graphene::net::transaction_id_type& transaction_id ) + message_propagation_data node_impl::get_tx_propagation_data( + const graphene::net::transaction_id_type& transaction_id ) const { VERIFY_CORRECT_THREAD(); return _message_cache.get_message_propagation_data( transaction_id ); } - message_propagation_data node_impl::get_block_propagation_data( const graphene::net::block_id_type& block_id ) + message_propagation_data node_impl::get_block_propagation_data( + const graphene::net::block_id_type& block_id ) const { VERIFY_CORRECT_THREAD(); return _message_cache.get_message_propagation_data( block_id ); @@ -4796,10 +4798,10 @@ namespace graphene { namespace net { namespace detail { INVOKE_IN_IMPL(get_advanced_node_parameters); } - message_propagation_data node::get_transaction_propagation_data( + message_propagation_data node::get_tx_propagation_data( const graphene::net::transaction_id_type& transaction_id ) const { - INVOKE_IN_IMPL(get_transaction_propagation_data, transaction_id); + INVOKE_IN_IMPL(get_tx_propagation_data, transaction_id); } message_propagation_data node::get_block_propagation_data( const graphene::net::block_id_type& block_id ) const @@ -5045,7 +5047,7 @@ namespace graphene { namespace net { namespace detail { } // end namespace detail - void node::add_seed_nodes(std::vector seeds) const + void node::add_seed_nodes(const std::vector& seeds) const { for(const std::string& endpoint_string : seeds ) { diff --git a/libraries/net/node_impl.hxx b/libraries/net/node_impl.hxx index 73ef103cbc..484b495ae5 100644 --- a/libraries/net/node_impl.hxx +++ b/libraries/net/node_impl.hxx @@ -679,7 +679,8 @@ public: void on_hello_message( peer_connection* originating_peer, const hello_message& hello_message_received ); - void on_connection_accepted_message( peer_connection* originating_peer, const connection_accepted_message& ); + void on_connection_accepted_message( peer_connection* originating_peer, + const connection_accepted_message& ) const; void on_connection_rejected_message( peer_connection* originating_peer, const connection_rejected_message& connection_rejected_message_received ); @@ -806,13 +807,15 @@ public: void set_advanced_node_parameters( const fc::variant_object& params ); fc::variant_object get_advanced_node_parameters(); - message_propagation_data get_transaction_propagation_data( const graphene::net::transaction_id_type& transaction_id ); - message_propagation_data get_block_propagation_data( const graphene::net::block_id_type& block_id ); + message_propagation_data get_tx_propagation_data( + const graphene::net::transaction_id_type& transaction_id ) const; + message_propagation_data get_block_propagation_data( const graphene::net::block_id_type& block_id ) const; node_id_t get_node_id() const; void set_allowed_peers( const std::vector& allowed_peers ); void clear_peer_database(); - void 
set_total_bandwidth_limit( uint32_t upload_bytes_per_second, uint32_t download_bytes_per_second ); + void set_total_bandwidth_limit( uint32_t upload_bytes_per_second, + uint32_t download_bytes_per_second ); void disable_peer_advertising(); fc::variant_object get_call_statistics() const; graphene::net::message get_message_for_item(const item_id& item) override; From dfd33c6bfbfd53c2b9b18907830c199be172c831 Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 17 Aug 2022 09:30:59 +0000 Subject: [PATCH 192/338] Re-add peer_connection::is_firewalled and ... * halve number_of_failed_connection_attempts on successful connection, * remove code related to remote firewall check --- .../include/graphene/net/peer_connection.hpp | 1 + libraries/net/node.cpp | 121 +++++++++--------- 2 files changed, 59 insertions(+), 63 deletions(-) diff --git a/libraries/net/include/graphene/net/peer_connection.hpp b/libraries/net/include/graphene/net/peer_connection.hpp index 6d56340807..91db8ecc28 100644 --- a/libraries/net/include/graphene/net/peer_connection.hpp +++ b/libraries/net/include/graphene/net/peer_connection.hpp @@ -171,6 +171,7 @@ namespace graphene { namespace net fc::time_point connection_closed_time; fc::time_point connection_terminated_time; peer_connection_direction direction = peer_connection_direction::unknown; + firewalled_state is_firewalled = firewalled_state::unknown; fc::microseconds clock_offset; fc::microseconds round_trip_delay; diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 23270ad3e7..f551af7619 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -163,8 +163,8 @@ namespace graphene { namespace net { namespace detail { impl->_potential_peer_db.update_entry(*updated_peer_record); } - return address_info(*active_peer->get_remote_endpoint(), fc::time_point::now(), active_peer->round_trip_delay, - active_peer->node_id, active_peer->direction, firewalled_state::unknown); + return address_info( *active_peer->get_remote_endpoint(), fc::time_point::now(), active_peer->round_trip_delay, + active_peer->node_id, active_peer->direction, active_peer->is_firewalled ); } /// Base class for list address builder and exclude_list address builder @@ -359,11 +359,13 @@ namespace graphene { namespace net { namespace detail { fc::microseconds delay_until_retry = fc::seconds( (iter->number_of_failed_connection_attempts + 1) * _peer_connection_retry_timeout ); - if (!is_connected_to_endpoint(iter->endpoint) && - ((iter->last_connection_disposition != last_connection_failed && - iter->last_connection_disposition != last_connection_rejected && - iter->last_connection_disposition != last_connection_handshaking_failed) || - (fc::time_point::now() - iter->last_connection_attempt_time) > delay_until_retry)) + bool last_connection_not_ok = ( iter->last_connection_disposition == last_connection_failed || + iter->last_connection_disposition == last_connection_rejected || + iter->last_connection_disposition == last_connection_handshaking_failed ); + + if( !is_connected_to_endpoint( iter->endpoint ) + && ( !last_connection_not_ok + || ( fc::time_point::now() - iter->last_connection_attempt_time ) > delay_until_retry ) ) { connect_to_endpoint(iter->endpoint); initiated_connection_this_pass = true; @@ -1298,6 +1300,8 @@ namespace graphene { namespace net { namespace detail { // Note: // 1. node_id of that peer may have changed, but we don't check or update // 2. we don't check by node_id either, in case when a peer's IP address has changed, we don't handle it + // 3. 
no matter if the address is reported as firewalled or not, + // we add it to our database and check by ourselves later if (address.last_seen_time > updated_peer_record.last_seen_time) { new_information_received = true; @@ -1624,14 +1628,30 @@ namespace graphene { namespace net { namespace detail { // in the hello message, the peer sent us the IP address and port it thought it was connecting from. // If they match the IP and port we see, we assume that they're actually on the internet. fc::ip::endpoint peers_actual_outbound_endpoint = originating_peer->get_socket().remote_endpoint(); - if( peers_actual_outbound_endpoint.get_address() == originating_peer->inbound_address && - peers_actual_outbound_endpoint.port() == originating_peer->outbound_port && - originating_peer->inbound_port != 0 ) + if( 0 == originating_peer->inbound_port ) + { + dlog( "peer did not give an inbound port so I'm treating them as if they are firewalled." ); + originating_peer->is_firewalled = firewalled_state::firewalled; + } + else if( peers_actual_outbound_endpoint.get_address() == originating_peer->inbound_address && + peers_actual_outbound_endpoint.port() == originating_peer->outbound_port ) { // add to the peer database - fc::ip::endpoint peers_inbound_endpoint(originating_peer->inbound_address, originating_peer->inbound_port); - potential_peer_record updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(peers_inbound_endpoint); - _potential_peer_db.update_entry(updated_peer_record); + fc::ip::endpoint peers_inbound_endpoint( originating_peer->inbound_address, + originating_peer->inbound_port ); + auto updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint( + peers_inbound_endpoint ); + _potential_peer_db.update_entry( updated_peer_record ); + originating_peer->is_firewalled = firewalled_state::not_firewalled; + } + else + { + dlog("peer is firewalled: they think their outbound endpoint is ${reported_endpoint}, " + "but I see it as ${actual_endpoint}", + ("reported_endpoint", fc::ip::endpoint( originating_peer->inbound_address, + originating_peer->outbound_port )) + ("actual_endpoint", peers_actual_outbound_endpoint)); + originating_peer->is_firewalled = firewalled_state::firewalled; } if (!is_accepting_new_connections()) @@ -1787,6 +1807,9 @@ namespace graphene { namespace net { namespace detail { if (updated_peer_record) { updated_peer_record->last_connection_disposition = last_connection_succeeded; + // halve number_of_failed_connection_attempts + constexpr uint16_t two = 2; + updated_peer_record->number_of_failed_connection_attempts /= two; _potential_peer_db.update_entry(*updated_peer_record); } } @@ -1900,14 +1923,18 @@ namespace graphene { namespace net { namespace detail { _handshaking_connections.find(originating_peer->shared_from_this()) != _handshaking_connections.end()) { // handshaking is done, move the connection to fully active status and start synchronizing - dlog("peer ${endpoint} which was handshaking with us has started synchronizing with us, start syncing with it", + dlog("peer ${endpoint} which was handshaking with us has started synchronizing with us, " + "start syncing with it", ("endpoint", originating_peer->get_remote_endpoint())); fc::optional inbound_endpoint = originating_peer->get_endpoint_for_connecting(); if (inbound_endpoint) { // mark the connection as successful in the database - potential_peer_record updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(*inbound_endpoint); + auto updated_peer_record = 
_potential_peer_db.lookup_or_create_entry_for_endpoint(*inbound_endpoint); updated_peer_record.last_connection_disposition = last_connection_succeeded; + // halve number_of_failed_connection_attempts + constexpr uint16_t two = 2; + updated_peer_record.number_of_failed_connection_attempts /= two; _potential_peer_db.update_entry(updated_peer_record); } @@ -3736,8 +3763,8 @@ namespace graphene { namespace net { namespace detail { // The peer we're connecting to will assume we're firewalled if the // ip address and outbound port we send don't match the values it sees on its remote endpoint // - // if we know that we're behind a NAT that will allow incoming connections because our firewall - // detection figured it out, send those values instead. + // Note: we no longer perform remote firewall check, thus we don't know if we're behind a NAT that + // will allow incoming connections. fc::ip::endpoint local_endpoint(peer->get_socket().local_endpoint()); uint16_t listening_port = _node_configuration.accept_incoming_connections ? @@ -3761,21 +3788,12 @@ namespace graphene { namespace net { namespace detail { { VERIFY_CORRECT_THREAD(); - // TODO: Review code below. Since we no longer perform firewall check, code can be simplified - if (!new_peer->performing_firewall_check()) - { - // create or find the database entry for the new peer - // if we're connecting to them, we believe they're not firewalled - potential_peer_record updated_peer_record + // create or find the database entry for the new peer + potential_peer_record updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(remote_endpoint); - updated_peer_record.last_connection_disposition = last_connection_failed; - updated_peer_record.last_connection_attempt_time = fc::time_point::now();; - _potential_peer_db.update_entry(updated_peer_record); - } - else - { - ilog("connecting to peer ${peer} for firewall check", ("peer", new_peer->get_remote_endpoint())); - } + updated_peer_record.last_connection_disposition = last_connection_failed; + updated_peer_record.last_connection_attempt_time = fc::time_point::now();; + _potential_peer_db.update_entry(updated_peer_record); fc::oexception connect_failed_exception; @@ -3784,6 +3802,9 @@ namespace graphene { namespace net { namespace detail { // blocks until the connection is established and secure connection is negotiated new_peer->connect_to(remote_endpoint, _actual_listening_endpoint); + // we connected to the peer. guess they're not firewalled.... + new_peer->is_firewalled = firewalled_state::not_firewalled; + // connection succeeded, we've started handshaking. record that in our database potential_peer_record updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(remote_endpoint); @@ -3797,7 +3818,7 @@ namespace graphene { namespace net { namespace detail { connect_failed_exception = except; } - if (connect_failed_exception && !new_peer->performing_firewall_check()) + if( connect_failed_exception ) { // connection failed. record that in our database potential_peer_record updated_peer_record @@ -3809,34 +3830,8 @@ namespace graphene { namespace net { namespace detail { else updated_peer_record.last_error = *connect_failed_exception; _potential_peer_db.update_entry(updated_peer_record); - } - if (new_peer->performing_firewall_check()) - { - // we were connecting to test whether the node is firewalled, and we now know the result. 
- // send a message back to the requester - peer_connection_ptr requesting_peer = get_peer_by_node_id(new_peer->firewall_check_state->requesting_peer); - if (requesting_peer) - { - check_firewall_reply_message reply; - reply.endpoint_checked = new_peer->firewall_check_state->endpoint_to_test; - reply.node_id = new_peer->firewall_check_state->expected_node_id; - reply.result = connect_failed_exception ? - firewall_check_result::unable_to_connect : - firewall_check_result::connection_successful; - ilog("firewall check of ${peer_checked} ${success_or_failure}, sending reply to ${requester}", - ("peer_checked", new_peer->get_remote_endpoint()) - ("success_or_failure", connect_failed_exception ? "failed" : "succeeded" ) - ("requester", requesting_peer->get_remote_endpoint())); - - requesting_peer->send_message(reply); - } - } - - if (connect_failed_exception || new_peer->performing_firewall_check()) - { - // if the connection failed or if this connection was just intended to check - // whether the peer is firewalled, we want to disconnect now. + // if the connection failed, we want to disconnect now. _handshaking_connections.erase(new_peer); _terminating_connections.erase(new_peer); assert(_active_connections.find(new_peer) == _active_connections.end()); @@ -3848,15 +3843,15 @@ namespace graphene { namespace net { namespace detail { trigger_p2p_network_connect_loop(); schedule_peer_for_deletion(new_peer); - if (connect_failed_exception) - throw *connect_failed_exception; + throw *connect_failed_exception; } else { // connection was successful and we want to stay connected fc::ip::endpoint local_endpoint = new_peer->get_local_endpoint(); new_peer->inbound_address = local_endpoint.get_address(); - new_peer->inbound_port = _node_configuration.accept_incoming_connections ? _actual_listening_endpoint.port() : 0; + new_peer->inbound_port = _node_configuration.accept_incoming_connections ? 
_actual_listening_endpoint.port() + : 0; new_peer->outbound_port = local_endpoint.port(); new_peer->our_state = peer_connection::our_connection_state::just_connected; @@ -4417,7 +4412,7 @@ namespace graphene { namespace net { namespace detail { peer_details["version"] = ""; peer_details["subver"] = peer->user_agent; peer_details["inbound"] = peer->direction == peer_connection_direction::inbound; - peer_details["firewall_status"] = (fc::enum_type)firewalled_state::unknown; + peer_details["firewall_status"] = fc::variant( peer->is_firewalled, 1 ); peer_details["startingheight"] = ""; peer_details["banscore"] = ""; peer_details["syncnode"] = ""; From f0277418690aa91cbfddc98ff541f60b92dc1ee5 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 26 Aug 2022 14:32:39 +0000 Subject: [PATCH 193/338] Add "p2p-inbound-endpoint" node startup option --- libraries/app/application.cpp | 11 +- libraries/net/include/graphene/net/node.hpp | 29 +- .../include/graphene/net/peer_connection.hpp | 11 +- libraries/net/node.cpp | 375 ++++++++++-------- libraries/net/node_impl.hxx | 4 +- libraries/net/peer_connection.cpp | 11 +- 6 files changed, 258 insertions(+), 183 deletions(-) diff --git a/libraries/app/application.cpp b/libraries/app/application.cpp index e2f45a9b73..7d605f143d 100644 --- a/libraries/app/application.cpp +++ b/libraries/app/application.cpp @@ -167,6 +167,10 @@ void application_impl::reset_p2p_node(const fc::path& data_dir) true ); // else try to listen on the default port first, if failed, use a random port + if( _options->count("p2p-inbound-endpoint") > 0 ) + _p2p_network->set_inbound_endpoint( fc::ip::endpoint::from_string(_options->at("p2p-inbound-endpoint") + .as()) ); + if ( _options->count("accept-incoming-connections") > 0 ) _p2p_network->set_accept_incoming_connections( _options->at("accept-incoming-connections").as() ); @@ -1175,7 +1179,12 @@ void application::set_program_options(boost::program_options::options_descriptio ("enable-p2p-network", bpo::value()->implicit_value(true), "Whether to enable P2P network. Note: if delayed_node plugin is enabled, " "this option will be ignored and P2P network will always be disabled.") - ("p2p-endpoint", bpo::value(), "Endpoint for P2P node to listen on") + ("p2p-endpoint", bpo::value(), + "Endpoint (local IP address:port) for P2P node to listen on. " + "Specify 0.0.0.0 as address to listen on all IP addresses") + ("p2p-inbound-endpoint", bpo::value(), + "Endpoint (external IP address:port) that other peers should connect to. " + "If the address is unknown or dynamic, specify 0.0.0.0") ("accept-incoming-connections", bpo::value()->implicit_value(true), "Whether to accept incoming connections") ("connect-to-new-peers", bpo::value()->implicit_value(true), diff --git a/libraries/net/include/graphene/net/node.hpp b/libraries/net/include/graphene/net/node.hpp index b71b5bb4d4..592546582f 100644 --- a/libraries/net/include/graphene/net/node.hpp +++ b/libraries/net/include/graphene/net/node.hpp @@ -225,23 +225,20 @@ namespace graphene { namespace net { */ virtual void connect_to_endpoint( const fc::ip::endpoint& ep ) const; - /** - * Specifies the network interface and port upon which incoming - * connections should be accepted. - * @param ep the endpoint (network interface and port) - * @param wait_if_not_available keep retrying if port is not available - */ - void set_listen_endpoint( const fc::ip::endpoint& ep, bool wait_if_not_available ) const; + /** + * Specifies the IP address and port on the "local machine" that should accept incoming connections. 
+ * @note To listen on all IPv4 addresses on the local machine, specify 0.0.0.0 as the address. + * @param ep the endpoint (IP address and port) + * @param wait_if_not_available keep retrying if port is not available + */ + void set_listen_endpoint( const fc::ip::endpoint& ep, bool wait_if_not_available ) const; - /** - * Specifies the port upon which incoming connections should be accepted. - * @param port the port to listen on - * @param wait_if_not_available if true and the port is not available, enter a - * sleep and retry loop to wait for it to become - * available. If false and the port is not available, - * just choose a random available port - */ - void set_listen_port( uint16_t port, bool wait_if_not_available ) const; + /** + * Specifies the IP address and port on the "external network" which other peers should connect to. + * @note If the address is unknown (e.g. dynamically allocated), specify 0.0.0.0 as the address. + * @param ep the endpoint (IP address and port) + */ + void set_inbound_endpoint( const fc::ip::endpoint& ep ) const; /** * Enable or disable listening for incoming connections diff --git a/libraries/net/include/graphene/net/peer_connection.hpp b/libraries/net/include/graphene/net/peer_connection.hpp index 91db8ecc28..972ef8dd6c 100644 --- a/libraries/net/include/graphene/net/peer_connection.hpp +++ b/libraries/net/include/graphene/net/peer_connection.hpp @@ -204,12 +204,14 @@ namespace graphene { namespace net fc::optional<std::string> platform; fc::optional<uint32_t> bitness; - // for inbound connections, these fields record what the peer sent us in - // its hello message. For outbound, they record what we sent the peer - // in our hello message + // Initially, these fields record info about our local socket, + // so they are useless (except the remote_inbound_endpoint field for outbound connections). + // After we receive a hello message, they are replaced with the info in the hello message. 
fc::ip::address inbound_address; uint16_t inbound_port = 0; uint16_t outbound_port = 0; + /// The inbound endpoint of the remote peer + fc::optional<fc::ip::endpoint> remote_inbound_endpoint; /// @} typedef std::unordered_map<item_id, fc::time_point> item_to_time_map_type; @@ -277,7 +279,8 @@ namespace graphene { namespace net fc::tcp_socket& get_socket(); void accept_connection(); - void connect_to(const fc::ip::endpoint& remote_endpoint, fc::optional<fc::ip::endpoint> local_endpoint = fc::optional<fc::ip::endpoint>()); + void connect_to(const fc::ip::endpoint& remote_endpoint, + const fc::optional<fc::ip::endpoint>& local_endpoint = fc::optional<fc::ip::endpoint>()); void on_message(message_oriented_connection* originating_connection, const message& received_message) override; void on_connection_closed(message_oriented_connection* originating_connection) override; diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index f551af7619..b7228a1b74 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -152,19 +152,23 @@ namespace graphene { namespace net { namespace detail { # define VERIFY_CORRECT_THREAD() do {} while (0) #endif - static address_info update_address_record( node_impl* impl, const peer_connection_ptr& active_peer ) + static void update_address_seen_time( node_impl* impl, const peer_connection* active_peer ) { - fc::optional<potential_peer_record> updated_peer_record = - impl->_potential_peer_db.lookup_entry_for_endpoint(*active_peer->get_remote_endpoint()); - - if (updated_peer_record) + fc::optional<fc::ip::endpoint> inbound_endpoint = active_peer->get_endpoint_for_connecting(); + if( inbound_endpoint.valid() && inbound_endpoint->port() != 0 ) { - updated_peer_record->last_seen_time = fc::time_point::now(); - impl->_potential_peer_db.update_entry(*updated_peer_record); + fc::optional<potential_peer_record> updated_peer_record + = impl->_potential_peer_db.lookup_entry_for_endpoint( *inbound_endpoint ); + if( updated_peer_record ) + { + updated_peer_record->last_seen_time = fc::time_point::now(); + impl->_potential_peer_db.update_entry( *updated_peer_record ); + } } - - return address_info( *active_peer->get_remote_endpoint(), fc::time_point::now(), active_peer->round_trip_delay, - active_peer->node_id, active_peer->direction, active_peer->is_firewalled ); + } + static void update_address_seen_time( node_impl* impl, const peer_connection_ptr& active_peer ) + { + update_address_seen_time( impl, active_peer.get() ); } /// Base class for list address builder and exclude_list address builder @@ -241,11 +245,29 @@ reply.addresses.clear(); reply.addresses.reserve( impl->_active_connections.size() ); fc::scoped_lock<fc::mutex> lock(impl->_active_connections.get_mutex()); + fc::time_point_sec now = fc::time_point::now(); // only pass those that are allowed to advertise AND we are connected to for( const peer_connection_ptr& active_peer : impl->_active_connections ) { - if( should_advertise( *active_peer->get_remote_endpoint() ) ) - reply.addresses.emplace_back( update_address_record( impl, active_peer ) ); + // Note: + // * We want to advertise the peer's inbound endpoint, but not necessarily the remote endpoint. + // * If the peer's inbound port is 0, we still advertise it so that observers know about it. + // The peer is marked as "firewalled", so peers running older versions should be able to handle it too. + // + // If it is an outbound connection, we know that the remote endpoint works (at least for us), + // and we have assigned it to the inbound endpoint, so just use either of them. + // If it is an inbound connection, we just advertise what we have. 
+ fc::optional inbound_endpoint = active_peer->get_endpoint_for_connecting(); + if( inbound_endpoint.valid() && should_advertise( *inbound_endpoint ) ) + { + update_address_seen_time( impl, active_peer ); + reply.addresses.emplace_back( *inbound_endpoint, + now, + active_peer->round_trip_delay, + active_peer->node_id, + active_peer->direction, + active_peer->is_firewalled ); + } } } @@ -266,17 +288,7 @@ namespace graphene { namespace net { namespace detail { fc::scoped_lock lock(_active_connections.get_mutex()); for (const peer_connection_ptr& active_peer : _active_connections) { - fc::optional inbound_endpoint = active_peer->get_endpoint_for_connecting(); - if (inbound_endpoint) - { - fc::optional updated_peer_record = _potential_peer_db - .lookup_entry_for_endpoint(*inbound_endpoint); - if (updated_peer_record) - { - updated_peer_record->last_seen_time = fc::time_point::now(); - _potential_peer_db.update_entry(*updated_peer_record); - } - } + update_address_seen_time( this, active_peer ); } } @@ -338,8 +350,7 @@ namespace graphene { namespace net { namespace detail { } for (const potential_peer_record& add_once_peer : add_once_node_list) { - // see if we have an existing connection to that peer. If we do, disconnect them and - // then try to connect the next time through the loop + // If we have an existing connection to that peer, skip it. peer_connection_ptr existing_connection_ptr = get_connection_for_endpoint( add_once_peer.endpoint ); if(!existing_connection_ptr) connect_to_endpoint(add_once_peer.endpoint); @@ -1295,17 +1306,21 @@ namespace graphene { namespace net { namespace detail { bool new_information_received = false; for (const address_info& address : addresses) { + // If the peer's inbound port is 0, we don't add it to our peer database + if( 0 == address.remote_endpoint.port() ) + continue; // Note: if found, a copy is returned auto updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(address.remote_endpoint); // Note: // 1. node_id of that peer may have changed, but we don't check or update // 2. we don't check by node_id either, in case when a peer's IP address has changed, we don't handle it - // 3. no matter if the address is reported as firewalled or not, + // 3. if the peer's inbound port is not 0, no matter if the address is reported as firewalled or not, // we add it to our database and check by ourselves later - if (address.last_seen_time > updated_peer_record.last_seen_time) + if (address.last_seen_time > updated_peer_record.last_seen_time) // usually true, except when received from + // multiple peers in the same second { new_information_received = true; - updated_peer_record.last_seen_time = std::max(address.last_seen_time, updated_peer_record.last_seen_time); + updated_peer_record.last_seen_time = address.last_seen_time; _potential_peer_db.update_entry(updated_peer_record); } } @@ -1510,6 +1525,14 @@ namespace graphene { namespace net { namespace detail { parse_hello_user_data_for_peer(originating_peer, hello_message_received.user_data); + // For an outbound connection, we know the remote_inbound_endpoint already, so keep it unchanged. + // For an inbound connection, we initialize it here. // TODO or init later, after verified? 
+ if( !originating_peer->remote_inbound_endpoint ) + { + originating_peer->remote_inbound_endpoint = fc::ip::endpoint( originating_peer->inbound_address, + originating_peer->inbound_port ); + } + // if they didn't provide a last known fork, try to guess it if (originating_peer->last_known_fork_block_number == 0 && originating_peer->graphene_git_revision_unix_timestamp) @@ -1519,10 +1542,26 @@ namespace graphene { namespace net { namespace detail { } // now decide what to do with it - if (originating_peer->their_state == peer_connection::their_connection_state::just_connected) + if( originating_peer->their_state != peer_connection::their_connection_state::just_connected ) + { + // we can wind up here if we've connected to ourselves, and the source and + // destination endpoints are the same, causing messages we send out + // to arrive back on the initiating socket instead of the receiving + // socket. If we did a complete job of enumerating local addresses, + // we could avoid directly connecting to ourselves, or at least detect + // immediately when we did it and disconnect. + + // The only way I know of that we'd get an unexpected hello that we + // can't really guard against is if we do a simulatenous open, we + // probably need to think through that case. We're not attempting that + // yet, though, so it's ok to just disconnect here. + wlog("unexpected hello_message from peer, disconnecting"); + disconnect_from_peer(originating_peer, "Received an unexpected hello_message"); + return; + } + + if( hello_message_received.node_public_key != expected_node_public_key.serialize() ) { - if (hello_message_received.node_public_key != expected_node_public_key.serialize()) - { wlog("Invalid signature in hello message from peer ${peer}", ("peer", originating_peer->get_remote_endpoint())); std::string rejection_message("Invalid signature in hello message"); connection_rejected_message connection_rejected(_user_agent_string, core_protocol_version, @@ -1535,9 +1574,10 @@ namespace graphene { namespace net { namespace detail { // for this type of message, we're immediately disconnecting this peer disconnect_from_peer( originating_peer, "Invalid signature in hello message" ); return; - } - if (hello_message_received.chain_id != _chain_id) - { + } + + if( hello_message_received.chain_id != _chain_id ) + { wlog("Received hello message from peer on a different chain: ${message}", ("message", hello_message_received)); std::ostringstream rejection_message; rejection_message << "You're on a different chain than I am. 
I'm on " << _chain_id.str() << @@ -1554,9 +1594,10 @@ namespace graphene { namespace net { namespace detail { // benefit of sharing them) disconnect_from_peer(originating_peer, "You are on a different chain from me"); return; - } - if (originating_peer->last_known_fork_block_number != 0) - { + } + + if (originating_peer->last_known_fork_block_number != 0) + { uint32_t next_fork_block_number = get_next_known_hard_fork_block_number(originating_peer->last_known_fork_block_number); if (next_fork_block_number != 0) { @@ -1586,10 +1627,10 @@ namespace graphene { namespace net { namespace detail { return; } } - } + } + if (already_connected_to_this_peer) { - connection_rejected_message connection_rejected; if (_node_id == originating_peer->node_id) connection_rejected = connection_rejected_message(_user_agent_string, core_protocol_version, @@ -1597,6 +1638,7 @@ namespace graphene { namespace net { namespace detail { rejection_reason_code::connected_to_self, "I'm connecting to myself"); else + // TODO if it is an outbound connection, update the existing connection's inbound_endpoint connection_rejected = connection_rejected_message(_user_agent_string, core_protocol_version, originating_peer->get_socket().remote_endpoint(), rejection_reason_code::already_connected, @@ -1624,34 +1666,40 @@ namespace graphene { namespace net { namespace detail { { // whether we're planning on accepting them as a peer or not, they seem to be a valid node, // so add them to our database if they're not firewalled - - // in the hello message, the peer sent us the IP address and port it thought it was connecting from. - // If they match the IP and port we see, we assume that they're actually on the internet. - fc::ip::endpoint peers_actual_outbound_endpoint = originating_peer->get_socket().remote_endpoint(); - if( 0 == originating_peer->inbound_port ) + if( peer_connection_direction::outbound == originating_peer->direction ) { - dlog( "peer did not give an inbound port so I'm treating them as if they are firewalled." ); - originating_peer->is_firewalled = firewalled_state::firewalled; + // For outbound connection, we already know the peer is not firewalled, + // and it should be already in the peer database. Do nothing here. } - else if( peers_actual_outbound_endpoint.get_address() == originating_peer->inbound_address && - peers_actual_outbound_endpoint.port() == originating_peer->outbound_port ) + else if( 0 == originating_peer->inbound_port ) { - // add to the peer database - fc::ip::endpoint peers_inbound_endpoint( originating_peer->inbound_address, - originating_peer->inbound_port ); - auto updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint( - peers_inbound_endpoint ); - _potential_peer_db.update_entry( updated_peer_record ); - originating_peer->is_firewalled = firewalled_state::not_firewalled; + dlog( "peer did not give an inbound port so I'm treating them as if they are firewalled." ); + originating_peer->is_firewalled = firewalled_state::firewalled; } else { - dlog("peer is firewalled: they think their outbound endpoint is ${reported_endpoint}, " - "but I see it as ${actual_endpoint}", - ("reported_endpoint", fc::ip::endpoint( originating_peer->inbound_address, - originating_peer->outbound_port )) - ("actual_endpoint", peers_actual_outbound_endpoint)); - originating_peer->is_firewalled = firewalled_state::firewalled; + // Note: no matter how we guess, we end up adding these to our peer database and trying to connect later. 
+ + // First, we add the inbound endpoint that the peer told us it is listening on. + fc::flat_set endpoints_to_save; + endpoints_to_save.insert( *originating_peer->get_endpoint_for_connecting() ); + + // Second, we add the address we see, with the inbound port the peer told us. + // It might be the same as above, but that's OK. + fc::ip::endpoint peers_actual_outbound_endpoint = originating_peer->get_socket().remote_endpoint(); + endpoints_to_save.insert( fc::ip::endpoint( peers_actual_outbound_endpoint.get_address(), + originating_peer->inbound_port ) ); + + for( const auto& ep : endpoints_to_save ) + { + // add to the peer database + auto updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint( ep ); + updated_peer_record.last_seen_time = fc::time_point::now(); + _potential_peer_db.update_entry( updated_peer_record ); + } + + // Note: we don't update originating_peer->is_firewalled, because we might guess wrong + } if (!is_accepting_new_connections()) @@ -1673,23 +1721,6 @@ namespace graphene { namespace net { namespace detail { ("peer", originating_peer->get_remote_endpoint())); } } - } - else - { - // we can wind up here if we've connected to ourself, and the source and - // destination endpoints are the same, causing messages we send out - // to arrive back on the initiating socket instead of the receiving - // socket. If we did a complete job of enumerating local addresses, - // we could avoid directly connecting to ourselves, or at least detect - // immediately when we did it and disconnect. - - // The only way I know of that we'd get an unexpected hello that we - // can't really guard against is if we do a simulatenous open, we - // probably need to think through that case. We're not attempting that - // yet, though, so it's ok to just disconnect here. - wlog("unexpected hello_message from peer, disconnecting"); - disconnect_from_peer(originating_peer, "Received a unexpected hello_message"); - } } void node_impl::on_connection_accepted_message( peer_connection* originating_peer, @@ -1712,28 +1743,40 @@ namespace graphene { namespace net { namespace detail { ("peer", originating_peer->get_remote_endpoint()) ("reason", connection_rejected_message_received.reason_string)); - if (connection_rejected_message_received.reason_code == rejection_reason_code::connected_to_self) + originating_peer->negotiation_status = peer_connection::connection_negotiation_status + ::peer_connection_rejected; + originating_peer->our_state = peer_connection::our_connection_state::connection_rejected; + + if( connection_rejected_message_received.reason_code == rejection_reason_code::connected_to_self + || connection_rejected_message_received.reason_code == rejection_reason_code::different_chain ) { + // Using remote_endpoint here for an outbound connection is OK. + // For an inbound connection, we should have not saved anything to the peer database yet, nor we will + // save anything (it would be weird if they rejected us but we didn't reject them), + // so using remote_endpoint here at least won't do anything bad. + // Note: we should not erase data by the peer's claimed inbound_address or inbound_port, + // because the data is still unreliable. 
_potential_peer_db.erase(originating_peer->get_socket().remote_endpoint()); move_peer_to_closing_list(originating_peer->shared_from_this()); originating_peer->close_connection(); } + // TODO if it is an outbound connection, and the rejection reason is "already_connected", + // update the existing connection's inbound_endpoint else { // update our database to record that we were rejected so we won't try to connect again for a while // this only happens on connections we originate, so we should already know that peer is not firewalled - fc::optional updated_peer_record = _potential_peer_db.lookup_entry_for_endpoint(originating_peer->get_socket().remote_endpoint()); + fc::optional updated_peer_record + = _potential_peer_db.lookup_entry_for_endpoint(originating_peer->get_socket().remote_endpoint()); if (updated_peer_record) { updated_peer_record->last_connection_disposition = last_connection_rejected; updated_peer_record->last_connection_attempt_time = fc::time_point::now(); + // Note: we do not increase number_of_failed_connection_attempts here, this is probably OK _potential_peer_db.update_entry(*updated_peer_record); } + originating_peer->send_message(address_request_message()); } - - originating_peer->negotiation_status = peer_connection::connection_negotiation_status::peer_connection_rejected; - originating_peer->our_state = peer_connection::our_connection_state::connection_rejected; - originating_peer->send_message(address_request_message()); } else FC_THROW( "unexpected connection_rejected_message from peer" ); @@ -1778,11 +1821,14 @@ namespace graphene { namespace net { namespace detail { ("size", address_message_received.addresses.size())); for (const address_info& address : address_message_received.addresses) { - dlog(" ${endpoint} last seen ${time}", ("endpoint", address.remote_endpoint)("time", address.last_seen_time)); + dlog(" ${endpoint} last seen ${time}", + ("endpoint", address.remote_endpoint)("time", address.last_seen_time)); } std::vector updated_addresses = address_message_received.addresses; + auto now = fc::time_point_sec(fc::time_point::now()); for (address_info& address : updated_addresses) - address.last_seen_time = fc::time_point_sec(fc::time_point::now()); + address.last_seen_time = now; + // TODO add some gatekeeping code here if ( _node_configuration.connect_to_new_peers && merge_address_info_with_potential_peer_database(updated_addresses) ) { @@ -1798,22 +1844,28 @@ namespace graphene { namespace net { namespace detail { disconnect_from_peer(originating_peer, "I rejected your connection request (hello message) so I'm disconnecting"); else { - fc::optional inbound_endpoint = originating_peer->get_endpoint_for_connecting(); - if (inbound_endpoint) + // Note: updating last_connection_disposition to last_connection_succeeded for inbound connections + // doesn't seem correct + if( peer_connection_direction::outbound == originating_peer->direction ) { - // mark the connection as successful in the database - fc::optional updated_peer_record - = _potential_peer_db.lookup_entry_for_endpoint(*inbound_endpoint); - if (updated_peer_record) + const fc::optional& inbound_endpoint = originating_peer->get_endpoint_for_connecting(); + if( inbound_endpoint.valid() && inbound_endpoint->port() != 0 ) { - updated_peer_record->last_connection_disposition = last_connection_succeeded; - // halve number_of_failed_connection_attempts - constexpr uint16_t two = 2; - updated_peer_record->number_of_failed_connection_attempts /= two; - _potential_peer_db.update_entry(*updated_peer_record); + 
// mark the connection as successful in the database + fc::optional updated_peer_record + = _potential_peer_db.lookup_entry_for_endpoint(*inbound_endpoint); + if (updated_peer_record) + { + updated_peer_record->last_connection_disposition = last_connection_succeeded; + // halve number_of_failed_connection_attempts + constexpr uint16_t two = 2; + updated_peer_record->number_of_failed_connection_attempts /= two; + _potential_peer_db.update_entry(*updated_peer_record); + } } } + // transition it to our active list originating_peer->negotiation_status = peer_connection::connection_negotiation_status::negotiation_complete; move_peer_to_active_list(originating_peer->shared_from_this()); new_peer_just_added(originating_peer->shared_from_this()); @@ -1919,6 +1971,7 @@ namespace graphene { namespace net { namespace detail { return; } + // Why only for inbound connections? if (originating_peer->direction == peer_connection_direction::inbound && _handshaking_connections.find(originating_peer->shared_from_this()) != _handshaking_connections.end()) { @@ -1926,19 +1979,14 @@ namespace graphene { namespace net { namespace detail { dlog("peer ${endpoint} which was handshaking with us has started synchronizing with us, " "start syncing with it", ("endpoint", originating_peer->get_remote_endpoint())); - fc::optional inbound_endpoint = originating_peer->get_endpoint_for_connecting(); - if (inbound_endpoint) - { - // mark the connection as successful in the database - auto updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(*inbound_endpoint); - updated_peer_record.last_connection_disposition = last_connection_succeeded; - // halve number_of_failed_connection_attempts - constexpr uint16_t two = 2; - updated_peer_record.number_of_failed_connection_attempts /= two; - _potential_peer_db.update_entry(updated_peer_record); - } + + // Note: there was some code here to update the peer database, similar to the code in on_address_message(), + // but this is an inbound connection, + // updating last_connection_disposition to last_connection_succeeded doesn't seem correct, + // so the code was removed. 
// transition it to our active list + originating_peer->negotiation_status = peer_connection::connection_negotiation_status::negotiation_complete; move_peer_to_active_list(originating_peer->shared_from_this()); new_peer_just_added(originating_peer->shared_from_this()); } @@ -2584,9 +2632,11 @@ namespace graphene { namespace net { namespace detail { // if we closed the connection (due to timeout or handshake failure), we should have recorded an // error message to store in the peer database when we closed the connection fc::optional inbound_endpoint = originating_peer->get_endpoint_for_connecting(); - if (originating_peer->connection_closed_error && inbound_endpoint) + if( originating_peer->connection_closed_error + && inbound_endpoint.valid() && inbound_endpoint->port() != 0 ) { - fc::optional updated_peer_record = _potential_peer_db.lookup_entry_for_endpoint(*inbound_endpoint); + fc::optional updated_peer_record + = _potential_peer_db.lookup_entry_for_endpoint(*inbound_endpoint); if (updated_peer_record) { updated_peer_record->last_error = *originating_peer->connection_closed_error; @@ -2601,16 +2651,7 @@ namespace graphene { namespace net { namespace detail { { _active_connections.erase(originating_peer_ptr); - if (inbound_endpoint && originating_peer_ptr->get_remote_endpoint()) - { - fc::optional updated_peer_record - = _potential_peer_db.lookup_entry_for_endpoint(*inbound_endpoint); - if (updated_peer_record) - { - updated_peer_record->last_seen_time = fc::time_point::now(); - _potential_peer_db.update_entry(*updated_peer_record); - } - } + update_address_seen_time( this, originating_peer ); } ilog("Remote peer ${endpoint} closed their connection to us", @@ -3756,24 +3797,37 @@ namespace graphene { namespace net { namespace detail { fc::ecc::compact_signature signature = _node_configuration.private_key.sign_compact(shared_secret_encoder.result()); - // in the hello messsage, we send three things: - // ip address - // outbound port - // inbound port - // The peer we're connecting to will assume we're firewalled if the - // ip address and outbound port we send don't match the values it sees on its remote endpoint + // In the hello messsage, we send three things: + // * inbound IP address + // * inbound port + // * outbound port // - // Note: we no longer perform remote firewall check, thus we don't know if we're behind a NAT that - // will allow incoming connections. + // The peer we're connecting to may assume we're firewalled if the + // IP address and outbound port we send don't match the values it sees on its remote endpoint, + // but it is not always true, E.G. if the peer itself is behind a reverse proxy. + // + // Note: we no longer perform remote firewall check (ask the peer to check whether we are firewalled), + // thus we don't know our external IP address, + // nor we know whether we're behind NAT or a reverse proxy that will allow incoming connections. + // However, if the "p2p-inbound-endpoint" node startup option is configured, we send that instead. fc::ip::endpoint local_endpoint(peer->get_socket().local_endpoint()); - uint16_t listening_port = _node_configuration.accept_incoming_connections ? + fc::ip::address inbound_address = local_endpoint.get_address(); + uint16_t inbound_port = _node_configuration.accept_incoming_connections ? 
_actual_listening_endpoint.port() : 0; + if( _node_configuration.inbound_endpoint.valid() ) + { + if( _node_configuration.inbound_endpoint->get_address() != fc::ip::address() ) + inbound_address = _node_configuration.inbound_endpoint->get_address(); + if( _node_configuration.accept_incoming_connections ) + inbound_port = _node_configuration.inbound_endpoint->port(); + } + hello_message hello(_user_agent_string, core_protocol_version, - local_endpoint.get_address(), - listening_port, + inbound_address, + inbound_port, local_endpoint.port(), _node_public_key, signature, @@ -3789,8 +3843,7 @@ namespace graphene { namespace net { namespace detail { VERIFY_CORRECT_THREAD(); // create or find the database entry for the new peer - potential_peer_record updated_peer_record - = _potential_peer_db.lookup_or_create_entry_for_endpoint(remote_endpoint); + auto updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(remote_endpoint); updated_peer_record.last_connection_disposition = last_connection_failed; updated_peer_record.last_connection_attempt_time = fc::time_point::now();; _potential_peer_db.update_entry(updated_peer_record); @@ -3806,8 +3859,7 @@ namespace graphene { namespace net { namespace detail { new_peer->is_firewalled = firewalled_state::not_firewalled; // connection succeeded, we've started handshaking. record that in our database - potential_peer_record updated_peer_record - = _potential_peer_db.lookup_or_create_entry_for_endpoint(remote_endpoint); + updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(remote_endpoint); updated_peer_record.last_connection_disposition = last_connection_handshaking_failed; updated_peer_record.number_of_successful_connection_attempts++; updated_peer_record.last_seen_time = fc::time_point::now(); @@ -3821,8 +3873,7 @@ namespace graphene { namespace net { namespace detail { if( connect_failed_exception ) { // connection failed. record that in our database - potential_peer_record updated_peer_record - = _potential_peer_db.lookup_or_create_entry_for_endpoint(remote_endpoint); + updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(remote_endpoint); updated_peer_record.last_connection_disposition = last_connection_failed; updated_peer_record.number_of_failed_connection_attempts++; if (new_peer->connection_closed_error) @@ -3834,9 +3885,7 @@ namespace graphene { namespace net { namespace detail { // if the connection failed, we want to disconnect now. _handshaking_connections.erase(new_peer); _terminating_connections.erase(new_peer); - assert(_active_connections.find(new_peer) == _active_connections.end()); _active_connections.erase(new_peer); - assert(_closing_connections.find(new_peer) == _closing_connections.end()); _closing_connections.erase(new_peer); display_current_connections(); @@ -3926,6 +3975,10 @@ namespace graphene { namespace net { namespace detail { _potential_peer_db.open(potential_peer_database_file_name); // push back the time on all peers loaded from the database so we will be able to retry them immediately + // Note: this step is almost useless because we didn't multiply _peer_connection_retry_timeout + // by number_of_failed_connection_attempts. However, it is probably desired as we don't want + // to try to connect to a large number of dead nodes at startup. + // TODO Perhaps just remove it. 
for (peer_database::iterator itr = _potential_peer_db.begin(); itr != _potential_peer_db.end(); ++itr) { potential_peer_record updated_peer_record = *itr; @@ -4076,13 +4129,21 @@ namespace graphene { namespace net { namespace detail { { VERIFY_CORRECT_THREAD(); // if we're connecting to them, we believe they're not firewalled - potential_peer_record updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(ep); + auto updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(ep); // if we've recently connected to this peer, reset the last_connection_attempt_time to allow // us to immediately retry this peer + // Note: to make it work, we need to multiply _peer_connection_retry_timeout + // by number_of_failed_connection_attempts. + // However, this step is almost useless because we will immediately try to connect anyway + // due to _add_once_node_list. + // On the other hand, if we connected to the peer already but it was not in the peer database somehow, + // this step makes sure that it will be added to the peer database. + auto delay_until_retry = fc::seconds( (updated_peer_record.number_of_failed_connection_attempts + 1) + * _peer_connection_retry_timeout ); updated_peer_record.last_connection_attempt_time = std::min( updated_peer_record.last_connection_attempt_time, - fc::time_point::now() - fc::seconds(_peer_connection_retry_timeout) ); + fc::time_point::now() - delay_until_retry ); _add_once_node_list.push_back(updated_peer_record); _potential_peer_db.update_entry(updated_peer_record); trigger_p2p_network_connect_loop(); @@ -4193,6 +4254,7 @@ namespace graphene { namespace net { namespace detail { fc::scoped_lock lock(_active_connections.get_mutex()); for( const peer_connection_ptr& active_peer : _active_connections ) { + // TODO check by remote inbound endpoint too fc::optional endpoint_for_this_peer( active_peer->get_remote_endpoint() ); if( endpoint_for_this_peer && *endpoint_for_this_peer == remote_endpoint ) return active_peer; @@ -4209,6 +4271,7 @@ namespace graphene { namespace net { namespace detail { fc::scoped_lock lock(_handshaking_connections.get_mutex()); for( const peer_connection_ptr& handshaking_peer : _handshaking_connections ) { + // TODO check by remote inbound endpoint too fc::optional endpoint_for_this_peer( handshaking_peer->get_remote_endpoint() ); if( endpoint_for_this_peer && *endpoint_for_this_peer == remote_endpoint ) return handshaking_peer; @@ -4315,9 +4378,10 @@ namespace graphene { namespace net { namespace detail { { // we're the first to try to want to close the connection fc::optional inbound_endpoint = peer_to_disconnect->get_endpoint_for_connecting(); - if (inbound_endpoint) + if( inbound_endpoint.valid() && inbound_endpoint->port() != 0 ) { - fc::optional updated_peer_record = _potential_peer_db.lookup_entry_for_endpoint(*inbound_endpoint); + fc::optional updated_peer_record + = _potential_peer_db.lookup_entry_for_endpoint(*inbound_endpoint); if (updated_peer_record) { updated_peer_record->last_seen_time = fc::time_point::now(); @@ -4358,6 +4422,13 @@ namespace graphene { namespace net { namespace detail { save_node_configuration(); } + void node_impl::set_inbound_endpoint( const fc::ip::endpoint& ep ) + { + VERIFY_CORRECT_THREAD(); + _node_configuration.inbound_endpoint = ep; + save_node_configuration(); + } + void node_impl::set_accept_incoming_connections(bool accept) { VERIFY_CORRECT_THREAD(); @@ -4372,14 +4443,6 @@ namespace graphene { namespace net { namespace detail { save_node_configuration(); } - 
void node_impl::set_listen_port( uint16_t port, bool wait_if_not_available ) - { - VERIFY_CORRECT_THREAD(); - _node_configuration.listen_endpoint = fc::ip::endpoint( fc::ip::address(), port ); - _node_configuration.wait_if_endpoint_is_busy = wait_if_not_available; - save_node_configuration(); - } - fc::ip::endpoint node_impl::get_actual_listening_endpoint() const { VERIFY_CORRECT_THREAD(); @@ -4733,6 +4796,11 @@ namespace graphene { namespace net { namespace detail { INVOKE_IN_IMPL(set_listen_endpoint, ep, wait_if_not_available); } + void node::set_inbound_endpoint(const fc::ip::endpoint& ep ) const + { + INVOKE_IN_IMPL( set_inbound_endpoint, ep ); + } + void node::set_accept_incoming_connections(bool accept) const { INVOKE_IN_IMPL(set_accept_incoming_connections, accept); @@ -4743,11 +4811,6 @@ namespace graphene { namespace net { namespace detail { INVOKE_IN_IMPL( set_connect_to_new_peers, connect ); } - void node::set_listen_port( uint16_t port, bool wait_if_not_available ) const - { - INVOKE_IN_IMPL(set_listen_port, port, wait_if_not_available); - } - fc::ip::endpoint node::get_actual_listening_endpoint() const { INVOKE_IN_IMPL(get_actual_listening_endpoint); diff --git a/libraries/net/node_impl.hxx b/libraries/net/node_impl.hxx index 484b495ae5..8ede33e65f 100644 --- a/libraries/net/node_impl.hxx +++ b/libraries/net/node_impl.hxx @@ -394,6 +394,7 @@ class statistics_gathering_node_delegate_wrapper : public node_delegate struct node_configuration { fc::ip::endpoint listen_endpoint; + fc::optional inbound_endpoint; bool accept_incoming_connections = true; bool connect_to_new_peers = true; bool wait_if_endpoint_is_busy = false; @@ -791,9 +792,9 @@ public: void initiate_connect_to(const peer_connection_ptr& peer); void connect_to_endpoint(const fc::ip::endpoint& ep); void set_listen_endpoint(const fc::ip::endpoint& ep , bool wait_if_not_available); + void set_inbound_endpoint( const fc::ip::endpoint& ep ); void set_accept_incoming_connections(bool accept); void set_connect_to_new_peers( bool connect ); - void set_listen_port( uint16_t port, bool wait_if_not_available ); fc::ip::endpoint get_actual_listening_endpoint() const; std::vector get_connected_peers() const; @@ -836,6 +837,7 @@ public: FC_REFLECT( graphene::net::detail::node_configuration, (listen_endpoint) + (inbound_endpoint) (accept_incoming_connections) (connect_to_new_peers) (wait_if_endpoint_is_busy) diff --git a/libraries/net/peer_connection.cpp b/libraries/net/peer_connection.cpp index 3fda55d4b9..c78be57496 100644 --- a/libraries/net/peer_connection.cpp +++ b/libraries/net/peer_connection.cpp @@ -218,7 +218,8 @@ namespace graphene { namespace net their_state = their_connection_state::just_connected; our_state = our_connection_state::just_connected; - ilog( "established inbound connection from ${remote_endpoint}, sending hello", ("remote_endpoint", _message_connection.get_socket().remote_endpoint() ) ); + ilog( "established inbound connection from ${remote_endpoint}, sending hello", + ("remote_endpoint", _message_connection.get_socket().remote_endpoint() ) ); } catch ( const fc::exception& e ) { @@ -227,7 +228,8 @@ namespace graphene { namespace net } } - void peer_connection::connect_to( const fc::ip::endpoint& remote_endpoint, fc::optional local_endpoint ) + void peer_connection::connect_to( const fc::ip::endpoint& remote_endpoint, + const fc::optional& local_endpoint ) { VERIFY_CORRECT_THREAD(); try @@ -261,6 +263,7 @@ namespace graphene { namespace net negotiation_status = connection_negotiation_status::connected; 
their_state = their_connection_state::just_connected; our_state = our_connection_state::just_connected; + remote_inbound_endpoint = remote_endpoint; ilog( "established outbound connection to ${remote_endpoint}", ("remote_endpoint", remote_endpoint ) ); } catch ( fc::exception& e ) @@ -528,9 +531,7 @@ namespace graphene { namespace net fc::optional peer_connection::get_endpoint_for_connecting() const { - if (inbound_port) - return fc::ip::endpoint(inbound_address, inbound_port); - return fc::optional(); + return remote_inbound_endpoint; } } } // end namespace graphene::net From 33a62e3b542af6c9bb6f232729494958179370f0 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 28 Aug 2022 19:08:24 +0000 Subject: [PATCH 194/338] Add some gatekeeping code --- libraries/net/include/graphene/net/config.hpp | 2 + .../include/graphene/net/peer_connection.hpp | 22 +-- libraries/net/node.cpp | 138 ++++++++++++++++-- libraries/net/node_impl.hxx | 4 +- libraries/net/peer_connection.cpp | 6 - 5 files changed, 134 insertions(+), 38 deletions(-) diff --git a/libraries/net/include/graphene/net/config.hpp b/libraries/net/include/graphene/net/config.hpp index 5e333b805c..b7d65078bb 100644 --- a/libraries/net/include/graphene/net/config.hpp +++ b/libraries/net/include/graphene/net/config.hpp @@ -113,5 +113,7 @@ #define MAXIMUM_PEERDB_SIZE 1000 +constexpr size_t MAX_ADDRESSES_TO_HANDLE_AT_ONCE = 200; + constexpr size_t MAX_BLOCKS_TO_HANDLE_AT_ONCE = 200; constexpr size_t MAX_SYNC_BLOCKS_TO_PREFETCH = 10 * MAX_BLOCKS_TO_HANDLE_AT_ONCE; diff --git a/libraries/net/include/graphene/net/peer_connection.hpp b/libraries/net/include/graphene/net/peer_connection.hpp index 972ef8dd6c..f5ea7e3604 100644 --- a/libraries/net/include/graphene/net/peer_connection.hpp +++ b/libraries/net/include/graphene/net/peer_connection.hpp @@ -43,21 +43,6 @@ namespace graphene { namespace net { - struct firewall_check_state_data - { - node_id_t expected_node_id; - fc::ip::endpoint endpoint_to_test; - - // if we're coordinating a firewall check for another node, these are the helper - // nodes we've already had do the test (if this structure is still relevant, that - // that means they have all had indeterminate results - std::set nodes_already_tested; - - // If we're a just a helper node, this is the node we report back to - // when we have a result - node_id_t requesting_peer; - }; - class peer_connection; class peer_connection_delegate { @@ -214,7 +199,7 @@ namespace graphene { namespace net fc::optional remote_inbound_endpoint; /// @} - typedef std::unordered_map item_to_time_map_type; + using item_to_time_map_type = std::unordered_map; /// blockchain synchronization state data /// @{ @@ -262,7 +247,9 @@ namespace graphene { namespace net fc::future accept_or_connect_task_done; - firewall_check_state_data *firewall_check_state = nullptr; + /// Whether we're waiting for an address message + bool expecting_address_message = false; + private: #ifndef NDEBUG fc::thread* _thread = nullptr; @@ -310,7 +297,6 @@ namespace graphene { namespace net void clear_old_inventory(); bool is_inventory_advertised_to_us_list_full_for_transactions() const; bool is_inventory_advertised_to_us_list_full() const; - bool performing_firewall_check() const; fc::optional get_endpoint_for_connecting() const; private: void send_queued_messages_task(); diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index b7228a1b74..ee78b2764c 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -1069,6 +1069,7 @@ namespace graphene { namespace net { namespace 
detail { { try { + active_peer->expecting_address_message = true; active_peer->send_message(address_request_message()); } catch ( const fc::canceled_exception& ) @@ -1306,7 +1307,8 @@ namespace graphene { namespace net { namespace detail { bool new_information_received = false; for (const address_info& address : addresses) { - // If the peer's inbound port is 0, we don't add it to our peer database + // If the peer's inbound port is 0, we don't add it to our peer database. + // Although it should have been handled by the caller, be defensive here. if( 0 == address.remote_endpoint.port() ) continue; // Note: if found, a copy is returned @@ -1324,6 +1326,7 @@ namespace graphene { namespace net { namespace detail { _potential_peer_db.update_entry(updated_peer_record); } } + // TODO maybe delete too old info by the way return new_information_received; } @@ -1526,9 +1529,12 @@ namespace graphene { namespace net { namespace detail { parse_hello_user_data_for_peer(originating_peer, hello_message_received.user_data); // For an outbound connection, we know the remote_inbound_endpoint already, so keep it unchanged. - // For an inbound connection, we initialize it here. // TODO or init later, after verified? + // For an inbound connection, we initialize it here. if( !originating_peer->remote_inbound_endpoint ) { + // Note: the data is not yet verified, so we need to use it with caution. + // On the one hand, we want to advertise as accurate data as possible to other peers, + // on the other hand, we still want to advertise it to other peers if we didn't have a chance to verify it. originating_peer->remote_inbound_endpoint = fc::ip::endpoint( originating_peer->inbound_address, originating_peer->inbound_port ); } @@ -1643,11 +1649,14 @@ namespace graphene { namespace net { namespace detail { originating_peer->get_socket().remote_endpoint(), rejection_reason_code::already_connected, "I'm already connected to you"); + originating_peer->their_state = peer_connection::their_connection_state::connection_rejected; originating_peer->send_message(message(connection_rejected)); dlog("Received a hello_message from peer ${peer} that I'm already connected to (with id ${id}), rejection", ("peer", originating_peer->get_remote_endpoint()) ("id", originating_peer->node_id)); + // If already connected, we disconnect + disconnect_from_peer(originating_peer, connection_rejected.reason_string); } #ifdef ENABLE_P2P_DEBUGGING_API else if(!_allowed_peers.empty() && @@ -1727,10 +1736,21 @@ namespace graphene { namespace net { namespace detail { const connection_accepted_message& ) const { VERIFY_CORRECT_THREAD(); + // Gatekeeping code + // We only send one address request message shortly after connected + if( originating_peer->our_state != peer_connection::our_connection_state::just_connected ) + { + // Log and ignore + dlog( "Received an unexpected connection_accepted message from ${peer}", + ("peer", originating_peer->get_remote_endpoint()) ); + return; + } + dlog("Received a connection_accepted in response to my \"hello\" from ${peer}", ("peer", originating_peer->get_remote_endpoint())); originating_peer->negotiation_status = peer_connection::connection_negotiation_status::peer_connection_accepted; originating_peer->our_state = peer_connection::our_connection_state::connection_accepted; + originating_peer->expecting_address_message = true; originating_peer->send_message(address_request_message()); } @@ -1747,6 +1767,7 @@ namespace graphene { namespace net { namespace detail { ::peer_connection_rejected; 
     originating_peer->our_state = peer_connection::our_connection_state::connection_rejected;
+    // TODO the data is not verified, be careful
     if( connection_rejected_message_received.reason_code == rejection_reason_code::connected_to_self ||
         connection_rejected_message_received.reason_code == rejection_reason_code::different_chain )
     {
@@ -1775,22 +1796,49 @@ namespace graphene { namespace net { namespace detail {
           // Note: we do not increase number_of_failed_connection_attempts here, this is probably OK
           _potential_peer_db.update_entry(*updated_peer_record);
         }
+        originating_peer->expecting_address_message = true;
         originating_peer->send_message(address_request_message());
       }
     }
     else
-      FC_THROW( "unexpected connection_rejected_message from peer" );
+    {
+      // Note: in older versions, FC_THROW() was called here,
+      // which would cause on_connection_closed() to be called,
+      // which would then close the connection when the peer_connection object was destroyed.
+      // Explicitly closing the connection here is more intuitive.
+      dlog( "Unexpected connection_rejected_message from peer ${peer}, disconnecting",
+            ("peer", originating_peer->get_remote_endpoint()) );
+      disconnect_from_peer( originating_peer, "Received an unexpected connection_rejected_message" );
+    }
   }

   void node_impl::on_address_request_message(peer_connection* originating_peer,
                                              const address_request_message&)
   {
     VERIFY_CORRECT_THREAD();
-    dlog("Received an address request message");
+    // Gatekeeping code
+    if( originating_peer->their_state != peer_connection::their_connection_state::connection_accepted
+        && originating_peer->their_state != peer_connection::their_connection_state::connection_rejected )
+    {
+      dlog( "Unexpected address_request_message from peer ${peer}, disconnecting",
+            ("peer", originating_peer->get_remote_endpoint()) );
+      disconnect_from_peer( originating_peer, "Received an unexpected address_request_message" );
+      return;
+    }
+
+    dlog( "Received an address request message from peer ${peer}",
+          ("peer", originating_peer->get_remote_endpoint()) );
     address_message reply;
     if (_address_builder != nullptr )
       _address_builder->build( this, reply );
     originating_peer->send_message(reply);
+
+    // If we rejected their connection, disconnect now
+    if( originating_peer->their_state == peer_connection::their_connection_state::connection_rejected )
+    {
+      disconnect_from_peer( originating_peer,
+                            "I rejected your connection request (hello message) so I'm disconnecting" );
+    }
   }

   void node_impl::set_advertise_algorithm( const std::string& algo,
@@ -1817,18 +1865,53 @@ namespace graphene { namespace net { namespace detail {
                                       const address_message& address_message_received)
   {
     VERIFY_CORRECT_THREAD();
-    dlog("Received an address message containing ${size} addresses",
-         ("size", address_message_received.addresses.size()));
+    // Do some gatekeeping here.
+    // Malicious peers can easily bypass our checks in on_hello_message(), and we will then request addresses anyway,
+    // so checking connection_state here is useless.
+    // The size can be large, so we only handle the first N addresses.
+    // The peer might send us lots of address messages even if we didn't request,
+    // so we'd better know whether we have sent an address request message recently.
+ if( !originating_peer->expecting_address_message ) + { + // Log and ignore + dlog( "Received an unexpected address message containing ${size} addresses for peer ${peer}", + ("size", address_message_received.addresses.size()) + ("peer", originating_peer->get_remote_endpoint()) ); + return; + } + originating_peer->expecting_address_message = false; + + dlog( "Received an address message containing ${size} addresses for peer ${peer}", + ("size", address_message_received.addresses.size()) + ("peer", originating_peer->get_remote_endpoint()) ); + size_t count = 0; for (const address_info& address : address_message_received.addresses) { - dlog(" ${endpoint} last seen ${time}", - ("endpoint", address.remote_endpoint)("time", address.last_seen_time)); + dlog( " ${endpoint} last seen ${time}, firewalled status ${fw}", + ("endpoint", address.remote_endpoint)("time", address.last_seen_time) + ("fw", address.firewalled) ); + ++count; + if( count >= _max_addresses_to_handle_at_once ) + break; } - std::vector updated_addresses = address_message_received.addresses; + std::vector updated_addresses; + updated_addresses.reserve( count ); auto now = fc::time_point_sec(fc::time_point::now()); - for (address_info& address : updated_addresses) - address.last_seen_time = now; - // TODO add some gatekeeping code here + count = 0; + for( const address_info& address : address_message_received.addresses ) + { + if( 0 == address.remote_endpoint.port() ) + continue; + updated_addresses.emplace_back( address.remote_endpoint, + now, + address.latency, + address.node_id, + address.direction, + address.firewalled ); + ++count; + if( count >= _max_addresses_to_handle_at_once ) + break; + } if ( _node_configuration.connect_to_new_peers && merge_address_info_with_potential_peer_database(updated_addresses) ) { @@ -1879,6 +1962,15 @@ namespace graphene { namespace net { namespace detail { const fetch_blockchain_item_ids_message& fetch_blockchain_item_ids_message_received) { VERIFY_CORRECT_THREAD(); + // Gatekeeping code + if( originating_peer->their_state != peer_connection::their_connection_state::connection_accepted ) + { + dlog( "Unexpected fetch_blockchain_item_ids_message from peer ${peer}, disconnecting", + ("peer", originating_peer->get_remote_endpoint()) ); + disconnect_from_peer( originating_peer, "Received an unexpected fetch_blockchain_item_ids_message" ); + return; + } + item_id peers_last_item_seen = item_id(fetch_blockchain_item_ids_message_received.item_type, item_hash_t()); if (fetch_blockchain_item_ids_message_received.blockchain_synopsis.empty()) { @@ -2409,9 +2501,18 @@ namespace graphene { namespace net { namespace detail { } void node_impl::on_fetch_items_message(peer_connection* originating_peer, - const fetch_items_message& fetch_items_message_received) const + const fetch_items_message& fetch_items_message_received) { VERIFY_CORRECT_THREAD(); + // Gatekeeping code + if( originating_peer->their_state != peer_connection::their_connection_state::connection_accepted ) + { + dlog( "Unexpected fetch_items_message from peer ${peer}, disconnecting", + ("peer", originating_peer->get_remote_endpoint()) ); + disconnect_from_peer( originating_peer, "Received an unexpected fetch_items_message" ); + return; + } + dlog("received items request for ids ${ids} of type ${type} from peer ${endpoint}", ("ids", fetch_items_message_received.items_to_fetch) ("type", fetch_items_message_received.item_type) @@ -2518,6 +2619,14 @@ namespace graphene { namespace net { namespace detail { void 
node_impl::on_item_ids_inventory_message(peer_connection* originating_peer, const item_ids_inventory_message& item_ids_inventory_message_received) { VERIFY_CORRECT_THREAD(); + // Gatekeeping code + if( originating_peer->their_state != peer_connection::their_connection_state::connection_accepted ) + { + dlog( "Unexpected item_ids_inventory_message from peer ${peer}, disconnecting", + ("peer", originating_peer->get_remote_endpoint()) ); + disconnect_from_peer( originating_peer, "Received an unexpected item_ids_inventory_message" ); + return; + } // expire old inventory // so we'll be making our decisions about whether to fetch blocks below based only on recent inventory @@ -4593,6 +4702,8 @@ namespace graphene { namespace net { namespace detail { _desired_number_of_connections = params["desired_number_of_connections"].as(1); if (params.contains("maximum_number_of_connections")) _maximum_number_of_connections = params["maximum_number_of_connections"].as(1); + if (params.contains("max_addresses_to_handle_at_once")) + _max_addresses_to_handle_at_once = params["max_addresses_to_handle_at_once"].as(1); if (params.contains("max_blocks_to_handle_at_once")) _max_blocks_to_handle_at_once = params["max_blocks_to_handle_at_once"].as(1); if (params.contains("max_sync_blocks_to_prefetch")) @@ -4615,6 +4726,7 @@ namespace graphene { namespace net { namespace detail { result["peer_connection_retry_timeout"] = _peer_connection_retry_timeout; result["desired_number_of_connections"] = _desired_number_of_connections; result["maximum_number_of_connections"] = _maximum_number_of_connections; + result["max_addresses_to_handle_at_once"] = _max_addresses_to_handle_at_once; result["max_blocks_to_handle_at_once"] = _max_blocks_to_handle_at_once; result["max_sync_blocks_to_prefetch"] = _max_sync_blocks_to_prefetch; result["max_sync_blocks_per_peer"] = _max_sync_blocks_per_peer; diff --git a/libraries/net/node_impl.hxx b/libraries/net/node_impl.hxx index 8ede33e65f..6c719738f9 100644 --- a/libraries/net/node_impl.hxx +++ b/libraries/net/node_impl.hxx @@ -614,6 +614,8 @@ public: /// used to prevent us from starting new tasks while we're shutting down bool _node_is_shutting_down = false; + /// Maximum number of addresses to handle at one time + size_t _max_addresses_to_handle_at_once = MAX_ADDRESSES_TO_HANDLE_AT_ONCE; /// Maximum number of blocks to handle at one time size_t _max_blocks_to_handle_at_once = MAX_BLOCKS_TO_HANDLE_AT_ONCE; /// Maximum number of sync blocks to prefetch @@ -698,7 +700,7 @@ public: const blockchain_item_ids_inventory_message& blockchain_item_ids_inventory_message_received ); void on_fetch_items_message( peer_connection* originating_peer, - const fetch_items_message& fetch_items_message_received ) const; + const fetch_items_message& fetch_items_message_received ); void on_item_not_available_message( peer_connection* originating_peer, const item_not_available_message& item_not_available_message_received ); diff --git a/libraries/net/peer_connection.cpp b/libraries/net/peer_connection.cpp index c78be57496..8f1c8d1a48 100644 --- a/libraries/net/peer_connection.cpp +++ b/libraries/net/peer_connection.cpp @@ -87,7 +87,6 @@ namespace graphene { namespace net inhibit_fetching_sync_blocks(false), transaction_fetching_inhibited_until(fc::time_point::min()), last_known_fork_block_number(0), - firewall_check_state(nullptr), #ifndef NDEBUG _thread(&fc::thread::current()), _send_message_queue_tasks_running(0), @@ -524,11 +523,6 @@ namespace graphene { namespace net (GRAPHENE_NET_MAX_INVENTORY_SIZE_IN_MINUTES 
+ 1) * 60 / GRAPHENE_MIN_BLOCK_INTERVAL; } - bool peer_connection::performing_firewall_check() const - { - return firewall_check_state && firewall_check_state->requesting_peer != node_id_t(); - } - fc::optional peer_connection::get_endpoint_for_connecting() const { return remote_inbound_endpoint; From e9d1e16ab75b7094c4ce344ff50a32b21585f9b3 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 29 Aug 2022 21:11:09 +0000 Subject: [PATCH 195/338] Handle node_id in user_data in hello message --- programs/network_mapper/network_mapper.cpp | 42 ++++++---------------- 1 file changed, 11 insertions(+), 31 deletions(-) diff --git a/programs/network_mapper/network_mapper.cpp b/programs/network_mapper/network_mapper.cpp index 951dbddcb6..00c334eee4 100644 --- a/programs/network_mapper/network_mapper.cpp +++ b/programs/network_mapper/network_mapper.cpp @@ -99,8 +99,10 @@ class peer_probe : public graphene::net::peer_connection_delegate const graphene::net::hello_message& hello_message_received) { _node_id = hello_message_received.node_public_key; - if (hello_message_received.user_data.contains("node_id")) - originating_peer->node_id = hello_message_received.user_data["node_id"].as( 1 ); + try { + if (hello_message_received.user_data.contains("node_id")) + _node_id = hello_message_received.user_data["node_id"].as( 1 ); + } catch( fc::exception& ) { /* do nothing */ } originating_peer->send_message(graphene::net::connection_rejected_message()); } @@ -202,11 +204,10 @@ int main(int argc, char** argv) std::map address_info_by_node_id; std::map > connections_by_node_id; std::map node_id_by_endpoint; - std::map outdated_nodes; std::vector> probes; const auto& update_info_by_probe = [ &connections_by_node_id, &address_info_by_node_id, - &node_id_by_endpoint, &outdated_nodes, &my_node_id, + &node_id_by_endpoint, &my_node_id, &nodes_already_visited, &nodes_to_visit_set, &nodes_to_visit ] ( const std::shared_ptr& probe ) { @@ -218,23 +219,15 @@ int main(int argc, char** argv) this_node_info.remote_endpoint = probe->_remote; this_node_info.node_id = probe->_node_id; - // Note: Update if already exists. - // Some nodes may have the same node_id, E.G. created by copying the whole data directory of - // another node. In this case data here could be overwritten. + // Note: Update if it already exists (usually unlikely). 
connections_by_node_id[this_node_info.node_id] = probe->_peers; address_info_by_node_id[this_node_info.node_id] = this_node_info; node_id_by_endpoint[probe->_remote] = probe->_node_id; - for( const auto& info: address_info_by_node_id ) - { - if( info.second.remote_endpoint == probe->_remote && info.first != probe->_node_id ) - outdated_nodes[info.first] = probe->_node_id; - } - for (const graphene::net::address_info& info : probe->_peers) { - if (info.node_id == my_node_id) + if (info.node_id == my_node_id) // We should not be in the list, just be defensive here continue; if (nodes_already_visited.find(info.remote_endpoint) == nodes_already_visited.end() && nodes_to_visit_set.find(info.remote_endpoint) == nodes_to_visit_set.end()) @@ -298,10 +291,9 @@ int main(int argc, char** argv) else probes = std::move( running ); - ilog( "${total} nodes detected, ${outdated} outdated, ${tried} endpoints tried, " + ilog( "${total} nodes detected, ${tried} endpoints tried, " "${reachable} reachable, ${trying} trying, ${todo} to do", ( "total", address_info_by_node_id.size() ) - ( "outdated", outdated_nodes.size() ) ( "tried", nodes_already_visited.size() ) ( "reachable", node_id_by_endpoint.size() ) ( "trying", probes.size() ) @@ -309,19 +301,6 @@ int main(int argc, char** argv) } - // Remove outdated nodes - for( const auto& node_pair : outdated_nodes ) - address_info_by_node_id.erase(node_pair.first); - // Update connection info, replace outdated node_id with new node_id - for( auto& connection_by_id : connections_by_node_id ) - { - for( auto& connection : connection_by_id.second ) - { - if( outdated_nodes.find( connection.node_id ) != outdated_nodes.end() ) - connection.node_id = outdated_nodes[connection.node_id]; - } - } - ilog( "${total} nodes, ${reachable} reachable", ( "total", address_info_by_node_id.size() ) ( "reachable", node_id_by_endpoint.size() ) ); @@ -380,8 +359,9 @@ int main(int argc, char** argv) constexpr uint16_t pair_depth = 2; for (auto& node_and_connections : connections_by_node_id) for (const graphene::net::address_info& this_connection : node_and_connections.second) - dot_stream << " \"" << fc::variant( node_and_connections.first, pair_depth ).as_string() - << "\" -- \"" << fc::variant( this_connection.node_id, 1 ).as_string() << "\";\n"; + if( this_connection.node_id != my_node_id ) // We should not be in the list, just be defensive here + dot_stream << " \"" << fc::variant( node_and_connections.first, pair_depth ).as_string() + << "\" -- \"" << fc::variant( this_connection.node_id, 1 ).as_string() << "\";\n"; dot_stream << "}\n"; From efe8690ed77c4f4cf20399c46d9f1ebb39dac9c5 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 29 Aug 2022 22:36:53 +0000 Subject: [PATCH 196/338] Refactor on_hello_message, verify inbound endpoint --- .../include/graphene/net/peer_connection.hpp | 2 + libraries/net/node.cpp | 274 +++++++++--------- libraries/net/node_impl.hxx | 3 +- libraries/net/peer_connection.cpp | 1 + 4 files changed, 149 insertions(+), 131 deletions(-) diff --git a/libraries/net/include/graphene/net/peer_connection.hpp b/libraries/net/include/graphene/net/peer_connection.hpp index f5ea7e3604..1cc3d1be0b 100644 --- a/libraries/net/include/graphene/net/peer_connection.hpp +++ b/libraries/net/include/graphene/net/peer_connection.hpp @@ -197,6 +197,8 @@ namespace graphene { namespace net uint16_t outbound_port = 0; /// The inbound endpoint of the remote peer fc::optional remote_inbound_endpoint; + /// Whether the inbound endpoint of the remote peer is verified + bool 
inbound_endpoint_verified = false; /// @} using item_to_time_map_type = std::unordered_map; diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index ee78b2764c..fafb8ddf64 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -271,12 +271,13 @@ namespace graphene { namespace net { namespace detail { } } - node_impl::node_impl(const std::string& user_agent) : + node_impl::node_impl(const std::string& user_agent) : _user_agent_string(user_agent) - { + { _rate_limiter.set_actual_rate_time_constant(fc::seconds(2)); + // Note: this means that the node gets a new node_id every time it restarts fc::rand_bytes((char*) _node_id.data(), (int)_node_id.size()); - } + } node_impl::~node_impl() { @@ -1252,8 +1253,9 @@ namespace graphene { namespace net { namespace detail { return (uint32_t)(_handshaking_connections.size() + _active_connections.size()); } - peer_connection_ptr node_impl::get_peer_by_node_id(const node_id_t& node_id) + peer_connection_ptr node_impl::get_peer_by_node_id(const node_id_t& node_id) const { + VERIFY_CORRECT_THREAD(); { fc::scoped_lock lock(_active_connections.get_mutex()); for (const peer_connection_ptr& active_peer : _active_connections) @@ -1269,37 +1271,6 @@ namespace graphene { namespace net { namespace detail { return peer_connection_ptr(); } - bool node_impl::is_already_connected_to_id(const node_id_t& node_id) - { - VERIFY_CORRECT_THREAD(); - if (node_id == _node_id) - { - dlog("is_already_connected_to_id returning true because the peer is us"); - return true; - } - { - fc::scoped_lock lock(_active_connections.get_mutex()); - for (const peer_connection_ptr& active_peer : _active_connections) - { - if (node_id == active_peer->node_id) - { - dlog("is_already_connected_to_id returning true because the peer is already in our active list"); - return true; - } - } - } - { - fc::scoped_lock lock(_handshaking_connections.get_mutex()); - for (const peer_connection_ptr& handshaking_peer : _handshaking_connections) - if (node_id == handshaking_peer->node_id) - { - dlog("is_already_connected_to_id returning true because the peer is already in our handshaking list"); - return true; - } - } - return false; - } - // merge addresses received from a peer into our database bool node_impl::merge_address_info_with_potential_peer_database(const std::vector addresses) { @@ -1314,6 +1285,7 @@ namespace graphene { namespace net { namespace detail { // Note: if found, a copy is returned auto updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(address.remote_endpoint); // Note: + // TODO fix the comments here (since we don't save node_id in the peer database so far) // 1. node_id of that peer may have changed, but we don't check or update // 2. we don't check by node_id either, in case when a peer's IP address has changed, we don't handle it // 3. 
if the peer's inbound port is not 0, no matter if the address is reported as firewalled or not, @@ -1494,9 +1466,72 @@ namespace graphene { namespace net { namespace detail { originating_peer->last_known_fork_block_number = user_data["last_known_fork_block_number"].as(1); } - void node_impl::on_hello_message( peer_connection* originating_peer, const hello_message& hello_message_received ) - { + void node_impl::on_hello_message( peer_connection* originating_peer, const hello_message& hello_message_received ) + { VERIFY_CORRECT_THREAD(); + // Do gatekeeping first + if( originating_peer->their_state != peer_connection::their_connection_state::just_connected ) + { + // we can wind up here if we've connected to ourselves, and the source and + // destination endpoints are the same, causing messages we send out + // to arrive back on the initiating socket instead of the receiving + // socket. If we did a complete job of enumerating local addresses, + // we could avoid directly connecting to ourselves, or at least detect + // immediately when we did it and disconnect. + + // The only way I know of that we'd get an unexpected hello that we + // can't really guard against is if we do a simulatenous open, we + // probably need to think through that case. We're not attempting that + // yet, though, so it's ok to just disconnect here. + dlog( "Unexpected hello_message from peer ${peer}, disconnecting", + ("peer", originating_peer->get_remote_endpoint()) ); + disconnect_from_peer( originating_peer, "Received an unexpected hello_message" ); + return; + } + + // Check chain_id + if( hello_message_received.chain_id != _chain_id ) + { + dlog( "Received hello message from peer on a different chain: ${message}", + ("message", hello_message_received) ); + std::ostringstream rejection_message; + rejection_message << "You're on a different chain than I am. I'm on " << _chain_id.str() << + " and you're on " << hello_message_received.chain_id.str(); + connection_rejected_message connection_rejected( _user_agent_string, core_protocol_version, + *originating_peer->get_remote_endpoint(), + rejection_reason_code::different_chain, + rejection_message.str() ); + originating_peer->their_state = peer_connection::their_connection_state::connection_rejected; + originating_peer->send_message( message(connection_rejected) ); + // for this type of message, we're immediately disconnecting this peer, instead of trying to + // allowing her to ask us for peers (any of our peers will be on the same chain as us, so there's no + // benefit of sharing them) + disconnect_from_peer( originating_peer, "You are on a different chain from me" ); + return; + } + + // Validate the peer's public key. + // Note: the node_id in user_data is not verified. 
+ fc::sha256::encoder shared_secret_encoder; + fc::sha512 shared_secret = originating_peer->get_shared_secret(); + shared_secret_encoder.write(shared_secret.data(), sizeof(shared_secret)); + fc::ecc::public_key expected_node_public_key( hello_message_received.signed_shared_secret, + shared_secret_encoder.result(), false ); + if( hello_message_received.node_public_key != expected_node_public_key.serialize() ) + { + wlog( "Invalid signature in hello message from peer ${peer}", + ("peer", originating_peer->get_remote_endpoint()) ); + connection_rejected_message connection_rejected( _user_agent_string, core_protocol_version, + *originating_peer->get_remote_endpoint(), + rejection_reason_code::invalid_hello_message, + "Invalid signature in hello message" ); + originating_peer->their_state = peer_connection::their_connection_state::connection_rejected; + originating_peer->send_message( message(connection_rejected) ); + // for this type of message, we're immediately disconnecting this peer + disconnect_from_peer( originating_peer, connection_rejected.reason_string ); + return; + } + // this already_connected check must come before we fill in peer data below node_id_t peer_node_id = hello_message_received.node_public_key; try @@ -1507,14 +1542,34 @@ namespace graphene { namespace net { namespace detail { { // either it's not there or it's not a valid session id. either way, ignore. } - bool already_connected_to_this_peer = is_already_connected_to_id(peer_node_id); - - // validate the node id - fc::sha256::encoder shared_secret_encoder; - fc::sha512 shared_secret = originating_peer->get_shared_secret(); - shared_secret_encoder.write(shared_secret.data(), sizeof(shared_secret)); - fc::ecc::public_key expected_node_public_key(hello_message_received.signed_shared_secret, - shared_secret_encoder.result(), false); + // The peer's node_id should not be null + static const node_id_t null_node_id; + if( null_node_id == peer_node_id ) + { + dlog( "The node_id in the hello_message from peer ${peer} is null, disconnecting", + ("peer", originating_peer->get_remote_endpoint()) ); + disconnect_from_peer( originating_peer, "Your node_id in the hello_message is null" ); + return; + } + // Check whether the peer is myself + if( _node_id == peer_node_id ) + { + // Note: this can happen in rare cases if the peer is not actually myself but another node. + // Anyway, we see it as ourselves, reject it and disconnect it. 
+ connection_rejected_message connection_rejected( _user_agent_string, core_protocol_version, + *originating_peer->get_remote_endpoint(), + rejection_reason_code::connected_to_self, + "I'm connecting to myself" ); + originating_peer->their_state = peer_connection::their_connection_state::connection_rejected; + originating_peer->send_message( message(connection_rejected) ); + dlog( "Received a hello_message from peer ${peer} that is myself or claimed to be myself, rejection", + ("peer", originating_peer->get_remote_endpoint()) + ("id", originating_peer->node_id) ); + disconnect_from_peer( originating_peer, connection_rejected.reason_string ); + return; + } + // Get a pointer to an exising connection to the peer (if one exists) for later use + peer_connection_ptr already_connected_peer = get_peer_by_node_id( peer_node_id ); // store off the data provided in the hello message originating_peer->user_agent = hello_message_received.user_agent; @@ -1548,60 +1603,6 @@ namespace graphene { namespace net { namespace detail { } // now decide what to do with it - if( originating_peer->their_state != peer_connection::their_connection_state::just_connected ) - { - // we can wind up here if we've connected to ourselves, and the source and - // destination endpoints are the same, causing messages we send out - // to arrive back on the initiating socket instead of the receiving - // socket. If we did a complete job of enumerating local addresses, - // we could avoid directly connecting to ourselves, or at least detect - // immediately when we did it and disconnect. - - // The only way I know of that we'd get an unexpected hello that we - // can't really guard against is if we do a simulatenous open, we - // probably need to think through that case. We're not attempting that - // yet, though, so it's ok to just disconnect here. - wlog("unexpected hello_message from peer, disconnecting"); - disconnect_from_peer(originating_peer, "Received an unexpected hello_message"); - return; - } - - if( hello_message_received.node_public_key != expected_node_public_key.serialize() ) - { - wlog("Invalid signature in hello message from peer ${peer}", ("peer", originating_peer->get_remote_endpoint())); - std::string rejection_message("Invalid signature in hello message"); - connection_rejected_message connection_rejected(_user_agent_string, core_protocol_version, - originating_peer->get_socket().remote_endpoint(), - rejection_reason_code::invalid_hello_message, - rejection_message); - - originating_peer->their_state = peer_connection::their_connection_state::connection_rejected; - originating_peer->send_message( message(connection_rejected ) ); - // for this type of message, we're immediately disconnecting this peer - disconnect_from_peer( originating_peer, "Invalid signature in hello message" ); - return; - } - - if( hello_message_received.chain_id != _chain_id ) - { - wlog("Received hello message from peer on a different chain: ${message}", ("message", hello_message_received)); - std::ostringstream rejection_message; - rejection_message << "You're on a different chain than I am. 
I'm on " << _chain_id.str() << - " and you're on " << hello_message_received.chain_id.str(); - connection_rejected_message connection_rejected(_user_agent_string, core_protocol_version, - originating_peer->get_socket().remote_endpoint(), - rejection_reason_code::different_chain, - rejection_message.str()); - - originating_peer->their_state = peer_connection::their_connection_state::connection_rejected; - originating_peer->send_message(message(connection_rejected)); - // for this type of message, we're immediately disconnecting this peer, instead of trying to - // allowing her to ask us for peers (any of our peers will be on the same chain as us, so there's no - // benefit of sharing them) - disconnect_from_peer(originating_peer, "You are on a different chain from me"); - return; - } - if (originating_peer->last_known_fork_block_number != 0) { uint32_t next_fork_block_number = get_next_known_hard_fork_block_number(originating_peer->last_known_fork_block_number); @@ -1635,28 +1636,30 @@ namespace graphene { namespace net { namespace detail { } } - if (already_connected_to_this_peer) + if( peer_connection_ptr() != already_connected_peer ) { - connection_rejected_message connection_rejected; - if (_node_id == originating_peer->node_id) - connection_rejected = connection_rejected_message(_user_agent_string, core_protocol_version, - originating_peer->get_socket().remote_endpoint(), - rejection_reason_code::connected_to_self, - "I'm connecting to myself"); - else - // TODO if it is an outbound connection, update the existing connection's inbound_endpoint - connection_rejected = connection_rejected_message(_user_agent_string, core_protocol_version, - originating_peer->get_socket().remote_endpoint(), - rejection_reason_code::already_connected, - "I'm already connected to you"); - + // If it is an outbound connection, update the existing connection's inbound_endpoint. 
+ // Note: there may be a race condition that multiple tasks try to write the same data + if( peer_connection_direction::outbound == originating_peer->direction + && peer_connection_direction::inbound == already_connected_peer->direction + && originating_peer->node_public_key == already_connected_peer->node_public_key ) + { + already_connected_peer->remote_inbound_endpoint = originating_peer->get_remote_endpoint(); + already_connected_peer->inbound_endpoint_verified = true; + already_connected_peer->is_firewalled = firewalled_state::not_firewalled; + } + // Now reject + connection_rejected_message connection_rejected( _user_agent_string, core_protocol_version, + *originating_peer->get_remote_endpoint(), + rejection_reason_code::already_connected, + "I'm already connected to you" ); originating_peer->their_state = peer_connection::their_connection_state::connection_rejected; - originating_peer->send_message(message(connection_rejected)); + originating_peer->send_message( message(connection_rejected) ); dlog("Received a hello_message from peer ${peer} that I'm already connected to (with id ${id}), rejection", ("peer", originating_peer->get_remote_endpoint()) ("id", originating_peer->node_id)); // If already connected, we disconnect - disconnect_from_peer(originating_peer, connection_rejected.reason_string); + disconnect_from_peer( originating_peer, connection_rejected.reason_string ); } #ifdef ENABLE_P2P_DEBUGGING_API else if(!_allowed_peers.empty() && @@ -1730,11 +1733,11 @@ namespace graphene { namespace net { namespace detail { ("peer", originating_peer->get_remote_endpoint())); } } - } + } - void node_impl::on_connection_accepted_message( peer_connection* originating_peer, - const connection_accepted_message& ) const - { + void node_impl::on_connection_accepted_message( peer_connection* originating_peer, + const connection_accepted_message& ) const + { VERIFY_CORRECT_THREAD(); // Gatekeeping code // We only send one address request message shortly after connected @@ -1746,13 +1749,13 @@ namespace graphene { namespace net { namespace detail { return; } - dlog("Received a connection_accepted in response to my \"hello\" from ${peer}", - ("peer", originating_peer->get_remote_endpoint())); + dlog( "Received a connection_accepted in response to my \"hello\" from ${peer}", + ("peer", originating_peer->get_remote_endpoint()) ); originating_peer->negotiation_status = peer_connection::connection_negotiation_status::peer_connection_accepted; originating_peer->our_state = peer_connection::our_connection_state::connection_accepted; originating_peer->expecting_address_message = true; originating_peer->send_message(address_request_message()); - } + } void node_impl::on_connection_rejected_message(peer_connection* originating_peer, const connection_rejected_message& connection_rejected_message_received) { @@ -1767,7 +1770,6 @@ namespace graphene { namespace net { namespace detail { ::peer_connection_rejected; originating_peer->our_state = peer_connection::our_connection_state::connection_rejected; - // TODO the data is not verified, be careful if( connection_rejected_message_received.reason_code == rejection_reason_code::connected_to_self || connection_rejected_message_received.reason_code == rejection_reason_code::different_chain ) { @@ -1778,11 +1780,17 @@ namespace graphene { namespace net { namespace detail { // Note: we should not erase data by the peer's claimed inbound_address or inbound_port, // because the data is still unreliable. 
     _potential_peer_db.erase(originating_peer->get_socket().remote_endpoint());
+        // Note: we do not send closing_connection_message, but close directly. This is probably OK
         move_peer_to_closing_list(originating_peer->shared_from_this());
         originating_peer->close_connection();
       }
-      // TODO if it is an outbound connection, and the rejection reason is "already_connected",
-      //      update the existing connection's inbound_endpoint
+      // Note: ideally, if it is an outbound connection, and the rejection reason is "already_connected",
+      //       we should update the existing connection's inbound_endpoint and mark it as verified.
+      //       However, at the moment maybe we haven't processed its hello message,
+      //       so we don't know its node_id and are unable to locate the existing connection.
+      //       So it is better to do the update in on_hello_message().
+      //       It is also possible that its hello message comes too late and the connection is already closed,
+      //       in which case we don't have a chance to update anyway.
       else
       {
         // update our database to record that we were rejected so we won't try to connect again for a while
@@ -2730,7 +2738,7 @@ namespace graphene { namespace net { namespace detail {
       }
       if( originating_peer->we_have_requested_close )
         originating_peer->close_connection();
-    }
+  }

   void node_impl::on_connection_closed(peer_connection* originating_peer)
   {
@@ -4087,6 +4095,8 @@ namespace graphene { namespace net { namespace detail {
       // Note: this step is almost useless because we didn't multiply _peer_connection_retry_timeout
       //       by number_of_failed_connection_attempts. However, it is probably desired as we don't want
       //       to try to connect to a large number of dead nodes at startup.
+      //       As of writing, _peer_connection_retry_timeout is 30 seconds, pushing the time back that much
+      //       won't have much impact in production.
       //       TODO Perhaps just remove it.
for (peer_database::iterator itr = _potential_peer_db.begin(); itr != _potential_peer_db.end(); ++itr) { @@ -4363,10 +4373,13 @@ namespace graphene { namespace net { namespace detail { fc::scoped_lock lock(_active_connections.get_mutex()); for( const peer_connection_ptr& active_peer : _active_connections ) { - // TODO check by remote inbound endpoint too - fc::optional endpoint_for_this_peer( active_peer->get_remote_endpoint() ); + fc::optional endpoint_for_this_peer = active_peer->get_remote_endpoint(); if( endpoint_for_this_peer && *endpoint_for_this_peer == remote_endpoint ) return active_peer; + if( peer_connection_direction::inbound == active_peer->direction + && active_peer->inbound_endpoint_verified // which implies get_endpoint_for_connecting().valid() + && *active_peer->get_endpoint_for_connecting() == remote_endpoint ) + return active_peer; } return peer_connection_ptr(); } @@ -4380,10 +4393,13 @@ namespace graphene { namespace net { namespace detail { fc::scoped_lock lock(_handshaking_connections.get_mutex()); for( const peer_connection_ptr& handshaking_peer : _handshaking_connections ) { - // TODO check by remote inbound endpoint too fc::optional endpoint_for_this_peer( handshaking_peer->get_remote_endpoint() ); if( endpoint_for_this_peer && *endpoint_for_this_peer == remote_endpoint ) return handshaking_peer; + if( peer_connection_direction::inbound == handshaking_peer->direction + && handshaking_peer->inbound_endpoint_verified // which implies get_endpoint_for_connecting().valid() + && *handshaking_peer->get_endpoint_for_connecting() == remote_endpoint ) + return handshaking_peer; } return peer_connection_ptr(); } diff --git a/libraries/net/node_impl.hxx b/libraries/net/node_impl.hxx index 6c719738f9..00385c73f6 100644 --- a/libraries/net/node_impl.hxx +++ b/libraries/net/node_impl.hxx @@ -664,9 +664,8 @@ public: bool is_accepting_new_connections(); bool is_wanting_new_connections(); uint32_t get_number_of_connections(); - peer_connection_ptr get_peer_by_node_id(const node_id_t& id); + peer_connection_ptr get_peer_by_node_id(const node_id_t& id) const; - bool is_already_connected_to_id(const node_id_t& node_id); bool merge_address_info_with_potential_peer_database( const std::vector addresses ); void display_current_connections(); uint32_t calculate_unsynced_block_count_from_all_peers(); diff --git a/libraries/net/peer_connection.cpp b/libraries/net/peer_connection.cpp index 8f1c8d1a48..a87dbad59a 100644 --- a/libraries/net/peer_connection.cpp +++ b/libraries/net/peer_connection.cpp @@ -263,6 +263,7 @@ namespace graphene { namespace net their_state = their_connection_state::just_connected; our_state = our_connection_state::just_connected; remote_inbound_endpoint = remote_endpoint; + inbound_endpoint_verified = true; ilog( "established outbound connection to ${remote_endpoint}", ("remote_endpoint", remote_endpoint ) ); } catch ( fc::exception& e ) From 8381eb358db606ebf479c61088ab952019363061 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 29 Aug 2022 23:42:59 +0000 Subject: [PATCH 197/338] Update logging --- libraries/net/node.cpp | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index fafb8ddf64..e1222c44b4 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -1483,7 +1483,7 @@ namespace graphene { namespace net { namespace detail { // can't really guard against is if we do a simulatenous open, we // probably need to think through that case. 
We're not attempting that // yet, though, so it's ok to just disconnect here. - dlog( "Unexpected hello_message from peer ${peer}, disconnecting", + wlog( "Unexpected hello_message from peer ${peer}, disconnecting", ("peer", originating_peer->get_remote_endpoint()) ); disconnect_from_peer( originating_peer, "Received an unexpected hello_message" ); return; @@ -1492,7 +1492,8 @@ namespace graphene { namespace net { namespace detail { // Check chain_id if( hello_message_received.chain_id != _chain_id ) { - dlog( "Received hello message from peer on a different chain: ${message}", + wlog( "Received hello message from peer ${peer} on a different chain: ${message}", + ("peer", originating_peer->get_remote_endpoint()) ("message", hello_message_received) ); std::ostringstream rejection_message; rejection_message << "You're on a different chain than I am. I'm on " << _chain_id.str() << @@ -1546,7 +1547,7 @@ namespace graphene { namespace net { namespace detail { static const node_id_t null_node_id; if( null_node_id == peer_node_id ) { - dlog( "The node_id in the hello_message from peer ${peer} is null, disconnecting", + wlog( "The node_id in the hello_message from peer ${peer} is null, disconnecting", ("peer", originating_peer->get_remote_endpoint()) ); disconnect_from_peer( originating_peer, "Your node_id in the hello_message is null" ); return; @@ -1744,7 +1745,7 @@ namespace graphene { namespace net { namespace detail { if( originating_peer->our_state != peer_connection::our_connection_state::just_connected ) { // Log and ignore - dlog( "Received an unexpected connection_accepted message from ${peer}", + wlog( "Received an unexpected connection_accepted message from ${peer}", ("peer", originating_peer->get_remote_endpoint()) ); return; } @@ -1814,7 +1815,7 @@ namespace graphene { namespace net { namespace detail { // which would cause on_connection_closed() to be called, // which would then close the connection when the peer_connection object was destroyed. // Explicitly closing the connection here is more intuitive. 
- dlog( "Unexpected connection_rejected_message from peer ${peer}, disconnecting", + wlog( "Unexpected connection_rejected_message from peer ${peer}, disconnecting", ("peer", originating_peer->get_remote_endpoint()) ); disconnect_from_peer( originating_peer, "Received an unexpected connection_rejected_message" ); } @@ -1827,7 +1828,7 @@ namespace graphene { namespace net { namespace detail { if( originating_peer->their_state != peer_connection::their_connection_state::connection_accepted && originating_peer->their_state != peer_connection::their_connection_state::connection_rejected ) { - dlog( "Unexpected address_request_message from peer ${peer}, disconnecting", + wlog( "Unexpected address_request_message from peer ${peer}, disconnecting", ("peer", originating_peer->get_remote_endpoint()) ); disconnect_from_peer( originating_peer, "Received an unexpected address_request_message" ); return; @@ -1882,7 +1883,7 @@ namespace graphene { namespace net { namespace detail { if( !originating_peer->expecting_address_message ) { // Log and ignore - dlog( "Received an unexpected address message containing ${size} addresses for peer ${peer}", + wlog( "Received an unexpected address message containing ${size} addresses for peer ${peer}", ("size", address_message_received.addresses.size()) ("peer", originating_peer->get_remote_endpoint()) ); return; @@ -1973,7 +1974,7 @@ namespace graphene { namespace net { namespace detail { // Gatekeeping code if( originating_peer->their_state != peer_connection::their_connection_state::connection_accepted ) { - dlog( "Unexpected fetch_blockchain_item_ids_message from peer ${peer}, disconnecting", + wlog( "Unexpected fetch_blockchain_item_ids_message from peer ${peer}, disconnecting", ("peer", originating_peer->get_remote_endpoint()) ); disconnect_from_peer( originating_peer, "Received an unexpected fetch_blockchain_item_ids_message" ); return; @@ -2487,7 +2488,8 @@ namespace graphene { namespace net { namespace detail { } else { - wlog("sync: received a list of sync items available, but I didn't ask for any!"); + wlog( "sync: received a list of sync items available from peer ${peer}, but I didn't ask for any!", + ("peer", originating_peer->get_remote_endpoint()) ); } } @@ -2515,7 +2517,7 @@ namespace graphene { namespace net { namespace detail { // Gatekeeping code if( originating_peer->their_state != peer_connection::their_connection_state::connection_accepted ) { - dlog( "Unexpected fetch_items_message from peer ${peer}, disconnecting", + wlog( "Unexpected fetch_items_message from peer ${peer}, disconnecting", ("peer", originating_peer->get_remote_endpoint()) ); disconnect_from_peer( originating_peer, "Received an unexpected fetch_items_message" ); return; @@ -2599,7 +2601,9 @@ namespace graphene { namespace net { namespace detail { _items_to_fetch.insert(prioritized_item_id(requested_item, _items_to_fetch_seq_counter)); ++_items_to_fetch_seq_counter; } - wlog("Peer doesn't have the requested item."); + wlog( "Peer ${peer} doesn't have the requested item ${item}.", + ("peer", originating_peer->get_remote_endpoint()) + ("item", requested_item) ); trigger_fetch_items_loop(); return; } @@ -2616,7 +2620,9 @@ namespace graphene { namespace net { namespace detail { disconnect_from_peer(originating_peer, "You are missing a sync item you claim to have, your database is probably corrupted. Try --rebuild-index.",true, fc::exception(FC_LOG_MESSAGE(error,"You are missing a sync item you claim to have, your database is probably corrupted. 
Try --rebuild-index.", ("item_id", requested_item)))); - wlog("Peer doesn't have the requested sync item. This really shouldn't happen"); + wlog( "Peer ${peer} doesn't have the requested sync item ${item}. This really shouldn't happen", + ("peer", originating_peer->get_remote_endpoint()) + ("item", requested_item) ); trigger_fetch_sync_items_loop(); return; } @@ -2630,7 +2636,7 @@ namespace graphene { namespace net { namespace detail { // Gatekeeping code if( originating_peer->their_state != peer_connection::their_connection_state::connection_accepted ) { - dlog( "Unexpected item_ids_inventory_message from peer ${peer}, disconnecting", + wlog( "Unexpected item_ids_inventory_message from peer ${peer}, disconnecting", ("peer", originating_peer->get_remote_endpoint()) ); disconnect_from_peer( originating_peer, "Received an unexpected item_ids_inventory_message" ); return; From a6a1b7ece3b32fcfade0bb2e3de56e6c6412eed5 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 2 Sep 2022 00:38:46 +0000 Subject: [PATCH 198/338] Update inbound endpoint verification ... ... and add some gatekeeping code and other improvements --- libraries/fc | 2 +- .../include/graphene/net/peer_connection.hpp | 2 + libraries/net/node.cpp | 122 +++++++++++++++--- 3 files changed, 107 insertions(+), 19 deletions(-) diff --git a/libraries/fc b/libraries/fc index 7db6fbaee4..ad7b28bca8 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 7db6fbaee44e0f991a66bfd82f37f62c40a8073e +Subproject commit ad7b28bca8eebe68a594ed06ce1993975101e408 diff --git a/libraries/net/include/graphene/net/peer_connection.hpp b/libraries/net/include/graphene/net/peer_connection.hpp index 1cc3d1be0b..144878bb6f 100644 --- a/libraries/net/include/graphene/net/peer_connection.hpp +++ b/libraries/net/include/graphene/net/peer_connection.hpp @@ -199,6 +199,8 @@ namespace graphene { namespace net fc::optional remote_inbound_endpoint; /// Whether the inbound endpoint of the remote peer is verified bool inbound_endpoint_verified = false; + /// Some nodes may be listening on multiple endpoints + fc::flat_set additional_inbound_endpoints; /// @} using item_to_time_map_type = std::unordered_map; diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index e1222c44b4..c874c1decc 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -152,6 +152,20 @@ namespace graphene { namespace net { namespace detail { # define VERIFY_CORRECT_THREAD() do {} while (0) #endif + /// Greatly delays the next connection to the endpoint + static void greatly_delay_next_conn_to( node_impl* impl, const fc::ip::endpoint& ep ) + { + fc::optional updated_peer_record + = impl->_potential_peer_db.lookup_entry_for_endpoint( ep ); + if( updated_peer_record ) + { + updated_peer_record->last_connection_disposition = last_connection_rejected; + updated_peer_record->last_connection_attempt_time = fc::time_point::now(); + constexpr uint32_t failed_attempts_to_add = 120; // * 30 seconds = 1 hour + updated_peer_record->number_of_failed_connection_attempts += failed_attempts_to_add; + impl->_potential_peer_db.update_entry( *updated_peer_record ); + } + } static void update_address_seen_time( node_impl* impl, const peer_connection* active_peer ) { fc::optional inbound_endpoint = active_peer->get_endpoint_for_connecting(); @@ -468,8 +482,10 @@ namespace graphene { namespace net { namespace detail { if( _update_seed_nodes_loop_done.valid() && _update_seed_nodes_loop_done.canceled() ) return; + constexpr uint32_t five = 5; + auto interval = 
_active_connections.empty() ? fc::minutes(five) : fc::hours(1); _update_seed_nodes_loop_done = fc::schedule( [this]() { update_seed_nodes_task(); }, - fc::time_point::now() + fc::hours(3), + fc::time_point::now() + interval, "update_seed_nodes_loop" ); } @@ -1285,7 +1301,7 @@ namespace graphene { namespace net { namespace detail { // Note: if found, a copy is returned auto updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(address.remote_endpoint); // Note: - // TODO fix the comments here (since we don't save node_id in the peer database so far) + // We don't save node_id in the peer database so far // 1. node_id of that peer may have changed, but we don't check or update // 2. we don't check by node_id either, in case when a peer's IP address has changed, we don't handle it // 3. if the peer's inbound port is not 0, no matter if the address is reported as firewalled or not, @@ -1341,6 +1357,14 @@ namespace graphene { namespace net { namespace detail { ("type", graphene::net::core_message_type_enum(received_message.msg_type.value()))("hash", message_hash) ("size", received_message.size) ("endpoint", originating_peer->get_remote_endpoint())); + // Gatekeeping code + if( originating_peer->we_have_requested_close + && received_message.msg_type.value() != core_message_type_enum::closing_connection_message_type ) + { + dlog( "Unexpected message from peer ${peer} while we have requested to close connection", + ("peer", originating_peer->get_remote_endpoint()) ); + return; + } switch ( received_message.msg_type.value() ) { case core_message_type_enum::hello_message_type: @@ -1495,6 +1519,15 @@ namespace graphene { namespace net { namespace detail { wlog( "Received hello message from peer ${peer} on a different chain: ${message}", ("peer", originating_peer->get_remote_endpoint()) ("message", hello_message_received) ); + // If it is an outbound connection, make sure we won't reconnect to the peer soon + if( peer_connection_direction::outbound == originating_peer->direction ) + { + // Note: deleting is not the best approach since it can be readded soon and we will reconnect soon. + // Marking it "permanently rejected" is also not good enough since the peer can be "fixed". + // It seems the best approach is to reduce its weight significantly. + greatly_delay_next_conn_to( this, *originating_peer->get_remote_endpoint() ); + } + // Now reject std::ostringstream rejection_message; rejection_message << "You're on a different chain than I am. I'm on " << _chain_id.str() << " and you're on " << hello_message_received.chain_id.str(); @@ -1555,6 +1588,15 @@ namespace graphene { namespace net { namespace detail { // Check whether the peer is myself if( _node_id == peer_node_id ) { + // If it is an outbound connection, make sure we won't reconnect to the peer soon + if( peer_connection_direction::outbound == originating_peer->direction ) + { + // Note: deleting is not the best approach since it can be readded soon and we will reconnect soon. + // Marking it "permanently rejected" is also not good enough since the peer can be "fixed". + // It seems the best approach is to reduce its weight significantly. + greatly_delay_next_conn_to( this, *originating_peer->get_remote_endpoint() ); + } + // Now reject // Note: this can happen in rare cases if the peer is not actually myself but another node. // Anyway, we see it as ourselves, reject it and disconnect it. 
connection_rejected_message connection_rejected( _user_agent_string, core_protocol_version, @@ -1642,12 +1684,32 @@ namespace graphene { namespace net { namespace detail { // If it is an outbound connection, update the existing connection's inbound_endpoint. // Note: there may be a race condition that multiple tasks try to write the same data if( peer_connection_direction::outbound == originating_peer->direction - && peer_connection_direction::inbound == already_connected_peer->direction && originating_peer->node_public_key == already_connected_peer->node_public_key ) { - already_connected_peer->remote_inbound_endpoint = originating_peer->get_remote_endpoint(); - already_connected_peer->inbound_endpoint_verified = true; - already_connected_peer->is_firewalled = firewalled_state::not_firewalled; + // Do not replace a verified public address with a private or local address. + // Note: there is a scenario that some nodes in the same local network may have connected to each other, + // and of course some are outbound connections and some are inbound, so we are unable to update + // all the data, not to mention that their external addresses might be inaccessible to each + // other. + // Unless they are all configured with the "p2p-inbound-endpoint" option with an external address, + // even if they all start out connecting to each other's external addresses, at some point they + // may try to connect to each other's local addresses and possibly stay connected. + // In this case, if the nodes aren't configured with the "advertise-peer-algorithm" option and + // related options properly, when advertising connected peers to other peers, they may expose + // that they are in the same local network and connected to each other. + // On the other hand, when we skip updates in some cases, we may end up trying to reconnect soon + // and endlessly (which is addressed with additional_inbound_endpoints). + auto old_inbound_endpoint = already_connected_peer->get_endpoint_for_connecting(); + auto new_inbound_endpoint = originating_peer->get_remote_endpoint(); + already_connected_peer->additional_inbound_endpoints.insert( *new_inbound_endpoint ); + if ( !already_connected_peer->inbound_endpoint_verified // which implies direction == inbound + || new_inbound_endpoint->get_address().is_public_address() + || !old_inbound_endpoint->get_address().is_public_address() ) + { + already_connected_peer->remote_inbound_endpoint = new_inbound_endpoint; + already_connected_peer->inbound_endpoint_verified = true; + already_connected_peer->is_firewalled = firewalled_state::not_firewalled; + } } // Now reject connection_rejected_message connection_rejected( _user_agent_string, core_protocol_version, @@ -1778,9 +1840,12 @@ namespace graphene { namespace net { namespace detail { // For an inbound connection, we should have not saved anything to the peer database yet, nor we will // save anything (it would be weird if they rejected us but we didn't reject them), // so using remote_endpoint here at least won't do anything bad. - // Note: we should not erase data by the peer's claimed inbound_address or inbound_port, + // Note: we should not erase or update data by the peer's claimed inbound_address, // because the data is still unreliable. - _potential_peer_db.erase(originating_peer->get_socket().remote_endpoint()); + // Note: deleting is not the best approach since it can be readded soon and we will reconnect soon. + // Marking it "permanently rejected" is also not good enough since the peer can be "fixed". 
+ // It seems the best approach is to reduce its weight significantly. + greatly_delay_next_conn_to( this, *originating_peer->get_remote_endpoint() ); // Note: we do not send closing_connection_message, but close directly. This is probably OK move_peer_to_closing_list(originating_peer->shared_from_this()); originating_peer->close_connection(); @@ -4379,19 +4444,32 @@ namespace graphene { namespace net { namespace detail { fc::scoped_lock lock(_active_connections.get_mutex()); for( const peer_connection_ptr& active_peer : _active_connections ) { + // Note: for outbound connections, checking by remote_endpoint is OK, + // and we will ignore the inbound address and port it sends to us when handshaking. + // For an inbound active connection, we want to verify its inbound endpoint, if it happens to be + // the same as remote_endpoint but not yet verified, we consider it as not connected. + // * If verification succeeds, we will mark it as "verified" and won't try to connect again. + // * We may fail to verify if it is firewalled, in this case number_of_failed_connection_attempts + // will increase, so we will not reconnect soon, but will wait longer and longer. fc::optional endpoint_for_this_peer = active_peer->get_remote_endpoint(); - if( endpoint_for_this_peer && *endpoint_for_this_peer == remote_endpoint ) + if( peer_connection_direction::outbound == active_peer->direction + && endpoint_for_this_peer && *endpoint_for_this_peer == remote_endpoint ) return active_peer; if( peer_connection_direction::inbound == active_peer->direction && active_peer->inbound_endpoint_verified // which implies get_endpoint_for_connecting().valid() && *active_peer->get_endpoint_for_connecting() == remote_endpoint ) return active_peer; + for( const auto& ep : active_peer->additional_inbound_endpoints ) + { + if( ep == remote_endpoint ) + return active_peer; + } } return peer_connection_ptr(); } - peer_connection_ptr node_impl::get_connection_for_endpoint( const fc::ip::endpoint& remote_endpoint ) const - { + peer_connection_ptr node_impl::get_connection_for_endpoint( const fc::ip::endpoint& remote_endpoint ) const + { VERIFY_CORRECT_THREAD(); peer_connection_ptr active_ptr = get_active_conn_for_endpoint( remote_endpoint ); if ( active_ptr != peer_connection_ptr() ) @@ -4399,16 +4477,24 @@ namespace graphene { namespace net { namespace detail { fc::scoped_lock lock(_handshaking_connections.get_mutex()); for( const peer_connection_ptr& handshaking_peer : _handshaking_connections ) { - fc::optional endpoint_for_this_peer( handshaking_peer->get_remote_endpoint() ); - if( endpoint_for_this_peer && *endpoint_for_this_peer == remote_endpoint ) - return handshaking_peer; - if( peer_connection_direction::inbound == handshaking_peer->direction + // For an inbound handshaking connection, there is a race condition since we might not know its node_id yet, + // so be stricter here. + // Even so, there may be situations that we end up having multiple active connections with them. 
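Stated as a single predicate, the matching rules that get_active_conn_for_endpoint above and the handshaking checks below now apply look roughly like this; the sketch uses std types in place of the fc ones and is a restatement for readability, not the actual node_impl code:

    #include <optional>
    #include <set>
    #include <string>

    enum class direction { inbound, outbound };

    struct peer_view
    {
       direction dir;
       std::optional<std::string> remote_endpoint;           // the actual socket endpoint
       std::optional<std::string> verified_inbound_endpoint; // set only after verification
       std::set<std::string> additional_inbound_endpoints;   // other endpoints it listens on
    };

    // True if this peer should already count as a connection to the given endpoint.
    bool counts_as_connected_to( const peer_view& p, const std::string& target )
    {
       if( direction::outbound == p.dir && p.remote_endpoint && *p.remote_endpoint == target )
          return true;
       if( direction::inbound == p.dir && p.verified_inbound_endpoint
           && *p.verified_inbound_endpoint == target )
          return true;
       return p.additional_inbound_endpoints.count( target ) > 0;
    }
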
+ fc::optional endpoint_for_this_peer( handshaking_peer->get_remote_endpoint() ); + if( endpoint_for_this_peer && *endpoint_for_this_peer == remote_endpoint ) + return handshaking_peer; + if( peer_connection_direction::inbound == handshaking_peer->direction && handshaking_peer->inbound_endpoint_verified // which implies get_endpoint_for_connecting().valid() && *handshaking_peer->get_endpoint_for_connecting() == remote_endpoint ) - return handshaking_peer; + return handshaking_peer; + for( const auto& ep : handshaking_peer->additional_inbound_endpoints ) + { + if( ep == remote_endpoint ) + return handshaking_peer; + } } return peer_connection_ptr(); - } + } bool node_impl::is_connected_to_endpoint( const fc::ip::endpoint& remote_endpoint ) const { @@ -4821,7 +4907,7 @@ namespace graphene { namespace net { namespace detail { { VERIFY_CORRECT_THREAD(); fc::mutable_variant_object info; - info["listening_on"] = _actual_listening_endpoint; + info["listening_on"] = std::string( _actual_listening_endpoint ); info["node_public_key"] = fc::variant( _node_public_key, 1 ); info["node_id"] = fc::variant( _node_id, 1 ); return info; From c94cbc76d398dc8b2b3b8145c7b8a376fb29bed0 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 2 Sep 2022 01:35:40 +0000 Subject: [PATCH 199/338] Add colors to log messages in app_test --- tests/app/main.cpp | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/tests/app/main.cpp b/tests/app/main.cpp index 328abcfad3..3144026c9e 100644 --- a/tests/app/main.cpp +++ b/tests/app/main.cpp @@ -220,15 +220,11 @@ BOOST_AUTO_TEST_CASE( two_node_network ) using namespace graphene::app; try { // Configure logging - fc::logging_config logging_config; - logging_config.appenders.push_back( fc::appender_config( "stderr", "console", - fc::variant( fc::console_appender::config(), GRAPHENE_MAX_NESTED_OBJECTS ) ) ); + fc::logging_config logging_config = fc::logging_config::default_config(); - fc::logger_config logger_config("p2p"); - logger_config.level = fc::log_level::debug; - logger_config.appenders.push_back("stderr"); - - logging_config.loggers.push_back(logger_config); + auto logger = logging_config.loggers.back(); // get a copy of the default logger + logger.name = "p2p"; // update the name to p2p + logging_config.loggers.push_back( logger ); // add it to logging_config fc::configure_logging(logging_config); From 3d56e96735e88151b0a0fea8b9e5810770742041 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 2 Sep 2022 03:01:42 +0000 Subject: [PATCH 200/338] Fix and improve P2P node tests --- tests/tests/p2p_node_tests.cpp | 146 +++++++++++++++++++++------------ 1 file changed, 95 insertions(+), 51 deletions(-) diff --git a/tests/tests/p2p_node_tests.cpp b/tests/tests/p2p_node_tests.cpp index 19811ae957..7a141d6728 100644 --- a/tests/tests/p2p_node_tests.cpp +++ b/tests/tests/p2p_node_tests.cpp @@ -36,6 +36,11 @@ #include +#include +#include +#include +#include + #include #include "../../libraries/net/node_impl.hxx" @@ -101,6 +106,7 @@ class test_node : public graphene::net::node, public graphene::net::node_delegat std::shared_ptr d{}; graphene::net::peer_connection_ptr peer = graphene::net::peer_connection::make_shared( d.get() ); peer->set_remote_endpoint( fc::optional( fc::ip::endpoint::from_string( url )) ); + peer->remote_inbound_endpoint = peer->get_remote_endpoint(); my->move_peer_to_active_list( peer ); return std::pair, graphene::net::peer_connection_ptr>( d, peer ); }).wait(); @@ -184,74 +190,100 @@ class test_node : public graphene::net::node, public 
graphene::net::node_delegat class test_peer : public graphene::net::peer_connection { - public: +public: test_peer(graphene::net::peer_connection_delegate* del) : graphene::net::peer_connection(del) { - message_received = nullptr; } - std::shared_ptr message_received; + std::vector messages_received; void send_message( const graphene::net::message& message_to_send, size_t message_send_time_field_offset = (size_t)-1 ) override { - message_received = nullptr; - try - { - switch ( message_to_send.msg_type.value() ) - { - case graphene::net::core_message_type_enum::address_message_type : - { - graphene::net::address_message m = message_to_send.as(); - message_received = std::make_shared(m); - break; - } - case graphene::net::core_message_type_enum::check_firewall_reply_message_type : - { - auto m = message_to_send.as(); - message_received = std::make_shared(m); - break; - } - default: - break; - } - } catch (...) {} - + messages_received.push_back( message_to_send ); } }; -void test_address_message( std::shared_ptr msg, std::size_t num_elements ) +void test_closing_connection_message( const graphene::net::message& msg ) +{ + try + { + const auto& closing_msg = msg.as(); + } + catch( fc::exception& ) + { + BOOST_FAIL( "Expecting closing_connection_message" ); + } +} + +void test_address_message( const graphene::net::message& msg, std::size_t num_elements ) { - if (msg != nullptr) + try { - graphene::net::address_message addr_msg = static_cast( - msg->as() ); + const auto& addr_msg = msg.as(); BOOST_CHECK_EQUAL( addr_msg.addresses.size(), num_elements ); - } - else + } + catch( fc::exception& ) { - BOOST_FAIL( "address_message was null" ); + BOOST_FAIL( "Expecting address_message" ); } } -void test_firewall_message( std::shared_ptr msg ) +struct p2p_fixture { - if (msg != nullptr) + p2p_fixture() { - graphene::net::check_firewall_reply_message fw_msg = - static_cast( - msg->as() ); - if (fw_msg.result != graphene::net::firewall_check_result::unable_to_check ) - BOOST_FAIL( "Expected \"Unable to check\"" ); - } - else + // Configure logging : log p2p messages to console + fc::logging_config logging_config = fc::logging_config::default_config(); + + auto logger = logging_config.loggers.back(); // get a copy of the default logger + logger.name = "p2p"; // update the name to p2p + logging_config.loggers.push_back( logger ); // add it to logging_config + + fc::configure_logging(logging_config); + } + ~p2p_fixture() { - BOOST_FAIL( "firewall_message was null" ); + // Restore default logging config + fc::configure_logging( fc::logging_config::default_config() ); } +}; -} +BOOST_FIXTURE_TEST_SUITE( p2p_node_tests, p2p_fixture ) + +/**** + * If a node requests addresses without sending hello_message first, it will be disconnected. 
+ */ +BOOST_AUTO_TEST_CASE( address_request_without_hello ) +{ + // create a node + int node1_port = fc::network::get_available_port(); + fc::temp_directory node1_dir; + test_node node1( "Node1", node1_dir.path(), node1_port ); + node1.disable_peer_advertising(); + + // get something in their list of connections + std::pair, graphene::net::peer_connection_ptr> node2_rslts + = node1.create_peer_connection( "127.0.0.1:8090" ); + + // request addresses + test_delegate peer3_delegate{}; + std::shared_ptr peer3_ptr = std::make_shared( &peer3_delegate ); + graphene::net::address_request_message req; + node1.on_message( peer3_ptr, req ); + + // check the results + BOOST_REQUIRE_EQUAL( peer3_ptr->messages_received.size(), 1U ); + const auto& msg = peer3_ptr->messages_received.front(); + test_closing_connection_message( msg ); -BOOST_AUTO_TEST_SUITE( p2p_node_tests ) + // request again + peer3_ptr->messages_received.clear(); + node1.on_message( peer3_ptr, req ); + + // the request is ignored + BOOST_REQUIRE_EQUAL( peer3_ptr->messages_received.size(), 0 ); +} /**** * Assure that when disable_peer_advertising is set, @@ -267,16 +299,18 @@ BOOST_AUTO_TEST_CASE( disable_peer_advertising ) // get something in their list of connections std::pair, graphene::net::peer_connection_ptr> node2_rslts - = node1.create_peer_connection( "127.0.0.1:8090" ); + = node1.create_peer_connection( "127.0.0.1:8090" ); // verify that they do not share it with others test_delegate peer3_delegate{}; std::shared_ptr peer3_ptr = std::make_shared( &peer3_delegate ); + peer3_ptr->their_state = test_peer::their_connection_state::connection_accepted; graphene::net::address_request_message req; node1.on_message( peer3_ptr, req ); // check the results - std::shared_ptr msg = peer3_ptr->message_received; + BOOST_REQUIRE_EQUAL( peer3_ptr->messages_received.size(), 1U ); + const auto& msg = peer3_ptr->messages_received.front(); test_address_message( msg, 0 ); } @@ -294,12 +328,14 @@ BOOST_AUTO_TEST_CASE( set_nothing_advertise_algorithm ) test_delegate peer3_delegate{}; std::shared_ptr peer3_ptr = std::make_shared( &peer3_delegate ); + peer3_ptr->their_state = test_peer::their_connection_state::connection_accepted; // verify that they do not share it with others { graphene::net::address_request_message req; node1.on_message( peer3_ptr, req ); // check the results - std::shared_ptr msg = peer3_ptr->message_received; + BOOST_REQUIRE_EQUAL( peer3_ptr->messages_received.size(), 1U ); + const auto& msg = peer3_ptr->messages_received.front(); test_address_message( msg, 0 ); } } @@ -316,6 +352,7 @@ BOOST_AUTO_TEST_CASE( advertise_list_test ) // a fake peer test_delegate del{}; std::shared_ptr my_peer( new test_peer{&del} ); + my_peer->their_state = test_peer::their_connection_state::connection_rejected; // add 2 connections, 1 of which appears on the advertise_list std::pair, graphene::net::peer_connection_ptr> node1_rslts @@ -327,8 +364,11 @@ BOOST_AUTO_TEST_CASE( advertise_list_test ) graphene::net::address_request_message address_request_message_received; my_node.on_message( my_peer, address_request_message_received ); // check the results - std::shared_ptr msg = my_peer->message_received; - test_address_message( msg, 1 ); + BOOST_REQUIRE_EQUAL( my_peer->messages_received.size(), 2U ); + const auto& msg1 = my_peer->messages_received.front(); + test_address_message( msg1, 1 ); + const auto& msg2 = my_peer->messages_received.back(); + test_closing_connection_message( msg2 ); } BOOST_AUTO_TEST_CASE( exclude_list ) @@ -350,11 +390,15 @@ 
BOOST_AUTO_TEST_CASE( exclude_list ) // act like my_node received an address_request message from my_peer test_delegate del_4{}; std::shared_ptr peer_4( new test_peer(&del_4) ); + peer_4->their_state = test_peer::their_connection_state::connection_rejected; graphene::net::address_request_message address_request_message_received; my_node.on_message( peer_4, address_request_message_received ); // check the results - std::shared_ptr msg = peer_4->message_received; - test_address_message( msg, 2 ); + BOOST_REQUIRE_EQUAL( peer_4->messages_received.size(), 2U ); + const auto& msg1 = peer_4->messages_received.front(); + test_address_message( msg1, 2 ); + const auto& msg2 = peer_4->messages_received.back(); + test_closing_connection_message( msg2 ); } BOOST_AUTO_TEST_SUITE_END() From 8db58de74e6dc43552ec5165bf53d2879cf80b44 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 2 Sep 2022 18:07:10 +0000 Subject: [PATCH 201/338] Update tests to get around race conditions in code --- libraries/net/node.cpp | 5 +++++ tests/common/database_fixture.cpp | 13 +++++++++++-- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index c874c1decc..b456404ec4 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -4208,6 +4208,11 @@ namespace graphene { namespace net { namespace detail { // those flags will prevent us from detecting that other applications are // listening on that port. We'd like to detect that, so we'll set up a temporary // tcp server without that flag to see if we can listen on that port. + // Note: There is a race condition where another application may start listening + // on the same port just after the temporary tcp server is destroyed and + // before we try to listen with the real tcp server. + // This happens frequently when running multiple test cases at the same + // time, but less likely in production. 
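The probe that the note above describes can be pictured with the sketch below. It uses the fc networking wrappers the node already depends on, with signatures assumed from the surrounding code; it is illustrative only and makes the time-of-check/time-of-use window explicit:

    #include <cstdint>
    #include <fc/exception/exception.hpp>
    #include <fc/network/tcp_socket.hpp>

    // Returns true if the port looked free at the moment of the probe. Another process
    // may still grab it before the real listener binds, hence the retry loop that
    // follows and the flakiness when many test cases run in parallel.
    bool port_appeared_free( uint16_t port )
    {
       try
       {
          fc::tcp_server probe;   // deliberately without set_reuse_address()
          probe.listen( port );   // assumed to throw if something else owns the port
          return true;            // the probe socket is released when it goes out of scope
       }
       catch( const fc::exception& )
       {
          return false;
       }
    }
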
bool first = true; for( ;; ) { diff --git a/tests/common/database_fixture.cpp b/tests/common/database_fixture.cpp index 67683e9421..4cf9d749bb 100644 --- a/tests/common/database_fixture.cpp +++ b/tests/common/database_fixture.cpp @@ -195,8 +195,17 @@ std::shared_ptr database_fixture_base::in fc::set_option( options, "enable-p2p-network", false ); else if( rand() % 100 >= 50 ) // Disable P2P network randomly for test cases fc::set_option( options, "enable-p2p-network", false ); - else if( rand() % 100 >= 50 ) // this should lead to no change - fc::set_option( options, "enable-p2p-network", true ); + else + { + if( rand() % 100 >= 50 ) // this should lead to no change + { + fc::set_option( options, "enable-p2p-network", true ); + } + fc::ip::endpoint ep; + ep.set_port( rand() % 20000 + 5000 ); + idump( (ep)(std::string(ep)) ); + fc::set_option( options, "p2p-endpoint", std::string( ep ) ); + } if (fixture.current_test_name == "get_account_history_operations") { From e498589074dd253db9ae5922d7234aca004ec303 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 2 Sep 2022 21:39:05 +0000 Subject: [PATCH 202/338] Add a comment --- tests/elasticsearch/main.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/elasticsearch/main.cpp b/tests/elasticsearch/main.cpp index 5589d9a8c7..94e64233a2 100644 --- a/tests/elasticsearch/main.cpp +++ b/tests/elasticsearch/main.cpp @@ -354,6 +354,7 @@ BOOST_AUTO_TEST_CASE(elasticsearch_suite) { } + // Note: this test case ends too quickly, sometimes causing an memory access violation on cleanup } catch (fc::exception &e) { edump((e.to_detail_string())); From 7458c245fd575dc9bde42d3ab9ea53139e6b39c8 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 3 Sep 2022 16:19:46 +0000 Subject: [PATCH 203/338] Fix threading issues in p2p_node_tests --- tests/tests/p2p_node_tests.cpp | 357 +++++++++++++++++++-------------- 1 file changed, 202 insertions(+), 155 deletions(-) diff --git a/tests/tests/p2p_node_tests.cpp b/tests/tests/p2p_node_tests.cpp index 7a141d6728..ed0a2e74c2 100644 --- a/tests/tests/p2p_node_tests.cpp +++ b/tests/tests/p2p_node_tests.cpp @@ -53,14 +53,14 @@ */ class test_delegate : public graphene::net::peer_connection_delegate { - public: +public: test_delegate() { } - void on_message( graphene::net::peer_connection* originating_peer, + void on_message( graphene::net::peer_connection* originating_peer, const graphene::net::message& received_message ) { - elog( "on_message was called with ${msg}", ("msg",received_message) ); + ilog( "on_message was called with ${msg}", ("msg",received_message) ); try { graphene::net::address_request_message m = received_message.as< graphene::net::address_request_message >(); std::shared_ptr m_ptr = std::make_shared< graphene::net::message >( m ); @@ -71,23 +71,69 @@ class test_delegate : public graphene::net::peer_connection_delegate } void on_connection_closed( graphene::net::peer_connection* originating_peer ) override {} graphene::net::message get_message_for_item( const graphene::net::item_id& item ) override - { - return graphene::net::message(); + { + return graphene::net::message(); } std::shared_ptr< graphene::net::message > last_message = nullptr; }; +class test_peer : public graphene::net::peer_connection +{ +public: + std::vector messages_received; + + test_peer(graphene::net::peer_connection_delegate* del) : graphene::net::peer_connection(del) + { + } + + void send_message( const graphene::net::message& message_to_send, + size_t message_send_time_field_offset = (size_t)-1 ) override + { + 
messages_received.push_back( message_to_send ); + } +}; + +static void test_closing_connection_message( const graphene::net::message& msg ) +{ + try + { + const auto& closing_msg = msg.as(); + } + catch( fc::exception& ) + { + BOOST_FAIL( "Expecting closing_connection_message" ); + } +} + +static void test_address_message( const graphene::net::message& msg, std::size_t num_elements ) +{ + try + { + const auto& addr_msg = msg.as(); + BOOST_CHECK_EQUAL( addr_msg.addresses.size(), num_elements ); + } + catch( fc::exception& ) + { + BOOST_FAIL( "Expecting address_message" ); + } +} + class test_node : public graphene::net::node, public graphene::net::node_delegate { - public: +public: + std::vector> test_peers; + test_node( const std::string& name, const fc::path& config_dir, int port, int seed_port = -1 ) : node( name ) { + std::cout << "test_node::test_node(): current thread=" << uint64_t(&fc::thread::current()) << std::endl; node_name = name; } - ~test_node() + ~test_node() { - close(); + my->get_thread()->async( [&]() { + this->test_peers.clear(); + }).wait(); } void on_message( graphene::net::peer_connection_ptr originating_peer, @@ -98,137 +144,105 @@ class test_node : public graphene::net::node, public graphene::net::node_delegat }).wait(); } - std::pair, graphene::net::peer_connection_ptr> + std::pair, std::shared_ptr> create_test_peer( std::string url ) + { + return this->my->get_thread()->async( [&, &url = url](){ + std::shared_ptr d{}; + auto peer = std::make_shared( d.get() ); + peer->set_remote_endpoint( fc::optional( fc::ip::endpoint::from_string( url )) ); + this->test_peers.push_back( peer ); + return std::make_pair( d, peer ); + }).wait(); + } + + std::pair, graphene::net::peer_connection_ptr> create_peer_connection( std::string url ) { - std::pair, graphene::net::peer_connection_ptr> ret_val; - ret_val = this->my->get_thread()->async( [&, &url = url](){ + return this->my->get_thread()->async( [&, &url = url](){ std::shared_ptr d{}; graphene::net::peer_connection_ptr peer = graphene::net::peer_connection::make_shared( d.get() ); peer->set_remote_endpoint( fc::optional( fc::ip::endpoint::from_string( url )) ); peer->remote_inbound_endpoint = peer->get_remote_endpoint(); - my->move_peer_to_active_list( peer ); - return std::pair, graphene::net::peer_connection_ptr>( d, peer ); + my->move_peer_to_active_list( peer ); + return std::make_pair( d, peer ); }).wait(); - return ret_val; } /**** * Implementation methods of node_delegate */ bool has_item( const graphene::net::item_id& id ) { return false; } - bool handle_block( const graphene::net::block_message& blk_msg, bool sync_mode, + bool handle_block( const graphene::net::block_message& blk_msg, bool sync_mode, std::vector& contained_transaction_message_ids ) { return false; } void handle_transaction( const graphene::net::trx_message& trx_msg ) { - elog( "${name} was asked to handle a transaction", ("name", node_name) ); + ilog( "${name} was asked to handle a transaction", ("name", node_name) ); } - void handle_message( const graphene::net::message& message_to_process ) + void handle_message( const graphene::net::message& message_to_process ) { - elog( "${name} received a message", ("name",node_name) ); + ilog( "${name} received a message", ("name",node_name) ); } std::vector get_block_ids( const std::vector& blockchain_synopsis, - uint32_t& remaining_item_count, uint32_t limit = 2000 ) - { - return std::vector(); + uint32_t& remaining_item_count, uint32_t limit = 2000 ) + { + return std::vector(); } graphene::net::message 
get_item( const graphene::net::item_id& id ) { - elog("${name} get_item was called", ("name",node_name)); - return graphene::net::message(); + ilog("${name} get_item was called", ("name",node_name)); + return graphene::net::message(); } graphene::net::chain_id_type get_chain_id() const { - elog("${name} get_chain_id was called", ("name",node_name)); - return graphene::net::chain_id_type(); + ilog("${name} get_chain_id was called", ("name",node_name)); + return graphene::net::chain_id_type(); } std::vector get_blockchain_synopsis( - const graphene::net::item_hash_t& reference_point, + const graphene::net::item_hash_t& reference_point, uint32_t number_of_blocks_after_reference_point) { - return std::vector(); + return std::vector(); } void sync_status( uint32_t item_type, uint32_t item_count ) {} - void connection_count_changed( uint32_t c ) + void connection_count_changed( uint32_t c ) { - elog( "${name} connection_count_change was called", ("name",node_name) ); + ilog( "${name} connection_count_change was called", ("name",node_name) ); } - uint32_t get_block_number( const graphene::net::item_hash_t& block_id ) - { - elog( "${name} get_block_number was called", ("name",node_name) ); + uint32_t get_block_number( const graphene::net::item_hash_t& block_id ) + { + ilog( "${name} get_block_number was called", ("name",node_name) ); return 0; } fc::time_point_sec get_block_time( const graphene::net::item_hash_t& block_id ) - { - elog( "${name} get_block_time was called", ("name",node_name) ); - return fc::time_point_sec(); + { + ilog( "${name} get_block_time was called", ("name",node_name) ); + return fc::time_point_sec(); } graphene::net::item_hash_t get_head_block_id() const { - elog( "${name} get_head_block_id was called", ("name",node_name) ); - return graphene::net::item_hash_t(); + ilog( "${name} get_head_block_id was called", ("name",node_name) ); + return graphene::net::item_hash_t(); } uint32_t estimate_last_known_fork_from_git_revision_timestamp( uint32_t unix_timestamp ) const - { - return 0; + { + return 0; } void error_encountered( const std::string& message, const fc::oexception& error ) { - elog( "${name} error_encountered was called. Message: ${msg}", ("name",node_name)("msg", message) ); + ilog( "${name} error_encountered was called. 
Message: ${msg}", ("name",node_name)("msg", message) ); } uint8_t get_current_block_interval_in_seconds() const - { - elog( "${name} get_current_block_interval_in_seconds was called", ("name",node_name) ); - return 0; - } - - private: - std::string node_name; -}; - -class test_peer : public graphene::net::peer_connection -{ -public: - test_peer(graphene::net::peer_connection_delegate* del) : graphene::net::peer_connection(del) { + ilog( "${name} get_current_block_interval_in_seconds was called", ("name",node_name) ); + return 0; } - std::vector messages_received; - - void send_message( const graphene::net::message& message_to_send, - size_t message_send_time_field_offset = (size_t)-1 ) override - { - messages_received.push_back( message_to_send ); - } +private: + std::string node_name; }; -void test_closing_connection_message( const graphene::net::message& msg ) -{ - try - { - const auto& closing_msg = msg.as(); - } - catch( fc::exception& ) - { - BOOST_FAIL( "Expecting closing_connection_message" ); - } -} - -void test_address_message( const graphene::net::message& msg, std::size_t num_elements ) -{ - try - { - const auto& addr_msg = msg.as(); - BOOST_CHECK_EQUAL( addr_msg.addresses.size(), num_elements ); - } - catch( fc::exception& ) - { - BOOST_FAIL( "Expecting address_message" ); - } -} - struct p2p_fixture { p2p_fixture() @@ -256,28 +270,31 @@ BOOST_FIXTURE_TEST_SUITE( p2p_node_tests, p2p_fixture ) */ BOOST_AUTO_TEST_CASE( address_request_without_hello ) { - // create a node + // create a node (node1) int node1_port = fc::network::get_available_port(); fc::temp_directory node1_dir; test_node node1( "Node1", node1_dir.path(), node1_port ); - node1.disable_peer_advertising(); - // get something in their list of connections + // get something in the list of connections std::pair, graphene::net::peer_connection_ptr> node2_rslts = node1.create_peer_connection( "127.0.0.1:8090" ); - // request addresses - test_delegate peer3_delegate{}; - std::shared_ptr peer3_ptr = std::make_shared( &peer3_delegate ); + // a new peer (peer3) + std::pair, std::shared_ptr> peer3 + = node1.create_test_peer( "1.2.3.4:5678" ); + std::shared_ptr peer3_ptr = peer3.second; + + // peer3 request addresses graphene::net::address_request_message req; node1.on_message( peer3_ptr, req ); // check the results + // peer3 didn't send hello so the connection should be closed BOOST_REQUIRE_EQUAL( peer3_ptr->messages_received.size(), 1U ); const auto& msg = peer3_ptr->messages_received.front(); test_closing_connection_message( msg ); - // request again + // peer3 request again peer3_ptr->messages_received.clear(); node1.on_message( peer3_ptr, req ); @@ -291,24 +308,31 @@ BOOST_AUTO_TEST_CASE( address_request_without_hello ) */ BOOST_AUTO_TEST_CASE( disable_peer_advertising ) { - // create a node + // create a node (node1) int node1_port = fc::network::get_available_port(); fc::temp_directory node1_dir; test_node node1( "Node1", node1_dir.path(), node1_port ); + + // disable peer advertising node1.disable_peer_advertising(); - // get something in their list of connections + // get something in the list of connections std::pair, graphene::net::peer_connection_ptr> node2_rslts = node1.create_peer_connection( "127.0.0.1:8090" ); - // verify that they do not share it with others - test_delegate peer3_delegate{}; - std::shared_ptr peer3_ptr = std::make_shared( &peer3_delegate ); + // a new peer (peer3) + std::pair, std::shared_ptr> peer3 + = node1.create_test_peer( "1.2.3.4:5678" ); + std::shared_ptr peer3_ptr = peer3.second; + // 
simulate that node1 got its hello request and accepted the connection peer3_ptr->their_state = test_peer::their_connection_state::connection_accepted; + + // peer3 request addresses graphene::net::address_request_message req; node1.on_message( peer3_ptr, req ); // check the results + // Node1 does not share the peer list with others BOOST_REQUIRE_EQUAL( peer3_ptr->messages_received.size(), 1U ); const auto& msg = peer3_ptr->messages_received.front(); test_address_message( msg, 0 ); @@ -316,88 +340,111 @@ BOOST_AUTO_TEST_CASE( disable_peer_advertising ) BOOST_AUTO_TEST_CASE( set_nothing_advertise_algorithm ) { - // create a node + // create a node (node1) int node1_port = fc::network::get_available_port(); fc::temp_directory node1_dir; test_node node1( "Node1", node1_dir.path(), node1_port ); + + // set advertise algorithm to "nothing" node1.set_advertise_algorithm( "nothing" ); - // get something in their list of connections - std::pair, graphene::net::peer_connection_ptr> node2_rslts + // get something in the list of connections + std::pair, graphene::net::peer_connection_ptr> node2_rslts = node1.create_peer_connection( "127.0.0.1:8090" ); - test_delegate peer3_delegate{}; - std::shared_ptr peer3_ptr = std::make_shared( &peer3_delegate ); + // a new peer (peer3) + std::pair, std::shared_ptr> peer3 + = node1.create_test_peer( "1.2.3.4:5678" ); + std::shared_ptr peer3_ptr = peer3.second; + // simulate that node1 got its hello request and accepted the connection peer3_ptr->their_state = test_peer::their_connection_state::connection_accepted; - // verify that they do not share it with others - { - graphene::net::address_request_message req; - node1.on_message( peer3_ptr, req ); - // check the results - BOOST_REQUIRE_EQUAL( peer3_ptr->messages_received.size(), 1U ); - const auto& msg = peer3_ptr->messages_received.front(); - test_address_message( msg, 0 ); - } + + // peer3 request addresses + graphene::net::address_request_message req; + node1.on_message( peer3_ptr, req ); + + // check the results + // Node1 does not share the peer list with others + BOOST_REQUIRE_EQUAL( peer3_ptr->messages_received.size(), 1U ); + const auto& msg = peer3_ptr->messages_received.front(); + test_address_message( msg, 0 ); } BOOST_AUTO_TEST_CASE( advertise_list_test ) { - std::vector advert_list = { "127.0.0.1:8090" }; - // set up my node - int my_node_port = fc::network::get_available_port(); - fc::temp_directory my_node_dir; - test_node my_node( "Hello", my_node_dir.path(), my_node_port ); - my_node.set_advertise_algorithm( "list", advert_list ); - - // a fake peer - test_delegate del{}; - std::shared_ptr my_peer( new test_peer{&del} ); - my_peer->their_state = test_peer::their_connection_state::connection_rejected; - - // add 2 connections, 1 of which appears on the advertise_list + // create a node (node1) + int node1_port = fc::network::get_available_port(); + fc::temp_directory node1_dir; + test_node node1( "Node1", node1_dir.path(), node1_port ); + + // set advertise algorithm to "list" + std::vector advert_list = { "127.0.0.1:8090", "2.3.4.55:1234" }; + node1.set_advertise_algorithm( "list", advert_list ); + + // add some connections, 1 of which appears on the advertise_list std::pair, graphene::net::peer_connection_ptr> node1_rslts - = my_node.create_peer_connection("127.0.0.1:8089"); + = node1.create_peer_connection("127.0.0.1:8089"); + std::pair, graphene::net::peer_connection_ptr> node2_rslts + = node1.create_peer_connection("127.0.0.1:8090"); std::pair, graphene::net::peer_connection_ptr> 
node2_reslts - = my_node.create_peer_connection("127.0.0.1:8090"); + = node1.create_peer_connection("127.0.0.1:8091"); + + // a new peer (peer3) + std::pair, std::shared_ptr> peer3 + = node1.create_test_peer( "1.2.3.4:5678" ); + std::shared_ptr peer3_ptr = peer3.second; + // simulate that node1 got its hello request and rejected the connection + peer3_ptr->their_state = test_peer::their_connection_state::connection_rejected; + + // peer3 request addresses + graphene::net::address_request_message req; + node1.on_message( peer3_ptr, req ); - // act like my_node received an address_request message from my_peer - graphene::net::address_request_message address_request_message_received; - my_node.on_message( my_peer, address_request_message_received ); // check the results - BOOST_REQUIRE_EQUAL( my_peer->messages_received.size(), 2U ); - const auto& msg1 = my_peer->messages_received.front(); + // node1 replies with 1 address, then closes the connection + BOOST_REQUIRE_EQUAL( peer3_ptr->messages_received.size(), 2U ); + const auto& msg1 = peer3_ptr->messages_received.front(); test_address_message( msg1, 1 ); - const auto& msg2 = my_peer->messages_received.back(); + const auto& msg2 = peer3_ptr->messages_received.back(); test_closing_connection_message( msg2 ); } BOOST_AUTO_TEST_CASE( exclude_list ) { - std::vector ex_list = { "127.0.0.1:8090" }; - // set up my node - int my_node_port = fc::network::get_available_port(); - fc::temp_directory my_node_dir; - test_node my_node( "Hello", my_node_dir.path(), my_node_port ); - my_node.set_advertise_algorithm( "exclude_list", ex_list ); - // some peers - std::pair, graphene::net::peer_connection_ptr> node2_rslts - = my_node.create_peer_connection( "127.0.0.1:8089" ); - std::pair, graphene::net::peer_connection_ptr> node3_rslts - = my_node.create_peer_connection( "127.0.0.1:8090" ); - std::pair, graphene::net::peer_connection_ptr> node4_rslts - = my_node.create_peer_connection( "127.0.0.1:8091" ); - - // act like my_node received an address_request message from my_peer - test_delegate del_4{}; - std::shared_ptr peer_4( new test_peer(&del_4) ); - peer_4->their_state = test_peer::their_connection_state::connection_rejected; - graphene::net::address_request_message address_request_message_received; - my_node.on_message( peer_4, address_request_message_received ); + // create a node (node1) + int node1_port = fc::network::get_available_port(); + fc::temp_directory node1_dir; + test_node node1( "Node1", node1_dir.path(), node1_port ); + + // set advertise algorithm to "exclude_list" + std::vector ex_list = { "127.0.0.1:8090", "2.3.4.55:1234" }; + node1.set_advertise_algorithm( "exclude_list", ex_list ); + + // add some connections, 1 of which appears on the exclude_list + std::pair, graphene::net::peer_connection_ptr> node1_rslts + = node1.create_peer_connection("127.0.0.1:8089"); + std::pair, graphene::net::peer_connection_ptr> node2_rslts + = node1.create_peer_connection("127.0.0.1:8090"); + std::pair, graphene::net::peer_connection_ptr> node2_reslts + = node1.create_peer_connection("127.0.0.1:8091"); + + // a new peer (peer3) + std::pair, std::shared_ptr> peer3 + = node1.create_test_peer( "1.2.3.4:5678" ); + std::shared_ptr peer3_ptr = peer3.second; + // simulate that node1 got its hello request and rejected the connection + peer3_ptr->their_state = test_peer::their_connection_state::connection_rejected; + + // peer3 request addresses + graphene::net::address_request_message req; + node1.on_message( peer3_ptr, req ); + // check the results - 
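The expected message counts in these rewritten tests follow directly from how the two advertising modes filter the list of connected peers. A behavioural sketch with std types, illustrative only since the real logic lives in the net library's address-advertising code:

    #include <algorithm>
    #include <iterator>
    #include <set>
    #include <string>
    #include <vector>

    // exclude == false models "list": advertise only endpoints on the configured list.
    // exclude == true  models "exclude_list": advertise everything except listed endpoints.
    std::vector<std::string> advertise( const std::vector<std::string>& connected,
                                        const std::set<std::string>& configured_list,
                                        bool exclude )
    {
       std::vector<std::string> result;
       std::copy_if( connected.begin(), connected.end(), std::back_inserter( result ),
                     [&]( const std::string& ep )
                     {
                        bool listed = configured_list.count( ep ) > 0;
                        return exclude ? !listed : listed;
                     } );
       return result;
    }

    // With connections to 127.0.0.1:8089, :8090 and :8091 and ":8090" on the configured
    // list, "list" yields 1 address and "exclude_list" yields 2, matching the assertions
    // in advertise_list_test and exclude_list.
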
BOOST_REQUIRE_EQUAL( peer_4->messages_received.size(), 2U ); - const auto& msg1 = peer_4->messages_received.front(); + // node1 replies with 2 addresses, then closes the connection + BOOST_REQUIRE_EQUAL( peer3_ptr->messages_received.size(), 2U ); + const auto& msg1 = peer3_ptr->messages_received.front(); test_address_message( msg1, 2 ); - const auto& msg2 = peer_4->messages_received.back(); + const auto& msg2 = peer3_ptr->messages_received.back(); test_closing_connection_message( msg2 ); } From e7583403d5645ad18b1a689a259b25b2d55f432d Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 5 Sep 2022 16:44:13 +0000 Subject: [PATCH 204/338] Update member functions to const, update logging --- .../include/graphene/net/peer_database.hpp | 4 ++-- libraries/net/peer_database.cpp | 21 ++++++++++++------- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/libraries/net/include/graphene/net/peer_database.hpp b/libraries/net/include/graphene/net/peer_database.hpp index edc046e7c8..4ae2cbb06e 100644 --- a/libraries/net/include/graphene/net/peer_database.hpp +++ b/libraries/net/include/graphene/net/peer_database.hpp @@ -106,8 +106,8 @@ namespace graphene { namespace net { void erase(const fc::ip::endpoint& endpointToErase); void update_entry(const potential_peer_record& updatedRecord); - potential_peer_record lookup_or_create_entry_for_endpoint(const fc::ip::endpoint& endpointToLookup); - fc::optional lookup_entry_for_endpoint(const fc::ip::endpoint& endpointToLookup); + potential_peer_record lookup_or_create_entry_for_endpoint(const fc::ip::endpoint& endpointToLookup)const; + fc::optional lookup_entry_for_endpoint(const fc::ip::endpoint& endpointToLookup)const; using iterator = detail::peer_database_iterator; iterator begin() const; diff --git a/libraries/net/peer_database.cpp b/libraries/net/peer_database.cpp index f5fff7ac9e..3c13c9a334 100644 --- a/libraries/net/peer_database.cpp +++ b/libraries/net/peer_database.cpp @@ -68,8 +68,8 @@ namespace graphene { namespace net { void clear(); void erase(const fc::ip::endpoint& endpointToErase); void update_entry(const potential_peer_record& updatedRecord); - potential_peer_record lookup_or_create_entry_for_endpoint(const fc::ip::endpoint& endpointToLookup); - fc::optional lookup_entry_for_endpoint(const fc::ip::endpoint& endpointToLookup); + potential_peer_record lookup_or_create_entry_for_endpoint(const fc::ip::endpoint& endpointToLookup)const; + fc::optional lookup_entry_for_endpoint(const fc::ip::endpoint& endpointToLookup)const; peer_database::iterator begin() const; peer_database::iterator end() const; @@ -125,11 +125,12 @@ namespace graphene { namespace net { if (!fc::exists(peer_database_filename_dir)) fc::create_directories(peer_database_filename_dir); fc::json::save_to_file( peer_records, _peer_database_filename, GRAPHENE_NET_MAX_NESTED_OBJECTS ); + dlog( "Saved peer database to file ${filename}", ( "filename", _peer_database_filename) ); } catch (const fc::exception& e) { - elog("error saving peer database to file ${peer_database_filename}", - ("peer_database_filename", _peer_database_filename)); + wlog( "error saving peer database to file ${peer_database_filename}: ${error}", + ("peer_database_filename", _peer_database_filename)("error", e.to_detail_string()) ); } _potential_peer_set.clear(); } @@ -155,7 +156,8 @@ namespace graphene { namespace net { _potential_peer_set.get().insert(updatedRecord); } - potential_peer_record peer_database_impl::lookup_or_create_entry_for_endpoint(const fc::ip::endpoint& endpointToLookup) + 
potential_peer_record peer_database_impl::lookup_or_create_entry_for_endpoint( + const fc::ip::endpoint& endpointToLookup ) const { auto iter = _potential_peer_set.get().find(endpointToLookup); if (iter != _potential_peer_set.get().end()) @@ -163,7 +165,8 @@ namespace graphene { namespace net { return potential_peer_record(endpointToLookup); } - fc::optional peer_database_impl::lookup_entry_for_endpoint(const fc::ip::endpoint& endpointToLookup) + fc::optional peer_database_impl::lookup_entry_for_endpoint( + const fc::ip::endpoint& endpointToLookup ) const { auto iter = _potential_peer_set.get().find(endpointToLookup); if (iter != _potential_peer_set.get().end()) @@ -251,12 +254,14 @@ namespace graphene { namespace net { my->update_entry(updatedRecord); } - potential_peer_record peer_database::lookup_or_create_entry_for_endpoint(const fc::ip::endpoint& endpointToLookup) + potential_peer_record peer_database::lookup_or_create_entry_for_endpoint( + const fc::ip::endpoint& endpointToLookup ) const { return my->lookup_or_create_entry_for_endpoint(endpointToLookup); } - fc::optional peer_database::lookup_entry_for_endpoint(const fc::ip::endpoint& endpoint_to_lookup) + fc::optional peer_database::lookup_entry_for_endpoint( + const fc::ip::endpoint& endpoint_to_lookup ) const { return my->lookup_entry_for_endpoint(endpoint_to_lookup); } From 649bcbce2311d271e2105bd0a18fa68c2c7dd2d1 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 5 Sep 2022 16:47:34 +0000 Subject: [PATCH 205/338] Create dir for saving node config if not exist --- libraries/net/node.cpp | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index b456404ec4..1b026cd3c1 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -319,27 +319,28 @@ namespace graphene { namespace net { namespace detail { ilog( "done" ); } - void node_impl::save_node_configuration() - { + void node_impl::save_node_configuration() + { VERIFY_CORRECT_THREAD(); - if( fc::exists(_node_configuration_directory ) ) + + fc::path configuration_file_name( _node_configuration_directory / NODE_CONFIGURATION_FILENAME ); + try { - fc::path configuration_file_name( _node_configuration_directory / NODE_CONFIGURATION_FILENAME ); - try - { - fc::json::save_to_file( _node_configuration, configuration_file_name ); - } - catch (const fc::canceled_exception&) - { - throw; - } - catch ( const fc::exception& except ) - { - elog( "error writing node configuration to file ${filename}: ${error}", + if( !fc::exists(_node_configuration_directory ) ) + fc::create_directories( _node_configuration_directory ); + fc::json::save_to_file( _node_configuration, configuration_file_name ); + dlog( "Saved node configuration to file ${filename}", ( "filename", configuration_file_name ) ); + } + catch (const fc::canceled_exception&) + { + throw; + } + catch ( const fc::exception& except ) + { + wlog( "error writing node configuration to file ${filename}: ${error}", ( "filename", configuration_file_name )("error", except.to_detail_string() ) ); - } } - } + } void node_impl::p2p_network_connect_loop() { From cfbdba9bd5659df8f89748d52e11fd397fb2c27d Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 5 Sep 2022 19:38:17 +0000 Subject: [PATCH 206/338] Update two_node_network test to three_node_network --- tests/app/main.cpp | 71 ++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 69 insertions(+), 2 deletions(-) diff --git a/tests/app/main.cpp b/tests/app/main.cpp index 
3144026c9e..ac9ef02629 100644 --- a/tests/app/main.cpp +++ b/tests/app/main.cpp @@ -212,9 +212,9 @@ BOOST_AUTO_TEST_CASE(load_configuration_options_test_legacy_config_ini_options) } ///////////// -/// @brief create a 2 node network +/// @brief create a 3 node network ///////////// -BOOST_AUTO_TEST_CASE( two_node_network ) +BOOST_AUTO_TEST_CASE( three_node_network ) { using namespace graphene::chain; using namespace graphene::app; @@ -234,6 +234,7 @@ BOOST_AUTO_TEST_CASE( two_node_network ) auto port = fc::network::get_available_port(); auto app1_p2p_endpoint_str = string("127.0.0.1:") + std::to_string(port); auto app2_seed_nodes_str = string("[\"") + app1_p2p_endpoint_str + "\"]"; + auto app3_seed_nodes_str = string("[\"") + app1_p2p_endpoint_str + "\"]"; fc::temp_directory app_dir( graphene::utilities::temp_directory_path() ); auto genesis_file = create_genesis_file(app_dir); @@ -381,6 +382,72 @@ BOOST_AUTO_TEST_CASE( two_node_network ) BOOST_CHECK_EQUAL( db1->get_balance( GRAPHENE_NULL_ACCOUNT, asset_id_type() ).amount.value, 1000000 ); BOOST_CHECK_EQUAL( db2->get_balance( GRAPHENE_NULL_ACCOUNT, asset_id_type() ).amount.value, 1000000 ); + // Start app3 + BOOST_TEST_MESSAGE( "Creating and initializing app3" ); + + fc::temp_directory app3_dir( graphene::utilities::temp_directory_path() ); + graphene::app::application app3; + app3.register_plugin(); + app3.register_plugin< graphene::market_history::market_history_plugin >(); + app3.register_plugin< graphene::witness_plugin::witness_plugin >(); + app3.register_plugin< graphene::grouped_orders::grouped_orders_plugin>(); + auto sharable_cfg3 = std::make_shared(); + auto& cfg3 = *sharable_cfg3; + fc::set_option( cfg3, "genesis-json", genesis_file ); + fc::set_option( cfg3, "seed-nodes", app3_seed_nodes_str ); + app3.initialize(app3_dir.path(), sharable_cfg3); + + BOOST_TEST_MESSAGE( "Starting app3 and waiting for connection" ); + app3.startup(); + + fc::wait_for( node_startup_wait_time, [&app1] () { + if( app1.p2p_node()->get_connection_count() < 2 ) + return false; + auto peers = app1.p2p_node()->get_connected_peers(); + if( peers.size() < 2 ) + return false; + for( const auto& peer : peers ) + { + auto itr = peer.info.find( "peer_needs_sync_items_from_us" ); + if( itr == peer.info.end() ) + return false; + if( itr->value().as(1) ) + return false; + } + return true; + }); + + BOOST_REQUIRE_EQUAL(app1.p2p_node()->get_connection_count(), 2u); + BOOST_TEST_MESSAGE( "app1 and app3 successfully connected" ); + + BOOST_TEST_MESSAGE( "Verifying app3 is synced" ); + BOOST_CHECK_EQUAL( app3.chain_database()->head_block_num(), 1u); + BOOST_CHECK_EQUAL( app3.chain_database()->get_balance( GRAPHENE_NULL_ACCOUNT, asset_id_type() ).amount.value, + 1000000 ); + + auto new_peer_wait_time = fc::seconds(45); + + BOOST_TEST_MESSAGE( "Waiting for app2 and app3 to connect to each other" ); + fc::wait_for( new_peer_wait_time, [&app2] () { + if( app2.p2p_node()->get_connection_count() < 2 ) + return false; + auto peers = app2.p2p_node()->get_connected_peers(); + if( peers.size() < 2 ) + return false; + for( const auto& peer : peers ) + { + auto itr = peer.info.find( "peer_needs_sync_items_from_us" ); + if( itr == peer.info.end() ) + return false; + if( itr->value().as(1) ) + return false; + } + return true; + }); + + BOOST_REQUIRE_EQUAL(app3.p2p_node()->get_connection_count(), 2u); + BOOST_TEST_MESSAGE( "app2 and app3 successfully connected" ); + } catch( fc::exception& e ) { edump((e.to_detail_string())); throw; From 2e535c1d652228228b2342a449a51fcf4cdcb0f9 Mon 
Sep 17 00:00:00 2001 From: abitmore Date: Mon, 5 Sep 2022 22:27:15 +0000 Subject: [PATCH 207/338] Avoid using get_socket() in on_hello_message() For ease of testing. --- libraries/net/node.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 1b026cd3c1..56e2d754f9 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -1665,7 +1665,7 @@ namespace graphene { namespace net { namespace detail { std::ostringstream rejection_message; rejection_message << "Your client is outdated -- you can only understand blocks up to #" << next_fork_block_number << ", but I'm already on block #" << head_block_num; connection_rejected_message connection_rejected(_user_agent_string, core_protocol_version, - originating_peer->get_socket().remote_endpoint(), + *originating_peer->get_remote_endpoint(), rejection_reason_code::unspecified, rejection_message.str() ); @@ -1730,7 +1730,7 @@ namespace graphene { namespace net { namespace detail { _allowed_peers.find(originating_peer->node_id) == _allowed_peers.end()) { connection_rejected_message connection_rejected(_user_agent_string, core_protocol_version, - originating_peer->get_socket().remote_endpoint(), + *originating_peer->get_remote_endpoint(), rejection_reason_code::blocked, "you are not in my allowed_peers list"); originating_peer->their_state = peer_connection::their_connection_state::connection_rejected; @@ -1762,7 +1762,7 @@ namespace graphene { namespace net { namespace detail { // Second, we add the address we see, with the inbound port the peer told us. // It might be the same as above, but that's OK. - fc::ip::endpoint peers_actual_outbound_endpoint = originating_peer->get_socket().remote_endpoint(); + fc::ip::endpoint peers_actual_outbound_endpoint = *originating_peer->get_remote_endpoint(); endpoints_to_save.insert( fc::ip::endpoint( peers_actual_outbound_endpoint.get_address(), originating_peer->inbound_port ) ); @@ -1781,7 +1781,7 @@ namespace graphene { namespace net { namespace detail { if (!is_accepting_new_connections()) { connection_rejected_message connection_rejected(_user_agent_string, core_protocol_version, - originating_peer->get_socket().remote_endpoint(), + *originating_peer->get_remote_endpoint(), rejection_reason_code::not_accepting_connections, "not accepting any more incoming connections"); originating_peer->their_state = peer_connection::their_connection_state::connection_rejected; From 171bdd99a1b3dd6a2f05c2f98e9c17449270253b Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 5 Sep 2022 23:43:39 +0000 Subject: [PATCH 208/338] Add try-catch in on_hello when checking signature --- libraries/net/node.cpp | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 56e2d754f9..ce7902a5f5 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -1547,12 +1547,23 @@ namespace graphene { namespace net { namespace detail { // Validate the peer's public key. // Note: the node_id in user_data is not verified. 
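For reference, the signature validated here is produced on the peer's side the same way the test_peer::create_hello_message helper added later in this series builds it: the sender signs the SHA-256 of the shared secret with its node key, and the receiver recovers a public key from that compact signature and compares it with the node_public_key claimed in the hello message. A condensed round-trip sketch using only calls that appear in the surrounding code, with error handling omitted:

    #include <fc/crypto/elliptic.hpp>
    #include <fc/crypto/sha256.hpp>
    #include <fc/crypto/sha512.hpp>

    bool hello_signature_round_trip()
    {
       fc::ecc::private_key node_key = fc::ecc::private_key::generate();
       fc::sha512 shared_secret; // in the real handshake this comes from the connection's key exchange

       // sender: sign sha256( shared_secret )
       fc::sha256::encoder enc;
       enc.write( shared_secret.data(), sizeof(shared_secret) );
       fc::ecc::compact_signature sig = node_key.sign_compact( enc.result() );

       // receiver: recover the signer's key from the signature and the same digest
       fc::sha256::encoder enc2;
       enc2.write( shared_secret.data(), sizeof(shared_secret) );
       fc::ecc::public_key recovered( sig, enc2.result(), false );

       return recovered.serialize() == node_key.get_public_key().serialize();
    }
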
- fc::sha256::encoder shared_secret_encoder; - fc::sha512 shared_secret = originating_peer->get_shared_secret(); - shared_secret_encoder.write(shared_secret.data(), sizeof(shared_secret)); - fc::ecc::public_key expected_node_public_key( hello_message_received.signed_shared_secret, - shared_secret_encoder.result(), false ); - if( hello_message_received.node_public_key != expected_node_public_key.serialize() ) + fc::optional expected_node_public_key; + try + { + fc::sha256::encoder shared_secret_encoder; + fc::sha512 shared_secret = originating_peer->get_shared_secret(); + shared_secret_encoder.write(shared_secret.data(), sizeof(shared_secret)); + expected_node_public_key = fc::ecc::public_key( hello_message_received.signed_shared_secret, + shared_secret_encoder.result(), false ); + } + catch( fc::exception& e ) + { + wlog( "Error when validating signature in hello message from peer ${peer}: ${e}", + ("peer", originating_peer->get_remote_endpoint())("e", e.to_detail_string()) ); + } + + if( !expected_node_public_key + || hello_message_received.node_public_key != expected_node_public_key->serialize() ) { wlog( "Invalid signature in hello message from peer ${peer}", ("peer", originating_peer->get_remote_endpoint()) ); From da8ea0dac9bfb2478c1844a4da8d9f6f3e79f196 Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 6 Sep 2022 01:42:41 +0000 Subject: [PATCH 209/338] Add more P2P node tests --- tests/tests/p2p_node_tests.cpp | 714 ++++++++++++++++++++++++++++++--- 1 file changed, 648 insertions(+), 66 deletions(-) diff --git a/tests/tests/p2p_node_tests.cpp b/tests/tests/p2p_node_tests.cpp index ed0a2e74c2..7e8cb86330 100644 --- a/tests/tests/p2p_node_tests.cpp +++ b/tests/tests/p2p_node_tests.cpp @@ -30,9 +30,11 @@ #include #include #include +#include #include #include +#include #include @@ -79,6 +81,9 @@ class test_delegate : public graphene::net::peer_connection_delegate class test_peer : public graphene::net::peer_connection { +private: + fc::ecc::private_key generated_private_key = fc::ecc::private_key::generate(); + fc::optional configured_node_id; // used to create hello_message public: std::vector messages_received; @@ -91,83 +96,84 @@ class test_peer : public graphene::net::peer_connection { messages_received.push_back( message_to_send ); } -}; -static void test_closing_connection_message( const graphene::net::message& msg ) -{ - try + graphene::net::node_id_t get_public_key() const { - const auto& closing_msg = msg.as(); + return generated_private_key.get_public_key(); } - catch( fc::exception& ) + + void set_configured_node_id( const graphene::net::node_id_t& id ) { - BOOST_FAIL( "Expecting closing_connection_message" ); + configured_node_id = id; } -} -static void test_address_message( const graphene::net::message& msg, std::size_t num_elements ) -{ - try + graphene::net::node_id_t get_configured_node_id() const { - const auto& addr_msg = msg.as(); - BOOST_CHECK_EQUAL( addr_msg.addresses.size(), num_elements ); + return configured_node_id ? 
*configured_node_id : get_public_key(); } - catch( fc::exception& ) + + graphene::net::hello_message create_hello_message( const graphene::net::chain_id_type& chain_id ) { - BOOST_FAIL( "Expecting address_message" ); + graphene::net::hello_message hello; + + hello.user_agent = "Test peer"; + + hello.inbound_address = get_remote_endpoint()->get_address(); + hello.inbound_port = get_remote_endpoint()->port(); + hello.outbound_port = hello.inbound_port; + + hello.node_public_key = generated_private_key.get_public_key(); + + fc::sha256::encoder shared_secret_encoder; + fc::sha512 shared_secret = get_shared_secret(); // note: the shared secret is now just a zero-initialized array + shared_secret_encoder.write(shared_secret.data(), sizeof(shared_secret)); + hello.signed_shared_secret = generated_private_key.sign_compact(shared_secret_encoder.result()); + + hello.chain_id = chain_id; + + if( configured_node_id ) + { + fc::mutable_variant_object user_data; + user_data["node_id"] = fc::variant( *configured_node_id, 1 ); + hello.user_data = user_data; + } + + return hello; } -} +}; -class test_node : public graphene::net::node, public graphene::net::node_delegate +static void test_closing_connection_message( const graphene::net::message& msg ) { -public: - std::vector> test_peers; + BOOST_REQUIRE( msg.msg_type.value() == graphene::net::closing_connection_message::type ); +} - test_node( const std::string& name, const fc::path& config_dir, int port, int seed_port = -1 ) - : node( name ) - { - std::cout << "test_node::test_node(): current thread=" << uint64_t(&fc::thread::current()) << std::endl; - node_name = name; - } - ~test_node() - { - my->get_thread()->async( [&]() { - this->test_peers.clear(); - }).wait(); - } +static void test_connection_accepted_message( const graphene::net::message& msg ) +{ + BOOST_REQUIRE( msg.msg_type.value() == graphene::net::connection_accepted_message::type ); +} - void on_message( graphene::net::peer_connection_ptr originating_peer, - const graphene::net::message& received_message ) - { - my->get_thread()->async( [&]() { - my->on_message( originating_peer.get(), received_message ); - }).wait(); - } +static void test_connection_rejected_message( const graphene::net::message& msg ) +{ + BOOST_REQUIRE( msg.msg_type.value() == graphene::net::connection_rejected_message::type ); +} - std::pair, std::shared_ptr> create_test_peer( std::string url ) - { - return this->my->get_thread()->async( [&, &url = url](){ - std::shared_ptr d{}; - auto peer = std::make_shared( d.get() ); - peer->set_remote_endpoint( fc::optional( fc::ip::endpoint::from_string( url )) ); - this->test_peers.push_back( peer ); - return std::make_pair( d, peer ); - }).wait(); - } +static void test_address_message( const graphene::net::message& msg, std::size_t num_elements ) +{ + BOOST_REQUIRE( msg.msg_type.value() == graphene::net::address_message::type ); + const auto& addr_msg = msg.as(); + BOOST_CHECK_EQUAL( addr_msg.addresses.size(), num_elements ); +} - std::pair, graphene::net::peer_connection_ptr> - create_peer_connection( std::string url ) +class test_node_delegate : public graphene::net::node_delegate +{ +private: + graphene::net::chain_id_type chain_id = fc::sha256::hash(std::string("p2p_test_chain")); + std::string node_name; +public: + explicit test_node_delegate( const std::string& name ) + : node_name( name ) { - return this->my->get_thread()->async( [&, &url = url](){ - std::shared_ptr d{}; - graphene::net::peer_connection_ptr peer = graphene::net::peer_connection::make_shared( d.get() ); - 
peer->set_remote_endpoint( fc::optional( fc::ip::endpoint::from_string( url )) ); - peer->remote_inbound_endpoint = peer->get_remote_endpoint(); - my->move_peer_to_active_list( peer ); - return std::make_pair( d, peer ); - }).wait(); } - /**** * Implementation methods of node_delegate */ @@ -197,7 +203,7 @@ class test_node : public graphene::net::node, public graphene::net::node_delegat graphene::net::chain_id_type get_chain_id() const { ilog("${name} get_chain_id was called", ("name",node_name)); - return graphene::net::chain_id_type(); + return chain_id; } std::vector get_blockchain_synopsis( const graphene::net::item_hash_t& reference_point, @@ -238,11 +244,99 @@ class test_node : public graphene::net::node, public graphene::net::node_delegat ilog( "${name} get_current_block_interval_in_seconds was called", ("name",node_name) ); return 0; } +}; + +class test_node : public graphene::net::node +{ +public: + std::vector> test_peers; + + test_node( const std::string& name, const fc::path& config_dir, int port, int seed_port = -1 ) + : node( name ) + { + std::cout << "test_node::test_node(): current thread=" << uint64_t(&fc::thread::current()) << std::endl; + node_name = name; + load_configuration( config_dir ); + set_node_delegate( std::make_shared( name ) ); + } + ~test_node() + { + my->get_thread()->async( [&]() { + this->test_peers.clear(); + }).wait(); + } + + const graphene::net::peer_database& get_peer_db() + { + return my->_potential_peer_db; + } + + const graphene::net::chain_id_type& get_chain_id() const + { + return my->_chain_id; + } + + const graphene::net::node_id_t& get_node_id() const + { + return my->_node_id; + } + + void start_fake_network_connect_loop() + { + my->_p2p_network_connect_loop_done = fc::schedule( []{}, fc::time_point::now() + fc::minutes(5), "dummy_task" ); + } + + void stop_fake_network_connect_loop() + { + try { my->_p2p_network_connect_loop_done.cancel(); } catch( ... 
) { } + } + + void on_message( graphene::net::peer_connection_ptr originating_peer, + const graphene::net::message& received_message ) + { + my->get_thread()->async( [&]() { + my->on_message( originating_peer.get(), received_message ); + }).wait(); + } + + std::pair, std::shared_ptr> create_test_peer( std::string url ) + { + return this->my->get_thread()->async( [&, &url = url](){ + std::shared_ptr d{}; + auto peer = std::make_shared( d.get() ); + peer->set_remote_endpoint( fc::optional( fc::ip::endpoint::from_string( url )) ); + this->test_peers.push_back( peer ); + return std::make_pair( d, peer ); + }).wait(); + } + + std::pair, graphene::net::peer_connection_ptr> + create_peer_connection( std::string url ) + { + return this->my->get_thread()->async( [&, &url = url](){ + std::shared_ptr d{}; + graphene::net::peer_connection_ptr peer = graphene::net::peer_connection::make_shared( d.get() ); + peer->set_remote_endpoint( fc::optional( fc::ip::endpoint::from_string( url )) ); + peer->remote_inbound_endpoint = peer->get_remote_endpoint(); + my->move_peer_to_active_list( peer ); + return std::make_pair( d, peer ); + }).wait(); + } private: std::string node_name; }; +// this class is to simulate that a test_node started to connect to the network and accepting connections +class fake_network_connect_guard +{ +private: + test_node& _node; +public: + explicit fake_network_connect_guard( test_node& n) : _node(n) { _node.start_fake_network_connect_loop(); } + ~fake_network_connect_guard() { _node.stop_fake_network_connect_loop(); } +}; + struct p2p_fixture { p2p_fixture() @@ -265,6 +359,444 @@ struct p2p_fixture BOOST_FIXTURE_TEST_SUITE( p2p_node_tests, p2p_fixture ) +/**** + * Testing normal hello message processing + */ +BOOST_AUTO_TEST_CASE( hello_test ) +{ try { + // create a node (node1) + int node1_port = fc::network::get_available_port(); + fc::temp_directory node1_dir( graphene::utilities::temp_directory_path() ); + test_node node1( "Node1", node1_dir.path(), node1_port ); + // simulate that node1 started to connect to the network and accepting connections + fake_network_connect_guard guard( node1 ); + + // a new peer (peer3) + std::pair, std::shared_ptr> peer3 + = node1.create_test_peer( "1.2.3.4:5678" ); + std::shared_ptr peer3_ptr = peer3.second; + // simulate that peer3 initialized the connection to node1 + peer3_ptr->their_state = test_peer::their_connection_state::just_connected; + peer3_ptr->direction = graphene::net::peer_connection_direction::inbound; + // simulate that peer3 has a node_id that is different than its public key + graphene::net::node_id_t peer3_public_key = peer3_ptr->get_public_key(); + graphene::net::node_id_t peer3_node_id = fc::ecc::private_key::generate().get_public_key(); + peer3_ptr->set_configured_node_id( peer3_node_id ); + BOOST_CHECK( peer3_node_id == peer3_ptr->get_configured_node_id() ); + BOOST_CHECK( peer3_node_id != peer3_ptr->node_id ); + BOOST_CHECK( peer3_public_key != peer3_ptr->node_public_key ); + BOOST_CHECK( peer3_public_key != peer3_node_id ); + + // peer3 send hello + graphene::net::hello_message req = peer3_ptr->create_hello_message( node1.get_chain_id() ); + req.inbound_address = fc::ip::address( "9.9.9.9" ); + node1.on_message( peer3_ptr, req ); + + // check the results + // peer3 is accepted + BOOST_REQUIRE_EQUAL( peer3_ptr->messages_received.size(), 1U ); + const auto& msg = peer3_ptr->messages_received.front(); + test_connection_accepted_message( msg ); + BOOST_CHECK( test_peer::their_connection_state::connection_accepted == 
peer3_ptr->their_state ); + + // check data + BOOST_CHECK( peer3_node_id == peer3_ptr->node_id ); + BOOST_CHECK( peer3_public_key == peer3_ptr->node_public_key ); + + // check that peer3 is added to the peer database because it is an inbound connection + { + fc::ip::endpoint peer3_ep = fc::ip::endpoint::from_string( std::string("1.2.3.4:5678") ); + const auto peer_record = node1.get_peer_db().lookup_entry_for_endpoint( peer3_ep ); + BOOST_CHECK( peer_record.valid() ); + } + { + fc::ip::endpoint peer3_ep = fc::ip::endpoint::from_string( std::string("9.9.9.9:5678") ); + const auto peer_record = node1.get_peer_db().lookup_entry_for_endpoint( peer3_ep ); + BOOST_CHECK( peer_record.valid() ); + } + +} FC_CAPTURE_LOG_AND_RETHROW( (0) ) } + +/**** + * Testing normal hello message processing when the peer is not accepting connections + */ +BOOST_AUTO_TEST_CASE( hello_firewalled_peer_test ) +{ try { + // create a node (node1) + int node1_port = fc::network::get_available_port(); + fc::temp_directory node1_dir( graphene::utilities::temp_directory_path() ); + test_node node1( "Node1", node1_dir.path(), node1_port ); + // simulate that node1 started to connect to the network and accepting connections + fake_network_connect_guard guard( node1 ); + + // a new peer (peer3) + std::pair, std::shared_ptr> peer3 + = node1.create_test_peer( "1.2.3.4:5678" ); + std::shared_ptr peer3_ptr = peer3.second; + // simulate that peer3 initialized the connection to node1 + peer3_ptr->their_state = test_peer::their_connection_state::just_connected; + peer3_ptr->direction = graphene::net::peer_connection_direction::inbound; + // peer3 does not have a node_id that is different than its public key + graphene::net::node_id_t peer3_public_key = peer3_ptr->get_public_key(); + graphene::net::node_id_t peer3_node_id = peer3_ptr->get_configured_node_id(); + BOOST_CHECK( peer3_node_id != peer3_ptr->node_id ); + BOOST_CHECK( peer3_public_key != peer3_ptr->node_public_key ); + BOOST_CHECK( peer3_public_key == peer3_node_id ); + + // peer3 send hello + graphene::net::hello_message req = peer3_ptr->create_hello_message( node1.get_chain_id() ); + req.inbound_address = fc::ip::address( "9.9.9.9" ); + req.inbound_port = 0; + node1.on_message( peer3_ptr, req ); + + // check the results + // peer3 is accepted + BOOST_REQUIRE_EQUAL( peer3_ptr->messages_received.size(), 1U ); + const auto& msg = peer3_ptr->messages_received.front(); + test_connection_accepted_message( msg ); + BOOST_CHECK( test_peer::their_connection_state::connection_accepted == peer3_ptr->their_state ); + // we think peer3 is firewalled + BOOST_CHECK( graphene::net::firewalled_state::firewalled == peer3_ptr->is_firewalled ); + + // check data + BOOST_CHECK( peer3_node_id == peer3_ptr->node_id ); + BOOST_CHECK( peer3_public_key == peer3_ptr->node_public_key ); + BOOST_CHECK( peer3_public_key == peer3_node_id ); + + // check that peer3 is not added to the peer database because it is not accepting connections + { + fc::ip::endpoint peer3_ep = fc::ip::endpoint::from_string( std::string("1.2.3.4:5678") ); + const auto peer_record = node1.get_peer_db().lookup_entry_for_endpoint( peer3_ep ); + BOOST_CHECK( !peer_record.valid() ); + } + { + fc::ip::endpoint peer3_ep = fc::ip::endpoint::from_string( std::string("9.9.9.9:5678") ); + const auto peer_record = node1.get_peer_db().lookup_entry_for_endpoint( peer3_ep ); + BOOST_CHECK( !peer_record.valid() ); + } + { + fc::ip::endpoint peer3_ep = fc::ip::endpoint::from_string( std::string("1.2.3.4:0") ); + const auto peer_record = 
node1.get_peer_db().lookup_entry_for_endpoint( peer3_ep ); + BOOST_CHECK( !peer_record.valid() ); + } + { + fc::ip::endpoint peer3_ep = fc::ip::endpoint::from_string( std::string("9.9.9.9:0") ); + const auto peer_record = node1.get_peer_db().lookup_entry_for_endpoint( peer3_ep ); + BOOST_CHECK( !peer_record.valid() ); + } + +} FC_CAPTURE_LOG_AND_RETHROW( (0) ) } + +/**** + * If a peer sent us a hello message when we aren't accepting connections, the peer will be rejected. + */ +BOOST_AUTO_TEST_CASE( hello_not_accepting_connections ) +{ try { + // create a node (node1) + int node1_port = fc::network::get_available_port(); + fc::temp_directory node1_dir( graphene::utilities::temp_directory_path() ); + test_node node1( "Node1", node1_dir.path(), node1_port ); + // Note: no fake_network_connect_guard here, by default the node is not accepting connections + + // a new peer (peer3) + std::pair, std::shared_ptr> peer3 + = node1.create_test_peer( "1.2.3.4:5678" ); + std::shared_ptr peer3_ptr = peer3.second; + // simulate that node1 initialized the connection to peer3 + peer3_ptr->their_state = test_peer::their_connection_state::just_connected; + peer3_ptr->direction = graphene::net::peer_connection_direction::outbound; + + // peer3 send hello + graphene::net::hello_message req = peer3_ptr->create_hello_message( node1.get_chain_id() ); + node1.on_message( peer3_ptr, req ); + + // check the results + // peer3 is rejected + BOOST_REQUIRE_EQUAL( peer3_ptr->messages_received.size(), 1U ); + const auto& msg = peer3_ptr->messages_received.front(); + test_connection_rejected_message( msg ); + BOOST_CHECK( test_peer::their_connection_state::connection_rejected == peer3_ptr->their_state ); + + // check that peer3 is not added to the peer database because it is an outbound connection + { + fc::ip::endpoint peer3_ep = fc::ip::endpoint::from_string( std::string("1.2.3.4:5678") ); + const auto peer_record = node1.get_peer_db().lookup_entry_for_endpoint( peer3_ep ); + BOOST_CHECK( !peer_record.valid() ); + } +} FC_CAPTURE_LOG_AND_RETHROW( (0) ) } + +/**** + * If a peer sent us a hello message when we aren't expecting it, the peer will be disconnected. 
+ */ +BOOST_AUTO_TEST_CASE( hello_unexpected ) +{ + // create a node (node1) + int node1_port = fc::network::get_available_port(); + fc::temp_directory node1_dir( graphene::utilities::temp_directory_path() ); + test_node node1( "Node1", node1_dir.path(), node1_port ); + // simulate that node1 started to connect to the network and accepting connections + fake_network_connect_guard guard( node1 ); + + // a new peer (peer3) + std::pair, std::shared_ptr> peer3 + = node1.create_test_peer( "1.2.3.4:5678" ); + std::shared_ptr peer3_ptr = peer3.second; + // simulate that node1 got its hello request and accepted the connection + peer3_ptr->their_state = test_peer::their_connection_state::connection_accepted; + + // peer3 send hello + graphene::net::hello_message req = peer3_ptr->create_hello_message( node1.get_chain_id() ); + node1.on_message( peer3_ptr, req ); + + // check the results + // peer3 should not send hello so the connection should be closed + BOOST_REQUIRE_EQUAL( peer3_ptr->messages_received.size(), 1U ); + const auto& msg = peer3_ptr->messages_received.front(); + test_closing_connection_message( msg ); + + // peer3 request again + peer3_ptr->messages_received.clear(); + node1.on_message( peer3_ptr, req ); + + // the request is ignored + BOOST_REQUIRE_EQUAL( peer3_ptr->messages_received.size(), 0 ); +} + +/**** + * If we receive a hello from a node which is on a different chain, + * disconnected and greatly delay the next attempt to reconnect + */ +BOOST_AUTO_TEST_CASE( hello_from_different_chain ) +{ + // create a node (node1) + int node1_port = fc::network::get_available_port(); + fc::temp_directory node1_dir( graphene::utilities::temp_directory_path() ); + test_node node1( "Node1", node1_dir.path(), node1_port ); + // simulate that node1 started to connect to the network and accepting connections + fake_network_connect_guard guard( node1 ); + + // a new peer (peer3) + std::pair, std::shared_ptr> peer3 + = node1.create_test_peer( "1.2.3.4:5678" ); + std::shared_ptr peer3_ptr = peer3.second; + // simulate that node1 initialized the connection to peer3 + peer3_ptr->their_state = test_peer::their_connection_state::just_connected; + peer3_ptr->direction = graphene::net::peer_connection_direction::outbound; + // simulate that peer3 is in node1's peer database + node1.add_seed_node( "1.2.3.4:5678" ); + { + fc::ip::endpoint peer3_ep = fc::ip::endpoint::from_string( std::string("1.2.3.4:5678") ); + const auto peer_record = node1.get_peer_db().lookup_entry_for_endpoint( peer3_ep ); + BOOST_REQUIRE( peer_record.valid() ); + } + + const auto now = fc::time_point::now(); + + // peer3 send hello + graphene::net::chain_id_type chain_id = fc::sha256::hash(std::string("dummy_chain")); + graphene::net::hello_message req = peer3_ptr->create_hello_message( chain_id ); + node1.on_message( peer3_ptr, req ); + + // check the results + // the connection should be closed + BOOST_REQUIRE_EQUAL( peer3_ptr->messages_received.size(), 2U ); + const auto& msg1 = peer3_ptr->messages_received.front(); + test_connection_rejected_message( msg1 ); + const auto& msg2 = peer3_ptr->messages_received.back(); + test_closing_connection_message( msg2 ); + BOOST_CHECK( test_peer::their_connection_state::connection_rejected == peer3_ptr->their_state ); + // check peer db + { + fc::ip::endpoint peer3_ep = fc::ip::endpoint::from_string( std::string("1.2.3.4:5678") ); + const auto peer_record = node1.get_peer_db().lookup_entry_for_endpoint( peer3_ep ); + BOOST_REQUIRE( peer_record.valid() ); + BOOST_CHECK( 
peer_record->last_connection_disposition == graphene::net::last_connection_rejected ); + BOOST_CHECK( peer_record->last_connection_attempt_time >= now ); + BOOST_CHECK_GE( peer_record->number_of_failed_connection_attempts, 10 ); + } +} + +/**** + * If a peer sends us a hello message with an invalid signature, we reject and disconnect it. + */ +BOOST_AUTO_TEST_CASE( hello_invalid_signature ) +{ try { + // create a node (node1) + int node1_port = fc::network::get_available_port(); + fc::temp_directory node1_dir( graphene::utilities::temp_directory_path() ); + test_node node1( "Node1", node1_dir.path(), node1_port ); + // simulate that node1 started to connect to the network and accepting connections + fake_network_connect_guard guard( node1 ); + + // a new peer (peer3) + std::pair, std::shared_ptr> peer3 + = node1.create_test_peer( "1.2.3.4:5678" ); + std::shared_ptr peer3_ptr = peer3.second; + // simulate that peer3 initialized the connection to node1 + peer3_ptr->their_state = test_peer::their_connection_state::just_connected; + peer3_ptr->direction = graphene::net::peer_connection_direction::inbound; + + // peer3 send hello + graphene::net::hello_message req = peer3_ptr->create_hello_message( node1.get_chain_id() ); + req.signed_shared_secret = fc::ecc::compact_signature(); + node1.on_message( peer3_ptr, req ); + + // check the results + // the connection should be closed + BOOST_REQUIRE_EQUAL( peer3_ptr->messages_received.size(), 2U ); + const auto& msg1 = peer3_ptr->messages_received.front(); + test_connection_rejected_message( msg1 ); + const auto& msg2 = peer3_ptr->messages_received.back(); + test_closing_connection_message( msg2 ); + BOOST_CHECK( test_peer::their_connection_state::connection_rejected == peer3_ptr->their_state ); + +} FC_CAPTURE_LOG_AND_RETHROW( (0) ) } + +/**** + * If a peer sends us a hello message with an empty node_id, we disconnect it. 
+ */ +BOOST_AUTO_TEST_CASE( hello_null_node_id ) +{ try { + // create a node (node1) + int node1_port = fc::network::get_available_port(); + fc::temp_directory node1_dir( graphene::utilities::temp_directory_path() ); + test_node node1( "Node1", node1_dir.path(), node1_port ); + // simulate that node1 started to connect to the network and accepting connections + fake_network_connect_guard guard( node1 ); + + // a new peer (peer3) + std::pair, std::shared_ptr> peer3 + = node1.create_test_peer( "1.2.3.4:5678" ); + std::shared_ptr peer3_ptr = peer3.second; + // simulate that peer3 initialized the connection to node1 + peer3_ptr->their_state = test_peer::their_connection_state::just_connected; + peer3_ptr->direction = graphene::net::peer_connection_direction::inbound; + + // peer3 send hello + graphene::net::hello_message req = peer3_ptr->create_hello_message( node1.get_chain_id() ); + fc::mutable_variant_object user_data = req.user_data; + user_data["node_id"] = fc::variant( graphene::net::node_id_t(), 1 ); + req.user_data = user_data; + node1.on_message( peer3_ptr, req ); + + // check the results + // the connection should be closed + BOOST_REQUIRE_EQUAL( peer3_ptr->messages_received.size(), 1U ); + const auto& msg1 = peer3_ptr->messages_received.back(); + test_closing_connection_message( msg1 ); + +} FC_CAPTURE_LOG_AND_RETHROW( (0) ) } + +/**** + * If a peer sends us a hello message with a node_id identical to ours, we reject and disconnect it, + * and greatly delay the next attempt to reconnect + */ +BOOST_AUTO_TEST_CASE( hello_from_self ) +{ try { + // create a node (node1) + int node1_port = fc::network::get_available_port(); + fc::temp_directory node1_dir( graphene::utilities::temp_directory_path() ); + test_node node1( "Node1", node1_dir.path(), node1_port ); + // simulate that node1 started to connect to the network and accepting connections + fake_network_connect_guard guard( node1 ); + + // a new peer (peer3) + std::pair, std::shared_ptr> peer3 + = node1.create_test_peer( "1.2.3.4:5678" ); + std::shared_ptr peer3_ptr = peer3.second; + // simulate that node1 initialized the connection to peer3 + peer3_ptr->their_state = test_peer::their_connection_state::just_connected; + peer3_ptr->direction = graphene::net::peer_connection_direction::outbound; + // simulate that peer3 is in node1's peer database + node1.add_seed_node( "1.2.3.4:5678" ); + { + fc::ip::endpoint peer3_ep = fc::ip::endpoint::from_string( std::string("1.2.3.4:5678") ); + const auto peer_record = node1.get_peer_db().lookup_entry_for_endpoint( peer3_ep ); + BOOST_REQUIRE( peer_record.valid() ); + } + + const auto now = fc::time_point::now(); + + // peer3 send hello + graphene::net::hello_message req = peer3_ptr->create_hello_message( node1.get_chain_id() ); + fc::mutable_variant_object user_data = req.user_data; + user_data["node_id"] = fc::variant( node1.get_node_id(), 1 ); + req.user_data = user_data; + node1.on_message( peer3_ptr, req ); + + // check the results + // the connection should be closed + BOOST_REQUIRE_EQUAL( peer3_ptr->messages_received.size(), 2U ); + const auto& msg1 = peer3_ptr->messages_received.front(); + test_connection_rejected_message( msg1 ); + const auto& msg2 = peer3_ptr->messages_received.back(); + test_closing_connection_message( msg2 ); + BOOST_CHECK( test_peer::their_connection_state::connection_rejected == peer3_ptr->their_state ); + // check peer db + { + fc::ip::endpoint peer3_ep = fc::ip::endpoint::from_string( std::string("1.2.3.4:5678") ); + const auto peer_record = 
node1.get_peer_db().lookup_entry_for_endpoint( peer3_ep ); + BOOST_REQUIRE( peer_record.valid() ); + BOOST_CHECK( peer_record->last_connection_disposition == graphene::net::last_connection_rejected ); + BOOST_CHECK( peer_record->last_connection_attempt_time >= now ); + BOOST_CHECK_GE( peer_record->number_of_failed_connection_attempts, 10 ); + } + +} FC_CAPTURE_LOG_AND_RETHROW( (0) ) } + +/**** + * If a peer sends us a hello message and we find it is already connected to us, we reject and disconnect it. + */ +BOOST_AUTO_TEST_CASE( hello_already_connected ) +{ try { + // create a node (node1) + int node1_port = fc::network::get_available_port(); + fc::temp_directory node1_dir( graphene::utilities::temp_directory_path() ); + test_node node1( "Node1", node1_dir.path(), node1_port ); + // simulate that node1 started to connect to the network and accepting connections + fake_network_connect_guard guard( node1 ); + + // get something in the list of connections + std::pair, graphene::net::peer_connection_ptr> node2_rslts + = node1.create_peer_connection( "127.0.0.1:8090" ); + auto node2_ptr = node2_rslts.second; + + // a new peer (peer3) + std::pair, std::shared_ptr> peer3 + = node1.create_test_peer( "1.2.3.4:5678" ); + std::shared_ptr peer3_ptr = peer3.second; + // simulate that node1 initialized the connection to peer3 + peer3_ptr->their_state = test_peer::their_connection_state::just_connected; + peer3_ptr->direction = graphene::net::peer_connection_direction::outbound; + // simulate that node2 and peer3 has the same public_key and node_id + node2_ptr->node_public_key = peer3_ptr->get_public_key(); + node2_ptr->node_id = node2_ptr->node_public_key; + + // peer3 send hello + graphene::net::hello_message req = peer3_ptr->create_hello_message( node1.get_chain_id() ); + node1.on_message( peer3_ptr, req ); + + // check the results + // the connection should be closed + BOOST_REQUIRE_EQUAL( peer3_ptr->messages_received.size(), 2U ); + const auto& msg1 = peer3_ptr->messages_received.front(); + test_connection_rejected_message( msg1 ); + const auto& msg2 = peer3_ptr->messages_received.back(); + test_closing_connection_message( msg2 ); + BOOST_CHECK( test_peer::their_connection_state::connection_rejected == peer3_ptr->their_state ); + + // check node2's data + { + fc::ip::endpoint peer3_ep = fc::ip::endpoint::from_string( std::string("1.2.3.4:5678") ); + BOOST_CHECK( *node2_ptr->remote_inbound_endpoint == peer3_ep ); + BOOST_CHECK( node2_ptr->inbound_endpoint_verified ); + BOOST_CHECK( graphene::net::firewalled_state::not_firewalled == node2_ptr->is_firewalled ); + BOOST_REQUIRE_EQUAL( node2_ptr->additional_inbound_endpoints.size(), 1u ); + BOOST_CHECK( *node2_ptr->additional_inbound_endpoints.begin() == peer3_ep ); + } + +} FC_CAPTURE_LOG_AND_RETHROW( (0) ) } + /**** * If a node requests addresses without sending hello_message first, it will be disconnected. 
*/ @@ -272,8 +804,10 @@ BOOST_AUTO_TEST_CASE( address_request_without_hello ) { // create a node (node1) int node1_port = fc::network::get_available_port(); - fc::temp_directory node1_dir; + fc::temp_directory node1_dir( graphene::utilities::temp_directory_path() ); test_node node1( "Node1", node1_dir.path(), node1_port ); + // simulate that node1 started to connect to the network and accepting connections + fake_network_connect_guard guard( node1 ); // get something in the list of connections std::pair, graphene::net::peer_connection_ptr> node2_rslts @@ -310,8 +844,10 @@ BOOST_AUTO_TEST_CASE( disable_peer_advertising ) { // create a node (node1) int node1_port = fc::network::get_available_port(); - fc::temp_directory node1_dir; + fc::temp_directory node1_dir( graphene::utilities::temp_directory_path() ); test_node node1( "Node1", node1_dir.path(), node1_port ); + // simulate that node1 started to connect to the network and accepting connections + fake_network_connect_guard guard( node1 ); // disable peer advertising node1.disable_peer_advertising(); @@ -342,8 +878,10 @@ BOOST_AUTO_TEST_CASE( set_nothing_advertise_algorithm ) { // create a node (node1) int node1_port = fc::network::get_available_port(); - fc::temp_directory node1_dir; + fc::temp_directory node1_dir( graphene::utilities::temp_directory_path() ); test_node node1( "Node1", node1_dir.path(), node1_port ); + // simulate that node1 started to connect to the network and accepting connections + fake_network_connect_guard guard( node1 ); // set advertise algorithm to "nothing" node1.set_advertise_algorithm( "nothing" ); @@ -374,11 +912,13 @@ BOOST_AUTO_TEST_CASE( advertise_list_test ) { // create a node (node1) int node1_port = fc::network::get_available_port(); - fc::temp_directory node1_dir; + fc::temp_directory node1_dir( graphene::utilities::temp_directory_path() ); test_node node1( "Node1", node1_dir.path(), node1_port ); + // simulate that node1 started to connect to the network and accepting connections + fake_network_connect_guard guard( node1 ); // set advertise algorithm to "list" - std::vector advert_list = { "127.0.0.1:8090", "2.3.4.55:1234" }; + std::vector advert_list = { "127.0.0.1:8090", "2.3.4.55:1234", "bad_one" }; node1.set_advertise_algorithm( "list", advert_list ); // add some connections, 1 of which appears on the advertise_list @@ -413,8 +953,10 @@ BOOST_AUTO_TEST_CASE( exclude_list ) { // create a node (node1) int node1_port = fc::network::get_available_port(); - fc::temp_directory node1_dir; + fc::temp_directory node1_dir( graphene::utilities::temp_directory_path() ); test_node node1( "Node1", node1_dir.path(), node1_port ); + // simulate that node1 started to connect to the network and accepting connections + fake_network_connect_guard guard( node1 ); // set advertise algorithm to "exclude_list" std::vector ex_list = { "127.0.0.1:8090", "2.3.4.55:1234" }; @@ -448,4 +990,44 @@ BOOST_AUTO_TEST_CASE( exclude_list ) test_closing_connection_message( msg2 ); } +BOOST_AUTO_TEST_CASE( advertising_all_test ) +{ + // create a node (node1) + int node1_port = fc::network::get_available_port(); + fc::temp_directory node1_dir( graphene::utilities::temp_directory_path() ); + test_node node1( "Node1", node1_dir.path(), node1_port ); + // simulate that node1 started to connect to the network and accepting connections + fake_network_connect_guard guard( node1 ); + + // set advertise algorithm to "all" + node1.set_advertise_algorithm( "all" ); + + // add some connections + std::pair, graphene::net::peer_connection_ptr> 
node1_rslts + = node1.create_peer_connection("127.0.0.1:8089"); + std::pair, graphene::net::peer_connection_ptr> node2_rslts + = node1.create_peer_connection("127.0.0.1:8090"); + std::pair, graphene::net::peer_connection_ptr> node2_reslts + = node1.create_peer_connection("127.0.0.1:8091"); + + // a new peer (peer3) + std::pair, std::shared_ptr> peer3 + = node1.create_test_peer( "1.2.3.4:5678" ); + std::shared_ptr peer3_ptr = peer3.second; + // simulate that node1 got its hello request and rejected the connection + peer3_ptr->their_state = test_peer::their_connection_state::connection_rejected; + + // peer3 request addresses + graphene::net::address_request_message req; + node1.on_message( peer3_ptr, req ); + + // check the results + // node1 replies with 3 addresses, then closes the connection + BOOST_REQUIRE_EQUAL( peer3_ptr->messages_received.size(), 2U ); + const auto& msg1 = peer3_ptr->messages_received.front(); + test_address_message( msg1, 3 ); + const auto& msg2 = peer3_ptr->messages_received.back(); + test_closing_connection_message( msg2 ); +} + BOOST_AUTO_TEST_SUITE_END() From bbea3c90e215d4c990b168809ca66f66b8d21f4b Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 6 Sep 2022 16:37:02 +0000 Subject: [PATCH 210/338] Fix threading issues in p2p_node_tests --- tests/tests/p2p_node_tests.cpp | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/tests/tests/p2p_node_tests.cpp b/tests/tests/p2p_node_tests.cpp index 7e8cb86330..d0ede89cc9 100644 --- a/tests/tests/p2p_node_tests.cpp +++ b/tests/tests/p2p_node_tests.cpp @@ -283,7 +283,10 @@ class test_node : public graphene::net::node void start_fake_network_connect_loop() { - my->_p2p_network_connect_loop_done = fc::schedule( []{}, fc::time_point::now() + fc::minutes(5), "dummy_task" ); + this->my->get_thread()->async( [&]() { + my->_p2p_network_connect_loop_done + = fc::schedule( [&]{}, fc::time_point::now() + fc::minutes(5), "dummy_task" ); + }).wait(); } void stop_fake_network_connect_loop() @@ -323,6 +326,14 @@ class test_node : public graphene::net::node }).wait(); } + graphene::net::hello_message create_hello_message_from_peer( std::shared_ptr peer_ptr, + const graphene::net::chain_id_type& chain_id ) + { + return this->my->get_thread()->async( [&](){ + return peer_ptr->create_hello_message( chain_id ); + }).wait(); + } + private: std::string node_name; }; @@ -388,7 +399,7 @@ BOOST_AUTO_TEST_CASE( hello_test ) BOOST_CHECK( peer3_public_key != peer3_node_id ); // peer3 send hello - graphene::net::hello_message req = peer3_ptr->create_hello_message( node1.get_chain_id() ); + graphene::net::hello_message req = node1.create_hello_message_from_peer( peer3_ptr, node1.get_chain_id() ); req.inbound_address = fc::ip::address( "9.9.9.9" ); node1.on_message( peer3_ptr, req ); @@ -444,7 +455,7 @@ BOOST_AUTO_TEST_CASE( hello_firewalled_peer_test ) BOOST_CHECK( peer3_public_key == peer3_node_id ); // peer3 send hello - graphene::net::hello_message req = peer3_ptr->create_hello_message( node1.get_chain_id() ); + graphene::net::hello_message req = node1.create_hello_message_from_peer( peer3_ptr, node1.get_chain_id() ); req.inbound_address = fc::ip::address( "9.9.9.9" ); req.inbound_port = 0; node1.on_message( peer3_ptr, req ); @@ -507,7 +518,7 @@ BOOST_AUTO_TEST_CASE( hello_not_accepting_connections ) peer3_ptr->direction = graphene::net::peer_connection_direction::outbound; // peer3 send hello - graphene::net::hello_message req = peer3_ptr->create_hello_message( node1.get_chain_id() ); + 
graphene::net::hello_message req = node1.create_hello_message_from_peer( peer3_ptr, node1.get_chain_id() ); node1.on_message( peer3_ptr, req ); // check the results @@ -545,7 +556,7 @@ BOOST_AUTO_TEST_CASE( hello_unexpected ) peer3_ptr->their_state = test_peer::their_connection_state::connection_accepted; // peer3 send hello - graphene::net::hello_message req = peer3_ptr->create_hello_message( node1.get_chain_id() ); + graphene::net::hello_message req = node1.create_hello_message_from_peer( peer3_ptr, node1.get_chain_id() ); node1.on_message( peer3_ptr, req ); // check the results @@ -594,7 +605,7 @@ BOOST_AUTO_TEST_CASE( hello_from_different_chain ) // peer3 send hello graphene::net::chain_id_type chain_id = fc::sha256::hash(std::string("dummy_chain")); - graphene::net::hello_message req = peer3_ptr->create_hello_message( chain_id ); + graphene::net::hello_message req = node1.create_hello_message_from_peer( peer3_ptr, chain_id ); node1.on_message( peer3_ptr, req ); // check the results @@ -637,7 +648,7 @@ BOOST_AUTO_TEST_CASE( hello_invalid_signature ) peer3_ptr->direction = graphene::net::peer_connection_direction::inbound; // peer3 send hello - graphene::net::hello_message req = peer3_ptr->create_hello_message( node1.get_chain_id() ); + graphene::net::hello_message req = node1.create_hello_message_from_peer( peer3_ptr, node1.get_chain_id() ); req.signed_shared_secret = fc::ecc::compact_signature(); node1.on_message( peer3_ptr, req ); @@ -673,7 +684,7 @@ BOOST_AUTO_TEST_CASE( hello_null_node_id ) peer3_ptr->direction = graphene::net::peer_connection_direction::inbound; // peer3 send hello - graphene::net::hello_message req = peer3_ptr->create_hello_message( node1.get_chain_id() ); + graphene::net::hello_message req = node1.create_hello_message_from_peer( peer3_ptr, node1.get_chain_id() ); fc::mutable_variant_object user_data = req.user_data; user_data["node_id"] = fc::variant( graphene::net::node_id_t(), 1 ); req.user_data = user_data; @@ -718,7 +729,7 @@ BOOST_AUTO_TEST_CASE( hello_from_self ) const auto now = fc::time_point::now(); // peer3 send hello - graphene::net::hello_message req = peer3_ptr->create_hello_message( node1.get_chain_id() ); + graphene::net::hello_message req = node1.create_hello_message_from_peer( peer3_ptr, node1.get_chain_id() ); fc::mutable_variant_object user_data = req.user_data; user_data["node_id"] = fc::variant( node1.get_node_id(), 1 ); req.user_data = user_data; @@ -773,7 +784,7 @@ BOOST_AUTO_TEST_CASE( hello_already_connected ) node2_ptr->node_id = node2_ptr->node_public_key; // peer3 send hello - graphene::net::hello_message req = peer3_ptr->create_hello_message( node1.get_chain_id() ); + graphene::net::hello_message req = node1.create_hello_message_from_peer( peer3_ptr, node1.get_chain_id() ); node1.on_message( peer3_ptr, req ); // check the results From 01a4e2720e93f5bc1d0c15fc4f3a0e2dfb16b015 Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 6 Sep 2022 20:29:35 +0000 Subject: [PATCH 211/338] Fix code smells --- .../include/graphene/net/peer_database.hpp | 2 +- libraries/net/node.cpp | 24 ++++++++++--------- libraries/net/node_impl.hxx | 2 +- libraries/net/peer_database.cpp | 8 +++---- programs/network_mapper/network_mapper.cpp | 21 +++++++++------- 5 files changed, 32 insertions(+), 25 deletions(-) diff --git a/libraries/net/include/graphene/net/peer_database.hpp b/libraries/net/include/graphene/net/peer_database.hpp index 4ae2cbb06e..8ba79ac501 100644 --- a/libraries/net/include/graphene/net/peer_database.hpp +++ 
b/libraries/net/include/graphene/net/peer_database.hpp @@ -106,7 +106,7 @@ namespace graphene { namespace net { void erase(const fc::ip::endpoint& endpointToErase); void update_entry(const potential_peer_record& updatedRecord); - potential_peer_record lookup_or_create_entry_for_endpoint(const fc::ip::endpoint& endpointToLookup)const; + potential_peer_record lookup_or_create_entry_for_ep(const fc::ip::endpoint& endpointToLookup)const; fc::optional lookup_entry_for_endpoint(const fc::ip::endpoint& endpointToLookup)const; using iterator = detail::peer_database_iterator; diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index ce7902a5f5..58c94ab96c 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -1300,7 +1300,7 @@ namespace graphene { namespace net { namespace detail { if( 0 == address.remote_endpoint.port() ) continue; // Note: if found, a copy is returned - auto updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(address.remote_endpoint); + auto updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_ep(address.remote_endpoint); // Note: // We don't save node_id in the peer database so far // 1. node_id of that peer may have changed, but we don't check or update @@ -1556,7 +1556,7 @@ namespace graphene { namespace net { namespace detail { expected_node_public_key = fc::ecc::public_key( hello_message_received.signed_shared_secret, shared_secret_encoder.result(), false ); } - catch( fc::exception& e ) + catch( const fc::exception& e ) { wlog( "Error when validating signature in hello message from peer ${peer}: ${e}", ("peer", originating_peer->get_remote_endpoint())("e", e.to_detail_string()) ); @@ -1587,6 +1587,8 @@ namespace graphene { namespace net { namespace detail { catch (const fc::exception&) { // either it's not there or it's not a valid session id. either way, ignore. 
+ dlog( "Peer ${endpoint} sent us a hello message without a valid node_id in user_data", + ("endpoint", originating_peer->get_remote_endpoint() ) ); } // The peer's node_id should not be null static const node_id_t null_node_id; @@ -1780,7 +1782,7 @@ namespace graphene { namespace net { namespace detail { for( const auto& ep : endpoints_to_save ) { // add to the peer database - auto updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint( ep ); + auto updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_ep( ep ); updated_peer_record.last_seen_time = fc::time_point::now(); _potential_peer_db.update_entry( updated_peer_record ); } @@ -1977,7 +1979,7 @@ namespace graphene { namespace net { namespace detail { ("endpoint", address.remote_endpoint)("time", address.last_seen_time) ("fw", address.firewalled) ); ++count; - if( count >= _max_addresses_to_handle_at_once ) + if( count >= _max_addrs_to_handle_at_once ) break; } std::vector updated_addresses; @@ -1995,7 +1997,7 @@ namespace graphene { namespace net { namespace detail { address.direction, address.firewalled ); ++count; - if( count >= _max_addresses_to_handle_at_once ) + if( count >= _max_addrs_to_handle_at_once ) break; } if ( _node_configuration.connect_to_new_peers @@ -4043,7 +4045,7 @@ namespace graphene { namespace net { namespace detail { VERIFY_CORRECT_THREAD(); // create or find the database entry for the new peer - auto updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(remote_endpoint); + auto updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_ep(remote_endpoint); updated_peer_record.last_connection_disposition = last_connection_failed; updated_peer_record.last_connection_attempt_time = fc::time_point::now();; _potential_peer_db.update_entry(updated_peer_record); @@ -4059,7 +4061,7 @@ namespace graphene { namespace net { namespace detail { new_peer->is_firewalled = firewalled_state::not_firewalled; // connection succeeded, we've started handshaking. record that in our database - updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(remote_endpoint); + updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_ep(remote_endpoint); updated_peer_record.last_connection_disposition = last_connection_handshaking_failed; updated_peer_record.number_of_successful_connection_attempts++; updated_peer_record.last_seen_time = fc::time_point::now(); @@ -4073,7 +4075,7 @@ namespace graphene { namespace net { namespace detail { if( connect_failed_exception ) { // connection failed. 
record that in our database - updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(remote_endpoint); + updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_ep(remote_endpoint); updated_peer_record.last_connection_disposition = last_connection_failed; updated_peer_record.number_of_failed_connection_attempts++; if (new_peer->connection_closed_error) @@ -4336,7 +4338,7 @@ namespace graphene { namespace net { namespace detail { { VERIFY_CORRECT_THREAD(); // if we're connecting to them, we believe they're not firewalled - auto updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_endpoint(ep); + auto updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_ep(ep); // if we've recently connected to this peer, reset the last_connection_attempt_time to allow // us to immediately retry this peer @@ -4828,7 +4830,7 @@ namespace graphene { namespace net { namespace detail { if (params.contains("maximum_number_of_connections")) _maximum_number_of_connections = params["maximum_number_of_connections"].as(1); if (params.contains("max_addresses_to_handle_at_once")) - _max_addresses_to_handle_at_once = params["max_addresses_to_handle_at_once"].as(1); + _max_addrs_to_handle_at_once = params["max_addresses_to_handle_at_once"].as(1); if (params.contains("max_blocks_to_handle_at_once")) _max_blocks_to_handle_at_once = params["max_blocks_to_handle_at_once"].as(1); if (params.contains("max_sync_blocks_to_prefetch")) @@ -4851,7 +4853,7 @@ namespace graphene { namespace net { namespace detail { result["peer_connection_retry_timeout"] = _peer_connection_retry_timeout; result["desired_number_of_connections"] = _desired_number_of_connections; result["maximum_number_of_connections"] = _maximum_number_of_connections; - result["max_addresses_to_handle_at_once"] = _max_addresses_to_handle_at_once; + result["max_addresses_to_handle_at_once"] = _max_addrs_to_handle_at_once; result["max_blocks_to_handle_at_once"] = _max_blocks_to_handle_at_once; result["max_sync_blocks_to_prefetch"] = _max_sync_blocks_to_prefetch; result["max_sync_blocks_per_peer"] = _max_sync_blocks_per_peer; diff --git a/libraries/net/node_impl.hxx b/libraries/net/node_impl.hxx index 00385c73f6..a41ed6914d 100644 --- a/libraries/net/node_impl.hxx +++ b/libraries/net/node_impl.hxx @@ -615,7 +615,7 @@ public: bool _node_is_shutting_down = false; /// Maximum number of addresses to handle at one time - size_t _max_addresses_to_handle_at_once = MAX_ADDRESSES_TO_HANDLE_AT_ONCE; + size_t _max_addrs_to_handle_at_once = MAX_ADDRESSES_TO_HANDLE_AT_ONCE; /// Maximum number of blocks to handle at one time size_t _max_blocks_to_handle_at_once = MAX_BLOCKS_TO_HANDLE_AT_ONCE; /// Maximum number of sync blocks to prefetch diff --git a/libraries/net/peer_database.cpp b/libraries/net/peer_database.cpp index 3c13c9a334..4c5b89d686 100644 --- a/libraries/net/peer_database.cpp +++ b/libraries/net/peer_database.cpp @@ -68,7 +68,7 @@ namespace graphene { namespace net { void clear(); void erase(const fc::ip::endpoint& endpointToErase); void update_entry(const potential_peer_record& updatedRecord); - potential_peer_record lookup_or_create_entry_for_endpoint(const fc::ip::endpoint& endpointToLookup)const; + potential_peer_record lookup_or_create_entry_for_ep(const fc::ip::endpoint& endpointToLookup)const; fc::optional lookup_entry_for_endpoint(const fc::ip::endpoint& endpointToLookup)const; peer_database::iterator begin() const; @@ -156,7 +156,7 @@ namespace graphene { namespace net { 
_potential_peer_set.get().insert(updatedRecord); } - potential_peer_record peer_database_impl::lookup_or_create_entry_for_endpoint( + potential_peer_record peer_database_impl::lookup_or_create_entry_for_ep( const fc::ip::endpoint& endpointToLookup ) const { auto iter = _potential_peer_set.get().find(endpointToLookup); @@ -254,10 +254,10 @@ namespace graphene { namespace net { my->update_entry(updatedRecord); } - potential_peer_record peer_database::lookup_or_create_entry_for_endpoint( + potential_peer_record peer_database::lookup_or_create_entry_for_ep( const fc::ip::endpoint& endpointToLookup ) const { - return my->lookup_or_create_entry_for_endpoint(endpointToLookup); + return my->lookup_or_create_entry_for_ep(endpointToLookup); } fc::optional peer_database::lookup_entry_for_endpoint( diff --git a/programs/network_mapper/network_mapper.cpp b/programs/network_mapper/network_mapper.cpp index 00c334eee4..79fda2c114 100644 --- a/programs/network_mapper/network_mapper.cpp +++ b/programs/network_mapper/network_mapper.cpp @@ -102,7 +102,10 @@ class peer_probe : public graphene::net::peer_connection_delegate try { if (hello_message_received.user_data.contains("node_id")) _node_id = hello_message_received.user_data["node_id"].as( 1 ); - } catch( fc::exception& ) { /* do nothing */ } + } catch( const fc::exception& ) { + dlog( "Peer ${endpoint} sent us a hello message with an invalid node_id in user_data", + ("endpoint", originating_peer->get_remote_endpoint() ) ); + } originating_peer->send_message(graphene::net::connection_rejected_message()); } @@ -206,9 +209,7 @@ int main(int argc, char** argv) std::map node_id_by_endpoint; std::vector> probes; - const auto& update_info_by_probe = [ &connections_by_node_id, &address_info_by_node_id, - &node_id_by_endpoint, &my_node_id, - &nodes_already_visited, &nodes_to_visit_set, &nodes_to_visit ] + const auto& update_info_by_probe = [ &connections_by_node_id, &address_info_by_node_id, &node_id_by_endpoint ] ( const std::shared_ptr& probe ) { idump( (probe->_node_id)(probe->_remote)(probe->_peers.size()) ); @@ -224,11 +225,14 @@ int main(int argc, char** argv) address_info_by_node_id[this_node_info.node_id] = this_node_info; node_id_by_endpoint[probe->_remote] = probe->_node_id; + }; - for (const graphene::net::address_info& info : probe->_peers) - { + const auto& update_info_by_address_info = [ &address_info_by_node_id, &my_node_id, + &nodes_already_visited, &nodes_to_visit_set, &nodes_to_visit ] + ( const graphene::net::address_info& info ) + { if (info.node_id == my_node_id) // We should not be in the list, just be defensive here - continue; + return; if (nodes_already_visited.find(info.remote_endpoint) == nodes_already_visited.end() && nodes_to_visit_set.find(info.remote_endpoint) == nodes_to_visit_set.end()) { @@ -247,7 +251,6 @@ int main(int argc, char** argv) // Replace private or local addresses with public addresses when possible address_info_by_node_id[info.node_id].remote_endpoint = info.remote_endpoint; } - } }; constexpr size_t max_concurrent_probes = 200; @@ -283,6 +286,8 @@ int main(int argc, char** argv) continue; } update_info_by_probe(probe); + for (const graphene::net::address_info& info : probe->_peers) + update_info_by_address_info( info ); } constexpr uint32_t five = 5; From 68c452b5343368b9fc50c826e60300ae558a6332 Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 6 Sep 2022 20:39:01 +0000 Subject: [PATCH 212/338] Remove redundant "disable-peer-advertising" option --- libraries/app/application.cpp | 9 +---- 
libraries/net/include/graphene/net/node.hpp | 1 - libraries/net/node.cpp | 11 ------ libraries/net/node_impl.hxx | 1 - tests/tests/p2p_node_tests.cpp | 38 --------------------- 5 files changed, 1 insertion(+), 59 deletions(-) diff --git a/libraries/app/application.cpp b/libraries/app/application.cpp index 7d605f143d..a14c4584fa 100644 --- a/libraries/app/application.cpp +++ b/libraries/app/application.cpp @@ -146,12 +146,7 @@ void application_impl::reset_p2p_node(const fc::path& data_dir) _p2p_network->add_seed_nodes(seeds); } - if ( _options->count( "disable-peer-advertising" ) > 0 - && _options->at( "disable-peer-advertising").as() ) // == true - { - _p2p_network->disable_peer_advertising(); - } - else if( _options->count( "advertise-peer-algorithm" ) > 0 ) + if( _options->count( "advertise-peer-algorithm" ) > 0 ) { std::string algo = _options->at("advertise-peer-algorithm").as(); std::vector list; @@ -1196,8 +1191,6 @@ void application::set_program_options(boost::program_options::options_descriptio ("exclude-peer-node", bpo::value>()->composing(), "P2P node to not advertise, only takes effect when algorithm is 'exclude_list' " "(may specify multiple times)") - ("disable-peer-advertising", bpo::value()->implicit_value(false), - "Disable advertising your peers. Note: Overrides any advertise-peer-algorithm settings") ("seed-node,s", bpo::value>()->composing(), "P2P node to connect to on startup (may specify multiple times)") ("seed-nodes", bpo::value()->composing(), diff --git a/libraries/net/include/graphene/net/node.hpp b/libraries/net/include/graphene/net/node.hpp index 592546582f..d3b5dae7c0 100644 --- a/libraries/net/include/graphene/net/node.hpp +++ b/libraries/net/include/graphene/net/node.hpp @@ -317,7 +317,6 @@ namespace graphene { namespace net { std::vector get_potential_peers() const; - void disable_peer_advertising() const; fc::variant_object get_call_statistics() const; protected: node_impl_ptr my; diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 58c94ab96c..4b986a2204 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -4910,12 +4910,6 @@ namespace graphene { namespace net { namespace detail { _rate_limiter.set_download_limit( download_bytes_per_second ); } - void node_impl::disable_peer_advertising() - { - VERIFY_CORRECT_THREAD(); - _address_builder = nullptr; - } - fc::variant_object node_impl::get_call_statistics() const { VERIFY_CORRECT_THREAD(); @@ -5127,11 +5121,6 @@ namespace graphene { namespace net { namespace detail { INVOKE_IN_IMPL(set_total_bandwidth_limit, upload_bytes_per_second, download_bytes_per_second); } - void node::disable_peer_advertising() const - { - INVOKE_IN_IMPL(disable_peer_advertising); - } - fc::variant_object node::get_call_statistics() const { INVOKE_IN_IMPL(get_call_statistics); diff --git a/libraries/net/node_impl.hxx b/libraries/net/node_impl.hxx index a41ed6914d..45a6d6e65a 100644 --- a/libraries/net/node_impl.hxx +++ b/libraries/net/node_impl.hxx @@ -818,7 +818,6 @@ public: void clear_peer_database(); void set_total_bandwidth_limit( uint32_t upload_bytes_per_second, uint32_t download_bytes_per_second ); - void disable_peer_advertising(); fc::variant_object get_call_statistics() const; graphene::net::message get_message_for_item(const item_id& item) override; diff --git a/tests/tests/p2p_node_tests.cpp b/tests/tests/p2p_node_tests.cpp index d0ede89cc9..a759fd044f 100644 --- a/tests/tests/p2p_node_tests.cpp +++ b/tests/tests/p2p_node_tests.cpp @@ -847,44 +847,6 @@ BOOST_AUTO_TEST_CASE( 
address_request_without_hello ) BOOST_REQUIRE_EQUAL( peer3_ptr->messages_received.size(), 0 ); } -/**** - * Assure that when disable_peer_advertising is set, - * the node does not share its peer list - */ -BOOST_AUTO_TEST_CASE( disable_peer_advertising ) -{ - // create a node (node1) - int node1_port = fc::network::get_available_port(); - fc::temp_directory node1_dir( graphene::utilities::temp_directory_path() ); - test_node node1( "Node1", node1_dir.path(), node1_port ); - // simulate that node1 started to connect to the network and accepting connections - fake_network_connect_guard guard( node1 ); - - // disable peer advertising - node1.disable_peer_advertising(); - - // get something in the list of connections - std::pair, graphene::net::peer_connection_ptr> node2_rslts - = node1.create_peer_connection( "127.0.0.1:8090" ); - - // a new peer (peer3) - std::pair, std::shared_ptr> peer3 - = node1.create_test_peer( "1.2.3.4:5678" ); - std::shared_ptr peer3_ptr = peer3.second; - // simulate that node1 got its hello request and accepted the connection - peer3_ptr->their_state = test_peer::their_connection_state::connection_accepted; - - // peer3 request addresses - graphene::net::address_request_message req; - node1.on_message( peer3_ptr, req ); - - // check the results - // Node1 does not share the peer list with others - BOOST_REQUIRE_EQUAL( peer3_ptr->messages_received.size(), 1U ); - const auto& msg = peer3_ptr->messages_received.front(); - test_address_message( msg, 0 ); -} - BOOST_AUTO_TEST_CASE( set_nothing_advertise_algorithm ) { // create a node (node1) From 09d6ca77ce25a6894402f0a30465287131816ced Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 6 Sep 2022 21:15:56 +0000 Subject: [PATCH 213/338] Skip processing addresses if !connect_to_new_peers --- libraries/net/node.cpp | 71 +++++++++++++++++++++--------------------- 1 file changed, 36 insertions(+), 35 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 4b986a2204..a568b4fb3b 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -1949,9 +1949,9 @@ namespace graphene { namespace net { namespace detail { _address_builder = std::make_shared(); } - void node_impl::on_address_message(peer_connection* originating_peer, - const address_message& address_message_received) - { + void node_impl::on_address_message( peer_connection* originating_peer, + const address_message& address_message_received ) + { VERIFY_CORRECT_THREAD(); // Do some gatekeeping here. 
// Malious peers can easily bypass our checks in on_hello_message(), and we will then request addresses anyway, @@ -1972,39 +1972,40 @@ namespace graphene { namespace net { namespace detail { dlog( "Received an address message containing ${size} addresses for peer ${peer}", ("size", address_message_received.addresses.size()) ("peer", originating_peer->get_remote_endpoint()) ); - size_t count = 0; - for (const address_info& address : address_message_received.addresses) - { - dlog( " ${endpoint} last seen ${time}, firewalled status ${fw}", - ("endpoint", address.remote_endpoint)("time", address.last_seen_time) - ("fw", address.firewalled) ); - ++count; - if( count >= _max_addrs_to_handle_at_once ) - break; - } - std::vector updated_addresses; - updated_addresses.reserve( count ); - auto now = fc::time_point_sec(fc::time_point::now()); - count = 0; - for( const address_info& address : address_message_received.addresses ) - { - if( 0 == address.remote_endpoint.port() ) - continue; - updated_addresses.emplace_back( address.remote_endpoint, - now, - address.latency, - address.node_id, - address.direction, - address.firewalled ); - ++count; - if( count >= _max_addrs_to_handle_at_once ) - break; - } - if ( _node_configuration.connect_to_new_peers - && merge_address_info_with_potential_peer_database(updated_addresses) ) + if( _node_configuration.connect_to_new_peers ) { - trigger_p2p_network_connect_loop(); + size_t count = 0; + for (const address_info& address : address_message_received.addresses) + { + dlog( " ${endpoint} last seen ${time}, firewalled status ${fw}", + ("endpoint", address.remote_endpoint)("time", address.last_seen_time) + ("fw", address.firewalled) ); + ++count; + if( count >= _max_addrs_to_handle_at_once ) + break; + } + std::vector updated_addresses; + updated_addresses.reserve( count ); + auto now = fc::time_point_sec(fc::time_point::now()); + count = 0; + for( const address_info& address : address_message_received.addresses ) + { + if( 0 == address.remote_endpoint.port() ) + continue; + updated_addresses.emplace_back( address.remote_endpoint, + now, + address.latency, + address.node_id, + address.direction, + address.firewalled ); + ++count; + if( count >= _max_addrs_to_handle_at_once ) + break; + } + if( merge_address_info_with_potential_peer_database(updated_addresses) ) + trigger_p2p_network_connect_loop(); } + if (_handshaking_connections.find(originating_peer->shared_from_this()) != _handshaking_connections.end()) { // if we were handshaking, we need to continue with the next step in handshaking (which is either @@ -2044,7 +2045,7 @@ namespace graphene { namespace net { namespace detail { } // else if this was an active connection, then this was just a reply to our periodic address requests. // we've processed it, there's nothing else to do - } + } void node_impl::on_fetch_blockchain_item_ids_message(peer_connection* originating_peer, const fetch_blockchain_item_ids_message& fetch_blockchain_item_ids_message_received) From 9ce161a1588a9452b9605902d79a4da7b90b31ac Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 6 Sep 2022 23:15:08 +0000 Subject: [PATCH 214/338] Reconnect from OS-selected port if fail to connect ... from successfully bound local port. BTW add logging. 
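
In outline, the fallback works as sketched below (a minimal, self-contained
illustration using a hypothetical Socket type and helper names rather than
fc's actual tcp_socket interface; the real change is in
peer_connection::connect_to()):

    #include <exception>
    #include <iostream>
    #include <optional>
    #include <string>

    struct Socket  // stand-in for the real socket; real calls can throw
    {
       bool bind( const std::string& local ) { return local != "in_use"; }
       void connect( const std::string& remote ) { (void)remote; }
       void close() {}
       void open() {}
    };

    // Try to connect from a preferred local endpoint; if the connect fails
    // even though the bind succeeded, re-open the socket so the OS picks the
    // local port, then retry once.
    void connect_with_fallback( Socket& sock, const std::string& remote,
                                const std::optional<std::string>& local )
    {
       bool bound = false;
       if( local )
          bound = sock.bind( *local );   // a failed bind is only a warning
       try
       {
          sock.connect( remote );        // first attempt, maybe from the bound port
          return;
       }
       catch( const std::exception& )
       {
          if( !local || !bound )
             throw;                      // no fallback available
       }
       sock.close();                     // retry once from an OS-selected port
       sock.open();
       sock.connect( remote );
    }

    int main()
    {
       Socket s;
       connect_with_fallback( s, "203.0.113.10:1776", std::string("0.0.0.0:1776") );
       std::cout << "connected\n";
    }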
--- libraries/net/node.cpp | 15 +++++++++--- libraries/net/peer_connection.cpp | 40 +++++++++++++++++++++++++++---- 2 files changed, 48 insertions(+), 7 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index a568b4fb3b..84b205b24e 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -155,6 +155,7 @@ namespace graphene { namespace net { namespace detail { /// Greatly delays the next connection to the endpoint static void greatly_delay_next_conn_to( node_impl* impl, const fc::ip::endpoint& ep ) { + dlog( "Greatly delaying the next connection to endpoint ${ep}", ("ep", ep) ); fc::optional updated_peer_record = impl->_potential_peer_db.lookup_entry_for_endpoint( ep ); if( updated_peer_record ) @@ -1602,6 +1603,10 @@ namespace graphene { namespace net { namespace detail { // Check whether the peer is myself if( _node_id == peer_node_id ) { + dlog( "Received a hello_message from peer ${peer} with id ${id} that is myself or claimed to be myself, " + "rejection", + ("peer", originating_peer->get_remote_endpoint()) + ("id", peer_node_id) ); // If it is an outbound connection, make sure we won't reconnect to the peer soon if( peer_connection_direction::outbound == originating_peer->direction ) { @@ -1619,9 +1624,6 @@ namespace graphene { namespace net { namespace detail { "I'm connecting to myself" ); originating_peer->their_state = peer_connection::their_connection_state::connection_rejected; originating_peer->send_message( message(connection_rejected) ); - dlog( "Received a hello_message from peer ${peer} that is myself or claimed to be myself, rejection", - ("peer", originating_peer->get_remote_endpoint()) - ("id", originating_peer->node_id) ); disconnect_from_peer( originating_peer, connection_rejected.reason_string ); return; } @@ -1700,6 +1702,10 @@ namespace graphene { namespace net { namespace detail { if( peer_connection_direction::outbound == originating_peer->direction && originating_peer->node_public_key == already_connected_peer->node_public_key ) { + dlog( "Verified that endpoint ${ep} is reachable and belongs to peer ${peer} with id ${id}", + ("ep", originating_peer->get_remote_endpoint()) + ("peer", already_connected_peer->get_remote_endpoint()) + ("id", already_connected_peer->node_id) ); // Do not replace a verified public address with a private or local address. 
// Note: there is a scenario that some nodes in the same local network may have connected to each other, // and of course some are outbound connections and some are inbound, so we are unable to update @@ -1720,6 +1726,9 @@ namespace graphene { namespace net { namespace detail { || new_inbound_endpoint->get_address().is_public_address() || !old_inbound_endpoint->get_address().is_public_address() ) { + dlog( "Saving verification result for peer ${peer} with id ${id}", + ("peer", already_connected_peer->get_remote_endpoint()) + ("id", already_connected_peer->node_id) ); already_connected_peer->remote_inbound_endpoint = new_inbound_endpoint; already_connected_peer->inbound_endpoint_verified = true; already_connected_peer->is_firewalled = firewalled_state::not_firewalled; diff --git a/libraries/net/peer_connection.cpp b/libraries/net/peer_connection.cpp index a87dbad59a..2ca826fdb9 100644 --- a/libraries/net/peer_connection.cpp +++ b/libraries/net/peer_connection.cpp @@ -238,6 +238,7 @@ namespace graphene { namespace net direction = peer_connection_direction::outbound; _remote_endpoint = remote_endpoint; + bool failed_to_bind = false; if( local_endpoint ) { // the caller wants us to bind the local side of this socket to a specific ip/port @@ -254,11 +255,40 @@ namespace graphene { namespace net } catch ( const fc::exception& except ) { - wlog( "Failed to bind to desired local endpoint ${endpoint}, will connect using an OS-selected endpoint: ${except}", ("endpoint", *local_endpoint )("except", except ) ); + failed_to_bind = true; + wlog( "Failed to bind to desired local endpoint ${endpoint}, will connect using an OS-selected " + "endpoint: ${except}", + ("endpoint", *local_endpoint )("except", except ) ); } } negotiation_status = connection_negotiation_status::connecting; - _message_connection.connect_to( remote_endpoint ); + bool retry = false; + try + { + _message_connection.connect_to( remote_endpoint ); + } + catch ( const fc::canceled_exception& ) + { + throw; + } + catch ( const fc::exception& except ) + { + if( local_endpoint && !failed_to_bind ) + { + retry = true; + wlog( "Failed to connect to remote endpoint ${remote_endpoint} from local endpoint ${local_endpoint}, " + "will connect using an OS-selected endpoint: ${except}", + ("remote_endpoint", remote_endpoint )("local_endpoint", *local_endpoint )("except", except ) ); + } + else + throw; + } + if( retry ) + { + get_socket().close(); + get_socket().open(); + _message_connection.connect_to( remote_endpoint ); + } negotiation_status = connection_negotiation_status::connected; their_state = their_connection_state::just_connected; our_state = our_connection_state::just_connected; @@ -268,12 +298,14 @@ namespace graphene { namespace net } catch ( fc::exception& e ) { - wlog( "error connecting to peer ${remote_endpoint}: ${e}", ("remote_endpoint", remote_endpoint )("e", e.to_detail_string() ) ); + wlog( "error connecting to peer ${remote_endpoint}: ${e}", + ("remote_endpoint", remote_endpoint )("e", e.to_detail_string() ) ); throw; } } // connect_to() - void peer_connection::on_message( message_oriented_connection* originating_connection, const message& received_message ) + void peer_connection::on_message( message_oriented_connection* originating_connection, + const message& received_message ) { VERIFY_CORRECT_THREAD(); _currently_handling_message = true; From dc72295525ccb3a5581cdbd312b561813d1f0b04 Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 7 Sep 2022 00:11:21 +0000 Subject: [PATCH 215/338] Save successfully connected endpoint 
to peer db --- libraries/net/node.cpp | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 84b205b24e..67d0fd561f 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -167,6 +167,19 @@ namespace graphene { namespace net { namespace detail { impl->_potential_peer_db.update_entry( *updated_peer_record ); } } + /// Saves a successfully connected endpoint to the peer database + static void save_successful_address( node_impl* impl, const fc::ip::endpoint& ep ) + { + dlog( "Saving successfully connected endpoint ${ep} to peer database", ("ep", ep) ); + auto updated_peer_record = impl->_potential_peer_db.lookup_or_create_entry_for_ep( ep ); + updated_peer_record.last_connection_disposition = last_connection_succeeded; + updated_peer_record.last_connection_attempt_time = fc::time_point::now(); + // halve number_of_failed_connection_attempts + constexpr uint16_t two = 2; + updated_peer_record.number_of_failed_connection_attempts /= two; + updated_peer_record.last_seen_time = fc::time_point::now(); + impl->_potential_peer_db.update_entry(updated_peer_record); + } static void update_address_seen_time( node_impl* impl, const peer_connection* active_peer ) { fc::optional inbound_endpoint = active_peer->get_endpoint_for_connecting(); @@ -1733,6 +1746,10 @@ namespace graphene { namespace net { namespace detail { already_connected_peer->inbound_endpoint_verified = true; already_connected_peer->is_firewalled = firewalled_state::not_firewalled; } + // If the already connected peer is in the active connections list, save the endpoint to the peer db + if( peer_connection::connection_negotiation_status::negotiation_complete + == already_connected_peer->negotiation_status ) + save_successful_address( this, *new_inbound_endpoint ); } // Now reject connection_rejected_message connection_rejected( _user_agent_string, core_protocol_version, @@ -2028,23 +2045,7 @@ namespace graphene { namespace net { namespace detail { // Note: updating last_connection_disposition to last_connection_succeeded for inbound connections // doesn't seem correct if( peer_connection_direction::outbound == originating_peer->direction ) - { - const fc::optional& inbound_endpoint = originating_peer->get_endpoint_for_connecting(); - if( inbound_endpoint.valid() && inbound_endpoint->port() != 0 ) - { - // mark the connection as successful in the database - fc::optional updated_peer_record - = _potential_peer_db.lookup_entry_for_endpoint(*inbound_endpoint); - if (updated_peer_record) - { - updated_peer_record->last_connection_disposition = last_connection_succeeded; - // halve number_of_failed_connection_attempts - constexpr uint16_t two = 2; - updated_peer_record->number_of_failed_connection_attempts /= two; - _potential_peer_db.update_entry(*updated_peer_record); - } - } - } + save_successful_address( this, *originating_peer->get_remote_endpoint() ); // transition it to our active list originating_peer->negotiation_status = peer_connection::connection_negotiation_status::negotiation_complete; From 616969100f7d5264eb0a0ab7f85245aa776350c5 Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 7 Sep 2022 00:46:50 +0000 Subject: [PATCH 216/338] Fix code smells --- programs/network_mapper/network_mapper.cpp | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/programs/network_mapper/network_mapper.cpp b/programs/network_mapper/network_mapper.cpp index 79fda2c114..988aa1e31e 100644 --- 
a/programs/network_mapper/network_mapper.cpp +++ b/programs/network_mapper/network_mapper.cpp @@ -227,9 +227,8 @@ int main(int argc, char** argv) node_id_by_endpoint[probe->_remote] = probe->_node_id; }; - const auto& update_info_by_address_info = [ &address_info_by_node_id, &my_node_id, - &nodes_already_visited, &nodes_to_visit_set, &nodes_to_visit ] - ( const graphene::net::address_info& info ) + const auto& update_info_by_address_info = [ &address_info_by_node_id, &my_node_id, &nodes_already_visited, + &nodes_to_visit_set, &nodes_to_visit ] ( const graphene::net::address_info& info ) { if (info.node_id == my_node_id) // We should not be in the list, just be defensive here return; @@ -247,10 +246,8 @@ int main(int argc, char** argv) } else if ( !address_info_by_node_id[info.node_id].remote_endpoint.get_address().is_public_address() && info.remote_endpoint.get_address().is_public_address() ) - { // Replace private or local addresses with public addresses when possible address_info_by_node_id[info.node_id].remote_endpoint = info.remote_endpoint; - } }; constexpr size_t max_concurrent_probes = 200; From e3ddde51cfacdf5a2d24a3351e13a3afcb9481f8 Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 7 Sep 2022 01:27:24 +0000 Subject: [PATCH 217/338] Fix typo --- programs/network_mapper/network_mapper.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/network_mapper/network_mapper.cpp b/programs/network_mapper/network_mapper.cpp index 988aa1e31e..f623a46d06 100644 --- a/programs/network_mapper/network_mapper.cpp +++ b/programs/network_mapper/network_mapper.cpp @@ -120,7 +120,7 @@ class peer_probe : public graphene::net::peer_connection_delegate const graphene::net::connection_rejected_message& connection_rejected_message_received ) { // Note: We will be rejected and disconnected if our chain_id is not the same as the peer's . - // If we aren't be disconnected, it is OK to send an address request message. + // If we aren't disconnected, it is OK to send an address request message. _connection_was_rejected = true; wlog( "peer ${endpoint} rejected our connection with reason ${reason}", ("endpoint", originating_peer->get_remote_endpoint() ) From 9edf7899b291e87436669920c00ebdfac2702ad2 Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 7 Sep 2022 14:13:01 +0000 Subject: [PATCH 218/338] Do not expose local address if not listening --- libraries/net/node.cpp | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 67d0fd561f..0425480957 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -4015,6 +4015,8 @@ namespace graphene { namespace net { namespace detail { // * inbound port // * outbound port // + // If we don't accept incoming connections, we send nothing. + // // The peer we're connecting to may assume we're firewalled if the // IP address and outbound port we send don't match the values it sees on its remote endpoint, // but it is not always true, E.G. if the peer itself is behind a reverse proxy. @@ -4024,24 +4026,28 @@ namespace graphene { namespace net { namespace detail { // nor we know whether we're behind NAT or a reverse proxy that will allow incoming connections. // However, if the "p2p-inbound-endpoint" node startup option is configured, we send that instead. 
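The comment block above ends with the rule for what a node advertises about itself in a hello_message, and the hunk that follows rewrites the selection accordingly. Below is a condensed, standalone restatement of that selection under simplified stand-in types (node_view and advertised_endpoints are hypothetical, not the real node_impl members):

#include <cstdint>
#include <iostream>
#include <string>

// What a hello_message advertises about this node's own endpoints.
struct advertised_endpoints {
   std::string inbound_address = "0.0.0.0";
   uint16_t    inbound_port  = 0;   // 0 means "not listening"
   uint16_t    outbound_port = 0;
};

// Hypothetical stand-in for the node configuration and the outbound socket state.
struct node_view {
   bool        accept_incoming_connections = false;
   std::string socket_local_address;          // local address of the outbound socket
   uint16_t    socket_local_port = 0;         // OS-selected local port of that socket
   uint16_t    listening_port    = 0;         // port of _actual_listening_endpoint
   bool        inbound_endpoint_configured = false;   // "p2p-inbound-endpoint" option
   std::string configured_inbound_address;    // "0.0.0.0" means address unknown/dynamic
   uint16_t    configured_inbound_port = 0;
};

advertised_endpoints choose_hello_endpoints( const node_view& n )
{
   advertised_endpoints result;               // stays all-zero if we do not listen
   if( !n.accept_incoming_connections )
      return result;
   result.inbound_address = n.socket_local_address;
   result.inbound_port    = n.listening_port;
   result.outbound_port   = n.socket_local_port;
   if( n.inbound_endpoint_configured )
   {
      if( !n.configured_inbound_address.empty() && n.configured_inbound_address != "0.0.0.0" )
         result.inbound_address = n.configured_inbound_address;
      result.inbound_port = n.configured_inbound_port;
   }
   return result;
}

int main()
{
   node_view n;
   n.accept_incoming_connections = true;
   n.socket_local_address = "203.0.113.10";
   n.socket_local_port    = 54321;
   n.listening_port       = 1776;
   const advertised_endpoints eps = choose_hello_endpoints( n );
   std::cout << eps.inbound_address << ':' << eps.inbound_port
             << " (outbound port " << eps.outbound_port << ")\n";
   return 0;
}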
- fc::ip::endpoint local_endpoint(peer->get_socket().local_endpoint()); - fc::ip::address inbound_address = local_endpoint.get_address(); - uint16_t inbound_port = _node_configuration.accept_incoming_connections ? - _actual_listening_endpoint.port() : 0; - - if( _node_configuration.inbound_endpoint.valid() ) + fc::ip::address inbound_address; // default 0.0.0.0 + uint16_t inbound_port = 0; + uint16_t outbound_port = 0; + if( _node_configuration.accept_incoming_connections ) { - if( _node_configuration.inbound_endpoint->get_address() != fc::ip::address() ) - inbound_address = _node_configuration.inbound_endpoint->get_address(); - if( _node_configuration.accept_incoming_connections ) + fc::ip::endpoint local_endpoint = peer->get_socket().local_endpoint(); + inbound_address = local_endpoint.get_address(); + inbound_port = _actual_listening_endpoint.port(); + outbound_port = local_endpoint.port(); + if( _node_configuration.inbound_endpoint.valid() ) + { + if( _node_configuration.inbound_endpoint->get_address() != fc::ip::address() ) + inbound_address = _node_configuration.inbound_endpoint->get_address(); inbound_port = _node_configuration.inbound_endpoint->port(); + } } hello_message hello(_user_agent_string, core_protocol_version, inbound_address, inbound_port, - local_endpoint.port(), + outbound_port, _node_public_key, signature, _chain_id, From be70a6fb1a36f65f2842455a644c409b571727f2 Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 7 Sep 2022 15:49:57 +0000 Subject: [PATCH 219/338] Rename some startup options related to p2p network --- libraries/app/application.cpp | 39 ++++++++++++++++++----------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/libraries/app/application.cpp b/libraries/app/application.cpp index a14c4584fa..2a620f350c 100644 --- a/libraries/app/application.cpp +++ b/libraries/app/application.cpp @@ -146,14 +146,14 @@ void application_impl::reset_p2p_node(const fc::path& data_dir) _p2p_network->add_seed_nodes(seeds); } - if( _options->count( "advertise-peer-algorithm" ) > 0 ) + if( _options->count( "p2p-advertise-peer-algorithm" ) > 0 ) { - std::string algo = _options->at("advertise-peer-algorithm").as(); + std::string algo = _options->at("p2p-advertise-peer-algorithm").as(); std::vector list; - if( algo == "list" && _options->count("advertise-peer-node") > 0 ) - list = _options->at("advertise-peer-node").as>(); - else if( algo == "exclude_list" && _options->count("exclude-peer-node") > 0 ) - list = _options->at("exclude-peer-node").as>(); + if( algo == "list" && _options->count("p2p-advertise-peer-node") > 0 ) + list = _options->at("p2p-advertise-peer-node").as>(); + else if( algo == "exclude_list" && _options->count("p2p-exclude-peer-node") > 0 ) + list = _options->at("p2p-exclude-peer-node").as>(); _p2p_network->set_advertise_algorithm( algo, list ); } @@ -166,11 +166,11 @@ void application_impl::reset_p2p_node(const fc::path& data_dir) _p2p_network->set_inbound_endpoint( fc::ip::endpoint::from_string(_options->at("p2p-inbound-endpoint") .as()) ); - if ( _options->count("accept-incoming-connections") > 0 ) - _p2p_network->set_accept_incoming_connections( _options->at("accept-incoming-connections").as() ); + if ( _options->count("p2p-accept-incoming-connections") > 0 ) + _p2p_network->set_accept_incoming_connections( _options->at("p2p-accept-incoming-connections").as() ); - if ( _options->count("connect-to-new-peers") > 0 ) - _p2p_network->set_connect_to_new_peers( _options->at( "connect-to-new-peers" ).as() ); + if ( 
_options->count("p2p-connect-to-new-peers") > 0 ) + _p2p_network->set_connect_to_new_peers( _options->at( "p2p-connect-to-new-peers" ).as() ); _p2p_network->listen_to_p2p_network(); ilog("Configured p2p node to listen on ${ip}", ("ip", _p2p_network->get_actual_listening_endpoint())); @@ -1172,23 +1172,24 @@ void application::set_program_options(boost::program_options::options_descriptio const auto& default_opts = application_options::get_default(); configuration_file_options.add_options() ("enable-p2p-network", bpo::value()->implicit_value(true), - "Whether to enable P2P network. Note: if delayed_node plugin is enabled, " + "Whether to enable P2P network (default: true). Note: if delayed_node plugin is enabled, " "this option will be ignored and P2P network will always be disabled.") + ("p2p-accept-incoming-connections", bpo::value()->implicit_value(true), + "Whether to accept incoming connections (default: true)") ("p2p-endpoint", bpo::value(), "Endpoint (local IP address:port) for P2P node to listen on. " "Specify 0.0.0.0 as address to listen on all IP addresses") ("p2p-inbound-endpoint", bpo::value(), "Endpoint (external IP address:port) that other peers should connect to. " "If the address is unknown or dynamic, specify 0.0.0.0") - ("accept-incoming-connections", bpo::value()->implicit_value(true), - "Whether to accept incoming connections") - ("connect-to-new-peers", bpo::value()->implicit_value(true), - "Whether to connect to new peers advertised by other peers") - ("advertise-peer-algorithm", bpo::value()->implicit_value("all"), - "Determines which peers are advertised. Algorithms: 'all', 'nothing', 'list', exclude_list'") - ("advertise-peer-node", bpo::value>()->composing(), + ("p2p-connect-to-new-peers", bpo::value()->implicit_value(true), + "Whether to connect to new peers advertised by other peers (default: true)") + ("p2p-advertise-peer-algorithm", bpo::value()->implicit_value("all"), + "Determines which peers are advertised in response to address requests from other peers. " + "Algorithms: 'all', 'nothing', 'list', exclude_list'. 
(default: all)") + ("p2p-advertise-peer-node", bpo::value>()->composing(), "P2P node to advertise, only takes effect when algorithm is 'list' (may specify multiple times)") - ("exclude-peer-node", bpo::value>()->composing(), + ("p2p-exclude-peer-node", bpo::value>()->composing(), "P2P node to not advertise, only takes effect when algorithm is 'exclude_list' " "(may specify multiple times)") ("seed-node,s", bpo::value>()->composing(), From 9b175ea796f82e5b7cb4abbcc055acd621736983 Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 7 Sep 2022 15:57:13 +0000 Subject: [PATCH 220/338] Do not try to bind if not listening --- libraries/app/application.cpp | 6 +++++- libraries/net/node.cpp | 4 +++- libraries/net/node_impl.hxx | 2 ++ 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/libraries/app/application.cpp b/libraries/app/application.cpp index 2a620f350c..64aba7148d 100644 --- a/libraries/app/application.cpp +++ b/libraries/app/application.cpp @@ -173,7 +173,11 @@ void application_impl::reset_p2p_node(const fc::path& data_dir) _p2p_network->set_connect_to_new_peers( _options->at( "p2p-connect-to-new-peers" ).as() ); _p2p_network->listen_to_p2p_network(); - ilog("Configured p2p node to listen on ${ip}", ("ip", _p2p_network->get_actual_listening_endpoint())); + fc::ip::endpoint listening_endpoint = _p2p_network->get_actual_listening_endpoint(); + if( listening_endpoint.port() != 0 ) + ilog( "Configured p2p node to listen on ${ip}", ("ip", listening_endpoint) ); + else + ilog( "Configured p2p node to not listen for incoming connections" ); _p2p_network->connect_to_p2p_network(); _p2p_network->sync_from(net::item_id(net::core_message_type_enum::block_message_type, diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 0425480957..a14e0afc71 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -4072,7 +4072,9 @@ namespace graphene { namespace net { namespace detail { try { // blocks until the connection is established and secure connection is negotiated - new_peer->connect_to(remote_endpoint, _actual_listening_endpoint); + auto bind_to_endpoint = _node_configuration.accept_incoming_connections ? _actual_listening_endpoint + : fc::optional(); + new_peer->connect_to( remote_endpoint, bind_to_endpoint ); // we connected to the peer. guess they're not firewalled.... new_peer->is_firewalled = firewalled_state::not_firewalled; diff --git a/libraries/net/node_impl.hxx b/libraries/net/node_impl.hxx index 45a6d6e65a..f76aeaaad7 100644 --- a/libraries/net/node_impl.hxx +++ b/libraries/net/node_impl.hxx @@ -434,6 +434,8 @@ public: /// Stores the endpoint we're listening on. This will be the same as /// _node_configuration.listen_endpoint, unless that endpoint was already /// in use. + /// This will be 0.0.0.0:0 if the node is configured to not listen. + // Note: updating the type to optional may break 3rd-party client applications. 
fc::ip::endpoint _actual_listening_endpoint; /// Used by the task that manages connecting to peers From b9f64d63caac5d6ddc0ec06d80902a3ef1f927cd Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 7 Sep 2022 22:06:38 +0000 Subject: [PATCH 221/338] Prefer peer's socket address if it's not listening --- libraries/net/node.cpp | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index a14e0afc71..8733d6c82b 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -1660,10 +1660,24 @@ namespace graphene { namespace net { namespace detail { if( !originating_peer->remote_inbound_endpoint ) { // Note: the data is not yet verified, so we need to use it with caution. - // On the one hand, we want to advertise as accurate data as possible to other peers, + // + // We will advertise "remote_inbound_endpoint" when other peers request addresses. + // + // On the one hand, we want to advertise as accurate data as possible to other peers (we will try to verify), // on the other hand, we still want to advertise it to other peers if we didn't have a chance to verify it. - originating_peer->remote_inbound_endpoint = fc::ip::endpoint( originating_peer->inbound_address, - originating_peer->inbound_port ); + // + // When the peer is not listening (i.e. it tells us its inbound port is 0), the inbound address it tells us + // may be invalid (e.g. 0.0.0.0), and we are not going to verify it anyway. + // For observation purposes, we still advertise it to other peers, and we need to tell them an address, + // so we use the address we see. + // + // In addition, by now, our list or exclude list for peer advertisement only contains IP endpoints but not + // nodes' public keys (we can't use node_id because it changes every time the node restarts). Using a valid + // address is better for the purpose. + originating_peer->remote_inbound_endpoint + = fc::ip::endpoint( originating_peer->inbound_port != 0 ? 
originating_peer->inbound_address + : originating_peer->get_remote_endpoint()->get_address(), + originating_peer->inbound_port ); } // if they didn't provide a last known fork, try to guess it From 1ae2bca1991ff05ceae1e075362d83def2f67b41 Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 7 Sep 2022 22:09:51 +0000 Subject: [PATCH 222/338] Update three_node_network to test not listening --- tests/app/main.cpp | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tests/app/main.cpp b/tests/app/main.cpp index ac9ef02629..f5e937250f 100644 --- a/tests/app/main.cpp +++ b/tests/app/main.cpp @@ -387,14 +387,11 @@ BOOST_AUTO_TEST_CASE( three_node_network ) fc::temp_directory app3_dir( graphene::utilities::temp_directory_path() ); graphene::app::application app3; - app3.register_plugin(); - app3.register_plugin< graphene::market_history::market_history_plugin >(); - app3.register_plugin< graphene::witness_plugin::witness_plugin >(); - app3.register_plugin< graphene::grouped_orders::grouped_orders_plugin>(); auto sharable_cfg3 = std::make_shared(); auto& cfg3 = *sharable_cfg3; fc::set_option( cfg3, "genesis-json", genesis_file ); fc::set_option( cfg3, "seed-nodes", app3_seed_nodes_str ); + fc::set_option( cfg3, "p2p-accept-incoming-connections", false ); app3.initialize(app3_dir.path(), sharable_cfg3); BOOST_TEST_MESSAGE( "Starting app3 and waiting for connection" ); From 53e9c38044791f1db983e4605cc3cd4f739689e4 Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 7 Sep 2022 22:37:59 +0000 Subject: [PATCH 223/338] Rename and update some P2P-related startup options --- libraries/app/application.cpp | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/libraries/app/application.cpp b/libraries/app/application.cpp index 64aba7148d..fc12ce039f 100644 --- a/libraries/app/application.cpp +++ b/libraries/app/application.cpp @@ -150,10 +150,10 @@ void application_impl::reset_p2p_node(const fc::path& data_dir) { std::string algo = _options->at("p2p-advertise-peer-algorithm").as(); std::vector list; - if( algo == "list" && _options->count("p2p-advertise-peer-node") > 0 ) - list = _options->at("p2p-advertise-peer-node").as>(); - else if( algo == "exclude_list" && _options->count("p2p-exclude-peer-node") > 0 ) - list = _options->at("p2p-exclude-peer-node").as>(); + if( algo == "list" && _options->count("p2p-advertise-peer-endpoint") > 0 ) + list = _options->at("p2p-advertise-peer-endpoint").as>(); + else if( algo == "exclude_list" && _options->count("p2p-exclude-peer-endpoint") > 0 ) + list = _options->at("p2p-exclude-peer-endpoint").as>(); _p2p_network->set_advertise_algorithm( algo, list ); } @@ -1179,27 +1179,28 @@ void application::set_program_options(boost::program_options::options_descriptio "Whether to enable P2P network (default: true). Note: if delayed_node plugin is enabled, " "this option will be ignored and P2P network will always be disabled.") ("p2p-accept-incoming-connections", bpo::value()->implicit_value(true), - "Whether to accept incoming connections (default: true)") + "Whether to accept incoming P2P connections (default: true)") ("p2p-endpoint", bpo::value(), - "Endpoint (local IP address:port) for P2P node to listen on. " + "The endpoint (local IP address:port) on which the node will listen for P2P connections. " "Specify 0.0.0.0 as address to listen on all IP addresses") ("p2p-inbound-endpoint", bpo::value(), - "Endpoint (external IP address:port) that other peers should connect to. 
" + "The endpoint (external IP address:port) that other P2P peers should connect to. " "If the address is unknown or dynamic, specify 0.0.0.0") ("p2p-connect-to-new-peers", bpo::value()->implicit_value(true), - "Whether to connect to new peers advertised by other peers (default: true)") + "Whether the node will connect to new P2P peers advertised by other peers (default: true)") ("p2p-advertise-peer-algorithm", bpo::value()->implicit_value("all"), - "Determines which peers are advertised in response to address requests from other peers. " + "Determines which P2P peers are advertised in response to address requests from other peers. " "Algorithms: 'all', 'nothing', 'list', exclude_list'. (default: all)") - ("p2p-advertise-peer-node", bpo::value>()->composing(), - "P2P node to advertise, only takes effect when algorithm is 'list' (may specify multiple times)") - ("p2p-exclude-peer-node", bpo::value>()->composing(), - "P2P node to not advertise, only takes effect when algorithm is 'exclude_list' " - "(may specify multiple times)") + ("p2p-advertise-peer-endpoint", bpo::value>()->composing(), + "The endpoint (IP address:port) of the P2P peer to advertise, only takes effect when algorithm " + "is 'list' (may specify multiple times)") + ("p2p-exclude-peer-endpoint", bpo::value>()->composing(), + "The endpoint (IP address:port) of the P2P peer to not advertise, only takes effect when algorithm " + "is 'exclude_list' (may specify multiple times)") ("seed-node,s", bpo::value>()->composing(), - "P2P node to connect to on startup (may specify multiple times)") + "The endpoint (IP address:port) of the P2P peer to connect to on startup (may specify multiple times)") ("seed-nodes", bpo::value()->composing(), - "JSON array of P2P nodes to connect to on startup") + "JSON array of P2P peers to connect to on startup") ("checkpoint,c", bpo::value>()->composing(), "Pairs of [BLOCK_NUM,BLOCK_ID] that should be enforced as checkpoints.") ("rpc-endpoint", bpo::value()->implicit_value("127.0.0.1:8090"), From b2314263af767c60c409654cf215656751eb2528 Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 8 Sep 2022 18:12:18 +0000 Subject: [PATCH 224/338] Update logging --- libraries/net/node.cpp | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 8733d6c82b..2132eec296 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -1616,7 +1616,7 @@ namespace graphene { namespace net { namespace detail { // Check whether the peer is myself if( _node_id == peer_node_id ) { - dlog( "Received a hello_message from peer ${peer} with id ${id} that is myself or claimed to be myself, " + ilog( "Received a hello_message from peer ${peer} with id ${id} that is myself or claimed to be myself, " "rejection", ("peer", originating_peer->get_remote_endpoint()) ("id", peer_node_id) ); @@ -1729,7 +1729,7 @@ namespace graphene { namespace net { namespace detail { if( peer_connection_direction::outbound == originating_peer->direction && originating_peer->node_public_key == already_connected_peer->node_public_key ) { - dlog( "Verified that endpoint ${ep} is reachable and belongs to peer ${peer} with id ${id}", + ilog( "Verified that endpoint ${ep} is reachable and belongs to peer ${peer} with id ${id}", ("ep", originating_peer->get_remote_endpoint()) ("peer", already_connected_peer->get_remote_endpoint()) ("id", already_connected_peer->node_id) ); @@ -1753,7 +1753,7 @@ namespace graphene { namespace net { namespace detail { || 
new_inbound_endpoint->get_address().is_public_address() || !old_inbound_endpoint->get_address().is_public_address() ) { - dlog( "Saving verification result for peer ${peer} with id ${id}", + ilog( "Saving verification result for peer ${peer} with id ${id}", ("peer", already_connected_peer->get_remote_endpoint()) ("id", already_connected_peer->node_id) ); already_connected_peer->remote_inbound_endpoint = new_inbound_endpoint; @@ -4085,6 +4085,7 @@ namespace graphene { namespace net { namespace detail { try { + ilog("Connecting to peer ${peer}", ("peer", remote_endpoint)); // blocks until the connection is established and secure connection is negotiated auto bind_to_endpoint = _node_configuration.accept_incoming_connections ? _actual_listening_endpoint : fc::optional(); @@ -4141,7 +4142,7 @@ namespace graphene { namespace net { namespace detail { new_peer->our_state = peer_connection::our_connection_state::just_connected; new_peer->their_state = peer_connection::their_connection_state::just_connected; send_hello_message(new_peer); - dlog("Sent \"hello\" to peer ${peer}", ("peer", new_peer->get_remote_endpoint())); + ilog("Sent \"hello\" to peer ${peer}", ("peer", new_peer->get_remote_endpoint())); } } From b211dce4e46b4e936b7a0f3a32915a85eec6c190 Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 8 Sep 2022 22:21:55 +0000 Subject: [PATCH 225/338] Update logging --- libraries/net/node.cpp | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 2132eec296..99be164193 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -470,12 +470,12 @@ namespace graphene { namespace net { namespace detail { try { - dlog("Starting an iteration of update_seed_nodes loop."); + ilog("Starting an iteration of update_seed_nodes loop."); for( const std::string& endpoint_string : _seed_nodes ) { resolve_seed_node_and_add( endpoint_string ); } - dlog("Done an iteration of update_seed_nodes loop."); + ilog("Done an iteration of update_seed_nodes loop."); } catch (const fc::canceled_exception&) { @@ -783,7 +783,7 @@ namespace graphene { namespace net { namespace detail { for (const peer_connection_ptr& peer : _active_connections) { // only advertise to peers who are in sync with us - idump((peer->peer_needs_sync_items_from_us)); + //idump((peer->peer_needs_sync_items_from_us)); // for debug if( !peer->peer_needs_sync_items_from_us ) { std::map > items_to_advertise_by_type; @@ -791,7 +791,7 @@ namespace graphene { namespace net { namespace detail { // or anything it has advertised to us // group the items we need to send by type, because we'll need to send one inventory message per type size_t total_items_to_send = 0; - idump((inventory_to_advertise)); + //idump((inventory_to_advertise)); // for debug for (const item_id& item_to_advertise : inventory_to_advertise) { auto adv_to_peer = peer->inventory_advertised_to_peer.find(item_to_advertise); @@ -812,10 +812,10 @@ namespace graphene { namespace net { namespace detail { } else { - if (adv_to_peer != peer->inventory_advertised_to_peer.end() ) - idump( (*adv_to_peer) ); - if (adv_to_us != peer->inventory_peer_advertised_to_us.end() ) - idump( (*adv_to_us) ); + //if (adv_to_peer != peer->inventory_advertised_to_peer.end() ) // for debug + // idump( (*adv_to_peer) ); // for debug + //if (adv_to_us != peer->inventory_peer_advertised_to_us.end() ) // for debug + // idump( (*adv_to_us) ); // for debug } } dlog("advertising ${count} new item(s) of ${types} type(s) to 
peer ${endpoint}", @@ -1772,7 +1772,7 @@ namespace graphene { namespace net { namespace detail { "I'm already connected to you" ); originating_peer->their_state = peer_connection::their_connection_state::connection_rejected; originating_peer->send_message( message(connection_rejected) ); - dlog("Received a hello_message from peer ${peer} that I'm already connected to (with id ${id}), rejection", + ilog("Received a hello_message from peer ${peer} that I'm already connected to (with id ${id}), rejection", ("peer", originating_peer->get_remote_endpoint()) ("id", originating_peer->node_id)); // If already connected, we disconnect @@ -1839,14 +1839,14 @@ namespace graphene { namespace net { namespace detail { "not accepting any more incoming connections"); originating_peer->their_state = peer_connection::their_connection_state::connection_rejected; originating_peer->send_message(message(connection_rejected)); - dlog("Received a hello_message from peer ${peer}, but I'm not accepting any more connections, rejection", + ilog("Received a hello_message from peer ${peer}, but I'm not accepting any more connections, rejection", ("peer", originating_peer->get_remote_endpoint())); } else { originating_peer->their_state = peer_connection::their_connection_state::connection_accepted; originating_peer->send_message(message(connection_accepted_message())); - dlog("Received a hello_message from peer ${peer}, sending reply to accept connection", + ilog("Received a hello_message from peer ${peer}, sending reply to accept connection", ("peer", originating_peer->get_remote_endpoint())); } } @@ -1866,7 +1866,7 @@ namespace graphene { namespace net { namespace detail { return; } - dlog( "Received a connection_accepted in response to my \"hello\" from ${peer}", + ilog( "Received a connection_accepted in response to my \"hello\" from ${peer}", ("peer", originating_peer->get_remote_endpoint()) ); originating_peer->negotiation_status = peer_connection::connection_negotiation_status::peer_connection_accepted; originating_peer->our_state = peer_connection::our_connection_state::connection_accepted; @@ -2122,7 +2122,7 @@ namespace graphene { namespace net { namespace detail { bool disconnect_from_inhibited_peer = false; // if our client doesn't have any items after the item the peer requested, it will send back // a list containing the last item the peer requested - idump((reply_message)(fetch_blockchain_item_ids_message_received.blockchain_synopsis)); + //idump((reply_message)(fetch_blockchain_item_ids_message_received.blockchain_synopsis)); // for debug if( reply_message.item_hashes_available.empty() ) originating_peer->peer_needs_sync_items_from_us = false; /* I have no items in my blockchain */ else if( !fetch_blockchain_item_ids_message_received.blockchain_synopsis.empty() && @@ -2930,7 +2930,7 @@ namespace graphene { namespace net { namespace detail { { std::vector contained_transaction_msg_ids; _delegate->handle_block(block_message_to_send, true, contained_transaction_msg_ids); - ilog("Successfully pushed sync block ${num} (id:${id})", + dlog("Successfully pushed sync block ${num} (id:${id})", ("num", block_message_to_send.block.block_num()) ("id", block_message_to_send.block_id)); _most_recent_blocks_accepted.push_back(block_message_to_send.block_id); @@ -3296,7 +3296,7 @@ namespace graphene { namespace net { namespace detail { std::vector contained_transaction_msg_ids; _delegate->handle_block(block_message_to_process, false, contained_transaction_msg_ids); message_validated_time = fc::time_point::now(); - 
ilog("Successfully pushed block ${num} (id:${id})", + dlog("Successfully pushed block ${num} (id:${id})", ("num", block_message_to_process.block.block_num()) ("id", block_message_to_process.block_id)); _most_recent_blocks_accepted.push_back(block_message_to_process.block_id); From 67bbdea4b05438ad4543edc9153410f5ee1493a9 Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 8 Sep 2022 23:45:29 +0000 Subject: [PATCH 226/338] Save remote endpoint of inbound peer to peer db --- libraries/net/node.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 99be164193..3c2a22e924 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -1813,9 +1813,13 @@ namespace graphene { namespace net { namespace detail { fc::flat_set endpoints_to_save; endpoints_to_save.insert( *originating_peer->get_endpoint_for_connecting() ); - // Second, we add the address we see, with the inbound port the peer told us. + // Second, we add the address and port we see. // It might be the same as above, but that's OK. fc::ip::endpoint peers_actual_outbound_endpoint = *originating_peer->get_remote_endpoint(); + endpoints_to_save.insert( peers_actual_outbound_endpoint ); + + // Third, we add the address we see, with the inbound port the peer told us. + // It might be the same as above, but that's OK. endpoints_to_save.insert( fc::ip::endpoint( peers_actual_outbound_endpoint.get_address(), originating_peer->inbound_port ) ); From 9a7f723a5d7b22835d1245c14b0ac52759bb0046 Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 8 Sep 2022 23:56:24 +0000 Subject: [PATCH 227/338] Update logging --- libraries/net/node.cpp | 31 ++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 3c2a22e924..b0bc5d37c2 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -1802,7 +1802,8 @@ namespace graphene { namespace net { namespace detail { } else if( 0 == originating_peer->inbound_port ) { - dlog( "peer did not give an inbound port so I'm treating them as if they are firewalled." ); + ilog( "peer ${peer} did not give an inbound port so I'm treating them as if they are firewalled.", + ("peer", originating_peer->get_remote_endpoint()) ); originating_peer->is_firewalled = firewalled_state::firewalled; } else @@ -1823,6 +1824,9 @@ namespace graphene { namespace net { namespace detail { endpoints_to_save.insert( fc::ip::endpoint( peers_actual_outbound_endpoint.get_address(), originating_peer->inbound_port ) ); + ilog( "Saving potential endpoints to the peer database for peer ${peer}: ${endpoints}", + ("peer", originating_peer->get_remote_endpoint()) ("endpoints", endpoints_to_save) ); + for( const auto& ep : endpoints_to_save ) { // add to the peer database @@ -4590,20 +4594,27 @@ namespace graphene { namespace net { namespace detail { { VERIFY_CORRECT_THREAD(); ilog( "----------------- PEER STATUS UPDATE --------------------" ); - ilog( " number of peers: ${active} active, ${handshaking}, ${closing} closing. attempting to maintain ${desired} - ${maximum} peers", - ( "active", _active_connections.size() )("handshaking", _handshaking_connections.size() )("closing",_closing_connections.size() ) + ilog( " number of peers: ${active} active, ${handshaking} handshaking, ${closing} closing. 
" + " attempting to maintain ${desired} - ${maximum} peers", + ( "active", _active_connections.size() )("handshaking", _handshaking_connections.size() ) + ( "closing", _closing_connections.size() ) ( "desired", _desired_number_of_connections )("maximum", _maximum_number_of_connections ) ); { fc::scoped_lock lock(_active_connections.get_mutex()); for( const peer_connection_ptr& peer : _active_connections ) { - ilog( " active peer ${endpoint} peer_is_in_sync_with_us:${in_sync_with_us} we_are_in_sync_with_peer:${in_sync_with_them}", + ilog( " active peer ${endpoint} [${direction}] peer_is_in_sync_with_us:${in_sync_with_us} " + "we_are_in_sync_with_peer:${in_sync_with_them}", ( "endpoint", peer->get_remote_endpoint() ) - ( "in_sync_with_us", !peer->peer_needs_sync_items_from_us )("in_sync_with_them", !peer->we_need_sync_items_from_peer ) ); + ( "direction", peer->direction ) + ( "in_sync_with_us", !peer->peer_needs_sync_items_from_us ) + ( "in_sync_with_them", !peer->we_need_sync_items_from_peer ) ); if( peer->we_need_sync_items_from_peer ) - ilog( " above peer has ${count} sync items we might need", ("count", peer->ids_of_items_to_get.size() ) ); + ilog( " above peer has ${count} sync items we might need", + ("count", peer->ids_of_items_to_get.size() ) ); if (peer->inhibit_fetching_sync_blocks) - ilog( " we are not fetching sync blocks from the above peer (inhibit_fetching_sync_blocks == true)" ); + ilog( " we are not fetching sync blocks from the above peer " + "(inhibit_fetching_sync_blocks == true)" ); } } @@ -4611,8 +4622,10 @@ namespace graphene { namespace net { namespace detail { fc::scoped_lock lock(_handshaking_connections.get_mutex()); for( const peer_connection_ptr& peer : _handshaking_connections ) { - ilog( " handshaking peer ${endpoint} in state ours(${our_state}) theirs(${their_state})", - ( "endpoint", peer->get_remote_endpoint() )("our_state", peer->our_state )("their_state", peer->their_state ) ); + ilog( " handshaking peer ${endpoint} [${direction}] in state ours(${our_state}) theirs(${their_state})", + ( "endpoint", peer->get_remote_endpoint() ) + ( "direction", peer->direction ) + ( "our_state", peer->our_state )( "their_state", peer->their_state ) ); } } ilog( "--------- MEMORY USAGE ------------" ); From e0721f9aaee90a1c4a1ac7c84697175eb84d85a7 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 9 Sep 2022 00:33:54 +0000 Subject: [PATCH 228/338] Update logging --- libraries/net/node.cpp | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index b0bc5d37c2..695d4ae2c9 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -812,10 +812,12 @@ namespace graphene { namespace net { namespace detail { } else { - //if (adv_to_peer != peer->inventory_advertised_to_peer.end() ) // for debug - // idump( (*adv_to_peer) ); // for debug - //if (adv_to_us != peer->inventory_peer_advertised_to_us.end() ) // for debug - // idump( (*adv_to_us) ); // for debug + if( adv_to_peer != peer->inventory_advertised_to_peer.end() ) + dlog( "adv_to_peer != peer->inventory_advertised_to_peer.end() : ${adv_to_peer}", + ("adv_to_peer", *adv_to_peer) ); + if( adv_to_us != peer->inventory_peer_advertised_to_us.end() ) + dlog( "adv_to_us != peer->inventory_peer_advertised_to_us.end() : ${adv_to_us}", + ("adv_to_us", *adv_to_us) ); } } dlog("advertising ${count} new item(s) of ${types} type(s) to peer ${endpoint}", From 75686efbc1e104ee7b019a0530b2b6088068cf07 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 9 Sep 2022 
06:29:53 +0000 Subject: [PATCH 229/338] Update logging --- libraries/net/node.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 695d4ae2c9..f4f98892a3 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -1755,7 +1755,8 @@ namespace graphene { namespace net { namespace detail { || new_inbound_endpoint->get_address().is_public_address() || !old_inbound_endpoint->get_address().is_public_address() ) { - ilog( "Saving verification result for peer ${peer} with id ${id}", + ilog( "Saving verification result ${ep} for peer ${peer} with id ${id}", + ("ep", new_inbound_endpoint) ("peer", already_connected_peer->get_remote_endpoint()) ("id", already_connected_peer->node_id) ); already_connected_peer->remote_inbound_endpoint = new_inbound_endpoint; From 984b1b62c1eea1fa7a4a9d98c64f26823ec5f4a3 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 12 Sep 2022 00:03:28 +0000 Subject: [PATCH 230/338] Update logging --- libraries/net/node.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index f4f98892a3..9dedaf50f2 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -4606,10 +4606,12 @@ namespace graphene { namespace net { namespace detail { fc::scoped_lock lock(_active_connections.get_mutex()); for( const peer_connection_ptr& peer : _active_connections ) { - ilog( " active peer ${endpoint} [${direction}] peer_is_in_sync_with_us:${in_sync_with_us} " - "we_are_in_sync_with_peer:${in_sync_with_them}", + ilog( " active peer ${endpoint} [${direction}] (${inbound_ep} ${verified}) " + "peer_is_in_sync_with_us:${in_sync_with_us} we_are_in_sync_with_peer:${in_sync_with_them}", ( "endpoint", peer->get_remote_endpoint() ) ( "direction", peer->direction ) + ( "inbound_ep", peer->get_endpoint_for_connecting() ) + ( "verified", peer->inbound_endpoint_verified ? 
"verified" : "not_verified" ) ( "in_sync_with_us", !peer->peer_needs_sync_items_from_us ) ( "in_sync_with_them", !peer->we_need_sync_items_from_peer ) ); if( peer->we_need_sync_items_from_peer ) From 62360438e5ec301679484ecc6f719bcc363c6fb4 Mon Sep 17 00:00:00 2001 From: Abit Date: Mon, 12 Sep 2022 00:13:26 +0200 Subject: [PATCH 231/338] Add logging about sending bulk data in replay mode --- libraries/plugins/es_objects/es_objects.cpp | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index f7a2f4181e..b47f0ef600 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -89,7 +89,7 @@ class es_objects_plugin_impl uint32_t start_es_after_block = 0; bool sync_db_on_startup = false; - + void init(const boost::program_options::variables_map& options); }; @@ -122,6 +122,9 @@ class es_objects_plugin_impl uint32_t limit_documents = _options.bulk_replay; + uint64_t docs_sent_batch = 0; + uint64_t docs_sent_total = 0; + std::unique_ptr es; vector bulk_lines; @@ -167,6 +170,8 @@ struct data_loader [this, &opt](const graphene::db::object &o) { my->prepareTemplate( static_cast(o), opt ); }); + my->send_bulk_if_ready(true); + my->docs_sent_batch = 0; } }; @@ -328,6 +333,20 @@ void es_objects_plugin_impl::send_bulk_if_ready( bool force ) return; if( !force && bulk_lines.size() < limit_documents ) return; + constexpr uint32_t log_count_threshold = 20000; // lines + constexpr uint32_t log_time_threshold = 3600; // seconds + static uint64_t next_log_count = log_count_threshold; + static fc::time_point next_log_time = fc::time_point::now() + fc::seconds(log_time_threshold); + docs_sent_batch += bulk_lines.size(); + docs_sent_total += bulk_lines.size(); + bool log_by_next = ( docs_sent_total >= next_log_count || fc::time_point::now() >= next_log_time ); + if( log_by_next || limit_documents == _options.bulk_replay || force ) + { + ilog( "Sending ${n} lines of bulk data to ElasticSearch, this batch ${b}, total ${t}", + ("n",bulk_lines.size())("b",docs_sent_batch)("t",docs_sent_total) ); + next_log_count = docs_sent_total + log_count_threshold; + next_log_time = fc::time_point::now() + fc::seconds(log_time_threshold); + } // send data to elasticsearch when being forced or bulk is too large if( !es->send_bulk( bulk_lines ) ) { From 90e6f707b318e9014e625be38d29827991183104 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 12 Sep 2022 05:47:12 +0000 Subject: [PATCH 232/338] Remove trailing whitespaces --- libraries/plugins/es_objects/es_objects.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index b47f0ef600..88b9abbdf6 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -89,7 +89,7 @@ class es_objects_plugin_impl uint32_t start_es_after_block = 0; bool sync_db_on_startup = false; - + void init(const boost::program_options::variables_map& options); }; From 1eddc9f1c9f5609b3a2e039290e7caf234597315 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 12 Sep 2022 16:46:25 +0000 Subject: [PATCH 233/338] Extend get_order_book API to return more info --- libraries/app/api_objects.cpp | 24 +++++++++++++++++++ libraries/app/database_api.cpp | 23 ++++++++---------- .../app/include/graphene/app/api_objects.hpp | 17 ++++++++++++- 3 files changed, 50 insertions(+), 14 deletions(-) 
diff --git a/libraries/app/api_objects.cpp b/libraries/app/api_objects.cpp index a9be85244c..49d911189c 100644 --- a/libraries/app/api_objects.cpp +++ b/libraries/app/api_objects.cpp @@ -27,6 +27,30 @@ namespace graphene { namespace app { +order::order( const string& _price, + const string& _quote, + const string& _base, + const limit_order_id_type& _id, + const account_id_type& _oid, + const string& _oname, + const time_point_sec& _exp ) +: price( _price ), + quote( _quote ), + base( _base ), + id( _id ), + owner_id( _oid ), + owner_name( _oname ), + expiration( _exp ) +{ + // Nothing to do +} + +order_book::order_book( const string& _base, const string& _quote ) +: base( _base ), quote( _quote ) +{ + // Do nothing else +} + market_ticker::market_ticker(const market_ticker_object& mto, const fc::time_point_sec& now, const asset_object& asset_base, diff --git a/libraries/app/database_api.cpp b/libraries/app/database_api.cpp index e474075fef..6aab692409 100644 --- a/libraries/app/database_api.cpp +++ b/libraries/app/database_api.cpp @@ -1417,9 +1417,7 @@ order_book database_api_impl::get_order_book( const string& base, const string& "limit can not be greater than ${configured_limit}", ("configured_limit", configured_limit) ); - order_book result; - result.base = base; - result.quote = quote; + order_book result( base, quote ); auto assets = lookup_asset_symbols( {base, quote} ); FC_ASSERT( assets[0], "Invalid base asset symbol: ${s}", ("s",base) ); @@ -1431,25 +1429,24 @@ order_book database_api_impl::get_order_book( const string& base, const string& for( const auto& o : orders ) { + auto order_price = price_to_string( o.sell_price, *assets[0], *assets[1] ); if( o.sell_price.base.asset_id == base_id ) { - order ord; - ord.price = price_to_string( o.sell_price, *assets[0], *assets[1] ); - ord.quote = assets[1]->amount_to_string( share_type( fc::uint128_t( o.for_sale.value ) + auto quote_amt = assets[1]->amount_to_string( share_type( fc::uint128_t( o.for_sale.value ) * o.sell_price.quote.amount.value / o.sell_price.base.amount.value ) ); - ord.base = assets[0]->amount_to_string( o.for_sale ); - result.bids.push_back( ord ); + auto base_amt = assets[0]->amount_to_string( o.for_sale ); + result.bids.emplace_back( order_price, quote_amt, base_amt, o.id, + o.seller, o.seller(_db).name, o.expiration ); } else { - order ord; - ord.price = price_to_string( o.sell_price, *assets[0], *assets[1] ); - ord.quote = assets[1]->amount_to_string( o.for_sale ); - ord.base = assets[0]->amount_to_string( share_type( fc::uint128_t( o.for_sale.value ) + auto quote_amt = assets[1]->amount_to_string( o.for_sale ); + auto base_amt = assets[0]->amount_to_string( share_type( fc::uint128_t( o.for_sale.value ) * o.sell_price.quote.amount.value / o.sell_price.base.amount.value ) ); - result.asks.push_back( ord ); + result.asks.emplace_back( order_price, quote_amt, base_amt, o.id, + o.seller, o.seller(_db).name, o.expiration ); } } diff --git a/libraries/app/include/graphene/app/api_objects.hpp b/libraries/app/include/graphene/app/api_objects.hpp index 328fd0b946..6e77291fb8 100644 --- a/libraries/app/include/graphene/app/api_objects.hpp +++ b/libraries/app/include/graphene/app/api_objects.hpp @@ -84,6 +84,19 @@ namespace graphene { namespace app { string price; string quote; string base; + limit_order_id_type id; + account_id_type owner_id; + string owner_name; + time_point_sec expiration; + + order() = default; + order( const string& _price, + const string& _quote, + const string& _base, + const limit_order_id_type& 
_id, + const account_id_type& _oid, + const string& _oname, + const time_point_sec& _exp ); }; struct order_book @@ -92,6 +105,8 @@ namespace graphene { namespace app { string quote; vector< order > bids; vector< order > asks; + order_book() = default; + order_book( const string& _base, const string& _quote ); }; struct market_ticker @@ -191,7 +206,7 @@ FC_REFLECT( graphene::app::full_account, (more_data_available) ) -FC_REFLECT( graphene::app::order, (price)(quote)(base) ) +FC_REFLECT( graphene::app::order, (price)(quote)(base)(id)(owner_id)(owner_name)(expiration) ) FC_REFLECT( graphene::app::order_book, (base)(quote)(bids)(asks) ) FC_REFLECT( graphene::app::market_ticker, (time)(base)(quote)(latest)(lowest_ask)(lowest_ask_base_size)(lowest_ask_quote_size) From 4ab7acfac81adda4fc71b8e622fb36053a6cedea Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 12 Sep 2022 16:59:24 +0000 Subject: [PATCH 234/338] Update tests for extended get_order_book API --- tests/tests/api_limit_tests.cpp | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/tests/tests/api_limit_tests.cpp b/tests/tests/api_limit_tests.cpp index c2bc8db184..3dd518cf9c 100644 --- a/tests/tests/api_limit_tests.cpp +++ b/tests/tests/api_limit_tests.cpp @@ -215,7 +215,7 @@ BOOST_AUTO_TEST_CASE( api_limit_get_settle_orders ){ } } BOOST_AUTO_TEST_CASE( api_limit_get_order_book ){ - try{ + try { graphene::app::database_api db_api( db, &( app.get_options() )); auto nathan_private_key = generate_private_key("nathan"); auto dan_private_key = generate_private_key("dan"); @@ -223,21 +223,26 @@ BOOST_AUTO_TEST_CASE( api_limit_get_order_book ){ account_id_type dan_id = create_account("dan", dan_private_key.get_public_key()).id; transfer(account_id_type(), nathan_id, asset(100)); transfer(account_id_type(), dan_id, asset(100)); - asset_id_type bitusd_id = create_bitasset( - "USDBIT", nathan_id, 100, disable_force_settle).id; - asset_id_type bitdan_id = create_bitasset( - "DANBIT", dan_id, 100, disable_force_settle).id; + asset_id_type bitusd_id = create_user_issued_asset( "USDBIT", nathan_id(db), charge_market_fee).id; + asset_id_type bitdan_id = create_user_issued_asset( "DANBIT", dan_id(db), charge_market_fee).id; + issue_uia( nathan_id, asset(100, bitusd_id) ); + issue_uia( dan_id, asset(100, bitdan_id) ); + create_sell_order( nathan_id, asset(100, bitusd_id), asset(10000, bitdan_id) ); + create_sell_order( dan_id, asset(100, bitdan_id), asset(10000, bitusd_id) ); generate_block(); fc::usleep(fc::milliseconds(100)); GRAPHENE_CHECK_THROW(db_api.get_order_book(std::string(static_cast(bitusd_id)), std::string(static_cast(bitdan_id)),89), fc::exception); graphene::app::order_book result =db_api.get_order_book(std::string( static_cast(bitusd_id)), std::string(static_cast(bitdan_id)),78); - BOOST_REQUIRE_EQUAL( result.bids.size(), 0u); - }catch (fc::exception& e) { + BOOST_REQUIRE_EQUAL( result.bids.size(), 1u ); + BOOST_CHECK( result.bids.front().owner_name == "nathan" ); + BOOST_REQUIRE_EQUAL( result.asks.size(), 1u ); + BOOST_CHECK( result.asks.front().owner_name == "dan" ); + } catch (fc::exception& e) { edump((e.to_detail_string())); throw; - } + } } BOOST_AUTO_TEST_CASE( api_limit_lookup_accounts ) { From c4e1cdd03043c3f210fab029ef5796068b420e23 Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 13 Sep 2022 22:09:55 +0000 Subject: [PATCH 235/338] Add creation block num and time for accnts, assets --- libraries/chain/account_evaluator.cpp | 6 ++++- libraries/chain/account_object.cpp | 1 + 
libraries/chain/asset_evaluator.cpp | 4 ++- libraries/chain/db_block.cpp | 2 ++ libraries/chain/db_genesis.cpp | 27 ++++++++++++++++--- .../include/graphene/chain/account_object.hpp | 5 ++++ .../include/graphene/chain/asset_object.hpp | 8 +++++- .../chain/include/graphene/chain/config.hpp | 2 +- .../chain/include/graphene/chain/database.hpp | 3 +++ 9 files changed, 51 insertions(+), 7 deletions(-) diff --git a/libraries/chain/account_evaluator.cpp b/libraries/chain/account_evaluator.cpp index b9070c91a9..7fc5dda0ec 100644 --- a/libraries/chain/account_evaluator.cpp +++ b/libraries/chain/account_evaluator.cpp @@ -177,7 +177,8 @@ object_id_type account_create_evaluator::do_apply( const account_create_operatio const auto& global_properties = d.get_global_properties(); - const auto& new_acnt_object = d.create( [&o,&d,&global_properties,referrer_percent]( account_object& obj ) + const auto& new_acnt_object = d.create( [&o,&d,&global_properties,referrer_percent] + ( account_object& obj ) { obj.registrar = o.registrar; obj.referrer = o.referrer; @@ -208,6 +209,9 @@ object_id_type account_create_evaluator::do_apply( const account_create_operatio obj.allowed_assets = o.extensions.value.buyback_options->markets; obj.allowed_assets->emplace( o.extensions.value.buyback_options->asset_to_buy ); } + + obj.creation_block_num = d._current_block_num; + obj.creation_time = d._current_block_time; }); const auto& dynamic_properties = d.get_dynamic_global_properties(); diff --git a/libraries/chain/account_object.cpp b/libraries/chain/account_object.cpp index 627de8e899..0d98b774cd 100644 --- a/libraries/chain/account_object.cpp +++ b/libraries/chain/account_object.cpp @@ -312,6 +312,7 @@ FC_REFLECT_DERIVED_NO_TYPENAME( graphene::chain::account_object, (owner_special_authority)(active_special_authority) (top_n_control_flags) (allowed_assets) + (creation_block_num)(creation_time) ) FC_REFLECT_DERIVED_NO_TYPENAME( graphene::chain::account_balance_object, diff --git a/libraries/chain/asset_evaluator.cpp b/libraries/chain/asset_evaluator.cpp index cca9f351a4..e17b4b8fdf 100644 --- a/libraries/chain/asset_evaluator.cpp +++ b/libraries/chain/asset_evaluator.cpp @@ -310,7 +310,7 @@ object_id_type asset_create_evaluator::do_apply( const asset_create_operation& o }).id; const asset_object& new_asset = - d.create( [&op,next_asset_id,&dyn_asset,bit_asset_id]( asset_object& a ) { + d.create( [&op,next_asset_id,&dyn_asset,bit_asset_id,&d]( asset_object& a ) { a.issuer = op.issuer; a.symbol = op.symbol; a.precision = op.precision; @@ -322,6 +322,8 @@ object_id_type asset_create_evaluator::do_apply( const asset_create_operation& o a.dynamic_asset_data_id = dyn_asset.id; if( op.bitasset_opts.valid() ) a.bitasset_data_id = bit_asset_id; + a.creation_block_num = d._current_block_num; + a.creation_time = d._current_block_time; }); FC_ASSERT( new_asset.id == next_asset_id, "Unexpected object database error, object id mismatch" ); diff --git a/libraries/chain/db_block.cpp b/libraries/chain/db_block.cpp index 7dab0b27d6..2a9ebfb22d 100644 --- a/libraries/chain/db_block.cpp +++ b/libraries/chain/db_block.cpp @@ -611,6 +611,8 @@ void database::_apply_block( const signed_block& next_block ) _current_block_num = next_block_num; _current_trx_in_block = 0; + _current_block_time = next_block.timestamp; + _issue_453_affected_assets.clear(); signed_block processed_block( next_block ); // make a copy diff --git a/libraries/chain/db_genesis.cpp b/libraries/chain/db_genesis.cpp index b3267fbffd..4624c16d81 100644 --- 
a/libraries/chain/db_genesis.cpp +++ b/libraries/chain/db_genesis.cpp @@ -70,6 +70,8 @@ void database::init_genesis(const genesis_state_type& genesis_state) transaction_evaluation_state genesis_eval_state(this); + _current_block_time = genesis_state.initial_timestamp; + // Create blockchain accounts fc::ecc::private_key null_private_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("null_key"))); create([](account_balance_object& b) { @@ -88,6 +90,8 @@ void database::init_genesis(const genesis_state_type& genesis_state) s.name = n.name; s.core_in_balance = GRAPHENE_MAX_SHARE_SUPPLY; }).id; + n.creation_block_num = 0; + n.creation_time = _current_block_time; }); FC_ASSERT(committee_account.get_id() == GRAPHENE_COMMITTEE_ACCOUNT); FC_ASSERT(create([this](account_object& a) { @@ -104,6 +108,8 @@ void database::init_genesis(const genesis_state_type& genesis_state) a.membership_expiration_date = time_point_sec::maximum(); a.network_fee_percentage = GRAPHENE_DEFAULT_NETWORK_PERCENT_OF_FEE; a.lifetime_referrer_fee_percentage = GRAPHENE_100_PERCENT - GRAPHENE_DEFAULT_NETWORK_PERCENT_OF_FEE; + a.creation_block_num = 0; + a.creation_time = _current_block_time; }).get_id() == GRAPHENE_WITNESS_ACCOUNT); FC_ASSERT(create([this](account_object& a) { a.name = "relaxed-committee-account"; @@ -119,6 +125,8 @@ void database::init_genesis(const genesis_state_type& genesis_state) a.membership_expiration_date = time_point_sec::maximum(); a.network_fee_percentage = GRAPHENE_DEFAULT_NETWORK_PERCENT_OF_FEE; a.lifetime_referrer_fee_percentage = GRAPHENE_100_PERCENT - GRAPHENE_DEFAULT_NETWORK_PERCENT_OF_FEE; + a.creation_block_num = 0; + a.creation_time = _current_block_time; }).get_id() == GRAPHENE_RELAXED_COMMITTEE_ACCOUNT); // The same data set is assigned to more than one account auto init_account_data_as_null = [this](account_object& a) { @@ -134,6 +142,8 @@ void database::init_genesis(const genesis_state_type& genesis_state) a.membership_expiration_date = time_point_sec::maximum(); a.network_fee_percentage = 0; a.lifetime_referrer_fee_percentage = GRAPHENE_100_PERCENT; + a.creation_block_num = 0; + a.creation_time = _current_block_time; }; FC_ASSERT(create([&init_account_data_as_null](account_object& a) { a.name = "null-account"; @@ -153,6 +163,8 @@ void database::init_genesis(const genesis_state_type& genesis_state) a.membership_expiration_date = time_point_sec::maximum(); a.network_fee_percentage = GRAPHENE_DEFAULT_NETWORK_PERCENT_OF_FEE; a.lifetime_referrer_fee_percentage = GRAPHENE_100_PERCENT - GRAPHENE_DEFAULT_NETWORK_PERCENT_OF_FEE; + a.creation_block_num = 0; + a.creation_time = _current_block_time; }).get_id() == GRAPHENE_TEMP_ACCOUNT); FC_ASSERT(create([&init_account_data_as_null](account_object& a) { a.name = "proxy-to-self"; @@ -179,6 +191,8 @@ void database::init_genesis(const genesis_state_type& genesis_state) a.membership_expiration_date = time_point_sec::maximum(); a.network_fee_percentage = GRAPHENE_DEFAULT_NETWORK_PERCENT_OF_FEE; a.lifetime_referrer_fee_percentage = GRAPHENE_100_PERCENT - GRAPHENE_DEFAULT_NETWORK_PERCENT_OF_FEE; + a.creation_block_num = 0; + a.creation_time = _current_block_time; }); FC_ASSERT( acct.get_id() == account_id_type(id) ); remove( acct.statistics(*this) ); @@ -191,7 +205,7 @@ void database::init_genesis(const genesis_state_type& genesis_state) a.current_supply = GRAPHENE_MAX_SHARE_SUPPLY; }); const asset_object& core_asset = - create( [&genesis_state,&core_dyn_asset]( asset_object& a ) { + create( [&genesis_state,&core_dyn_asset,this]( asset_object& a 
) { a.symbol = GRAPHENE_SYMBOL; a.options.max_supply = genesis_state.max_core_supply; a.precision = GRAPHENE_BLOCKCHAIN_PRECISION_DIGITS; @@ -203,6 +217,8 @@ void database::init_genesis(const genesis_state_type& genesis_state) a.options.core_exchange_rate.quote.amount = 1; a.options.core_exchange_rate.quote.asset_id = asset_id_type(0); a.dynamic_asset_data_id = core_dyn_asset.id; + a.creation_block_num = 0; + a.creation_time = _current_block_time; }); FC_ASSERT( core_dyn_asset.id == asset_dynamic_data_id_type() ); FC_ASSERT( asset_id_type(core_asset.id) == asset().asset_id ); @@ -219,7 +235,7 @@ void database::init_genesis(const genesis_state_type& genesis_state) create([](asset_dynamic_data_object& a) { a.current_supply = 0; }); - const asset_object& asset_obj = create( [id,&dyn_asset]( asset_object& a ) { + const asset_object& asset_obj = create( [id,&dyn_asset,this]( asset_object& a ) { a.symbol = "SPECIAL" + std::to_string( id ); a.options.max_supply = 0; a.precision = GRAPHENE_BLOCKCHAIN_PRECISION_DIGITS; @@ -231,6 +247,8 @@ void database::init_genesis(const genesis_state_type& genesis_state) a.options.core_exchange_rate.quote.amount = 1; a.options.core_exchange_rate.quote.asset_id = asset_id_type(0); a.dynamic_asset_data_id = dyn_asset.id; + a.creation_block_num = 0; + a.creation_time = _current_block_time; }); FC_ASSERT( asset_obj.get_id() == asset_id_type(id) ); remove( dyn_asset ); @@ -378,7 +396,8 @@ void database::init_genesis(const genesis_state_type& genesis_state) total_supplies[ new_asset_id ] += asst.accumulated_fees; - create([&asst,&get_account_id,&dynamic_data_id,&bitasset_data_id](asset_object& a) { + create([&asst,&get_account_id,&dynamic_data_id,&bitasset_data_id,this](asset_object& a) + { a.symbol = asst.symbol; a.options.description = asst.description; a.precision = asst.precision; @@ -390,6 +409,8 @@ void database::init_genesis(const genesis_state_type& genesis_state) : DEFAULT_UIA_ASSET_ISSUER_PERMISSION ); a.dynamic_asset_data_id = dynamic_data_id; a.bitasset_data_id = bitasset_data_id; + a.creation_block_num = 0; + a.creation_time = _current_block_time; }); } diff --git a/libraries/chain/include/graphene/chain/account_object.hpp b/libraries/chain/include/graphene/chain/account_object.hpp index 8b1982cf49..66aefa2dcd 100644 --- a/libraries/chain/include/graphene/chain/account_object.hpp +++ b/libraries/chain/include/graphene/chain/account_object.hpp @@ -287,6 +287,11 @@ namespace graphene { namespace chain { */ optional< flat_set > allowed_assets; + /// The block number when the account was created + uint32_t creation_block_num = 0; + /// The time when the account was created + time_point_sec creation_time; + bool has_special_authority()const { return (!owner_special_authority.is_type< no_special_authority >()) diff --git a/libraries/chain/include/graphene/chain/asset_object.hpp b/libraries/chain/include/graphene/chain/asset_object.hpp index fcd675a829..3dd11bb344 100644 --- a/libraries/chain/include/graphene/chain/asset_object.hpp +++ b/libraries/chain/include/graphene/chain/asset_object.hpp @@ -141,7 +141,6 @@ namespace graphene { namespace chain { asset_options options; - /// Current supply, fee pool, and collected fees are stored in a separate object as they change frequently. asset_dynamic_data_id_type dynamic_asset_data_id; /// Extra data associated with BitAssets. 
This field is non-null if and only if is_market_issued() returns true @@ -152,6 +151,11 @@ namespace graphene { namespace chain { /// The ID of the liquidity pool if the asset is the share asset of a liquidity pool optional for_liquidity_pool; + /// The block number when the asset object was created + uint32_t creation_block_num = 0; + /// The time when the asset object was created + time_point_sec creation_time; + asset_id_type get_id()const { return id; } void validate()const @@ -486,6 +490,8 @@ FC_REFLECT_DERIVED( graphene::chain::asset_object, (graphene::db::object), (bitasset_data_id) (buyback_account) (for_liquidity_pool) + (creation_block_num) + (creation_time) ) FC_REFLECT_TYPENAME( graphene::chain::asset_bitasset_data_object ) diff --git a/libraries/chain/include/graphene/chain/config.hpp b/libraries/chain/include/graphene/chain/config.hpp index 99b7da3599..598df91053 100644 --- a/libraries/chain/include/graphene/chain/config.hpp +++ b/libraries/chain/include/graphene/chain/config.hpp @@ -32,7 +32,7 @@ #define GRAPHENE_MAX_NESTED_OBJECTS (200) -const std::string GRAPHENE_CURRENT_DB_VERSION = "20210806"; +const std::string GRAPHENE_CURRENT_DB_VERSION = "20220913"; #define GRAPHENE_RECENTLY_MISSED_COUNT_INCREMENT 4 #define GRAPHENE_RECENTLY_MISSED_COUNT_DECREMENT 3 diff --git a/libraries/chain/include/graphene/chain/database.hpp b/libraries/chain/include/graphene/chain/database.hpp index f51574dcb0..cbcbadb61a 100644 --- a/libraries/chain/include/graphene/chain/database.hpp +++ b/libraries/chain/include/graphene/chain/database.hpp @@ -768,7 +768,10 @@ namespace graphene { namespace chain { */ vector > _applied_ops; + public: + fc::time_point_sec _current_block_time; uint32_t _current_block_num = 0; + private: uint16_t _current_trx_in_block = 0; uint16_t _current_op_in_trx = 0; uint32_t _current_virtual_op = 0; From 0dc888b3b2d3b3cd89fb095dd29ece3d2ef2be32 Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 14 Sep 2022 23:19:55 +0000 Subject: [PATCH 236/338] Add tests for account and asset creation time --- tests/common/database_fixture.hpp | 9 +++++++++ tests/tests/operation_tests.cpp | 14 ++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/tests/common/database_fixture.hpp b/tests/common/database_fixture.hpp index 9ab8ec757b..bf88a7bc83 100644 --- a/tests/common/database_fixture.hpp +++ b/tests/common/database_fixture.hpp @@ -585,6 +585,15 @@ struct database_fixture_init : database_fixture_base { asset_id_type mpa1_id(1); BOOST_REQUIRE( mpa1_id(db).is_market_issued() ); BOOST_CHECK( mpa1_id(db).bitasset_data(db).asset_id == mpa1_id ); + + BOOST_CHECK_EQUAL( account_id_type()(db).creation_block_num, 0 ); + BOOST_CHECK( account_id_type()(db).creation_time == genesis_state.initial_timestamp ); + + BOOST_CHECK_EQUAL( asset_id_type()(db).creation_block_num, 0 ); + BOOST_CHECK( asset_id_type()(db).creation_time == genesis_state.initial_timestamp ); + + BOOST_CHECK_EQUAL( mpa1_id(db).creation_block_num, 0 ); + BOOST_CHECK( mpa1_id(db).creation_time == genesis_state.initial_timestamp ); } static void init( database_fixture_init& fixture ) diff --git a/tests/tests/operation_tests.cpp b/tests/tests/operation_tests.cpp index 399da7b374..81f678d3c2 100644 --- a/tests/tests/operation_tests.cpp +++ b/tests/tests/operation_tests.cpp @@ -1974,6 +1974,14 @@ BOOST_AUTO_TEST_CASE( create_account_test ) const account_statistics_object& statistics = nathan_account.statistics(db); BOOST_CHECK(statistics.id.space() == implementation_ids); BOOST_CHECK(statistics.id.type() == 
impl_account_statistics_object_type); + + account_id_type nathan_id = nathan_account.id; + + generate_block(); + + BOOST_CHECK_EQUAL( nathan_id(db).creation_block_num, db.head_block_num() ); + BOOST_CHECK( nathan_id(db).creation_time == db.head_block_time() ); + } catch (fc::exception& e) { edump((e.to_detail_string())); throw; @@ -2216,6 +2224,12 @@ BOOST_AUTO_TEST_CASE( create_uia ) REQUIRE_THROW_WITH_VALUE(op, symbol, "ABCDEFGHIJKLMNOPQRSTUVWXYZ"); REQUIRE_THROW_WITH_VALUE(op, common_options.core_exchange_rate, price(asset(-100), asset(1))); REQUIRE_THROW_WITH_VALUE(op, common_options.core_exchange_rate, price(asset(100),asset(-1))); + + generate_block(); + + BOOST_CHECK_EQUAL( test_asset_id(db).creation_block_num, db.head_block_num() ); + BOOST_CHECK( test_asset_id(db).creation_time == db.head_block_time() ); + } catch(fc::exception& e) { edump((e.to_detail_string())); throw; From 51ced007f43b862e842908a4d6d6601ea12549bc Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 15 Sep 2022 01:05:12 +0000 Subject: [PATCH 237/338] Verify inbound endpoints as soon as possible --- .../include/graphene/net/peer_connection.hpp | 4 ++- libraries/net/node.cpp | 32 +++++++++++++++++-- 2 files changed, 32 insertions(+), 4 deletions(-) diff --git a/libraries/net/include/graphene/net/peer_connection.hpp b/libraries/net/include/graphene/net/peer_connection.hpp index 144878bb6f..25bd9a89a0 100644 --- a/libraries/net/include/graphene/net/peer_connection.hpp +++ b/libraries/net/include/graphene/net/peer_connection.hpp @@ -195,12 +195,14 @@ namespace graphene { namespace net fc::ip::address inbound_address; uint16_t inbound_port = 0; uint16_t outbound_port = 0; - /// The inbound endpoint of the remote peer + /// The inbound endpoint of the remote peer (our best guess) fc::optional remote_inbound_endpoint; /// Whether the inbound endpoint of the remote peer is verified bool inbound_endpoint_verified = false; /// Some nodes may be listening on multiple endpoints fc::flat_set additional_inbound_endpoints; + /// Potential inbound endpoints of the peer + fc::flat_map potential_inbound_endpoints; /// @} using item_to_time_map_type = std::unordered_map; diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 9dedaf50f2..e24e7f4c41 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -1376,6 +1376,9 @@ namespace graphene { namespace net { namespace detail { ("endpoint", originating_peer->get_remote_endpoint())); // Gatekeeping code if( originating_peer->we_have_requested_close + // allow hello_message so we can learn more about the peer + && received_message.msg_type.value() != core_message_type_enum::hello_message_type + // allow closing_connection_message so we can finish disconnecting && received_message.msg_type.value() != core_message_type_enum::closing_connection_message_type ) { dlog( "Unexpected message from peer ${peer} while we have requested to close connection", @@ -1751,9 +1754,14 @@ namespace graphene { namespace net { namespace detail { auto old_inbound_endpoint = already_connected_peer->get_endpoint_for_connecting(); auto new_inbound_endpoint = originating_peer->get_remote_endpoint(); already_connected_peer->additional_inbound_endpoints.insert( *new_inbound_endpoint ); - if ( !already_connected_peer->inbound_endpoint_verified // which implies direction == inbound - || new_inbound_endpoint->get_address().is_public_address() - || !old_inbound_endpoint->get_address().is_public_address() ) + if( peer_connection_direction::inbound == already_connected_peer->direction ) + { + 
already_connected_peer->potential_inbound_endpoints[*new_inbound_endpoint] + = firewalled_state::not_firewalled; + } + if( !already_connected_peer->inbound_endpoint_verified // which implies direction == inbound + || new_inbound_endpoint->get_address().is_public_address() + || !old_inbound_endpoint->get_address().is_public_address() ) { ilog( "Saving verification result ${ep} for peer ${peer} with id ${id}", ("ep", new_inbound_endpoint) @@ -1836,6 +1844,8 @@ namespace graphene { namespace net { namespace detail { auto updated_peer_record = _potential_peer_db.lookup_or_create_entry_for_ep( ep ); updated_peer_record.last_seen_time = fc::time_point::now(); _potential_peer_db.update_entry( updated_peer_record ); + // mark as a potential inbound address + originating_peer->potential_inbound_endpoints[ep] = firewalled_state::unknown; } // Note: we don't update originating_peer->is_firewalled, because we might guess wrong @@ -3666,6 +3676,12 @@ namespace graphene { namespace net { namespace detail { _last_reported_number_of_conns = (uint32_t)_active_connections.size(); _delegate->connection_count_changed( _last_reported_number_of_conns ); } + // If it is an inbound connection, try to verify its inbound endpoint + if( peer_connection_direction::inbound == peer->direction ) + { + for( const auto& potential_inbound_endpoint : peer->potential_inbound_endpoints ) + _add_once_node_list.push_back( potential_peer_record( potential_inbound_endpoint.first ) ); + } } void node_impl::close() @@ -4129,6 +4145,10 @@ namespace graphene { namespace net { namespace detail { updated_peer_record.last_error = *connect_failed_exception; _potential_peer_db.update_entry(updated_peer_record); + // If this is for inbound endpoint verification, + // here we could try to find the original connection and update its firewalled state, + // but it doesn't seem necessary. + // if the connection failed, we want to disconnect now. 
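// Illustrative sketch only (not code from this patch): if such an update were ever wanted, it
// could look roughly like the loop below, run while holding the _active_connections lock as is
// done elsewhere in node.cpp. `failed_endpoint` is a hypothetical name for the endpoint that
// just failed to connect; the map used is the potential_inbound_endpoints member added earlier
// in this series.
//
//    for( const peer_connection_ptr& peer : _active_connections )
//    {
//       if( peer_connection_direction::inbound != peer->direction )
//          continue;
//       auto itr = peer->potential_inbound_endpoints.find( failed_endpoint );
//       if( itr != peer->potential_inbound_endpoints.end() )
//          itr->second = firewalled_state::firewalled; // we could not reach this endpoint from outside
//    }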
_handshaking_connections.erase(new_peer); _terminating_connections.erase(new_peer); @@ -4666,6 +4686,12 @@ namespace graphene { namespace net { namespace detail { // the peer has already told us that it's ready to close the connection, so just close the connection peer_to_disconnect->close_connection(); } + else if( peer_to_disconnect->we_have_requested_close ) + { + dlog( "Disconnecting again from ${peer} for ${reason}, ignore", + ("peer",peer_to_disconnect->get_remote_endpoint()) ("reason",reason_for_disconnect)); + return; + } else { // we're the first to try to want to close the connection From cce56506d6cced597046231870f92aedeb2cff97 Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 15 Sep 2022 01:39:49 +0000 Subject: [PATCH 238/338] Fix a code smell --- libraries/net/node.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index e24e7f4c41..31c754c251 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -3680,7 +3680,7 @@ namespace graphene { namespace net { namespace detail { if( peer_connection_direction::inbound == peer->direction ) { for( const auto& potential_inbound_endpoint : peer->potential_inbound_endpoints ) - _add_once_node_list.push_back( potential_peer_record( potential_inbound_endpoint.first ) ); + _add_once_node_list.emplace_back( potential_inbound_endpoint.first ); } } From d331b4a1e1d762472b56e12b51d8884a63373505 Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 15 Sep 2022 15:46:25 +0000 Subject: [PATCH 239/338] Review inbound peer's inbound endpoint --- libraries/net/node.cpp | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 31c754c251..5ecc9eb374 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -2089,7 +2089,22 @@ namespace graphene { namespace net { namespace detail { } } // else if this was an active connection, then this was just a reply to our periodic address requests. - // we've processed it, there's nothing else to do + // we've processed it. 
+ // Now seems like a good time to review the peer's inbound endpoint and firewalled state + else if( !originating_peer->inbound_endpoint_verified // which implies direction == inbound + && originating_peer->inbound_port != 0 // Ignore if the peer is not listening + // We try not to update it a 2nd time + && originating_peer->get_remote_endpoint() != originating_peer->get_endpoint_for_connecting() ) + { + // Our best guess for the peer's inbound endpoint now is its remote endpoint, + // unless we are behind a reverse proxy, in which case we try to use a public address + if( originating_peer->get_remote_endpoint()->get_address().is_public_address() + || !originating_peer->get_endpoint_for_connecting()->get_address().is_public_address() ) + originating_peer->remote_inbound_endpoint = originating_peer->get_remote_endpoint(); + // else do nothing + + // We could reinitialize inbound endpoint verification here, but it doesn't seem necessary + } } void node_impl::on_fetch_blockchain_item_ids_message(peer_connection* originating_peer, From 0090b1fe341ee8c0778423f1e6b090cd09897109 Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 15 Sep 2022 16:19:33 +0000 Subject: [PATCH 240/338] Fix a code smell --- libraries/net/node.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 5ecc9eb374..f1ff9177de 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -2098,9 +2098,10 @@ namespace graphene { namespace net { namespace detail { { // Our best guess for the peer's inbound endpoint now is its remote endpoint, // unless we are behind a reverse proxy, in which case we try to use a public address - if( originating_peer->get_remote_endpoint()->get_address().is_public_address() + auto remote_endpoint = originating_peer->get_remote_endpoint(); // Note: this returns a copy + if( remote_endpoint->get_address().is_public_address() || !originating_peer->get_endpoint_for_connecting()->get_address().is_public_address() ) - originating_peer->remote_inbound_endpoint = originating_peer->get_remote_endpoint(); + originating_peer->remote_inbound_endpoint = remote_endpoint; // else do nothing // We could reinitialize inbound endpoint verification here, but it doesn't seem necessary From 326d44eafbb8fee108ebd997f7bf97dd6ce03493 Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 15 Sep 2022 20:02:17 +0000 Subject: [PATCH 241/338] Guess inbound endpoint early to avoid revisiting --- libraries/net/node.cpp | 120 ++++++++++++++++++++--------------------- 1 file changed, 57 insertions(+), 63 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index f1ff9177de..fae0cdec48 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -1513,6 +1513,7 @@ namespace graphene { namespace net { namespace detail { void node_impl::on_hello_message( peer_connection* originating_peer, const hello_message& hello_message_received ) { VERIFY_CORRECT_THREAD(); + auto remote_endpoint = originating_peer->get_remote_endpoint(); // Note: this returns a copy // Do gatekeeping first if( originating_peer->their_state != peer_connection::their_connection_state::just_connected ) { @@ -1528,7 +1529,7 @@ namespace graphene { namespace net { namespace detail { // probably need to think through that case. We're not attempting that // yet, though, so it's ok to just disconnect here. 
wlog( "Unexpected hello_message from peer ${peer}, disconnecting", - ("peer", originating_peer->get_remote_endpoint()) ); + ("peer", remote_endpoint) ); disconnect_from_peer( originating_peer, "Received an unexpected hello_message" ); return; } @@ -1537,7 +1538,7 @@ namespace graphene { namespace net { namespace detail { if( hello_message_received.chain_id != _chain_id ) { wlog( "Received hello message from peer ${peer} on a different chain: ${message}", - ("peer", originating_peer->get_remote_endpoint()) + ("peer", remote_endpoint) ("message", hello_message_received) ); // If it is an outbound connection, make sure we won't reconnect to the peer soon if( peer_connection_direction::outbound == originating_peer->direction ) @@ -1545,14 +1546,14 @@ namespace graphene { namespace net { namespace detail { // Note: deleting is not the best approach since it can be readded soon and we will reconnect soon. // Marking it "permanently rejected" is also not good enough since the peer can be "fixed". // It seems the best approach is to reduce its weight significantly. - greatly_delay_next_conn_to( this, *originating_peer->get_remote_endpoint() ); + greatly_delay_next_conn_to( this, *remote_endpoint ); } // Now reject std::ostringstream rejection_message; rejection_message << "You're on a different chain than I am. I'm on " << _chain_id.str() << " and you're on " << hello_message_received.chain_id.str(); connection_rejected_message connection_rejected( _user_agent_string, core_protocol_version, - *originating_peer->get_remote_endpoint(), + *remote_endpoint, rejection_reason_code::different_chain, rejection_message.str() ); originating_peer->their_state = peer_connection::their_connection_state::connection_rejected; @@ -1578,16 +1579,16 @@ namespace graphene { namespace net { namespace detail { catch( const fc::exception& e ) { wlog( "Error when validating signature in hello message from peer ${peer}: ${e}", - ("peer", originating_peer->get_remote_endpoint())("e", e.to_detail_string()) ); + ("peer", remote_endpoint)("e", e.to_detail_string()) ); } if( !expected_node_public_key || hello_message_received.node_public_key != expected_node_public_key->serialize() ) { wlog( "Invalid signature in hello message from peer ${peer}", - ("peer", originating_peer->get_remote_endpoint()) ); + ("peer", remote_endpoint) ); connection_rejected_message connection_rejected( _user_agent_string, core_protocol_version, - *originating_peer->get_remote_endpoint(), + *remote_endpoint, rejection_reason_code::invalid_hello_message, "Invalid signature in hello message" ); originating_peer->their_state = peer_connection::their_connection_state::connection_rejected; @@ -1607,14 +1608,14 @@ namespace graphene { namespace net { namespace detail { { // either it's not there or it's not a valid session id. either way, ignore. 
dlog( "Peer ${endpoint} sent us a hello message without a valid node_id in user_data", - ("endpoint", originating_peer->get_remote_endpoint() ) ); + ("endpoint", remote_endpoint ) ); } // The peer's node_id should not be null static const node_id_t null_node_id; if( null_node_id == peer_node_id ) { wlog( "The node_id in the hello_message from peer ${peer} is null, disconnecting", - ("peer", originating_peer->get_remote_endpoint()) ); + ("peer", remote_endpoint) ); disconnect_from_peer( originating_peer, "Your node_id in the hello_message is null" ); return; } @@ -1623,7 +1624,7 @@ namespace graphene { namespace net { namespace detail { { ilog( "Received a hello_message from peer ${peer} with id ${id} that is myself or claimed to be myself, " "rejection", - ("peer", originating_peer->get_remote_endpoint()) + ("peer", remote_endpoint) ("id", peer_node_id) ); // If it is an outbound connection, make sure we won't reconnect to the peer soon if( peer_connection_direction::outbound == originating_peer->direction ) @@ -1631,13 +1632,13 @@ namespace graphene { namespace net { namespace detail { // Note: deleting is not the best approach since it can be readded soon and we will reconnect soon. // Marking it "permanently rejected" is also not good enough since the peer can be "fixed". // It seems the best approach is to reduce its weight significantly. - greatly_delay_next_conn_to( this, *originating_peer->get_remote_endpoint() ); + greatly_delay_next_conn_to( this, *remote_endpoint ); } // Now reject // Note: this can happen in rare cases if the peer is not actually myself but another node. // Anyway, we see it as ourselves, reject it and disconnect it. connection_rejected_message connection_rejected( _user_agent_string, core_protocol_version, - *originating_peer->get_remote_endpoint(), + *remote_endpoint, rejection_reason_code::connected_to_self, "I'm connecting to myself" ); originating_peer->their_state = peer_connection::their_connection_state::connection_rejected; @@ -1651,14 +1652,11 @@ namespace graphene { namespace net { namespace detail { // store off the data provided in the hello message originating_peer->user_agent = hello_message_received.user_agent; originating_peer->node_public_key = hello_message_received.node_public_key; - // will probably be overwritten in parse_hello_user_data_for_peer() - originating_peer->node_id = hello_message_received.node_public_key; originating_peer->core_protocol_version = hello_message_received.core_protocol_version; originating_peer->inbound_address = hello_message_received.inbound_address; originating_peer->inbound_port = hello_message_received.inbound_port; originating_peer->outbound_port = hello_message_received.outbound_port; - - parse_hello_user_data_for_peer(originating_peer, hello_message_received.user_data); + // Note: more data is stored after initialized remote_inbound_endpoint // For an outbound connection, we know the remote_inbound_endpoint already, so keep it unchanged. // For an inbound connection, we initialize it here. @@ -1679,12 +1677,23 @@ namespace graphene { namespace net { namespace detail { // In addition, by now, our list or exclude list for peer advertisement only contains IP endpoints but not // nodes' public keys (we can't use node_id because it changes every time the node restarts). Using a valid // address is better for the purpose. - originating_peer->remote_inbound_endpoint - = fc::ip::endpoint( originating_peer->inbound_port != 0 ? 
originating_peer->inbound_address - : originating_peer->get_remote_endpoint()->get_address(), - originating_peer->inbound_port ); + if( originating_peer->inbound_port == 0 ) + originating_peer->remote_inbound_endpoint = fc::ip::endpoint( remote_endpoint->get_address() ); + else if( originating_peer->inbound_address.is_public_address() + || originating_peer->inbound_address == remote_endpoint->get_address() ) + originating_peer->remote_inbound_endpoint = fc::ip::endpoint( originating_peer->inbound_address, + originating_peer->inbound_port ); + else + originating_peer->remote_inbound_endpoint = remote_endpoint; } + // Note: store node_id after initialized remote_inbound_endpoint to avoid a race condition + + // will probably be overwritten in parse_hello_user_data_for_peer() + originating_peer->node_id = hello_message_received.node_public_key; + + parse_hello_user_data_for_peer(originating_peer, hello_message_received.user_data); + // if they didn't provide a last known fork, try to guess it if (originating_peer->last_known_fork_block_number == 0 && originating_peer->graphene_git_revision_unix_timestamp) @@ -1712,7 +1721,7 @@ namespace graphene { namespace net { namespace detail { std::ostringstream rejection_message; rejection_message << "Your client is outdated -- you can only understand blocks up to #" << next_fork_block_number << ", but I'm already on block #" << head_block_num; connection_rejected_message connection_rejected(_user_agent_string, core_protocol_version, - *originating_peer->get_remote_endpoint(), + *remote_endpoint, rejection_reason_code::unspecified, rejection_message.str() ); @@ -1734,9 +1743,10 @@ namespace graphene { namespace net { namespace detail { if( peer_connection_direction::outbound == originating_peer->direction && originating_peer->node_public_key == already_connected_peer->node_public_key ) { + auto already_connected_endpoint = already_connected_peer->get_remote_endpoint(); // This returns a copy ilog( "Verified that endpoint ${ep} is reachable and belongs to peer ${peer} with id ${id}", - ("ep", originating_peer->get_remote_endpoint()) - ("peer", already_connected_peer->get_remote_endpoint()) + ("ep", remote_endpoint) + ("peer", already_connected_endpoint) ("id", already_connected_peer->node_id) ); // Do not replace a verified public address with a private or local address. // Note: there is a scenario that some nodes in the same local network may have connected to each other, @@ -1751,40 +1761,38 @@ namespace graphene { namespace net { namespace detail { // that they are in the same local network and connected to each other. // On the other hand, when we skip updates in some cases, we may end up trying to reconnect soon // and endlessly (which is addressed with additional_inbound_endpoints). 
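// A compact way to read the overwrite rule applied just below (hedged paraphrase; the helper
// name is illustrative and does not exist in the code base):
//
//    bool should_overwrite_stored_endpoint( bool endpoint_already_verified,
//                                           const fc::ip::address& newly_seen_addr,
//                                           const fc::ip::address& currently_stored_addr )
//    {
//       // Overwrite if nothing has been verified yet, if the newly seen address is public,
//       // or if the currently stored address is only a private/local one anyway.
//       return !endpoint_already_verified
//              || newly_seen_addr.is_public_address()
//              || !currently_stored_addr.is_public_address();
//    }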
- auto old_inbound_endpoint = already_connected_peer->get_endpoint_for_connecting(); - auto new_inbound_endpoint = originating_peer->get_remote_endpoint(); - already_connected_peer->additional_inbound_endpoints.insert( *new_inbound_endpoint ); + already_connected_peer->additional_inbound_endpoints.insert( *remote_endpoint ); if( peer_connection_direction::inbound == already_connected_peer->direction ) { - already_connected_peer->potential_inbound_endpoints[*new_inbound_endpoint] + already_connected_peer->potential_inbound_endpoints[*remote_endpoint] = firewalled_state::not_firewalled; } if( !already_connected_peer->inbound_endpoint_verified // which implies direction == inbound - || new_inbound_endpoint->get_address().is_public_address() - || !old_inbound_endpoint->get_address().is_public_address() ) + || remote_endpoint->get_address().is_public_address() + || !already_connected_peer->get_endpoint_for_connecting()->get_address().is_public_address() ) { ilog( "Saving verification result ${ep} for peer ${peer} with id ${id}", - ("ep", new_inbound_endpoint) - ("peer", already_connected_peer->get_remote_endpoint()) + ("ep", remote_endpoint) + ("peer", already_connected_endpoint) ("id", already_connected_peer->node_id) ); - already_connected_peer->remote_inbound_endpoint = new_inbound_endpoint; + already_connected_peer->remote_inbound_endpoint = remote_endpoint; already_connected_peer->inbound_endpoint_verified = true; already_connected_peer->is_firewalled = firewalled_state::not_firewalled; } // If the already connected peer is in the active connections list, save the endpoint to the peer db if( peer_connection::connection_negotiation_status::negotiation_complete == already_connected_peer->negotiation_status ) - save_successful_address( this, *new_inbound_endpoint ); + save_successful_address( this, *remote_endpoint ); } // Now reject connection_rejected_message connection_rejected( _user_agent_string, core_protocol_version, - *originating_peer->get_remote_endpoint(), + *remote_endpoint, rejection_reason_code::already_connected, "I'm already connected to you" ); originating_peer->their_state = peer_connection::their_connection_state::connection_rejected; originating_peer->send_message( message(connection_rejected) ); ilog("Received a hello_message from peer ${peer} that I'm already connected to (with id ${id}), rejection", - ("peer", originating_peer->get_remote_endpoint()) + ("peer", remote_endpoint) ("id", originating_peer->node_id)); // If already connected, we disconnect disconnect_from_peer( originating_peer, connection_rejected.reason_string ); @@ -1794,12 +1802,13 @@ namespace graphene { namespace net { namespace detail { _allowed_peers.find(originating_peer->node_id) == _allowed_peers.end()) { connection_rejected_message connection_rejected(_user_agent_string, core_protocol_version, - *originating_peer->get_remote_endpoint(), + *remote_endpoint, rejection_reason_code::blocked, "you are not in my allowed_peers list"); originating_peer->their_state = peer_connection::their_connection_state::connection_rejected; originating_peer->send_message( message(connection_rejected ) ); - dlog( "Received a hello_message from peer ${peer} who isn't in my allowed_peers list, rejection", ("peer", originating_peer->get_remote_endpoint() ) ); + dlog( "Received a hello_message from peer ${peer} who isn't in my allowed_peers list, rejection", + ("peer", remote_endpoint ) ); } #endif // ENABLE_P2P_DEBUGGING_API else @@ -1814,7 +1823,7 @@ namespace graphene { namespace net { namespace detail { else if( 0 == 
originating_peer->inbound_port ) { ilog( "peer ${peer} did not give an inbound port so I'm treating them as if they are firewalled.", - ("peer", originating_peer->get_remote_endpoint()) ); + ("peer", remote_endpoint) ); originating_peer->is_firewalled = firewalled_state::firewalled; } else @@ -1823,20 +1832,20 @@ namespace graphene { namespace net { namespace detail { // First, we add the inbound endpoint that the peer told us it is listening on. fc::flat_set endpoints_to_save; - endpoints_to_save.insert( *originating_peer->get_endpoint_for_connecting() ); + endpoints_to_save.insert( fc::ip::endpoint( originating_peer->inbound_address, + originating_peer->inbound_port ) ); // Second, we add the address and port we see. // It might be the same as above, but that's OK. - fc::ip::endpoint peers_actual_outbound_endpoint = *originating_peer->get_remote_endpoint(); - endpoints_to_save.insert( peers_actual_outbound_endpoint ); + endpoints_to_save.insert( *remote_endpoint ); // Third, we add the address we see, with the inbound port the peer told us. // It might be the same as above, but that's OK. - endpoints_to_save.insert( fc::ip::endpoint( peers_actual_outbound_endpoint.get_address(), + endpoints_to_save.insert( fc::ip::endpoint( remote_endpoint->get_address(), originating_peer->inbound_port ) ); ilog( "Saving potential endpoints to the peer database for peer ${peer}: ${endpoints}", - ("peer", originating_peer->get_remote_endpoint()) ("endpoints", endpoints_to_save) ); + ("peer", remote_endpoint) ("endpoints", endpoints_to_save) ); for( const auto& ep : endpoints_to_save ) { @@ -1855,20 +1864,20 @@ namespace graphene { namespace net { namespace detail { if (!is_accepting_new_connections()) { connection_rejected_message connection_rejected(_user_agent_string, core_protocol_version, - *originating_peer->get_remote_endpoint(), + *remote_endpoint, rejection_reason_code::not_accepting_connections, "not accepting any more incoming connections"); originating_peer->their_state = peer_connection::their_connection_state::connection_rejected; originating_peer->send_message(message(connection_rejected)); ilog("Received a hello_message from peer ${peer}, but I'm not accepting any more connections, rejection", - ("peer", originating_peer->get_remote_endpoint())); + ("peer", remote_endpoint)); } else { originating_peer->their_state = peer_connection::their_connection_state::connection_accepted; originating_peer->send_message(message(connection_accepted_message())); ilog("Received a hello_message from peer ${peer}, sending reply to accept connection", - ("peer", originating_peer->get_remote_endpoint())); + ("peer", remote_endpoint)); } } } @@ -2089,23 +2098,8 @@ namespace graphene { namespace net { namespace detail { } } // else if this was an active connection, then this was just a reply to our periodic address requests. - // we've processed it. 
- // Now seems like a good time to review the peer's inbound endpoint and firewalled state - else if( !originating_peer->inbound_endpoint_verified // which implies direction == inbound - && originating_peer->inbound_port != 0 // Ignore if the peer is not listening - // We try not to update it a 2nd time - && originating_peer->get_remote_endpoint() != originating_peer->get_endpoint_for_connecting() ) - { - // Our best guess for the peer's inbound endpoint now is its remote endpoint, - // unless we are behind a reverse proxy, in which case we try to use a public address - auto remote_endpoint = originating_peer->get_remote_endpoint(); // Note: this returns a copy - if( remote_endpoint->get_address().is_public_address() - || !originating_peer->get_endpoint_for_connecting()->get_address().is_public_address() ) - originating_peer->remote_inbound_endpoint = remote_endpoint; - // else do nothing - - // We could reinitialize inbound endpoint verification here, but it doesn't seem necessary - } + // we've processed it, there's nothing else to do + // Note: we could reinitialize inbound endpoint verification here, but it doesn't seem necessary } void node_impl::on_fetch_blockchain_item_ids_message(peer_connection* originating_peer, From f7023e5e42f0990263f121ee16399e3237d99cc3 Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 15 Sep 2022 20:26:22 +0000 Subject: [PATCH 242/338] Simplify code --- libraries/net/node.cpp | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index fae0cdec48..39c1502baf 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -4549,15 +4549,11 @@ namespace graphene { namespace net { namespace detail { if( peer_connection_direction::outbound == active_peer->direction && endpoint_for_this_peer && *endpoint_for_this_peer == remote_endpoint ) return active_peer; - if( peer_connection_direction::inbound == active_peer->direction - && active_peer->inbound_endpoint_verified // which implies get_endpoint_for_connecting().valid() - && *active_peer->get_endpoint_for_connecting() == remote_endpoint ) + // Note: if it is an inbound connection and its inbound endpoint is verified already, + // the inbound endpoint should be in additional_inbound_endpoints + if( active_peer->additional_inbound_endpoints.find( remote_endpoint ) + != active_peer->additional_inbound_endpoints.end() ) return active_peer; - for( const auto& ep : active_peer->additional_inbound_endpoints ) - { - if( ep == remote_endpoint ) - return active_peer; - } } return peer_connection_ptr(); } @@ -4577,15 +4573,11 @@ namespace graphene { namespace net { namespace detail { fc::optional endpoint_for_this_peer( handshaking_peer->get_remote_endpoint() ); if( endpoint_for_this_peer && *endpoint_for_this_peer == remote_endpoint ) return handshaking_peer; - if( peer_connection_direction::inbound == handshaking_peer->direction - && handshaking_peer->inbound_endpoint_verified // which implies get_endpoint_for_connecting().valid() - && *handshaking_peer->get_endpoint_for_connecting() == remote_endpoint ) + // Note: if it is an inbound connection and its inbound endpoint is verified already, + // the inbound endpoint should be in additional_inbound_endpoints + if( handshaking_peer->additional_inbound_endpoints.find( remote_endpoint ) + != handshaking_peer->additional_inbound_endpoints.end() ) return handshaking_peer; - for( const auto& ep : handshaking_peer->additional_inbound_endpoints ) - { - if( ep == remote_endpoint ) - return 
handshaking_peer; - } } return peer_connection_ptr(); } From 28704f1f05abfa1d83fd6275713437b7d0ce1f12 Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 15 Sep 2022 20:47:09 +0000 Subject: [PATCH 243/338] Remove redundant member: inbound_endpoint_verified --- libraries/net/include/graphene/net/peer_connection.hpp | 2 -- libraries/net/node.cpp | 7 +++---- libraries/net/peer_connection.cpp | 1 - tests/tests/p2p_node_tests.cpp | 1 - 4 files changed, 3 insertions(+), 8 deletions(-) diff --git a/libraries/net/include/graphene/net/peer_connection.hpp b/libraries/net/include/graphene/net/peer_connection.hpp index 25bd9a89a0..8f87485880 100644 --- a/libraries/net/include/graphene/net/peer_connection.hpp +++ b/libraries/net/include/graphene/net/peer_connection.hpp @@ -197,8 +197,6 @@ namespace graphene { namespace net uint16_t outbound_port = 0; /// The inbound endpoint of the remote peer (our best guess) fc::optional remote_inbound_endpoint; - /// Whether the inbound endpoint of the remote peer is verified - bool inbound_endpoint_verified = false; /// Some nodes may be listening on multiple endpoints fc::flat_set additional_inbound_endpoints; /// Potential inbound endpoints of the peer diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 39c1502baf..51e6aa3785 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -1767,7 +1767,7 @@ namespace graphene { namespace net { namespace detail { already_connected_peer->potential_inbound_endpoints[*remote_endpoint] = firewalled_state::not_firewalled; } - if( !already_connected_peer->inbound_endpoint_verified // which implies direction == inbound + if( already_connected_peer->is_firewalled != firewalled_state::not_firewalled // implies it's inbound || remote_endpoint->get_address().is_public_address() || !already_connected_peer->get_endpoint_for_connecting()->get_address().is_public_address() ) { @@ -1776,7 +1776,6 @@ namespace graphene { namespace net { namespace detail { ("peer", already_connected_endpoint) ("id", already_connected_peer->node_id) ); already_connected_peer->remote_inbound_endpoint = remote_endpoint; - already_connected_peer->inbound_endpoint_verified = true; already_connected_peer->is_firewalled = firewalled_state::not_firewalled; } // If the already connected peer is in the active connections list, save the endpoint to the peer db @@ -4628,12 +4627,12 @@ namespace graphene { namespace net { namespace detail { fc::scoped_lock lock(_active_connections.get_mutex()); for( const peer_connection_ptr& peer : _active_connections ) { - ilog( " active peer ${endpoint} [${direction}] (${inbound_ep} ${verified}) " + ilog( " active peer ${endpoint} [${direction}] (${inbound_ep} ${is_firewalled}) " "peer_is_in_sync_with_us:${in_sync_with_us} we_are_in_sync_with_peer:${in_sync_with_them}", ( "endpoint", peer->get_remote_endpoint() ) ( "direction", peer->direction ) ( "inbound_ep", peer->get_endpoint_for_connecting() ) - ( "verified", peer->inbound_endpoint_verified ? 
"verified" : "not_verified" ) + ( "is_firewalled", peer->is_firewalled) ( "in_sync_with_us", !peer->peer_needs_sync_items_from_us ) ( "in_sync_with_them", !peer->we_need_sync_items_from_peer ) ); if( peer->we_need_sync_items_from_peer ) diff --git a/libraries/net/peer_connection.cpp b/libraries/net/peer_connection.cpp index 2ca826fdb9..ad4a99bfb2 100644 --- a/libraries/net/peer_connection.cpp +++ b/libraries/net/peer_connection.cpp @@ -293,7 +293,6 @@ namespace graphene { namespace net their_state = their_connection_state::just_connected; our_state = our_connection_state::just_connected; remote_inbound_endpoint = remote_endpoint; - inbound_endpoint_verified = true; ilog( "established outbound connection to ${remote_endpoint}", ("remote_endpoint", remote_endpoint ) ); } catch ( fc::exception& e ) diff --git a/tests/tests/p2p_node_tests.cpp b/tests/tests/p2p_node_tests.cpp index a759fd044f..0faa8844df 100644 --- a/tests/tests/p2p_node_tests.cpp +++ b/tests/tests/p2p_node_tests.cpp @@ -800,7 +800,6 @@ BOOST_AUTO_TEST_CASE( hello_already_connected ) { fc::ip::endpoint peer3_ep = fc::ip::endpoint::from_string( std::string("1.2.3.4:5678") ); BOOST_CHECK( *node2_ptr->remote_inbound_endpoint == peer3_ep ); - BOOST_CHECK( node2_ptr->inbound_endpoint_verified ); BOOST_CHECK( graphene::net::firewalled_state::not_firewalled == node2_ptr->is_firewalled ); BOOST_REQUIRE_EQUAL( node2_ptr->additional_inbound_endpoints.size(), 1u ); BOOST_CHECK( *node2_ptr->additional_inbound_endpoints.begin() == peer3_ep ); From 120ea6f99e3b339526ecbd1ab676720ca28af5ab Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 15 Sep 2022 21:13:45 +0000 Subject: [PATCH 244/338] Avoid unnecessary copying of data --- libraries/net/node.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 51e6aa3785..0388bdddd4 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -4569,7 +4569,7 @@ namespace graphene { namespace net { namespace detail { // For an inbound handshaking connection, there is a race condition since we might not know its node_id yet, // so be stricter here. // Even so, there may be situations that we end up having multiple active connections with them. - fc::optional endpoint_for_this_peer( handshaking_peer->get_remote_endpoint() ); + fc::optional endpoint_for_this_peer = handshaking_peer->get_remote_endpoint(); if( endpoint_for_this_peer && *endpoint_for_this_peer == remote_endpoint ) return handshaking_peer; // Note: if it is an inbound connection and its inbound endpoint is verified already, From c0174e8cac7f3565a4c00547626e7d144b335ecd Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 15 Sep 2022 22:43:30 +0000 Subject: [PATCH 245/338] Fix a code smell --- libraries/net/node.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 0388bdddd4..f69bb98cd2 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -1677,7 +1677,7 @@ namespace graphene { namespace net { namespace detail { // In addition, by now, our list or exclude list for peer advertisement only contains IP endpoints but not // nodes' public keys (we can't use node_id because it changes every time the node restarts). Using a valid // address is better for the purpose. 
- if( originating_peer->inbound_port == 0 ) + if( 0 == originating_peer->inbound_port ) originating_peer->remote_inbound_endpoint = fc::ip::endpoint( remote_endpoint->get_address() ); else if( originating_peer->inbound_address.is_public_address() || originating_peer->inbound_address == remote_endpoint->get_address() ) From 3ab613e86997181e70623aaf881013cc512b4177 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 16 Sep 2022 11:59:50 +0000 Subject: [PATCH 246/338] Extend get_block_header* APIs to return signatures Including 2 database APIs: - get_block_header - get_block_header_batch --- libraries/app/database_api.cpp | 13 +++++++------ libraries/app/database_api_impl.hxx | 4 ++-- libraries/app/include/graphene/app/database_api.hpp | 10 +++++----- 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/libraries/app/database_api.cpp b/libraries/app/database_api.cpp index 6aab692409..ab976333c5 100644 --- a/libraries/app/database_api.cpp +++ b/libraries/app/database_api.cpp @@ -227,27 +227,28 @@ void database_api_impl::cancel_all_subscriptions( bool reset_callback, bool rese // // ////////////////////////////////////////////////////////////////////// -optional database_api::get_block_header(uint32_t block_num)const +optional database_api::get_block_header(uint32_t block_num)const { return my->get_block_header( block_num ); } -optional database_api_impl::get_block_header(uint32_t block_num) const +optional database_api_impl::get_block_header(uint32_t block_num) const { auto result = _db.fetch_block_by_number(block_num); if(result) return *result; return {}; } -map> database_api::get_block_header_batch(const vector block_nums)const +map> database_api::get_block_header_batch( + const vector block_nums) const { return my->get_block_header_batch( block_nums ); } -map> database_api_impl::get_block_header_batch( - const vector block_nums) const +map> database_api_impl::get_block_header_batch( + const vector block_nums) const { - map> results; + map> results; for (const uint32_t block_num : block_nums) { results[block_num] = get_block_header(block_num); diff --git a/libraries/app/database_api_impl.hxx b/libraries/app/database_api_impl.hxx index f6b4cad206..3d5984c83f 100644 --- a/libraries/app/database_api_impl.hxx +++ b/libraries/app/database_api_impl.hxx @@ -50,8 +50,8 @@ class database_api_impl : public std::enable_shared_from_this void cancel_all_subscriptions(bool reset_callback, bool reset_market_subscriptions); // Blocks and transactions - optional get_block_header(uint32_t block_num)const; - map> get_block_header_batch(const vector block_nums)const; + optional get_block_header(uint32_t block_num)const; + map> get_block_header_batch(const vector block_nums)const; optional get_block(uint32_t block_num)const; processed_transaction get_transaction( uint32_t block_num, uint32_t trx_in_block )const; diff --git a/libraries/app/include/graphene/app/database_api.hpp b/libraries/app/include/graphene/app/database_api.hpp index 4d9cf12c53..9cee4212bf 100644 --- a/libraries/app/include/graphene/app/database_api.hpp +++ b/libraries/app/include/graphene/app/database_api.hpp @@ -153,18 +153,18 @@ class database_api ///////////////////////////// /** - * @brief Retrieve a block header + * @brief Retrieve a signed block header * @param block_num Height of the block whose header should be returned * @return header of the referenced block, or null if no matching block was found */ - optional get_block_header(uint32_t block_num)const; + optional get_block_header(uint32_t block_num)const; /** - * @brief 
Retrieve multiple block header by block numbers - * @param block_nums vector containing heights of the block whose header should be returned + * @brief Retrieve multiple signed block headers by block numbers + * @param block_nums vector containing heights of the blocks whose headers should be returned * @return array of headers of the referenced blocks, or null if no matching block was found */ - map> get_block_header_batch(const vector block_nums)const; + map> get_block_header_batch(const vector block_nums)const; /** * @brief Retrieve a full, signed block From d8c8f014f67968463dbfcd37db84d20e20226c5d Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 16 Sep 2022 13:25:52 +0000 Subject: [PATCH 247/338] Add tests for get_block* APIs --- tests/tests/database_api_tests.cpp | 69 ++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/tests/tests/database_api_tests.cpp b/tests/tests/database_api_tests.cpp index 72979d1626..a6ab497449 100644 --- a/tests/tests/database_api_tests.cpp +++ b/tests/tests/database_api_tests.cpp @@ -1479,6 +1479,75 @@ BOOST_AUTO_TEST_CASE( get_transaction_hex ) } FC_LOG_AND_RETHROW() } +/// Tests get_block, get_block_header, get_block_header_batch +BOOST_AUTO_TEST_CASE( get_block_tests ) +{ try { + + generate_block(); + + ACTORS( (nathan) ); + auto block1 = generate_block( ~graphene::chain::database::skip_witness_signature ); + auto block2 = generate_block( ~graphene::chain::database::skip_witness_signature ); + + fund( nathan_id(db) ); + auto block3 = generate_block( ~graphene::chain::database::skip_witness_signature ); + + idump( (block1)(block2)(block3) ); + + uint32_t head_block_num = db.head_block_num(); + + graphene::app::database_api db_api(db); + auto head_block = db_api.get_block( head_block_num ); + idump( (head_block) ); + BOOST_REQUIRE( head_block.valid() ); + BOOST_CHECK_EQUAL( head_block->block_num(), head_block_num ); + BOOST_CHECK_EQUAL( head_block->transactions.size(), 1U ); + BOOST_CHECK( head_block->witness_signature != signature_type() ); + BOOST_CHECK( head_block->id() == block3.id() ); + + auto head_block_header = db_api.get_block_header( head_block_num ); + BOOST_REQUIRE( head_block_header.valid() ); + BOOST_CHECK_EQUAL( head_block_header->block_num(), head_block_num ); + BOOST_CHECK( head_block_header->witness_signature == head_block->witness_signature ); + BOOST_CHECK( head_block_header->id() == head_block->id() ); + + auto previous_block = db_api.get_block( head_block_num - 1 ); + BOOST_REQUIRE( previous_block.valid() ); + BOOST_CHECK_EQUAL( previous_block->block_num(), head_block_num - 1 ); + BOOST_CHECK_EQUAL( previous_block->transactions.size(), 0 ); + BOOST_CHECK( previous_block->id() == head_block->previous ); + BOOST_CHECK( previous_block->witness_signature != signature_type() ); + BOOST_CHECK( previous_block->witness_signature != head_block->witness_signature ); + BOOST_CHECK( previous_block->id() == block2.id() ); + + auto previous_block_header = db_api.get_block_header( head_block_num - 1 ); + BOOST_REQUIRE( previous_block_header.valid() ); + BOOST_CHECK_EQUAL( previous_block_header->block_num(), head_block_num - 1 ); + BOOST_CHECK( previous_block_header->witness_signature == previous_block->witness_signature ); + BOOST_CHECK( previous_block_header->id() == previous_block->id() ); + + auto next_block = db_api.get_block( head_block_num + 1 ); + BOOST_CHECK( !next_block.valid() ); + + auto next_block_header = db_api.get_block_header( head_block_num + 1 ); + BOOST_CHECK( !next_block_header.valid() ); + + const auto 
block_headers = db_api.get_block_header_batch( { head_block_num, head_block_num + 1, + head_block_num - 1 } ); + BOOST_REQUIRE_EQUAL( block_headers.size(), 3U ); + BOOST_CHECK_THROW( block_headers.at( head_block_num + 2 ), std::out_of_range ); + BOOST_CHECK( !block_headers.at( head_block_num + 1 ).valid() ); + BOOST_REQUIRE( block_headers.at( head_block_num ).valid() ); + BOOST_CHECK( block_headers.at( head_block_num )->block_num() == head_block_header->block_num() ); + BOOST_CHECK( block_headers.at( head_block_num )->id() == head_block_header->id() ); + BOOST_CHECK( block_headers.at( head_block_num )->witness_signature == head_block_header->witness_signature ); + BOOST_CHECK( block_headers.at( head_block_num - 1 )->block_num() == previous_block_header->block_num() ); + BOOST_CHECK( block_headers.at( head_block_num - 1 )->id() == previous_block_header->id() ); + BOOST_CHECK( block_headers.at( head_block_num - 1 )->witness_signature + == previous_block_header->witness_signature ); + +} FC_LOG_AND_RETHROW() } + BOOST_AUTO_TEST_CASE(verify_account_authority) { try { From de723bb2aa5759e35257c18a211276e298d14a17 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 16 Sep 2022 15:26:27 +0000 Subject: [PATCH 248/338] Add is_virtual field to operation_history_object --- libraries/chain/db_block.cpp | 20 +++++++++---------- .../chain/include/graphene/chain/database.hpp | 7 +++++-- .../chain/operation_history_object.hpp | 6 +++++- libraries/chain/small_objects.cpp | 2 +- 4 files changed, 20 insertions(+), 15 deletions(-) diff --git a/libraries/chain/db_block.cpp b/libraries/chain/db_block.cpp index 2a9ebfb22d..fcf8e44a54 100644 --- a/libraries/chain/db_block.cpp +++ b/libraries/chain/db_block.cpp @@ -342,7 +342,7 @@ processed_transaction database::push_proposal(const proposal_object& proposal) _undo_db.set_max_size( _undo_db.size() + 1 ); auto session = _undo_db.start_undo_session(true); for( auto& op : proposal.proposed_transaction.operations ) - eval_state.operation_results.emplace_back(apply_operation(eval_state, op)); + eval_state.operation_results.emplace_back(apply_operation(eval_state, op)); // This is a virtual operation // Make sure there is no unpaid samet fund debt const auto& samet_fund_idx = get_index_type().indices().get(); FC_ASSERT( samet_fund_idx.empty() || samet_fund_idx.begin()->unpaid_amount == 0, @@ -532,14 +532,11 @@ void database::clear_pending() _pending_tx_session.reset(); } FC_CAPTURE_AND_RETHROW() } -uint32_t database::push_applied_operation( const operation& op ) +uint32_t database::push_applied_operation( const operation& op, bool is_virtual /* = true */ ) { - _applied_ops.emplace_back(op); - operation_history_object& oh = *(_applied_ops.back()); - oh.block_num = _current_block_num; - oh.trx_in_block = _current_trx_in_block; - oh.op_in_trx = _current_op_in_trx; - oh.virtual_op = _current_virtual_op++; + _applied_ops.emplace_back( operation_history_object( op, _current_block_num, _current_trx_in_block, + _current_op_in_trx, _current_virtual_op, is_virtual ) ); + ++_current_virtual_op; return _applied_ops.size() - 1; } void database::set_applied_operation_result( uint32_t op_id, const operation_result& result ) @@ -758,7 +755,7 @@ processed_transaction database::_apply_transaction(const signed_transaction& trx for( const auto& op : ptrx.operations ) { _current_virtual_op = 0; - eval_state.operation_results.emplace_back(apply_operation(eval_state, op)); + eval_state.operation_results.emplace_back(apply_operation(eval_state, op, false)); // This is NOT a virtual op 
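// Hedged aside (illustrative, not code from this patch): the `false` above flows through
// push_applied_operation() into operation_history_object::is_virtual, so operations taken from
// signed transactions are recorded with is_virtual == false, while implied operations pushed
// with the default argument keep is_virtual == true. A history consumer could then filter on
// the flag, e.g. with a hypothetical helper like:
//
//    std::vector<operation_history_object> explicit_ops( const std::vector<operation_history_object>& hist )
//    {
//       std::vector<operation_history_object> result;
//       for( const auto& h : hist )
//          if( !h.is_virtual )        // kept: the operation came from a signed transaction
//             result.push_back( h );  // otherwise dropped: implied/virtual operation
//       return result;
//    }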
++_current_op_in_trx; } ptrx.operation_results = std::move(eval_state.operation_results); @@ -771,7 +768,8 @@ processed_transaction database::_apply_transaction(const signed_transaction& trx return ptrx; } FC_CAPTURE_AND_RETHROW( (trx) ) } -operation_result database::apply_operation(transaction_evaluation_state& eval_state, const operation& op) +operation_result database::apply_operation( transaction_evaluation_state& eval_state, const operation& op, + bool is_virtual /* = true */ ) { try { int i_which = op.which(); uint64_t u_which = uint64_t( i_which ); @@ -779,7 +777,7 @@ operation_result database::apply_operation(transaction_evaluation_state& eval_st FC_ASSERT( u_which < _operation_evaluators.size(), "No registered evaluator for operation ${op}", ("op",op) ); unique_ptr& eval = _operation_evaluators[ u_which ]; FC_ASSERT( eval, "No registered evaluator for operation ${op}", ("op",op) ); - auto op_id = push_applied_operation( op ); + auto op_id = push_applied_operation( op, is_virtual ); auto result = eval->evaluate( eval_state, op, true ); set_applied_operation_result( op_id, result ); return result; diff --git a/libraries/chain/include/graphene/chain/database.hpp b/libraries/chain/include/graphene/chain/database.hpp index cbcbadb61a..b953f4fdba 100644 --- a/libraries/chain/include/graphene/chain/database.hpp +++ b/libraries/chain/include/graphene/chain/database.hpp @@ -585,10 +585,12 @@ namespace graphene { namespace chain { * as any implied/virtual operations that resulted, such as filling an order. The * applied operations is cleared after applying each block and calling the block * observers which may want to index these operations. + * @param The operation to push + * @param is_virtual Whether the operation is a virtual operation * * @return the op_id which can be used to set the result after it has finished being applied. 
*/ - uint32_t push_applied_operation( const operation& op ); + uint32_t push_applied_operation( const operation& op, bool is_virtual = true ); void set_applied_operation_result( uint32_t op_id, const operation_result& r ); const vector >& get_applied_operations()const; @@ -681,7 +683,8 @@ namespace graphene { namespace chain { // these were formerly private, but they have a fairly well-defined API, so let's make them public void apply_block( const signed_block& next_block, uint32_t skip = skip_nothing ); processed_transaction apply_transaction( const signed_transaction& trx, uint32_t skip = skip_nothing ); - operation_result apply_operation( transaction_evaluation_state& eval_state, const operation& op ); + operation_result apply_operation( transaction_evaluation_state& eval_state, const operation& op, + bool is_virtual = true ); private: void _apply_block( const signed_block& next_block ); diff --git a/libraries/chain/include/graphene/chain/operation_history_object.hpp b/libraries/chain/include/graphene/chain/operation_history_object.hpp index 086e3e440f..10cb4bdad1 100644 --- a/libraries/chain/include/graphene/chain/operation_history_object.hpp +++ b/libraries/chain/include/graphene/chain/operation_history_object.hpp @@ -51,8 +51,10 @@ namespace graphene { namespace chain { static constexpr uint8_t space_id = protocol_ids; static constexpr uint8_t type_id = operation_history_object_type; - operation_history_object( const operation& o ):op(o){} + explicit operation_history_object( const operation& o ):op(o){} operation_history_object(){} + operation_history_object( const operation& o, uint32_t bn, uint16_t tib, uint16_t oit, uint32_t vo, bool iv ) + : op(o), block_num(bn), trx_in_block(tib), op_in_trx(oit), virtual_op(vo), is_virtual(iv) {} operation op; operation_result result; @@ -64,6 +66,8 @@ namespace graphene { namespace chain { uint16_t op_in_trx = 0; /** any virtual operations implied by operation in block */ uint32_t virtual_op = 0; + /** Whether this is a virtual operation */ + bool is_virtual = false; }; /** diff --git a/libraries/chain/small_objects.cpp b/libraries/chain/small_objects.cpp index 87e58e2586..e860143013 100644 --- a/libraries/chain/small_objects.cpp +++ b/libraries/chain/small_objects.cpp @@ -139,7 +139,7 @@ FC_REFLECT_DERIVED_NO_TYPENAME( graphene::chain::htlc_object, (graphene::db::obj (transfer) (conditions) (memo) ) FC_REFLECT_DERIVED_NO_TYPENAME( graphene::chain::operation_history_object, (graphene::chain::object), - (op)(result)(block_num)(trx_in_block)(op_in_trx)(virtual_op) ) + (op)(result)(block_num)(trx_in_block)(op_in_trx)(virtual_op)(is_virtual) ) FC_REFLECT_DERIVED_NO_TYPENAME( graphene::chain::account_history_object, (graphene::chain::object), (account)(operation_id)(sequence)(next) ) From 1ff5d8e2cae2b7ea80e3de0fa2b4d82aae87aded Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 16 Sep 2022 15:32:38 +0000 Subject: [PATCH 249/338] Add block_time field to operation_history_object --- libraries/chain/db_block.cpp | 2 +- .../include/graphene/chain/operation_history_object.hpp | 7 +++++-- libraries/chain/small_objects.cpp | 2 +- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/libraries/chain/db_block.cpp b/libraries/chain/db_block.cpp index fcf8e44a54..92440e4dbe 100644 --- a/libraries/chain/db_block.cpp +++ b/libraries/chain/db_block.cpp @@ -535,7 +535,7 @@ void database::clear_pending() uint32_t database::push_applied_operation( const operation& op, bool is_virtual /* = true */ ) { _applied_ops.emplace_back( operation_history_object( op, 
_current_block_num, _current_trx_in_block, - _current_op_in_trx, _current_virtual_op, is_virtual ) ); + _current_op_in_trx, _current_virtual_op, is_virtual, _current_block_time ) ); ++_current_virtual_op; return _applied_ops.size() - 1; } diff --git a/libraries/chain/include/graphene/chain/operation_history_object.hpp b/libraries/chain/include/graphene/chain/operation_history_object.hpp index 10cb4bdad1..380a0292d0 100644 --- a/libraries/chain/include/graphene/chain/operation_history_object.hpp +++ b/libraries/chain/include/graphene/chain/operation_history_object.hpp @@ -53,8 +53,9 @@ namespace graphene { namespace chain { explicit operation_history_object( const operation& o ):op(o){} operation_history_object(){} - operation_history_object( const operation& o, uint32_t bn, uint16_t tib, uint16_t oit, uint32_t vo, bool iv ) - : op(o), block_num(bn), trx_in_block(tib), op_in_trx(oit), virtual_op(vo), is_virtual(iv) {} + operation_history_object( const operation& o, uint32_t bn, uint16_t tib, uint16_t oit, uint32_t vo, bool iv, + const time_point_sec& bt ) + : op(o), block_num(bn), trx_in_block(tib), op_in_trx(oit), virtual_op(vo), is_virtual(iv), block_time(bt) {} operation op; operation_result result; @@ -68,6 +69,8 @@ namespace graphene { namespace chain { uint32_t virtual_op = 0; /** Whether this is a virtual operation */ bool is_virtual = false; + /** The timestamp of the block that caused this operation */ + time_point_sec block_time; }; /** diff --git a/libraries/chain/small_objects.cpp b/libraries/chain/small_objects.cpp index e860143013..291eac1230 100644 --- a/libraries/chain/small_objects.cpp +++ b/libraries/chain/small_objects.cpp @@ -139,7 +139,7 @@ FC_REFLECT_DERIVED_NO_TYPENAME( graphene::chain::htlc_object, (graphene::db::obj (transfer) (conditions) (memo) ) FC_REFLECT_DERIVED_NO_TYPENAME( graphene::chain::operation_history_object, (graphene::chain::object), - (op)(result)(block_num)(trx_in_block)(op_in_trx)(virtual_op)(is_virtual) ) + (op)(result)(block_num)(trx_in_block)(op_in_trx)(virtual_op)(is_virtual)(block_time) ) FC_REFLECT_DERIVED_NO_TYPENAME( graphene::chain::account_history_object, (graphene::chain::object), (account)(operation_id)(sequence)(next) ) From 21bd95b5a25046c87264b5b8974b6eb98f273f68 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 16 Sep 2022 16:05:11 +0000 Subject: [PATCH 250/338] Bump DB_VERSION --- libraries/chain/include/graphene/chain/config.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/include/graphene/chain/config.hpp b/libraries/chain/include/graphene/chain/config.hpp index 598df91053..4dc548855f 100644 --- a/libraries/chain/include/graphene/chain/config.hpp +++ b/libraries/chain/include/graphene/chain/config.hpp @@ -32,7 +32,7 @@ #define GRAPHENE_MAX_NESTED_OBJECTS (200) -const std::string GRAPHENE_CURRENT_DB_VERSION = "20220913"; +const std::string GRAPHENE_CURRENT_DB_VERSION = "20220916"; #define GRAPHENE_RECENTLY_MISSED_COUNT_INCREMENT 4 #define GRAPHENE_RECENTLY_MISSED_COUNT_DECREMENT 3 From ade248eb8d9826ab540e6b99a85c288ef24f2c66 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 16 Sep 2022 17:52:37 +0000 Subject: [PATCH 251/338] Pass the block_nums param by reference to const --- libraries/app/database_api.cpp | 4 ++-- libraries/app/database_api_impl.hxx | 2 +- libraries/app/include/graphene/app/database_api.hpp | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/libraries/app/database_api.cpp b/libraries/app/database_api.cpp index ab976333c5..1a83ce2d1f 100644 --- 
a/libraries/app/database_api.cpp +++ b/libraries/app/database_api.cpp @@ -240,13 +240,13 @@ optional database_api_impl::get_block_header(uint32_t block return {}; } map> database_api::get_block_header_batch( - const vector block_nums) const + const vector& block_nums) const { return my->get_block_header_batch( block_nums ); } map> database_api_impl::get_block_header_batch( - const vector block_nums) const + const vector& block_nums) const { map> results; for (const uint32_t block_num : block_nums) diff --git a/libraries/app/database_api_impl.hxx b/libraries/app/database_api_impl.hxx index 3d5984c83f..153ef514e9 100644 --- a/libraries/app/database_api_impl.hxx +++ b/libraries/app/database_api_impl.hxx @@ -51,7 +51,7 @@ class database_api_impl : public std::enable_shared_from_this // Blocks and transactions optional get_block_header(uint32_t block_num)const; - map> get_block_header_batch(const vector block_nums)const; + map> get_block_header_batch(const vector& block_nums)const; optional get_block(uint32_t block_num)const; processed_transaction get_transaction( uint32_t block_num, uint32_t trx_in_block )const; diff --git a/libraries/app/include/graphene/app/database_api.hpp b/libraries/app/include/graphene/app/database_api.hpp index 9cee4212bf..b76baff87d 100644 --- a/libraries/app/include/graphene/app/database_api.hpp +++ b/libraries/app/include/graphene/app/database_api.hpp @@ -164,7 +164,7 @@ class database_api * @param block_nums vector containing heights of the blocks whose headers should be returned * @return array of headers of the referenced blocks, or null if no matching block was found */ - map> get_block_header_batch(const vector block_nums)const; + map> get_block_header_batch(const vector& block_nums)const; /** * @brief Retrieve a full, signed block From b1455989d86acfdadf87972366e12388aec8eeb6 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 16 Sep 2022 18:24:41 +0000 Subject: [PATCH 252/338] Handle block_time and is_virtual fields in plugins --- libraries/plugins/account_history/account_history_plugin.cpp | 2 ++ libraries/plugins/elasticsearch/elasticsearch_plugin.cpp | 2 ++ 2 files changed, 4 insertions(+) diff --git a/libraries/plugins/account_history/account_history_plugin.cpp b/libraries/plugins/account_history/account_history_plugin.cpp index 8270af3f31..c391724802 100644 --- a/libraries/plugins/account_history/account_history_plugin.cpp +++ b/libraries/plugins/account_history/account_history_plugin.cpp @@ -108,6 +108,8 @@ void account_history_plugin_impl::update_account_histories( const signed_block& h.trx_in_block = o_op->trx_in_block; h.op_in_trx = o_op->op_in_trx; h.virtual_op = o_op->virtual_op; + h.is_virtual = o_op->is_virtual; + h.block_time = o_op->block_time; } } ) ); }; diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index 298e459a34..c7ed9eec90 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -148,6 +148,8 @@ void elasticsearch_plugin_impl::update_account_histories( const signed_block& b h.trx_in_block = o_op->trx_in_block; h.op_in_trx = o_op->op_in_trx; h.virtual_op = o_op->virtual_op; + h.is_virtual = o_op->is_virtual; + h.block_time = o_op->block_time; } })); }; From d07cb031dace12875f827abaab1bc78f615839a2 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 16 Sep 2022 18:25:29 +0000 Subject: [PATCH 253/338] Add tests for block_time and is_virtual fields --- tests/tests/history_api_tests.cpp 
| 34 +++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/tests/tests/history_api_tests.cpp b/tests/tests/history_api_tests.cpp index 7b0a98d8b4..f448aaf6d2 100644 --- a/tests/tests/history_api_tests.cpp +++ b/tests/tests/history_api_tests.cpp @@ -61,6 +61,8 @@ BOOST_AUTO_TEST_CASE(get_account_history) { BOOST_CHECK_EQUAL(histories.size(), 3u); BOOST_CHECK_EQUAL(histories[2].id.instance(), 0u); BOOST_CHECK_EQUAL(histories[2].op.which(), asset_create_op_id); + BOOST_CHECK( histories[2].block_time == db.head_block_time() ); + BOOST_CHECK( !histories[2].is_virtual ); // 1 account_create op larger than id1 histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), @@ -100,6 +102,38 @@ BOOST_AUTO_TEST_CASE(get_account_history) { } } +BOOST_AUTO_TEST_CASE(get_account_history_virtual_operation_test) { + try { + graphene::app::history_api hist_api(app); + + asset_id_type usd_id = create_user_issued_asset("USD").id; + + ACTORS( (dan)(bob) ); + fund( dan, asset(100) ); + issue_uia( bob_id, asset(100, usd_id) ); + + create_sell_order( dan_id, asset(100), asset(100, usd_id) ); + create_sell_order( bob_id, asset(100, usd_id), asset(100) ); + + generate_block(); + fc::usleep(fc::milliseconds(200)); + + auto fill_order_op_id = operation::tag::value; + + vector histories = hist_api.get_account_history("dan", operation_history_id_type(), + 100, operation_history_id_type()); + + BOOST_REQUIRE_GT( histories.size(), 0 ); + BOOST_CHECK_EQUAL( histories.front().op.which(), fill_order_op_id ); + BOOST_CHECK( histories.front().block_time == db.head_block_time() ); + BOOST_CHECK( histories.front().is_virtual ); + + } catch (fc::exception &e) { + edump((e.to_detail_string())); + throw; + } +} + BOOST_AUTO_TEST_CASE(get_account_history_notify_all_on_creation) { try { // Pass hard fork time From 8084f2f0ff389cabb7b06784925edc27925d6d5d Mon Sep 17 00:00:00 2001 From: Abit Date: Fri, 16 Sep 2022 22:06:18 +0200 Subject: [PATCH 254/338] Show operation history IDs in CLI wallet --- libraries/wallet/wallet_results.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/libraries/wallet/wallet_results.cpp b/libraries/wallet/wallet_results.cpp index 3f881d4138..8bdefff6b7 100644 --- a/libraries/wallet/wallet_results.cpp +++ b/libraries/wallet/wallet_results.cpp @@ -53,6 +53,7 @@ std::map> wallet_a FC_ASSERT(b); ss << i.block_num << " "; ss << b->timestamp.to_iso_string() << " "; + ss << string(i.id) << " "; i.op.visit(operation_printer(ss, *this, i)); ss << " \n"; } @@ -74,6 +75,7 @@ std::map> wallet_a FC_ASSERT(b); ss << i.block_num << " "; ss << b->timestamp.to_iso_string() << " "; + ss << string(i.id) << " "; i.op.visit(operation_printer(ss, *this, i)); ss << " transaction_id : "; ss << d.transaction_id.str(); From e19064dbf07116fbdb12f0b582ca5f287108dfdb Mon Sep 17 00:00:00 2001 From: ioBanker <37595908+ioBanker@users.noreply.github.com> Date: Wed, 21 Sep 2022 00:19:14 +0300 Subject: [PATCH 255/338] Fine tuning README --- README.md | 38 ++++++++++++++++++++++++-------------- 1 file changed, 24 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index a582191ff9..92b8ef71e4 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@ BitShares Core ============== -[BitShares Core](https://github.com/bitshares/bitshares-core) is the BitShares blockchain implementation and command-line interface. -The web browser based wallet is [BitShares UI](https://github.com/bitshares/bitshares-ui). 
+[BitShares Core](https://github.com/bitshares/bitshares-core) is the BitShares blockchain node software and command-line wallet software. +For UI reference wallet software visit [BitShares UI](https://github.com/bitshares/bitshares-ui). Visit [BitShares.org](https://bitshares.org/) to learn about BitShares and join the community at [BitSharesTalk.org](https://bitsharestalk.org/). @@ -25,23 +25,26 @@ Visit [Awesome BitShares](https://github.com/bitshares/awesome-bitshares) to fin |`testnet`|[![](https://github.com/bitshares/bitshares-core/workflows/macOS/badge.svg?branch=testnet)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"macOS"+branch%3Atestnet) [![](https://github.com/bitshares/bitshares-core/workflows/Ubuntu%20Debug/badge.svg?branch=testnet)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Ubuntu+Debug"+branch%3Atestnet) [![](https://github.com/bitshares/bitshares-core/workflows/Ubuntu%20Release/badge.svg?branch=testnet)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Ubuntu+Release"+branch%3Atestnet) [![](https://github.com/bitshares/bitshares-core/workflows/Windows%20MinGW64/badge.svg?branch=testnet)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Windows+MinGW64"+branch%3Atestnet) [![](https://github.com/bitshares/bitshares-core/workflows/Docker/badge.svg?branch=testnet)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A%22Docker%22+branch%3Atestnet)| |`master` of `bitshares-fc`|[![](https://github.com/bitshares/bitshares-fc/workflows/macOS/badge.svg?branch=master)](https://github.com/bitshares/bitshares-fc/actions?query=workflow%3A"macOS"+branch%3Amaster) [![](https://github.com/bitshares/bitshares-fc/workflows/Ubuntu%20Debug/badge.svg?branch=master)](https://github.com/bitshares/bitshares-fc/actions?query=workflow%3A"Ubuntu+Debug"+branch%3Amaster) [![](https://github.com/bitshares/bitshares-fc/workflows/Ubuntu%20Release/badge.svg?branch=master)](https://github.com/bitshares/bitshares-fc/actions?query=workflow%3A"Ubuntu+Release"+branch%3Amaster)| + Getting Started --------------- + Build instructions and additional documentation are available in the [Wiki](https://github.com/bitshares/bitshares-core/wiki). Prebuilt binaries can be found in the [releases page](https://github.com/bitshares/bitshares-core/releases) for download. + ### Build We recommend building on Ubuntu 20.04 LTS (64-bit) -**Build Dependencies:** +**OS Dependencies:** sudo apt-get update sudo apt-get install autoconf cmake make automake libtool git libboost-all-dev libssl-dev g++ libcurl4-openssl-dev doxygen -**Build Script:** +**Build Node:** git clone https://github.com/bitshares/bitshares-core.git cd bitshares-core @@ -52,8 +55,9 @@ We recommend building on Ubuntu 20.04 LTS (64-bit) cmake -DCMAKE_BUILD_TYPE=Release .. make -**Upgrade Script:** (prepend to the Build Script above if you built a prior release): +**Upgrade Node:** + cd bitshares-core git remote set-url origin https://github.com/bitshares/bitshares-core.git git checkout master git remote set-head origin --auto @@ -61,6 +65,8 @@ We recommend building on Ubuntu 20.04 LTS (64-bit) git submodule update --init --recursive # this command may fail git submodule sync --recursive git submodule update --init --recursive + cmake -DCMAKE_BUILD_TYPE=Release .. 
+ make **NOTE:** @@ -84,17 +90,19 @@ manually build your preferred version and use it with BitShares by specifying it Example: `cmake -DOPENSSL_ROOT_DIR=/path/to/openssl ..` -### Run the node software +### Run + +**Run Node Software:** -**After Building**, the node software `witness_node` can be launched with: +Stay on `bitshares-core` directory before you run the below `witness_node` command ./programs/witness_node/witness_node -The node will automatically create a `witness_node_data_dir` directory with some config files. -The blockchain data will be stored in the directory too. -It may take several hours to fully synchronize the blockchain. +The node run will automatically create the `witness_node_data_dir` sub-directory along with node config files then start synchronizing with blockchain. +It may take (usually several hours) to fully download the blockchain data. +The blockchain data will be stored under the directory `witness_node_data_dir`. -You can exit the node using `Ctrl+C`. Please be aware that the node may need some time (usually a few minutes) to exit cleanly, please be patient. +You can stop the node run using `Ctrl+C`. Please note that stopping the node run may take (usually few minutes) to exit cleanly after stopping the node run using `Ctrl+C`. **IMPORTANT:** By default the node will start in reduced memory mode by using some of the commands detailed in [Memory reduction for nodes](https://github.com/bitshares/bitshares-core/wiki/Memory-reduction-for-nodes). In order to run a full node with all the account histories (which is usually not necessary) you need to remove `partial-operations` and `max-ops-per-account` from your config file. Please note that currently(2018-10-17) a full node will need more than 160GB of RAM to operate and required memory is growing fast. Consider the following table as **minimal requirements** before running a node: @@ -118,9 +126,9 @@ You can run the program with `--help` parameter to see more info: ./programs/witness_node/witness_node --help -### Run the command-line wallet software +### Command-line Wallet Software -To start the command-line wallet, in a separate terminal you can run: +Stay on `bitshares-core` directory before you run the below `cli_wallet` command ./programs/cli_wallet/cli_wallet @@ -200,7 +208,7 @@ BitShares UI bugs should be reported to the [UI issue tracker](https://github.co Up to date online Doxygen documentation can be found at [Doxygen.BitShares.org](https://doxygen.bitshares.org/hierarchy.html). -Using the API +Built-In APIs ------------- ### Node API @@ -379,7 +387,9 @@ FAQ connecting to. Therefore the API to add p2p connections needs to be set up with proper access controls. + License ------- + BitShares Core is under the MIT license. See [LICENSE](https://github.com/bitshares/bitshares-core/blob/master/LICENSE.txt) for more information. From 2d0f875c5b27bfdd2d7378e1423defb7b6734ec0 Mon Sep 17 00:00:00 2001 From: ioBanker <37595908+ioBanker@users.noreply.github.com> Date: Wed, 21 Sep 2022 17:12:38 +0300 Subject: [PATCH 256/338] Update README.md --- README.md | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index 92b8ef71e4..5244f3e1cb 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ BitShares Core ============== [BitShares Core](https://github.com/bitshares/bitshares-core) is the BitShares blockchain node software and command-line wallet software. 
-For UI reference wallet software visit [BitShares UI](https://github.com/bitshares/bitshares-ui). +For UI reference wallet software (browser-based wallet and desktop wallet) visit [BitShares UI](https://github.com/bitshares/bitshares-ui). Visit [BitShares.org](https://bitshares.org/) to learn about BitShares and join the community at [BitSharesTalk.org](https://bitsharestalk.org/). @@ -12,7 +12,7 @@ Visit [Awesome BitShares](https://github.com/bitshares/awesome-bitshares) to fin * [Getting Started](#getting-started) * [Support](#support) -* [Using the API](#using-the-api) +* [Using Built-In APIs](#using-built-in-apis) * [Accessing restrictable node API sets](#accessing-restrictable-node-api-sets) * [FAQ](#faq) * [License](#license) @@ -39,12 +39,12 @@ Prebuilt binaries can be found in the [releases page](https://github.com/bitshar We recommend building on Ubuntu 20.04 LTS (64-bit) -**OS Dependencies:** +**Install OS Dependencies:** sudo apt-get update sudo apt-get install autoconf cmake make automake libtool git libboost-all-dev libssl-dev g++ libcurl4-openssl-dev doxygen -**Build Node:** +**Build Node And Command-Line Wallet:** git clone https://github.com/bitshares/bitshares-core.git cd bitshares-core @@ -55,7 +55,7 @@ We recommend building on Ubuntu 20.04 LTS (64-bit) cmake -DCMAKE_BUILD_TYPE=Release .. make -**Upgrade Node:** +**Upgrade Node And Command-Line Wallet:** cd bitshares-core git remote set-url origin https://github.com/bitshares/bitshares-core.git @@ -65,6 +65,8 @@ We recommend building on Ubuntu 20.04 LTS (64-bit) git submodule update --init --recursive # this command may fail git submodule sync --recursive git submodule update --init --recursive + mkdir build + cd build cmake -DCMAKE_BUILD_TYPE=Release .. make @@ -94,15 +96,19 @@ manually build your preferred version and use it with BitShares by specifying it **Run Node Software:** -Stay on `bitshares-core` directory before you run the below `witness_node` command +Stay on `bitshares-core/build` directory before you run the below `witness_node` command ./programs/witness_node/witness_node -The node run will automatically create the `witness_node_data_dir` sub-directory along with node config files then start synchronizing with blockchain. -It may take (usually several hours) to fully download the blockchain data. +Under `build` directory the node run will automatically create the directory `witness_node_data_dir` along with config files underneath then start synchronizing the blockchain. +It may take (usually several hours) to fully synchronize the blockchain data. The blockchain data will be stored under the directory `witness_node_data_dir`. -You can stop the node run using `Ctrl+C`. Please note that stopping the node run may take (usually few minutes) to exit cleanly after stopping the node run using `Ctrl+C`. +**Stop Node Software:** + +For stopping the node run cleanly; you will need to access the node run terminal then press on `Ctrl+C` then wait for the run to stop, please note that it may take (usually few minutes) to exit the run. +It's recommended to use linux command `screen`(https://help.ubuntu.com/community/Screen) to inisiate the node run so you can go back to the node run screen to stop the run. + **IMPORTANT:** By default the node will start in reduced memory mode by using some of the commands detailed in [Memory reduction for nodes](https://github.com/bitshares/bitshares-core/wiki/Memory-reduction-for-nodes). 
In order to run a full node with all the account histories (which is usually not necessary) you need to remove `partial-operations` and `max-ops-per-account` from your config file. Please note that currently(2018-10-17) a full node will need more than 160GB of RAM to operate and required memory is growing fast. Consider the following table as **minimal requirements** before running a node: @@ -208,7 +214,7 @@ BitShares UI bugs should be reported to the [UI issue tracker](https://github.co Up to date online Doxygen documentation can be found at [Doxygen.BitShares.org](https://doxygen.bitshares.org/hierarchy.html). -Built-In APIs +Using Built-In APIs ------------- ### Node API From bb59fab98a56fa2da55f47d48ef764fe7b351594 Mon Sep 17 00:00:00 2001 From: ioBanker <37595908+ioBanker@users.noreply.github.com> Date: Wed, 21 Sep 2022 17:16:24 +0300 Subject: [PATCH 257/338] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 5244f3e1cb..49e3f63951 100644 --- a/README.md +++ b/README.md @@ -107,7 +107,7 @@ The blockchain data will be stored under the directory `witness_node_data_dir`. **Stop Node Software:** For stopping the node run cleanly; you will need to access the node run terminal then press on `Ctrl+C` then wait for the run to stop, please note that it may take (usually few minutes) to exit the run. -It's recommended to use linux command `screen`(https://help.ubuntu.com/community/Screen) to inisiate the node run so you can go back to the node run screen to stop the run. +It's recommended to use linux command `screen`(https://help.ubuntu.com/community/Screen) to inisiate the node run so you can go back to the node run screen to stop it. **IMPORTANT:** By default the node will start in reduced memory mode by using some of the commands detailed in [Memory reduction for nodes](https://github.com/bitshares/bitshares-core/wiki/Memory-reduction-for-nodes). 
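A minimal sketch of the `screen` workflow recommended in the README changes above, assuming the `build` directory layout those patches describe; the session name `bitshares` is illustrative and not part of the patches:

    cd bitshares-core/build
    # start the node inside a named, detachable screen session
    screen -S bitshares ./programs/witness_node/witness_node
    # press Ctrl+A then d to detach; the node keeps running in the background
    # reattach later, then press Ctrl+C inside the session to stop the node cleanly
    screen -r bitshares

Detaching leaves the node running, which is why the patches suggest `screen` for the long initial synchronization.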
From 598553404d387bfe7f9ea5cfd8aeb0109484eada Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 30 Sep 2022 15:57:17 +0000 Subject: [PATCH 258/338] Add option to keep operations in the last X blocks --- libraries/app/config_util.cpp | 2 +- .../account_history_plugin.cpp | 173 ++++++++++++------ 2 files changed, 115 insertions(+), 60 deletions(-) diff --git a/libraries/app/config_util.cpp b/libraries/app/config_util.cpp index da3e82db0f..c42571c33e 100644 --- a/libraries/app/config_util.cpp +++ b/libraries/app/config_util.cpp @@ -257,7 +257,7 @@ static void create_new_config_file(const fc::path& config_ini_path, const fc::pa if( name == "partial-operations" ) return new_option_description(name, bpo::value()->default_value(true), o->description() ); if( name == "max-ops-per-account" ) - return new_option_description(name, bpo::value()->default_value(100), o->description() ); + return new_option_description(name, bpo::value()->default_value(100), o->description() ); return o; }; graphene::app::detail::deduplicator dedup(modify_option_defaults); diff --git a/libraries/plugins/account_history/account_history_plugin.cpp b/libraries/plugins/account_history/account_history_plugin.cpp index c391724802..8779f64d63 100644 --- a/libraries/plugins/account_history/account_history_plugin.cpp +++ b/libraries/plugins/account_history/account_history_plugin.cpp @@ -35,6 +35,8 @@ #include #include +#include + #include namespace graphene { namespace account_history { @@ -50,6 +52,7 @@ class account_history_plugin_impl : _self( _plugin ) { } + private: /** this method is called as a callback after a block is applied * and will process/index all operations that were applied in the block. */ @@ -62,7 +65,6 @@ class account_history_plugin_impl friend class graphene::account_history::account_history_plugin; - private: account_history_plugin& _self; flat_set _tracked_accounts; flat_set _extended_history_accounts; @@ -71,10 +73,19 @@ class account_history_plugin_impl primary_index< operation_history_index >* _oho_index; uint64_t _max_ops_per_account = -1; uint64_t _extended_max_ops_per_account = -1; + uint32_t _min_blocks_to_keep = 30000; + uint64_t _max_ops_per_acc_by_min_blocks = 1000; + + uint64_t get_max_ops_to_keep( const account_id_type& account_id ); - /** add one history record, then check and remove the earliest history record */ - void add_account_history( const account_id_type account_id, const operation_history_id_type op_id ); + /** add one history record, then check and remove the earliest history record(s) */ + void add_account_history( const account_id_type& account_id, const operation_history_object& op ); + void remove_old_histories( const account_statistics_object& stats_obj, + const account_history_object& latest_aho, + const operation_history_object& latest_op ); + + void init_program_options(const boost::program_options::variables_map& options); }; void account_history_plugin_impl::update_account_histories( const signed_block& b ) @@ -182,7 +193,7 @@ void account_history_plugin_impl::update_account_histories( const signed_block& // that indexing now happens in observers' post_evaluate() // add history - add_account_history( account_id, oho->id ); + add_account_history( account_id, *oho ); } } } @@ -202,7 +213,7 @@ void account_history_plugin_impl::update_account_histories( const signed_block& { if (!oho.valid()) { oho = create_oho(); } // add history - add_account_history( account_id, oho->id ); + add_account_history( account_id, *oho ); } } } @@ -212,72 +223,100 @@ void 
account_history_plugin_impl::update_account_histories( const signed_block& } } -void account_history_plugin_impl::add_account_history( const account_id_type account_id, - const operation_history_id_type op_id ) +void account_history_plugin_impl::add_account_history( const account_id_type& account_id, + const operation_history_object& op ) { graphene::chain::database& db = database(); - const auto& stats_obj = account_id(db).statistics(db); + const auto& stats_obj = db.get_account_stats_by_owner( account_id ); // add new entry - const auto& ath = db.create( [&]( account_history_object& obj ){ - obj.operation_id = op_id; + const auto& aho = db.create( [&account_id,&op,&stats_obj](account_history_object& obj){ + obj.operation_id = op.id; obj.account = account_id; obj.sequence = stats_obj.total_ops + 1; obj.next = stats_obj.most_recent_op; }); - db.modify( stats_obj, [&]( account_statistics_object& obj ){ - obj.most_recent_op = ath.id; - obj.total_ops = ath.sequence; + db.modify( stats_obj, [&aho]( account_statistics_object& obj ){ + obj.most_recent_op = aho.id; + obj.total_ops = aho.sequence; }); + // Remove the earliest account history entries if too many. + remove_old_histories( stats_obj, aho, op ); +} + +uint64_t account_history_plugin_impl::get_max_ops_to_keep( const account_id_type& account_id ) +{ + const graphene::chain::database& db = database(); // Amount of history to keep depends on if account is in the "extended history" list bool extended_hist = ( _extended_history_accounts.find( account_id ) != _extended_history_accounts.end() ); - if( !extended_hist && !_extended_history_registrars.empty() ) { - const account_id_type registrar_id = account_id(db).registrar; + if( !extended_hist && !_extended_history_registrars.empty() ) + { + const account_id_type& registrar_id = account_id(db).registrar; extended_hist = ( _extended_history_registrars.find( registrar_id ) != _extended_history_registrars.end() ); } // _max_ops_per_account is guaranteed to be non-zero outside; max_ops_to_keep // will likewise be non-zero, and also non-negative (it is unsigned). auto max_ops_to_keep = _max_ops_per_account; - if (extended_hist && _extended_max_ops_per_account > max_ops_to_keep) { + if( extended_hist && _extended_max_ops_per_account > max_ops_to_keep ) + { max_ops_to_keep = _extended_max_ops_per_account; } - // Remove the earliest account history entry if too many. - if( stats_obj.total_ops - stats_obj.removed_ops > max_ops_to_keep ) + if( 0 == max_ops_to_keep ) + return 1; + return max_ops_to_keep; +} + +// Remove the earliest account history entries if too many. 
+void account_history_plugin_impl::remove_old_histories( const account_statistics_object& stats_obj, + const account_history_object& latest_aho, + const operation_history_object& latest_op ) +{ + graphene::chain::database& db = database(); + const account_id_type& account_id = latest_aho.account; + auto max_ops_to_keep = get_max_ops_to_keep( account_id ); + + while( stats_obj.total_ops - stats_obj.removed_ops > max_ops_to_keep ) { // look for the earliest entry const auto& his_idx = db.get_index_type(); const auto& by_seq_idx = his_idx.indices().get(); - auto itr = by_seq_idx.lower_bound( boost::make_tuple( account_id, 0 ) ); + auto itr = by_seq_idx.lower_bound( account_id ); // make sure don't remove the one just added - if( itr != by_seq_idx.end() && itr->account == account_id && itr->id != ath.id ) + if( itr == by_seq_idx.end() || itr->account != account_id || itr->id == latest_aho.id ) + return; + + // if found, check whether to remove + const auto remove_op_id = itr->operation_id; + const auto& remove_op = remove_op_id(db); + if( remove_op.block_num + _min_blocks_to_keep > latest_op.block_num + && stats_obj.total_ops - stats_obj.removed_ops <= _max_ops_per_acc_by_min_blocks ) + return; + + // remove the entry, and adjust account stats object + const auto itr_remove = itr; + ++itr; + db.remove( *itr_remove ); + db.modify( stats_obj, [&]( account_statistics_object& obj ){ + obj.removed_ops = obj.removed_ops + 1; + }); + // modify previous node's next pointer + // this should be always true, but just have a check here + if( itr != by_seq_idx.end() && itr->account == account_id ) { - // if found, remove the entry, and adjust account stats object - const auto remove_op_id = itr->operation_id; - const auto itr_remove = itr; - ++itr; - db.remove( *itr_remove ); - db.modify( stats_obj, [&]( account_statistics_object& obj ){ - obj.removed_ops = obj.removed_ops + 1; + db.modify( *itr, [&]( account_history_object& obj ){ + obj.next = account_history_id_type(); }); - // modify previous node's next pointer - // this should be always true, but just have a check here - if( itr != by_seq_idx.end() && itr->account == account_id ) - { - db.modify( *itr, [&]( account_history_object& obj ){ - obj.next = account_history_id_type(); - }); - } - // else need to modify the head pointer, but it shouldn't be true + } + // else need to modify the head pointer, but it shouldn't be true - // remove the operation history entry (1.11.x) if configured and no reference left - if( _partial_operations ) + // remove the operation history entry (1.11.x) if configured and no reference left + if( _partial_operations ) + { + // check for references + const auto& by_opid_idx = his_idx.indices().get(); + if( by_opid_idx.find( remove_op_id ) == by_opid_idx.end() ) { - // check for references - const auto& by_opid_idx = his_idx.indices().get(); - if( by_opid_idx.find( remove_op_id ) == by_opid_idx.end() ) - { - // if no reference, remove - db.remove( remove_op_id(db) ); - } + // if no reference, remove + db.remove( remove_op ); } } } @@ -315,7 +354,8 @@ void account_history_plugin::plugin_set_program_options( ("partial-operations", boost::program_options::value(), "Keep only those operations in memory that are related to account history tracking") ("max-ops-per-account", boost::program_options::value(), - "Maximum number of operations per account that will be kept in memory") + "Maximum number of operations per account that will be kept in memory. 
" + "Note that the actual number may be higher due to the min-blocks-to-keep option.") ("extended-max-ops-per-account", boost::program_options::value(), "Maximum number of operations to keep for accounts for which extended history is kept") ("extended-history-by-account", @@ -324,31 +364,46 @@ void account_history_plugin::plugin_set_program_options( ("extended-history-by-registrar", boost::program_options::value>()->composing()->multitoken(), "Track longer history for accounts with this registrar (may specify multiple times)") + ("min-blocks-to-keep", boost::program_options::value(), + "Operations which are in the latest X blocks will be kept in memory. " + "Note that this may exceed the limit defined by the max-ops-per-account option, " + "but will be limited by the max-ops-per-acc-by-min-blocks option.") + ("max-ops-per-acc-by-min-blocks", boost::program_options::value(), + "A potential higher limit on the maximum number of operations per account to be kept in memory " + "when the min-blocks-to-keep option exceeds the limit defined by the max-ops-per-account option. " + "If this is less than max-ops-per-account, max-ops-per-account will be used.") ; cfg.add(cli); } void account_history_plugin::plugin_initialize(const boost::program_options::variables_map& options) { + my->init_program_options( options ); + database().applied_block.connect( [&]( const signed_block& b){ my->update_account_histories(b); } ); my->_oho_index = database().add_index< primary_index< operation_history_index > >(); database().add_index< primary_index< account_history_index > >(); +} - LOAD_VALUE_SET(options, "track-account", my->_tracked_accounts, graphene::chain::account_id_type); - if (options.count("partial-operations") > 0) { - my->_partial_operations = options["partial-operations"].as(); - } - if (options.count("max-ops-per-account") > 0) { - my->_max_ops_per_account = options["max-ops-per-account"].as(); - } - if (options.count("extended-max-ops-per-account") > 0) { - auto emopa = options["extended-max-ops-per-account"].as(); - my->_extended_max_ops_per_account = (emopa > my->_max_ops_per_account) ? 
emopa : my->_max_ops_per_account; - } - LOAD_VALUE_SET(options, "extended-history-by-account", my->_extended_history_accounts, +void detail::account_history_plugin_impl::init_program_options(const boost::program_options::variables_map& options) +{ + LOAD_VALUE_SET(options, "track-account", _tracked_accounts, graphene::chain::account_id_type); + + utilities::get_program_option( options, "partial-operations", _partial_operations ); + utilities::get_program_option( options, "max-ops-per-account", _max_ops_per_account ); + utilities::get_program_option( options, "extended-max-ops-per-account", _extended_max_ops_per_account ); + if( _extended_max_ops_per_account < _max_ops_per_account ) + _extended_max_ops_per_account = _max_ops_per_account; + + LOAD_VALUE_SET(options, "extended-history-by-account", _extended_history_accounts, graphene::chain::account_id_type); - LOAD_VALUE_SET(options, "extended-history-by-registrar", my->_extended_history_registrars, + LOAD_VALUE_SET(options, "extended-history-by-registrar", _extended_history_registrars, graphene::chain::account_id_type); + + utilities::get_program_option( options, "min-blocks-to-keep", _min_blocks_to_keep ); + utilities::get_program_option( options, "max-ops-per-acc-by-min-blocks", _max_ops_per_acc_by_min_blocks ); + if( _max_ops_per_acc_by_min_blocks < _max_ops_per_account ) + _max_ops_per_acc_by_min_blocks = _max_ops_per_account; } void account_history_plugin::plugin_startup() From 54733bf25d23101bd3e2f9cffbecb5ead7405564 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 30 Sep 2022 16:42:18 +0000 Subject: [PATCH 259/338] Wrap long lines, update indents --- tests/tests/history_api_tests.cpp | 137 ++++++++++++++++++------------ 1 file changed, 83 insertions(+), 54 deletions(-) diff --git a/tests/tests/history_api_tests.cpp b/tests/tests/history_api_tests.cpp index f448aaf6d2..356b543cc3 100644 --- a/tests/tests/history_api_tests.cpp +++ b/tests/tests/history_api_tests.cpp @@ -202,7 +202,8 @@ BOOST_AUTO_TEST_CASE(get_account_history_additional) { // account_id_type() and dan share operation id 1(account create) - share can be also in id 0 // no history at all in the chain - vector histories = hist_api.get_account_history("1.2.0", operation_history_id_type(0), 4, operation_history_id_type(0)); + vector histories = + hist_api.get_account_history("1.2.0", operation_history_id_type(0), 4, operation_history_id_type(0)); BOOST_CHECK_EQUAL(histories.size(), 0u); create_bitasset("USD", account_id_type()); // create op 0 @@ -281,43 +282,51 @@ BOOST_AUTO_TEST_CASE(get_account_history_additional) { BOOST_CHECK_EQUAL(histories[3].id.instance(), 0u); // f(A, 1, 5, 9) = { 5, 3 } - histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, operation_history_id_type(9)); + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, + operation_history_id_type(9)); BOOST_CHECK_EQUAL(histories.size(), 2u); BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); // f(A, 1, 5, 6) = { 5, 3 } - histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, operation_history_id_type(6)); + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, + operation_history_id_type(6)); BOOST_CHECK_EQUAL(histories.size(), 2u); BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); // f(A, 1, 5, 5) = { 5, 3 } - histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 
5, operation_history_id_type(5)); + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, + operation_history_id_type(5)); BOOST_CHECK_EQUAL(histories.size(), 2u); BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); // f(A, 1, 5, 4) = { 3 } - histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, operation_history_id_type(4)); + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, + operation_history_id_type(4)); BOOST_CHECK_EQUAL(histories.size(), 1u); BOOST_CHECK_EQUAL(histories[0].id.instance(), 3u); // f(A, 1, 5, 3) = { 3 } - histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, operation_history_id_type(3)); + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, + operation_history_id_type(3)); BOOST_CHECK_EQUAL(histories.size(), 1u); BOOST_CHECK_EQUAL(histories[0].id.instance(), 3u); // f(A, 1, 5, 2) = { } - histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, operation_history_id_type(2)); + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, + operation_history_id_type(2)); BOOST_CHECK_EQUAL(histories.size(), 0u); // f(A, 1, 5, 1) = { } - histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, operation_history_id_type(1)); + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, + operation_history_id_type(1)); BOOST_CHECK_EQUAL(histories.size(), 0u); // f(A, 1, 5, 0) = { 5, 3 } - histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, operation_history_id_type(0)); + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, + operation_history_id_type(0)); BOOST_CHECK_EQUAL(histories.size(), 2u); BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); @@ -474,11 +483,13 @@ BOOST_AUTO_TEST_CASE(get_account_history_additional) { // 0 limits histories = hist_api.get_account_history("dan", operation_history_id_type(0), 0, operation_history_id_type(0)); BOOST_CHECK_EQUAL(histories.size(), 0u); - histories = hist_api.get_account_history("1.2.0", operation_history_id_type(3), 0, operation_history_id_type(9)); + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(3), 0, + operation_history_id_type(9)); BOOST_CHECK_EQUAL(histories.size(), 0u); // non existent account - histories = hist_api.get_account_history("1.2.18", operation_history_id_type(0), 4, operation_history_id_type(0)); + histories = hist_api.get_account_history("1.2.18", operation_history_id_type(0), 4, + operation_history_id_type(0)); BOOST_CHECK_EQUAL(histories.size(), 0u); // create a new account C = alice { 7 } @@ -487,16 +498,19 @@ BOOST_AUTO_TEST_CASE(get_account_history_additional) { generate_block(); // f(C, 0, 4, 10) = { 7 } - histories = hist_api.get_account_history("alice", operation_history_id_type(0), 4, operation_history_id_type(10)); + histories = hist_api.get_account_history("alice", operation_history_id_type(0), 4, + operation_history_id_type(10)); BOOST_CHECK_EQUAL(histories.size(), 1u); BOOST_CHECK_EQUAL(histories[0].id.instance(), 7u); // f(C, 8, 4, 10) = { } - histories = hist_api.get_account_history("alice", operation_history_id_type(8), 4, operation_history_id_type(10)); + histories = hist_api.get_account_history("alice", operation_history_id_type(8), 4, + 
operation_history_id_type(10)); BOOST_CHECK_EQUAL(histories.size(), 0u); // f(A, 0, 10, 0) = { 7, 5, 3, 1, 0 } - histories = hist_api.get_account_history("1.2.0", operation_history_id_type(0), 10, operation_history_id_type(0)); + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(0), 10, + operation_history_id_type(0)); BOOST_CHECK_EQUAL(histories.size(), 5u); BOOST_CHECK_EQUAL(histories[0].id.instance(), 7u); BOOST_CHECK_EQUAL(histories[1].id.instance(), 5u); @@ -534,19 +548,25 @@ BOOST_AUTO_TEST_CASE(track_account) { generate_block(); // anything against account_id_type() should be {} - vector histories = hist_api.get_account_history("1.2.0", operation_history_id_type(0), 10, operation_history_id_type(0)); + vector histories = + hist_api.get_account_history("1.2.0", operation_history_id_type(0), 10, operation_history_id_type(0)); BOOST_CHECK_EQUAL(histories.size(), 0u); - histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 10, operation_history_id_type(0)); + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 10, + operation_history_id_type(0)); BOOST_CHECK_EQUAL(histories.size(), 0u); - histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 1, operation_history_id_type(2)); + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 1, + operation_history_id_type(2)); BOOST_CHECK_EQUAL(histories.size(), 0u); // anything against alice should be {} - histories = hist_api.get_account_history("alice", operation_history_id_type(0), 10, operation_history_id_type(0)); + histories = hist_api.get_account_history("alice", operation_history_id_type(0), 10, + operation_history_id_type(0)); BOOST_CHECK_EQUAL(histories.size(), 0u); - histories = hist_api.get_account_history("alice", operation_history_id_type(1), 10, operation_history_id_type(0)); + histories = hist_api.get_account_history("alice", operation_history_id_type(1), 10, + operation_history_id_type(0)); BOOST_CHECK_EQUAL(histories.size(), 0u); - histories = hist_api.get_account_history("alice", operation_history_id_type(1), 1, operation_history_id_type(2)); + histories = hist_api.get_account_history("alice", operation_history_id_type(1), 1, + operation_history_id_type(2)); BOOST_CHECK_EQUAL(histories.size(), 0u); // dan should have history @@ -608,7 +628,8 @@ BOOST_AUTO_TEST_CASE(track_account2) { generate_block(); // all account_id_type() should have 4 ops {4,2,1,0} - vector histories = hist_api.get_account_history("committee-account", operation_history_id_type(0), 10, operation_history_id_type(0)); + vector histories = hist_api.get_account_history("committee-account", + operation_history_id_type(0), 10, operation_history_id_type(0)); BOOST_CHECK_EQUAL(histories.size(), 4u); BOOST_CHECK_EQUAL(histories[0].id.instance(), 4u); BOOST_CHECK_EQUAL(histories[1].id.instance(), 2u); @@ -616,18 +637,21 @@ BOOST_AUTO_TEST_CASE(track_account2) { BOOST_CHECK_EQUAL(histories[3].id.instance(), 0u); // all alice account should have 2 ops {3, 0} - histories = hist_api.get_account_history("alice", operation_history_id_type(0), 10, operation_history_id_type(0)); + histories = hist_api.get_account_history("alice", operation_history_id_type(0), 10, + operation_history_id_type(0)); BOOST_CHECK_EQUAL(histories.size(), 2u); BOOST_CHECK_EQUAL(histories[0].id.instance(), 3u); BOOST_CHECK_EQUAL(histories[1].id.instance(), 0u); // alice first op should be {0} - histories = hist_api.get_account_history("alice", operation_history_id_type(0), 
1, operation_history_id_type(1)); + histories = hist_api.get_account_history("alice", operation_history_id_type(0), 1, + operation_history_id_type(1)); BOOST_CHECK_EQUAL(histories.size(), 1u); BOOST_CHECK_EQUAL(histories[0].id.instance(), 0u); // alice second op should be {3} - histories = hist_api.get_account_history("alice", operation_history_id_type(1), 1, operation_history_id_type(0)); + histories = hist_api.get_account_history("alice", operation_history_id_type(1), 1, + operation_history_id_type(0)); BOOST_CHECK_EQUAL(histories.size(), 1u); BOOST_CHECK_EQUAL(histories[0].id.instance(), 3u); @@ -699,7 +723,7 @@ BOOST_AUTO_TEST_CASE(get_account_history_operations) { generate_block(); // history is set to limit transactions to 75 (see database_fixture.hpp) - // so asking for more should only return 75 (and not throw exception, + // so asking for more should only return 75 (and not throw exception, // see https://github.com/bitshares/bitshares-core/issues/1490 histories = hist_api.get_account_history_operations( "committee-account", account_create_op_id, operation_history_id_type(), operation_history_id_type(), 100); @@ -715,7 +739,7 @@ BOOST_AUTO_TEST_CASE(get_account_history_operations) { } //new test case for increasing the limit based on the config file BOOST_AUTO_TEST_CASE(api_limit_get_account_history_operations) { - try { + try { graphene::app::history_api hist_api(app); //account_id_type() do 3 ops create_bitasset("CNY", account_id_type()); @@ -730,38 +754,38 @@ BOOST_AUTO_TEST_CASE(api_limit_get_account_history_operations) { //account_id_type() did 1 asset_create op vector histories = hist_api.get_account_history_operations( - "committee-account", asset_create_op_id, operation_history_id_type(), operation_history_id_type(), 200); + "committee-account", asset_create_op_id, operation_history_id_type(), operation_history_id_type(), 200); BOOST_CHECK_EQUAL(histories.size(), 1u); BOOST_CHECK_EQUAL(histories[0].id.instance(), 0u); BOOST_CHECK_EQUAL(histories[0].op.which(), asset_create_op_id); //account_id_type() did 2 account_create ops histories = hist_api.get_account_history_operations( - "committee-account", account_create_op_id, operation_history_id_type(), operation_history_id_type(), 200); + "committee-account", account_create_op_id, operation_history_id_type(), operation_history_id_type(), 200); BOOST_CHECK_EQUAL(histories.size(), 2u); BOOST_CHECK_EQUAL(histories[0].op.which(), account_create_op_id); // No asset_create op larger than id1 histories = hist_api.get_account_history_operations( - "committee-account", asset_create_op_id, operation_history_id_type(), operation_history_id_type(1), 200); + "committee-account", asset_create_op_id, operation_history_id_type(), operation_history_id_type(1), 200); BOOST_CHECK_EQUAL(histories.size(), 0u); // Limit 1 returns 1 result histories = hist_api.get_account_history_operations( - "committee-account", account_create_op_id, operation_history_id_type(),operation_history_id_type(), 1); + "committee-account", account_create_op_id, operation_history_id_type(),operation_history_id_type(), 1); BOOST_CHECK_EQUAL(histories.size(), 1u); BOOST_CHECK_EQUAL(histories[0].op.which(), account_create_op_id); // alice has 1 op histories = hist_api.get_account_history_operations( - "alice", account_create_op_id, operation_history_id_type(),operation_history_id_type(), 200); + "alice", account_create_op_id, operation_history_id_type(),operation_history_id_type(), 200); BOOST_CHECK_EQUAL(histories.size(), 1u); BOOST_CHECK_EQUAL(histories[0].op.which(), 
account_create_op_id); // create a bunch of accounts for(int i = 0; i < 126; ++i) { - std::string acct_name = "mytempacct" + std::to_string(i); + std::string acct_name = "mytempacct" + std::to_string(i); create_account(acct_name); } generate_block(); @@ -769,19 +793,22 @@ BOOST_AUTO_TEST_CASE(api_limit_get_account_history_operations) { // history is set to limit transactions to 125 (see database_fixture.hpp) // so asking for more should only return 125 (and not throw exception, // see https://github.com/bitshares/bitshares-core/issues/1490 - GRAPHENE_CHECK_THROW(hist_api.get_account_history_operations("commitee-account", account_create_op_id, operation_history_id_type(),operation_history_id_type(), 301), fc::exception); - histories = hist_api.get_account_history_operations("committee-account", account_create_op_id, operation_history_id_type(), operation_history_id_type(), 200); + GRAPHENE_CHECK_THROW( hist_api.get_account_history_operations("commitee-account", account_create_op_id, + operation_history_id_type(),operation_history_id_type(), 301), + fc::exception ); + histories = hist_api.get_account_history_operations("committee-account", account_create_op_id, + operation_history_id_type(), operation_history_id_type(), 200); BOOST_REQUIRE_EQUAL( histories.size(), 125u ); - } - catch (fc::exception &e) - { - edump((e.to_detail_string())); + } + catch (fc::exception &e) + { + edump((e.to_detail_string())); throw; - } + } } BOOST_AUTO_TEST_CASE(api_limit_get_account_history) { - try{ + try{ graphene::app::history_api hist_api(app); //account_id_type() do 3 ops create_bitasset("USD", account_id_type()); @@ -794,7 +821,8 @@ BOOST_AUTO_TEST_CASE(api_limit_get_account_history) { int asset_create_op_id = operation::tag::value; int account_create_op_id = operation::tag::value; //account_id_type() did 3 ops and includes id0 - vector histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 210, operation_history_id_type()); + vector histories = + hist_api.get_account_history("1.2.0", operation_history_id_type(), 210, operation_history_id_type()); BOOST_CHECK_EQUAL(histories.size(), 3u); BOOST_CHECK_EQUAL(histories[2].id.instance(), 0u); @@ -820,21 +848,23 @@ BOOST_AUTO_TEST_CASE(api_limit_get_account_history) { // create a bunch of accounts for(int i = 0; i < 126; ++i) { - std::string acct_name = "mytempacct" + std::to_string(i); - create_account(acct_name); + std::string acct_name = "mytempacct" + std::to_string(i); + create_account(acct_name); } generate_block(); - GRAPHENE_CHECK_THROW(hist_api.get_account_history("1.2.0", operation_history_id_type(), 260, operation_history_id_type()), fc::exception); + GRAPHENE_CHECK_THROW( hist_api.get_account_history("1.2.0", operation_history_id_type(), 260, + operation_history_id_type()), + fc::exception ); histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 210, operation_history_id_type()); BOOST_REQUIRE_EQUAL( histories.size(), 125u ); - } catch (fc::exception &e) { + } catch (fc::exception &e) { edump((e.to_detail_string())); throw; - } + } } BOOST_AUTO_TEST_CASE(api_limit_get_relative_account_history) { - try{ + try{ graphene::app::history_api hist_api(app); //account_id_type() do 3 ops create_bitasset("USD", account_id_type()); @@ -848,14 +878,14 @@ BOOST_AUTO_TEST_CASE(api_limit_get_relative_account_history) { vector histories = hist_api.get_relative_account_history("1.2.0", 126, 210, 0); BOOST_REQUIRE_EQUAL( histories.size(), 0u ); - } catch (fc::exception &e) { + } catch (fc::exception &e) { 
edump((e.to_detail_string())); throw; - } + } } BOOST_AUTO_TEST_CASE(api_limit_get_account_history_by_operations) { - try { + try { graphene::app::history_api hist_api(app); flat_set operation_types; //account_id_type() do 3 ops @@ -867,12 +897,11 @@ BOOST_AUTO_TEST_CASE(api_limit_get_account_history_by_operations) { GRAPHENE_CHECK_THROW(hist_api.get_account_history_by_operations("1.2.0", operation_types, 0, 260), fc::exception); auto histories = hist_api.get_account_history_by_operations("1.2.0", operation_types, 0, 210); BOOST_REQUIRE_EQUAL( histories.total_count, 3u ); - } - catch (fc::exception &e) { + } + catch (fc::exception &e) { edump((e.to_detail_string())); throw; - } + } } - BOOST_AUTO_TEST_SUITE_END() From 72285166430caf8b95cb6d51b498aa128827fcde Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 30 Sep 2022 16:57:45 +0000 Subject: [PATCH 260/338] Update tests for the new min-blocks-to-keep option --- tests/common/database_fixture.cpp | 4 ++++ tests/tests/history_api_tests.cpp | 12 ++++++------ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/tests/common/database_fixture.cpp b/tests/common/database_fixture.cpp index 4cf9d749bb..e7df360c9f 100644 --- a/tests/common/database_fixture.cpp +++ b/tests/common/database_fixture.cpp @@ -210,15 +210,18 @@ std::shared_ptr database_fixture_base::in if (fixture.current_test_name == "get_account_history_operations") { fc::set_option( options, "max-ops-per-account", (uint64_t)75 ); + fc::set_option( options, "min-blocks-to-keep", (uint32_t)0 ); } if (fixture.current_test_name == "api_limit_get_account_history_operations") { fc::set_option( options, "max-ops-per-account", (uint64_t)125 ); + fc::set_option( options, "min-blocks-to-keep", (uint32_t)0 ); fc::set_option( options, "api-limit-get-account-history-operations", (uint32_t)300 ); } if(fixture.current_test_name =="api_limit_get_account_history") { fc::set_option( options, "max-ops-per-account", (uint64_t)125 ); + fc::set_option( options, "min-blocks-to-keep", (uint32_t)0 ); fc::set_option( options, "api-limit-get-account-history", (uint32_t)250 ); } if(fixture.current_test_name =="api_limit_get_grouped_limit_orders") @@ -228,6 +231,7 @@ std::shared_ptr database_fixture_base::in if(fixture.current_test_name =="api_limit_get_relative_account_history") { fc::set_option( options, "max-ops-per-account", (uint64_t)125 ); + fc::set_option( options, "min-blocks-to-keep", (uint32_t)0 ); fc::set_option( options, "api-limit-get-relative-account-history", (uint32_t)250 ); } if(fixture.current_test_name =="api_limit_get_account_history_by_operations") diff --git a/tests/tests/history_api_tests.cpp b/tests/tests/history_api_tests.cpp index 356b543cc3..a7798de9b2 100644 --- a/tests/tests/history_api_tests.cpp +++ b/tests/tests/history_api_tests.cpp @@ -49,7 +49,7 @@ BOOST_AUTO_TEST_CASE(get_account_history) { create_account( "bob", account_id_type()(db), GRAPHENE_TEMP_ACCOUNT(db) ); generate_block(); - fc::usleep(fc::milliseconds(2000)); + fc::usleep(fc::milliseconds(100)); int asset_create_op_id = operation::tag::value; int account_create_op_id = operation::tag::value; @@ -116,7 +116,7 @@ BOOST_AUTO_TEST_CASE(get_account_history_virtual_operation_test) { create_sell_order( bob_id, asset(100, usd_id), asset(100) ); generate_block(); - fc::usleep(fc::milliseconds(200)); + fc::usleep(fc::milliseconds(100)); auto fill_order_op_id = operation::tag::value; @@ -148,7 +148,7 @@ BOOST_AUTO_TEST_CASE(get_account_history_notify_all_on_creation) { create_account( "bob", account_id_type()(db), 
GRAPHENE_TEMP_ACCOUNT(db) ); generate_block(); - fc::usleep(fc::milliseconds(2000)); + fc::usleep(fc::milliseconds(100)); int asset_create_op_id = operation::tag::value; int account_create_op_id = operation::tag::value; @@ -679,7 +679,7 @@ BOOST_AUTO_TEST_CASE(get_account_history_operations) { create_account("alice"); generate_block(); - fc::usleep(fc::milliseconds(2000)); + fc::usleep(fc::milliseconds(100)); int asset_create_op_id = operation::tag::value; int account_create_op_id = operation::tag::value; @@ -722,7 +722,7 @@ BOOST_AUTO_TEST_CASE(get_account_history_operations) { } generate_block(); - // history is set to limit transactions to 75 (see database_fixture.hpp) + // history is set to limit transactions to 75 (see database_fixture.cpp) // so asking for more should only return 75 (and not throw exception, // see https://github.com/bitshares/bitshares-core/issues/1490 histories = hist_api.get_account_history_operations( @@ -790,7 +790,7 @@ BOOST_AUTO_TEST_CASE(api_limit_get_account_history_operations) { } generate_block(); - // history is set to limit transactions to 125 (see database_fixture.hpp) + // history is set to limit transactions to 125 (see database_fixture.cpp) // so asking for more should only return 125 (and not throw exception, // see https://github.com/bitshares/bitshares-core/issues/1490 GRAPHENE_CHECK_THROW( hist_api.get_account_history_operations("commitee-account", account_create_op_id, From 32d4d0a54a1e3b24e93f54308f58ef74ea978015 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 30 Sep 2022 19:56:26 +0000 Subject: [PATCH 261/338] Update description of some program options --- .../account_history/account_history_plugin.cpp | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/libraries/plugins/account_history/account_history_plugin.cpp b/libraries/plugins/account_history/account_history_plugin.cpp index 8779f64d63..eca42fe0d3 100644 --- a/libraries/plugins/account_history/account_history_plugin.cpp +++ b/libraries/plugins/account_history/account_history_plugin.cpp @@ -357,7 +357,8 @@ void account_history_plugin::plugin_set_program_options( "Maximum number of operations per account that will be kept in memory. " "Note that the actual number may be higher due to the min-blocks-to-keep option.") ("extended-max-ops-per-account", boost::program_options::value(), - "Maximum number of operations to keep for accounts for which extended history is kept") + "Maximum number of operations to keep for accounts for which extended history is kept. " + "This option only takes effect when track-account is not used and max-ops-per-account is not zero.") ("extended-history-by-account", boost::program_options::value>()->composing()->multitoken(), "Track longer history for these accounts (may specify multiple times)") @@ -366,12 +367,13 @@ void account_history_plugin::plugin_set_program_options( "Track longer history for accounts with this registrar (may specify multiple times)") ("min-blocks-to-keep", boost::program_options::value(), "Operations which are in the latest X blocks will be kept in memory. " - "Note that this may exceed the limit defined by the max-ops-per-account option, " - "but will be limited by the max-ops-per-acc-by-min-blocks option.") + "This option only takes effect when track-account is not used and max-ops-per-account is not zero. 
" + "Note that this option may cause more history records to be kept in memory than the limit defined by the " + "max-ops-per-account option, but the amount will be limited by the max-ops-per-acc-by-min-blocks option.") ("max-ops-per-acc-by-min-blocks", boost::program_options::value(), "A potential higher limit on the maximum number of operations per account to be kept in memory " - "when the min-blocks-to-keep option exceeds the limit defined by the max-ops-per-account option. " - "If this is less than max-ops-per-account, max-ops-per-account will be used.") + "when the min-blocks-to-keep option causes the amount to exceed the limit defined by the " + "max-ops-per-account option. If this is less than max-ops-per-account, max-ops-per-account will be used.") ; cfg.add(cli); } From e97455cb2d5904fb34a084ac008a72cdb935e7e0 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 30 Sep 2022 21:21:52 +0000 Subject: [PATCH 262/338] Add tests for min-blocks-to-keep option --- tests/common/database_fixture.cpp | 6 +++ tests/tests/history_api_tests.cpp | 77 +++++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+) diff --git a/tests/common/database_fixture.cpp b/tests/common/database_fixture.cpp index e7df360c9f..98584ac53c 100644 --- a/tests/common/database_fixture.cpp +++ b/tests/common/database_fixture.cpp @@ -207,6 +207,12 @@ std::shared_ptr database_fixture_base::in fc::set_option( options, "p2p-endpoint", std::string( ep ) ); } + if (fixture.current_test_name == "min_blocks_to_keep_test") + { + fc::set_option( options, "max-ops-per-account", (uint64_t)2 ); + fc::set_option( options, "min-blocks-to-keep", (uint32_t)3 ); + fc::set_option( options, "max-ops-per-acc-by-min-blocks", (uint64_t)5 ); + } if (fixture.current_test_name == "get_account_history_operations") { fc::set_option( options, "max-ops-per-account", (uint64_t)75 ); diff --git a/tests/tests/history_api_tests.cpp b/tests/tests/history_api_tests.cpp index a7798de9b2..11c1c70445 100644 --- a/tests/tests/history_api_tests.cpp +++ b/tests/tests/history_api_tests.cpp @@ -605,6 +605,7 @@ BOOST_AUTO_TEST_CASE(track_account) { throw; } } + BOOST_AUTO_TEST_CASE(track_account2) { try { graphene::app::history_api hist_api(app); @@ -669,6 +670,82 @@ BOOST_AUTO_TEST_CASE(track_account2) { } } +BOOST_AUTO_TEST_CASE(min_blocks_to_keep_test) { + try { + + graphene::app::history_api hist_api(app); + + generate_block(); + generate_block(); + generate_block(); + generate_block(); + generate_block(); + + vector histories = + hist_api.get_account_history("1.2.0", operation_history_id_type(0), 10, operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + // max-ops-per-account = 2 + // min-blocks-to-keep = 3 + // max-ops-per-acc-by-min-blocks = 5 + + //account_id_type() creates some ops + create_bitasset("USA", account_id_type()); + create_bitasset("USB", account_id_type()); + create_bitasset("USC", account_id_type()); + + generate_block(); + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(0), 10, + operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 3u); + + + generate_block(); + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(0), 10, + operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 3u); + + create_bitasset("USD", account_id_type()); + create_bitasset("USE", account_id_type()); + create_bitasset("USF", account_id_type()); + create_bitasset("USG", account_id_type()); + + generate_block(); + histories = 
hist_api.get_account_history("1.2.0", operation_history_id_type(0), 10, + operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 5u); + + generate_block(); + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(0), 10, + operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 4u); + + generate_block(); + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(0), 10, + operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 4u); + + generate_block(); + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(0), 10, + operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + + generate_block(); + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(0), 10, + operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + + generate_block(); + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(0), 10, + operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + + } catch (fc::exception &e) { + edump((e.to_detail_string())); + throw; + } +} + BOOST_AUTO_TEST_CASE(get_account_history_operations) { try { graphene::app::history_api hist_api(app); From 927426180356c385178d4a15b6df6529db712c6c Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 1 Oct 2022 07:26:31 +0000 Subject: [PATCH 263/338] Remove redundant or commented out code --- .../graphene/account_history/account_history_plugin.hpp | 8 -------- 1 file changed, 8 deletions(-) diff --git a/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp b/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp index 6ceda572a7..f630a36ed2 100644 --- a/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp +++ b/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp @@ -24,17 +24,9 @@ #pragma once #include -#include - -#include - -#include namespace graphene { namespace account_history { using namespace chain; - //using namespace graphene::db; - //using boost::multi_index_container; - //using namespace boost::multi_index; // // Plugins should #define their SPACE_ID's so plugins with From 0ca04b9271843b9605d96a72887ec1cce4009496 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 1 Oct 2022 09:46:17 +0000 Subject: [PATCH 264/338] Delete too old history when new block arrives --- .../chain/include/graphene/chain/config.hpp | 2 +- .../account_history_plugin.cpp | 83 +++++++++++++++---- .../account_history_plugin.hpp | 38 ++++++++- 3 files changed, 103 insertions(+), 20 deletions(-) diff --git a/libraries/chain/include/graphene/chain/config.hpp b/libraries/chain/include/graphene/chain/config.hpp index 4dc548855f..5390eb2270 100644 --- a/libraries/chain/include/graphene/chain/config.hpp +++ b/libraries/chain/include/graphene/chain/config.hpp @@ -32,7 +32,7 @@ #define GRAPHENE_MAX_NESTED_OBJECTS (200) -const std::string GRAPHENE_CURRENT_DB_VERSION = "20220916"; +const std::string GRAPHENE_CURRENT_DB_VERSION = "20220930"; #define GRAPHENE_RECENTLY_MISSED_COUNT_INCREMENT 4 #define GRAPHENE_RECENTLY_MISSED_COUNT_DECREMENT 3 diff --git a/libraries/plugins/account_history/account_history_plugin.cpp b/libraries/plugins/account_history/account_history_plugin.cpp index eca42fe0d3..2214d81135 100644 --- a/libraries/plugins/account_history/account_history_plugin.cpp +++ 
b/libraries/plugins/account_history/account_history_plugin.cpp @@ -81,9 +81,11 @@ class account_history_plugin_impl /** add one history record, then check and remove the earliest history record(s) */ void add_account_history( const account_id_type& account_id, const operation_history_object& op ); - void remove_old_histories( const account_statistics_object& stats_obj, - const account_history_object& latest_aho, - const operation_history_object& latest_op ); + void remove_old_histories_by_account( const account_statistics_object& stats_obj, + uint32_t latest_block_num, + const exceeded_account_info_object* p_exa_obj = nullptr ); + + void remove_old_histories( uint32_t latest_block_num ); void init_program_options(const boost::program_options::variables_map& options); }; @@ -221,6 +223,8 @@ void account_history_plugin_impl::update_account_histories( const signed_block& if (_partial_operations && ! oho.valid()) skip_oho_id(); } + + remove_old_histories( b.block_num() ); } void account_history_plugin_impl::add_account_history( const account_id_type& account_id, @@ -240,7 +244,7 @@ void account_history_plugin_impl::add_account_history( const account_id_type& ac obj.total_ops = aho.sequence; }); // Remove the earliest account history entries if too many. - remove_old_histories( stats_obj, aho, op ); + remove_old_histories_by_account( stats_obj, op.block_num ); } uint64_t account_history_plugin_impl::get_max_ops_to_keep( const account_id_type& account_id ) @@ -265,31 +269,51 @@ uint64_t account_history_plugin_impl::get_max_ops_to_keep( const account_id_type return max_ops_to_keep; } +void account_history_plugin_impl::remove_old_histories( uint32_t latest_block_num ) +{ + if( latest_block_num <= _min_blocks_to_keep ) + return; + uint32_t oldest_block_num_to_keep = latest_block_num - _min_blocks_to_keep + 1; + + graphene::chain::database& db = database(); + const auto& exa_idx = db.get_index_type().indices().get(); + for( auto itr = exa_idx.begin(); + itr != exa_idx.end() && itr->block_num < oldest_block_num_to_keep; + itr = exa_idx.begin() ) + { + const auto& stats_obj = db.get_account_stats_by_owner( itr->account_id ); + remove_old_histories_by_account( stats_obj, latest_block_num, &(*itr) ); + } +} + // Remove the earliest account history entries if too many. 
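A minimal sketch of the retention rule that the rewritten trimming logic below appears to implement, combining max-ops-per-account, min-blocks-to-keep and max-ops-per-acc-by-min-blocks. The function name and parameters here are illustrative only and are not symbols from the code base:

    #include <cstdint>

    // An entry may be removed only when the account already keeps more entries than
    // max-ops-per-account, AND the entry is either older than the min-blocks-to-keep
    // window or the account keeps more entries than max-ops-per-acc-by-min-blocks.
    bool may_remove_entry( uint64_t kept_ops, uint64_t max_ops_per_account,
                           uint32_t entry_block_num, uint32_t latest_block_num,
                           uint32_t min_blocks_to_keep, uint64_t max_ops_by_min_blocks )
    {
       if( kept_ops <= max_ops_per_account )
          return false; // still under the per-account limit, keep everything
       const bool outside_window = ( entry_block_num + min_blocks_to_keep <= latest_block_num );
       return outside_window || ( kept_ops > max_ops_by_min_blocks );
    }

With the values used by min_blocks_to_keep_test earlier in this series (max-ops-per-account 2, min-blocks-to-keep 3, max-ops-per-acc-by-min-blocks 5), this is why 3 fresh operations are all kept, 7 operations are trimmed to 5, and the count only falls back to 2 once every operation has aged out of the 3-block window.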
-void account_history_plugin_impl::remove_old_histories( const account_statistics_object& stats_obj, - const account_history_object& latest_aho, - const operation_history_object& latest_op ) +void account_history_plugin_impl::remove_old_histories_by_account( const account_statistics_object& stats_obj, + uint32_t latest_block_num, + const exceeded_account_info_object* p_exa_obj ) { graphene::chain::database& db = database(); - const account_id_type& account_id = latest_aho.account; - auto max_ops_to_keep = get_max_ops_to_keep( account_id ); + const account_id_type& account_id = stats_obj.owner; + auto max_ops_to_keep = get_max_ops_to_keep( account_id ); // >= 1 + uint32_t oldest_block_num = latest_block_num; while( stats_obj.total_ops - stats_obj.removed_ops > max_ops_to_keep ) { // look for the earliest entry const auto& his_idx = db.get_index_type(); const auto& by_seq_idx = his_idx.indices().get(); auto itr = by_seq_idx.lower_bound( account_id ); - // make sure don't remove the one just added - if( itr == by_seq_idx.end() || itr->account != account_id || itr->id == latest_aho.id ) - return; + // make sure don't remove the latest one + // this should always be false, just check to be safe + if( itr == by_seq_idx.end() || itr->account != account_id || itr->id == stats_obj.most_recent_op ) + break; // if found, check whether to remove const auto remove_op_id = itr->operation_id; const auto& remove_op = remove_op_id(db); - if( remove_op.block_num + _min_blocks_to_keep > latest_op.block_num + oldest_block_num = remove_op.block_num; + if( remove_op.block_num + _min_blocks_to_keep > latest_block_num && stats_obj.total_ops - stats_obj.removed_ops <= _max_ops_per_acc_by_min_blocks ) - return; + break; // remove the entry, and adjust account stats object const auto itr_remove = itr; @@ -320,15 +344,36 @@ void account_history_plugin_impl::remove_old_histories( const account_statistics } } } + // deal with exceeded_account_info_object + if( !p_exa_obj ) + { + const auto& exa_idx = db.get_index_type().indices().get(); + auto itr = exa_idx.find( account_id ); + if( itr != exa_idx.end() ) + p_exa_obj = &(*itr); + } + if( stats_obj.total_ops - stats_obj.removed_ops > max_ops_to_keep ) + { + // create or update exceeded_account_info_object + if( p_exa_obj ) + db.modify( *p_exa_obj, [oldest_block_num]( exceeded_account_info_object& obj ){ + obj.block_num = oldest_block_num; + }); + else + db.create( + [&account_id, oldest_block_num]( exceeded_account_info_object& obj ){ + obj.account_id = account_id; + obj.block_num = oldest_block_num; + }); + } + // remove exceeded_account_info_object if found + else if( p_exa_obj ) + db.remove( *p_exa_obj ); } } // end namespace detail - - - - account_history_plugin::account_history_plugin(graphene::app::application& app) : plugin(app), my( std::make_unique(*this) ) @@ -385,6 +430,8 @@ void account_history_plugin::plugin_initialize(const boost::program_options::var database().applied_block.connect( [&]( const signed_block& b){ my->update_account_histories(b); } ); my->_oho_index = database().add_index< primary_index< operation_history_index > >(); database().add_index< primary_index< account_history_index > >(); + + database().add_index< primary_index< exceeded_account_info_index > >(); } void detail::account_history_plugin_impl::init_program_options(const boost::program_options::variables_map& options) diff --git a/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp 
b/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp index f630a36ed2..dd52eafac4 100644 --- a/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp +++ b/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp @@ -25,6 +25,8 @@ #include +#include + namespace graphene { namespace account_history { using namespace chain; @@ -44,9 +46,40 @@ namespace graphene { namespace account_history { enum account_history_object_type { - key_account_object_type = 0 + exceeded_account_info_object_type = 0 }; +/// This struct tracks accounts that have exceeded the max-ops-per-account limit +struct exceeded_account_info_object : public abstract_object +{ + static constexpr uint8_t space_id = ACCOUNT_HISTORY_SPACE_ID; + static constexpr uint8_t type_id = exceeded_account_info_object_type; + + /// The ID of the account + account_id_type account_id; + /// The height of the block containing the oldest (not yet removed) operation related to this account + uint32_t block_num; +}; + +struct by_account; +struct by_block_num; +using exceeded_account_multi_idx_type = multi_index_container< + exceeded_account_info_object, + indexed_by< + ordered_unique< tag, member< object, object_id_type, &object::id > >, + ordered_unique< tag, + member< exceeded_account_info_object, account_id_type, &exceeded_account_info_object::account_id > >, + ordered_unique< tag, + composite_key< + exceeded_account_info_object, + member< exceeded_account_info_object, uint32_t, &exceeded_account_info_object::block_num >, + member< object, object_id_type, &object::id > + > + > + > +>; + +using exceeded_account_info_index = generic_index< exceeded_account_info_object, exceeded_account_multi_idx_type >; namespace detail { @@ -73,3 +106,6 @@ class account_history_plugin : public graphene::app::plugin }; } } //graphene::account_history + +FC_REFLECT_DERIVED( graphene::account_history::exceeded_account_info_object, (graphene::db::object), + (account_id)(block_num) ) From 183fd706bcbf92dc6f10b37251f9942dd705040a Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 1 Oct 2022 21:01:01 +0000 Subject: [PATCH 265/338] Modify LOAD_VALUE_SET macro to require a semicolon --- libraries/app/include/graphene/app/plugin.hpp | 9 ++++++--- libraries/plugins/witness/witness.cpp | 2 +- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/libraries/app/include/graphene/app/plugin.hpp b/libraries/app/include/graphene/app/plugin.hpp index cc61aafc27..5dd38a0b90 100644 --- a/libraries/app/include/graphene/app/plugin.hpp +++ b/libraries/app/include/graphene/app/plugin.hpp @@ -135,10 +135,13 @@ namespace impl { #define DEFAULT_VALUE_VECTOR(value) default_value({fc::json::to_string(value)}, fc::json::to_string(value)) #define LOAD_VALUE_SET(options, name, container, type) \ -if( options.count(name) > 0 ) { \ +do { \ + if( options.count(name) > 0 ) { \ const std::vector& ops = options[name].as>(); \ - std::transform(ops.begin(), ops.end(), std::inserter(container, container.end()), &graphene::app::impl::dejsonify); \ -} + std::transform(ops.begin(), ops.end(), std::inserter(container, container.end()), \ + &graphene::app::impl::dejsonify); \ + } \ +} while (false) /// @} } } //graphene::app diff --git a/libraries/plugins/witness/witness.cpp b/libraries/plugins/witness/witness.cpp index 1d9ad36c9e..82352485bc 100644 --- a/libraries/plugins/witness/witness.cpp +++ b/libraries/plugins/witness/witness.cpp @@ -120,7 +120,7 @@ void 
witness_plugin::plugin_initialize(const boost::program_options::variables_m { try { ilog("witness plugin: plugin_initialize() begin"); _options = &options; - LOAD_VALUE_SET(options, "witness-id", _witnesses, chain::witness_id_type) + LOAD_VALUE_SET(options, "witness-id", _witnesses, chain::witness_id_type); if( options.count("private-key") > 0 ) { From 56b1026d00314e52bb635de613b8aeed22937b65 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 1 Oct 2022 21:41:34 +0000 Subject: [PATCH 266/338] Simplify code --- .../account_history_plugin.cpp | 43 +++++++++++-------- 1 file changed, 25 insertions(+), 18 deletions(-) diff --git a/libraries/plugins/account_history/account_history_plugin.cpp b/libraries/plugins/account_history/account_history_plugin.cpp index 2214d81135..97a9d46092 100644 --- a/libraries/plugins/account_history/account_history_plugin.cpp +++ b/libraries/plugins/account_history/account_history_plugin.cpp @@ -76,22 +76,27 @@ class account_history_plugin_impl uint32_t _min_blocks_to_keep = 30000; uint64_t _max_ops_per_acc_by_min_blocks = 1000; + uint32_t _latest_block_number_to_remove = 0; + uint64_t get_max_ops_to_keep( const account_id_type& account_id ); /** add one history record, then check and remove the earliest history record(s) */ void add_account_history( const account_id_type& account_id, const operation_history_object& op ); void remove_old_histories_by_account( const account_statistics_object& stats_obj, - uint32_t latest_block_num, const exceeded_account_info_object* p_exa_obj = nullptr ); - void remove_old_histories( uint32_t latest_block_num ); + void remove_old_histories(); void init_program_options(const boost::program_options::variables_map& options); }; void account_history_plugin_impl::update_account_histories( const signed_block& b ) { + auto latest_block_number = b.block_num(); + _latest_block_number_to_remove = ( latest_block_number > _min_blocks_to_keep ) ? + ( latest_block_number - _min_blocks_to_keep ) : 0; + graphene::chain::database& db = database(); const vector >& hist = db.get_applied_operations(); bool is_first = true; @@ -224,7 +229,7 @@ void account_history_plugin_impl::update_account_histories( const signed_block& skip_oho_id(); } - remove_old_histories( b.block_num() ); + remove_old_histories(); } void account_history_plugin_impl::add_account_history( const account_id_type& account_id, @@ -244,7 +249,7 @@ void account_history_plugin_impl::add_account_history( const account_id_type& ac obj.total_ops = aho.sequence; }); // Remove the earliest account history entries if too many. 
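The do { ... } while(false) form given to LOAD_VALUE_SET in PATCH 265 above is the usual way to make a function-like macro expand to a single statement, which is why the call site in witness.cpp now needs a trailing semicolon. A self-contained sketch of the difference, using hypothetical macros rather than the real one:

    #include <iostream>

    #define BRACE_ONLY(x)  { std::cout << (x) << '\n'; }              // old-style body
    #define SINGLE_STMT(x) do { std::cout << (x) << '\n'; } while(false)

    int main()
    {
       bool flag = true;
       // if( flag ) BRACE_ONLY(1); else std::cout << "no\n";   // does not compile:
       // the ';' after the braces ends the if statement and leaves a dangling else.
       if( flag )
          SINGLE_STMT(1);        // expands to one statement, so the ';' is required here
       else
          std::cout << "no\n";   // and the else still binds to the if above
       return 0;
    }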
- remove_old_histories_by_account( stats_obj, op.block_num ); + remove_old_histories_by_account( stats_obj ); } uint64_t account_history_plugin_impl::get_max_ops_to_keep( const account_id_type& account_id ) @@ -269,34 +274,36 @@ uint64_t account_history_plugin_impl::get_max_ops_to_keep( const account_id_type return max_ops_to_keep; } -void account_history_plugin_impl::remove_old_histories( uint32_t latest_block_num ) +void account_history_plugin_impl::remove_old_histories() { - if( latest_block_num <= _min_blocks_to_keep ) + if( 0 == _latest_block_number_to_remove ) return; - uint32_t oldest_block_num_to_keep = latest_block_num - _min_blocks_to_keep + 1; - graphene::chain::database& db = database(); + const graphene::chain::database& db = database(); const auto& exa_idx = db.get_index_type().indices().get(); - for( auto itr = exa_idx.begin(); - itr != exa_idx.end() && itr->block_num < oldest_block_num_to_keep; - itr = exa_idx.begin() ) + auto itr = exa_idx.begin(); + while( itr != exa_idx.end() && itr->block_num <= _latest_block_number_to_remove ) { const auto& stats_obj = db.get_account_stats_by_owner( itr->account_id ); - remove_old_histories_by_account( stats_obj, latest_block_num, &(*itr) ); + remove_old_histories_by_account( stats_obj, &(*itr) ); + itr = exa_idx.begin(); } } // Remove the earliest account history entries if too many. void account_history_plugin_impl::remove_old_histories_by_account( const account_statistics_object& stats_obj, - uint32_t latest_block_num, const exceeded_account_info_object* p_exa_obj ) { graphene::chain::database& db = database(); const account_id_type& account_id = stats_obj.owner; auto max_ops_to_keep = get_max_ops_to_keep( account_id ); // >= 1 + auto number_of_ops_to_remove = ( stats_obj.total_ops > max_ops_to_keep ) ? + ( stats_obj.total_ops - max_ops_to_keep ) : 0; + auto number_of_ops_to_remove_by_blks = ( stats_obj.total_ops > _max_ops_per_acc_by_min_blocks ) ? 
+ ( stats_obj.total_ops - _max_ops_per_acc_by_min_blocks ) : 0; - uint32_t oldest_block_num = latest_block_num; - while( stats_obj.total_ops - stats_obj.removed_ops > max_ops_to_keep ) + uint32_t oldest_block_num = _latest_block_number_to_remove; + while( stats_obj.removed_ops < number_of_ops_to_remove ) { // look for the earliest entry const auto& his_idx = db.get_index_type(); @@ -311,8 +318,8 @@ void account_history_plugin_impl::remove_old_histories_by_account( const account const auto remove_op_id = itr->operation_id; const auto& remove_op = remove_op_id(db); oldest_block_num = remove_op.block_num; - if( remove_op.block_num + _min_blocks_to_keep > latest_block_num - && stats_obj.total_ops - stats_obj.removed_ops <= _max_ops_per_acc_by_min_blocks ) + if( remove_op.block_num > _latest_block_number_to_remove + && stats_obj.removed_ops >= number_of_ops_to_remove_by_blks ) break; // remove the entry, and adjust account stats object @@ -352,7 +359,7 @@ void account_history_plugin_impl::remove_old_histories_by_account( const account if( itr != exa_idx.end() ) p_exa_obj = &(*itr); } - if( stats_obj.total_ops - stats_obj.removed_ops > max_ops_to_keep ) + if( stats_obj.removed_ops < number_of_ops_to_remove ) { // create or update exceeded_account_info_object if( p_exa_obj ) From 4a6d9d5d43738eb35a5b11690d09f053a6e387ce Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 1 Oct 2022 21:42:17 +0000 Subject: [PATCH 267/338] Add tests for partial-operations option --- tests/common/database_fixture.cpp | 1 + tests/tests/history_api_tests.cpp | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/common/database_fixture.cpp b/tests/common/database_fixture.cpp index 98584ac53c..fb0e11bde6 100644 --- a/tests/common/database_fixture.cpp +++ b/tests/common/database_fixture.cpp @@ -209,6 +209,7 @@ std::shared_ptr database_fixture_base::in if (fixture.current_test_name == "min_blocks_to_keep_test") { + fc::set_option( options, "partial-operations", true ); fc::set_option( options, "max-ops-per-account", (uint64_t)2 ); fc::set_option( options, "min-blocks-to-keep", (uint32_t)3 ); fc::set_option( options, "max-ops-per-acc-by-min-blocks", (uint64_t)5 ); diff --git a/tests/tests/history_api_tests.cpp b/tests/tests/history_api_tests.cpp index 11c1c70445..2f1459575b 100644 --- a/tests/tests/history_api_tests.cpp +++ b/tests/tests/history_api_tests.cpp @@ -697,8 +697,9 @@ BOOST_AUTO_TEST_CASE(min_blocks_to_keep_test) { generate_block(); histories = hist_api.get_account_history("1.2.0", operation_history_id_type(0), 10, operation_history_id_type(0)); - BOOST_CHECK_EQUAL(histories.size(), 3u); - + BOOST_REQUIRE_EQUAL(histories.size(), 3u); + operation_history_id_type oldest_op_hist_id = histories.back().id; + BOOST_CHECK( db.find(oldest_op_hist_id) ); generate_block(); histories = hist_api.get_account_history("1.2.0", operation_history_id_type(0), 10, @@ -714,6 +715,7 @@ BOOST_AUTO_TEST_CASE(min_blocks_to_keep_test) { histories = hist_api.get_account_history("1.2.0", operation_history_id_type(0), 10, operation_history_id_type(0)); BOOST_CHECK_EQUAL(histories.size(), 5u); + BOOST_CHECK( !db.find(oldest_op_hist_id) ); generate_block(); histories = hist_api.get_account_history("1.2.0", operation_history_id_type(0), 10, From fcf479815766216aaa124aead0ebfd5a4d725265 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 1 Oct 2022 21:50:00 +0000 Subject: [PATCH 268/338] Add default value to description of some options --- .../plugins/account_history/account_history_plugin.cpp | 6 ++++-- 1 file changed, 
4 insertions(+), 2 deletions(-) diff --git a/libraries/plugins/account_history/account_history_plugin.cpp b/libraries/plugins/account_history/account_history_plugin.cpp index 97a9d46092..301576f2d0 100644 --- a/libraries/plugins/account_history/account_history_plugin.cpp +++ b/libraries/plugins/account_history/account_history_plugin.cpp @@ -421,11 +421,13 @@ void account_history_plugin::plugin_set_program_options( "Operations which are in the latest X blocks will be kept in memory. " "This option only takes effect when track-account is not used and max-ops-per-account is not zero. " "Note that this option may cause more history records to be kept in memory than the limit defined by the " - "max-ops-per-account option, but the amount will be limited by the max-ops-per-acc-by-min-blocks option.") + "max-ops-per-account option, but the amount will be limited by the max-ops-per-acc-by-min-blocks option. " + "(default: 30000)") ("max-ops-per-acc-by-min-blocks", boost::program_options::value(), "A potential higher limit on the maximum number of operations per account to be kept in memory " "when the min-blocks-to-keep option causes the amount to exceed the limit defined by the " - "max-ops-per-account option. If this is less than max-ops-per-account, max-ops-per-account will be used.") + "max-ops-per-account option. If this is less than max-ops-per-account, max-ops-per-account will be used. " + "(default: 1000)") ; cfg.add(cli); } From ca200dc099f3b935402c7845dd15e5d6c29816d6 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 2 Oct 2022 03:46:56 +0000 Subject: [PATCH 269/338] Shorten a struct name --- .../account_history_plugin.cpp | 22 +++++++++---------- .../account_history_plugin.hpp | 20 ++++++++--------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/libraries/plugins/account_history/account_history_plugin.cpp b/libraries/plugins/account_history/account_history_plugin.cpp index 301576f2d0..54c5d0cfcf 100644 --- a/libraries/plugins/account_history/account_history_plugin.cpp +++ b/libraries/plugins/account_history/account_history_plugin.cpp @@ -84,7 +84,7 @@ class account_history_plugin_impl void add_account_history( const account_id_type& account_id, const operation_history_object& op ); void remove_old_histories_by_account( const account_statistics_object& stats_obj, - const exceeded_account_info_object* p_exa_obj = nullptr ); + const exceeded_account_object* p_exa_obj = nullptr ); void remove_old_histories(); @@ -280,7 +280,7 @@ void account_history_plugin_impl::remove_old_histories() return; const graphene::chain::database& db = database(); - const auto& exa_idx = db.get_index_type().indices().get(); + const auto& exa_idx = db.get_index_type().indices().get(); auto itr = exa_idx.begin(); while( itr != exa_idx.end() && itr->block_num <= _latest_block_number_to_remove ) { @@ -292,7 +292,7 @@ void account_history_plugin_impl::remove_old_histories() // Remove the earliest account history entries if too many. 
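The option descriptions revised above document the defaults (30000 and 1000) that the plugin fields shown in these patches are initialized with. As a hedged sketch only, not the plugin's actual init_program_options (whose body is not part of these patches), options declared this way are typically read back with boost::program_options along these lines:

    #include <boost/program_options.hpp>
    #include <cstdint>

    // Illustrative helper: copy the two options into plugin fields when present,
    // otherwise leave the in-class defaults (30000 and 1000) untouched.
    void read_history_limits( const boost::program_options::variables_map& options,
                              uint32_t& min_blocks_to_keep,
                              uint64_t& max_ops_per_acc_by_min_blocks )
    {
       if( options.count("min-blocks-to-keep") > 0 )
          min_blocks_to_keep = options["min-blocks-to-keep"].as<uint32_t>();
       if( options.count("max-ops-per-acc-by-min-blocks") > 0 )
          max_ops_per_acc_by_min_blocks = options["max-ops-per-acc-by-min-blocks"].as<uint64_t>();
    }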
void account_history_plugin_impl::remove_old_histories_by_account( const account_statistics_object& stats_obj, - const exceeded_account_info_object* p_exa_obj ) + const exceeded_account_object* p_exa_obj ) { graphene::chain::database& db = database(); const account_id_type& account_id = stats_obj.owner; @@ -351,29 +351,29 @@ void account_history_plugin_impl::remove_old_histories_by_account( const account } } } - // deal with exceeded_account_info_object + // deal with exceeded_account_object if( !p_exa_obj ) { - const auto& exa_idx = db.get_index_type().indices().get(); + const auto& exa_idx = db.get_index_type().indices().get(); auto itr = exa_idx.find( account_id ); if( itr != exa_idx.end() ) p_exa_obj = &(*itr); } if( stats_obj.removed_ops < number_of_ops_to_remove ) { - // create or update exceeded_account_info_object + // create or update exceeded_account_object if( p_exa_obj ) - db.modify( *p_exa_obj, [oldest_block_num]( exceeded_account_info_object& obj ){ + db.modify( *p_exa_obj, [oldest_block_num]( exceeded_account_object& obj ){ obj.block_num = oldest_block_num; }); else - db.create( - [&account_id, oldest_block_num]( exceeded_account_info_object& obj ){ + db.create( + [&account_id, oldest_block_num]( exceeded_account_object& obj ){ obj.account_id = account_id; obj.block_num = oldest_block_num; }); } - // remove exceeded_account_info_object if found + // remove exceeded_account_object if found else if( p_exa_obj ) db.remove( *p_exa_obj ); } @@ -440,7 +440,7 @@ void account_history_plugin::plugin_initialize(const boost::program_options::var my->_oho_index = database().add_index< primary_index< operation_history_index > >(); database().add_index< primary_index< account_history_index > >(); - database().add_index< primary_index< exceeded_account_info_index > >(); + database().add_index< primary_index< exceeded_account_index > >(); } void detail::account_history_plugin_impl::init_program_options(const boost::program_options::variables_map& options) diff --git a/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp b/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp index dd52eafac4..706f9abde0 100644 --- a/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp +++ b/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp @@ -46,14 +46,14 @@ namespace graphene { namespace account_history { enum account_history_object_type { - exceeded_account_info_object_type = 0 + exceeded_account_object_type = 0 }; /// This struct tracks accounts that have exceeded the max-ops-per-account limit -struct exceeded_account_info_object : public abstract_object +struct exceeded_account_object : public abstract_object { static constexpr uint8_t space_id = ACCOUNT_HISTORY_SPACE_ID; - static constexpr uint8_t type_id = exceeded_account_info_object_type; + static constexpr uint8_t type_id = exceeded_account_object_type; /// The ID of the account account_id_type account_id; @@ -63,23 +63,23 @@ struct exceeded_account_info_object : public abstract_object, member< object, object_id_type, &object::id > >, ordered_unique< tag, - member< exceeded_account_info_object, account_id_type, &exceeded_account_info_object::account_id > >, + member< exceeded_account_object, account_id_type, &exceeded_account_object::account_id > >, ordered_unique< tag, composite_key< - exceeded_account_info_object, - member< exceeded_account_info_object, uint32_t, 
&exceeded_account_info_object::block_num >, + exceeded_account_object, + member< exceeded_account_object, uint32_t, &exceeded_account_object::block_num >, member< object, object_id_type, &object::id > > > > >; -using exceeded_account_info_index = generic_index< exceeded_account_info_object, exceeded_account_multi_idx_type >; +using exceeded_account_index = generic_index< exceeded_account_object, exceeded_account_multi_index_type >; namespace detail { @@ -107,5 +107,5 @@ class account_history_plugin : public graphene::app::plugin } } //graphene::account_history -FC_REFLECT_DERIVED( graphene::account_history::exceeded_account_info_object, (graphene::db::object), +FC_REFLECT_DERIVED( graphene::account_history::exceeded_account_object, (graphene::db::object), (account_id)(block_num) ) From 1db2e1a1a35d9028b0949387a6cad130a4d1e0d7 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 2 Oct 2022 04:49:47 +0000 Subject: [PATCH 270/338] Refactor code for better performance --- .../account_history_plugin.cpp | 71 +++++++++++-------- 1 file changed, 41 insertions(+), 30 deletions(-) diff --git a/libraries/plugins/account_history/account_history_plugin.cpp b/libraries/plugins/account_history/account_history_plugin.cpp index 54c5d0cfcf..450d540a38 100644 --- a/libraries/plugins/account_history/account_history_plugin.cpp +++ b/libraries/plugins/account_history/account_history_plugin.cpp @@ -91,11 +91,15 @@ class account_history_plugin_impl void init_program_options(const boost::program_options::variables_map& options); }; +template< typename T > +static T get_biggest_number_to_remove( T biggest_number, T amount_to_keep ) +{ + return ( biggest_number > amount_to_keep ) ? ( biggest_number - amount_to_keep ) : 0; +} + void account_history_plugin_impl::update_account_histories( const signed_block& b ) { - auto latest_block_number = b.block_num(); - _latest_block_number_to_remove = ( latest_block_number > _min_blocks_to_keep ) ? - ( latest_block_number - _min_blocks_to_keep ) : 0; + _latest_block_number_to_remove = get_biggest_number_to_remove( b.block_num(), _min_blocks_to_keep ); graphene::chain::database& db = database(); const vector >& hist = db.get_applied_operations(); @@ -297,47 +301,38 @@ void account_history_plugin_impl::remove_old_histories_by_account( const account graphene::chain::database& db = database(); const account_id_type& account_id = stats_obj.owner; auto max_ops_to_keep = get_max_ops_to_keep( account_id ); // >= 1 - auto number_of_ops_to_remove = ( stats_obj.total_ops > max_ops_to_keep ) ? - ( stats_obj.total_ops - max_ops_to_keep ) : 0; - auto number_of_ops_to_remove_by_blks = ( stats_obj.total_ops > _max_ops_per_acc_by_min_blocks ) ? - ( stats_obj.total_ops - _max_ops_per_acc_by_min_blocks ) : 0; + auto number_of_ops_to_remove = get_biggest_number_to_remove( stats_obj.total_ops, max_ops_to_keep ); + auto number_of_ops_to_remove_by_blks = get_biggest_number_to_remove( stats_obj.total_ops, + _max_ops_per_acc_by_min_blocks ); + + const auto& his_idx = db.get_index_type(); + const auto& by_seq_idx = his_idx.indices().get(); + + auto removed_ops = stats_obj.removed_ops; + // look for the earliest entry if needed + auto itr = ( removed_ops < number_of_ops_to_remove ) ? 
by_seq_idx.lower_bound( account_id ) + : by_seq_idx.begin(); uint32_t oldest_block_num = _latest_block_number_to_remove; - while( stats_obj.removed_ops < number_of_ops_to_remove ) + while( removed_ops < number_of_ops_to_remove ) { - // look for the earliest entry - const auto& his_idx = db.get_index_type(); - const auto& by_seq_idx = his_idx.indices().get(); - auto itr = by_seq_idx.lower_bound( account_id ); // make sure don't remove the latest one // this should always be false, just check to be safe if( itr == by_seq_idx.end() || itr->account != account_id || itr->id == stats_obj.most_recent_op ) break; // if found, check whether to remove - const auto remove_op_id = itr->operation_id; + const auto& aho_to_remove = *itr; + const auto remove_op_id = aho_to_remove.operation_id; const auto& remove_op = remove_op_id(db); oldest_block_num = remove_op.block_num; - if( remove_op.block_num > _latest_block_number_to_remove - && stats_obj.removed_ops >= number_of_ops_to_remove_by_blks ) + if( remove_op.block_num > _latest_block_number_to_remove && removed_ops >= number_of_ops_to_remove_by_blks ) break; - // remove the entry, and adjust account stats object - const auto itr_remove = itr; + // remove the entry ++itr; - db.remove( *itr_remove ); - db.modify( stats_obj, [&]( account_statistics_object& obj ){ - obj.removed_ops = obj.removed_ops + 1; - }); - // modify previous node's next pointer - // this should be always true, but just have a check here - if( itr != by_seq_idx.end() && itr->account == account_id ) - { - db.modify( *itr, [&]( account_history_object& obj ){ - obj.next = account_history_id_type(); - }); - } - // else need to modify the head pointer, but it shouldn't be true + db.remove( aho_to_remove ); + ++removed_ops; // remove the operation history entry (1.11.x) if configured and no reference left if( _partial_operations ) @@ -351,6 +346,22 @@ void account_history_plugin_impl::remove_old_histories_by_account( const account } } } + // adjust account stats object and the oldest entry + if( removed_ops != stats_obj.removed_ops ) + { + db.modify( stats_obj, [removed_ops]( account_statistics_object& obj ){ + obj.removed_ops = removed_ops; + }); + // modify previous node's next pointer + // this should be always true, but just have a check here + if( itr != by_seq_idx.end() && itr->account == account_id ) + { + db.modify( *itr, []( account_history_object& obj ){ + obj.next = account_history_id_type(); + }); + } + // else need to modify the head pointer, but it shouldn't be true + } // deal with exceeded_account_object if( !p_exa_obj ) { From 74018b5030a443afad793ecb02335b549c0bb838 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 2 Oct 2022 05:32:11 +0000 Subject: [PATCH 271/338] Shorten a variable name --- .../graphene/account_history/account_history_plugin.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp b/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp index 706f9abde0..d0cd598281 100644 --- a/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp +++ b/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp @@ -63,7 +63,7 @@ struct exceeded_account_object : public abstract_object struct by_account; struct by_block_num; -using exceeded_account_multi_index_type = multi_index_container< +using exceeded_account_multi_idx_type = multi_index_container< 
exceeded_account_object, indexed_by< ordered_unique< tag, member< object, object_id_type, &object::id > >, @@ -79,7 +79,7 @@ using exceeded_account_multi_index_type = multi_index_container< > >; -using exceeded_account_index = generic_index< exceeded_account_object, exceeded_account_multi_index_type >; +using exceeded_account_index = generic_index< exceeded_account_object, exceeded_account_multi_idx_type >; namespace detail { From ab9f5ff1a8ced9ad66cb21508a167b0b89f433da Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 2 Oct 2022 05:33:05 +0000 Subject: [PATCH 272/338] Update variable names to avoid variable shadowing --- .../account_history_plugin.cpp | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/libraries/plugins/account_history/account_history_plugin.cpp b/libraries/plugins/account_history/account_history_plugin.cpp index 450d540a38..7383a95435 100644 --- a/libraries/plugins/account_history/account_history_plugin.cpp +++ b/libraries/plugins/account_history/account_history_plugin.cpp @@ -310,19 +310,19 @@ void account_history_plugin_impl::remove_old_histories_by_account( const account auto removed_ops = stats_obj.removed_ops; // look for the earliest entry if needed - auto itr = ( removed_ops < number_of_ops_to_remove ) ? by_seq_idx.lower_bound( account_id ) - : by_seq_idx.begin(); + auto aho_itr = ( removed_ops < number_of_ops_to_remove ) ? by_seq_idx.lower_bound( account_id ) + : by_seq_idx.begin(); uint32_t oldest_block_num = _latest_block_number_to_remove; while( removed_ops < number_of_ops_to_remove ) { // make sure don't remove the latest one // this should always be false, just check to be safe - if( itr == by_seq_idx.end() || itr->account != account_id || itr->id == stats_obj.most_recent_op ) + if( aho_itr == by_seq_idx.end() || aho_itr->account != account_id || aho_itr->id == stats_obj.most_recent_op ) break; // if found, check whether to remove - const auto& aho_to_remove = *itr; + const auto& aho_to_remove = *aho_itr; const auto remove_op_id = aho_to_remove.operation_id; const auto& remove_op = remove_op_id(db); oldest_block_num = remove_op.block_num; @@ -330,7 +330,7 @@ void account_history_plugin_impl::remove_old_histories_by_account( const account break; // remove the entry - ++itr; + ++aho_itr; db.remove( aho_to_remove ); ++removed_ops; @@ -354,9 +354,9 @@ void account_history_plugin_impl::remove_old_histories_by_account( const account }); // modify previous node's next pointer // this should be always true, but just have a check here - if( itr != by_seq_idx.end() && itr->account == account_id ) + if( aho_itr != by_seq_idx.end() && aho_itr->account == account_id ) { - db.modify( *itr, []( account_history_object& obj ){ + db.modify( *aho_itr, []( account_history_object& obj ){ obj.next = account_history_id_type(); }); } @@ -366,9 +366,9 @@ void account_history_plugin_impl::remove_old_histories_by_account( const account if( !p_exa_obj ) { const auto& exa_idx = db.get_index_type().indices().get(); - auto itr = exa_idx.find( account_id ); - if( itr != exa_idx.end() ) - p_exa_obj = &(*itr); + auto exa_itr = exa_idx.find( account_id ); + if( exa_itr != exa_idx.end() ) + p_exa_obj = &(*exa_itr); } if( stats_obj.removed_ops < number_of_ops_to_remove ) { From e8c80c45c76d38af2a8f7c4cd409e96fbfce660a Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 2 Oct 2022 05:54:55 +0000 Subject: [PATCH 273/338] Extract a code block into a function --- .../account_history_plugin.cpp | 34 ++++++++++++------- 1 file changed, 22 insertions(+), 12 
deletions(-) diff --git a/libraries/plugins/account_history/account_history_plugin.cpp b/libraries/plugins/account_history/account_history_plugin.cpp index 7383a95435..c7855fe866 100644 --- a/libraries/plugins/account_history/account_history_plugin.cpp +++ b/libraries/plugins/account_history/account_history_plugin.cpp @@ -88,6 +88,10 @@ class account_history_plugin_impl void remove_old_histories(); + /// When the partial_operations option is set, + /// if the specified operation history object is no longer referenced, remove it from database + void check_and_remove_op_history_obj( const operation_history_object& op ); + void init_program_options(const boost::program_options::variables_map& options); }; @@ -294,6 +298,22 @@ void account_history_plugin_impl::remove_old_histories() } } +void account_history_plugin_impl::check_and_remove_op_history_obj( const operation_history_object& op ) +{ + if( _partial_operations ) + { + // check for references + graphene::chain::database& db = database(); + const auto& his_idx = db.get_index_type(); + const auto& by_opid_idx = his_idx.indices().get(); + if( by_opid_idx.find( op.id ) == by_opid_idx.end() ) + { + // if no reference, remove + db.remove( op ); + } + } +} + // Remove the earliest account history entries if too many. void account_history_plugin_impl::remove_old_histories_by_account( const account_statistics_object& stats_obj, const exceeded_account_object* p_exa_obj ) @@ -323,8 +343,7 @@ void account_history_plugin_impl::remove_old_histories_by_account( const account // if found, check whether to remove const auto& aho_to_remove = *aho_itr; - const auto remove_op_id = aho_to_remove.operation_id; - const auto& remove_op = remove_op_id(db); + const auto& remove_op = aho_to_remove.operation_id(db); oldest_block_num = remove_op.block_num; if( remove_op.block_num > _latest_block_number_to_remove && removed_ops >= number_of_ops_to_remove_by_blks ) break; @@ -335,16 +354,7 @@ void account_history_plugin_impl::remove_old_histories_by_account( const account ++removed_ops; // remove the operation history entry (1.11.x) if configured and no reference left - if( _partial_operations ) - { - // check for references - const auto& by_opid_idx = his_idx.indices().get(); - if( by_opid_idx.find( remove_op_id ) == by_opid_idx.end() ) - { - // if no reference, remove - db.remove( remove_op ); - } - } + check_and_remove_op_history_obj( remove_op ); } // adjust account stats object and the oldest entry if( removed_ops != stats_obj.removed_ops ) From 55e4cc91f968dd834e43ffe420c4bb89c3c6c984 Mon Sep 17 00:00:00 2001 From: ioBanker <37595908+ioBanker@users.noreply.github.com> Date: Mon, 3 Oct 2022 23:59:59 +0300 Subject: [PATCH 274/338] Update README.md --- README.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 49e3f63951..ea91ccca61 100644 --- a/README.md +++ b/README.md @@ -35,11 +35,11 @@ Build instructions and additional documentation are available in the Prebuilt binaries can be found in the [releases page](https://github.com/bitshares/bitshares-core/releases) for download. 
-### Build +### Installing Node and Command-Line Wallet Software We recommend building on Ubuntu 20.04 LTS (64-bit) -**Install OS Dependencies:** +**Install Operating System Dependencies:** sudo apt-get update sudo apt-get install autoconf cmake make automake libtool git libboost-all-dev libssl-dev g++ libcurl4-openssl-dev doxygen @@ -92,7 +92,7 @@ manually build your preferred version and use it with BitShares by specifying it Example: `cmake -DOPENSSL_ROOT_DIR=/path/to/openssl ..` -### Run +### Running and Stopping Node Software **Run Node Software:** @@ -132,9 +132,9 @@ You can run the program with `--help` parameter to see more info: ./programs/witness_node/witness_node --help -### Command-line Wallet Software +### Using Command-Line Wallet -Stay on `bitshares-core` directory before you run the below `cli_wallet` command +Stay on `bitshares-core/build` directory before you run the below `cli_wallet` command ./programs/cli_wallet/cli_wallet @@ -331,10 +331,10 @@ FAQ Yes. Documentation of the code base, including APIs, can be generated using Doxygen. Simply run `doxygen` in this directory. - If both Doxygen and perl are available in your build environment, the CLI wallet's `help` and `gethelp` + If both Doxygen and perl are available in your build environment, the command-line wallet's `help` and `gethelp` commands will display help generated from the doxygen documentation. - If your CLI wallet's `help` command displays descriptions without parameter names like + If your command-line wallet's `help` command displays descriptions without parameter names like `signed_transaction transfer(string, string, string, string, string, bool)` it means CMake was unable to find Doxygen or perl during configuration. If found, the output should look like this: @@ -342,7 +342,7 @@ FAQ - Is there a way to allow external program to drive `cli_wallet` via websocket, JSONRPC, or HTTP? - Yes. External programs may connect to the CLI wallet and make its calls over a websockets API. To do this, run the wallet in + Yes. External programs may connect to the command-line wallet and make its calls over a websockets API. To do this, run the wallet in server mode, i.e. `cli_wallet -s "127.0.0.1:9999"` and then have the external program connect to it over the specified port (in this example, port 9999). Please check the ["Using the API"](#using-the-api) section for more info. From 9f823e0853ce212112ada803ca84c181cf29f4a8 Mon Sep 17 00:00:00 2001 From: ioBanker <37595908+ioBanker@users.noreply.github.com> Date: Tue, 4 Oct 2022 00:01:51 +0300 Subject: [PATCH 275/338] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index ea91ccca61..74511091e2 100644 --- a/README.md +++ b/README.md @@ -107,7 +107,7 @@ The blockchain data will be stored under the directory `witness_node_data_dir`. **Stop Node Software:** For stopping the node run cleanly; you will need to access the node run terminal then press on `Ctrl+C` then wait for the run to stop, please note that it may take (usually few minutes) to exit the run. -It's recommended to use linux command `screen`(https://help.ubuntu.com/community/Screen) to inisiate the node run so you can go back to the node run screen to stop it. +It's recommended to use linux command [screen](https://help.ubuntu.com/community/Screen) to inisiate the node run so you can go back to the node run screen to stop it. 
**IMPORTANT:** By default the node will start in reduced memory mode by using some of the commands detailed in [Memory reduction for nodes](https://github.com/bitshares/bitshares-core/wiki/Memory-reduction-for-nodes). From d580e29cf6111f404a0f3ec5fd65339d217e0036 Mon Sep 17 00:00:00 2001 From: ioBanker <37595908+ioBanker@users.noreply.github.com> Date: Tue, 4 Oct 2022 00:06:00 +0300 Subject: [PATCH 276/338] Update README.md --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 74511091e2..9d30fff277 100644 --- a/README.md +++ b/README.md @@ -101,17 +101,17 @@ Stay on `bitshares-core/build` directory before you run the below `witness_node` ./programs/witness_node/witness_node Under `build` directory the node run will automatically create the directory `witness_node_data_dir` along with config files underneath then start synchronizing the blockchain. -It may take (usually several hours) to fully synchronize the blockchain data. +It may take usually several hours to fully synchronize the blockchain data. The blockchain data will be stored under the directory `witness_node_data_dir`. **Stop Node Software:** -For stopping the node run cleanly; you will need to access the node run terminal then press on `Ctrl+C` then wait for the run to stop, please note that it may take (usually few minutes) to exit the run. +For stopping the node run cleanly; you will need to access the node run terminal then press on `Ctrl+C` then wait for the run to stop, please note that it may take usually few minutes to exit the run. It's recommended to use linux command [screen](https://help.ubuntu.com/community/Screen) to inisiate the node run so you can go back to the node run screen to stop it. **IMPORTANT:** By default the node will start in reduced memory mode by using some of the commands detailed in [Memory reduction for nodes](https://github.com/bitshares/bitshares-core/wiki/Memory-reduction-for-nodes). -In order to run a full node with all the account histories (which is usually not necessary) you need to remove `partial-operations` and `max-ops-per-account` from your config file. Please note that currently(2018-10-17) a full node will need more than 160GB of RAM to operate and required memory is growing fast. Consider the following table as **minimal requirements** before running a node: +In order to run a full node with all the account histories which usually unnecessary, you need to remove `partial-operations` and `max-ops-per-account` from your config file. Please note that currently(2018-10-17) a full node will need more than 160GB of RAM to operate and required memory is growing fast. 
Consider the following table as **minimal requirements** before running a node: | Default | Full | Minimal | ElasticSearch | --- | --- | --- | --- From 5b84e952434ff6f335041733d7f5f58baf2562af Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 3 Oct 2022 22:37:53 +0000 Subject: [PATCH 277/338] Add history_api::get_account_history_by_time API and refactor history_api::get_account_history --- libraries/app/api.cpp | 81 ++++++++++++++----- libraries/app/include/graphene/app/api.hpp | 47 ++++++++--- .../chain/operation_history_object.hpp | 15 ++++ .../include/graphene/protocol/object_id.hpp | 5 ++ tests/tests/history_api_tests.cpp | 1 + 5 files changed, 119 insertions(+), 30 deletions(-) diff --git a/libraries/app/api.cpp b/libraries/app/api.cpp index 868486fdd1..b01438cde3 100644 --- a/libraries/app/api.cpp +++ b/libraries/app/api.cpp @@ -408,7 +408,7 @@ namespace graphene { namespace app { uint32_t limit, operation_history_id_type start ) const { - FC_ASSERT( _app.chain_database() ); + FC_ASSERT( _app.chain_database(), "database unavailable" ); const auto& db = *_app.chain_database(); const auto configured_limit = _app.get_options().api_limit_get_account_history; @@ -417,13 +417,16 @@ namespace graphene { namespace app { ("configured_limit", configured_limit) ); vector result; + if( start == operation_history_id_type() ) + // Note: this means we can hardly use ID 0 as start to query for exactly the object with ID 0 + start = operation_history_id_type::max(); + if( start < stop ) + return result; + account_id_type account; try { database_api_helper db_api_helper( _app ); account = db_api_helper.get_account_from_string(account_id_or_name)->id; - const account_history_object& node = account(db).statistics(db).most_recent_op(db); - if(start == operation_history_id_type() || start.instance.value > node.operation_id.instance.value) - start = node.operation_id; } catch(...) 
{ return result; } if(_app.is_plugin_enabled("elasticsearch")) { @@ -438,20 +441,62 @@ namespace graphene { namespace app { } } - const auto& hist_idx = db.get_index_type(); - const auto& by_op_idx = hist_idx.indices().get(); - auto index_start = by_op_idx.begin(); - auto itr = by_op_idx.lower_bound(boost::make_tuple(account, start)); + const auto& by_op_idx = db.get_index_type().indices().get(); + auto itr = by_op_idx.lower_bound( boost::make_tuple( account, start ) ); + auto itr_end = by_op_idx.lower_bound( boost::make_tuple( account, stop ) ); - while(itr != index_start && itr->account == account && itr->operation_id.instance.value > stop.instance.value - && result.size() < limit) + while( itr != itr_end && result.size() < limit ) + { + result.emplace_back( itr->operation_id(db) ); + ++itr; + } + // Deal with a special case : include the object with ID 0 when it fits + if( 0 == stop.instance.value && result.size() < limit && itr != by_op_idx.end() ) { - if(itr->operation_id.instance.value <= start.instance.value) - result.push_back(itr->operation_id(db)); - --itr; + const auto& obj = *itr; + if( obj.account == account ) + result.emplace_back( obj.operation_id(db) ); } - if(stop.instance.value == 0 && result.size() < limit && itr->account == account) { - result.push_back(itr->operation_id(db)); + + return result; + } + + vector history_api::get_account_history_by_time( + const std::string& account_name_or_id, + const optional& olimit, + const optional& ostart ) const + { + FC_ASSERT( _app.chain_database(), "database unavailable" ); + const auto& db = *_app.chain_database(); + + const auto configured_limit = _app.get_options().api_limit_get_account_history; + uint32_t limit = olimit.valid() ? *olimit : configured_limit; + FC_ASSERT( limit <= configured_limit, + "limit can not be greater than ${configured_limit}", + ("configured_limit", configured_limit) ); + + vector result; + account_id_type account; + try { + database_api_helper db_api_helper( _app ); + account = db_api_helper.get_account_from_string(account_name_or_id)->id; + } catch(...) { return result; } + + fc::time_point_sec start = ostart.valid() ? 
*ostart : fc::time_point_sec::maximum(); + + const auto& op_hist_idx = db.get_index_type().indices().get(); + auto op_hist_itr = op_hist_idx.lower_bound( start ); + if( op_hist_itr == op_hist_idx.end() ) + return result; + + const auto& acc_hist_idx = db.get_index_type().indices().get(); + auto itr = acc_hist_idx.lower_bound( boost::make_tuple( account, op_hist_itr->id ) ); + auto itr_end = acc_hist_idx.upper_bound( account ); + + while( itr != itr_end && result.size() < limit ) + { + result.emplace_back( itr->operation_id(db) ); + ++itr; } return result; @@ -464,7 +509,7 @@ namespace graphene { namespace app { operation_history_id_type stop, uint32_t limit ) const { - FC_ASSERT( _app.chain_database() ); + FC_ASSERT( _app.chain_database(), "database unavailable" ); const auto& db = *_app.chain_database(); const auto configured_limit = _app.get_options().api_limit_get_account_history_operations; @@ -509,7 +554,7 @@ namespace graphene { namespace app { uint32_t limit, uint64_t start ) const { - FC_ASSERT( _app.chain_database() ); + FC_ASSERT( _app.chain_database(), "database unavailable" ); const auto& db = *_app.chain_database(); const auto configured_limit = _app.get_options().api_limit_get_relative_account_history; @@ -551,7 +596,7 @@ namespace graphene { namespace app { uint32_t block_num, const optional& trx_in_block ) const { - FC_ASSERT(_app.chain_database()); + FC_ASSERT( _app.chain_database(), "database unavailable" ); const auto& db = *_app.chain_database(); const auto& idx = db.get_index_type().indices().get(); auto range = trx_in_block.valid() ? idx.equal_range( boost::make_tuple( block_num, *trx_in_block ) ) diff --git a/libraries/app/include/graphene/app/api.hpp b/libraries/app/include/graphene/app/api.hpp index e1049c8289..a4054b897f 100644 --- a/libraries/app/include/graphene/app/api.hpp +++ b/libraries/app/include/graphene/app/api.hpp @@ -78,13 +78,13 @@ namespace graphene { namespace app { }; /** - * @brief Get operations relevant to the specificed account + * @brief Get the history of operations related to the specified account * @param account_name_or_id The account name or ID whose history should be queried * @param stop ID of the earliest operation to retrieve * @param limit Maximum number of operations to retrieve, must not exceed the configured value of * @a api_limit_get_account_history * @param start ID of the most recent operation to retrieve - * @return A list of operations performed by account, ordered from most recent to oldest. + * @return A list of operations related to the specified account, ordered from most recent to oldest. */ vector get_account_history( const std::string& account_name_or_id, @@ -94,11 +94,33 @@ namespace graphene { namespace app { )const; /** - * @brief Get operations relevant to the specified account filtering by operation type + * @brief Get the history of operations related to the specified account no later than the specified time + * @param account_name_or_id The account name or ID whose history should be queried + * @param limit Maximum number of operations to retrieve, must not exceed the configured value of + * @a api_limit_get_account_history + * @param start the time point to start looping back through history + * @return A list of operations related to the specified account, ordered from most recent to oldest. + * + * @note + * 1. If @p account_name_or_id cannot be tied to an account, an empty list will be returned + * 2. 
@p limit can be omitted or be @a null, if so the configured value of + * @a api_limit_get_account_history will be used + * 3. @p start can be omitted or be @a null, if so the api will return the "first page" of the history + * 4. One or more optional parameters can be omitted from the end of the parameter list, and the optional + * parameters in the middle cannot be omitted (but can be @a null). + */ + vector get_account_history_by_time( + const std::string& account_name_or_id, + const optional& limit = optional(), + const optional& start = optional() + )const; + + /** + * @brief Get the history of operations related to the specified account filtering by operation types * @param account_name_or_id The account name or ID whose history should be queried * @param operation_types The IDs of the operation we want to get operations in the account - * ( 0 = transfer , 1 = limit order create, ...) - * @param start the sequence number where to start looping back throw the history + * ( 0 = transfer , 1 = limit order create, ...) + * @param start the sequence number where to start looping back through the history * @param limit the max number of entries to return (from start number), must not exceed the configured * value of @a api_limit_get_account_history_by_operations * @return history_operation_detail @@ -111,15 +133,15 @@ namespace graphene { namespace app { )const; /** - * @brief Get only asked operations relevant to the specified account + * @brief Get the history of operations related to the specified account filtering by operation type * @param account_name_or_id The account name or ID whose history should be queried * @param operation_type The type of the operation we want to get operations in the account - * ( 0 = transfer , 1 = limit order create, ...) + * ( 0 = transfer , 1 = limit order create, ...) * @param stop ID of the earliest operation to retrieve * @param limit Maximum number of operations to retrieve, must not exceed the configured value of * @a api_limit_get_account_history_operations * @param start ID of the most recent operation to retrieve - * @return A list of operations performed by account, ordered from most recent to oldest. + * @return A list of operations related to the specified account, ordered from most recent to oldest. */ vector get_account_history_operations( const std::string& account_name_or_id, @@ -130,9 +152,9 @@ namespace graphene { namespace app { )const; /** - * @brief Get operations relevant to the specified account referenced - * by an event numbering specific to the account. The current number of operations - * for the account can be found in the account statistics (or use 0 for start). + * @brief Get the history of operations related to the specified account referenced + * by an event numbering specific to the account. The current number of operations + * for the account can be found in the account statistics (or use 0 for start). * @param account_name_or_id The account name or ID whose history should be queried * @param stop Sequence number of earliest operation. 0 is default and will * query 'limit' number of operations. @@ -140,7 +162,7 @@ namespace graphene { namespace app { * @a api_limit_get_relative_account_history * @param start Sequence number of the most recent operation to retrieve. * 0 is default, which will start querying from the most recent operation. - * @return A list of operations performed by account, ordered from most recent to oldest. 
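// Illustrative usage sketch (not part of this patch) for the new
// get_account_history_by_time call declared above. The fixture names
// `app` and `db` are assumed from the unit-test harness, and the account
// ID "1.2.0" is only an example.
graphene::app::history_api hist_api( app );
fc::time_point_sec cutoff = db.head_block_time();

// Newest operations included in blocks at or before `cutoff`, at most 2 entries,
// ordered from most recent to oldest.
auto page = hist_api.get_account_history_by_time( "1.2.0", 2, cutoff );

// Both optional parameters omitted: the configured api_limit_get_account_history
// and the "first page" of the history are used.
auto first_page = hist_api.get_account_history_by_time( "1.2.0" );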
+ * @return A list of operations related to the specified account, ordered from most recent to oldest. */ vector get_relative_account_history( const std::string& account_name_or_id, @@ -787,6 +809,7 @@ FC_REFLECT( graphene::app::asset_api::asset_holders, (asset_id)(count) ) FC_API(graphene::app::history_api, (get_account_history) + (get_account_history_by_time) (get_account_history_by_operations) (get_account_history_operations) (get_relative_account_history) diff --git a/libraries/chain/include/graphene/chain/operation_history_object.hpp b/libraries/chain/include/graphene/chain/operation_history_object.hpp index 380a0292d0..df959152dc 100644 --- a/libraries/chain/include/graphene/chain/operation_history_object.hpp +++ b/libraries/chain/include/graphene/chain/operation_history_object.hpp @@ -106,6 +106,7 @@ namespace graphene { namespace chain { }; struct by_block; + struct by_time; using operation_history_mlti_idx_type = multi_index_container< operation_history_object, @@ -118,6 +119,16 @@ namespace graphene { namespace chain { member< operation_history_object, uint16_t, &operation_history_object::op_in_trx>, member< operation_history_object, uint32_t, &operation_history_object::virtual_op> > + >, + ordered_unique< tag, + composite_key< operation_history_object, + member< operation_history_object, time_point_sec, &operation_history_object::block_time>, + member< object, object_id_type, &object::id > + >, + composite_key_compare< + std::greater< time_point_sec >, + std::greater< object_id_type > + > > > >; @@ -142,6 +153,10 @@ namespace graphene { namespace chain { composite_key< account_history_object, member< account_history_object, account_id_type, &account_history_object::account>, member< account_history_object, operation_history_id_type, &account_history_object::operation_id> + >, + composite_key_compare< + std::less< account_id_type >, + std::greater< operation_history_id_type > > >, ordered_non_unique< tag, diff --git a/libraries/protocol/include/graphene/protocol/object_id.hpp b/libraries/protocol/include/graphene/protocol/object_id.hpp index e414f6ddae..ba04e079d3 100644 --- a/libraries/protocol/include/graphene/protocol/object_id.hpp +++ b/libraries/protocol/include/graphene/protocol/object_id.hpp @@ -108,6 +108,11 @@ namespace graphene { namespace db { static constexpr uint16_t space_type = uint16_t(uint16_t(space_id) << 8) | uint16_t(type_id); + static constexpr object_id max() + { + return object_id( 0xffffffffffff ); + } + object_id() = default; object_id( unsigned_int i ):instance(i){} explicit object_id( uint64_t i ):instance(i) diff --git a/tests/tests/history_api_tests.cpp b/tests/tests/history_api_tests.cpp index 2f1459575b..397337b21a 100644 --- a/tests/tests/history_api_tests.cpp +++ b/tests/tests/history_api_tests.cpp @@ -222,6 +222,7 @@ BOOST_AUTO_TEST_CASE(get_account_history_additional) { create_bitasset("OIL", dan.id); // create op 6 generate_block(); + fc::usleep(fc::milliseconds(100)); // f(A, 0, 4, 9) = { 5, 3, 1, 0 } histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 4, operation_history_id_type(9)); From a7c14865f42dd52586c3870b52a583b0435d4159 Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 4 Oct 2022 10:56:56 +0000 Subject: [PATCH 278/338] Add tests for get_account_history_by_time API --- tests/tests/history_api_tests.cpp | 135 ++++++++++++++++++++++++++++++ 1 file changed, 135 insertions(+) diff --git a/tests/tests/history_api_tests.cpp b/tests/tests/history_api_tests.cpp index 397337b21a..815f8a616d 100644 --- 
a/tests/tests/history_api_tests.cpp +++ b/tests/tests/history_api_tests.cpp @@ -526,6 +526,141 @@ BOOST_AUTO_TEST_CASE(get_account_history_additional) { } } +BOOST_AUTO_TEST_CASE(get_account_history_by_time) { + try { + graphene::app::history_api hist_api(app); + + auto time1 = db.head_block_time(); + + // A = account_id_type() with records { 5, 3, 1, 0 }, and + // B = dan with records { 6, 4, 2, 1 } + // account_id_type() and dan share operation id 1(account create) - share can be also in id 0 + + // no history at all in the chain + vector histories = hist_api.get_account_history_by_time("1.2.0"); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + create_bitasset("USD", account_id_type()); // create op 0 + generate_block(); + fc::usleep(fc::milliseconds(100)); + + auto time2 = db.head_block_time(); + + // what if the account only has one history entry and it is 0? + histories = hist_api.get_account_history_by_time("1.2.0"); + BOOST_REQUIRE_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 0u); + + histories = hist_api.get_account_history_by_time("1.2.0", 0); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + histories = hist_api.get_account_history_by_time("1.2.0", 2, time1); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + histories = hist_api.get_account_history_by_time("1.2.0", 10, time2); + BOOST_REQUIRE_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 0u); + + histories = hist_api.get_account_history_by_time("1.2.0", {}, time2); + BOOST_REQUIRE_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 0u); + + BOOST_CHECK_THROW( hist_api.get_account_history_by_time( "1.2.0", 102 ), fc::exception ); + + const account_object& dan = create_account("dan"); // create op 1 + + create_bitasset("CNY", dan.id); // create op 2 + create_bitasset("BTC", account_id_type()); // create op 3 + create_bitasset("XMR", dan.id); // create op 4 + create_bitasset("EUR", account_id_type()); // create op 5 + create_bitasset("OIL", dan.id); // create op 6 + + generate_block(); + fc::usleep(fc::milliseconds(100)); + + auto time3 = db.head_block_time(); + + histories = hist_api.get_account_history_by_time("1.2.0", {}, time2); + BOOST_REQUIRE_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 0u); + + histories = hist_api.get_account_history_by_time("1.2.0", {}, time2 + fc::seconds(1)); + BOOST_REQUIRE_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 0u); + + histories = hist_api.get_account_history_by_time("1.2.0", {}, time3); + BOOST_REQUIRE_EQUAL(histories.size(), 4u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u); + BOOST_CHECK_EQUAL(histories[3].id.instance(), 0u); + + histories = hist_api.get_account_history_by_time("1.2.0", 2, time3); + BOOST_REQUIRE_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + + histories = hist_api.get_account_history_by_time("1.2.0", 2); + BOOST_REQUIRE_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + + histories = hist_api.get_account_history_by_time("1.2.0"); + BOOST_REQUIRE_EQUAL(histories.size(), 4u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u); + 
BOOST_CHECK_EQUAL(histories[3].id.instance(), 0u); + + histories = hist_api.get_account_history_by_time("dan", {}, time3); + BOOST_REQUIRE_EQUAL(histories.size(), 4u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 6u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 4u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 2u); + BOOST_CHECK_EQUAL(histories[3].id.instance(), 1u); + + histories = hist_api.get_account_history_by_time("dan", 5, time3); + BOOST_REQUIRE_EQUAL(histories.size(), 4u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 6u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 4u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 2u); + BOOST_CHECK_EQUAL(histories[3].id.instance(), 1u); + + histories = hist_api.get_account_history_by_time("dan", 5); + BOOST_REQUIRE_EQUAL(histories.size(), 4u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 6u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 4u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 2u); + BOOST_CHECK_EQUAL(histories[3].id.instance(), 1u); + + histories = hist_api.get_account_history_by_time("dan"); + BOOST_REQUIRE_EQUAL(histories.size(), 4u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 6u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 4u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 2u); + BOOST_CHECK_EQUAL(histories[3].id.instance(), 1u); + + histories = hist_api.get_account_history_by_time("dan", 2, time3); + BOOST_REQUIRE_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 6u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 4u); + + histories = hist_api.get_account_history_by_time("dan", 2, time2); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + histories = hist_api.get_account_history_by_time("dan", {}, time1); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + histories = hist_api.get_account_history_by_time("nathan", 2); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + } + catch (fc::exception &e) { + edump((e.to_detail_string())); + throw; + } +} + BOOST_AUTO_TEST_CASE(track_account) { try { graphene::app::history_api hist_api(app); From d49745f3610780461df109e4bc820b21022296d2 Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 4 Oct 2022 12:35:46 +0000 Subject: [PATCH 279/338] Add history_api::get_block_operations_by_time API --- libraries/app/api.cpp | 19 ++++++++++++++++ libraries/app/include/graphene/app/api.hpp | 26 +++++++++++++++++----- 2 files changed, 40 insertions(+), 5 deletions(-) diff --git a/libraries/app/api.cpp b/libraries/app/api.cpp index b01438cde3..3cec7094e4 100644 --- a/libraries/app/api.cpp +++ b/libraries/app/api.cpp @@ -606,6 +606,25 @@ namespace graphene { namespace app { return result; } + vector history_api::get_block_operations_by_time( + const optional& start ) const + { + FC_ASSERT( _app.chain_database(), "database unavailable" ); + const auto& db = *_app.chain_database(); + const auto& idx = db.get_index_type().indices().get(); + auto itr = start.valid() ? 
idx.lower_bound( *start ) : idx.begin(); + + vector result; + if( itr == idx.end() ) + return result; + + auto itr_end = idx.upper_bound( itr->block_time ); + + std::copy( itr, itr_end, std::back_inserter( result ) ); + + return result; + } + flat_set history_api::get_market_history_buckets()const { auto market_hist_plugin = _app.get_plugin( "market_history" ); diff --git a/libraries/app/include/graphene/app/api.hpp b/libraries/app/include/graphene/app/api.hpp index a4054b897f..e89129bdf3 100644 --- a/libraries/app/include/graphene/app/api.hpp +++ b/libraries/app/include/graphene/app/api.hpp @@ -171,22 +171,37 @@ namespace graphene { namespace app { uint64_t start = 0) const; /** - * @brief Get all operations inside a block or a transaction, including virtual operations + * @brief Get all operations within a block or a transaction, including virtual operations * @param block_num the number (height) of the block to fetch * @param trx_in_block the sequence of a transaction in the block, starts from @a 0, optional. * If specified, will return only operations of that transaction. * If omitted, will return all operations in the specified block. * @return a list of @a operation_history objects ordered by ID * - * @note the data is fetched from @a account_history plugin, thus the result is possible to - * be incomplete due to the @a partial-operations option configured in the API node. - * For complete data, it is recommended to query from ElasticSearch where data is - * maintained by @a elastic_search plugin. + * @note the data is fetched from the @a account_history plugin, so results may be + * incomplete due to the @a partial-operations option configured in the API node. + * To get complete data, it is recommended to query from ElasticSearch where the data is + * maintained by the @a elastic_search plugin. */ vector get_block_operation_history( uint32_t block_num, const optional& trx_in_block = {} ) const; + /** + * @brief Get all operations, including virtual operations, within the most recent block + * (no later than the specified time) containing at least one operation + * @param start time point, optional, if omitted, the data of the latest block containing at least + * one operation will be returned + * @return a list of @a operation_history objects ordered by ID + * + * @note the data is fetched from the @a account_history plugin, so results may be + * incomplete or incorrect due to the @a partial-operations option configured in the API node. + * To get complete data, it is recommended to query from ElasticSearch where the data is + * maintained by the @a elastic_search plugin. 
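// Illustrative usage sketch (not part of this patch), mirroring the tests added
// later in this series; `app` and `db` come from the test fixture. Because the
// new by_time index orders (block_time, id) with std::greater, lower_bound(start)
// lands on the newest stored block whose time is not later than `start`, and the
// copied range comes back ordered by ID in descending order.
graphene::app::history_api hist_api( app );

auto t = db.head_block_time();
auto ops_at_t   = hist_api.get_block_operations_by_time( t );  // ops of the latest block at or before t
auto latest_ops = hist_api.get_block_operations_by_time();     // start omitted: latest block containing any op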
+ */ + vector get_block_operations_by_time( + const optional& start = optional() ) const; + /** * @brief Get details of order executions occurred most recently in a trading pair * @param a Asset symbol or ID in a trading pair @@ -814,6 +829,7 @@ FC_API(graphene::app::history_api, (get_account_history_operations) (get_relative_account_history) (get_block_operation_history) + (get_block_operations_by_time) (get_fill_order_history) (get_market_history) (get_market_history_buckets) From 0a5727e243f7d35cb7631376f7219bf39486cf5f Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 4 Oct 2022 12:42:30 +0000 Subject: [PATCH 280/338] Add tests for get_block_operations_by_time API --- tests/tests/history_api_tests.cpp | 36 +++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/tests/tests/history_api_tests.cpp b/tests/tests/history_api_tests.cpp index 815f8a616d..143d2e3369 100644 --- a/tests/tests/history_api_tests.cpp +++ b/tests/tests/history_api_tests.cpp @@ -96,6 +96,42 @@ BOOST_AUTO_TEST_CASE(get_account_history) { histories = hist_api.get_block_operation_history(head_block_num, 1u); BOOST_CHECK_EQUAL(histories.size(), 1u); + // get_block_operations_by_time + auto time1 = db.head_block_time(); + histories = hist_api.get_block_operations_by_time(time1); + BOOST_CHECK_EQUAL(histories.size(), 3u); + + histories = hist_api.get_block_operations_by_time(time1 + fc::seconds(1)); + BOOST_CHECK_EQUAL(histories.size(), 3u); + + histories = hist_api.get_block_operations_by_time(time1 - fc::seconds(1)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + generate_block(); + auto time2 = db.head_block_time(); + + histories = hist_api.get_block_operations_by_time(time1); + BOOST_CHECK_EQUAL(histories.size(), 3u); + + histories = hist_api.get_block_operations_by_time(time1 - fc::seconds(1)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + histories = hist_api.get_block_operations_by_time(time2); + BOOST_CHECK_EQUAL(histories.size(), 3u); + + create_bitasset("USX", account_id_type()); + generate_block(); + auto time3 = db.head_block_time(); + + histories = hist_api.get_block_operations_by_time(time2); + BOOST_CHECK_EQUAL(histories.size(), 3u); + + histories = hist_api.get_block_operations_by_time(time3); + BOOST_CHECK_EQUAL(histories.size(), 1u); + + histories = hist_api.get_block_operations_by_time(time3 + fc::seconds(1)); + BOOST_CHECK_EQUAL(histories.size(), 1u); + } catch (fc::exception &e) { edump((e.to_detail_string())); throw; From 70e5b3e80f65026731e132c5a06f71ffaafd68d2 Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 4 Oct 2022 17:44:18 +0000 Subject: [PATCH 281/338] Fix API docs --- libraries/app/include/graphene/app/api.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/app/include/graphene/app/api.hpp b/libraries/app/include/graphene/app/api.hpp index e89129bdf3..ad92678ab2 100644 --- a/libraries/app/include/graphene/app/api.hpp +++ b/libraries/app/include/graphene/app/api.hpp @@ -192,7 +192,7 @@ namespace graphene { namespace app { * (no later than the specified time) containing at least one operation * @param start time point, optional, if omitted, the data of the latest block containing at least * one operation will be returned - * @return a list of @a operation_history objects ordered by ID + * @return a list of @a operation_history objects ordered by ID in descending order * * @note the data is fetched from the @a account_history plugin, so results may be * incomplete or incorrect due to the @a partial-operations option configured in the API 
node. From 9bbb5be1501151e76688e91e48e6c2e6bdd32bac Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 5 Oct 2022 14:47:55 +0000 Subject: [PATCH 282/338] Add an optional param to get_block_header* APIs ... to indicate whether to return signatures. Including 2 database APIs: - get_block_header - get_block_header_batch --- libraries/app/api_objects.cpp | 6 +++++ libraries/app/database_api.cpp | 27 +++++++++++-------- libraries/app/database_api_impl.hxx | 5 ++-- .../app/include/graphene/app/api_objects.hpp | 10 +++++++ .../app/include/graphene/app/database_api.hpp | 22 ++++++++++----- .../include/graphene/protocol/block.hpp | 5 ++++ 6 files changed, 55 insertions(+), 20 deletions(-) diff --git a/libraries/app/api_objects.cpp b/libraries/app/api_objects.cpp index 49d911189c..69d08d64a7 100644 --- a/libraries/app/api_objects.cpp +++ b/libraries/app/api_objects.cpp @@ -130,4 +130,10 @@ market_ticker::market_ticker(const fc::time_point_sec& now, quote_volume = "0"; } +maybe_signed_block_header::maybe_signed_block_header( const signed_block_header& bh, bool with_witness_signature ) +: block_header( bh ), + witness_signature( with_witness_signature ? bh.witness_signature : optional() ) +{ // Nothing else to do +} + } } // graphene::app diff --git a/libraries/app/database_api.cpp b/libraries/app/database_api.cpp index 1a83ce2d1f..fe927ce956 100644 --- a/libraries/app/database_api.cpp +++ b/libraries/app/database_api.cpp @@ -227,31 +227,36 @@ void database_api_impl::cancel_all_subscriptions( bool reset_callback, bool rese // // ////////////////////////////////////////////////////////////////////// -optional database_api::get_block_header(uint32_t block_num)const +optional database_api::get_block_header( + uint32_t block_num, const optional& with_witness_signature )const { - return my->get_block_header( block_num ); + bool with_signature = ( with_witness_signature.valid() && *with_witness_signature ); + return my->get_block_header( block_num, with_signature ); } -optional database_api_impl::get_block_header(uint32_t block_num) const +optional database_api_impl::get_block_header( + uint32_t block_num, bool with_witness_signature )const { auto result = _db.fetch_block_by_number(block_num); if(result) - return *result; + return maybe_signed_block_header( *result, with_witness_signature ); return {}; } -map> database_api::get_block_header_batch( - const vector& block_nums) const + +map> database_api::get_block_header_batch( + const vector& block_nums, const optional& with_witness_signatures )const { - return my->get_block_header_batch( block_nums ); + bool with_signatures = ( with_witness_signatures.valid() && *with_witness_signatures ); + return my->get_block_header_batch( block_nums, with_signatures ); } -map> database_api_impl::get_block_header_batch( - const vector& block_nums) const +map> database_api_impl::get_block_header_batch( + const vector& block_nums, bool with_witness_signatures )const { - map> results; + map> results; for (const uint32_t block_num : block_nums) { - results[block_num] = get_block_header(block_num); + results[block_num] = get_block_header( block_num, with_witness_signatures ); } return results; } diff --git a/libraries/app/database_api_impl.hxx b/libraries/app/database_api_impl.hxx index 153ef514e9..09e9afe413 100644 --- a/libraries/app/database_api_impl.hxx +++ b/libraries/app/database_api_impl.hxx @@ -50,8 +50,9 @@ class database_api_impl : public std::enable_shared_from_this void cancel_all_subscriptions(bool reset_callback, bool reset_market_subscriptions); // Blocks and 
transactions - optional get_block_header(uint32_t block_num)const; - map> get_block_header_batch(const vector& block_nums)const; + optional get_block_header( uint32_t block_num, bool with_witness_signature )const; + map> get_block_header_batch( + const vector& block_nums, bool with_witness_signatures )const; optional get_block(uint32_t block_num)const; processed_transaction get_transaction( uint32_t block_num, uint32_t trx_in_block )const; diff --git a/libraries/app/include/graphene/app/api_objects.hpp b/libraries/app/include/graphene/app/api_objects.hpp index 6e77291fb8..7dccae802c 100644 --- a/libraries/app/include/graphene/app/api_objects.hpp +++ b/libraries/app/include/graphene/app/api_objects.hpp @@ -177,6 +177,14 @@ namespace graphene { namespace app { optional statistics; }; + struct maybe_signed_block_header : block_header + { + maybe_signed_block_header() = default; + explicit maybe_signed_block_header( const signed_block_header& bh, bool with_witness_signature = true ); + + optional witness_signature; + }; + } } FC_REFLECT( graphene::app::more_data, @@ -221,3 +229,5 @@ FC_REFLECT_DERIVED( graphene::app::extended_asset_object, (graphene::chain::asse FC_REFLECT_DERIVED( graphene::app::extended_liquidity_pool_object, (graphene::chain::liquidity_pool_object), (statistics) ) +FC_REFLECT_DERIVED( graphene::app::maybe_signed_block_header, (graphene::protocol::block_header), + (witness_signature) ) diff --git a/libraries/app/include/graphene/app/database_api.hpp b/libraries/app/include/graphene/app/database_api.hpp index b76baff87d..a4ffea0ac1 100644 --- a/libraries/app/include/graphene/app/database_api.hpp +++ b/libraries/app/include/graphene/app/database_api.hpp @@ -153,18 +153,26 @@ class database_api ///////////////////////////// /** - * @brief Retrieve a signed block header + * @brief Retrieve a block header * @param block_num Height of the block whose header should be returned + * @param with_witness_signature Whether to return witness signature. Optional. + * If omitted or is @a false, will not return witness signature. * @return header of the referenced block, or null if no matching block was found */ - optional get_block_header(uint32_t block_num)const; + optional get_block_header( + uint32_t block_num, + const optional& with_witness_signature = optional() )const; /** - * @brief Retrieve multiple signed block headers by block numbers - * @param block_nums vector containing heights of the blocks whose headers should be returned - * @return array of headers of the referenced blocks, or null if no matching block was found - */ - map> get_block_header_batch(const vector& block_nums)const; + * @brief Retrieve multiple block headers by block numbers + * @param block_nums vector containing heights of the blocks whose headers should be returned + * @param with_witness_signatures Whether to return witness signatures. Optional. + * If omitted or is @a false, will not return witness signatures. 
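// Illustrative sketch (not part of this patch) of the extended calls, assuming a
// database_api instance `db_api` and a known height `head_block_num` as in the
// updated tests. Omitting the flag, or passing false, returns headers without
// the witness signature.
auto plain_hdr  = db_api.get_block_header( head_block_num );        // witness_signature not set
auto signed_hdr = db_api.get_block_header( head_block_num, true );  // witness_signature filled in

// Batch variant, same flag semantics for every requested height:
auto batch = db_api.get_block_header_batch( { head_block_num, head_block_num - 1 }, true );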
+ * @return array of headers of the referenced blocks, or null if no matching block was found + */ + map> get_block_header_batch( + const vector& block_nums, + const optional& with_witness_signatures = optional() )const; /** * @brief Retrieve a full, signed block diff --git a/libraries/protocol/include/graphene/protocol/block.hpp b/libraries/protocol/include/graphene/protocol/block.hpp index 499d970998..8c7e91b39e 100644 --- a/libraries/protocol/include/graphene/protocol/block.hpp +++ b/libraries/protocol/include/graphene/protocol/block.hpp @@ -39,6 +39,8 @@ namespace graphene { namespace protocol { // More info in https://github.com/bitshares/bitshares-core/issues/1136 extensions_type extensions; + virtual ~block_header() = default; + static uint32_t num_from_id(const block_id_type& id); }; @@ -51,6 +53,9 @@ namespace graphene { namespace protocol { bool validate_signee( const fc::ecc::public_key& expected_signee )const; signature_type witness_signature; + + signed_block_header() = default; + explicit signed_block_header( const block_header& header ) : block_header( header ) {} protected: mutable fc::ecc::public_key _signee; mutable block_id_type _block_id; From f34390622915bafb8244caec933868ae181cf486 Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 5 Oct 2022 15:02:39 +0000 Subject: [PATCH 283/338] Update tests for get_block* APIs --- tests/tests/database_api_tests.cpp | 90 +++++++++++++++++++++++++----- 1 file changed, 77 insertions(+), 13 deletions(-) diff --git a/tests/tests/database_api_tests.cpp b/tests/tests/database_api_tests.cpp index a6ab497449..b26c2b7b31 100644 --- a/tests/tests/database_api_tests.cpp +++ b/tests/tests/database_api_tests.cpp @@ -1483,6 +1483,14 @@ BOOST_AUTO_TEST_CASE( get_transaction_hex ) BOOST_AUTO_TEST_CASE( get_block_tests ) { try { + const auto& get_block_id = []( const graphene::app::maybe_signed_block_header& header ) + { + signed_block_header signed_header( static_cast( header ) ); + if( header.witness_signature.valid() ) + signed_header.witness_signature = *header.witness_signature; + return signed_header.id(); + }; + generate_block(); ACTORS( (nathan) ); @@ -1505,11 +1513,35 @@ BOOST_AUTO_TEST_CASE( get_block_tests ) BOOST_CHECK( head_block->witness_signature != signature_type() ); BOOST_CHECK( head_block->id() == block3.id() ); - auto head_block_header = db_api.get_block_header( head_block_num ); + auto head_block_header = db_api.get_block_header( head_block_num, true ); + idump( (head_block_header) ); BOOST_REQUIRE( head_block_header.valid() ); BOOST_CHECK_EQUAL( head_block_header->block_num(), head_block_num ); - BOOST_CHECK( head_block_header->witness_signature == head_block->witness_signature ); - BOOST_CHECK( head_block_header->id() == head_block->id() ); + BOOST_REQUIRE( head_block_header->witness_signature.valid() ); + BOOST_CHECK( *head_block_header->witness_signature == head_block->witness_signature ); + BOOST_CHECK( get_block_id( *head_block_header ) == head_block->id() ); + + auto head_block_header2 = db_api.get_block_header( head_block_num ); + idump( (head_block_header2) ); + BOOST_REQUIRE( head_block_header2.valid() ); + BOOST_CHECK_EQUAL( head_block_header2->block_num(), head_block_num ); + BOOST_CHECK( head_block_header2->previous == head_block->previous ); + BOOST_CHECK( head_block_header2->timestamp == head_block->timestamp ); + BOOST_CHECK( head_block_header2->witness == head_block->witness ); + BOOST_CHECK( head_block_header2->transaction_merkle_root == head_block->transaction_merkle_root ); + BOOST_CHECK( 
!head_block_header2->witness_signature.valid() ); + BOOST_CHECK( get_block_id( *head_block_header2 ) != head_block->id() ); + + auto head_block_header3 = db_api.get_block_header( head_block_num, false ); + idump( (head_block_header3) ); + BOOST_REQUIRE( head_block_header3.valid() ); + BOOST_CHECK_EQUAL( head_block_header3->block_num(), head_block_num ); + BOOST_CHECK( head_block_header3->previous == head_block->previous ); + BOOST_CHECK( head_block_header3->timestamp == head_block->timestamp ); + BOOST_CHECK( head_block_header3->witness == head_block->witness ); + BOOST_CHECK( head_block_header3->transaction_merkle_root == head_block->transaction_merkle_root ); + BOOST_CHECK( !head_block_header3->witness_signature.valid() ); + BOOST_CHECK( get_block_id( *head_block_header3 ) == get_block_id( *head_block_header2 ) ); auto previous_block = db_api.get_block( head_block_num - 1 ); BOOST_REQUIRE( previous_block.valid() ); @@ -1520,31 +1552,63 @@ BOOST_AUTO_TEST_CASE( get_block_tests ) BOOST_CHECK( previous_block->witness_signature != head_block->witness_signature ); BOOST_CHECK( previous_block->id() == block2.id() ); - auto previous_block_header = db_api.get_block_header( head_block_num - 1 ); + auto previous_block_header = db_api.get_block_header( head_block_num - 1, true ); BOOST_REQUIRE( previous_block_header.valid() ); BOOST_CHECK_EQUAL( previous_block_header->block_num(), head_block_num - 1 ); - BOOST_CHECK( previous_block_header->witness_signature == previous_block->witness_signature ); - BOOST_CHECK( previous_block_header->id() == previous_block->id() ); + BOOST_REQUIRE( previous_block_header->witness_signature.valid() ); + BOOST_CHECK( *previous_block_header->witness_signature == previous_block->witness_signature ); + BOOST_CHECK( get_block_id( *previous_block_header ) == previous_block->id() ); auto next_block = db_api.get_block( head_block_num + 1 ); BOOST_CHECK( !next_block.valid() ); - auto next_block_header = db_api.get_block_header( head_block_num + 1 ); + auto next_block_header = db_api.get_block_header( head_block_num + 1, true ); BOOST_CHECK( !next_block_header.valid() ); const auto block_headers = db_api.get_block_header_batch( { head_block_num, head_block_num + 1, - head_block_num - 1 } ); + head_block_num - 1 }, true ); BOOST_REQUIRE_EQUAL( block_headers.size(), 3U ); BOOST_CHECK_THROW( block_headers.at( head_block_num + 2 ), std::out_of_range ); BOOST_CHECK( !block_headers.at( head_block_num + 1 ).valid() ); BOOST_REQUIRE( block_headers.at( head_block_num ).valid() ); BOOST_CHECK( block_headers.at( head_block_num )->block_num() == head_block_header->block_num() ); - BOOST_CHECK( block_headers.at( head_block_num )->id() == head_block_header->id() ); - BOOST_CHECK( block_headers.at( head_block_num )->witness_signature == head_block_header->witness_signature ); + BOOST_CHECK( get_block_id( *block_headers.at( head_block_num ) ) == head_block->id() ); + BOOST_REQUIRE( block_headers.at( head_block_num )->witness_signature.valid() ); + BOOST_CHECK( *block_headers.at( head_block_num )->witness_signature == head_block->witness_signature ); + BOOST_REQUIRE( block_headers.at( head_block_num - 1 ).valid() ); BOOST_CHECK( block_headers.at( head_block_num - 1 )->block_num() == previous_block_header->block_num() ); - BOOST_CHECK( block_headers.at( head_block_num - 1 )->id() == previous_block_header->id() ); - BOOST_CHECK( block_headers.at( head_block_num - 1 )->witness_signature - == previous_block_header->witness_signature ); + BOOST_CHECK( get_block_id( *block_headers.at( head_block_num 
- 1 ) ) == previous_block->id() ); + BOOST_REQUIRE( block_headers.at( head_block_num - 1 )->witness_signature.valid() ); + BOOST_CHECK( *block_headers.at( head_block_num - 1 )->witness_signature == previous_block->witness_signature ); + + const auto block_headers2 = db_api.get_block_header_batch( { head_block_num, head_block_num + 1, + head_block_num - 1 } ); + BOOST_REQUIRE_EQUAL( block_headers2.size(), 3U ); + BOOST_CHECK_THROW( block_headers2.at( head_block_num + 2 ), std::out_of_range ); + BOOST_CHECK( !block_headers2.at( head_block_num + 1 ).valid() ); + BOOST_REQUIRE( block_headers2.at( head_block_num ).valid() ); + BOOST_CHECK( block_headers2.at( head_block_num )->block_num() == head_block_header->block_num() ); + BOOST_CHECK( get_block_id( *block_headers2.at( head_block_num ) ) == get_block_id( *head_block_header2 ) ); + BOOST_CHECK( !block_headers2.at( head_block_num )->witness_signature.valid() ); + BOOST_REQUIRE( block_headers2.at( head_block_num - 1 ).valid() ); + BOOST_CHECK( block_headers2.at( head_block_num - 1 )->block_num() == previous_block_header->block_num() ); + BOOST_CHECK( get_block_id( *block_headers2.at( head_block_num - 1 ) ) != previous_block->id() ); + BOOST_CHECK( !block_headers2.at( head_block_num - 1 )->witness_signature.valid() ); + + const auto block_headers3 = db_api.get_block_header_batch( { head_block_num, head_block_num + 1, + head_block_num - 1 }, false ); + BOOST_REQUIRE_EQUAL( block_headers3.size(), 3U ); + BOOST_CHECK_THROW( block_headers3.at( head_block_num + 2 ), std::out_of_range ); + BOOST_CHECK( !block_headers3.at( head_block_num + 1 ).valid() ); + BOOST_REQUIRE( block_headers3.at( head_block_num ).valid() ); + BOOST_CHECK( block_headers3.at( head_block_num )->block_num() == head_block_header->block_num() ); + BOOST_CHECK( get_block_id( *block_headers3.at( head_block_num ) ) == get_block_id( *head_block_header2 ) ); + BOOST_CHECK( !block_headers3.at( head_block_num )->witness_signature.valid() ); + BOOST_REQUIRE( block_headers3.at( head_block_num - 1 ).valid() ); + BOOST_CHECK( block_headers3.at( head_block_num - 1 )->block_num() == previous_block_header->block_num() ); + BOOST_CHECK( get_block_id( *block_headers3.at( head_block_num - 1 ) ) + == get_block_id( *block_headers2.at( head_block_num - 1 ) ) ); + BOOST_CHECK( !block_headers3.at( head_block_num - 1 )->witness_signature.valid() ); } FC_LOG_AND_RETHROW() } From 43c95ab19c1f049864ad26a2fc913aff085cfc29 Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 6 Oct 2022 12:57:26 +0000 Subject: [PATCH 284/338] Add a comment --- libraries/app/api_objects.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/app/api_objects.cpp b/libraries/app/api_objects.cpp index 69d08d64a7..6a14686850 100644 --- a/libraries/app/api_objects.cpp +++ b/libraries/app/api_objects.cpp @@ -131,7 +131,7 @@ market_ticker::market_ticker(const fc::time_point_sec& now, } maybe_signed_block_header::maybe_signed_block_header( const signed_block_header& bh, bool with_witness_signature ) -: block_header( bh ), +: block_header( bh ), // Slice intentionally witness_signature( with_witness_signature ? 
bh.witness_signature : optional() ) { // Nothing else to do } From 7f83c30bb84c85d4acb55a8f091a24216c4df8fb Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 7 Oct 2022 19:25:03 +0000 Subject: [PATCH 285/338] Add database_api::get_next_object_id API --- libraries/app/database_api.cpp | 30 ++++++++++- libraries/app/database_api_impl.hxx | 2 + .../app/include/graphene/app/database_api.hpp | 12 +++++ .../account_history_plugin.cpp | 3 +- .../api_helper_indexes/api_helper_indexes.cpp | 51 +++++++++++++++++++ .../api_helper_indexes/api_helper_indexes.hpp | 19 +++++++ .../custom_operations_plugin.cpp | 3 +- .../elasticsearch/elasticsearch_plugin.cpp | 3 +- .../market_history/market_history_plugin.cpp | 3 +- libraries/plugins/snapshot/snapshot.cpp | 1 + .../template_plugin/template_plugin.cpp | 3 +- 11 files changed, 124 insertions(+), 6 deletions(-) diff --git a/libraries/app/database_api.cpp b/libraries/app/database_api.cpp index fe927ce956..dd83047566 100644 --- a/libraries/app/database_api.cpp +++ b/libraries/app/database_api.cpp @@ -94,7 +94,7 @@ database_api_impl::database_api_impl( graphene::chain::database& db, const appli amount_in_collateral_index = &_db.get_index_type< primary_index< call_order_index > >() .get_secondary_index(); } - catch( fc::assert_exception& e ) + catch( const fc::assert_exception& ) { amount_in_collateral_index = nullptr; } @@ -108,6 +108,17 @@ database_api_impl::database_api_impl( graphene::chain::database& db, const appli { asset_in_liquidity_pools_index = nullptr; } + + try + { + next_object_ids_index = &_db.get_index_type< primary_index< simple_index< chain_property_object > > >() + .get_secondary_index(); + } + catch( const fc::assert_exception& ) + { + next_object_ids_index = nullptr; + } + } database_api_impl::~database_api_impl() @@ -349,6 +360,23 @@ dynamic_global_property_object database_api_impl::get_dynamic_global_properties( return _db.get(dynamic_global_property_id_type()); } +object_id_type database_api::get_next_object_id( uint8_t space_id, uint8_t type_id, + bool with_pending_transactions )const +{ + return my->get_next_object_id( space_id, type_id, with_pending_transactions ); +} + +object_id_type database_api_impl::get_next_object_id( uint8_t space_id, uint8_t type_id, + bool with_pending_transactions )const +{ + if( with_pending_transactions ) + return _db.get_index( space_id, type_id ).get_next_id(); + + FC_ASSERT( next_object_ids_index, "api_helper_indexes plugin is not enabled on this server." 
); + + return next_object_ids_index->get_next_id( space_id, type_id ); +} + ////////////////////////////////////////////////////////////////////// // // // Keys // diff --git a/libraries/app/database_api_impl.hxx b/libraries/app/database_api_impl.hxx index 09e9afe413..86a4c31d80 100644 --- a/libraries/app/database_api_impl.hxx +++ b/libraries/app/database_api_impl.hxx @@ -62,6 +62,7 @@ class database_api_impl : public std::enable_shared_from_this fc::variant_object get_config()const; chain_id_type get_chain_id()const; dynamic_global_property_object get_dynamic_global_properties()const; + object_id_type get_next_object_id( uint8_t space_id, uint8_t type_id, bool with_pending_transactions )const; // Keys vector> get_key_references( vector key )const; @@ -430,6 +431,7 @@ class database_api_impl : public std::enable_shared_from_this const graphene::api_helper_indexes::amount_in_collateral_index* amount_in_collateral_index; const graphene::api_helper_indexes::asset_in_liquidity_pools_index* asset_in_liquidity_pools_index; + const graphene::api_helper_indexes::next_object_ids_index* next_object_ids_index; }; } } // graphene::app diff --git a/libraries/app/include/graphene/app/database_api.hpp b/libraries/app/include/graphene/app/database_api.hpp index a4ffea0ac1..752fefc2b4 100644 --- a/libraries/app/include/graphene/app/database_api.hpp +++ b/libraries/app/include/graphene/app/database_api.hpp @@ -228,6 +228,17 @@ class database_api */ dynamic_global_property_object get_dynamic_global_properties()const; + /** + * @brief Get the next object ID in an object space + * @param space_id The space ID + * @param type_id The type ID + * @param with_pending_transactions Whether to include pending transactions + * @return The next object ID to be assigned + * @throw fc::exception If the object space does not exist, or @p with_pending_transactions is @a false but + * the api_helper_indexes plugin is not enabled + */ + object_id_type get_next_object_id( uint8_t space_id, uint8_t type_id, bool with_pending_transactions )const; + ////////// // Keys // ////////// @@ -1475,6 +1486,7 @@ FC_API(graphene::app::database_api, (get_config) (get_chain_id) (get_dynamic_global_properties) + (get_next_object_id) // Keys (get_key_references) diff --git a/libraries/plugins/account_history/account_history_plugin.cpp b/libraries/plugins/account_history/account_history_plugin.cpp index c7855fe866..58a6119c63 100644 --- a/libraries/plugins/account_history/account_history_plugin.cpp +++ b/libraries/plugins/account_history/account_history_plugin.cpp @@ -457,7 +457,8 @@ void account_history_plugin::plugin_initialize(const boost::program_options::var { my->init_program_options( options ); - database().applied_block.connect( [&]( const signed_block& b){ my->update_account_histories(b); } ); + // connect with group 0 to process before some special steps (e.g. 
snapshot or next_object_id) + database().applied_block.connect( 0, [this]( const signed_block& b){ my->update_account_histories(b); } ); my->_oho_index = database().add_index< primary_index< operation_history_index > >(); database().add_index< primary_index< account_history_index > >(); diff --git a/libraries/plugins/api_helper_indexes/api_helper_indexes.cpp b/libraries/plugins/api_helper_indexes/api_helper_indexes.cpp index 79c996399c..2e1530f2cb 100644 --- a/libraries/plugins/api_helper_indexes/api_helper_indexes.cpp +++ b/libraries/plugins/api_helper_indexes/api_helper_indexes.cpp @@ -26,6 +26,7 @@ #include #include #include +#include namespace graphene { namespace api_helper_indexes { @@ -198,6 +199,56 @@ void api_helper_indexes::plugin_startup() for( const auto& pool : database().get_index_type().indices() ) asset_in_liquidity_pools_idx->object_inserted( pool ); + next_object_ids_idx = database().add_secondary_index< primary_index>, + next_object_ids_index >(); + refresh_next_ids(); + // connect with no group specified to process after the ones with a group specified + database().applied_block.connect( [this]( const chain::signed_block& ) + { + refresh_next_ids(); + _next_ids_map_initialized = true; + }); } +void api_helper_indexes::refresh_next_ids() +{ + const auto& db = database(); + if( _next_ids_map_initialized ) + { + for( auto& item : next_object_ids_idx->_next_ids ) + { + item.second = db.get_index( item.first.first, item.first.second ).get_next_id(); + } + return; + } + + // Assuming that all indexes have been created when processing the first block, + // for better performance, only do this twice, one on plugin startup, the other on the first block. + constexpr uint8_t max = 255; + size_t count = 0; + size_t failed_count = 0; + for( uint8_t space = 0; space < max; ++space ) + { + for( uint8_t type = 0; type < max; ++type ) + { + try + { + const auto& idx = db.get_index( space, type ); + next_object_ids_idx->_next_ids[ std::make_pair( space, type ) ] = idx.get_next_id(); + ++count; + } + catch( const fc::exception& ) + { + ++failed_count; + } + } + } + dlog( "${count} indexes detected, ${failed_count} not found", ("count",count)("failed_count",failed_count) ); +} + +object_id_type next_object_ids_index::get_next_id( uint8_t space_id, uint8_t type_id ) const +{ try { + return _next_ids.at( std::make_pair( space_id, type_id ) ); +} FC_CAPTURE_AND_RETHROW( (space_id)(type_id) ) } + } } diff --git a/libraries/plugins/api_helper_indexes/include/graphene/api_helper_indexes/api_helper_indexes.hpp b/libraries/plugins/api_helper_indexes/include/graphene/api_helper_indexes/api_helper_indexes.hpp index bfd7502ab1..080d00c9c3 100644 --- a/libraries/plugins/api_helper_indexes/include/graphene/api_helper_indexes/api_helper_indexes.hpp +++ b/libraries/plugins/api_helper_indexes/include/graphene/api_helper_indexes/api_helper_indexes.hpp @@ -71,6 +71,21 @@ class asset_in_liquidity_pools_index: public secondary_index flat_map> asset_in_pools_map; }; +/** + * @brief This secondary index tracks the next ID of all object types. + * @note This is implemented with \c flat_map considering there aren't too many object types in the system thus + * the performance would be acceptable. 
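// Illustrative sketch (not part of this patch) of the API that consumes this
// index, as set up in the unit tests (`db` and `opt` come from the fixture):
graphene::app::database_api db_api( db, &opt );

// true: read the next ID straight from the chain database, including the effect
// of pending transactions.
object_id_type next_with_pending = db_api.get_next_object_id( protocol_ids, limit_order_object_type, true );

// false: read this plugin's snapshot, refreshed once per applied block; throws
// if api_helper_indexes is not enabled on the node.
object_id_type next_confirmed = db_api.get_next_object_id( protocol_ids, limit_order_object_type, false );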
+ */ +class next_object_ids_index : public secondary_index +{ + public: + object_id_type get_next_id( uint8_t space_id, uint8_t type_id ) const; + + private: + friend class api_helper_indexes; + flat_map< std::pair, object_id_type > _next_ids; +}; + namespace detail { class api_helper_indexes_impl; @@ -96,6 +111,10 @@ class api_helper_indexes : public graphene::app::plugin std::unique_ptr my; amount_in_collateral_index* amount_in_collateral_idx = nullptr; asset_in_liquidity_pools_index* asset_in_liquidity_pools_idx = nullptr; + next_object_ids_index* next_object_ids_idx = nullptr; + + bool _next_ids_map_initialized = false; + void refresh_next_ids(); }; } } //graphene::template diff --git a/libraries/plugins/custom_operations/custom_operations_plugin.cpp b/libraries/plugins/custom_operations/custom_operations_plugin.cpp index 729b46c240..d792086c10 100644 --- a/libraries/plugins/custom_operations/custom_operations_plugin.cpp +++ b/libraries/plugins/custom_operations/custom_operations_plugin.cpp @@ -136,7 +136,8 @@ void custom_operations_plugin::plugin_initialize(const boost::program_options::v my->_start_block = options["custom-operations-start-block"].as(); } - database().applied_block.connect( [this]( const signed_block& b) { + // connect with group 0 to process before some special steps (e.g. snapshot or next_object_id) + database().applied_block.connect( 0, [this]( const signed_block& b) { if( b.block_num() >= my->_start_block ) my->onBlock(); } ); diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index c7ed9eec90..c62c3ef87c 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -575,7 +575,8 @@ void elasticsearch_plugin::plugin_initialize(const boost::program_options::varia if( my->_options.elasticsearch_mode != mode::only_query ) { - database().applied_block.connect([this](const signed_block &b) { + // connect with group 0 to process before some special steps (e.g. snapshot or next_object_id) + database().applied_block.connect( 0, [this](const signed_block &b) { my->update_account_histories(b); }); } diff --git a/libraries/plugins/market_history/market_history_plugin.cpp b/libraries/plugins/market_history/market_history_plugin.cpp index 853f541c9c..29699b1eba 100644 --- a/libraries/plugins/market_history/market_history_plugin.cpp +++ b/libraries/plugins/market_history/market_history_plugin.cpp @@ -766,7 +766,8 @@ void market_history_plugin::plugin_set_program_options( void market_history_plugin::plugin_initialize(const boost::program_options::variables_map& options) { try { - database().applied_block.connect( [this]( const signed_block& b){ my->update_market_histories(b); } ); + // connect with group 0 to process before some special steps (e.g. 
snapshot or next_object_id) + database().applied_block.connect( 0, [this]( const signed_block& b){ my->update_market_histories(b); } ); database().add_index< primary_index< bucket_index > >(); database().add_index< primary_index< history_index > >(); diff --git a/libraries/plugins/snapshot/snapshot.cpp b/libraries/plugins/snapshot/snapshot.cpp index 12a0553417..f7bb825391 100644 --- a/libraries/plugins/snapshot/snapshot.cpp +++ b/libraries/plugins/snapshot/snapshot.cpp @@ -72,6 +72,7 @@ void snapshot_plugin::plugin_initialize(const boost::program_options::variables_ snapshot_block = options[OPT_BLOCK_NUM].as(); if( options.count(OPT_BLOCK_TIME) > 0 ) snapshot_time = fc::time_point_sec::from_iso_string( options[OPT_BLOCK_TIME].as() ); + // connect with no group specified to process after the ones with a group specified database().applied_block.connect( [&]( const graphene::chain::signed_block& b ) { check_snapshot( b ); }); diff --git a/libraries/plugins/template_plugin/template_plugin.cpp b/libraries/plugins/template_plugin/template_plugin.cpp index 72020a8588..abc7615983 100644 --- a/libraries/plugins/template_plugin/template_plugin.cpp +++ b/libraries/plugins/template_plugin/template_plugin.cpp @@ -103,7 +103,8 @@ void template_plugin::plugin_set_program_options( void template_plugin::plugin_initialize(const boost::program_options::variables_map& options) { - database().applied_block.connect( [&]( const signed_block& b) { + // connect with group 0 by default to process before some special steps (e.g. snapshot or next_object_id) + database().applied_block.connect( 0, [this]( const signed_block& b) { my->on_block(b); } ); From 94681bf331f1117ddfcaafbd948acbdd7b2608e0 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 7 Oct 2022 20:23:35 +0000 Subject: [PATCH 286/338] Add tests for get_next_object_id API --- tests/tests/database_api_tests.cpp | 82 ++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) diff --git a/tests/tests/database_api_tests.cpp b/tests/tests/database_api_tests.cpp index b26c2b7b31..fe2138221d 100644 --- a/tests/tests/database_api_tests.cpp +++ b/tests/tests/database_api_tests.cpp @@ -1985,10 +1985,22 @@ BOOST_AUTO_TEST_CASE( get_trade_history ) opt.has_market_history_plugin = true; graphene::app::database_api db_api( db, &opt); + // check get_next_object_id + auto next_id = db_api.get_next_object_id( MARKET_HISTORY_SPACE_ID, + graphene::market_history::order_history_object_type, + false ); + BOOST_CHECK( std::string(next_id) == "5.0.0" ); + next_id = db_api.get_next_object_id( MARKET_HISTORY_SPACE_ID, + graphene::market_history::order_history_object_type, + true ); + BOOST_CHECK( std::string(next_id) == "5.0.0" ); + ACTORS((bob)(alice)); const auto& eur = create_user_issued_asset("EUR"); + asset_id_type eur_id = eur.id; const auto& usd = create_user_issued_asset("USD"); + asset_id_type usd_id = usd.id; issue_uia( bob_id, usd.amount(1000000) ); issue_uia( alice_id, eur.amount(1000000) ); @@ -1996,9 +2008,29 @@ BOOST_AUTO_TEST_CASE( get_trade_history ) // maker create an order create_sell_order(bob, usd.amount(200), eur.amount(210)); + // btw check get_next_object_id + next_id = db_api.get_next_object_id( protocol_ids, + limit_order_object_type, + false ); + BOOST_CHECK( std::string(next_id) == "1.7.0" ); + next_id = db_api.get_next_object_id( protocol_ids, + limit_order_object_type, + true ); + BOOST_CHECK( std::string(next_id) == "1.7.1" ); + // taker match it create_sell_order(alice, eur.amount(210), usd.amount(200)); + // btw check get_next_object_id + 
next_id = db_api.get_next_object_id( protocol_ids, + limit_order_object_type, + false ); + BOOST_CHECK( std::string(next_id) == "1.7.0" ); + next_id = db_api.get_next_object_id( protocol_ids, + limit_order_object_type, + true ); + BOOST_CHECK( std::string(next_id) == "1.7.2" ); + generate_block(); // taker is selling @@ -2040,6 +2072,56 @@ BOOST_AUTO_TEST_CASE( get_trade_history ) BOOST_CHECK_EQUAL( bob_id.instance.value, history[0].side1_account_id.instance.value ); BOOST_CHECK_EQUAL( alice_id.instance.value, history[0].side2_account_id.instance.value ); + // check get_next_object_id + next_id = db_api.get_next_object_id( MARKET_HISTORY_SPACE_ID, + graphene::market_history::order_history_object_type, + false ); + BOOST_CHECK( std::string(next_id) == "5.0.2" ); + next_id = db_api.get_next_object_id( MARKET_HISTORY_SPACE_ID, + graphene::market_history::order_history_object_type, + true ); + BOOST_CHECK( std::string(next_id) == "5.0.2" ); + next_id = db_api.get_next_object_id( protocol_ids, + limit_order_object_type, + false ); + BOOST_CHECK( std::string(next_id) == "1.7.2" ); + next_id = db_api.get_next_object_id( protocol_ids, + limit_order_object_type, + true ); + BOOST_CHECK( std::string(next_id) == "1.7.2" ); + // maker create an order + create_sell_order(bob, asset(200,usd_id), asset(210,eur_id)); + // check get_next_object_id + next_id = db_api.get_next_object_id( protocol_ids, + limit_order_object_type, + false ); + BOOST_CHECK( std::string(next_id) == "1.7.2" ); + next_id = db_api.get_next_object_id( protocol_ids, + limit_order_object_type, + true ); + BOOST_CHECK( std::string(next_id) == "1.7.3" ); + BOOST_CHECK_THROW( db_api.get_next_object_id( 1,100,true ), fc::exception ); + BOOST_CHECK_THROW( db_api.get_next_object_id( 10,0,false ), fc::exception ); + + generate_block(); + // check get_next_object_id + next_id = db_api.get_next_object_id( MARKET_HISTORY_SPACE_ID, + graphene::market_history::order_history_object_type, + false ); + BOOST_CHECK( std::string(next_id) == "5.0.2" ); + next_id = db_api.get_next_object_id( MARKET_HISTORY_SPACE_ID, + graphene::market_history::order_history_object_type, + true ); + BOOST_CHECK( std::string(next_id) == "5.0.2" ); + next_id = db_api.get_next_object_id( protocol_ids, + limit_order_object_type, + false ); + BOOST_CHECK( std::string(next_id) == "1.7.3" ); + next_id = db_api.get_next_object_id( protocol_ids, + limit_order_object_type, + true ); + BOOST_CHECK( std::string(next_id) == "1.7.3" ); + } FC_LOG_AND_RETHROW() } From bd440cf0cdbda06fe7b0ca833b273fbf9b1fd985 Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 13 Oct 2022 22:21:15 +0000 Subject: [PATCH 287/338] Refactor code about object_id and object_id_type And fix some other code smells. 
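A condensed view of the call-site pattern this refactor applies across the files listed below; the snippet is illustrative only, with `name` standing in for any account name or ID string.

    const account_object* acct = db_api_helper.get_account_from_string( name );
    account_id_type aid = acct->get_id();  // was: acct->id, relying on a conversion from the generic object_id_type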
--- libraries/app/api.cpp | 32 +- libraries/app/database_api.cpp | 183 +++---- libraries/app/database_api_helper.hxx | 8 +- libraries/app/database_api_impl.hxx | 13 +- libraries/chain/account_evaluator.cpp | 4 +- libraries/chain/account_object.cpp | 29 +- libraries/chain/asset_evaluator.cpp | 12 +- libraries/chain/db_balance.cpp | 10 +- libraries/chain/db_debug.cpp | 2 +- libraries/chain/db_genesis.cpp | 10 +- libraries/chain/db_getter.cpp | 4 +- libraries/chain/db_maint.cpp | 10 +- libraries/chain/db_market.cpp | 2 +- libraries/chain/db_notify.cpp | 2 +- libraries/chain/db_update.cpp | 4 +- libraries/chain/evaluator.cpp | 2 +- libraries/chain/htlc_evaluator.cpp | 45 +- .../include/graphene/chain/account_object.hpp | 19 +- .../include/graphene/chain/asset_object.hpp | 21 +- .../include/graphene/chain/balance_object.hpp | 5 +- .../graphene/chain/block_summary_object.hpp | 6 +- .../graphene/chain/budget_record_object.hpp | 6 +- .../include/graphene/chain/buyback_object.hpp | 6 +- .../graphene/chain/chain_property_object.hpp | 6 +- .../chain/committee_member_object.hpp | 6 +- .../graphene/chain/confidential_object.hpp | 6 +- .../graphene/chain/credit_offer_object.hpp | 16 +- .../chain/custom_authority_object.hpp | 7 +- .../include/graphene/chain/fba_object.hpp | 6 +- .../graphene/chain/global_property_object.hpp | 12 +- .../include/graphene/chain/htlc_object.hpp | 7 +- .../graphene/chain/liquidity_pool_object.hpp | 5 +- .../include/graphene/chain/market_object.hpp | 22 +- .../chain/operation_history_object.hpp | 13 +- .../graphene/chain/proposal_object.hpp | 5 +- .../graphene/chain/samet_fund_object.hpp | 5 +- .../chain/special_authority_object.hpp | 6 +- .../include/graphene/chain/ticket_object.hpp | 5 +- .../chain/transaction_history_object.hpp | 6 +- .../graphene/chain/vesting_balance_object.hpp | 6 +- .../chain/withdraw_permission_object.hpp | 6 +- .../include/graphene/chain/witness_object.hpp | 9 +- .../chain/witness_schedule_object.hpp | 6 +- .../include/graphene/chain/worker_object.hpp | 5 +- libraries/chain/is_authorized_asset.cpp | 2 +- libraries/chain/market_evaluator.cpp | 10 +- libraries/chain/proposal_object.cpp | 23 +- .../chain/withdraw_permission_evaluator.cpp | 30 +- libraries/db/include/graphene/db/index.hpp | 44 +- libraries/db/include/graphene/db/object.hpp | 49 +- .../include/graphene/db/object_database.hpp | 41 +- .../db/include/graphene/db/simple_index.hpp | 24 +- .../db/include/graphene/db/undo_database.hpp | 10 +- libraries/db/object_database.cpp | 4 +- .../account_history_plugin.cpp | 4 +- .../account_history_plugin.hpp | 6 +- .../api_helper_indexes/api_helper_indexes.cpp | 10 +- .../custom_operations/custom_objects.hpp | 6 +- .../elasticsearch/elasticsearch_plugin.cpp | 12 +- .../market_history/market_history_plugin.hpp | 44 +- .../market_history/market_history_plugin.cpp | 4 +- libraries/protocol/asset_ops.cpp | 2 +- .../include/graphene/protocol/asset_ops.hpp | 3 +- .../include/graphene/protocol/object_id.hpp | 124 +++-- .../include/graphene/protocol/vote.hpp | 10 +- .../wallet/include/graphene/wallet/wallet.hpp | 15 +- .../graphene/wallet/wallet_structs.hpp | 2 +- libraries/wallet/wallet.cpp | 19 +- libraries/wallet/wallet_account.cpp | 8 +- libraries/wallet/wallet_api_impl.cpp | 4 +- libraries/wallet/wallet_api_impl.hpp | 24 +- libraries/wallet/wallet_asset.cpp | 2 +- libraries/wallet/wallet_sign.cpp | 10 +- libraries/wallet/wallet_transfer.cpp | 25 +- libraries/wallet/wallet_voting.cpp | 9 +- tests/app/main.cpp | 3 +- tests/cli/main.cpp | 50 +- 
tests/common/database_fixture.cpp | 11 +- tests/common/database_fixture.hpp | 4 +- tests/elasticsearch/main.cpp | 18 +- tests/tests/api_limit_tests.cpp | 22 +- tests/tests/authority_tests.cpp | 65 +-- tests/tests/bitasset_tests.cpp | 44 +- tests/tests/block_tests.cpp | 14 +- tests/tests/bsip48_75_tests.cpp | 36 +- tests/tests/bsip85_tests.cpp | 12 +- tests/tests/bsip86_tests.cpp | 6 +- tests/tests/bsrm_basic_tests.cpp | 44 +- tests/tests/bsrm_indvd_settlement_tests.cpp | 74 +-- tests/tests/bsrm_no_settlement_tests.cpp | 106 ++-- tests/tests/credit_offer_tests.cpp | 120 ++--- tests/tests/custom_authority_tests.cpp | 166 +++--- tests/tests/custom_operations.cpp | 2 +- tests/tests/database_api_tests.cpp | 60 +-- tests/tests/database_tests.cpp | 25 +- tests/tests/fee_tests.cpp | 199 +++---- tests/tests/force_settle_fee_tests.cpp | 100 ++-- tests/tests/force_settle_match_tests.cpp | 188 +++---- tests/tests/grouped_orders_api_tests.cpp | 2 +- tests/tests/history_api_tests.cpp | 20 +- tests/tests/htlc_tests.cpp | 12 +- tests/tests/liquidity_pool_tests.cpp | 187 ++++--- tests/tests/margin_call_fee_tests.cpp | 64 +-- tests/tests/market_fee_sharing_tests.cpp | 69 ++- tests/tests/market_rounding_tests.cpp | 204 ++++---- tests/tests/market_tests.cpp | 494 +++++++++--------- tests/tests/network_broadcast_api_tests.cpp | 6 +- tests/tests/operation_tests.cpp | 180 +++---- tests/tests/operation_tests2.cpp | 96 ++-- tests/tests/pob_tests.cpp | 75 ++- tests/tests/samet_fund_tests.cpp | 72 +-- tests/tests/settle_tests.cpp | 156 +++--- tests/tests/simple_maker_taker_fee_tests.cpp | 152 +++--- tests/tests/smartcoin_tests.cpp | 42 +- tests/tests/swan_tests.cpp | 34 +- tests/tests/uia_tests.cpp | 30 +- tests/tests/voting_tests.cpp | 68 +-- 117 files changed, 2271 insertions(+), 2241 deletions(-) diff --git a/libraries/app/api.cpp b/libraries/app/api.cpp index 3cec7094e4..1f252efb87 100644 --- a/libraries/app/api.cpp +++ b/libraries/app/api.cpp @@ -382,8 +382,8 @@ namespace graphene { namespace app { FC_ASSERT(_app.chain_database()); const auto& db = *_app.chain_database(); database_api_helper db_api_helper( _app ); - asset_id_type a = db_api_helper.get_asset_from_string( asset_a )->id; - asset_id_type b = db_api_helper.get_asset_from_string( asset_b )->id; + asset_id_type a = db_api_helper.get_asset_from_string( asset_a )->get_id(); + asset_id_type b = db_api_helper.get_asset_from_string( asset_b )->get_id(); if( a > b ) std::swap(a,b); const auto& history_idx = db.get_index_type().indices().get(); history_key hkey; @@ -426,7 +426,7 @@ namespace graphene { namespace app { account_id_type account; try { database_api_helper db_api_helper( _app ); - account = db_api_helper.get_account_from_string(account_id_or_name)->id; + account = db_api_helper.get_account_from_string(account_id_or_name)->get_id(); } catch(...) { return result; } if(_app.is_plugin_enabled("elasticsearch")) { @@ -479,8 +479,8 @@ namespace graphene { namespace app { account_id_type account; try { database_api_helper db_api_helper( _app ); - account = db_api_helper.get_account_from_string(account_name_or_id)->id; - } catch(...) { return result; } + account = db_api_helper.get_account_from_string(account_name_or_id)->get_id(); + } catch( const fc::exception& ) { return result; } fc::time_point_sec start = ostart.valid() ? 
*ostart : fc::time_point_sec::maximum(); @@ -490,7 +490,7 @@ namespace graphene { namespace app { return result; const auto& acc_hist_idx = db.get_index_type().indices().get(); - auto itr = acc_hist_idx.lower_bound( boost::make_tuple( account, op_hist_itr->id ) ); + auto itr = acc_hist_idx.lower_bound( boost::make_tuple( account, op_hist_itr->get_id() ) ); auto itr_end = acc_hist_idx.upper_bound( account ); while( itr != itr_end && result.size() < limit ) @@ -521,7 +521,7 @@ namespace graphene { namespace app { account_id_type account; try { database_api_helper db_api_helper( _app ); - account = db_api_helper.get_account_from_string(account_id_or_name)->id; + account = db_api_helper.get_account_from_string(account_id_or_name)->get_id(); } catch(...) { return result; } const auto& stats = account(db).statistics(db); if( stats.most_recent_op == account_history_id_type() ) return result; @@ -541,7 +541,7 @@ namespace graphene { namespace app { else node = &node->next(db); } if( stop.instance.value == 0 && result.size() < limit ) { - auto head = db.find(account_history_id_type()); + const auto* head = db.find(account_history_id_type()); if (head != nullptr && head->account == account && head->operation_id(db).op.which() == operation_type) result.push_back(head->operation_id(db)); } @@ -566,7 +566,7 @@ namespace graphene { namespace app { account_id_type account; try { database_api_helper db_api_helper( _app ); - account = db_api_helper.get_account_from_string(account_id_or_name)->id; + account = db_api_helper.get_account_from_string(account_id_or_name)->get_id(); } catch(...) { return result; } const auto& stats = account(db).statistics(db); if( start == 0 ) @@ -674,8 +674,8 @@ namespace graphene { namespace app { const auto& db = *_app.chain_database(); database_api_helper db_api_helper( _app ); - asset_id_type a = db_api_helper.get_asset_from_string( asset_a )->id; - asset_id_type b = db_api_helper.get_asset_from_string( asset_b )->id; + asset_id_type a = db_api_helper.get_asset_from_string( asset_a )->get_id(); + asset_id_type b = db_api_helper.get_asset_from_string( asset_b )->get_id(); vector result; const auto configured_limit = _app.get_options().api_limit_get_market_history; result.reserve( configured_limit ); @@ -899,7 +899,7 @@ namespace graphene { namespace app { ("configured_limit", configured_limit) ); database_api_helper db_api_helper( _app ); - asset_id_type asset_id = db_api_helper.get_asset_from_string( asset_symbol_or_id )->id; + asset_id_type asset_id = db_api_helper.get_asset_from_string( asset_symbol_or_id )->get_id(); const auto& bal_idx = _db.get_index_type< account_balance_index >().indices().get< by_asset_balance >(); auto range = bal_idx.equal_range( boost::make_tuple( asset_id ) ); @@ -933,7 +933,7 @@ namespace graphene { namespace app { int64_t asset_api::get_asset_holders_count( const std::string& asset_symbol_or_id ) const { const auto& bal_idx = _db.get_index_type< account_balance_index >().indices().get< by_asset_balance >(); database_api_helper db_api_helper( _app ); - asset_id_type asset_id = db_api_helper.get_asset_from_string( asset_symbol_or_id )->id; + asset_id_type asset_id = db_api_helper.get_asset_from_string( asset_symbol_or_id )->get_id(); auto range = bal_idx.equal_range( boost::make_tuple( asset_id ) ); int64_t count = boost::distance(range) - 1; @@ -996,8 +996,8 @@ namespace graphene { namespace app { vector< limit_order_group > result; database_api_helper db_api_helper( _app ); - asset_id_type base_asset_id = db_api_helper.get_asset_from_string( 
base_asset )->id; - asset_id_type quote_asset_id = db_api_helper.get_asset_from_string( quote_asset )->id; + asset_id_type base_asset_id = db_api_helper.get_asset_from_string( base_asset )->get_id(); + asset_id_type quote_asset_id = db_api_helper.get_asset_from_string( quote_asset )->get_id(); price max_price = price::max( base_asset_id, quote_asset_id ); price min_price = price::min( base_asset_id, quote_asset_id ); @@ -1037,7 +1037,7 @@ namespace graphene { namespace app { if( o_account_name_or_id.valid() ) { const string& account_name_or_id = *o_account_name_or_id; - const account_id_type account_id = db_api_helper.get_account_from_string(account_name_or_id)->id; + const account_id_type account_id = db_api_helper.get_account_from_string(account_name_or_id)->get_id(); if( catalog.valid() ) { if( key.valid() ) diff --git a/libraries/app/database_api.cpp b/libraries/app/database_api.cpp index dd83047566..ddd96aace9 100644 --- a/libraries/app/database_api.cpp +++ b/libraries/app/database_api.cpp @@ -61,7 +61,7 @@ database_api_helper::database_api_helper( graphene::chain::database& db, const a { // Nothing else to do } -database_api_helper::database_api_helper( graphene::app::application& app ) +database_api_helper::database_api_helper( const graphene::app::application& app ) :_db( *app.chain_database() ), _app_options( &app.get_options() ) { // Nothing else to do } @@ -486,7 +486,7 @@ bool database_api_impl::is_public_key_registered(string public_key) const account_id_type database_api::get_account_id_from_string(const std::string& name_or_id)const { - return my->get_account_from_string( name_or_id )->id; + return my->get_account_from_string( name_or_id )->get_id(); } vector> database_api::get_accounts( const vector& account_names_or_ids, @@ -573,7 +573,7 @@ std::map database_api_impl::get_full_accounts( const const auto& proposals_by_account = proposal_idx.get_secondary_index< graphene::chain::required_approval_index>(); - auto required_approvals_itr = proposals_by_account._account_to_proposals.find( account->id ); + auto required_approvals_itr = proposals_by_account._account_to_proposals.find( account->get_id() ); if( required_approvals_itr != proposals_by_account._account_to_proposals.end() ) { acnt.proposals.reserve( std::min(required_approvals_itr->second.size(), @@ -591,7 +591,7 @@ std::map database_api_impl::get_full_accounts( const // Add the account's balances const auto& balances = _db.get_index_type< primary_index< account_balance_index > >(). 
- get_secondary_index< balances_by_account_index >().get_account_balances( account->id ); + get_secondary_index< balances_by_account_index >().get_account_balances( account->get_id() ); for( const auto& balance : balances ) { if(acnt.balances.size() >= api_limit_get_full_accounts_lists) { @@ -603,7 +603,7 @@ std::map database_api_impl::get_full_accounts( const // Add the account's vesting balances auto vesting_range = _db.get_index_type().indices().get() - .equal_range(account->id); + .equal_range(account->get_id()); for(auto itr = vesting_range.first; itr != vesting_range.second; ++itr) { if(acnt.vesting_balances.size() >= api_limit_get_full_accounts_lists) { @@ -615,7 +615,7 @@ std::map database_api_impl::get_full_accounts( const // Add the account's orders auto order_range = _db.get_index_type().indices().get() - .equal_range(account->id); + .equal_range(account->get_id()); for(auto itr = order_range.first; itr != order_range.second; ++itr) { if(acnt.limit_orders.size() >= api_limit_get_full_accounts_lists) { @@ -624,7 +624,8 @@ std::map database_api_impl::get_full_accounts( const } acnt.limit_orders.emplace_back(*itr); } - auto call_range = _db.get_index_type().indices().get().equal_range(account->id); + auto call_range = _db.get_index_type().indices().get() + .equal_range(account->get_id()); for(auto itr = call_range.first; itr != call_range.second; ++itr) { if(acnt.call_orders.size() >= api_limit_get_full_accounts_lists) { @@ -634,7 +635,7 @@ std::map database_api_impl::get_full_accounts( const acnt.call_orders.emplace_back(*itr); } auto settle_range = _db.get_index_type().indices().get() - .equal_range(account->id); + .equal_range(account->get_id()); for(auto itr = settle_range.first; itr != settle_range.second; ++itr) { if(acnt.settle_orders.size() >= api_limit_get_full_accounts_lists) { @@ -645,19 +646,19 @@ std::map database_api_impl::get_full_accounts( const } // get assets issued by user - auto asset_range = _db.get_index_type().indices().get().equal_range(account->id); + auto asset_range = _db.get_index_type().indices().get().equal_range(account->get_id()); for(auto itr = asset_range.first; itr != asset_range.second; ++itr) { if(acnt.assets.size() >= api_limit_get_full_accounts_lists) { acnt.more_data_available.assets = true; break; } - acnt.assets.emplace_back(itr->id); + acnt.assets.emplace_back(itr->get_id()); } // get withdraws permissions auto withdraw_indices = _db.get_index_type().indices(); - auto withdraw_from_range = withdraw_indices.get().equal_range(account->id); + auto withdraw_from_range = withdraw_indices.get().equal_range(account->get_id()); for(auto itr = withdraw_from_range.first; itr != withdraw_from_range.second; ++itr) { if(acnt.withdraws_from.size() >= api_limit_get_full_accounts_lists) { @@ -666,7 +667,7 @@ std::map database_api_impl::get_full_accounts( const } acnt.withdraws_from.emplace_back(*itr); } - auto withdraw_authorized_range = withdraw_indices.get().equal_range(account->id); + auto withdraw_authorized_range = withdraw_indices.get().equal_range(account->get_id()); for(auto itr = withdraw_authorized_range.first; itr != withdraw_authorized_range.second; ++itr) { if(acnt.withdraws_to.size() >= api_limit_get_full_accounts_lists) { @@ -677,7 +678,8 @@ std::map database_api_impl::get_full_accounts( const } // get htlcs - auto htlc_from_range = _db.get_index_type().indices().get().equal_range(account->id); + auto htlc_from_range = _db.get_index_type().indices().get() + .equal_range(account->get_id()); for(auto itr = htlc_from_range.first; itr != 
htlc_from_range.second; ++itr) { if(acnt.htlcs_from.size() >= api_limit_get_full_accounts_lists) { @@ -686,7 +688,7 @@ std::map database_api_impl::get_full_accounts( const } acnt.htlcs_from.emplace_back(*itr); } - auto htlc_to_range = _db.get_index_type().indices().get().equal_range(account->id); + auto htlc_to_range = _db.get_index_type().indices().get().equal_range(account->get_id()); for(auto itr = htlc_to_range.first; itr != htlc_to_range.second; ++itr) { if(acnt.htlcs_to.size() >= api_limit_get_full_accounts_lists) { @@ -750,7 +752,7 @@ vector database_api_impl::get_account_references( const std::st const auto& idx = _db.get_index_type(); const auto& aidx = dynamic_cast(idx); const auto& refs = aidx.get_secondary_index(); - const account_id_type account_id = get_account_from_string(account_id_or_name)->id; + const account_id_type account_id = get_account_from_string(account_id_or_name)->get_id(); auto itr = refs.account_to_account_memberships.find(account_id); vector result; @@ -834,7 +836,7 @@ vector database_api_impl::get_account_balances( const std::string& accoun const flat_set& assets )const { const account_object* account = get_account_from_string(account_name_or_id); - account_id_type acnt = account->id; + account_id_type acnt = account->get_id(); vector result; if (assets.empty()) { @@ -917,7 +919,7 @@ vector database_api_impl::get_vesting_balances( const st { try { - const account_id_type account_id = get_account_from_string(account_id_or_name)->id; + const account_id_type account_id = get_account_from_string(account_id_or_name)->get_id(); vector result; auto vesting_range = _db.get_index_type().indices().get() .equal_range(account_id); @@ -938,7 +940,7 @@ vector database_api_impl::get_vesting_balances( const st asset_id_type database_api::get_asset_id_from_string(const std::string& symbol_or_id)const { - return my->get_asset_from_string( symbol_or_id )->id; + return my->get_asset_from_string( symbol_or_id )->get_id(); } vector> database_api::get_assets( @@ -1018,10 +1020,10 @@ vector database_api_impl::get_assets_by_issuer(const std: ("configured_limit", configured_limit) ); vector result; - const account_id_type account = get_account_from_string(issuer_name_or_id)->id; + const account_id_type account = get_account_from_string(issuer_name_or_id)->get_id(); const auto& asset_idx = _db.get_index_type().indices().get(); auto asset_index_end = asset_idx.end(); - auto asset_itr = asset_idx.lower_bound(boost::make_tuple(account, start)); + auto asset_itr = asset_idx.lower_bound(boost::make_tuple(account, object_id_type(start))); while(asset_itr != asset_index_end && asset_itr->issuer == account && result.size() < limit) { result.emplace_back( extend_asset( *asset_itr ) ); @@ -1062,8 +1064,8 @@ vector database_api_impl::get_limit_orders( const std::strin "limit can not be greater than ${configured_limit}", ("configured_limit", configured_limit) ); - const asset_id_type asset_a_id = get_asset_from_string(a)->id; - const asset_id_type asset_b_id = get_asset_from_string(b)->id; + const asset_id_type asset_a_id = get_asset_from_string(a)->get_id(); + const asset_id_type asset_b_id = get_asset_from_string(b)->get_id(); return get_limit_orders(asset_a_id, asset_b_id, limit); } @@ -1090,11 +1092,12 @@ vector database_api_impl::get_limit_orders_by_account( const if (account == nullptr) return results; - limit_order_id_type start_id = ostart_id.valid() ? *ostart_id : limit_order_id_type(); + limit_order_id_type start_order_id = ostart_id.valid() ? 
*ostart_id : limit_order_id_type(); + object_id_type start_id { start_order_id }; const auto& index_by_account = _db.get_index_type().indices().get(); - auto lower_itr = index_by_account.lower_bound( std::make_tuple( account->id, start_id ) ); - auto upper_itr = index_by_account.upper_bound( account->id ); + auto lower_itr = index_by_account.lower_bound( std::make_tuple( account->get_id(), start_id ) ); + auto upper_itr = index_by_account.upper_bound( account->get_id() ); results.reserve( limit ); uint32_t count = 0; @@ -1135,8 +1138,8 @@ vector database_api_impl::get_account_limit_orders( FC_ASSERT( assets[0], "Invalid base asset symbol: ${s}", ("s",base) ); FC_ASSERT( assets[1], "Invalid quote asset symbol: ${s}", ("s",quote) ); - auto base_id = assets[0]->id; - auto quote_id = assets[1]->id; + auto base_id = assets[0]->get_id(); + auto quote_id = assets[1]->get_id(); if (ostart_price.valid()) { FC_ASSERT(ostart_price->base.asset_id == base_id, "Base asset inconsistent with start price"); @@ -1150,10 +1153,11 @@ vector database_api_impl::get_account_limit_orders( // if both order_id and price are invalid, query the first page if ( !ostart_id.valid() && !ostart_price.valid() ) { - lower_itr = index_by_account.lower_bound(std::make_tuple(account->id, price::max(base_id, quote_id))); + lower_itr = index_by_account.lower_bound(std::make_tuple(account->get_id(), price::max(base_id, quote_id))); } else if ( ostart_id.valid() ) { + object_id_type start_id { *ostart_id }; // in case of the order been deleted during page querying const limit_order_object *p_loo = _db.find(*ostart_id); @@ -1161,7 +1165,7 @@ vector database_api_impl::get_account_limit_orders( { if ( ostart_price.valid() ) { - lower_itr = index_by_account.lower_bound(std::make_tuple(account->id, *ostart_price, *ostart_id)); + lower_itr = index_by_account.lower_bound(std::make_tuple(account->get_id(), *ostart_price, start_id)); } else { @@ -1178,16 +1182,16 @@ vector database_api_impl::get_account_limit_orders( FC_ASSERT(loo.sell_price.quote.asset_id == quote_id, "Order quote asset inconsistent with order"); FC_ASSERT(loo.seller == account->get_id(), "Order not owned by specified account"); - lower_itr = index_by_account.lower_bound(std::make_tuple(account->id, loo.sell_price, *ostart_id)); + lower_itr = index_by_account.lower_bound(std::make_tuple(account->get_id(), loo.sell_price, start_id)); } } else { // if reach here start_price must be valid - lower_itr = index_by_account.lower_bound(std::make_tuple(account->id, *ostart_price)); + lower_itr = index_by_account.lower_bound(std::make_tuple(account->get_id(), *ostart_price)); } - upper_itr = index_by_account.upper_bound(std::make_tuple(account->id, price::min(base_id, quote_id))); + upper_itr = index_by_account.upper_bound(std::make_tuple(account->get_id(), price::min(base_id, quote_id))); // Add the account's orders for ( ; lower_itr != upper_itr && count < limit; ++lower_itr, ++count) @@ -1243,7 +1247,7 @@ vector database_api_impl::get_call_orders_by_account(const st ("configured_limit", configured_limit) ); vector result; - const account_id_type account = get_account_from_string(account_name_or_id)->id; + const account_id_type account = get_account_from_string(account_name_or_id)->get_id(); const auto& call_idx = _db.get_index_type().indices().get(); auto call_index_end = call_idx.end(); auto call_itr = call_idx.lower_bound(boost::make_tuple(account, start)); @@ -1268,7 +1272,7 @@ vector database_api_impl::get_settle_orders(const std:: "limit can not be greater than 
${configured_limit}", ("configured_limit", configured_limit) ); - const asset_id_type asset_a_id = get_asset_from_string(a)->id; + const asset_id_type asset_a_id = get_asset_from_string(a)->get_id(); const auto& settle_index = _db.get_index_type().indices().get(); const asset_object& mia = _db.get(asset_a_id); @@ -1303,10 +1307,10 @@ vector database_api_impl::get_settle_orders_by_account( ("configured_limit", configured_limit) ); vector result; - const account_id_type account = get_account_from_string(account_name_or_id)->id; + const account_id_type account = get_account_from_string(account_name_or_id)->get_id(); const auto& settle_idx = _db.get_index_type().indices().get(); auto settle_index_end = settle_idx.end(); - auto settle_itr = settle_idx.lower_bound(boost::make_tuple(account, start)); + auto settle_itr = settle_idx.lower_bound(boost::make_tuple(account, object_id_type(start))); while(settle_itr != settle_index_end && settle_itr->owner == account && result.size() < limit) { result.push_back(*settle_itr); @@ -1340,7 +1344,7 @@ vector database_api_impl::get_collateral_bids( const std: const asset_object& swan = *get_asset_from_string(asset_id_or_symbol); FC_ASSERT( swan.is_market_issued(), "Asset is not a MPA" ); - const asset_id_type asset_id = swan.id; + const asset_id_type asset_id = swan.get_id(); const auto& idx = _db.get_index_type().indices().get(); auto itr = idx.lower_bound( asset_id ); auto end = idx.upper_bound( asset_id ); @@ -1362,8 +1366,8 @@ void database_api::subscribe_to_market( std::function call void database_api_impl::subscribe_to_market( std::function callback, const std::string& a, const std::string& b ) { - auto asset_a_id = get_asset_from_string(a)->id; - auto asset_b_id = get_asset_from_string(b)->id; + auto asset_a_id = get_asset_from_string(a)->get_id(); + auto asset_b_id = get_asset_from_string(b)->get_id(); if(asset_a_id > asset_b_id) std::swap(asset_a_id,asset_b_id); FC_ASSERT(asset_a_id != asset_b_id); @@ -1377,8 +1381,8 @@ void database_api::unsubscribe_from_market(const std::string& a, const std::stri void database_api_impl::unsubscribe_from_market(const std::string& a, const std::string& b) { - auto asset_a_id = get_asset_from_string(a)->id; - auto asset_b_id = get_asset_from_string(b)->id; + auto asset_a_id = get_asset_from_string(a)->get_id(); + auto asset_b_id = get_asset_from_string(b)->get_id(); if(a > b) std::swap(asset_a_id,asset_b_id); FC_ASSERT(asset_a_id != asset_b_id); @@ -1399,8 +1403,8 @@ market_ticker database_api_impl::get_ticker( const string& base, const string& q FC_ASSERT( assets[0], "Invalid base asset symbol: ${s}", ("s",base) ); FC_ASSERT( assets[1], "Invalid quote asset symbol: ${s}", ("s",quote) ); - auto base_id = assets[0]->id; - auto quote_id = assets[1]->id; + auto base_id = assets[0]->get_id(); + auto quote_id = assets[1]->get_id(); if( base_id > quote_id ) std::swap( base_id, quote_id ); const auto& ticker_idx = _db.get_index_type().indices().get(); auto itr = ticker_idx.find( std::make_tuple( base_id, quote_id ) ); @@ -1457,8 +1461,8 @@ order_book database_api_impl::get_order_book( const string& base, const string& FC_ASSERT( assets[0], "Invalid base asset symbol: ${s}", ("s",base) ); FC_ASSERT( assets[1], "Invalid quote asset symbol: ${s}", ("s",quote) ); - auto base_id = assets[0]->id; - auto quote_id = assets[1]->id; + auto base_id = assets[0]->get_id(); + auto quote_id = assets[1]->get_id(); auto orders = get_limit_orders( base_id, quote_id, limit ); for( const auto& o : orders ) @@ -1470,7 +1474,7 @@ order_book 
database_api_impl::get_order_book( const string& base, const string& * o.sell_price.quote.amount.value / o.sell_price.base.amount.value ) ); auto base_amt = assets[0]->amount_to_string( o.for_sale ); - result.bids.emplace_back( order_price, quote_amt, base_amt, o.id, + result.bids.emplace_back( order_price, quote_amt, base_amt, o.get_id(), o.seller, o.seller(_db).name, o.expiration ); } else @@ -1479,7 +1483,7 @@ order_book database_api_impl::get_order_book( const string& base, const string& auto base_amt = assets[0]->amount_to_string( share_type( fc::uint128_t( o.for_sale.value ) * o.sell_price.quote.amount.value / o.sell_price.base.amount.value ) ); - result.asks.emplace_back( order_price, quote_amt, base_amt, o.id, + result.asks.emplace_back( order_price, quote_amt, base_amt, o.get_id(), o.seller, o.seller(_db).name, o.expiration ); } } @@ -1546,8 +1550,8 @@ vector database_api_impl::get_trade_history( const string& base, FC_ASSERT( assets[0], "Invalid base asset symbol: ${s}", ("s",base) ); FC_ASSERT( assets[1], "Invalid quote asset symbol: ${s}", ("s",quote) ); - auto base_id = assets[0]->id; - auto quote_id = assets[1]->id; + auto base_id = assets[0]->get_id(); + auto quote_id = assets[1]->get_id(); if( base_id > quote_id ) std::swap( base_id, quote_id ); @@ -1652,8 +1656,8 @@ vector database_api_impl::get_trade_history_by_sequence( FC_ASSERT( assets[0], "Invalid base asset symbol: ${s}", ("s",base) ); FC_ASSERT( assets[1], "Invalid quote asset symbol: ${s}", ("s",quote) ); - auto base_id = assets[0]->id; - auto quote_id = assets[1]->id; + auto base_id = assets[0]->get_id(); + auto quote_id = assets[1]->get_id(); if( base_id > quote_id ) std::swap( base_id, quote_id ); const auto& history_idx = _db.get_index_type().indices().get(); @@ -1762,7 +1766,7 @@ vector database_api::get_liquidity_pools_by_asse const optional& start_id, const optional& with_statistics )const { - asset_id_type asset_id = my->get_asset_from_string(asset_symbol_or_id)->id; + asset_id_type asset_id = my->get_asset_from_string(asset_symbol_or_id)->get_id(); return my->get_liquidity_pools_by_asset_x( limit, start_id, @@ -1776,7 +1780,7 @@ vector database_api::get_liquidity_pools_by_asse const optional& start_id, const optional& with_statistics )const { - asset_id_type asset_id = my->get_asset_from_string(asset_symbol_or_id)->id; + asset_id_type asset_id = my->get_asset_from_string(asset_symbol_or_id)->get_id(); return my->get_liquidity_pools_by_asset_x( limit, start_id, @@ -1813,7 +1817,7 @@ vector database_api_impl::get_liquidity_pools_by "limit can not be greater than ${configured_limit}", ("configured_limit", configured_limit) ); - asset_id_type aid = get_asset_from_string(asset_symbol_or_id)->id; + asset_id_type aid = get_asset_from_string(asset_symbol_or_id)->get_id(); FC_ASSERT( asset_in_liquidity_pools_index, "Internal error" ); const auto& pools = asset_in_liquidity_pools_index->get_liquidity_pools_by_asset( aid ); @@ -1843,8 +1847,8 @@ vector database_api::get_liquidity_pools_by_both const optional& start_id, const optional& with_statistics )const { - asset_id_type asset_id_a = my->get_asset_from_string(asset_symbol_or_id_a)->id; - asset_id_type asset_id_b = my->get_asset_from_string(asset_symbol_or_id_b)->id; + asset_id_type asset_id_a = my->get_asset_from_string(asset_symbol_or_id_a)->get_id(); + asset_id_type asset_id_b = my->get_asset_from_string(asset_symbol_or_id_b)->get_id(); if( asset_id_a > asset_id_b ) std::swap( asset_id_a, asset_id_b ); return my->get_liquidity_pools_by_asset_x( @@ -1976,9 
+1980,10 @@ vector database_api_impl::get_liquidity_pools_by vector results; - account_id_type owner = get_account_from_string(account_name_or_id)->id; + account_id_type owner = get_account_from_string(account_name_or_id)->get_id(); - asset_id_type start_id = ostart_id.valid() ? *ostart_id : asset_id_type(); + asset_id_type start_asset_id = ostart_id.valid() ? *ostart_id : asset_id_type(); + object_id_type start_id { start_asset_id }; // get assets owned by account const auto& idx = _db.get_index_type().indices().get(); @@ -2019,7 +2024,7 @@ vector database_api::get_samet_funds_by_owner( const optional& limit, const optional& start_id )const { - account_id_type owner = my->get_account_from_string(account_name_or_id)->id; + account_id_type owner = my->get_account_from_string(account_name_or_id)->get_id(); const auto& idx = my->_db.get_index_type().indices().get(); return my->get_objects_by_x< samet_fund_object, samet_fund_id_type @@ -2032,7 +2037,7 @@ vector database_api::get_samet_funds_by_asset( const optional& limit, const optional& start_id )const { - asset_id_type asset_type = my->get_asset_from_string(asset_symbol_or_id)->id; + asset_id_type asset_type = my->get_asset_from_string(asset_symbol_or_id)->get_id(); const auto& idx = my->_db.get_index_type().indices().get(); return my->get_objects_by_x< samet_fund_object, samet_fund_id_type @@ -2063,7 +2068,7 @@ vector database_api::get_credit_offers_by_owner( const optional& limit, const optional& start_id )const { - account_id_type owner = my->get_account_from_string(account_name_or_id)->id; + account_id_type owner = my->get_account_from_string(account_name_or_id)->get_id(); const auto& idx = my->_db.get_index_type().indices().get(); return my->get_objects_by_x< credit_offer_object, credit_offer_id_type @@ -2076,7 +2081,7 @@ vector database_api::get_credit_offers_by_asset( const optional& limit, const optional& start_id )const { - asset_id_type asset_type = my->get_asset_from_string(asset_symbol_or_id)->id; + asset_id_type asset_type = my->get_asset_from_string(asset_symbol_or_id)->get_id(); const auto& idx = my->_db.get_index_type().indices().get(); return my->get_objects_by_x< credit_offer_object, credit_offer_id_type @@ -2112,7 +2117,7 @@ vector database_api::get_credit_deals_by_offer_owner( const optional& limit, const optional& start_id )const { - account_id_type owner = my->get_account_from_string(account_name_or_id)->id; + account_id_type owner = my->get_account_from_string(account_name_or_id)->get_id(); const auto& idx = my->_db.get_index_type().indices().get(); return my->get_objects_by_x< credit_deal_object, credit_deal_id_type @@ -2125,7 +2130,7 @@ vector database_api::get_credit_deals_by_borrower( const optional& limit, const optional& start_id )const { - account_id_type borrower = my->get_account_from_string(account_name_or_id)->id; + account_id_type borrower = my->get_account_from_string(account_name_or_id)->get_id(); const auto& idx = my->_db.get_index_type().indices().get(); return my->get_objects_by_x< credit_deal_object, credit_deal_id_type @@ -2138,7 +2143,7 @@ vector database_api::get_credit_deals_by_debt_asset( const optional& limit, const optional& start_id )const { - asset_id_type asset_type = my->get_asset_from_string(asset_symbol_or_id)->id; + asset_id_type asset_type = my->get_asset_from_string(asset_symbol_or_id)->get_id(); const auto& idx = my->_db.get_index_type().indices().get(); return my->get_objects_by_x< credit_deal_object, credit_deal_id_type @@ -2151,7 +2156,7 @@ vector 
database_api::get_credit_deals_by_collateral_asset( const optional& limit, const optional& start_id )const { - asset_id_type asset_type = my->get_asset_from_string(asset_symbol_or_id)->id; + asset_id_type asset_type = my->get_asset_from_string(asset_symbol_or_id)->get_id(); const auto& idx = my->_db.get_index_type().indices().get(); return my->get_objects_by_x< credit_deal_object, credit_deal_id_type @@ -2191,7 +2196,7 @@ fc::optional database_api::get_witness_by_account(const std::str fc::optional database_api_impl::get_witness_by_account(const std::string& account_id_or_name) const { const auto& idx = _db.get_index_type().indices().get(); - const account_id_type account = get_account_from_string(account_id_or_name)->id; + const account_id_type account = get_account_from_string(account_id_or_name)->get_id(); auto itr = idx.find(account); if( itr != idx.end() ) return *itr; @@ -2225,7 +2230,7 @@ map database_api_impl::lookup_witness_accounts( const s for (const witness_object& witness : witnesses_by_id) if (auto account_iter = _db.find(witness.witness_account)) if (account_iter->name >= lower_bound_name) // we can ignore anything below lower_bound_name - witnesses_by_account_name.insert(std::make_pair(account_iter->name, witness.id)); + witnesses_by_account_name.insert(std::make_pair(account_iter->name, witness.get_id())); auto end_iter = witnesses_by_account_name.begin(); while( end_iter != witnesses_by_account_name.end() && limit > 0 ) @@ -2282,7 +2287,7 @@ fc::optional database_api_impl::get_committee_member_by const std::string& account_id_or_name )const { const auto& idx = _db.get_index_type().indices().get(); - const account_id_type account = get_account_from_string(account_id_or_name)->id; + const account_id_type account = get_account_from_string(account_id_or_name)->get_id(); auto itr = idx.find(account); if( itr != idx.end() ) return *itr; @@ -2314,9 +2319,9 @@ map database_api_impl::lookup_committee_member // TODO optimize std::map committee_members_by_account_name; for (const committee_member_object& committee_member : committee_members_by_id) - if (auto account_iter = _db.find(committee_member.committee_member_account)) - if (account_iter->name >= lower_bound_name) // we can ignore anything below lower_bound_name - committee_members_by_account_name.insert(std::make_pair(account_iter->name, committee_member.id)); + if (auto account_iter = _db.find(committee_member.committee_member_account)) + if (account_iter->name >= lower_bound_name) // we can ignore anything below lower_bound_name + committee_members_by_account_name.insert(std::make_pair(account_iter->name, committee_member.get_id())); auto end_iter = committee_members_by_account_name.begin(); while( end_iter != committee_members_by_account_name.end() && limit > 0 ) @@ -2388,7 +2393,7 @@ vector database_api_impl::get_workers_by_account(const std::strin vector result; const auto& workers_idx = _db.get_index_type().indices().get(); - const account_id_type account = get_account_from_string(account_id_or_name)->id; + const account_id_type account = get_account_from_string(account_id_or_name)->get_id(); auto range = workers_idx.equal_range(account); for(auto itr = range.first; itr != range.second; ++itr) { @@ -2642,7 +2647,7 @@ bool database_api_impl::verify_account_authority( const string& account_name_or_ { // create a dummy transfer transfer_operation op; - op.from = get_account_from_string(account_name_or_id)->id; + op.from = get_account_from_string(account_name_or_id)->get_id(); std::vector ops; ops.emplace_back(op); @@ 
-2779,7 +2784,7 @@ vector database_api_impl::get_proposed_transactions( const std: const auto& proposals_by_account = proposal_idx.get_secondary_index(); vector result; - const account_id_type id = get_account_from_string(account_id_or_name)->id; + const account_id_type id = get_account_from_string(account_id_or_name)->get_id(); auto required_approvals_itr = proposals_by_account._account_to_proposals.find( id ); if( required_approvals_itr != proposals_by_account._account_to_proposals.end() ) @@ -2849,8 +2854,8 @@ vector database_api_impl::get_withdraw_permissions_b const auto& withdraw_idx = _db.get_index_type().indices().get(); auto withdraw_index_end = withdraw_idx.end(); - const account_id_type account = get_account_from_string(account_id_or_name)->id; - auto withdraw_itr = withdraw_idx.lower_bound(boost::make_tuple(account, start)); + const account_id_type account = get_account_from_string(account_id_or_name)->get_id(); + auto withdraw_itr = withdraw_idx.lower_bound(boost::make_tuple(account, object_id_type(start))); while( withdraw_itr != withdraw_index_end && withdraw_itr->withdraw_from_account == account && result.size() < limit ) { @@ -2883,8 +2888,8 @@ vector database_api_impl::get_withdraw_permissions_b const auto& withdraw_idx = _db.get_index_type().indices().get(); auto withdraw_index_end = withdraw_idx.end(); - const account_id_type account = get_account_from_string(account_id_or_name)->id; - auto withdraw_itr = withdraw_idx.lower_bound(boost::make_tuple(account, start)); + const account_id_type account = get_account_from_string(account_id_or_name)->get_id(); + auto withdraw_itr = withdraw_idx.lower_bound(boost::make_tuple(account, object_id_type(start))); while(withdraw_itr != withdraw_index_end && withdraw_itr->authorized_account == account && result.size() < limit) { result.push_back(*withdraw_itr); @@ -2906,7 +2911,7 @@ optional database_api::get_htlc( htlc_id_type id, optional su fc::optional database_api_impl::get_htlc( htlc_id_type id, optional subscribe )const { - auto obj = get_objects( { id }, subscribe ).front(); + auto obj = get_objects( { object_id_type(id) }, subscribe ).front(); if ( !obj.is_null() ) { return fc::optional(obj.template as(GRAPHENE_MAX_NESTED_OBJECTS)); @@ -2933,8 +2938,8 @@ vector database_api_impl::get_htlc_by_from( const std::string accou const auto& htlc_idx = _db.get_index_type< htlc_index >().indices().get< by_from_id >(); auto htlc_index_end = htlc_idx.end(); - const account_id_type account = get_account_from_string(account_id_or_name)->id; - auto htlc_itr = htlc_idx.lower_bound(boost::make_tuple(account, start)); + const account_id_type account = get_account_from_string(account_id_or_name)->get_id(); + auto htlc_itr = htlc_idx.lower_bound(boost::make_tuple(account, object_id_type(start))); while(htlc_itr != htlc_index_end && htlc_itr->transfer.from == account && result.size() < limit) { @@ -2963,8 +2968,8 @@ vector database_api_impl::get_htlc_by_to( const std::string account const auto& htlc_idx = _db.get_index_type< htlc_index >().indices().get< by_to_id >(); auto htlc_index_end = htlc_idx.end(); - const account_id_type account = get_account_from_string(account_id_or_name)->id; - auto htlc_itr = htlc_idx.lower_bound(boost::make_tuple(account, start)); + const account_id_type account = get_account_from_string(account_id_or_name)->get_id(); + auto htlc_itr = htlc_idx.lower_bound(boost::make_tuple(account, object_id_type(start))); while(htlc_itr != htlc_index_end && htlc_itr->transfer.to == account && result.size() < limit) { @@ -2989,7 
+2994,7 @@ vector database_api_impl::list_htlcs(const htlc_id_type start, uint vector result; const auto& htlc_idx = _db.get_index_type().indices().get(); - auto itr = htlc_idx.lower_bound(start); + auto itr = htlc_idx.lower_bound(object_id_type(start)); while(itr != htlc_idx.end() && result.size() < limit) { result.push_back(*itr); @@ -3020,7 +3025,7 @@ vector database_api::get_tickets_by_account( const optional& limit, const optional& start_id )const { - account_id_type account = my->get_account_from_string(account_name_or_id)->id; + account_id_type account = my->get_account_from_string(account_name_or_id)->get_id(); const auto& idx = my->_db.get_index_type().indices().get(); return my->get_objects_by_x< ticket_object, ticket_id_type @@ -3253,15 +3258,15 @@ void database_api_impl::handle_object_changed( bool force_notify, for(auto id : ids) { - if( id.is() ) + if( id.is() ) { enqueue_if_subscribed_to_market( find_object(id), broadcast_queue, full_object ); } - else if( id.is() ) + else if( id.is() ) { enqueue_if_subscribed_to_market( find_object(id), broadcast_queue, full_object ); } - else if( id.is() ) + else if( id.is() ) { enqueue_if_subscribed_to_market( find_object(id), broadcast_queue, full_object ); diff --git a/libraries/app/database_api_helper.hxx b/libraries/app/database_api_helper.hxx index 1c2898b42f..4b71a421c2 100644 --- a/libraries/app/database_api_helper.hxx +++ b/libraries/app/database_api_helper.hxx @@ -29,7 +29,7 @@ class database_api_helper { public: database_api_helper( graphene::chain::database& db, const application_options* app_options ); - explicit database_api_helper( graphene::app::application& app ); + explicit database_api_helper( const graphene::app::application& app ); // Member variables graphene::chain::database& _db; @@ -78,15 +78,17 @@ public: vector results; - OBJ_ID_TYPE start_id = ostart_id.valid() ? *ostart_id : OBJ_ID_TYPE(); + OBJ_ID_TYPE start_obj_id = ostart_id.valid() ? *ostart_id : OBJ_ID_TYPE(); + object_id_type start_id { start_obj_id }; auto lower_itr = idx.lower_bound( make_tuple_if_multiple( x..., start_id ) ); auto upper_itr = call_end_or_upper_bound( idx, x... 
); results.reserve( limit ); - for ( ; lower_itr != upper_itr && results.size() < limit; ++lower_itr ) + while( lower_itr != upper_itr && results.size() < limit ) { results.emplace_back( *lower_itr ); + ++lower_itr; } return results; diff --git a/libraries/app/database_api_impl.hxx b/libraries/app/database_api_impl.hxx index 86a4c31d80..04f9f92a24 100644 --- a/libraries/app/database_api_impl.hxx +++ b/libraries/app/database_api_impl.hxx @@ -230,7 +230,7 @@ class database_api_impl : public std::enable_shared_from_this template extended_asset_object extend_asset( ASSET&& a )const { - asset_id_type id = a.id; + asset_id_type id = a.get_id(); extended_asset_object result = extended_asset_object( std::forward( a ) ); if( amount_in_collateral_index ) { @@ -260,12 +260,12 @@ class database_api_impl : public std::enable_shared_from_this template extended_liquidity_pool_object extend_liquidity_pool( LP&& a, bool with_stats )const { - liquidity_pool_id_type id = a.id; + liquidity_pool_id_type id = a.get_id(); extended_liquidity_pool_object result = extended_liquidity_pool_object( std::forward( a ) ); if( with_stats && _app_options && _app_options->has_market_history_plugin ) { liquidity_pool_ticker_id_type ticker_id( id.instance ); - const liquidity_pool_ticker_object* ticker = _db.find( ticker_id ); + const liquidity_pool_ticker_object* ticker = _db.find( ticker_id ); if( ticker ) result.statistics = *ticker; } @@ -291,7 +291,8 @@ class database_api_impl : public std::enable_shared_from_this vector results; - liquidity_pool_id_type start_id = ostart_id.valid() ? *ostart_id : liquidity_pool_id_type(); + liquidity_pool_id_type start_pool_id = ostart_id.valid() ? *ostart_id : liquidity_pool_id_type(); + object_id_type start_id { start_pool_id }; const auto& idx = _db.get_index_type().indices().get(); @@ -341,7 +342,7 @@ class database_api_impl : public std::enable_shared_from_this if( !_subscribe_callback ) return; - vector key = get_subscription_key( item ); + vector key = get_subscription_key( object_id_type(item) ); if( !_subscribe_filter.contains( key.data(), key.size() ) ) { _subscribe_filter.insert( key.data(), key.size() ); @@ -354,7 +355,7 @@ class database_api_impl : public std::enable_shared_from_this if( !_subscribe_callback ) return false; - vector key = get_subscription_key( item ); + vector key = get_subscription_key( object_id_type(item) ); return _subscribe_filter.contains( key.data(), key.size() ); } diff --git a/libraries/chain/account_evaluator.cpp b/libraries/chain/account_evaluator.cpp index 7fc5dda0ec..dbcbbdf22a 100644 --- a/libraries/chain/account_evaluator.cpp +++ b/libraries/chain/account_evaluator.cpp @@ -48,7 +48,7 @@ void verify_authority_accounts( const database& db, const authority& a ) "Maximum authority membership exceeded" ); for( const auto& acnt : a.account_auths ) { - GRAPHENE_ASSERT( db.find_object( acnt.first ) != nullptr, + GRAPHENE_ASSERT( db.find( acnt.first ) != nullptr, internal_verify_auth_account_not_found, "Account ${a} specified in authority does not exist", ("a", acnt.first) ); @@ -69,7 +69,7 @@ void verify_account_votes( const database& db, const account_options& options ) FC_ASSERT( options.num_committee <= chain_params.maximum_committee_count, "Voted for more committee members than currently allowed (${c})", ("c", chain_params.maximum_committee_count) ); - FC_ASSERT( db.find_object(options.voting_account), "Invalid proxy account specified." ); + FC_ASSERT( db.find(options.voting_account), "Invalid proxy account specified." 
); uint32_t max_vote_id = gpo.next_available_vote_id; bool has_worker_votes = false; diff --git a/libraries/chain/account_object.cpp b/libraries/chain/account_object.cpp index 7137801475..6f38c289fb 100644 --- a/libraries/chain/account_object.cpp +++ b/libraries/chain/account_object.cpp @@ -138,36 +138,38 @@ void account_member_index::object_inserted(const object& obj) { assert( dynamic_cast(&obj) ); // for debug only const account_object& a = static_cast(obj); + const account_id_type account_id = a.get_id(); auto account_members = get_account_members(a); for( auto item : account_members ) - account_to_account_memberships[item].insert(obj.id); + account_to_account_memberships[item].insert(account_id); auto key_members = get_key_members(a); for( auto item : key_members ) - account_to_key_memberships[item].insert(obj.id); + account_to_key_memberships[item].insert(account_id); auto address_members = get_address_members(a); for( auto item : address_members ) - account_to_address_memberships[item].insert(obj.id); + account_to_address_memberships[item].insert(account_id); } void account_member_index::object_removed(const object& obj) { assert( dynamic_cast(&obj) ); // for debug only const account_object& a = static_cast(obj); + const account_id_type account_id = a.get_id(); auto key_members = get_key_members(a); for( auto item : key_members ) - account_to_key_memberships[item].erase( obj.id ); + account_to_key_memberships[item].erase( account_id ); auto address_members = get_address_members(a); for( auto item : address_members ) - account_to_address_memberships[item].erase( obj.id ); + account_to_address_memberships[item].erase( account_id ); auto account_members = get_account_members(a); for( auto item : account_members ) - account_to_account_memberships[item].erase( obj.id ); + account_to_account_memberships[item].erase( account_id ); } void account_member_index::about_to_modify(const object& before) @@ -185,6 +187,7 @@ void account_member_index::object_modified(const object& after) { assert( dynamic_cast(&after) ); // for debug only const account_object& a = static_cast(after); + const account_id_type account_id = a.get_id(); { set after_account_members = get_account_members(a); @@ -194,15 +197,15 @@ void account_member_index::object_modified(const object& after) std::inserter(removed, removed.end())); for( auto itr = removed.begin(); itr != removed.end(); ++itr ) - account_to_account_memberships[*itr].erase(after.id); + account_to_account_memberships[*itr].erase(account_id); - vector added; added.reserve(after_account_members.size()); + vector added; added.reserve(after_account_members.size()); std::set_difference(after_account_members.begin(), after_account_members.end(), before_account_members.begin(), before_account_members.end(), std::inserter(added, added.end())); for( auto itr = added.begin(); itr != added.end(); ++itr ) - account_to_account_memberships[*itr].insert(after.id); + account_to_account_memberships[*itr].insert(account_id); } @@ -215,7 +218,7 @@ void account_member_index::object_modified(const object& after) std::inserter(removed, removed.end())); for( auto itr = removed.begin(); itr != removed.end(); ++itr ) - account_to_key_memberships[*itr].erase(after.id); + account_to_key_memberships[*itr].erase(account_id); vector added; added.reserve(after_key_members.size()); std::set_difference(after_key_members.begin(), after_key_members.end(), @@ -223,7 +226,7 @@ void account_member_index::object_modified(const object& after) std::inserter(added, added.end())); for( auto itr = 
added.begin(); itr != added.end(); ++itr ) - account_to_key_memberships[*itr].insert(after.id); + account_to_key_memberships[*itr].insert(account_id); } { @@ -235,7 +238,7 @@ void account_member_index::object_modified(const object& after) std::inserter(removed, removed.end())); for( auto itr = removed.begin(); itr != removed.end(); ++itr ) - account_to_address_memberships[*itr].erase(after.id); + account_to_address_memberships[*itr].erase(account_id); vector
added; added.reserve(after_address_members.size()); std::set_difference(after_address_members.begin(), after_address_members.end(), @@ -243,7 +246,7 @@ void account_member_index::object_modified(const object& after) std::inserter(added, added.end())); for( auto itr = added.begin(); itr != added.end(); ++itr ) - account_to_address_memberships[*itr].insert(after.id); + account_to_address_memberships[*itr].insert(account_id); } } diff --git a/libraries/chain/asset_evaluator.cpp b/libraries/chain/asset_evaluator.cpp index e17b4b8fdf..17ab836be9 100644 --- a/libraries/chain/asset_evaluator.cpp +++ b/libraries/chain/asset_evaluator.cpp @@ -220,9 +220,9 @@ void_result asset_create_evaluator::do_evaluate( const asset_create_operation& o // Check that all authorities do exist for( auto id : op.common_options.whitelist_authorities ) - d.get_object(id); + d.get(id); for( auto id : op.common_options.blacklist_authorities ) - d.get_object(id); + d.get(id); auto& asset_indx = d.get_index_type().indices().get(); auto asset_symbol_itr = asset_indx.find( op.symbol ); @@ -428,7 +428,7 @@ void_result asset_fund_fee_pool_evaluator::do_apply(const asset_fund_fee_pool_op static void validate_new_issuer( const database& d, const asset_object& a, account_id_type new_issuer ) { try { - FC_ASSERT(d.find_object(new_issuer)); + FC_ASSERT(d.find(new_issuer), "New issuer account does not exist"); if( a.is_market_issued() && new_issuer == GRAPHENE_COMMITTEE_ACCOUNT ) { const asset_object& backing = a.bitasset_data(d).options.short_backing_asset(d); @@ -587,10 +587,10 @@ void_result asset_update_evaluator::do_evaluate(const asset_update_operation& o) FC_ASSERT( o.new_options.whitelist_authorities.size() <= chain_parameters.maximum_asset_whitelist_authorities ); for( auto id : o.new_options.whitelist_authorities ) - d.get_object(id); + d.get(id); FC_ASSERT( o.new_options.blacklist_authorities.size() <= chain_parameters.maximum_asset_whitelist_authorities ); for( auto id : o.new_options.blacklist_authorities ) - d.get_object(id); + d.get(id); return void_result(); } FC_CAPTURE_AND_RETHROW((o)) } @@ -1064,7 +1064,7 @@ void_result asset_update_feed_producers_evaluator::do_evaluate(const asset_updat // Make sure all producers exist. Check these after asset because account lookup is more expensive for( auto id : o.new_feed_producers ) - d.get_object(id); + d.get(id); return void_result(); } FC_CAPTURE_AND_RETHROW( (o) ) } diff --git a/libraries/chain/db_balance.cpp b/libraries/chain/db_balance.cpp index bcd5d1004a..990e17e990 100644 --- a/libraries/chain/db_balance.cpp +++ b/libraries/chain/db_balance.cpp @@ -207,9 +207,11 @@ void database::deposit_cashback(const account_object& acct, share_type amount, b if( amount == 0 ) return; - if( acct.get_id() == GRAPHENE_COMMITTEE_ACCOUNT || acct.get_id() == GRAPHENE_WITNESS_ACCOUNT || - acct.get_id() == GRAPHENE_RELAXED_COMMITTEE_ACCOUNT || acct.get_id() == GRAPHENE_NULL_ACCOUNT || - acct.get_id() == GRAPHENE_TEMP_ACCOUNT ) + account_id_type acct_id = acct.get_id(); + + if( acct_id == GRAPHENE_COMMITTEE_ACCOUNT || acct_id == GRAPHENE_WITNESS_ACCOUNT || + acct_id == GRAPHENE_RELAXED_COMMITTEE_ACCOUNT || acct_id == GRAPHENE_NULL_ACCOUNT || + acct_id == GRAPHENE_TEMP_ACCOUNT ) { // The blockchain's accounts do not get cashback; it simply goes to the reserve pool. 
modify( get_core_dynamic_data(), [amount](asset_dynamic_data_object& d) { @@ -223,7 +225,7 @@ void database::deposit_cashback(const account_object& acct, share_type amount, b amount, get_global_properties().parameters.cashback_vesting_period_seconds, vesting_balance_type::cashback, - acct.id, + acct_id, require_vesting ); if( new_vbid.valid() ) diff --git a/libraries/chain/db_debug.cpp b/libraries/chain/db_debug.cpp index 4ac73bc613..6f4a6872d1 100644 --- a/libraries/chain/db_debug.cpp +++ b/libraries/chain/db_debug.cpp @@ -90,7 +90,7 @@ void database::debug_dump() } for( const asset_object& asset_obj : db.get_index_type().indices() ) { - total_balances[asset_obj.id] += asset_obj.dynamic_asset_data_id(db).accumulated_fees; + total_balances[asset_obj.get_id()] += asset_obj.dynamic_asset_data_id(db).accumulated_fees; total_balances[asset_id_type()] += asset_obj.dynamic_asset_data_id(db).fee_pool; // edump((total_balances[asset_obj.id])(asset_obj.dynamic_asset_data_id(db).current_supply ) ); } diff --git a/libraries/chain/db_genesis.cpp b/libraries/chain/db_genesis.cpp index 4624c16d81..95411a3654 100644 --- a/libraries/chain/db_genesis.cpp +++ b/libraries/chain/db_genesis.cpp @@ -345,7 +345,7 @@ void database::init_genesis(const genesis_state_type& genesis_state) // Create initial assets for( const genesis_state_type::initial_asset_type& asst : genesis_state.initial_assets ) { - asset_id_type new_asset_id = get_index_type().get_next_id(); + asset_id_type new_asset_id { get_index_type().get_next_id() }; total_supplies[ new_asset_id ] = 0; asset_dynamic_data_id_type dynamic_data_id; @@ -362,7 +362,7 @@ void database::init_genesis(const genesis_state_type& genesis_state) cop.registrar = GRAPHENE_TEMP_ACCOUNT; cop.owner = authority(1, collateral_rec.owner, 1); cop.active = cop.owner; - account_id_type owner_account_id = apply_operation(genesis_eval_state, cop).get(); + account_id_type owner_account_id { apply_operation(genesis_eval_state, cop).get() }; modify( owner_account_id(*this).statistics(*this), [&collateral_rec]( account_statistics_object& o ) { o.total_core_in_orders = collateral_rec.collateral; @@ -374,7 +374,7 @@ void database::init_genesis(const genesis_state_type& genesis_state) c.collateral = collateral_rec.collateral; c.debt = collateral_rec.debt; c.call_price = price::call_price(chain::asset(c.debt, new_asset_id), - chain::asset(c.collateral, core_asset.id), + chain::asset(c.collateral, core_asset.get_id()), GRAPHENE_DEFAULT_MAINTENANCE_COLLATERAL_RATIO); }); @@ -464,8 +464,8 @@ void database::init_genesis(const genesis_state_type& genesis_state) { if( it->bitasset_data_id.valid() ) { - auto supply_itr = total_supplies.find( it->id ); - auto debt_itr = total_debts.find( it->id ); + auto supply_itr = total_supplies.find( it->get_id() ); + auto debt_itr = total_debts.find( it->get_id() ); FC_ASSERT( supply_itr != total_supplies.end() ); FC_ASSERT( debt_itr != total_debts.end() ); if( supply_itr->second != debt_itr->second ) diff --git a/libraries/chain/db_getter.cpp b/libraries/chain/db_getter.cpp index 8bdcfd8b30..f159b58e8e 100644 --- a/libraries/chain/db_getter.cpp +++ b/libraries/chain/db_getter.cpp @@ -117,10 +117,10 @@ vector database::get_viable_custom_authorities( if (result.success) results.emplace_back(cust_auth.get().auth); else if (rejected_authorities != nullptr) - rejected_authorities->insert(std::make_pair(cust_auth.get().id, std::move(result))); + rejected_authorities->insert(std::make_pair(cust_auth.get().get_id(), std::move(result))); } catch (fc::exception& e) { 
if (rejected_authorities != nullptr) - rejected_authorities->insert(std::make_pair(cust_auth.get().id, std::move(e))); + rejected_authorities->insert(std::make_pair(cust_auth.get().get_id(), std::move(e))); } } diff --git a/libraries/chain/db_maint.cpp b/libraries/chain/db_maint.cpp index cd4c36b46d..0eb0c22183 100644 --- a/libraries/chain/db_maint.cpp +++ b/libraries/chain/db_maint.cpp @@ -301,7 +301,7 @@ void database::update_active_witnesses() std::transform(wits.begin(), wits.end(), std::inserter(gp.active_witnesses, gp.active_witnesses.end()), [](const witness_object& w) { - return w.id; + return w.get_id(); }); }); @@ -410,7 +410,7 @@ void database::update_active_committee_members() gp.active_committee_members.clear(); std::transform(committee_members.begin(), committee_members.end(), std::inserter(gp.active_committee_members, gp.active_committee_members.begin()), - [](const committee_member_object& d) { return d.id; }); + [](const committee_member_object& d) { return d.get_id(); }); }); } FC_CAPTURE_AND_RETHROW() } @@ -734,7 +734,7 @@ void create_buyback_orders( database& db ) continue; } - for( const auto& entry : bal_idx.get_account_balances( buyback_account.id ) ) + for( const auto& entry : bal_idx.get_account_balances( buyback_account.get_id() ) ) { const auto* it = entry.second; asset_id_type asset_to_sell = it->asset_type; @@ -759,11 +759,11 @@ void create_buyback_orders( database& db ) create_vop.fee = asset( 0, asset_id_type() ); create_vop.seller = buyback_account.id; create_vop.amount_to_sell = asset( amount_to_sell, asset_to_sell ); - create_vop.min_to_receive = asset( 1, asset_to_buy.id ); + create_vop.min_to_receive = asset( 1, asset_to_buy.get_id() ); create_vop.expiration = time_point_sec::maximum(); create_vop.fill_or_kill = false; - limit_order_id_type order_id = db.apply_operation( buyback_context, create_vop ).get< object_id_type >(); + limit_order_id_type order_id{ db.apply_operation( buyback_context, create_vop ).get< object_id_type >() }; if( db.find( order_id ) != nullptr ) { diff --git a/libraries/chain/db_market.cpp b/libraries/chain/db_market.cpp index 09c9bbeca7..05c78c0b54 100644 --- a/libraries/chain/db_market.cpp +++ b/libraries/chain/db_market.cpp @@ -497,7 +497,7 @@ void database::cancel_settle_order( const force_settlement_object& order ) { adjust_balance(order.owner, order.balance); - push_applied_operation( asset_settle_cancel_operation( order.id, order.owner, order.balance ) ); + push_applied_operation( asset_settle_cancel_operation( order.get_id(), order.owner, order.balance ) ); remove(order); } diff --git a/libraries/chain/db_notify.cpp b/libraries/chain/db_notify.cpp index 47958336a7..1e43bcdc00 100644 --- a/libraries/chain/db_notify.cpp +++ b/libraries/chain/db_notify.cpp @@ -414,7 +414,7 @@ static void get_relevant_accounts( const object* obj, flat_set& case base_object_type: return; case account_object_type: - accounts.insert( obj->id ); + accounts.insert( account_id_type(obj->id) ); break; case asset_object_type:{ const auto* aobj = dynamic_cast(obj); diff --git a/libraries/chain/db_update.cpp b/libraries/chain/db_update.cpp index a6d27ee5c2..e13b51a9dc 100644 --- a/libraries/chain/db_update.cpp +++ b/libraries/chain/db_update.cpp @@ -588,7 +588,7 @@ void database::clear_expired_htlcs() const auto amount = asset(obj.transfer.amount, obj.transfer.asset_id); adjust_balance( obj.transfer.from, amount ); // notify related parties - htlc_refund_operation vop( obj.id, obj.transfer.from, obj.transfer.to, amount, + htlc_refund_operation vop( 
obj.get_id(), obj.transfer.from, obj.transfer.to, amount, obj.conditions.hash_lock.preimage_hash, obj.conditions.hash_lock.preimage_size ); push_applied_operation( vop ); remove( obj ); @@ -750,7 +750,7 @@ void database::update_credit_offers_and_deals() // Notify related parties push_applied_operation( credit_deal_expired_operation ( - deal.id, deal.offer_id, deal.offer_owner, deal.borrower, + deal.get_id(), deal.offer_id, deal.offer_owner, deal.borrower, asset( deal.debt_amount, deal.debt_asset ), asset( deal.collateral_amount, deal.collateral_asset ), deal.fee_rate ) ); diff --git a/libraries/chain/evaluator.cpp b/libraries/chain/evaluator.cpp index a793163fe5..42b7f82c1d 100644 --- a/libraries/chain/evaluator.cpp +++ b/libraries/chain/evaluator.cpp @@ -105,7 +105,7 @@ database& generic_evaluator::db()const { return trx_state->db(); } void generic_evaluator::pay_fba_fee( uint64_t fba_id ) { database& d = db(); - const fba_accumulator_object& fba = d.get< fba_accumulator_object >( fba_accumulator_id_type( fba_id ) ); + const fba_accumulator_object& fba = d.get( fba_accumulator_id_type( fba_id ) ); if( !fba.is_configured(d) ) { generic_evaluator::pay_fee(); diff --git a/libraries/chain/htlc_evaluator.cpp b/libraries/chain/htlc_evaluator.cpp index 339c2fc4bb..270b8d41ae 100644 --- a/libraries/chain/htlc_evaluator.cpp +++ b/libraries/chain/htlc_evaluator.cpp @@ -27,22 +27,22 @@ #include #include -namespace graphene { +namespace graphene { namespace chain { namespace detail { - void check_htlc_create_hf_bsip64(const fc::time_point_sec& block_time, + void check_htlc_create_hf_bsip64(const fc::time_point_sec& block_time, const htlc_create_operation& op, const asset_object& asset_to_transfer) { if (block_time < HARDFORK_CORE_BSIP64_TIME) { // memo field added at harfork BSIP64 // NOTE: both of these checks can be removed after hardfork time - FC_ASSERT( !op.extensions.value.memo.valid(), + FC_ASSERT( !op.extensions.value.memo.valid(), "Memo unavailable until after HARDFORK BSIP64"); // HASH160 added at hardfork BSIP64 FC_ASSERT( !op.preimage_hash.is_type(), - "HASH160 unavailable until after HARDFORK BSIP64" ); + "HASH160 unavailable until after HARDFORK BSIP64" ); } else { @@ -50,18 +50,18 @@ namespace graphene { // IF there were no restricted transfers before HF_BSIP64 FC_ASSERT( !asset_to_transfer.is_transfer_restricted() || op.from == asset_to_transfer.issuer || op.to == asset_to_transfer.issuer, - "Asset ${asset} cannot be transfered.", ("asset", asset_to_transfer.id) ); + "Asset ${asset} cannot be transfered.", ("asset", asset_to_transfer.id) ); } } - void check_htlc_redeem_hf_bsip64(const fc::time_point_sec& block_time, + void check_htlc_redeem_hf_bsip64(const fc::time_point_sec& block_time, const htlc_redeem_operation& op, const htlc_object* htlc_obj) { - // TODO: The hardfork portion of this check can be removed if no HTLC redemptions are + // TODO: The hardfork portion of this check can be removed if no HTLC redemptions are // attempted on an HTLC with a 0 preimage size before the hardfork date. 
- if ( htlc_obj->conditions.hash_lock.preimage_size > 0U || + if ( htlc_obj->conditions.hash_lock.preimage_size > 0U || block_time < HARDFORK_CORE_BSIP64_TIME ) - FC_ASSERT(op.preimage.size() == htlc_obj->conditions.hash_lock.preimage_size, + FC_ASSERT(op.preimage.size() == htlc_obj->conditions.hash_lock.preimage_size, "Preimage size mismatch."); } } // end of graphene::chain::details @@ -79,10 +79,10 @@ namespace graphene { FC_ASSERT(htlc_options, "HTLC Committee options are not set."); // make sure the expiration is reasonable - FC_ASSERT( o.claim_period_seconds <= htlc_options->max_timeout_secs, + FC_ASSERT( o.claim_period_seconds <= htlc_options->max_timeout_secs, "HTLC Timeout exceeds allowed length" ); // make sure the preimage length is reasonable - FC_ASSERT( o.preimage_size <= htlc_options->max_preimage_size, + FC_ASSERT( o.preimage_size <= htlc_options->max_preimage_size, "HTLC preimage length exceeds allowed length" ); // make sure the sender has the funds for the HTLC FC_ASSERT( d.get_balance( o.from, o.amount.asset_id) >= (o.amount), "Insufficient funds") ; @@ -90,11 +90,11 @@ namespace graphene { const auto& from_account = o.from( d ); const auto& to_account = o.to( d ); detail::check_htlc_create_hf_bsip64(d.head_block_time(), o, asset_to_transfer); - FC_ASSERT( is_authorized_asset( d, from_account, asset_to_transfer ), - "Asset ${asset} is not authorized for account ${acct}.", + FC_ASSERT( is_authorized_asset( d, from_account, asset_to_transfer ), + "Asset ${asset} is not authorized for account ${acct}.", ( "asset", asset_to_transfer.id )( "acct", from_account.id ) ); - FC_ASSERT( is_authorized_asset( d, to_account, asset_to_transfer ), - "Asset ${asset} is not authorized for account ${acct}.", + FC_ASSERT( is_authorized_asset( d, to_account, asset_to_transfer ), + "Asset ${asset} is not authorized for account ${acct}.", ( "asset", asset_to_transfer.id )( "acct", to_account.id ) ); return void_result(); } @@ -141,11 +141,11 @@ namespace graphene { void_result htlc_redeem_evaluator::do_evaluate(const htlc_redeem_operation& o) { auto& d = db(); - htlc_obj = &d.get(o.htlc_id); + htlc_obj = &d.get(o.htlc_id); detail::check_htlc_redeem_hf_bsip64(d.head_block_time(), o, htlc_obj); const htlc_redeem_visitor vtor( o.preimage ); - FC_ASSERT( htlc_obj->conditions.hash_lock.preimage_hash.visit( vtor ), + FC_ASSERT( htlc_obj->conditions.hash_lock.preimage_hash.visit( vtor ), "Provided preimage does not generate correct hash."); return void_result(); @@ -156,7 +156,8 @@ namespace graphene { const auto amount = asset(htlc_obj->transfer.amount, htlc_obj->transfer.asset_id); db().adjust_balance(htlc_obj->transfer.to, amount); // notify related parties - htlc_redeemed_operation virt_op( htlc_obj->id, htlc_obj->transfer.from, htlc_obj->transfer.to, o.redeemer, + htlc_redeemed_operation virt_op( htlc_obj->get_id(), htlc_obj->transfer.from, htlc_obj->transfer.to, + o.redeemer, amount, htlc_obj->conditions.hash_lock.preimage_hash, htlc_obj->conditions.hash_lock.preimage_size, o.preimage ); db().push_applied_operation( virt_op ); @@ -166,14 +167,14 @@ namespace graphene { void_result htlc_extend_evaluator::do_evaluate(const htlc_extend_operation& o) { - htlc_obj = &db().get(o.htlc_id); + htlc_obj = &db().get(o.htlc_id); FC_ASSERT(o.update_issuer == htlc_obj->transfer.from, "HTLC may only be extended by its creator."); optional htlc_options = get_committee_htlc_options(db()); - FC_ASSERT( htlc_obj->conditions.time_lock.expiration.sec_since_epoch() - + static_cast(o.seconds_to_add) < 
fc::time_point_sec::maximum().sec_since_epoch(), + FC_ASSERT( htlc_obj->conditions.time_lock.expiration.sec_since_epoch() + + static_cast(o.seconds_to_add) < fc::time_point_sec::maximum().sec_since_epoch(), "Extension would cause an invalid date"); FC_ASSERT( htlc_obj->conditions.time_lock.expiration + o.seconds_to_add - <= db().head_block_time() + htlc_options->max_timeout_secs, + <= db().head_block_time() + htlc_options->max_timeout_secs, "Extension pushes contract too far into the future" ); return void_result(); } diff --git a/libraries/chain/include/graphene/chain/account_object.hpp b/libraries/chain/include/graphene/chain/account_object.hpp index 66aefa2dcd..b25d6f48d7 100644 --- a/libraries/chain/include/graphene/chain/account_object.hpp +++ b/libraries/chain/include/graphene/chain/account_object.hpp @@ -43,12 +43,10 @@ namespace graphene { namespace chain { * separating the account data that changes frequently from the account data that is mostly static, which will * minimize the amount of data that must be backed up as part of the undo history everytime a transfer is made. */ - class account_statistics_object : public graphene::db::abstract_object + class account_statistics_object : public graphene::db::abstract_object { public: - static constexpr uint8_t space_id = implementation_ids; - static constexpr uint8_t type_id = impl_account_statistics_object_type; - account_id_type owner; string name; ///< redundantly store account name here for better maintenance performance @@ -155,12 +153,10 @@ namespace graphene { namespace chain { * This object is indexed on owner and asset_type so that black swan * events in asset_type can be processed quickly. */ - class account_balance_object : public abstract_object + class account_balance_object : public abstract_object { public: - static constexpr uint8_t space_id = implementation_ids; - static constexpr uint8_t type_id = impl_account_balance_object_type; - account_id_type owner; asset_id_type asset_type; share_type balance; @@ -179,12 +175,9 @@ namespace graphene { namespace chain { * Accounts are the primary unit of authority on the graphene system. Users must have an account in order to use * assets, trade in the markets, vote for committee_members, etc. */ - class account_object : public graphene::db::abstract_object + class account_object : public graphene::db::abstract_object { public: - static constexpr uint8_t space_id = protocol_ids; - static constexpr uint8_t type_id = account_object_type; - /** * The time at which this account's membership expires. * If set to any time in the past, the account is a basic account. @@ -326,8 +319,6 @@ namespace graphene { namespace chain { { return !is_basic_account(now); } - - account_id_type get_id()const { return id; } }; /** diff --git a/libraries/chain/include/graphene/chain/asset_object.hpp b/libraries/chain/include/graphene/chain/asset_object.hpp index 3dd11bb344..f93a3e4993 100644 --- a/libraries/chain/include/graphene/chain/asset_object.hpp +++ b/libraries/chain/include/graphene/chain/asset_object.hpp @@ -53,12 +53,10 @@ namespace graphene { namespace chain { * This object exists as an implementation detail and its ID should never be referenced by * a blockchain operation. 
*/ - class asset_dynamic_data_object : public abstract_object + class asset_dynamic_data_object : public abstract_object { public: - static constexpr uint8_t space_id = implementation_ids; - static constexpr uint8_t type_id = impl_asset_dynamic_data_object_type; - /// The number of shares currently in existence share_type current_supply; share_type confidential_supply; ///< total asset held in confidential balances @@ -74,12 +72,9 @@ namespace graphene { namespace chain { * All assets have a globally unique symbol name that controls how they are traded and an issuer who * has authority over the parameters of the asset. */ - class asset_object : public graphene::db::abstract_object + class asset_object : public graphene::db::abstract_object { public: - static constexpr uint8_t space_id = protocol_ids; - static constexpr uint8_t type_id = asset_object_type; - /// This function does not check if any registered asset has this symbol or not; it simply checks whether the /// symbol would be valid. /// @return true if symbol is a valid ticker symbol; false otherwise. @@ -116,7 +111,7 @@ namespace graphene { namespace chain { bool can_bid_collateral()const { return (0 == (options.flags & disable_collateral_bidding)); } /// Helper function to get an asset object with the given amount in this asset's type - asset amount(share_type a)const { return asset(a, id); } + asset amount(share_type a)const { return asset(a, asset_id_type(id)); } /// Convert a string amount (i.e. "123.45") to an asset object with this asset's type /// The string may have a decimal and/or a negative sign. asset amount_from_string(string amount_string)const; @@ -156,8 +151,6 @@ namespace graphene { namespace chain { /// The time when the asset object was created time_point_sec creation_time; - asset_id_type get_id()const { return id; } - void validate()const { // UIAs may not be prediction markets, have force settlement, or global settlements @@ -259,12 +252,10 @@ namespace graphene { namespace chain { * @ingroup object * @ingroup implementation */ - class asset_bitasset_data_object : public abstract_object + class asset_bitasset_data_object : public abstract_object { public: - static constexpr uint8_t space_id = implementation_ids; - static constexpr uint8_t type_id = impl_asset_bitasset_data_object_type; - /// The asset this object belong to asset_id_type asset_id; diff --git a/libraries/chain/include/graphene/chain/balance_object.hpp b/libraries/chain/include/graphene/chain/balance_object.hpp index 478f78be39..0dafb72d9c 100644 --- a/libraries/chain/include/graphene/chain/balance_object.hpp +++ b/libraries/chain/include/graphene/chain/balance_object.hpp @@ -27,12 +27,9 @@ namespace graphene { namespace chain { - class balance_object : public abstract_object + class balance_object : public abstract_object { public: - static constexpr uint8_t space_id = protocol_ids; - static constexpr uint8_t type_id = balance_object_type; - bool is_vesting_balance()const { return vesting_policy.valid(); } asset available(fc::time_point_sec now)const diff --git a/libraries/chain/include/graphene/chain/block_summary_object.hpp b/libraries/chain/include/graphene/chain/block_summary_object.hpp index 3486b993b4..fa13bf1369 100644 --- a/libraries/chain/include/graphene/chain/block_summary_object.hpp +++ b/libraries/chain/include/graphene/chain/block_summary_object.hpp @@ -37,12 +37,10 @@ namespace graphene { namespace chain { * so we can calculate whether the current transaction is valid and at * what time it should expire. 
*/ - class block_summary_object : public abstract_object + class block_summary_object : public abstract_object { public: - static constexpr uint8_t space_id = implementation_ids; - static constexpr uint8_t type_id = impl_block_summary_object_type; - block_id_type block_id; }; diff --git a/libraries/chain/include/graphene/chain/budget_record_object.hpp b/libraries/chain/include/graphene/chain/budget_record_object.hpp index 84ada7a816..192ed7e9af 100644 --- a/libraries/chain/include/graphene/chain/budget_record_object.hpp +++ b/libraries/chain/include/graphene/chain/budget_record_object.hpp @@ -63,12 +63,10 @@ struct budget_record share_type current_supply; }; -class budget_record_object : public graphene::db::abstract_object +class budget_record_object : public graphene::db::abstract_object { public: - static constexpr uint8_t space_id = implementation_ids; - static constexpr uint8_t type_id = impl_budget_record_object_type; - fc::time_point_sec time; budget_record record; }; diff --git a/libraries/chain/include/graphene/chain/buyback_object.hpp b/libraries/chain/include/graphene/chain/buyback_object.hpp index 6724c9453e..5db9df8313 100644 --- a/libraries/chain/include/graphene/chain/buyback_object.hpp +++ b/libraries/chain/include/graphene/chain/buyback_object.hpp @@ -40,12 +40,10 @@ namespace graphene { namespace chain { * This class is an implementation detail. */ -class buyback_object : public graphene::db::abstract_object< buyback_object > +class buyback_object : public graphene::db::abstract_object< buyback_object, + implementation_ids, impl_buyback_object_type > { public: - static constexpr uint8_t space_id = implementation_ids; - static constexpr uint8_t type_id = impl_buyback_object_type; - asset_id_type asset_to_buy; }; diff --git a/libraries/chain/include/graphene/chain/chain_property_object.hpp b/libraries/chain/include/graphene/chain/chain_property_object.hpp index 28542c231e..b697ba6c9c 100644 --- a/libraries/chain/include/graphene/chain/chain_property_object.hpp +++ b/libraries/chain/include/graphene/chain/chain_property_object.hpp @@ -30,12 +30,10 @@ namespace graphene { namespace chain { /** * Contains invariants which are set at genesis and never changed. */ -class chain_property_object : public abstract_object +class chain_property_object : public abstract_object { public: - static constexpr uint8_t space_id = implementation_ids; - static constexpr uint8_t type_id = impl_chain_property_object_type; - chain_id_type chain_id; immutable_chain_parameters immutable_parameters; }; diff --git a/libraries/chain/include/graphene/chain/committee_member_object.hpp b/libraries/chain/include/graphene/chain/committee_member_object.hpp index 9c0cef0816..f0a81a8a08 100644 --- a/libraries/chain/include/graphene/chain/committee_member_object.hpp +++ b/libraries/chain/include/graphene/chain/committee_member_object.hpp @@ -40,12 +40,10 @@ namespace graphene { namespace chain { * committee_members were separated into a separate object to make iterating over * the set of committee_member easy. 
*/ - class committee_member_object : public abstract_object + class committee_member_object : public abstract_object { public: - static constexpr uint8_t space_id = protocol_ids; - static constexpr uint8_t type_id = committee_member_object_type; - account_id_type committee_member_account; vote_id_type vote_id; uint64_t total_votes = 0; diff --git a/libraries/chain/include/graphene/chain/confidential_object.hpp b/libraries/chain/include/graphene/chain/confidential_object.hpp index 02057e46b5..bc9d849dfc 100644 --- a/libraries/chain/include/graphene/chain/confidential_object.hpp +++ b/libraries/chain/include/graphene/chain/confidential_object.hpp @@ -38,12 +38,10 @@ namespace graphene { namespace chain { * @ingroup object * @ingroup protocol */ -class blinded_balance_object : public graphene::db::abstract_object +class blinded_balance_object : public graphene::db::abstract_object { public: - static constexpr uint8_t space_id = implementation_ids; - static constexpr uint8_t type_id = impl_blinded_balance_object_type; - fc::ecc::commitment_type commitment; asset_id_type asset_id; authority owner; diff --git a/libraries/chain/include/graphene/chain/credit_offer_object.hpp b/libraries/chain/include/graphene/chain/credit_offer_object.hpp index 24b26479f0..a8d427e7ce 100644 --- a/libraries/chain/include/graphene/chain/credit_offer_object.hpp +++ b/libraries/chain/include/graphene/chain/credit_offer_object.hpp @@ -36,12 +36,9 @@ namespace graphene { namespace chain { * @ingroup protocol * */ -class credit_offer_object : public abstract_object +class credit_offer_object : public abstract_object { public: - static constexpr uint8_t space_id = protocol_ids; - static constexpr uint8_t type_id = credit_offer_object_type; - account_id_type owner_account; ///< Owner of the fund asset_id_type asset_type; ///< Asset type in the fund share_type total_balance; ///< Total size of the fund @@ -104,12 +101,9 @@ using credit_offer_index = generic_index +class credit_deal_object : public abstract_object { public: - static constexpr uint8_t space_id = protocol_ids; - static constexpr uint8_t type_id = credit_deal_object_type; - account_id_type borrower; ///< Borrower credit_offer_id_type offer_id; ///< ID of the credit offer account_id_type offer_owner; ///< Owner of the credit offer, redundant info for ease of querying @@ -186,12 +180,10 @@ using credit_deal_index = generic_index +class credit_deal_summary_object : public abstract_object { public: - static constexpr uint8_t space_id = implementation_ids; - static constexpr uint8_t type_id = impl_credit_deal_summary_object_type; - account_id_type borrower; ///< Borrower credit_offer_id_type offer_id; ///< ID of the credit offer account_id_type offer_owner; ///< Owner of the credit offer, redundant info for ease of querying diff --git a/libraries/chain/include/graphene/chain/custom_authority_object.hpp b/libraries/chain/include/graphene/chain/custom_authority_object.hpp index d73194a11a..e38404f0ed 100644 --- a/libraries/chain/include/graphene/chain/custom_authority_object.hpp +++ b/libraries/chain/include/graphene/chain/custom_authority_object.hpp @@ -36,15 +36,14 @@ namespace graphene { namespace chain { * @ingroup object * */ - class custom_authority_object : public abstract_object { + class custom_authority_object : public abstract_object + { /// Unreflected field to store a cache of the predicate function /// Note that this cache can be modified when the object is const! 
mutable optional predicate_cache; public: - static constexpr uint8_t space_id = protocol_ids; - static constexpr uint8_t type_id = custom_authority_object_type; - account_id_type account; bool enabled; time_point_sec valid_from; diff --git a/libraries/chain/include/graphene/chain/fba_object.hpp b/libraries/chain/include/graphene/chain/fba_object.hpp index 3b5b2f7eb1..8397ad235a 100644 --- a/libraries/chain/include/graphene/chain/fba_object.hpp +++ b/libraries/chain/include/graphene/chain/fba_object.hpp @@ -34,12 +34,10 @@ class database; * fba_accumulator_object accumulates fees to be paid out via buyback or other FBA mechanism. */ -class fba_accumulator_object : public graphene::db::abstract_object< fba_accumulator_object > +class fba_accumulator_object : public graphene::db::abstract_object< fba_accumulator_object, + implementation_ids, impl_fba_accumulator_object_type > { public: - static constexpr uint8_t space_id = implementation_ids; - static constexpr uint8_t type_id = impl_fba_accumulator_object_type; - share_type accumulated_fba_fees; optional< asset_id_type > designated_asset; diff --git a/libraries/chain/include/graphene/chain/global_property_object.hpp b/libraries/chain/include/graphene/chain/global_property_object.hpp index a7fe629872..9d7079e624 100644 --- a/libraries/chain/include/graphene/chain/global_property_object.hpp +++ b/libraries/chain/include/graphene/chain/global_property_object.hpp @@ -37,12 +37,10 @@ namespace graphene { namespace chain { * * This is an implementation detail. The values here are set by committee_members to tune the blockchain parameters. */ - class global_property_object : public graphene::db::abstract_object + class global_property_object : public graphene::db::abstract_object { public: - static constexpr uint8_t space_id = implementation_ids; - static constexpr uint8_t type_id = impl_global_property_object_type; - chain_parameters parameters; optional pending_parameters; @@ -61,12 +59,10 @@ namespace graphene { namespace chain { * This is an implementation detail. The values here are calculated during normal chain operations and reflect the * current values of global blockchain properties. */ - class dynamic_global_property_object : public abstract_object + class dynamic_global_property_object : public abstract_object { public: - static constexpr uint8_t space_id = implementation_ids; - static constexpr uint8_t type_id = impl_dynamic_global_property_object_type; - uint32_t head_block_number = 0; block_id_type head_block_id; time_point_sec time; diff --git a/libraries/chain/include/graphene/chain/htlc_object.hpp b/libraries/chain/include/graphene/chain/htlc_object.hpp index ab84d58565..47934f6418 100644 --- a/libraries/chain/include/graphene/chain/htlc_object.hpp +++ b/libraries/chain/include/graphene/chain/htlc_object.hpp @@ -37,12 +37,9 @@ namespace graphene { namespace chain { * This object is stored in the database while an HTLC is active. The HTLC will * become inactive at expiration or when unlocked via the preimage. 
*/ - class htlc_object : public graphene::db::abstract_object { + class htlc_object : public graphene::db::abstract_object + { public: - // uniquely identify this object in the database - static constexpr uint8_t space_id = protocol_ids; - static constexpr uint8_t type_id = htlc_object_type; - struct transfer_info { account_id_type from; account_id_type to; diff --git a/libraries/chain/include/graphene/chain/liquidity_pool_object.hpp b/libraries/chain/include/graphene/chain/liquidity_pool_object.hpp index 2853e6c087..7769a33fca 100644 --- a/libraries/chain/include/graphene/chain/liquidity_pool_object.hpp +++ b/libraries/chain/include/graphene/chain/liquidity_pool_object.hpp @@ -41,12 +41,9 @@ using namespace graphene::db; * @ingroup protocol * */ -class liquidity_pool_object : public abstract_object +class liquidity_pool_object : public abstract_object { public: - static constexpr uint8_t space_id = protocol_ids; - static constexpr uint8_t type_id = liquidity_pool_object_type; - asset_id_type asset_a; ///< Type of the first asset in the pool asset_id_type asset_b; ///< Type of the second asset in the pool share_type balance_a; ///< The balance of the first asset in the pool diff --git a/libraries/chain/include/graphene/chain/market_object.hpp b/libraries/chain/include/graphene/chain/market_object.hpp index 3f62130a50..724be9c25b 100644 --- a/libraries/chain/include/graphene/chain/market_object.hpp +++ b/libraries/chain/include/graphene/chain/market_object.hpp @@ -41,12 +41,9 @@ using namespace graphene::db; * * The objects are indexed by @ref expiration and are automatically deleted on the first block after expiration. */ -class limit_order_object : public abstract_object +class limit_order_object : public abstract_object { public: - static constexpr uint8_t space_id = protocol_ids; - static constexpr uint8_t type_id = limit_order_object_type; - time_point_sec expiration; account_id_type seller; share_type for_sale; ///< asset id is sell_price.base.asset_id @@ -125,12 +122,9 @@ typedef generic_index limit_or * There should only be one call_order_object per asset pair per account and * they will all have the same call price. */ -class call_order_object : public abstract_object +class call_order_object : public abstract_object { public: - static constexpr uint8_t space_id = protocol_ids; - static constexpr uint8_t type_id = call_order_object_type; - asset get_collateral()const { return asset( collateral, call_price.base.asset_id ); } asset get_debt()const { return asset( debt, debt_type() ); } asset amount_to_receive()const { return get_debt(); } @@ -175,12 +169,10 @@ class call_order_object : public abstract_object * On the @ref settlement_date the @ref balance will be converted to the collateral asset * and paid to @ref owner and then this object will be deleted. */ -class force_settlement_object : public abstract_object +class force_settlement_object : public abstract_object { public: - static constexpr uint8_t space_id = protocol_ids; - static constexpr uint8_t type_id = force_settlement_object_type; - account_id_type owner; asset balance; time_point_sec settlement_date; @@ -196,12 +188,10 @@ class force_settlement_object : public abstract_object * There should only be one collateral_bid_object per asset per account, and * only for smartcoin assets that have a global settlement_price. 
*/ -class collateral_bid_object : public abstract_object +class collateral_bid_object : public abstract_object { public: - static constexpr uint8_t space_id = implementation_ids; - static constexpr uint8_t type_id = impl_collateral_bid_object_type; - asset get_additional_collateral()const { return inv_swan_price.base; } asset get_debt_covered()const { return inv_swan_price.quote; } asset_id_type debt_type()const { return inv_swan_price.quote.asset_id; } diff --git a/libraries/chain/include/graphene/chain/operation_history_object.hpp b/libraries/chain/include/graphene/chain/operation_history_object.hpp index df959152dc..3cf82787d0 100644 --- a/libraries/chain/include/graphene/chain/operation_history_object.hpp +++ b/libraries/chain/include/graphene/chain/operation_history_object.hpp @@ -45,14 +45,12 @@ namespace graphene { namespace chain { * * @note this object is READ ONLY it can never be modified */ - class operation_history_object : public abstract_object + class operation_history_object : public abstract_object { public: - static constexpr uint8_t space_id = protocol_ids; - static constexpr uint8_t type_id = operation_history_object_type; - explicit operation_history_object( const operation& o ):op(o){} - operation_history_object(){} + operation_history_object() = default; operation_history_object( const operation& o, uint32_t bn, uint16_t tib, uint16_t oit, uint32_t vo, bool iv, const time_point_sec& bt ) : op(o), block_num(bn), trx_in_block(tib), op_in_trx(oit), virtual_op(vo), is_virtual(iv), block_time(bt) {} @@ -94,11 +92,10 @@ namespace graphene { namespace chain { * linked list can be traversed with relatively effecient disk access because * of the use of a memory mapped stack. */ - class account_history_object : public abstract_object + class account_history_object : public abstract_object { public: - static constexpr uint8_t space_id = implementation_ids; - static constexpr uint8_t type_id = impl_account_history_object_type; account_id_type account; /// the account this operation applies to operation_history_id_type operation_id; uint64_t sequence = 0; /// the operation position within the given account diff --git a/libraries/chain/include/graphene/chain/proposal_object.hpp b/libraries/chain/include/graphene/chain/proposal_object.hpp index 0a49bc05da..dde9cedf74 100644 --- a/libraries/chain/include/graphene/chain/proposal_object.hpp +++ b/libraries/chain/include/graphene/chain/proposal_object.hpp @@ -37,12 +37,9 @@ namespace graphene { namespace chain { * @ingroup object * @ingroup protocol */ -class proposal_object : public abstract_object +class proposal_object : public abstract_object { public: - static constexpr uint8_t space_id = protocol_ids; - static constexpr uint8_t type_id = proposal_object_type; - time_point_sec expiration_time; optional review_period_time; transaction proposed_transaction; diff --git a/libraries/chain/include/graphene/chain/samet_fund_object.hpp b/libraries/chain/include/graphene/chain/samet_fund_object.hpp index f1301d0822..2c6e2dbaf9 100644 --- a/libraries/chain/include/graphene/chain/samet_fund_object.hpp +++ b/libraries/chain/include/graphene/chain/samet_fund_object.hpp @@ -36,12 +36,9 @@ namespace graphene { namespace chain { * @ingroup protocol * */ -class samet_fund_object : public abstract_object +class samet_fund_object : public abstract_object { public: - static constexpr uint8_t space_id = protocol_ids; - static constexpr uint8_t type_id = samet_fund_object_type; - account_id_type owner_account; ///< Owner of the fund asset_id_type 
asset_type; ///< Asset type in the fund share_type balance; ///< Usable amount in the fund diff --git a/libraries/chain/include/graphene/chain/special_authority_object.hpp b/libraries/chain/include/graphene/chain/special_authority_object.hpp index cbd8103eb4..2b58f42528 100644 --- a/libraries/chain/include/graphene/chain/special_authority_object.hpp +++ b/libraries/chain/include/graphene/chain/special_authority_object.hpp @@ -39,12 +39,10 @@ namespace graphene { namespace chain { * This class is an implementation detail. */ -class special_authority_object : public graphene::db::abstract_object +class special_authority_object : public graphene::db::abstract_object { public: - static constexpr uint8_t space_id = implementation_ids; - static constexpr uint8_t type_id = impl_special_authority_object_type; - account_id_type account; }; diff --git a/libraries/chain/include/graphene/chain/ticket_object.hpp b/libraries/chain/include/graphene/chain/ticket_object.hpp index 16610dccf7..d42858c417 100644 --- a/libraries/chain/include/graphene/chain/ticket_object.hpp +++ b/libraries/chain/include/graphene/chain/ticket_object.hpp @@ -59,12 +59,9 @@ enum ticket_version * @ingroup protocol * */ -class ticket_object : public abstract_object +class ticket_object : public abstract_object { public: - static constexpr uint8_t space_id = protocol_ids; - static constexpr uint8_t type_id = ticket_object_type; - account_id_type account; ///< The account who owns the ticket ticket_type target_type; ///< The target type of the ticket asset amount; ///< The token type and amount in the ticket diff --git a/libraries/chain/include/graphene/chain/transaction_history_object.hpp b/libraries/chain/include/graphene/chain/transaction_history_object.hpp index b5844c2f82..237988bbde 100644 --- a/libraries/chain/include/graphene/chain/transaction_history_object.hpp +++ b/libraries/chain/include/graphene/chain/transaction_history_object.hpp @@ -41,12 +41,10 @@ namespace graphene { namespace chain { * in a block a transaction_history_object is added. At the end of block processing all transaction_history_objects that * have expired can be removed from the index. */ - class transaction_history_object : public abstract_object + class transaction_history_object : public abstract_object { public: - static constexpr uint8_t space_id = implementation_ids; - static constexpr uint8_t type_id = impl_transaction_history_object_type; - signed_transaction trx; transaction_id_type trx_id; diff --git a/libraries/chain/include/graphene/chain/vesting_balance_object.hpp b/libraries/chain/include/graphene/chain/vesting_balance_object.hpp index 3e91d9cf9c..1ec58bfb50 100644 --- a/libraries/chain/include/graphene/chain/vesting_balance_object.hpp +++ b/libraries/chain/include/graphene/chain/vesting_balance_object.hpp @@ -148,12 +148,10 @@ namespace graphene { namespace chain { /** * Vesting balance object is a balance that is locked by the blockchain for a period of time. 
*/ - class vesting_balance_object : public abstract_object + class vesting_balance_object : public abstract_object { public: - static constexpr uint8_t space_id = protocol_ids; - static constexpr uint8_t type_id = vesting_balance_object_type; - /// Account which owns and may withdraw from this vesting balance account_id_type owner; /// Total amount remaining in this vesting balance diff --git a/libraries/chain/include/graphene/chain/withdraw_permission_object.hpp b/libraries/chain/include/graphene/chain/withdraw_permission_object.hpp index a16bcd1156..3ba50a2932 100644 --- a/libraries/chain/include/graphene/chain/withdraw_permission_object.hpp +++ b/libraries/chain/include/graphene/chain/withdraw_permission_object.hpp @@ -42,12 +42,10 @@ namespace graphene { namespace chain { * this. Any number of withdrawals may be made so long as the total amount withdrawn per period does not exceed the * limit for any given period. */ - class withdraw_permission_object : public graphene::db::abstract_object + class withdraw_permission_object : public graphene::db::abstract_object { public: - static constexpr uint8_t space_id = protocol_ids; - static constexpr uint8_t type_id = withdraw_permission_object_type; - /// The account authorizing @ref authorized_account to withdraw from it account_id_type withdraw_from_account; /// The account authorized to make withdrawals from @ref withdraw_from_account diff --git a/libraries/chain/include/graphene/chain/witness_object.hpp b/libraries/chain/include/graphene/chain/witness_object.hpp index 29810eed2d..fbc3c483be 100644 --- a/libraries/chain/include/graphene/chain/witness_object.hpp +++ b/libraries/chain/include/graphene/chain/witness_object.hpp @@ -29,23 +29,18 @@ namespace graphene { namespace chain { using namespace graphene::db; - class witness_object : public abstract_object + class witness_object : public abstract_object { public: - static constexpr uint8_t space_id = protocol_ids; - static constexpr uint8_t type_id = witness_object_type; - account_id_type witness_account; uint64_t last_aslot = 0; public_key_type signing_key; optional< vesting_balance_id_type > pay_vb; - vote_id_type vote_id; + vote_id_type vote_id { vote_id_type::witness }; uint64_t total_votes = 0; string url; int64_t total_missed = 0; uint32_t last_confirmed_block_num = 0; - - witness_object() : vote_id(vote_id_type::witness) {} }; struct by_account; diff --git a/libraries/chain/include/graphene/chain/witness_schedule_object.hpp b/libraries/chain/include/graphene/chain/witness_schedule_object.hpp index 080e76551d..8509c68d5a 100644 --- a/libraries/chain/include/graphene/chain/witness_schedule_object.hpp +++ b/libraries/chain/include/graphene/chain/witness_schedule_object.hpp @@ -27,12 +27,10 @@ namespace graphene { namespace chain { -class witness_schedule_object : public graphene::db::abstract_object +class witness_schedule_object : public graphene::db::abstract_object { public: - static constexpr uint8_t space_id = implementation_ids; - static constexpr uint8_t type_id = impl_witness_schedule_object_type; - vector< witness_id_type > current_shuffled_witnesses; }; diff --git a/libraries/chain/include/graphene/chain/worker_object.hpp b/libraries/chain/include/graphene/chain/worker_object.hpp index 2836b0a382..2acccb4b2e 100644 --- a/libraries/chain/include/graphene/chain/worker_object.hpp +++ b/libraries/chain/include/graphene/chain/worker_object.hpp @@ -104,12 +104,9 @@ typedef static_variant< /** * @brief Worker object contains the details of a blockchain worker. 
See @ref workers for details. */ -class worker_object : public abstract_object +class worker_object : public abstract_object { public: - static constexpr uint8_t space_id = protocol_ids; - static constexpr uint8_t type_id = worker_object_type; - /// ID of the account which owns this worker account_id_type worker_account; /// Time at which this worker begins receiving pay, if elected diff --git a/libraries/chain/is_authorized_asset.cpp b/libraries/chain/is_authorized_asset.cpp index 8eeff66a1c..e35eb29318 100644 --- a/libraries/chain/is_authorized_asset.cpp +++ b/libraries/chain/is_authorized_asset.cpp @@ -47,7 +47,7 @@ bool _is_authorized_asset( if( acct.allowed_assets.valid() ) { - if( acct.allowed_assets->find( asset_obj.id ) == acct.allowed_assets->end() ) + if( acct.allowed_assets->find( asset_obj.get_id() ) == acct.allowed_assets->end() ) return false; // must still pass other checks even if it is in allowed_assets } diff --git a/libraries/chain/market_evaluator.cpp b/libraries/chain/market_evaluator.cpp index 28fe567ec4..11aee840a5 100644 --- a/libraries/chain/market_evaluator.cpp +++ b/libraries/chain/market_evaluator.cpp @@ -47,14 +47,14 @@ void_result limit_order_create_evaluator::do_evaluate(const limit_order_create_o if( _sell_asset->options.whitelist_markets.size() ) { - GRAPHENE_ASSERT( _sell_asset->options.whitelist_markets.find(_receive_asset->id) + GRAPHENE_ASSERT( _sell_asset->options.whitelist_markets.find(_receive_asset->get_id()) != _sell_asset->options.whitelist_markets.end(), limit_order_create_market_not_whitelisted, "This market has not been whitelisted by the selling asset", ); } if( _sell_asset->options.blacklist_markets.size() ) { - GRAPHENE_ASSERT( _sell_asset->options.blacklist_markets.find(_receive_asset->id) + GRAPHENE_ASSERT( _sell_asset->options.blacklist_markets.find(_receive_asset->get_id()) == _sell_asset->options.blacklist_markets.end(), limit_order_create_market_blacklisted, "This market has been blacklisted by the selling asset", ); @@ -119,7 +119,7 @@ object_id_type limit_order_create_evaluator::do_apply(const limit_order_create_o obj.deferred_fee = _deferred_fee; obj.deferred_paid_fee = _deferred_paid_fee; }); - limit_order_id_type order_id = new_order_object.id; // save this because we may remove the object by filling it + object_id_type order_id = new_order_object.id; // save this because we may remove the object by filling it bool filled; if( db().get_dynamic_global_properties().next_maintenance_time <= HARDFORK_CORE_625_TIME ) filled = db().apply_order_before_hardfork_625( new_order_object ); @@ -346,7 +346,7 @@ object_id_type call_order_update_evaluator::do_apply(const call_order_update_ope }); } - call_order_id_type call_order_id = call_ptr->id; + object_id_type call_order_id = call_ptr->id; if( _bitasset_data->is_prediction_market ) return call_order_id; @@ -381,7 +381,7 @@ object_id_type call_order_update_evaluator::do_apply(const call_order_update_ope // the first call order may be unable to be updated if the second one is undercollateralized. // Note: check call orders, don't allow black swan, not for new limit order bool called_some = d.check_call_orders( *_debt_asset, false, false, _bitasset_data ); - call_ptr = d.find(call_order_id); + call_ptr = d.find(call_order_id); if( called_some ) { // before hard fork core-583: if we filled at least one call order, we are OK if we totally filled. 
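The recurring change in the object-header hunks above is easier to see outside the diff: the per-class `static constexpr uint8_t space_id` / `type_id` members are removed and become template parameters of `abstract_object`, which can then expose a strongly typed `get_id()` that call sites use instead of the raw `id` member. Below is a minimal, self-contained C++ sketch of that pattern only. Everything other than the names `abstract_object`, `space_id`, `type_id` and `get_id()` (for example `sample_object`, `protocol_ids`, `sample_object_type`, and the simplified `object_id` types) is an illustrative stand-in, not the project's real definitions.

#include <cstdint>
#include <iostream>

// Hypothetical stand-ins for the library's object_id_type / object_id.
struct object_id_type
{
   uint8_t  space = 0;
   uint8_t  type  = 0;
   uint64_t instance = 0;
};

template< uint8_t SpaceID, uint8_t TypeID >
struct object_id
{
   explicit object_id( const object_id_type& id ) : instance( id.instance ) {}
   uint64_t instance = 0;
};

struct object
{
   object() = default;
   object( uint8_t space, uint8_t type ) : id{ space, type, 0 } {}
   virtual ~object() = default;
   object_id_type id;
};

// space_id / type_id move into template parameters instead of per-class static members.
template< typename DerivedClass, uint8_t SpaceID, uint8_t TypeID >
class abstract_object : public object
{
public:
   static constexpr uint8_t space_id = SpaceID;
   static constexpr uint8_t type_id  = TypeID;
   abstract_object() : object( space_id, type_id ) {}
   // Returns a strongly typed id, so callers can write obj.get_id() instead of obj.id.
   object_id< SpaceID, TypeID > get_id() const { return object_id< SpaceID, TypeID >( this->id ); }
};

// Example derived class in the new style (identifier values here are made up).
constexpr uint8_t protocol_ids = 1;
constexpr uint8_t sample_object_type = 7;

class sample_object : public abstract_object< sample_object, protocol_ids, sample_object_type >
{
public:
   uint64_t payload = 0;
};

int main()
{
   sample_object obj;
   std::cout << int( sample_object::space_id ) << "." << int( sample_object::type_id )
             << "." << obj.get_id().instance << "\n";
   return 0;
}

The derived classes in the hunks above follow the same shape: they drop their two static members, pass the space/type constants as template arguments, and the many `obj.id` call sites switch to `obj.get_id()` so the result carries its concrete id type.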
diff --git a/libraries/chain/proposal_object.cpp b/libraries/chain/proposal_object.cpp index 3c5244d62c..9659d32b65 100644 --- a/libraries/chain/proposal_object.cpp +++ b/libraries/chain/proposal_object.cpp @@ -61,15 +61,16 @@ void required_approval_index::object_inserted( const object& obj ) { assert( dynamic_cast(&obj) ); const proposal_object& p = static_cast(obj); + const proposal_id_type proposal_id = p.get_id(); for( const auto& a : p.required_active_approvals ) - _account_to_proposals[a].insert( p.id ); + _account_to_proposals[a].insert( proposal_id ); for( const auto& a : p.required_owner_approvals ) - _account_to_proposals[a].insert( p.id ); + _account_to_proposals[a].insert( proposal_id ); for( const auto& a : p.available_active_approvals ) - _account_to_proposals[a].insert( p.id ); + _account_to_proposals[a].insert( proposal_id ); for( const auto& a : p.available_owner_approvals ) - _account_to_proposals[a].insert( p.id ); + _account_to_proposals[a].insert( proposal_id ); } void required_approval_index::remove( account_id_type a, proposal_id_type p ) @@ -87,15 +88,16 @@ void required_approval_index::object_removed( const object& obj ) { assert( dynamic_cast(&obj) ); const proposal_object& p = static_cast(obj); + const proposal_id_type proposal_id = p.get_id(); for( const auto& a : p.required_active_approvals ) - remove( a, p.id ); + remove( a, proposal_id ); for( const auto& a : p.required_owner_approvals ) - remove( a, p.id ); + remove( a, proposal_id ); for( const auto& a : p.available_active_approvals ) - remove( a, p.id ); + remove( a, proposal_id ); for( const auto& a : p.available_owner_approvals ) - remove( a, p.id ); + remove( a, proposal_id ); } void required_approval_index::insert_or_remove_delta( proposal_id_type p, @@ -134,8 +136,9 @@ void required_approval_index::about_to_modify( const object& before ) void required_approval_index::object_modified( const object& after ) { const proposal_object& p = static_cast(after); - insert_or_remove_delta( p.id, available_active_before_modify, p.available_active_approvals ); - insert_or_remove_delta( p.id, available_owner_before_modify, p.available_owner_approvals ); + const proposal_id_type proposal_id = p.get_id(); + insert_or_remove_delta( proposal_id, available_active_before_modify, p.available_active_approvals ); + insert_or_remove_delta( proposal_id, available_owner_before_modify, p.available_owner_approvals ); } } } // graphene::chain diff --git a/libraries/chain/withdraw_permission_evaluator.cpp b/libraries/chain/withdraw_permission_evaluator.cpp index ce3981348e..eaf8566e56 100644 --- a/libraries/chain/withdraw_permission_evaluator.cpp +++ b/libraries/chain/withdraw_permission_evaluator.cpp @@ -34,9 +34,9 @@ namespace graphene { namespace chain { void_result withdraw_permission_create_evaluator::do_evaluate(const operation_type& op) { try { database& d = db(); - FC_ASSERT(d.find_object(op.withdraw_from_account)); - FC_ASSERT(d.find_object(op.authorized_account)); - FC_ASSERT(d.find_object(op.withdrawal_limit.asset_id)); + FC_ASSERT(d.find(op.withdraw_from_account)); + FC_ASSERT(d.find(op.authorized_account)); + FC_ASSERT(d.find(op.withdrawal_limit.asset_id)); FC_ASSERT(op.period_start_time > d.head_block_time()); FC_ASSERT(op.period_start_time + op.periods_until_expiration * op.withdrawal_period_sec > d.head_block_time()); FC_ASSERT(op.withdrawal_period_sec >= d.get_global_properties().parameters.block_interval); @@ -56,7 +56,8 @@ object_id_type withdraw_permission_create_evaluator::do_apply(const operation_ty }).id; } 
FC_CAPTURE_AND_RETHROW( (op) ) } -void_result withdraw_permission_claim_evaluator::do_evaluate(const withdraw_permission_claim_evaluator::operation_type& op) +void_result withdraw_permission_claim_evaluator::do_evaluate( + const withdraw_permission_claim_evaluator::operation_type& op) { try { const database& d = db(); time_point_sec head_block_time = d.head_block_time(); @@ -79,8 +80,8 @@ void_result withdraw_permission_claim_evaluator::do_evaluate(const withdraw_perm const account_object& to = permit.authorized_account(d); FC_ASSERT( is_authorized_asset( d, to, _asset ), - "Account ${acct} '${name}' is unauthorized to transact asset ${a} '${sym}' due to whitelist / blacklist", - ("acct", to.id)("name", to.name)("a", _asset.id)("sym", _asset.symbol) ); + "Account ${acct} '${name}' is unauthorized to transact asset ${a} '${sym}' due to whitelist / blacklist", + ("acct", to.id)("name", to.name)("a", _asset.id)("sym", _asset.symbol) ); const account_object& from = op.withdraw_from_account(d); bool from_is_authorized = ( is_authorized_asset( d, from, _asset ) ); @@ -91,7 +92,8 @@ void_result withdraw_permission_claim_evaluator::do_evaluate(const withdraw_perm return void_result(); } FC_CAPTURE_AND_RETHROW( (op) ) } -void_result withdraw_permission_claim_evaluator::do_apply(const withdraw_permission_claim_evaluator::operation_type& op) +void_result withdraw_permission_claim_evaluator::do_apply( + const withdraw_permission_claim_evaluator::operation_type& op) { try { database& d = db(); @@ -111,14 +113,15 @@ void_result withdraw_permission_claim_evaluator::do_apply(const withdraw_permiss return void_result(); } FC_CAPTURE_AND_RETHROW( (op) ) } -void_result withdraw_permission_update_evaluator::do_evaluate(const withdraw_permission_update_evaluator::operation_type& op) +void_result withdraw_permission_update_evaluator::do_evaluate( + const withdraw_permission_update_evaluator::operation_type& op) { try { database& d = db(); const withdraw_permission_object& permit = op.permission_to_update(d); FC_ASSERT(permit.authorized_account == op.authorized_account); FC_ASSERT(permit.withdraw_from_account == op.withdraw_from_account); - FC_ASSERT(d.find_object(op.withdrawal_limit.asset_id)); + FC_ASSERT(d.find(op.withdrawal_limit.asset_id)); FC_ASSERT(op.period_start_time >= d.head_block_time()); FC_ASSERT(op.period_start_time + op.periods_until_expiration * op.withdrawal_period_sec > d.head_block_time()); FC_ASSERT(op.withdrawal_period_sec >= d.get_global_properties().parameters.block_interval); @@ -126,7 +129,8 @@ void_result withdraw_permission_update_evaluator::do_evaluate(const withdraw_per return void_result(); } FC_CAPTURE_AND_RETHROW( (op) ) } -void_result withdraw_permission_update_evaluator::do_apply(const withdraw_permission_update_evaluator::operation_type& op) +void_result withdraw_permission_update_evaluator::do_apply( + const withdraw_permission_update_evaluator::operation_type& op) { try { database& d = db(); @@ -140,7 +144,8 @@ void_result withdraw_permission_update_evaluator::do_apply(const withdraw_permis return void_result(); } FC_CAPTURE_AND_RETHROW( (op) ) } -void_result withdraw_permission_delete_evaluator::do_evaluate(const withdraw_permission_delete_evaluator::operation_type& op) +void_result withdraw_permission_delete_evaluator::do_evaluate( + const withdraw_permission_delete_evaluator::operation_type& op) { try { database& d = db(); @@ -151,7 +156,8 @@ void_result withdraw_permission_delete_evaluator::do_evaluate(const withdraw_per return void_result(); } FC_CAPTURE_AND_RETHROW( 
(op) ) } -void_result withdraw_permission_delete_evaluator::do_apply(const withdraw_permission_delete_evaluator::operation_type& op) +void_result withdraw_permission_delete_evaluator::do_apply( + const withdraw_permission_delete_evaluator::operation_type& op) { try { db().remove(db().get(op.withdrawal_permission)); return void_result(); diff --git a/libraries/db/include/graphene/db/index.hpp b/libraries/db/include/graphene/db/index.hpp index e6cf0d8cbf..7e431a5605 100644 --- a/libraries/db/include/graphene/db/index.hpp +++ b/libraries/db/include/graphene/db/index.hpp @@ -34,7 +34,6 @@ namespace graphene { namespace db { class object_database; - using fc::path; /** * @class index_observer @@ -126,14 +125,15 @@ namespace graphene { namespace db { */ template void modify( const Object& obj, const Lambda& l ) { - modify( static_cast(obj), std::function( [&]( object& o ){ l( static_cast(o) ); } ) ); + modify( static_cast(obj), + std::function( [&l]( object& o ){ l( static_cast(o) ); } ) ); } - virtual void inspect_all_objects(std::function inspector)const = 0; - virtual void add_observer( const shared_ptr& ) = 0; + virtual void inspect_all_objects(std::function inspector)const = 0; + virtual void add_observer( const std::shared_ptr& ) = 0; - virtual void object_from_variant( const fc::variant& var, object& obj, uint32_t max_depth )const = 0; - virtual void object_default( object& obj )const = 0; + virtual void object_from_variant( const fc::variant& var, object& obj, uint32_t max_depth )const = 0; + virtual void object_default( object& obj )const = 0; }; class secondary_index @@ -185,8 +185,8 @@ namespace graphene { namespace db { } protected: - vector< shared_ptr > _observers; - vector< unique_ptr > _sindex; + std::vector< std::shared_ptr > _observers; + std::vector< std::unique_ptr > _sindex; private: object_database& _db; @@ -210,7 +210,7 @@ namespace graphene { namespace db { static const size_t MAX_HOLE = 100; static const size_t _mask = ((1ULL << chunkbits) - 1); uint64_t next = 0; - vector< vector< const Object* > > content; + std::vector< std::vector< const Object* > > content; std::stack< object_id_type > ids_being_modified; public: @@ -233,10 +233,12 @@ namespace graphene { namespace db { next++; } else if( instance < next ) - FC_ASSERT( !content[instance >> chunkbits][instance & _mask], "Overwriting insert at {id}!", ("id",obj.id) ); + FC_ASSERT( !content[instance >> chunkbits][instance & _mask], + "Overwriting insert at {id}!", ("id",obj.id) ); else // instance > next, allow small "holes" { - FC_ASSERT( instance <= next + MAX_HOLE, "Out-of-order insert: {id} > {next}!", ("id",obj.id)("next",next) ); + FC_ASSERT( instance <= next + MAX_HOLE, + "Out-of-order insert: {id} > {next}!", ("id",obj.id)("next",next) ); if( 0 == (next & _mask) || (next & (~_mask)) != (instance & (~_mask)) ) { content.resize((instance >> chunkbits) + 1); @@ -257,7 +259,8 @@ namespace graphene { namespace db { FC_ASSERT( nullptr != dynamic_cast(&obj), "Wrong object type!" 
); uint64_t instance = obj.id.instance(); FC_ASSERT( instance < next, "Removing out-of-range object: {id} > {next}!", ("id",obj.id)("next",next) ); - FC_ASSERT( content[instance >> chunkbits][instance & _mask], "Removing non-existent object {id}!", ("id",obj.id) ); + FC_ASSERT( content[instance >> chunkbits][instance & _mask], + "Removing non-existent object {id}!", ("id",obj.id) ); content[instance >> chunkbits][instance & _mask] = nullptr; } @@ -342,8 +345,8 @@ namespace graphene { namespace db { return fc::sha256::hash(desc); } - virtual void open( const path& db )override - { + virtual void open( const fc::path& db )override + { if( !fc::exists( db ) ) return; fc::file_mapping fm( db.generic_string().c_str(), fc::read_only ); fc::mapped_region mr( fm, fc::read_only, 0, fc::file_size(db) ); @@ -352,8 +355,9 @@ namespace graphene { namespace db { fc::raw::unpack(ds, _next_id); fc::raw::unpack(ds, open_ver); - FC_ASSERT( open_ver == get_object_version(), "Incompatible Version, the serialization of objects in this index has changed" ); - vector tmp; + FC_ASSERT( open_ver == get_object_version(), + "Incompatible Version, the serialization of objects in this index has changed" ); + std::vector tmp; while( ds.remaining() > 0 ) { fc::raw::unpack( ds, tmp ); @@ -361,15 +365,15 @@ namespace graphene { namespace db { } } - virtual void save( const path& db ) override + virtual void save( const fc::path& db ) override { - std::ofstream out( db.generic_string(), + std::ofstream out( db.generic_string(), std::ofstream::binary | std::ofstream::out | std::ofstream::trunc ); FC_ASSERT( out ); auto ver = get_object_version(); fc::raw::pack( out, _next_id ); fc::raw::pack( out, ver ); - this->inspect_all_objects( [&]( const object& o ) { + this->inspect_all_objects( [&out]( const object& o ) { auto vec = fc::raw::pack( static_cast(o) ); auto packed_vec = fc::raw::pack( vec ); out.write( packed_vec.data(), packed_vec.size() ); @@ -422,7 +426,7 @@ namespace graphene { namespace db { on_modify( obj ); } - virtual void add_observer( const shared_ptr& o ) override + virtual void add_observer( const std::shared_ptr& o ) override { _observers.emplace_back( o ); } diff --git a/libraries/db/include/graphene/db/object.hpp b/libraries/db/include/graphene/db/object.hpp index 125ff4293e..747c0e6b72 100644 --- a/libraries/db/include/graphene/db/object.hpp +++ b/libraries/db/include/graphene/db/object.hpp @@ -30,7 +30,6 @@ #define MAX_NESTING (200) namespace graphene { namespace db { - /** * @brief base for all database objects * @@ -62,51 +61,66 @@ namespace graphene { namespace db { class object { public: - object(){} - virtual ~object(){} + object() = default; + object( uint8_t space_id, uint8_t type_id ) : id( space_id, type_id, 0 ) {} + virtual ~object() = default; // serialized object_id_type id; - /// these methods are implemented for derived classes by inheriting abstract_object - virtual unique_ptr clone()const = 0; - virtual void move_from( object& obj ) = 0; - virtual variant to_variant()const = 0; - virtual vector pack()const = 0; + /// these methods are implemented for derived classes by inheriting base_abstract_object + /// @{ + virtual std::unique_ptr clone()const = 0; + virtual void move_from( object& obj ) = 0; + virtual fc::variant to_variant()const = 0; + virtual std::vector pack()const = 0; + /// @} }; /** - * @class abstract_object + * @class base_abstract_object * @brief Use the Curiously Recurring Template Pattern to automatically add the ability to * clone, serialize, and move objects 
polymorphically. * * http://en.wikipedia.org/wiki/Curiously_recurring_template_pattern */ template - class abstract_object : public object + class base_abstract_object : public object { public: - virtual unique_ptr clone()const + using object::object; // constructors + virtual std::unique_ptr clone()const { - return unique_ptr( std::make_unique( *static_cast(this) ) ); + return std::make_unique( *static_cast(this) ); } virtual void move_from( object& obj ) { static_cast(*this) = std::move( static_cast(obj) ); } - virtual variant to_variant()const { return variant( static_cast(*this), MAX_NESTING ); } - virtual vector pack()const { return fc::raw::pack( static_cast(*this) ); } + virtual fc::variant to_variant()const + { return fc::variant( static_cast(*this), MAX_NESTING ); } + virtual std::vector pack()const { return fc::raw::pack( static_cast(*this) ); } + }; + + template + class abstract_object : public base_abstract_object + { + public: + static constexpr uint8_t space_id = SpaceID; + static constexpr uint8_t type_id = TypeID; + abstract_object() : base_abstract_object( space_id, type_id ) {} + object_id get_id() const { return object_id( this->id ); } }; - typedef flat_map annotation_map; + using annotation_map = fc::flat_map; /** * @class annotated_object * @brief An object that is easily extended by providing pointers to other objects, one for each space. */ template - class annotated_object : public abstract_object + class annotated_object : public base_abstract_object { public: /** return object_id_type() if no anotation is found for id_space */ @@ -139,4 +153,5 @@ struct is_restricted_conversion : public mpl::true_ {}; FC_REFLECT_TYPENAME( graphene::db::annotation_map ) FC_REFLECT( graphene::db::object, (id) ) -FC_REFLECT_DERIVED_TEMPLATE( (typename Derived), graphene::db::annotated_object, (graphene::db::object), (annotations) ) +FC_REFLECT_DERIVED_TEMPLATE( (typename Derived), graphene::db::annotated_object, (graphene::db::object), + (annotations) ) diff --git a/libraries/db/include/graphene/db/object_database.hpp b/libraries/db/include/graphene/db/object_database.hpp index d189cd1b1a..2ab8bad8e4 100644 --- a/libraries/db/include/graphene/db/object_database.hpp +++ b/libraries/db/include/graphene/db/object_database.hpp @@ -64,23 +64,26 @@ namespace graphene { namespace db { } )); } - ///These methods are used to retrieve indexes on the object_database. All public index accessors are const-access only. + /// These methods are used to retrieve indexes on the object_database. All public index accessors are + /// const-access only. 
/// @{ template const IndexType& get_index_type()const { static_assert( std::is_base_of::value, "Type must be an index type" ); - return static_cast( get_index( IndexType::object_type::space_id, IndexType::object_type::type_id ) ); + return static_cast( get_index( IndexType::object_type::space_id, + IndexType::object_type::type_id ) ); } template const index& get_index()const { return get_index(T::space_id,T::type_id); } const index& get_index(uint8_t space_id, uint8_t type_id)const; - const index& get_index(object_id_type id)const { return get_index(id.space(),id.type()); } + const index& get_index(const object_id_type& id)const { return get_index(id.space(),id.type()); } /// @} - const object& get_object( object_id_type id )const; - const object* find_object( object_id_type id )const; + const object& get_object( const object_id_type& id )const; + const object* find_object( const object_id_type& id )const; - /// These methods are mutators of the object_database. You must use these methods to make changes to the object_database, + /// These methods are mutators of the object_database. + /// You must use these methods to make changes to the object_database, /// in order to maintain proper undo history. ///@{ @@ -107,14 +110,14 @@ namespace graphene { namespace db { } template - const T& get( object_id_type id )const + const T& get( const object_id_type& id )const { const object& obj = get_object( id ); assert( nullptr != dynamic_cast(&obj) ); return static_cast(obj); } template - const T* find( object_id_type id )const + const T* find( const object_id_type& id )const { const object* obj = find_object( id ); assert( !obj || nullptr != dynamic_cast(obj) ); @@ -122,13 +125,13 @@ namespace graphene { namespace db { } template - auto find( object_id id )const -> const object_downcast_t* { - return find>(id); + auto find( const object_id& id )const -> const object_downcast_t* { + return find>(object_id_type(id)); } template - auto get( object_id id )const -> const object_downcast_t& { - return get>(id); + auto get( const object_id& id )const -> const object_downcast_t& { + return get>(object_id_type(id)); } template @@ -138,7 +141,7 @@ namespace graphene { namespace db { if( _index[ObjectType::space_id].size() <= ObjectType::type_id ) _index[ObjectType::space_id].resize( 255 ); assert(!_index[ObjectType::space_id][ObjectType::type_id]); - unique_ptr indexptr( std::make_unique(*this) ); + std::unique_ptr indexptr( std::make_unique(*this) ); _index[ObjectType::space_id][ObjectType::type_id] = std::move(indexptr); return static_cast(_index[ObjectType::space_id][ObjectType::type_id].get()); } @@ -146,7 +149,8 @@ namespace graphene { namespace db { template SecondaryIndexType* add_secondary_index( Args... 
args ) { - return get_mutable_index_type().template add_secondary_index(args...); + return get_mutable_index_type().template + add_secondary_index(args...); } void pop_undo(); @@ -159,11 +163,12 @@ namespace graphene { namespace db { template IndexType& get_mutable_index_type() { static_assert( std::is_base_of::value, "Type must be an index type" ); - return static_cast( get_mutable_index( IndexType::object_type::space_id, IndexType::object_type::type_id ) ); + return static_cast( get_mutable_index( IndexType::object_type::space_id, + IndexType::object_type::type_id ) ); } template - index& get_mutable_index() { return get_mutable_index(T::space_id,T::type_id); } - index& get_mutable_index(object_id_type id) { return get_mutable_index(id.space(),id.type()); } + index& get_mutable_index() { return get_mutable_index(T::space_id,T::type_id); } + index& get_mutable_index(const object_id_type& id) { return get_mutable_index(id.space(),id.type()); } index& get_mutable_index(uint8_t space_id, uint8_t type_id); private: @@ -175,7 +180,7 @@ namespace graphene { namespace db { void save_undo_remove( const object& obj ); fc::path _data_dir; - vector< vector< unique_ptr > > _index; + std::vector< std::vector< std::unique_ptr > > _index; }; } } // graphene::db diff --git a/libraries/db/include/graphene/db/simple_index.hpp b/libraries/db/include/graphene/db/simple_index.hpp index 7cbdb538f1..3d80fe77fd 100644 --- a/libraries/db/include/graphene/db/simple_index.hpp +++ b/libraries/db/include/graphene/db/simple_index.hpp @@ -38,7 +38,7 @@ namespace graphene { namespace db { class simple_index : public index { public: - typedef T object_type; + using object_type = T; virtual const object& create( const std::function& constructor ) override { @@ -102,10 +102,10 @@ namespace graphene { namespace db { class const_iterator { public: - const_iterator( const vector>& objects ):_objects(objects) {} + explicit const_iterator( const std::vector>& objects ):_objects(objects) {} const_iterator( - const vector>& objects, - const vector>::const_iterator& a ):_itr(a),_objects(objects){} + const std::vector>& objects, + const std::vector>::const_iterator& a ):_itr(a),_objects(objects){} friend bool operator==( const const_iterator& a, const const_iterator& b ) { return a._itr == b._itr; } friend bool operator!=( const const_iterator& a, const const_iterator& b ) { return a._itr != b._itr; } const T& operator*()const { return static_cast(*_itr->get()); } @@ -122,21 +122,21 @@ namespace graphene { namespace db { ++_itr; return *this; } - typedef std::forward_iterator_tag iterator_category; - typedef vector >::value_type value_type; - typedef vector >::difference_type difference_type; - typedef vector >::pointer pointer; - typedef vector >::reference reference; + using iterator_category = std::forward_iterator_tag; + using value_type = std::vector >::value_type; + using difference_type = std::vector >::difference_type; + using pointer = std::vector >::pointer; + using reference = std::vector >::reference; private: - vector>::const_iterator _itr; - const vector>& _objects; + std::vector>::const_iterator _itr; + const std::vector>& _objects; }; const_iterator begin()const { return const_iterator(_objects, _objects.begin()); } const_iterator end()const { return const_iterator(_objects, _objects.end()); } size_t size()const { return _objects.size(); } private: - vector< unique_ptr > _objects; + std::vector< std::unique_ptr > _objects; }; } } // graphene::db diff --git a/libraries/db/include/graphene/db/undo_database.hpp 
b/libraries/db/include/graphene/db/undo_database.hpp
index 5234ac65aa..5ca4cd3838 100644
--- a/libraries/db/include/graphene/db/undo_database.hpp
+++ b/libraries/db/include/graphene/db/undo_database.hpp
@@ -28,16 +28,14 @@ namespace graphene { namespace db {
-   using std::unordered_map;
-   using fc::flat_set;
    class object_database;
    struct undo_state
    {
-      unordered_map > old_values;
-      unordered_map old_index_next_ids;
-      std::unordered_set new_ids;
-      unordered_map > removed;
+      std::unordered_map > old_values;
+      std::unordered_map old_index_next_ids;
+      std::unordered_set new_ids;
+      std::unordered_map > removed;
    };
diff --git a/libraries/db/object_database.cpp b/libraries/db/object_database.cpp
index 5e91c7c87d..47c664345a 100644
--- a/libraries/db/object_database.cpp
+++ b/libraries/db/object_database.cpp
@@ -42,11 +42,11 @@ void object_database::close()
 {
 }
-const object* object_database::find_object( object_id_type id )const
+const object* object_database::find_object( const object_id_type& id )const
 {
    return get_index(id.space(),id.type()).find( id );
 }
-const object& object_database::get_object( object_id_type id )const
+const object& object_database::get_object( const object_id_type& id )const
 {
    return get_index(id.space(),id.type()).get( id );
 }
diff --git a/libraries/plugins/account_history/account_history_plugin.cpp b/libraries/plugins/account_history/account_history_plugin.cpp
index 58a6119c63..de00dfff02 100644
--- a/libraries/plugins/account_history/account_history_plugin.cpp
+++ b/libraries/plugins/account_history/account_history_plugin.cpp
@@ -161,7 +161,7 @@ void account_history_plugin_impl::update_account_histories( const signed_block&
                         MUST_IGNORE_CUSTOM_OP_REQD_AUTHS( db.head_block_time() ) );
       if( op.op.is_type< account_create_operation >() )
-         impacted.insert( op.result.get() );
+         impacted.insert( account_id_type( op.result.get() ) );
       // https://github.com/bitshares/bitshares-core/issues/265
       if( HARDFORK_CORE_265_PASSED(b.timestamp) || !op.op.is_type< account_create_operation >() )
@@ -306,7 +306,7 @@ void account_history_plugin_impl::check_and_remove_op_history_obj( const operati
    graphene::chain::database& db = database();
    const auto& his_idx = db.get_index_type();
    const auto& by_opid_idx = his_idx.indices().get();
-   if( by_opid_idx.find( op.id ) == by_opid_idx.end() )
+   if( by_opid_idx.find( op.get_id() ) == by_opid_idx.end() )
    {
       // if no reference, remove
       db.remove( op );
diff --git a/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp b/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp
index d0cd598281..68ef3ed58c 100644
--- a/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp
+++ b/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp
@@ -50,11 +50,9 @@ enum account_history_object_type
 };
 /// This struct tracks accounts that have exceeded the max-ops-per-account limit
-struct exceeded_account_object : public abstract_object
+struct exceeded_account_object : public abstract_object
 {
-   static constexpr uint8_t space_id = ACCOUNT_HISTORY_SPACE_ID;
-   static constexpr uint8_t type_id = exceeded_account_object_type;
-
    /// The ID of the account
    account_id_type account_id;
    /// The height of the block containing the oldest (not yet removed) operation related to this account
diff --git a/libraries/plugins/api_helper_indexes/api_helper_indexes.cpp b/libraries/plugins/api_helper_indexes/api_helper_indexes.cpp
index
2e1530f2cb..ca02447b02 100644 --- a/libraries/plugins/api_helper_indexes/api_helper_indexes.cpp +++ b/libraries/plugins/api_helper_indexes/api_helper_indexes.cpp @@ -97,15 +97,17 @@ share_type amount_in_collateral_index::get_backing_collateral( const asset_id_ty void asset_in_liquidity_pools_index::object_inserted( const object& objct ) { try { const auto& o = static_cast( objct ); - asset_in_pools_map[ o.asset_a ].insert( o.id ); // Note: [] operator will create an entry if not found - asset_in_pools_map[ o.asset_b ].insert( o.id ); + const liquidity_pool_id_type pool_id = o.get_id(); + asset_in_pools_map[ o.asset_a ].insert( pool_id ); // Note: [] operator will create an entry if not found + asset_in_pools_map[ o.asset_b ].insert( pool_id ); } FC_CAPTURE_AND_RETHROW( (objct) ) } void asset_in_liquidity_pools_index::object_removed( const object& objct ) { try { const auto& o = static_cast( objct ); - asset_in_pools_map[ o.asset_a ].erase( o.id ); - asset_in_pools_map[ o.asset_b ].erase( o.id ); + const liquidity_pool_id_type pool_id = o.get_id(); + asset_in_pools_map[ o.asset_a ].erase( pool_id ); + asset_in_pools_map[ o.asset_b ].erase( pool_id ); // Note: do not erase entries with an empty set from the map in order to avoid read/write race conditions } FC_CAPTURE_AND_RETHROW( (objct) ) } diff --git a/libraries/plugins/custom_operations/include/graphene/custom_operations/custom_objects.hpp b/libraries/plugins/custom_operations/include/graphene/custom_operations/custom_objects.hpp index 2a550ac46d..81351cea8d 100644 --- a/libraries/plugins/custom_operations/include/graphene/custom_operations/custom_objects.hpp +++ b/libraries/plugins/custom_operations/include/graphene/custom_operations/custom_objects.hpp @@ -38,11 +38,9 @@ enum class custom_operations_object_types { account_map = 0 }; -struct account_storage_object : public abstract_object +struct account_storage_object : public abstract_object( custom_operations_object_types::account_map )> { - static constexpr uint8_t space_id = CUSTOM_OPERATIONS_SPACE_ID; - static constexpr uint8_t type_id = static_cast( custom_operations_object_types::account_map ); - account_id_type account; string catalog; string key; diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index c62c3ef87c..2831b6bc5f 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -101,7 +101,7 @@ class elasticsearch_plugin_impl void doBlock(uint32_t trx_in_block, const signed_block& b, block_struct& bs) const; void doVisitor(const optional & oho, visitor_struct& vs) const; void checkState(const fc::time_point_sec& block_time); - void cleanObjects(const account_history_id_type& ath, const account_id_type& account_id); + void cleanObjects(const account_history_object& ath, const account_id_type& account_id); void init_program_options(const boost::program_options::variables_map& options); }; @@ -181,7 +181,7 @@ void elasticsearch_plugin_impl::update_account_histories( const signed_block& b MUST_IGNORE_CUSTOM_OP_REQD_AUTHS( db.head_block_time() ) ); if( op.op.is_type< account_create_operation >() ) - impacted.insert( op.result.get() ); + impacted.insert( account_id_type( op.result.get() ) ); // https://github.com/bitshares/bitshares-core/issues/265 if( HARDFORK_CORE_265_PASSED(b.timestamp) || !op.op.is_type< account_create_operation >() ) @@ -438,10 +438,10 @@ void elasticsearch_plugin_impl::add_elasticsearch( const 
account_id_type& accoun if( bulk_lines.size() >= limit_documents ) send_bulk(); } - cleanObjects(ath.id, account_id); + cleanObjects(ath, account_id); } -void elasticsearch_plugin_impl::cleanObjects( const account_history_id_type& ath_id, +void elasticsearch_plugin_impl::cleanObjects( const account_history_object& ath, const account_id_type& account_id ) { graphene::chain::database& db = database(); @@ -449,7 +449,7 @@ void elasticsearch_plugin_impl::cleanObjects( const account_history_id_type& ath const auto &his_idx = db.get_index_type(); const auto &by_seq_idx = his_idx.indices().get(); auto itr = by_seq_idx.lower_bound(boost::make_tuple(account_id, 0)); - if (itr != by_seq_idx.end() && itr->account == account_id && itr->id != ath_id) { + if (itr != by_seq_idx.end() && itr->account == account_id && itr->id != ath.id) { // if found, remove the entry const auto remove_op_id = itr->operation_id; const auto itr_remove = itr; @@ -459,7 +459,7 @@ void elasticsearch_plugin_impl::cleanObjects( const account_history_id_type& ath // this should be always true, but just have a check here if( itr != by_seq_idx.end() && itr->account == account_id ) { - db.modify( *itr, [&]( account_history_object& obj ){ + db.modify( *itr, []( account_history_object& obj ){ obj.next = account_history_id_type(); }); } diff --git a/libraries/plugins/market_history/include/graphene/market_history/market_history_plugin.hpp b/libraries/plugins/market_history/include/graphene/market_history/market_history_plugin.hpp index bc70ae8794..2020caa5ef 100644 --- a/libraries/plugins/market_history/include/graphene/market_history/market_history_plugin.hpp +++ b/libraries/plugins/market_history/include/graphene/market_history/market_history_plugin.hpp @@ -81,11 +81,8 @@ struct bucket_key } }; -struct bucket_object : public abstract_object +struct bucket_object : public abstract_object { - static constexpr uint8_t space_id = MARKET_HISTORY_SPACE_ID; - static constexpr uint8_t type_id = bucket_object_type; - price high()const { return asset( high_base, key.base ) / asset( high_quote, key.quote ); } price low()const { return asset( low_base, key.base ) / asset( low_quote, key.quote ); } @@ -114,11 +111,9 @@ struct history_key { return std::tie( a.base, a.quote, a.sequence ) == std::tie( b.base, b.quote, b.sequence ); } }; -struct order_history_object : public abstract_object +struct order_history_object : public abstract_object { - static constexpr uint8_t space_id = MARKET_HISTORY_SPACE_ID; - static constexpr uint8_t type_id = order_history_object_type; - history_key key; fc::time_point_sec time; fill_order_operation op; @@ -139,11 +134,9 @@ struct order_history_object_key_sequence_extractor result_type operator()(const order_history_object& o)const { return o.key.sequence; } }; -struct market_ticker_object : public abstract_object +struct market_ticker_object : public abstract_object { - static constexpr uint8_t space_id = MARKET_HISTORY_SPACE_ID; - static constexpr uint8_t type_id = market_ticker_object_type; - asset_id_type base; asset_id_type quote; share_type last_day_base; @@ -154,11 +147,9 @@ struct market_ticker_object : public abstract_object fc::uint128_t quote_volume; }; -struct market_ticker_meta_object : public abstract_object +struct market_ticker_meta_object : public abstract_object { - static constexpr uint8_t space_id = MARKET_HISTORY_SPACE_ID; - static constexpr uint8_t type_id = market_ticker_meta_object_type; - object_id_type rolling_min_order_his_id; bool skip_min_order_his_id = false; }; @@ -222,11 +213,9 @@ 
typedef generic_index +struct liquidity_pool_history_object : public abstract_object { - static constexpr uint8_t space_id = MARKET_HISTORY_SPACE_ID; - static constexpr uint8_t type_id = liquidity_pool_history_object_type; - liquidity_pool_id_type pool; uint64_t sequence = 0; fc::time_point_sec time; @@ -299,11 +288,9 @@ typedef generic_index< liquidity_pool_history_object, /// Stores meta data for liquidity pool tickers -struct liquidity_pool_ticker_meta_object : public abstract_object +struct liquidity_pool_ticker_meta_object : public abstract_object { - static constexpr uint8_t space_id = MARKET_HISTORY_SPACE_ID; - static constexpr uint8_t type_id = liquidity_pool_ticker_meta_object_type; - object_id_type rolling_min_lp_his_id; bool skip_min_lp_his_id = false; }; @@ -311,11 +298,9 @@ struct liquidity_pool_ticker_meta_object : public abstract_object; /// Stores ticker data for liquidity pools -struct liquidity_pool_ticker_object : public abstract_object +struct liquidity_pool_ticker_object : public abstract_object { - static constexpr uint8_t space_id = MARKET_HISTORY_SPACE_ID; - static constexpr uint8_t type_id = liquidity_pool_ticker_object_type; - uint32_t _24h_deposit_count = 0; fc::uint128_t _24h_deposit_amount_a = 0; fc::uint128_t _24h_deposit_amount_b = 0; @@ -402,6 +387,9 @@ class market_history_plugin : public graphene::app::plugin } } //graphene::market_history +// Other objects aren't mapped here because they are not used +MAP_OBJECT_ID_TO_TYPE( graphene::market_history::liquidity_pool_ticker_object ) + FC_REFLECT( graphene::market_history::history_key, (base)(quote)(sequence) ) FC_REFLECT_DERIVED( graphene::market_history::order_history_object, (graphene::db::object), (key)(time)(op) ) FC_REFLECT( graphene::market_history::bucket_key, (base)(quote)(seconds)(open) ) diff --git a/libraries/plugins/market_history/market_history_plugin.cpp b/libraries/plugins/market_history/market_history_plugin.cpp index 29699b1eba..4aee6da8ff 100644 --- a/libraries/plugins/market_history/market_history_plugin.cpp +++ b/libraries/plugins/market_history/market_history_plugin.cpp @@ -407,7 +407,7 @@ void market_history_plugin_impl::update_market_histories( const signed_block& b else { liquidity_pool_ticker_id_type ticker_id( history_itr->pool.instance ); - const liquidity_pool_ticker_object* ticker = db.find( ticker_id ); + const liquidity_pool_ticker_object* ticker = db.find( ticker_id ); if( ticker != nullptr ) // should always be true { const operation_history_object& oho = history_itr->op; @@ -632,7 +632,7 @@ void market_history_plugin_impl::update_liquidity_pool_histories( else { liquidity_pool_ticker_id_type ticker_id( pool->instance ); - const liquidity_pool_ticker_object* ticker = db.find( ticker_id ); + const liquidity_pool_ticker_object* ticker = db.find( ticker_id ); if( ticker != nullptr ) { if( oho.op.is_type< liquidity_pool_deposit_operation >() ) diff --git a/libraries/protocol/asset_ops.cpp b/libraries/protocol/asset_ops.cpp index 0e7c3bafff..09a7bab33c 100644 --- a/libraries/protocol/asset_ops.cpp +++ b/libraries/protocol/asset_ops.cpp @@ -80,7 +80,7 @@ share_type asset_issue_operation::calculate_fee(const fee_parameters_type& k)con } share_type asset_create_operation::calculate_fee( const asset_create_operation::fee_parameters_type& param, - optional sub_asset_creation_fee )const + const optional& sub_asset_creation_fee )const { share_type core_fee_required = param.long_symbol; diff --git a/libraries/protocol/include/graphene/protocol/asset_ops.hpp 
b/libraries/protocol/include/graphene/protocol/asset_ops.hpp index 180c10fce0..f2515f4fed 100644 --- a/libraries/protocol/include/graphene/protocol/asset_ops.hpp +++ b/libraries/protocol/include/graphene/protocol/asset_ops.hpp @@ -220,7 +220,8 @@ namespace graphene { namespace protocol { account_id_type fee_payer()const { return issuer; } void validate()const; - share_type calculate_fee( const fee_parameters_type& k, optional sub_asset_creation_fee )const; + share_type calculate_fee( const fee_parameters_type& k, + const optional& sub_asset_creation_fee )const; }; /** diff --git a/libraries/protocol/include/graphene/protocol/object_id.hpp b/libraries/protocol/include/graphene/protocol/object_id.hpp index ba04e079d3..417e400d39 100644 --- a/libraries/protocol/include/graphene/protocol/object_id.hpp +++ b/libraries/protocol/include/graphene/protocol/object_id.hpp @@ -24,31 +24,30 @@ #pragma once #include #include -#include -#define GRAPHENE_DB_MAX_INSTANCE_ID (uint64_t(-1)>>16) namespace graphene { namespace db { - using std::shared_ptr; - using std::unique_ptr; - using std::vector; - using fc::flat_map; - using fc::variant; - using fc::unsigned_int; struct object_id_type { - object_id_type( uint8_t s, uint8_t t, uint64_t i ) + static constexpr uint8_t instance_bits = 48; + static constexpr uint8_t type_and_instance_bits = 56; + static constexpr uint64_t one_byte_mask = 0x00ff; + static constexpr uint64_t max_instance = 0x0000ffffffffffff; + + object_id_type() = default; + object_id_type( uint8_t s, uint8_t t, uint64_t i ){ reset( s, t, i ); } + + void reset( uint8_t s, uint8_t t, uint64_t i ) { - FC_ASSERT( i >> 48 == 0, "instance overflow", ("instance",i) ); - number = (uint64_t(s)<<56) | (uint64_t(t)<<48) | i; + FC_ASSERT( i >> instance_bits == 0, "instance overflow", ("instance",i) ); + number = ( (uint64_t(s) << type_and_instance_bits) | (uint64_t(t) << instance_bits) ) | i; } - object_id_type(){ number = 0; } - uint8_t space()const { return number >> 56; } - uint8_t type()const { return number >> 48 & 0x00ff; } - uint16_t space_type()const { return number >> 48; } - uint64_t instance()const { return number & GRAPHENE_DB_MAX_INSTANCE_ID; } - bool is_null()const { return number == 0; } + uint8_t space()const { return number >> type_and_instance_bits; } + uint8_t type()const { return (number >> instance_bits) & one_byte_mask; } + uint16_t space_type()const { return number >> instance_bits; } + uint64_t instance()const { return number & max_instance; } + bool is_null()const { return 0 == number; } explicit operator uint64_t()const { return number; } friend bool operator == ( const object_id_type& a, const object_id_type& b ) { return a.number == b.number; } @@ -61,18 +60,18 @@ namespace graphene { namespace db { friend object_id_type operator+(const object_id_type& a, int64_t delta ) { return object_id_type( a.space(), a.type(), a.instance() + delta ); } - friend size_t hash_value( object_id_type v ) { return std::hash()(v.number); } + friend size_t hash_value( const object_id_type& v ) { return std::hash()(v.number); } template< typename T > bool is() const { - return (number >> 48) == ( ((uint64_t)(((uint64_t)T::space_id) << 8)) | ((uint64_t)(T::type_id)) ); + return space_type() == T::space_type; } + template< typename T > T as() const { - FC_ASSERT( is() ); return T( *this ); } @@ -81,7 +80,7 @@ namespace graphene { namespace db { return fc::to_string(space()) + "." + fc::to_string(type()) + "." 
+ fc::to_string(instance()); } - uint64_t number; + uint64_t number = 0; }; class object; @@ -94,8 +93,8 @@ namespace graphene { namespace db { #define MAP_OBJECT_ID_TO_TYPE(OBJECT) \ namespace graphene { namespace db { \ template<> \ - struct object_downcast> { using type = OBJECT; }; \ + struct object_downcast&> { using type = OBJECT; }; \ } } template using object_downcast_t = typename object_downcast::type; @@ -103,29 +102,45 @@ namespace graphene { namespace db { template struct object_id { + static constexpr uint8_t type_bits = 8; + static constexpr uint8_t instance_bits = 48; + static constexpr uint64_t max_instance = 0x0000ffffffffffff; + static constexpr uint8_t space_id = SpaceID; static constexpr uint8_t type_id = TypeID; - static constexpr uint16_t space_type = uint16_t(uint16_t(space_id) << 8) | uint16_t(type_id); + static constexpr uint16_t space_type = uint16_t(uint16_t(space_id) << type_bits) | uint16_t(type_id); static constexpr object_id max() { - return object_id( 0xffffffffffff ); + return object_id( max_instance ); } object_id() = default; - object_id( unsigned_int i ):instance(i){} + explicit object_id( const fc::unsigned_int& i ):instance(i) + { + FC_ASSERT( (i.value >> instance_bits) == 0, "instance overflow", ("instance",i) ); + } explicit object_id( uint64_t i ):instance(i) { - FC_ASSERT( (i >> 48) == 0 ); + FC_ASSERT( (i >> instance_bits) == 0, "instance overflow", ("instance",i) ); + } + explicit object_id( const object_id_type& id ):instance(id.instance()) + { + // Won't overflow, but need to check space and type + FC_ASSERT( id.is>(), "space or type mismatch" ); } - object_id( object_id_type id ):instance(id.instance()) + + object_id& operator=( const object_id_type& o ) { + *this = object_id(o); + return *this; } - friend object_id operator+(const object_id a, int64_t delta ) { return object_id( uint64_t(a.instance.value+delta) ); } + friend object_id operator+(const object_id& a, int64_t delta ) + { return object_id( uint64_t(a.instance.value+delta) ); } - operator object_id_type()const { return object_id_type( SpaceID, TypeID, instance.value ); } + explicit operator object_id_type()const { return object_id_type( SpaceID, TypeID, instance.value ); } explicit operator uint64_t()const { return object_id_type( *this ).number; } template @@ -142,17 +157,19 @@ namespace graphene { namespace db { friend bool operator == ( const fc::unsigned_int& a, const object_id& b ) { return a == b.instance; } friend bool operator != ( const fc::unsigned_int& a, const object_id& b ) { return a != b.instance; } - friend bool operator < ( const object_id& a, const object_id& b ) { return a.instance.value < b.instance.value; } - friend bool operator > ( const object_id& a, const object_id& b ) { return a.instance.value > b.instance.value; } + friend bool operator < ( const object_id& a, const object_id& b ) + { return a.instance.value < b.instance.value; } + friend bool operator > ( const object_id& a, const object_id& b ) + { return a.instance.value > b.instance.value; } - friend size_t hash_value( object_id v ) { return std::hash()(v.instance.value); } + friend size_t hash_value( const object_id& v ) { return std::hash()(v.instance.value); } explicit operator std::string() const { return fc::to_string(space_id) + "." + fc::to_string(type_id) + "." 
+ fc::to_string(instance.value); } - unsigned_int instance; + fc::unsigned_int instance; // default is 0 }; } } // graphene::db @@ -166,7 +183,8 @@ struct get_typename> { static const char* name() { return typeid(get_typename).name(); - static std::string _str = string("graphene::db::object_id<")+fc::to_string(SpaceID) + ":" + fc::to_string(TypeID)+">"; + static std::string _str = string("graphene::db::object_id<") + fc::to_string(SpaceID) + ":" + + fc::to_string(TypeID) + ">"; return _str.c_str(); } }; @@ -174,8 +192,8 @@ struct get_typename> template struct reflector > { - typedef graphene::db::object_id type; - typedef std::true_type is_defined; + using type = graphene::db::object_id; + using is_defined = std::true_type; using native_members = typelist::list>; using inherited_members = typelist::list<>; using members = native_members; @@ -187,7 +205,7 @@ struct reflector > template static inline void visit( const Visitor& visitor ) { - typedef decltype(((type*)nullptr)->instance) member_type; + using member_type = decltype(((type*)nullptr)->instance); visitor.TEMPLATE operator()( "instance" ); } }; @@ -204,19 +222,20 @@ struct member_name, 0> { static constexpr const cha inline void from_variant( const fc::variant& var, graphene::db::object_id_type& vo, uint32_t max_depth = 1 ) { try { - vo.number = 0; const auto& s = var.get_string(); auto first_dot = s.find('.'); + FC_ASSERT( first_dot != std::string::npos, "Missing the first dot" ); + FC_ASSERT( first_dot != 0, "Missing the space part" ); auto second_dot = s.find('.',first_dot+1); - FC_ASSERT( first_dot != second_dot ); - FC_ASSERT( first_dot != 0 && first_dot != std::string::npos ); - vo.number = fc::to_uint64(s.substr( second_dot+1 )); - FC_ASSERT( vo.number <= GRAPHENE_DB_MAX_INSTANCE_ID ); + FC_ASSERT( second_dot != std::string::npos, "Missing the second dot" ); + FC_ASSERT( second_dot != first_dot+1, "Missing the type part" ); + auto instance = fc::to_uint64(s.substr( second_dot+1 )); + FC_ASSERT( instance <= vo.max_instance, "instance overflow" ); auto space_id = fc::to_uint64( s.substr( 0, first_dot ) ); - FC_ASSERT( space_id <= 0xff ); - auto type_id = fc::to_uint64( s.substr( first_dot+1, second_dot-first_dot-1 ) ); - FC_ASSERT( type_id <= 0xff ); - vo.number |= (space_id << 56) | (type_id << 48); + FC_ASSERT( space_id <= vo.one_byte_mask, "space overflow" ); + auto type_id = fc::to_uint64( s.substr( first_dot+1, (second_dot-first_dot)-1 ) ); + FC_ASSERT( type_id <= vo.one_byte_mask, "type overflow"); + vo.reset( space_id, type_id, instance ); } FC_CAPTURE_AND_RETHROW( (var) ) } template void to_variant( const graphene::db::object_id& var, fc::variant& vo, uint32_t max_depth = 1 ) @@ -228,12 +247,15 @@ struct member_name, 0> { static constexpr const cha { try { const auto& s = var.get_string(); auto first_dot = s.find('.'); + FC_ASSERT( first_dot != std::string::npos, "Missing the first dot" ); + FC_ASSERT( first_dot != 0, "Missing the space part" ); auto second_dot = s.find('.',first_dot+1); - FC_ASSERT( first_dot != second_dot ); - FC_ASSERT( first_dot != 0 && first_dot != std::string::npos ); + FC_ASSERT( second_dot != std::string::npos, "Missing the second dot" ); + FC_ASSERT( second_dot != first_dot+1, "Missing the type part" ); FC_ASSERT( fc::to_uint64( s.substr( 0, first_dot ) ) == SpaceID && - fc::to_uint64( s.substr( first_dot+1, second_dot-first_dot-1 ) ) == TypeID, - "Space.Type.0 (${SpaceID}.${TypeID}.0) doesn't match expected value ${var}", ("TypeID",TypeID)("SpaceID",SpaceID)("var",var) ); + fc::to_uint64( 
s.substr( first_dot+1, (second_dot-first_dot)-1 ) ) == TypeID, + "Space.Type.0 (${SpaceID}.${TypeID}.0) doesn't match expected value ${var}", + ("TypeID",TypeID)("SpaceID",SpaceID)("var",var) ); vo.instance = fc::to_uint64(s.substr( second_dot+1 )); } FC_CAPTURE_AND_RETHROW( (var) ) } diff --git a/libraries/protocol/include/graphene/protocol/vote.hpp b/libraries/protocol/include/graphene/protocol/vote.hpp index de1e4bce86..cc52d08bd7 100644 --- a/libraries/protocol/include/graphene/protocol/vote.hpp +++ b/libraries/protocol/include/graphene/protocol/vote.hpp @@ -50,8 +50,9 @@ namespace graphene { namespace protocol { */ struct vote_id_type { - /// Lower 8 bits are type; upper 24 bits are instance - uint32_t content; + /// Lower 8 bits are type; upper 24 bits are instance. + /// By default type and instance are both set to 0. + uint32_t content = 0; friend size_t hash_value( vote_id_type v ) { return std::hash()(v.content); } enum vote_type @@ -62,10 +63,9 @@ struct vote_id_type VOTE_TYPE_COUNT }; - /// Default constructor. Sets type and instance to 0 - vote_id_type():content(0){} + vote_id_type() = default; /// Construct this vote_id_type with provided type and instance - vote_id_type(vote_type type, uint32_t instance = 0) + explicit vote_id_type(vote_type type, uint32_t instance = 0) : content(instance<<8 | type) {} /// Construct this vote_id_type from a serial string in the form "type:instance" diff --git a/libraries/wallet/include/graphene/wallet/wallet.hpp b/libraries/wallet/include/graphene/wallet/wallet.hpp index f19a0ef4d9..14a50e9d4e 100644 --- a/libraries/wallet/include/graphene/wallet/wallet.hpp +++ b/libraries/wallet/include/graphene/wallet/wallet.hpp @@ -279,7 +279,7 @@ class wallet_api * @param htlc_id the id of the HTLC object. * @returns the information about the HTLC object */ - fc::optional get_htlc(string htlc_id) const; + fc::optional get_htlc(const htlc_id_type& htlc_id) const; /** Lookup the id of a named account. * @param account_name_or_id the name or ID of the account to look up @@ -1062,7 +1062,7 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction canceling the order */ - signed_transaction cancel_order(object_id_type order_id, bool broadcast = false); + signed_transaction cancel_order(const limit_order_id_type& order_id, bool broadcast = false); /** Creates a new user-issued or market-issued asset. 
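Before the wallet declarations continue, it may help to spell out the arithmetic behind the object_id.hpp and vote.hpp hunks above: an object_id_type packs an 8-bit space, an 8-bit type and a 48-bit instance into one uint64_t (which is why from_variant now rejects anything above max_instance), and a vote_id_type packs the vote type into the low 8 bits and the instance into the upper 24. A small self-contained sketch of that packing, with illustrative values only (the helper names below are not from the patch):

#include <cassert>
#include <cstdint>
#include <cstdio>

// Illustration only -- not the graphene headers.
// object_id_type layout: | 8-bit space | 8-bit type | 48-bit instance |
constexpr uint8_t  instance_bits          = 48;
constexpr uint8_t  type_and_instance_bits = 56;
constexpr uint64_t max_instance           = 0x0000ffffffffffffULL;

constexpr uint64_t pack_object_id( uint8_t space, uint8_t type, uint64_t instance )
{
   return ( uint64_t(space) << type_and_instance_bits )
        | ( uint64_t(type)  << instance_bits )
        | ( instance & max_instance );
}

// vote_id_type layout: lower 8 bits are the vote type, upper 24 bits the instance.
constexpr uint32_t pack_vote_id( uint32_t type, uint32_t instance )
{
   return ( instance << 8 ) | type;
}

int main()
{
   const uint64_t id = pack_object_id( 1, 2, 3 );            // the string form "1.2.3"
   assert( ( id >> type_and_instance_bits )        == 1 );   // space()
   assert( ( ( id >> instance_bits ) & 0x00ffULL ) == 2 );   // type()
   assert( ( id & max_instance )                   == 3 );   // instance()
   std::printf( "1.2.3 packs to %#018llx\n", (unsigned long long) id );

   const uint32_t vid = pack_vote_id( /*type*/ 1, /*instance*/ 7 );
   assert( ( vid & 0xffu ) == 1 && ( vid >> 8 ) == 7 );
   return 0;
}

The named constants (instance_bits, type_and_instance_bits, max_instance) mirror the ones the hunk introduces in place of the removed GRAPHENE_DB_MAX_INSTANCE_ID macro and the bare 48/56 shift amounts.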
* @@ -1477,9 +1477,10 @@ class wallet_api * @param broadcast true if you wish to broadcast the transaction * @return the signed transaction */ - signed_transaction htlc_create( string source, string destination, string amount, string asset_symbol_or_id, - string hash_algorithm, const std::string& preimage_hash, uint32_t preimage_size, - const uint32_t claim_period_seconds, const std::string& memo, bool broadcast = false ); + signed_transaction htlc_create( const string& source, const string& destination, + const string& amount, const string& asset_symbol_or_id, const string& hash_algorithm, + const string& preimage_hash, uint32_t preimage_size, + uint32_t claim_period_seconds, const string& memo, bool broadcast = false); /**** * Update a hashed time lock contract @@ -1489,7 +1490,7 @@ class wallet_api * @param preimage the preimage that should evaluate to the preimage_hash * @return the signed transaction */ - signed_transaction htlc_redeem( string htlc_id, string issuer, const std::string& preimage, + signed_transaction htlc_redeem( const htlc_id_type& htlc_id, const string& issuer, const std::string& preimage, bool broadcast = false ); /***** @@ -1501,7 +1502,7 @@ class wallet_api * @param broadcast true to broadcast to the network * @return the signed transaction */ - signed_transaction htlc_extend(string htlc_id, string issuer, const uint32_t seconds_to_add, + signed_transaction htlc_extend( const htlc_id_type& htlc_id, const string& issuer, uint32_t seconds_to_add, bool broadcast = false); /** diff --git a/libraries/wallet/include/graphene/wallet/wallet_structs.hpp b/libraries/wallet/include/graphene/wallet/wallet_structs.hpp index 4c1a19fb7d..7d5b33dff9 100644 --- a/libraries/wallet/include/graphene/wallet/wallet_structs.hpp +++ b/libraries/wallet/include/graphene/wallet/wallet_structs.hpp @@ -166,7 +166,7 @@ struct wallet_data bool update_account(const account_object& acct) { auto& idx = my_accounts.get(); - auto itr = idx.find(acct.get_id()); + auto itr = idx.find(acct.id); if( itr != idx.end() ) { idx.replace(itr, acct); diff --git a/libraries/wallet/wallet.cpp b/libraries/wallet/wallet.cpp index 5ae5507406..42d7cdca6d 100644 --- a/libraries/wallet/wallet.cpp +++ b/libraries/wallet/wallet.cpp @@ -199,15 +199,16 @@ uint64_t wallet_api::get_asset_count()const return my->_remote_db->get_asset_count(); } -signed_transaction wallet_api::htlc_create( string source, string destination, string amount, string asset_symbol, - string hash_algorithm, const std::string& preimage_hash, uint32_t preimage_size, - const uint32_t claim_period_seconds, const std::string& memo, bool broadcast) +signed_transaction wallet_api::htlc_create( const string& source, const string& destination, + const string& amount, const string& asset_symbol, const string& hash_algorithm, + const string& preimage_hash, uint32_t preimage_size, + uint32_t claim_period_seconds, const string& memo, bool broadcast ) { return my->htlc_create(source, destination, amount, asset_symbol, hash_algorithm, preimage_hash, preimage_size, claim_period_seconds, memo, broadcast); } -fc::optional wallet_api::get_htlc(std::string htlc_id) const +fc::optional wallet_api::get_htlc(const htlc_id_type& htlc_id) const { fc::optional optional_obj = my->get_htlc(htlc_id); if ( optional_obj.valid() ) @@ -263,15 +264,15 @@ fc::optional wallet_api::get_htlc(std::string htlc_id) const return fc::optional(); } -signed_transaction wallet_api::htlc_redeem( std::string htlc_id, std::string issuer, const std::string& preimage, - bool broadcast) 
+signed_transaction wallet_api::htlc_redeem( const htlc_id_type& htlc_id, const string& issuer, + const string& preimage, bool broadcast) { return my->htlc_redeem(htlc_id, issuer, std::vector(preimage.begin(), preimage.end()), broadcast); } -signed_transaction wallet_api::htlc_extend ( std::string htlc_id, std::string issuer, const uint32_t seconds_to_add, - bool broadcast) +signed_transaction wallet_api::htlc_extend( const htlc_id_type& htlc_id, const string& issuer, + uint32_t seconds_to_add, bool broadcast) { return my->htlc_extend(htlc_id, issuer, seconds_to_add, broadcast); } @@ -1327,7 +1328,7 @@ signed_transaction wallet_api::borrow_asset_ext( string seller_name, string amou amount_of_collateral, extensions, broadcast); } -signed_transaction wallet_api::cancel_order(object_id_type order_id, bool broadcast) +signed_transaction wallet_api::cancel_order(const limit_order_id_type& order_id, bool broadcast) { FC_ASSERT(!is_locked()); return my->cancel_order(order_id, broadcast); diff --git a/libraries/wallet/wallet_account.cpp b/libraries/wallet/wallet_account.cpp index cb1b782e72..cd17b08bec 100644 --- a/libraries/wallet/wallet_account.cpp +++ b/libraries/wallet/wallet_account.cpp @@ -61,7 +61,7 @@ namespace graphene { namespace wallet { namespace detail { this->get_account( registrar_account ); FC_ASSERT( registrar_account_object.is_lifetime_member() ); - account_id_type registrar_account_id = registrar_account_object.id; + account_id_type registrar_account_id = registrar_account_object.get_id(); account_object referrer_account_object = this->get_account( referrer_account ); @@ -122,7 +122,7 @@ namespace graphene { namespace wallet { namespace detail { { FC_ASSERT( !self.is_locked() ); - account_id_type account_id = get_account(account).id; + account_id_type account_id = get_account(account).get_id(); custom_operation op; account_storage_map store; @@ -241,7 +241,7 @@ namespace graphene { namespace wallet { namespace detail { account_object registrar_account_object = get_account( registrar_account ); - account_id_type registrar_account_id = registrar_account_object.id; + account_id_type registrar_account_id = registrar_account_object.get_id(); account_object referrer_account_object = get_account( referrer_account ); account_create_op.referrer = referrer_account_object.id; @@ -329,7 +329,7 @@ namespace graphene { namespace wallet { namespace detail { { if( has_wildcard ) continue; - for( const public_key_type& pub : _wallet.extra_keys[ claimer.id ] ) + for( const public_key_type& pub : _wallet.extra_keys[ claimer.get_id() ] ) { addrs.push_back( address(pub) ); auto it = _keys.find( pub ); diff --git a/libraries/wallet/wallet_api_impl.cpp b/libraries/wallet/wallet_api_impl.cpp index c246e3b22a..aadb2da8ce 100644 --- a/libraries/wallet/wallet_api_impl.cpp +++ b/libraries/wallet/wallet_api_impl.cpp @@ -341,7 +341,7 @@ namespace graphene { namespace wallet { namespace detail { for( const fc::optional& optional_account : owner_account_objects ) if (optional_account) { - std::string account_id = account_id_to_string(optional_account->id); + std::string account_id = account_id_to_string(optional_account->get_id()); fc::optional witness_obj = _remote_db->get_witness_by_account(account_id); if (witness_obj) claim_registered_witness(optional_account->name); @@ -386,7 +386,7 @@ namespace graphene { namespace wallet { namespace detail { { assert( it != _wallet.my_accounts.end() ); old_accounts.push_back( *it ); - std::string account_id = account_id_to_string(old_accounts.back().id); + std::string 
account_id = account_id_to_string(old_accounts.back().get_id()); account_ids_to_send.push_back( account_id ); ++it; } diff --git a/libraries/wallet/wallet_api_impl.hpp b/libraries/wallet/wallet_api_impl.hpp index 8964c9576e..7e53071869 100644 --- a/libraries/wallet/wallet_api_impl.hpp +++ b/libraries/wallet/wallet_api_impl.hpp @@ -126,10 +126,10 @@ class wallet_api_impl bool is_locked()const; template - graphene::db::object_downcast_t get_object(ID id)const + graphene::db::object_downcast_t get_object(const ID& id)const { - auto ob = _remote_db->get_objects({id}, {}).front(); - return ob.template as>( GRAPHENE_MAX_NESTED_OBJECTS ); + auto ob = _remote_db->get_objects({object_id_type(id)}, {}).front(); + return ob.template as>( GRAPHENE_MAX_NESTED_OBJECTS ); } /*** @@ -167,7 +167,7 @@ class wallet_api_impl extended_asset_object get_asset(string asset_symbol_or_id)const; - fc::optional get_htlc(string htlc_id) const; + fc::optional get_htlc(const htlc_id_type& htlc_id) const; asset_id_type get_asset_id(const string& asset_symbol_or_id) const; @@ -297,14 +297,16 @@ class wallet_api_impl signed_transaction update_worker_votes( string account, worker_vote_delta delta, bool broadcast ); - signed_transaction htlc_create( string source, string destination, string amount, string asset_symbol, - string hash_algorithm, const std::string& preimage_hash, uint32_t preimage_size, - const uint32_t claim_period_seconds, const std::string& memo, bool broadcast = false ); + signed_transaction htlc_create( const string& source, const string& destination, + const string& amount, const string& asset_symbol, const string& hash_algorithm, + const string& preimage_hash, uint32_t preimage_size, + uint32_t claim_period_seconds, const string& memo, bool broadcast = false); - signed_transaction htlc_redeem( string htlc_id, string issuer, const std::vector& preimage, - bool broadcast ); + signed_transaction htlc_redeem( const htlc_id_type& htlc_id, const string& issuer, + const std::vector& preimage, bool broadcast ); - signed_transaction htlc_extend ( string htlc_id, string issuer, const uint32_t seconds_to_add, bool broadcast); + signed_transaction htlc_extend( const htlc_id_type& htlc_id, const string& issuer, + uint32_t seconds_to_add, bool broadcast); signed_transaction account_store_map(string account, string catalog, bool remove, flat_map> key_values, bool broadcast); @@ -360,7 +362,7 @@ class wallet_api_impl string amount_of_collateral, call_order_update_operation::extensions_type extensions, bool broadcast = false); - signed_transaction cancel_order(limit_order_id_type order_id, bool broadcast = false); + signed_transaction cancel_order(const limit_order_id_type& order_id, bool broadcast = false); signed_transaction transfer(string from, string to, string amount, string asset_symbol, string memo, bool broadcast = false); diff --git a/libraries/wallet/wallet_asset.cpp b/libraries/wallet/wallet_asset.cpp index 80bd89e2c1..d1d9046ec9 100644 --- a/libraries/wallet/wallet_asset.cpp +++ b/libraries/wallet/wallet_asset.cpp @@ -82,7 +82,7 @@ namespace graphene { namespace wallet { namespace detail { return fc::variant(asset_symbol_or_id, 1).as( 1 ); opt_asset = _remote_db->lookup_asset_symbols( {asset_symbol_or_id} ); FC_ASSERT( (opt_asset.size() > 0) && (opt_asset[0].valid()) ); - return opt_asset[0]->id; + return opt_asset[0]->get_id(); } signed_transaction wallet_api_impl::create_asset(string issuer, string symbol, diff --git a/libraries/wallet/wallet_sign.cpp b/libraries/wallet/wallet_sign.cpp index 
7ce982e622..3b24a691c8 100644 --- a/libraries/wallet/wallet_sign.cpp +++ b/libraries/wallet/wallet_sign.cpp @@ -442,7 +442,7 @@ namespace graphene { namespace wallet { namespace detail { _wallet.update_account(account); - _wallet.extra_keys[account.id].insert(wif_pub_key); + _wallet.extra_keys[account.get_id()].insert(wif_pub_key); return all_keys_for_account.find(wif_pub_key) != all_keys_for_account.end(); } @@ -496,13 +496,13 @@ namespace graphene { namespace wallet { namespace detail { get_object( update_op.proposal ); for( const std::string& name : delta.active_approvals_to_add ) - update_op.active_approvals_to_add.insert( get_account( name ).id ); + update_op.active_approvals_to_add.insert( get_account( name ).get_id() ); for( const std::string& name : delta.active_approvals_to_remove ) - update_op.active_approvals_to_remove.insert( get_account( name ).id ); + update_op.active_approvals_to_remove.insert( get_account( name ).get_id() ); for( const std::string& name : delta.owner_approvals_to_add ) - update_op.owner_approvals_to_add.insert( get_account( name ).id ); + update_op.owner_approvals_to_add.insert( get_account( name ).get_id() ); for( const std::string& name : delta.owner_approvals_to_remove ) - update_op.owner_approvals_to_remove.insert( get_account( name ).id ); + update_op.owner_approvals_to_remove.insert( get_account( name ).get_id() ); for( const std::string& k : delta.key_approvals_to_add ) update_op.key_approvals_to_add.insert( public_key_type( k ) ); for( const std::string& k : delta.key_approvals_to_remove ) diff --git a/libraries/wallet/wallet_transfer.cpp b/libraries/wallet/wallet_transfer.cpp index 524764500e..c157b29433 100644 --- a/libraries/wallet/wallet_transfer.cpp +++ b/libraries/wallet/wallet_transfer.cpp @@ -57,8 +57,8 @@ namespace graphene { namespace wallet { namespace detail { account_object from_account = get_account(from); account_object to_account = get_account(to); - account_id_type from_id = from_account.id; - account_id_type to_id = to_account.id; + account_id_type from_id = from_account.get_id(); + account_id_type to_id = to_account.get_id(); transfer_operation xfer_op; @@ -83,9 +83,10 @@ namespace graphene { namespace wallet { namespace detail { return sign_transaction(tx, broadcast); } FC_CAPTURE_AND_RETHROW( (from)(to)(amount)(asset_symbol)(memo)(broadcast) ) } - signed_transaction wallet_api_impl::htlc_create( string source, string destination, string amount, - string asset_symbol, string hash_algorithm, const std::string& preimage_hash, uint32_t preimage_size, - const uint32_t claim_period_seconds, const std::string& memo, bool broadcast ) + signed_transaction wallet_api_impl::htlc_create( const string& source, const string& destination, + const string& amount, const string& asset_symbol, const string& hash_algorithm, + const string& preimage_hash, uint32_t preimage_size, + uint32_t claim_period_seconds, const string& memo, bool broadcast ) { try { @@ -122,7 +123,7 @@ namespace graphene { namespace wallet { namespace detail { (preimage_hash)(preimage_size)(claim_period_seconds)(broadcast) ) } - signed_transaction wallet_api_impl::htlc_redeem( string htlc_id, string issuer, + signed_transaction wallet_api_impl::htlc_redeem( const htlc_id_type& htlc_id, const string& issuer, const std::vector& preimage, bool broadcast ) { try @@ -147,8 +148,8 @@ namespace graphene { namespace wallet { namespace detail { } FC_CAPTURE_AND_RETHROW( (htlc_id)(issuer)(preimage)(broadcast) ) } - signed_transaction wallet_api_impl::htlc_extend ( string htlc_id, string 
issuer, const uint32_t seconds_to_add, - bool broadcast) + signed_transaction wallet_api_impl::htlc_extend ( const htlc_id_type& htlc_id, const string& issuer, + uint32_t seconds_to_add, bool broadcast) { try { @@ -172,11 +173,9 @@ namespace graphene { namespace wallet { namespace detail { } FC_CAPTURE_AND_RETHROW( (htlc_id)(issuer)(seconds_to_add)(broadcast) ) } - fc::optional wallet_api_impl::get_htlc(string htlc_id) const + fc::optional wallet_api_impl::get_htlc(const htlc_id_type& htlc_id) const { - htlc_id_type id; - fc::from_variant(htlc_id, id); - auto obj = _remote_db->get_objects( { id }, {}).front(); + auto obj = _remote_db->get_objects( { object_id_type(htlc_id) }, {}).front(); if ( !obj.is_null() ) { return fc::optional(obj.template as(GRAPHENE_MAX_NESTED_OBJECTS)); @@ -236,7 +235,7 @@ namespace graphene { namespace wallet { namespace detail { return sign_transaction(trx, broadcast); } - signed_transaction wallet_api_impl::cancel_order(limit_order_id_type order_id, bool broadcast ) + signed_transaction wallet_api_impl::cancel_order(const limit_order_id_type& order_id, bool broadcast ) { try { FC_ASSERT(!is_locked()); signed_transaction trx; diff --git a/libraries/wallet/wallet_voting.cpp b/libraries/wallet/wallet_voting.cpp index 1dc911e22b..a357d02fe6 100644 --- a/libraries/wallet/wallet_voting.cpp +++ b/libraries/wallet/wallet_voting.cpp @@ -70,7 +70,7 @@ namespace graphene { namespace wallet { namespace detail { vector< object_id_type > query_ids; for( const worker_id_type& wid : merged ) - query_ids.push_back( wid ); + query_ids.push_back( object_id_type(wid) ); flat_set new_votes( acct.options.votes ); @@ -78,15 +78,16 @@ namespace graphene { namespace wallet { namespace detail { for( const variant& obj : objects ) { worker_object wo; + worker_id_type wo_id { wo.id }; from_variant( obj, wo, GRAPHENE_MAX_NESTED_OBJECTS ); new_votes.erase( wo.vote_for ); new_votes.erase( wo.vote_against ); - if( delta.vote_for.find( wo.id ) != delta.vote_for.end() ) + if( delta.vote_for.find( wo_id ) != delta.vote_for.end() ) new_votes.insert( wo.vote_for ); - else if( delta.vote_against.find( wo.id ) != delta.vote_against.end() ) + else if( delta.vote_against.find( wo_id ) != delta.vote_against.end() ) new_votes.insert( wo.vote_against ); else - assert( delta.vote_abstain.find( wo.id ) != delta.vote_abstain.end() ); + assert( delta.vote_abstain.find( wo_id ) != delta.vote_abstain.end() ); } account_update_operation update_op; diff --git a/tests/app/main.cpp b/tests/app/main.cpp index f5e937250f..9da9b1449f 100644 --- a/tests/app/main.cpp +++ b/tests/app/main.cpp @@ -306,7 +306,8 @@ BOOST_AUTO_TEST_CASE( three_node_network ) BOOST_TEST_MESSAGE( "Creating transfer tx" ); graphene::chain::precomputable_transaction trx; { - account_id_type nathan_id = db2->get_index_type().indices().get().find( "nathan" )->id; + account_id_type nathan_id = db2->get_index_type().indices().get().find( "nathan" ) + ->get_id(); fc::ecc::private_key nathan_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("nathan"))); balance_claim_operation claim_op; diff --git a/tests/cli/main.cpp b/tests/cli/main.cpp index 7e22d184d0..41b3b632c8 100644 --- a/tests/cli/main.cpp +++ b/tests/cli/main.cpp @@ -665,7 +665,7 @@ BOOST_FIXTURE_TEST_CASE( mpa_tests, cli_fixture ) { // Play with asset fee pool - auto objs = con.wallet_api_ptr->get_object( bobcoin.dynamic_asset_data_id ) + auto objs = con.wallet_api_ptr->get_object( object_id_type( bobcoin.dynamic_asset_data_id ) ) .as>( FC_PACK_MAX_DEPTH ); idump( (objs) ); 
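Much of the churn in the wallet and test hunks of this patch (".id" becoming "get_id()", and casts such as object_id_type(wid) or object_id_type(bobcoin.dynamic_asset_data_id)) follows from one change: the conversions between the generic object_id_type and the typed object_id<SpaceID,TypeID> are now explicit in both directions. A toy sketch of the C++ mechanics involved, using stand-in types rather than the graphene definitions (generic_id, typed_id and account_like are made-up names):

#include <cstdint>

struct generic_id { uint64_t number = 0; };     // stand-in for object_id_type

template< uint8_t SpaceID, uint8_t TypeID >
struct typed_id                                 // stand-in for object_id<SpaceID,TypeID>
{
   uint64_t instance = 0;

   typed_id() = default;
   explicit typed_id( const generic_id& g )     // explicit: no silent generic -> typed conversion
      : instance( g.number & 0x0000ffffffffffffULL ) {}
   explicit operator generic_id() const         // explicit: no silent typed -> generic conversion
   { return generic_id{ (uint64_t(SpaceID) << 56) | (uint64_t(TypeID) << 48) | instance }; }
};

using account_idT = typed_id<1,2>;              // illustrative alias

struct account_like                             // stand-in for account_object
{
   generic_id id;                                          // generic id stored on the object
   account_idT get_id() const { return account_idT(id); }  // typed accessor
};

int main()
{
   account_like acct;
   acct.id.number = (uint64_t(1) << 56) | (uint64_t(2) << 48) | 42;

   // account_idT a = acct.id;         // would no longer compile: the conversion is explicit
   account_idT a = acct.get_id();      // use the typed accessor instead
   generic_id g( a );                  // an explicit cast is needed in the other direction
   return ( g.number == acct.id.number ) ? 0 : 1;
}

Making both conversions explicit trades a little call-site verbosity for compile-time detection of cross-type id mix-ups, which is the pattern the surrounding wallet and test edits apply mechanically.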
BOOST_REQUIRE_EQUAL( objs.size(), 1u ); @@ -675,7 +675,7 @@ BOOST_FIXTURE_TEST_CASE( mpa_tests, cli_fixture ) BOOST_TEST_MESSAGE("Fund fee pool"); con.wallet_api_ptr->fund_asset_fee_pool("nathan", "BOBCOIN", "2", true); - objs = con.wallet_api_ptr->get_object( bobcoin.dynamic_asset_data_id ) + objs = con.wallet_api_ptr->get_object( object_id_type( bobcoin.dynamic_asset_data_id ) ) .as>( FC_PACK_MAX_DEPTH ); BOOST_REQUIRE_EQUAL( objs.size(), 1u ); bobcoin_dyn = objs[0]; @@ -687,7 +687,7 @@ BOOST_FIXTURE_TEST_CASE( mpa_tests, cli_fixture ) BOOST_TEST_MESSAGE("Claim fee pool"); con.wallet_api_ptr->claim_asset_fee_pool("BOBCOIN", "1", true); - objs = con.wallet_api_ptr->get_object( bobcoin.dynamic_asset_data_id ) + objs = con.wallet_api_ptr->get_object( object_id_type( bobcoin.dynamic_asset_data_id ) ) .as>( FC_PACK_MAX_DEPTH ); BOOST_REQUIRE_EQUAL( objs.size(), 1u ); bobcoin_dyn = objs[0]; @@ -708,7 +708,7 @@ BOOST_FIXTURE_TEST_CASE( mpa_tests, cli_fixture ) asset_update_feed_producers_operation aufp_op; aufp_op.issuer = nathan_acct.id; aufp_op.asset_to_update = bobcoin.id; - aufp_op.new_feed_producers = { nathan_acct.id }; + aufp_op.new_feed_producers = { nathan_acct.get_id() }; con.wallet_api_ptr->add_operation_to_builder_transaction( handle, aufp_op ); con.wallet_api_ptr->set_fees_on_builder_transaction( handle, "1.3.0" ); con.wallet_api_ptr->sign_builder_transaction( handle, true ); @@ -725,8 +725,8 @@ BOOST_FIXTURE_TEST_CASE( mpa_tests, cli_fixture ) // Publish price feed BOOST_TEST_MESSAGE("Publish price feed"); price_feed feed; - feed.settlement_price = price( asset(1,bobcoin.id), asset(2) ); - feed.core_exchange_rate = price( asset(1,bobcoin.id), asset(1) ); + feed.settlement_price = price( asset(1,bobcoin.get_id()), asset(2) ); + feed.core_exchange_rate = price( asset(1,bobcoin.get_id()), asset(1) ); con.wallet_api_ptr->publish_asset_feed( "nathan", "BOBCOIN", feed, true ); asset_bitasset_data_object bob_bitasset = con.wallet_api_ptr->get_bitasset_data( "BOBCOIN" ); BOOST_CHECK( bob_bitasset.current_feed.settlement_price == feed.settlement_price ); @@ -815,7 +815,7 @@ BOOST_FIXTURE_TEST_CASE( mpa_tests, cli_fixture ) orders = con.wallet_api_ptr->get_limit_orders( "BOBCOIN", "1.3.0", 10 ); BOOST_REQUIRE_EQUAL( orders.size(), 1u ); BOOST_CHECK_EQUAL( orders.front().for_sale.value, 100 * GRAPHENE_BLOCKCHAIN_PRECISION ); - limit_order_id_type nathan_order_id = orders.front().id; + limit_order_id_type nathan_order_id = orders.front().get_id(); BOOST_CHECK(generate_block(app1)); check_nathan_bobcoin_balance( 3000 ); @@ -944,7 +944,7 @@ BOOST_FIXTURE_TEST_CASE( cli_get_signed_transaction_signers, cli_fixture ) const auto &test_acc = con.wallet_api_ptr->get_account("test"); flat_set expected_signers = {test_bki.pub_key}; - vector > expected_key_refs{{test_acc.id, test_acc.id}}; + vector > expected_key_refs{{test_acc.get_id(), test_acc.get_id()}}; auto signers = con.wallet_api_ptr->get_transaction_signers(signed_trx); BOOST_CHECK(signers == expected_signers); @@ -1116,7 +1116,7 @@ BOOST_FIXTURE_TEST_CASE( cli_get_available_transaction_signers, cli_fixture ) vector > expected_key_refs; expected_key_refs.push_back(flat_set()); expected_key_refs.push_back(flat_set()); - expected_key_refs.push_back({test_acc.id}); + expected_key_refs.push_back({test_acc.get_id()}); auto key_refs = con.wallet_api_ptr->get_key_references({expected_signers.begin(), expected_signers.end()}); std::sort(key_refs.begin(), key_refs.end()); @@ -1650,6 +1650,7 @@ BOOST_AUTO_TEST_CASE( cli_create_htlc ) // normally, a 
wallet would watch block production, and find the transaction. Here, we can cheat: std::string alice_htlc_id_as_string; + htlc_id_type alice_htlc_id; { BOOST_TEST_MESSAGE("The system is generating a block"); graphene::chain::signed_block result_block; @@ -1657,14 +1658,15 @@ BOOST_AUTO_TEST_CASE( cli_create_htlc ) // get the ID: auto tmp_hist = con.wallet_api_ptr->get_account_history("alice", 1); - htlc_id_type htlc_id = tmp_hist[0].op.result.get(); + htlc_id_type htlc_id { tmp_hist[0].op.result.get() }; + alice_htlc_id = htlc_id; alice_htlc_id_as_string = (std::string)(object_id_type)htlc_id; BOOST_TEST_MESSAGE("Alice shares the HTLC ID with Bob. The HTLC ID is: " + alice_htlc_id_as_string); } // Bob can now look over Alice's HTLC, to see if it is what was agreed to. BOOST_TEST_MESSAGE("Bob retrieves the HTLC Object by ID to examine it."); - auto alice_htlc = con.wallet_api_ptr->get_htlc(alice_htlc_id_as_string); + auto alice_htlc = con.wallet_api_ptr->get_htlc(alice_htlc_id); BOOST_TEST_MESSAGE("The HTLC Object is: " + fc::json::to_pretty_string(alice_htlc)); // Bob likes what he sees, so he creates an HTLC, using the info he retrieved from Alice's HTLC @@ -1673,6 +1675,7 @@ BOOST_AUTO_TEST_CASE( cli_create_htlc ) // normally, a wallet would watch block production, and find the transaction. Here, we can cheat: std::string bob_htlc_id_as_string; + htlc_id_type bob_htlc_id; { BOOST_TEST_MESSAGE("The system is generating a block"); graphene::chain::signed_block result_block; @@ -1680,21 +1683,22 @@ BOOST_AUTO_TEST_CASE( cli_create_htlc ) // get the ID: auto tmp_hist = con.wallet_api_ptr->get_account_history("bob", 1); - htlc_id_type htlc_id = tmp_hist[0].op.result.get(); + htlc_id_type htlc_id { tmp_hist[0].op.result.get() }; + bob_htlc_id = htlc_id; bob_htlc_id_as_string = (std::string)(object_id_type)htlc_id; BOOST_TEST_MESSAGE("Bob shares the HTLC ID with Alice. The HTLC ID is: " + bob_htlc_id_as_string); } // Alice can now look over Bob's HTLC, to see if it is what was agreed to: BOOST_TEST_MESSAGE("Alice retrieves the HTLC Object by ID to examine it."); - auto bob_htlc = con.wallet_api_ptr->get_htlc(bob_htlc_id_as_string); + auto bob_htlc = con.wallet_api_ptr->get_htlc(bob_htlc_id); BOOST_TEST_MESSAGE("The HTLC Object is: " + fc::json::to_pretty_string(bob_htlc)); // Alice likes what she sees, so uses her preimage to get her BOBCOIN { BOOST_TEST_MESSAGE("Alice uses her preimage to retrieve the BOBCOIN"); std::string secret = "My Secret"; - con.wallet_api_ptr->htlc_redeem(bob_htlc_id_as_string, "alice", secret, true); + con.wallet_api_ptr->htlc_redeem(bob_htlc_id, "alice", secret, true); BOOST_TEST_MESSAGE("The system is generating a block"); BOOST_CHECK(generate_block(app1)); } @@ -1704,7 +1708,7 @@ BOOST_AUTO_TEST_CASE( cli_create_htlc ) { BOOST_TEST_MESSAGE("Bob uses Alice's preimage to retrieve the BOBCOIN"); std::string secret = "My Secret"; - con.wallet_api_ptr->htlc_redeem(alice_htlc_id_as_string, "bob", secret, true); + con.wallet_api_ptr->htlc_redeem(alice_htlc_id, "bob", secret, true); BOOST_TEST_MESSAGE("The system is generating a block"); BOOST_CHECK(generate_block(app1)); } @@ -2197,6 +2201,7 @@ BOOST_AUTO_TEST_CASE( cli_create_htlc_bsip64 ) // normally, a wallet would watch block production, and find the transaction. 
Here, we can cheat: std::string alice_htlc_id_as_string; + htlc_id_type alice_htlc_id; { BOOST_TEST_MESSAGE("The system is generating a block"); graphene::chain::signed_block result_block; @@ -2204,14 +2209,15 @@ BOOST_AUTO_TEST_CASE( cli_create_htlc_bsip64 ) // get the ID: auto tmp_hist = con.wallet_api_ptr->get_account_history("alice", 1); - htlc_id_type htlc_id = tmp_hist[0].op.result.get(); + htlc_id_type htlc_id { tmp_hist[0].op.result.get() }; + alice_htlc_id = htlc_id; alice_htlc_id_as_string = (std::string)(object_id_type)htlc_id; BOOST_TEST_MESSAGE("Alice shares the HTLC ID with Bob. The HTLC ID is: " + alice_htlc_id_as_string); } // Bob can now look over Alice's HTLC, to see if it is what was agreed to. BOOST_TEST_MESSAGE("Bob retrieves the HTLC Object by ID to examine it."); - auto alice_htlc = con.wallet_api_ptr->get_htlc(alice_htlc_id_as_string); + auto alice_htlc = con.wallet_api_ptr->get_htlc(alice_htlc_id); BOOST_TEST_MESSAGE("The HTLC Object is: " + fc::json::to_pretty_string(alice_htlc)); // Bob likes what he sees, so he creates an HTLC, using the info he retrieved from Alice's HTLC @@ -2221,6 +2227,7 @@ BOOST_AUTO_TEST_CASE( cli_create_htlc_bsip64 ) // normally, a wallet would watch block production, and find the transaction. Here, we can cheat: std::string bob_htlc_id_as_string; + htlc_id_type bob_htlc_id; { BOOST_TEST_MESSAGE("The system is generating a block"); graphene::chain::signed_block result_block; @@ -2228,20 +2235,21 @@ BOOST_AUTO_TEST_CASE( cli_create_htlc_bsip64 ) // get the ID: auto tmp_hist = con.wallet_api_ptr->get_account_history("bob", 1); - htlc_id_type htlc_id = tmp_hist[0].op.result.get(); + htlc_id_type htlc_id { tmp_hist[0].op.result.get() }; + bob_htlc_id = htlc_id; bob_htlc_id_as_string = (std::string)(object_id_type)htlc_id; BOOST_TEST_MESSAGE("Bob shares the HTLC ID with Alice. 
The HTLC ID is: " + bob_htlc_id_as_string); } // Alice can now look over Bob's HTLC, to see if it is what was agreed to: BOOST_TEST_MESSAGE("Alice retrieves the HTLC Object by ID to examine it."); - auto bob_htlc = con.wallet_api_ptr->get_htlc(bob_htlc_id_as_string); + auto bob_htlc = con.wallet_api_ptr->get_htlc(bob_htlc_id); BOOST_TEST_MESSAGE("The HTLC Object is: " + fc::json::to_pretty_string(bob_htlc)); // Alice likes what she sees, so uses her preimage to get her BOBCOIN { BOOST_TEST_MESSAGE("Alice uses her preimage to retrieve the BOBCOIN"); - con.wallet_api_ptr->htlc_redeem(bob_htlc_id_as_string, "alice", preimage_string, true); + con.wallet_api_ptr->htlc_redeem(bob_htlc_id, "alice", preimage_string, true); BOOST_TEST_MESSAGE("The system is generating a block"); BOOST_CHECK(generate_block(app1)); } @@ -2263,7 +2271,7 @@ BOOST_AUTO_TEST_CASE( cli_create_htlc_bsip64 ) // Bob can use the preimage to retrieve his BTS { BOOST_TEST_MESSAGE("Bob uses Alice's preimage to retrieve the BOBCOIN"); - con.wallet_api_ptr->htlc_redeem(alice_htlc_id_as_string, "bob", preimage_string, true); + con.wallet_api_ptr->htlc_redeem(alice_htlc_id, "bob", preimage_string, true); BOOST_TEST_MESSAGE("The system is generating a block"); BOOST_CHECK(generate_block(app1)); } diff --git a/tests/common/database_fixture.cpp b/tests/common/database_fixture.cpp index fb0e11bde6..a863964479 100644 --- a/tests/common/database_fixture.cpp +++ b/tests/common/database_fixture.cpp @@ -595,7 +595,7 @@ void database_fixture_base::verify_asset_supplies( const database& db ) for( const asset_object& asset_obj : db.get_index_type().indices() ) { const auto& dasset_obj = asset_obj.dynamic_asset_data_id(db); - total_balances[asset_obj.id] += dasset_obj.accumulated_fees; + total_balances[asset_obj.get_id()] += dasset_obj.accumulated_fees; total_balances[asset_id_type()] += dasset_obj.fee_pool; if( asset_obj.is_market_issued() ) { @@ -606,7 +606,7 @@ void database_fixture_base::verify_asset_supplies( const database& db ) if( !bad.has_settlement() ) // Note: if asset has been globally settled, do not check total debt total_debts[bad.asset_id] += bad.individual_settlement_debt; } - total_balances[asset_obj.id] += dasset_obj.confidential_supply.value; + total_balances[asset_obj.get_id()] += dasset_obj.confidential_supply.value; } for( const vesting_balance_object& vbo : db.get_index_type< vesting_balance_index >().indices() ) total_balances[ vbo.balance.asset_id ] += vbo.balance.amount; @@ -686,7 +686,8 @@ void database_fixture_base::verify_asset_supplies( const database& db ) for( const asset_object& asset_obj : db.get_index_type().indices() ) { - BOOST_CHECK_EQUAL(total_balances[asset_obj.id].value, asset_obj.dynamic_asset_data_id(db).current_supply.value); + BOOST_CHECK_EQUAL( total_balances[asset_obj.get_id()].value, + asset_obj.dynamic_asset_data_id(db).current_supply.value ); } BOOST_CHECK_EQUAL( core_in_orders.value , reported_core_in_orders.value ); @@ -1360,7 +1361,7 @@ const call_order_object* database_fixture_base::borrow( const account_object& wh verify_asset_supplies(db); auto& call_idx = db.get_index_type().indices().get(); - auto itr = call_idx.find( boost::make_tuple(who.id, what.asset_id) ); + auto itr = call_idx.find( boost::make_tuple(who.get_id(), what.asset_id) ); const call_order_object* call_obj = nullptr; if( itr != call_idx.end() ) @@ -2225,7 +2226,7 @@ void database_fixture_base::set_htlc_committee_parameters() trx.operations.push_back(cop); graphene::chain::processed_transaction proc_trx = 
db.push_transaction(trx); trx.clear(); - proposal_id_type good_proposal_id = proc_trx.operation_results[0].get(); + proposal_id_type good_proposal_id { proc_trx.operation_results[0].get() }; proposal_update_operation puo; puo.proposal = good_proposal_id; diff --git a/tests/common/database_fixture.hpp b/tests/common/database_fixture.hpp index bf88a7bc83..340031b38e 100644 --- a/tests/common/database_fixture.hpp +++ b/tests/common/database_fixture.hpp @@ -172,12 +172,12 @@ extern uint32_t GRAPHENE_TESTING_GENESIS_TIMESTAMP; #define ACTOR(name) \ PREP_ACTOR(name) \ const auto name = create_account(BOOST_PP_STRINGIZE(name), name ## _public_key); \ - graphene::chain::account_id_type name ## _id = name.id; (void)name ## _id; + graphene::chain::account_id_type name ## _id = name.get_id(); (void)name ## _id; #define GET_ACTOR(name) \ fc::ecc::private_key name ## _private_key = generate_private_key(BOOST_PP_STRINGIZE(name)); \ const account_object& name = get_account(BOOST_PP_STRINGIZE(name)); \ - graphene::chain::account_id_type name ## _id = name.id; \ + graphene::chain::account_id_type name ## _id = name.get_id(); \ (void)name ##_id #define ACTORS_IMPL(r, data, elem) ACTOR(elem) diff --git a/tests/elasticsearch/main.cpp b/tests/elasticsearch/main.cpp index 94e64233a2..6fb4fbb266 100644 --- a/tests/elasticsearch/main.cpp +++ b/tests/elasticsearch/main.cpp @@ -144,12 +144,12 @@ BOOST_AUTO_TEST_CASE(elasticsearch_account_history) { asset_id_type core_id; const asset_object& usd = create_user_issued_asset( "MYUSD" ); - asset_id_type usd_id = usd.id; + asset_id_type usd_id = usd.get_id(); issue_uia( sam, usd.amount(init_amount) ); issue_uia( ted, usd.amount(init_amount) ); const asset_object& eur = create_user_issued_asset( "MYEUR", sam, white_list ); - asset_id_type eur_id = eur.id; + asset_id_type eur_id = eur.get_id(); issue_uia( sam, eur.amount(init_amount) ); issue_uia( ted, eur.amount(init_amount) ); @@ -158,8 +158,8 @@ BOOST_AUTO_TEST_CASE(elasticsearch_account_history) { flat_map collateral_map; collateral_map[usd_id] = price( asset(1), asset(1, usd_id) ); - credit_offer_create_operation cop = make_credit_offer_create_op( sam_id, core.id, 10000, 100, 3600, 0, - false, db.head_block_time() + fc::days(1), collateral_map, {} ); + credit_offer_create_operation cop = make_credit_offer_create_op( sam_id, core.get_id(), 10000, 100, 3600, + 0, false, db.head_block_time() + fc::days(1), collateral_map, {} ); propose( cop ); } @@ -170,7 +170,7 @@ BOOST_AUTO_TEST_CASE(elasticsearch_account_history) { flat_map collateral_map1; collateral_map1[usd_id] = price( asset(1), asset(2, usd_id) ); - const credit_offer_object& coo1 = create_credit_offer( sam_id, core.id, 10000, 100, 3600, 0, false, + const credit_offer_object& coo1 = create_credit_offer( sam_id, core.get_id(), 10000, 100, 3600, 0, false, disable_time1, collateral_map1, {} ); BOOST_CHECK( coo1.owner_account == sam_id ); @@ -237,7 +237,7 @@ BOOST_AUTO_TEST_CASE(elasticsearch_objects) { if(delete_objects) { // all records deleted // asset and bitasset - asset_id_type usd_id = create_bitasset("USD", account_id_type()).id; + asset_id_type usd_id = create_bitasset("USD", account_id_type()).get_id(); generate_block(); string query = "{ \"query\" : { \"bool\" : { \"must\" : [{\"match_all\": {}}] } } }"; @@ -382,11 +382,11 @@ BOOST_AUTO_TEST_CASE(elasticsearch_history_api) { create_bitasset("USD", account_id_type()); // create op 0 const account_object& dan = create_account("dan"); // create op 1 - create_bitasset("CNY", dan.id); // create op 2 + 
create_bitasset("CNY", dan.get_id()); // create op 2 create_bitasset("BTC", account_id_type()); // create op 3 - create_bitasset("XMR", dan.id); // create op 4 + create_bitasset("XMR", dan.get_id()); // create op 4 create_bitasset("EUR", account_id_type()); // create op 5 - create_bitasset("OIL", dan.id); // create op 6 + create_bitasset("OIL", dan.get_id()); // create op 6 generate_block(); diff --git a/tests/tests/api_limit_tests.cpp b/tests/tests/api_limit_tests.cpp index 3dd518cf9c..40baf71808 100644 --- a/tests/tests/api_limit_tests.cpp +++ b/tests/tests/api_limit_tests.cpp @@ -138,7 +138,7 @@ BOOST_AUTO_TEST_CASE( api_limit_get_limit_orders ){ create_bitasset("USD", account_id_type()); create_account("dan"); create_account("bob"); - asset_id_type bit_jmj_id = create_bitasset("JMJBIT").id; + asset_id_type bit_jmj_id = create_bitasset("JMJBIT").get_id(); generate_block(); fc::usleep(fc::milliseconds(100)); GRAPHENE_CHECK_THROW(db_api.get_limit_orders(std::string(static_cast(asset_id_type())), @@ -176,10 +176,10 @@ BOOST_AUTO_TEST_CASE( api_limit_get_call_orders ){ graphene::app::database_api db_api( db, &( app.get_options() )); //account_id_type() do 3 ops auto nathan_private_key = generate_private_key("nathan"); - account_id_type nathan_id = create_account("nathan", nathan_private_key.get_public_key()).id; + account_id_type nathan_id = create_account("nathan", nathan_private_key.get_public_key()).get_id(); transfer(account_id_type(), nathan_id, asset(100)); asset_id_type bitusd_id = create_bitasset( - "USDBIT", nathan_id, 100, disable_force_settle).id; + "USDBIT", nathan_id, 100, disable_force_settle).get_id(); generate_block(); fc::usleep(fc::milliseconds(100)); BOOST_CHECK( bitusd_id(db).is_market_issued() ); @@ -198,10 +198,10 @@ BOOST_AUTO_TEST_CASE( api_limit_get_settle_orders ){ graphene::app::database_api db_api( db, &( app.get_options() )); //account_id_type() do 3 ops auto nathan_private_key = generate_private_key("nathan"); - account_id_type nathan_id = create_account("nathan", nathan_private_key.get_public_key()).id; + account_id_type nathan_id = create_account("nathan", nathan_private_key.get_public_key()).get_id(); transfer(account_id_type(), nathan_id, asset(100)); asset_id_type bitusd_id = create_bitasset( - "USDBIT", nathan_id, 100, disable_force_settle).id; + "USDBIT", nathan_id, 100, disable_force_settle).get_id(); generate_block(); fc::usleep(fc::milliseconds(100)); GRAPHENE_CHECK_THROW(db_api.get_settle_orders( @@ -219,12 +219,12 @@ BOOST_AUTO_TEST_CASE( api_limit_get_order_book ){ graphene::app::database_api db_api( db, &( app.get_options() )); auto nathan_private_key = generate_private_key("nathan"); auto dan_private_key = generate_private_key("dan"); - account_id_type nathan_id = create_account("nathan", nathan_private_key.get_public_key()).id; - account_id_type dan_id = create_account("dan", dan_private_key.get_public_key()).id; + account_id_type nathan_id = create_account("nathan", nathan_private_key.get_public_key()).get_id(); + account_id_type dan_id = create_account("dan", dan_private_key.get_public_key()).get_id(); transfer(account_id_type(), nathan_id, asset(100)); transfer(account_id_type(), dan_id, asset(100)); - asset_id_type bitusd_id = create_user_issued_asset( "USDBIT", nathan_id(db), charge_market_fee).id; - asset_id_type bitdan_id = create_user_issued_asset( "DANBIT", dan_id(db), charge_market_fee).id; + asset_id_type bitusd_id = create_user_issued_asset( "USDBIT", nathan_id(db), charge_market_fee).get_id(); + asset_id_type bitdan_id = 
create_user_issued_asset( "DANBIT", dan_id(db), charge_market_fee).get_id(); issue_uia( nathan_id, asset(100, bitusd_id) ); issue_uia( dan_id, asset(100, bitdan_id) ); create_sell_order( nathan_id, asset(100, bitusd_id), asset(10000, bitdan_id) ); @@ -450,7 +450,7 @@ BOOST_AUTO_TEST_CASE(api_limit_get_collateral_bids) { for (int i=0; i<3; i++) { std::string acct_name = "mytempacct" + std::to_string(i); - account_id_type account_id=create_account(acct_name).id; + account_id_type account_id=create_account(acct_name).get_id(); transfer(committee_account, account_id, asset(init_balance)); bid_collateral(account_id(db), back(db).amount(10), swan(db).amount(1)); } @@ -476,7 +476,7 @@ BOOST_AUTO_TEST_CASE(api_limit_get_collateral_bids) { //limit= api_limit for (int i=3; i<255; i++) { std::string acct_name = "mytempacct" + std::to_string(i); - account_id_type account_id=create_account(acct_name).id; + account_id_type account_id=create_account(acct_name).get_id(); transfer(committee_account, account_id, asset(init_balance)); bid_collateral(account_id(db), back(db).amount(10), swan(db).amount(1)); } diff --git a/tests/tests/authority_tests.cpp b/tests/tests/authority_tests.cpp index f991559e03..8b4856fd9a 100644 --- a/tests/tests/authority_tests.cpp +++ b/tests/tests/authority_tests.cpp @@ -358,7 +358,7 @@ BOOST_AUTO_TEST_CASE( proposed_single_account ) pup.proposal = proposal.id; pup.fee_paying_account = nathan.id; BOOST_TEST_MESSAGE( "Updating the proposal to have nathan's authority" ); - pup.active_approvals_to_add.insert(nathan.id); + pup.active_approvals_to_add.insert(nathan.get_id()); trx.operations = {pup}; sign( trx, committee_key ); @@ -373,7 +373,7 @@ BOOST_AUTO_TEST_CASE( proposed_single_account ) trx.clear_signatures(); pup.active_approvals_to_add.clear(); - pup.active_approvals_to_add.insert(nathan.id); + pup.active_approvals_to_add.insert(nathan.get_id()); trx.operations = {pup}; sign( trx, nathan_key3 ); @@ -565,7 +565,7 @@ BOOST_FIXTURE_TEST_CASE( fired_committee_members, database_fixture ) pop.proposed_ops.emplace_back(top); trx.operations.push_back(pop); const proposal_object& prop = db.get(PUSH_TX( db, trx ).operation_results.front().get()); - proposal_id_type pid = prop.id; + proposal_id_type pid = prop.get_id(); BOOST_CHECK(!pid(db).is_authorized_to_execute(db)); ilog( "commitee member approves proposal" ); @@ -665,7 +665,7 @@ BOOST_FIXTURE_TEST_CASE( proposal_two_accounts, database_fixture ) BOOST_CHECK(!prop.is_authorized_to_execute(db)); { - proposal_id_type pid = prop.id; + proposal_id_type pid = prop.get_id(); proposal_update_operation uop; uop.proposal = prop.id; uop.active_approvals_to_add.insert(nathan.get_id()); @@ -675,7 +675,7 @@ BOOST_FIXTURE_TEST_CASE( proposal_two_accounts, database_fixture ) PUSH_TX( db, trx ); trx.clear(); - BOOST_CHECK(db.find_object(pid) != nullptr); + BOOST_CHECK(db.find(pid) != nullptr); BOOST_CHECK(!prop.is_authorized_to_execute(db)); uop.active_approvals_to_add = {dan.get_id()}; @@ -685,7 +685,7 @@ BOOST_FIXTURE_TEST_CASE( proposal_two_accounts, database_fixture ) sign( trx, dan_key ); PUSH_TX( db, trx ); - BOOST_CHECK(db.find_object(pid) == nullptr); + BOOST_CHECK(db.find(pid) == nullptr); } } FC_LOG_AND_RETHROW() } @@ -748,14 +748,14 @@ BOOST_FIXTURE_TEST_CASE( proposal_delete, database_fixture ) } { - proposal_id_type pid = prop.id; + proposal_id_type pid = prop.get_id(); proposal_delete_operation dop; dop.fee_paying_account = nathan.get_id(); dop.proposal = pid; trx.operations.push_back(dop); sign( trx, nathan_key ); PUSH_TX( db, trx ); 
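Editor's note: the hunks above and throughout this patch replace uses of obj.id with obj.get_id() wherever a space/type-specific id (account_id_type, asset_id_type, proposal_id_type, ...) is wanted. The short sketch below is a minimal, self-contained illustration of why such an accessor is convenient once implicit conversions from the generic id are removed; generic_id, asset_id, base_object and asset_object_mock are simplified stand-ins invented for this example, not the project's actual definitions.

#include <cassert>
#include <cstdint>

// Hypothetical stand-ins for an untyped object id and a typed id.
struct generic_id { uint64_t instance = 0; };
struct asset_id   { uint64_t instance = 0; };

// Hypothetical, heavily simplified object hierarchy.
struct base_object
{
   generic_id id;              // generic id: the concrete object type is lost here
};

struct asset_object_mock : base_object
{
   asset_id get_id() const     // typed id, recovered without any conversion
   {
      return asset_id{ id.instance };
   }
};

int main()
{
   asset_object_mock usd;
   usd.id.instance = 7;

   // With implicit conversions from the generic id removed,
   //   asset_id usd_id = usd.id;        // would no longer compile
   // the typed accessor keeps the one-line call sites working:
   asset_id usd_id = usd.get_id();
   assert( usd_id.instance == 7 );
   return 0;
}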
- BOOST_CHECK(db.find_object(pid) == nullptr); + BOOST_CHECK(db.find(pid) == nullptr); BOOST_CHECK_EQUAL(get_balance(nathan, asset_id_type()(db)), 100000); } } FC_LOG_AND_RETHROW() } @@ -824,7 +824,7 @@ BOOST_FIXTURE_TEST_CASE( proposal_owner_authority_delete, database_fixture ) } { - proposal_id_type pid = prop.id; + proposal_id_type pid = prop.get_id(); proposal_delete_operation dop; dop.fee_paying_account = nathan.get_id(); dop.proposal = pid; @@ -832,7 +832,7 @@ BOOST_FIXTURE_TEST_CASE( proposal_owner_authority_delete, database_fixture ) trx.operations.push_back(dop); sign( trx, nathan_key ); PUSH_TX( db, trx ); - BOOST_CHECK(db.find_object(pid) == nullptr); + BOOST_CHECK(db.find(pid) == nullptr); BOOST_CHECK_EQUAL(get_balance(nathan, asset_id_type()(db)), 100000); } } FC_LOG_AND_RETHROW() } @@ -880,7 +880,7 @@ BOOST_FIXTURE_TEST_CASE( proposal_owner_authority_complete, database_fixture ) BOOST_CHECK(!prop.is_authorized_to_execute(db)); { - proposal_id_type pid = prop.id; + proposal_id_type pid = prop.get_id(); proposal_update_operation uop; uop.fee_paying_account = nathan.get_id(); uop.proposal = prop.id; @@ -921,7 +921,7 @@ BOOST_FIXTURE_TEST_CASE( proposal_owner_authority_complete, database_fixture ) sign( trx, nathan_key ); PUSH_TX( db, trx ); trx.clear(); - BOOST_CHECK(db.find_object(pid) == nullptr); + BOOST_CHECK(db.find(pid) == nullptr); } } FC_LOG_AND_RETHROW() } @@ -1088,8 +1088,8 @@ BOOST_FIXTURE_TEST_CASE( voting_account, database_fixture ) ACTORS((nathan)(vikram)); upgrade_to_lifetime_member(nathan_id); upgrade_to_lifetime_member(vikram_id); - committee_member_id_type nathan_committee_member = create_committee_member(nathan_id(db)).id; - committee_member_id_type vikram_committee_member = create_committee_member(vikram_id(db)).id; + committee_member_id_type nathan_committee_member = create_committee_member(nathan_id(db)).get_id(); + committee_member_id_type vikram_committee_member = create_committee_member(vikram_id(db)).get_id(); //wdump((db.get_balance(account_id_type(), asset_id_type()))); generate_block(); @@ -1665,7 +1665,8 @@ BOOST_FIXTURE_TEST_CASE( parent_owner_test, database_fixture ) set_expiration( db, ptx ); sign( ptx, bob_active_key ); - return PUSH_TX( db, ptx, database::skip_transaction_dupe_check ).operation_results[0].get(); + return proposal_id_type { PUSH_TX( db, ptx, database::skip_transaction_dupe_check ).operation_results[0] + .get() }; }; auto approve_proposal = [&]( @@ -1908,7 +1909,7 @@ BOOST_AUTO_TEST_CASE( custom_operation_required_auths_before_fork ) { trx.operations = {pcop}; trx.signatures.clear(); sign(trx, alice_private_key); - proposal_id_type pid = db.push_transaction(trx).operation_results[0].get(); + proposal_id_type pid { db.push_transaction(trx).operation_results[0].get() }; // Check bob is not listed as a required approver BOOST_REQUIRE_EQUAL(pid(db).required_active_approvals.count(bob_id), 0); @@ -1965,7 +1966,7 @@ BOOST_AUTO_TEST_CASE( custom_operation_required_auths_after_fork ) { trx.operations = {pcop}; trx.signatures.clear(); sign(trx, alice_private_key); - proposal_id_type pid = db.push_transaction(trx).operation_results[0].get(); + proposal_id_type pid { db.push_transaction(trx).operation_results[0].get() }; // Check bob is listed as a required approver BOOST_REQUIRE_EQUAL(pid(db).required_active_approvals.count(bob_id), 1); @@ -2198,7 +2199,7 @@ BOOST_AUTO_TEST_CASE( nested_execution ) pup.active_approvals_to_add.insert( alice_id ); pco.proposed_ops.emplace_back( pup ); trx.operations.push_back( pco ); - nested.push_back( 
PUSH_TX( db, trx, ~0 ).operation_results.front().get() ); + nested.push_back( proposal_id_type { PUSH_TX( db, trx, ~0 ).operation_results.front().get() } ); trx.clear(); pco.proposed_ops.clear(); } @@ -2211,8 +2212,8 @@ BOOST_AUTO_TEST_CASE( nested_execution ) PUSH_TX( db, trx, ~0 ); for( size_t i = 1; i < nested.size(); i++ ) - BOOST_CHECK_THROW( db.get( nested[i] ), fc::assert_exception ); // executed successfully -> object removed - db.get( inner ); // wasn't executed -> object exists, doesn't throw + BOOST_CHECK_THROW( db.get( nested[i] ), fc::assert_exception ); // executed successfully -> object removed + db.get( inner ); // wasn't executed -> object exists, doesn't throw } FC_LOG_AND_RETHROW() } BOOST_AUTO_TEST_CASE( issue_214 ) @@ -2234,7 +2235,7 @@ BOOST_AUTO_TEST_CASE( issue_214 ) pop.expiration_time = db.head_block_time() + fc::days(1); trx.operations.push_back(pop); sign( trx, bob_private_key ); - const proposal_id_type pid1 = PUSH_TX( db, trx ).operation_results[0].get(); + const proposal_id_type pid1 { PUSH_TX( db, trx ).operation_results[0].get() }; trx.clear(); // Bob wants to propose that Alice confirm the first proposal @@ -2259,7 +2260,7 @@ BOOST_AUTO_TEST_CASE( issue_214 ) set_expiration( db, ntx ); ntx.operations.push_back(npop); sign( ntx, bob_private_key ); - const proposal_id_type pid1a = PUSH_TX( db, ntx ).operation_results[0].get(); + const proposal_id_type pid1a { PUSH_TX( db, ntx ).operation_results[0].get() }; ntx.clear(); // But execution after confirming it fails @@ -2272,14 +2273,14 @@ BOOST_AUTO_TEST_CASE( issue_214 ) PUSH_TX( db, ntx ); ntx.clear(); - db.get( pid1a ); // still exists + db.get( pid1a ); // still exists } generate_blocks( HARDFORK_CORE_214_TIME + fc::hours(1) ); set_expiration( db, trx ); sign( trx, bob_private_key ); // after the HF the previously failed tx works too - const proposal_id_type pid2 = PUSH_TX( db, trx ).operation_results[0].get(); + const proposal_id_type pid2 { PUSH_TX( db, trx ).operation_results[0].get() }; trx.clear(); // For completeness, Alice confirms Bob's second proposal @@ -2291,8 +2292,8 @@ BOOST_AUTO_TEST_CASE( issue_214 ) // Execution of the second proposal should have confirmed the first, // which should have been executed by now. 
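Editor's note: several hunks in this area also switch from the untyped db.find_object(pid) to db.find(pid) on a typed id. The self-contained sketch below shows the general idea under a simplifying assumption invented for the example: the typed id carries the object type it refers to, so the lookup's return type can be deduced. typed_id, proposal_object_mock and mock_database are hypothetical and hold only one object kind, which is enough to make the point.

#include <cassert>
#include <cstdint>
#include <map>

struct proposal_object_mock { uint64_t instance = 0; };

// Hypothetical typed id: it remembers which object type it refers to.
template< typename Object >
struct typed_id
{
   using object_type = Object;
   uint64_t instance = 0;
};

using proposal_id = typed_id< proposal_object_mock >;

// Hypothetical database holding only proposals, enough for the sketch.
struct mock_database
{
   std::map< uint64_t, proposal_object_mock > proposals;

   // find() deduces its return type from the id type, so callers no longer
   // pass an untyped id to a generic lookup and downcast the result.
   template< typename Id >
   const typename Id::object_type* find( const Id& id ) const
   {
      auto itr = proposals.find( id.instance );
      return itr == proposals.end() ? nullptr : &itr->second;
   }
};

int main()
{
   mock_database db;
   db.proposals[ 1 ] = proposal_object_mock{ 1 };

   proposal_id pid{ 1 };
   assert( db.find( pid ) != nullptr );               // object type inferred from pid
   assert( db.find( proposal_id{ 2 } ) == nullptr );  // unknown instance: no object
   return 0;
}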
- BOOST_CHECK_THROW( db.get(pid1), fc::assert_exception ); - BOOST_CHECK_THROW( db.get(pid2), fc::assert_exception ); + BOOST_CHECK_THROW( db.get(pid1), fc::assert_exception ); + BOOST_CHECK_THROW( db.get(pid2), fc::assert_exception ); BOOST_CHECK_EQUAL( top.amount.amount.value, get_balance( bob_id, top.amount.asset_id ) ); } FC_LOG_AND_RETHROW() } @@ -2352,16 +2353,16 @@ BOOST_AUTO_TEST_CASE( self_approving_proposal ) pop.fee_paying_account = alice_id; pop.expiration_time = db.head_block_time() + fc::days(1); trx.operations.push_back(pop); - const proposal_id_type pid1 = PUSH_TX( db, trx, ~0 ).operation_results[0].get(); + const proposal_id_type pid1 { PUSH_TX( db, trx, ~0 ).operation_results[0].get() }; trx.clear(); BOOST_REQUIRE_EQUAL( 0u, pid1.instance.value ); - db.get(pid1); + db.get(pid1); trx.operations.push_back(pup); PUSH_TX( db, trx, ~0 ); // Proposal failed and still exists - db.get(pid1); + db.get(pid1); } FC_LOG_AND_RETHROW() } BOOST_AUTO_TEST_CASE( self_deleting_proposal ) @@ -2383,10 +2384,10 @@ BOOST_AUTO_TEST_CASE( self_deleting_proposal ) pop.fee_paying_account = alice_id; pop.expiration_time = db.head_block_time() + fc::days(1); trx.operations.push_back( pop ); - const proposal_id_type pid1 = PUSH_TX( db, trx, ~0 ).operation_results[0].get(); + const proposal_id_type pid1 { PUSH_TX( db, trx, ~0 ).operation_results[0].get() }; trx.clear(); BOOST_REQUIRE_EQUAL( 0u, pid1.instance.value ); - db.get(pid1); + db.get(pid1); proposal_update_operation pup; pup.fee_paying_account = alice_id; @@ -2396,7 +2397,7 @@ BOOST_AUTO_TEST_CASE( self_deleting_proposal ) PUSH_TX( db, trx, ~0 ); // Proposal failed and still exists - db.get(pid1); + db.get(pid1); } FC_LOG_AND_RETHROW() } BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/bitasset_tests.cpp b/tests/tests/bitasset_tests.cpp index e0b7629d0b..32956b2983 100644 --- a/tests/tests/bitasset_tests.cpp +++ b/tests/tests/bitasset_tests.cpp @@ -157,7 +157,7 @@ BOOST_AUTO_TEST_CASE( reset_backing_asset_on_witness_asset ) trx.set_expiration(HARDFORK_CORE_868_890_TIME - fc::seconds(1)); BOOST_TEST_MESSAGE("Create USDBIT"); - asset_id_type bit_usd_id = create_bitasset("USDBIT").id; + asset_id_type bit_usd_id = create_bitasset("USDBIT").get_id(); asset_id_type core_id = bit_usd_id(db).bitasset_data(db).options.short_backing_asset; { @@ -166,7 +166,7 @@ BOOST_AUTO_TEST_CASE( reset_backing_asset_on_witness_asset ) } BOOST_TEST_MESSAGE("Create JMJBIT based on USDBIT."); - asset_id_type bit_jmj_id = create_bitasset("JMJBIT").id; + asset_id_type bit_jmj_id = create_bitasset("JMJBIT").get_id(); { BOOST_TEST_MESSAGE("Update the JMJBIT asset options"); change_asset_options(*this, nathan_id, nathan_private_key, bit_jmj_id, true ); @@ -289,7 +289,7 @@ BOOST_AUTO_TEST_CASE( reset_backing_asset_on_non_witness_asset ) BOOST_TEST_MESSAGE("Create USDBIT"); - asset_id_type bit_usd_id = create_bitasset("USDBIT").id; + asset_id_type bit_usd_id = create_bitasset("USDBIT").get_id(); asset_id_type core_id = bit_usd_id(db).bitasset_data(db).options.short_backing_asset; { @@ -298,7 +298,7 @@ BOOST_AUTO_TEST_CASE( reset_backing_asset_on_non_witness_asset ) } BOOST_TEST_MESSAGE("Create JMJBIT based on USDBIT."); - asset_id_type bit_jmj_id = create_bitasset("JMJBIT").id; + asset_id_type bit_jmj_id = create_bitasset("JMJBIT").get_id(); { BOOST_TEST_MESSAGE("Update the JMJBIT asset options"); change_asset_options(*this, nathan_id, nathan_private_key, bit_jmj_id, false ); @@ -479,7 +479,7 @@ BOOST_AUTO_TEST_CASE( hf_890_test ) transfer(committee_account, borrower_id, 
asset(init_balance)); const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); - asset_id_type usd_id = bitusd.id; + asset_id_type usd_id = bitusd.get_id(); { // change feed lifetime @@ -523,7 +523,7 @@ BOOST_AUTO_TEST_CASE( hf_890_test ) BOOST_CHECK( usd_id(db).bitasset_data(db).current_feed.settlement_price.is_null() ); // place a sell order, it won't be matched with the call order - limit_order_id_type sell_id = create_sell_order(seller_id, asset(10, usd_id), asset(1))->id; + limit_order_id_type sell_id = create_sell_order(seller_id, asset(10, usd_id), asset(1))->get_id(); { // change feed lifetime to longer @@ -545,7 +545,7 @@ BOOST_AUTO_TEST_CASE( hf_890_test ) if( i == 0 ) // before hard fork, median feed is still null, and limit order is still there { BOOST_CHECK( usd_id(db).bitasset_data(db).current_feed.settlement_price.is_null() ); - BOOST_CHECK( db.find( sell_id ) ); + BOOST_CHECK( db.find( sell_id ) ); // go beyond hard fork blocks += generate_blocks(hf_time - mi, true, skip); @@ -555,7 +555,7 @@ BOOST_AUTO_TEST_CASE( hf_890_test ) // after hard fork, median feed should become valid, and the limit order should be filled { BOOST_CHECK( usd_id(db).bitasset_data(db).current_feed.settlement_price == current_feed.settlement_price ); - BOOST_CHECK( !db.find( sell_id ) ); + BOOST_CHECK( !db.find( sell_id ) ); } // undo above tx's and reset @@ -963,7 +963,7 @@ BOOST_AUTO_TEST_CASE( hf_1270_test ) transfer( committee_account, borrower_id, asset(init_balance) ); const auto& bitusd = create_bitasset( "USDBIT", feedproducer_id ); - asset_id_type usd_id = bitusd.id; + asset_id_type usd_id = bitusd.get_id(); { // set a short feed lifetime @@ -1054,8 +1054,8 @@ BOOST_AUTO_TEST_CASE( hf_1270_test ) // when median MSSR changed to 125%, the call order will be matched, // then this limit order should be filled limit_order_id_type sell_id = ( i % 2 == 0 ) ? 
- create_sell_order( seller_id, asset(20, usd_id), asset(1) )->id : // for MCR test - create_sell_order( seller_id, asset(8, usd_id), asset(1) )->id; // for MSSR test + create_sell_order( seller_id, asset(20, usd_id), asset(1) )->get_id() : // for MCR test + create_sell_order( seller_id, asset(8, usd_id), asset(1) )->get_id(); // for MSSR test { // change feed lifetime to longer, let all 3 feeds be valid @@ -1082,7 +1082,7 @@ BOOST_AUTO_TEST_CASE( hf_1270_test ) BOOST_CHECK_EQUAL( usd_id(db).bitasset_data(db).current_feed.maintenance_collateral_ratio, 1750 ); BOOST_CHECK_EQUAL( usd_id(db).bitasset_data(db).current_feed.maximum_short_squeeze_ratio, 1100 ); // limit order is still there - BOOST_CHECK( db.find( sell_id ) ); + BOOST_CHECK( db.find( sell_id ) ); // go beyond hard fork 890 blocks += generate_blocks( HARDFORK_CORE_868_890_TIME - mi, true, skip ); @@ -1107,10 +1107,10 @@ BOOST_AUTO_TEST_CASE( hf_1270_test ) if( affected_by_hf_343 ) // if updated bitasset before hf 890, and hf 343 executed after hf 890 // the limit order should have been filled - BOOST_CHECK( !db.find( sell_id ) ); + BOOST_CHECK( !db.find( sell_id ) ); else // if not affected by hf 343 // the limit order should be still there, because `check_call_order` was incorrectly skipped - BOOST_CHECK( db.find( sell_id ) ); + BOOST_CHECK( db.find( sell_id ) ); // go beyond hard fork 935 blocks += generate_blocks(HARDFORK_CORE_935_TIME - mi, true, skip); @@ -1126,13 +1126,13 @@ BOOST_AUTO_TEST_CASE( hf_1270_test ) if( i % 2 == 0) { // MCR test, median MCR should be 350% and order will not be filled except when i = 0 BOOST_CHECK_EQUAL(usd_id(db).bitasset_data(db).current_feed.maintenance_collateral_ratio, 3500); if( affected_by_hf_343 ) - BOOST_CHECK(!db.find(sell_id)); + BOOST_CHECK(!db.find(sell_id)); else - BOOST_CHECK(db.find(sell_id)); // MCR bug, order still there + BOOST_CHECK(db.find(sell_id)); // MCR bug, order still there } else { // MSSR test, MSSR should be 125% and order is filled BOOST_CHECK_EQUAL(usd_id(db).bitasset_data(db).current_feed.maximum_short_squeeze_ratio, 1250); - BOOST_CHECK(!db.find(sell_id)); // order filled + BOOST_CHECK(!db.find(sell_id)); // order filled } // go beyond hard fork 1270 @@ -1147,7 +1147,7 @@ BOOST_AUTO_TEST_CASE( hf_1270_test ) BOOST_CHECK( usd_id(db).bitasset_data(db).current_feed.settlement_price == current_feed.settlement_price ); if( i % 2 == 0 ) { // MCR test, order filled BOOST_CHECK_EQUAL(usd_id(db).bitasset_data(db).current_feed.maintenance_collateral_ratio, 3500); - BOOST_CHECK(!db.find(sell_id)); + BOOST_CHECK(!db.find(sell_id)); } } @@ -1254,7 +1254,7 @@ BOOST_AUTO_TEST_CASE( reset_backing_asset_switching_to_witness_fed ) BOOST_TEST_MESSAGE("Create USDBIT"); - asset_id_type bit_usd_id = create_bitasset("USDBIT").id; + asset_id_type bit_usd_id = create_bitasset("USDBIT").get_id(); asset_id_type core_id = bit_usd_id(db).bitasset_data(db).options.short_backing_asset; { @@ -1263,7 +1263,7 @@ BOOST_AUTO_TEST_CASE( reset_backing_asset_switching_to_witness_fed ) } BOOST_TEST_MESSAGE("Create JMJBIT based on USDBIT."); - asset_id_type bit_jmj_id = create_bitasset("JMJBIT").id; + asset_id_type bit_jmj_id = create_bitasset("JMJBIT").get_id(); { BOOST_TEST_MESSAGE("Update the JMJBIT asset options"); change_asset_options(*this, nathan_id, nathan_private_key, bit_jmj_id, false ); @@ -1456,14 +1456,14 @@ BOOST_AUTO_TEST_CASE(hf_890_test_hf2481) // Create the smart asset backed by JCOIN const uint16_t smartbit_market_fee_percent = 2 * GRAPHENE_1_PERCENT; create_bitasset("SMARTBIT", 
smartissuer_id, smartbit_market_fee_percent, - charge_market_fee, 2, jillcoin.id); + charge_market_fee, 2, jillcoin.get_id()); // Obtain asset object after a block is generated to obtain the final object that is commited to the database generate_block(); trx.clear(); set_expiration(db, trx); const asset_object &smartbit = get_asset("SMARTBIT"); const asset_bitasset_data_object& smartbit_bitasset_data = (*smartbit.bitasset_data_id)(db); // Confirm that the asset is to be backed by JCOIN - BOOST_CHECK(smartbit_bitasset_data.options.short_backing_asset == jillcoin.id); + BOOST_CHECK(smartbit_bitasset_data.options.short_backing_asset == jillcoin.get_id()); // Fund balances of the actors issue_uia(alice, jillcoin.amount(5000 * jillcoin_unit)); diff --git a/tests/tests/block_tests.cpp b/tests/tests/block_tests.cpp index c83ef29e7b..1cdfc4749c 100644 --- a/tests/tests/block_tests.cpp +++ b/tests/tests/block_tests.cpp @@ -592,7 +592,7 @@ BOOST_AUTO_TEST_CASE( undo_pending ) signed_transaction trx; set_expiration( db, trx ); - account_id_type nathan_id = account_idx.get_next_id(); + account_id_type nathan_id { account_idx.get_next_id() }; account_create_operation cop; cop.registrar = GRAPHENE_TEMP_ACCOUNT; cop.name = "nathan"; @@ -646,7 +646,7 @@ BOOST_AUTO_TEST_CASE( switch_forks_undo_create ) signed_transaction trx; set_expiration( db1, trx ); - account_id_type nathan_id = account_idx.get_next_id(); + account_id_type nathan_id { account_idx.get_next_id() }; account_create_operation cop; cop.registrar = GRAPHENE_TEMP_ACCOUNT; cop.name = "nathan"; @@ -707,7 +707,7 @@ BOOST_AUTO_TEST_CASE( duplicate_transactions ) signed_transaction trx; set_expiration( db1, trx ); - account_id_type nathan_id = account_idx.get_next_id(); + account_id_type nathan_id { account_idx.get_next_id() }; account_create_operation cop; cop.name = "nathan"; cop.owner = authority(1, init_account_pub_key, 1); @@ -760,7 +760,7 @@ BOOST_AUTO_TEST_CASE( tapos ) trx.set_expiration( db1.head_block_time() ); //db1.get_slot_time(1) ); trx.set_reference_block( db1.head_block_id() ); - account_id_type nathan_id = account_idx.get_next_id(); + account_id_type nathan_id { account_idx.get_next_id() }; account_create_operation cop; cop.registrar = init1.id; cop.name = "nathan"; @@ -1526,7 +1526,7 @@ BOOST_FIXTURE_TEST_CASE( update_account_keys, database_fixture ) account_object sam_account_object = create_account( "sam", sam_key ); // upgrade sam to LTM - upgrade_to_lifetime_member(sam_account_object.id); + upgrade_to_lifetime_member(sam_account_object.get_id()); //Get a sane head block time generate_block( skip_flags ); @@ -1632,9 +1632,9 @@ BOOST_FIXTURE_TEST_CASE( update_account_keys, database_fixture ) database::skip_transaction_dupe_check | database::skip_transaction_signatures ); - account_id_type alice_account_id = + account_id_type alice_account_id { ptx_create.operation_results[0] - .get< object_id_type >(); + .get< object_id_type >() }; generate_block( skip_flags ); for( const vector< int >& key_sched_after : possible_key_sched ) diff --git a/tests/tests/bsip48_75_tests.cpp b/tests/tests/bsip48_75_tests.cpp index 84f37106ec..c450461224 100644 --- a/tests/tests/bsip48_75_tests.cpp +++ b/tests/tests/bsip48_75_tests.cpp @@ -120,7 +120,7 @@ BOOST_AUTO_TEST_CASE( bsip48_75_hardfork_protection_test ) // Able to create asset without new data processed_transaction ptx = PUSH_TX(db, trx, ~0); const asset_object& samcoin = db.get(ptx.operation_results[0].get()); - asset_id_type samcoin_id = samcoin.id; + asset_id_type samcoin_id = 
samcoin.get_id(); BOOST_CHECK_EQUAL( samcoin.options.market_fee_percent, 100 ); BOOST_CHECK_EQUAL( samcoin.bitasset_data(db).options.minimum_feeds, 3 ); @@ -304,7 +304,7 @@ BOOST_AUTO_TEST_CASE( prediction_market_global_settle_permission ) // create a prediction market const asset_object& pm = create_prediction_market( "PDM", sam_id ); - asset_id_type pm_id = pm.id; + asset_id_type pm_id = pm.get_id(); BOOST_CHECK( pm_id(db).can_global_settle() ); @@ -376,7 +376,7 @@ BOOST_AUTO_TEST_CASE( update_max_supply ) // create a UIA const asset_object& uia = create_user_issued_asset( "UIATEST", sam, charge_market_fee ); - asset_id_type uia_id = uia.id; + asset_id_type uia_id = uia.get_id(); // issue some to Sam issue_uia( sam_id, uia.amount( GRAPHENE_MAX_SHARE_SUPPLY - 100 ) ); @@ -702,7 +702,7 @@ BOOST_AUTO_TEST_CASE( disable_new_supply_uia ) // create a UIA const asset_object& uia = create_user_issued_asset( "UIATEST", sam, charge_market_fee ); - asset_id_type uia_id = uia.id; + asset_id_type uia_id = uia.get_id(); BOOST_CHECK( uia_id(db).can_create_new_supply() ); BOOST_CHECK_EQUAL( uia_id(db).dynamic_data(db).current_supply.value, 0 ); @@ -811,7 +811,7 @@ BOOST_AUTO_TEST_CASE( disable_new_supply_pm ) // create a PM const asset_object& pm = create_prediction_market( "PDM", sam_id ); - asset_id_type pm_id = pm.id; + asset_id_type pm_id = pm.get_id(); BOOST_CHECK( pm_id(db).can_create_new_supply() ); BOOST_CHECK_EQUAL( pm_id(db).dynamic_data(db).current_supply.value, 0 ); @@ -918,7 +918,7 @@ BOOST_AUTO_TEST_CASE( skip_core_exchange_rate ) // create a UIA const asset_object& uia = create_user_issued_asset( "UIATEST", sam, charge_market_fee ); - asset_id_type uia_id = uia.id; + asset_id_type uia_id = uia.get_id(); BOOST_CHECK( uia_id(db).options.core_exchange_rate == price(asset(1, uia_id), asset(1)) ); @@ -1014,7 +1014,7 @@ BOOST_AUTO_TEST_CASE( invalid_flags_in_asset ) processed_transaction ptx = PUSH_TX(db, trx, ~0); const asset_object& samcoin = db.get(ptx.operation_results[0].get()); - asset_id_type samcoin_id = samcoin.id; + asset_id_type samcoin_id = samcoin.get_id(); // There are invalid bits in flags BOOST_CHECK( samcoin_id(db).options.flags & ~UIA_VALID_FLAGS_MASK ); @@ -1031,7 +1031,7 @@ BOOST_AUTO_TEST_CASE( invalid_flags_in_asset ) ptx = PUSH_TX(db, trx, ~0); const asset_object& sambit = db.get(ptx.operation_results[0].get()); - asset_id_type sambit_id = sambit.id; + asset_id_type sambit_id = sambit.get_id(); // There are invalid bits in flags BOOST_CHECK( sambit_id(db).options.flags & ~VALID_FLAGS_MASK ); @@ -1155,7 +1155,7 @@ BOOST_AUTO_TEST_CASE( invalid_flags_in_asset ) trx.operations.push_back( acop ); ptx = PUSH_TX(db, trx, ~0); const asset_object& newsamcoin = db.get(ptx.operation_results[0].get()); - asset_id_type newsamcoin_id = newsamcoin.id; + asset_id_type newsamcoin_id = newsamcoin.get_id(); BOOST_CHECK_EQUAL( newsamcoin_id(db).options.flags, UIA_VALID_FLAGS_MASK ); @@ -1186,7 +1186,7 @@ BOOST_AUTO_TEST_CASE( invalid_flags_in_asset ) trx.operations.push_back( acop2 ); ptx = PUSH_TX(db, trx, ~0); const asset_object& newsambit = db.get(ptx.operation_results[0].get()); - asset_id_type newsambit_id = newsambit.id; + asset_id_type newsambit_id = newsambit.get_id(); BOOST_CHECK_EQUAL( newsambit_id(db).options.flags, valid_bitflag ); @@ -1235,7 +1235,7 @@ BOOST_AUTO_TEST_CASE( update_asset_precision ) // create a prediction market const asset_object& pm = create_prediction_market( "PDM", sam_id ); - asset_id_type pm_id = pm.id; + asset_id_type pm_id = pm.get_id(); 
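Editor's note: the other recurring change in these tests is moving from copy-initialization (type_id x = expr;) to direct brace-initialization (type_id x { expr };) when a typed id is built from a generic object_id_type, for example from an operation result. The minimal sketch below shows why, assuming the converting constructor is declared explicit as this patch series does; generic_id and proposal_id_mock are made-up stand-ins, not the real types.

#include <cassert>
#include <cstdint>

struct generic_id                       // stand-in for the untyped object id
{
   uint64_t instance = 0;
};

struct proposal_id_mock                 // stand-in for a typed id
{
   uint64_t instance = 0;

   proposal_id_mock() = default;
   explicit proposal_id_mock( const generic_id& g )   // explicit: conversions
      : instance( g.instance ) {}                     // must be spelled out
};

int main()
{
   generic_id raw{ 42 };                // e.g. what an operation result yields

   // proposal_id_mock pid = raw;       // copy-initialization cannot call an
   //                                   // explicit constructor: compile error
   proposal_id_mock pid{ raw };         // direct (brace) initialization is fine
   assert( pid.instance == 42 );
   return 0;
}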
BOOST_CHECK_EQUAL( pm_id(db).precision, 5 ); @@ -1258,7 +1258,7 @@ BOOST_AUTO_TEST_CASE( update_asset_precision ) // create a UIA const asset_object& uia = create_user_issued_asset( "UIATEST", sam, charge_market_fee ); - asset_id_type uia_id = uia.id; + asset_id_type uia_id = uia.get_id(); BOOST_CHECK_EQUAL( uia_id(db).precision, 2 ); @@ -1321,7 +1321,7 @@ BOOST_AUTO_TEST_CASE( update_asset_precision ) // create a MPA which is backed by the UIA const asset_object& mpa = create_bitasset( "TESTBIT", sam_id, 10, charge_market_fee, 3, uia_id ); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); BOOST_CHECK( mpa_id(db).bitasset_data(db).options.short_backing_asset == uia_id ); @@ -1357,7 +1357,7 @@ BOOST_AUTO_TEST_CASE( asset_owner_permissions_update_icr_mcr_mssr ) // create a MPA with a zero market_fee_percent const asset_object& mpa = create_bitasset( "TESTBIT", sam_id, 0, charge_market_fee ); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); BOOST_CHECK( mpa_id(db).can_owner_update_icr() ); BOOST_CHECK( mpa_id(db).can_owner_update_mcr() ); @@ -1747,7 +1747,7 @@ BOOST_AUTO_TEST_CASE( asset_owner_update_mcr_mssr ) // create a MPA with a zero market_fee_percent const asset_object& mpa = create_bitasset( "TESTBIT", sam_id, 0, charge_market_fee ); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); asset_id_type core_id = asset_id_type(); // add a price feed publisher and publish a feed @@ -1773,7 +1773,7 @@ BOOST_AUTO_TEST_CASE( asset_owner_update_mcr_mssr ) // borrower borrows some and sends to seller const call_order_object* call_ptr = borrow( borrower_id, asset(1000, mpa_id), asset(2000) ); BOOST_REQUIRE( call_ptr ); - call_order_id_type call_id = call_ptr->id; + call_order_id_type call_id { call_ptr->id }; BOOST_CHECK_EQUAL( call_id(db).debt.value, 1000 ); BOOST_CHECK_EQUAL( call_id(db).collateral.value, 2000 ); @@ -1792,13 +1792,13 @@ BOOST_AUTO_TEST_CASE( asset_owner_update_mcr_mssr ) // seller places orders const limit_order_object* order1_ptr = create_sell_order( seller, asset(100, mpa_id), asset(105) ); BOOST_REQUIRE( order1_ptr ); - limit_order_id_type order1_id = order1_ptr->id; + limit_order_id_type order1_id = order1_ptr->get_id(); BOOST_CHECK_EQUAL( order1_id(db).for_sale.value, 100 ); BOOST_CHECK_EQUAL( order1_id(db).amount_to_receive().amount.value, 105 ); const limit_order_object* order2_ptr = create_sell_order( seller, asset(100, mpa_id), asset(115) ); BOOST_REQUIRE( order2_ptr ); - limit_order_id_type order2_id = order2_ptr->id; + limit_order_id_type order2_id = order2_ptr->get_id(); BOOST_CHECK_EQUAL( order2_id(db).for_sale.value, 100 ); BOOST_CHECK_EQUAL( order2_id(db).amount_to_receive().amount.value, 115 ); diff --git a/tests/tests/bsip85_tests.cpp b/tests/tests/bsip85_tests.cpp index 88eaab71e0..fc76ec70fd 100644 --- a/tests/tests/bsip85_tests.cpp +++ b/tests/tests/bsip85_tests.cpp @@ -92,7 +92,7 @@ BOOST_AUTO_TEST_CASE( hardfork_time_test ) // Should succeed processed_transaction ptx = PUSH_TX(db, trx, ~0); trx.operations.clear(); - proposal_id_type prop_id = ptx.operation_results[0].get(); + proposal_id_type prop_id { ptx.operation_results[0].get() }; // The maker fee discount percent is still 0 BOOST_CHECK_EQUAL( db.get_global_properties().parameters.get_maker_fee_discount_percent(), 0 ); @@ -137,7 +137,7 @@ BOOST_AUTO_TEST_CASE( bsip85_maker_fee_discount_test ) int64_t cer_usd_amount = 31; price tmp_cer( asset( cer_core_amount ), asset( cer_usd_amount, asset_id_type(1) ) ); const auto& usd_obj = 
create_user_issued_asset( "IZZYUSD", izzy_id(db), charge_market_fee, tmp_cer ); - asset_id_type usd_id = usd_obj.id; + asset_id_type usd_id = usd_obj.get_id(); issue_uia( alice_id, asset( alice_b0, usd_id ) ); issue_uia( bob_id, asset( bob_b0, usd_id ) ); @@ -185,11 +185,11 @@ BOOST_AUTO_TEST_CASE( bsip85_maker_fee_discount_test ) BOOST_TEST_MESSAGE( "Creating ao1, then be filled by bo1" ); // pays fee in core const limit_order_object* ao1 = create_sell_order( alice_id, asset(1000), asset(200, usd_id) ); - const limit_order_id_type ao1id = ao1->id; + const limit_order_id_type ao1id = ao1->get_id(); // pays fee in usd const limit_order_object* bo1 = create_sell_order( bob_id, asset(200, usd_id), asset(1000), max_exp, cer ); - BOOST_CHECK( db.find( ao1id ) == nullptr ); + BOOST_CHECK( db.find( ao1id ) == nullptr ); BOOST_CHECK( bo1 == nullptr ); // data after order created @@ -218,11 +218,11 @@ BOOST_AUTO_TEST_CASE( bsip85_maker_fee_discount_test ) BOOST_TEST_MESSAGE( "Creating ao2, then be partially filled by bo2" ); // pays fee in usd const limit_order_object* ao2 = create_sell_order( alice_id, asset(1000), asset(200, usd_id), max_exp, cer ); - const limit_order_id_type ao2id = ao2->id; + const limit_order_id_type ao2id = ao2->get_id(); // pays fee in core const limit_order_object* bo2 = create_sell_order( bob_id, asset(100, usd_id), asset(500) ); - BOOST_CHECK( db.find( ao2id ) != nullptr ); + BOOST_CHECK( db.find( ao2id ) != nullptr ); BOOST_CHECK( bo2 == nullptr ); // data after order created diff --git a/tests/tests/bsip86_tests.cpp b/tests/tests/bsip86_tests.cpp index 739720e1ba..b740a09194 100644 --- a/tests/tests/bsip86_tests.cpp +++ b/tests/tests/bsip86_tests.cpp @@ -92,7 +92,7 @@ BOOST_AUTO_TEST_CASE( hardfork_time_test ) // Should succeed processed_transaction ptx = PUSH_TX(db, trx, ~0); trx.operations.clear(); - proposal_id_type prop_id = ptx.operation_results[0].get(); + proposal_id_type prop_id { ptx.operation_results[0].get() }; // The network fee percent is still 0 BOOST_CHECK_EQUAL( db.get_global_properties().parameters.get_market_fee_network_percent(), 0 ); @@ -131,8 +131,8 @@ BOOST_AUTO_TEST_CASE( fee_sharing_test ) cer, 4, market_fee_percent ); const asset_object& aliceusd = create_user_issued_asset( "ALICEUSD", alice_id(db), 0 ); - asset_id_type alicecoin_id = alicecoin.id; - asset_id_type aliceusd_id = aliceusd.id; + asset_id_type alicecoin_id = alicecoin.get_id(); + asset_id_type aliceusd_id = aliceusd.get_id(); // prepare users' balance issue_uia( alice, aliceusd.amount( 20000000 ) ); diff --git a/tests/tests/bsrm_basic_tests.cpp b/tests/tests/bsrm_basic_tests.cpp index d3ce89aa59..d6047adcc3 100644 --- a/tests/tests/bsrm_basic_tests.cpp +++ b/tests/tests/bsrm_basic_tests.cpp @@ -104,7 +104,7 @@ BOOST_AUTO_TEST_CASE( bsrm_hardfork_protection_test ) // Able to create asset without new data processed_transaction ptx = PUSH_TX(db, trx, ~0); const asset_object& samcoin = db.get(ptx.operation_results[0].get()); - asset_id_type samcoin_id = samcoin.id; + asset_id_type samcoin_id = samcoin.get_id(); BOOST_CHECK_EQUAL( samcoin.options.market_fee_percent, 100 ); BOOST_CHECK_EQUAL( samcoin.bitasset_data(db).options.minimum_feeds, 3 ); @@ -232,7 +232,7 @@ BOOST_AUTO_TEST_CASE( uia_issuer_permissions_update_test ) vector ops; - asset_id_type samcoin_id = create_user_issued_asset( "SAMCOIN", sam_id(db), uiaflag ).id; + asset_id_type samcoin_id = create_user_issued_asset( "SAMCOIN", sam_id(db), uiaflag ).get_id(); // Testing asset_update_operation asset_update_operation auop; @@ 
-411,15 +411,15 @@ BOOST_AUTO_TEST_CASE( bsrm_asset_permissions_flags_extensions_test ) // create a PM with a zero market_fee_percent const asset_object& pm = create_prediction_market( "TESTPM", sam_id, 0, charge_market_fee ); - asset_id_type pm_id = pm.id; + asset_id_type pm_id = pm.get_id(); // create a MPA with a zero market_fee_percent const asset_object& mpa = create_bitasset( "TESTBIT", sam_id, 0, charge_market_fee ); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); // create a UIA with a zero market_fee_percent const asset_object& uia = create_user_issued_asset( "TESTUIA", sam_id(db), charge_market_fee ); - asset_id_type uia_id = uia.id; + asset_id_type uia_id = uia.get_id(); // Prepare for asset update asset_update_operation auop; @@ -572,7 +572,7 @@ BOOST_AUTO_TEST_CASE( bsrm_asset_owner_permissions_update_bsrm ) // create a MPA with a zero market_fee_percent const asset_object& mpa = create_bitasset( "TESTBIT", sam_id, 0, charge_market_fee ); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); BOOST_CHECK( mpa_id(db).can_owner_update_bsrm() ); @@ -719,7 +719,7 @@ BOOST_AUTO_TEST_CASE( close_debt_position_when_no_feed ) // Create asset // create a MPA with a zero market_fee_percent const asset_object& mpa = create_bitasset( "TESTBIT", sam_id, 0, charge_market_fee ); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); // add a price feed publisher and publish a feed update_feed_producers( mpa_id, { feeder_id } ); @@ -738,7 +738,7 @@ BOOST_AUTO_TEST_CASE( close_debt_position_when_no_feed ) // borrow some const call_order_object* call_ptr = borrow( borrower, asset(100000, mpa_id), asset(2000) ); BOOST_REQUIRE( call_ptr ); - call_order_id_type call_id = call_ptr->id; + call_order_id_type call_id = call_ptr->get_id(); // update price feed publisher list so that there is no valid feed update_feed_producers( mpa_id, { sam_id } ); @@ -796,7 +796,7 @@ BOOST_AUTO_TEST_CASE( update_bsrm_after_gs ) // Create asset // create a MPA with a zero market_fee_percent const asset_object& mpa = create_bitasset( "TESTBIT", sam_id, 0, charge_market_fee ); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); using bsrm_type = bitasset_options::black_swan_response_type; @@ -821,7 +821,7 @@ BOOST_AUTO_TEST_CASE( update_bsrm_after_gs ) // borrow some const call_order_object* call_ptr = borrow( borrower, asset(100000, mpa_id), asset(2000) ); BOOST_REQUIRE( call_ptr ); - call_order_id_type call_id = call_ptr->id; + call_order_id_type call_id = call_ptr->get_id(); // publish a new feed so that borrower's debt position is undercollateralized ilog( "Publish a new feed to trigger GS" ); @@ -940,7 +940,7 @@ BOOST_AUTO_TEST_CASE( update_bsrm_after_individual_settlement_to_fund ) trx.operations.push_back( acop ); processed_transaction ptx = PUSH_TX(db, trx, ~0); const asset_object& mpa = db.get(ptx.operation_results[0].get()); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); BOOST_CHECK( mpa.bitasset_data(db).get_black_swan_response_method() == bsrm_type::individual_settlement_to_fund ); @@ -964,10 +964,10 @@ BOOST_AUTO_TEST_CASE( update_bsrm_after_individual_settlement_to_fund ) // borrow some const call_order_object* call_ptr = borrow( borrower, asset(100000, mpa_id), asset(2000) ); BOOST_REQUIRE( call_ptr ); - call_order_id_type call_id = call_ptr->id; + call_order_id_type call_id = call_ptr->get_id(); const call_order_object* call2_ptr = borrow( borrower2, asset(100000, mpa_id), asset(8000) ); BOOST_REQUIRE( 
call2_ptr ); - call_order_id_type call2_id = call2_ptr->id; + call_order_id_type call2_id = call2_ptr->get_id(); // publish a new feed so that borrower's debt position is undercollateralized ilog( "Publish a new feed to trigger settlement" ); @@ -1090,7 +1090,7 @@ BOOST_AUTO_TEST_CASE( update_bsrm_after_individual_settlement_to_order ) trx.operations.push_back( acop ); processed_transaction ptx = PUSH_TX(db, trx, ~0); const asset_object& mpa = db.get(ptx.operation_results[0].get()); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); BOOST_CHECK( mpa.bitasset_data(db).get_black_swan_response_method() == bsrm_type::individual_settlement_to_order ); @@ -1114,10 +1114,10 @@ BOOST_AUTO_TEST_CASE( update_bsrm_after_individual_settlement_to_order ) // borrow some const call_order_object* call_ptr = borrow( borrower, asset(100000, mpa_id), asset(2000) ); BOOST_REQUIRE( call_ptr ); - call_order_id_type call_id = call_ptr->id; + call_order_id_type call_id = call_ptr->get_id(); const call_order_object* call2_ptr = borrow( borrower2, asset(100000, mpa_id), asset(8000) ); BOOST_REQUIRE( call2_ptr ); - call_order_id_type call2_id = call2_ptr->id; + call_order_id_type call2_id = call2_ptr->get_id(); // publish a new feed so that borrower's debt position is undercollateralized ilog( "Publish a new feed to trigger settlement" ); @@ -1248,7 +1248,7 @@ BOOST_AUTO_TEST_CASE( undercollateralized_and_update_bsrm_from_no_settlement ) trx.operations.push_back( acop ); processed_transaction ptx = PUSH_TX(db, trx, ~0); const asset_object& mpa = db.get(ptx.operation_results[0].get()); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); BOOST_CHECK( mpa.bitasset_data(db).get_black_swan_response_method() == bsrm_type::no_settlement ); @@ -1272,10 +1272,10 @@ BOOST_AUTO_TEST_CASE( undercollateralized_and_update_bsrm_from_no_settlement ) // borrow some const call_order_object* call_ptr = borrow( borrower, asset(100000, mpa_id), asset(2000) ); BOOST_REQUIRE( call_ptr ); - call_order_id_type call_id = call_ptr->id; + call_order_id_type call_id = call_ptr->get_id(); const call_order_object* call2_ptr = borrow( borrower2, asset(100000, mpa_id), asset(8000) ); BOOST_REQUIRE( call2_ptr ); - call_order_id_type call2_id = call2_ptr->id; + call_order_id_type call2_id = call2_ptr->get_id(); // publish a new feed so that borrower's debt position is undercollateralized ilog( "Publish a new feed so that the least collateralized short is undercollateralized" ); @@ -1394,7 +1394,7 @@ BOOST_AUTO_TEST_CASE( manual_gs_test ) trx.operations.push_back( acop ); processed_transaction ptx = PUSH_TX(db, trx, ~0); const asset_object& mpa = db.get(ptx.operation_results[0].get()); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); BOOST_CHECK( mpa.bitasset_data(db).get_black_swan_response_method() == static_cast(i) ); BOOST_CHECK( !mpa_id(db).bitasset_data(db).has_individual_settlement() ); @@ -1417,10 +1417,10 @@ BOOST_AUTO_TEST_CASE( manual_gs_test ) // borrow some const call_order_object* call_ptr = borrow( borrower, asset(100000, mpa_id), asset(2000) ); BOOST_REQUIRE( call_ptr ); - call_order_id_type call_id = call_ptr->id; + call_order_id_type call_id = call_ptr->get_id(); const call_order_object* call2_ptr = borrow( borrower2, asset(100000, mpa_id), asset(8000) ); BOOST_REQUIRE( call2_ptr ); - call_order_id_type call2_id = call2_ptr->id; + call_order_id_type call2_id = call2_ptr->get_id(); // publish a new feed so that borrower's debt position is undercollateralized ilog( 
"Publish a new feed so that the least collateralized short is undercollateralized" ); diff --git a/tests/tests/bsrm_indvd_settlement_tests.cpp b/tests/tests/bsrm_indvd_settlement_tests.cpp index 598b740f34..26e567d3ad 100644 --- a/tests/tests/bsrm_indvd_settlement_tests.cpp +++ b/tests/tests/bsrm_indvd_settlement_tests.cpp @@ -94,7 +94,7 @@ BOOST_AUTO_TEST_CASE( individual_settlement_test ) trx.operations.push_back( acop ); processed_transaction ptx = PUSH_TX(db, trx, ~0); const asset_object& mpa = db.get(ptx.operation_results[0].get()); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); if( 0 == i ) BOOST_CHECK( mpa.bitasset_data(db).get_black_swan_response_method() @@ -128,31 +128,31 @@ BOOST_AUTO_TEST_CASE( individual_settlement_test ) // undercollateralization price = 100000:2000 * 1250:1000 = 100000:1600 const call_order_object* call_ptr = borrow( borrower, asset(100000, mpa_id), asset(2000) ); BOOST_REQUIRE( call_ptr ); - call_order_id_type call_id = call_ptr->id; + call_order_id_type call_id = call_ptr->get_id(); // 100000 / 2100 = 47.619047619 // undercollateralization price = 100000:2100 * 1250:1000 = 100000:1680 const call_order_object* call2_ptr = borrow( borrower2, asset(100000, mpa_id), asset(2100) ); BOOST_REQUIRE( call2_ptr ); - call_order_id_type call2_id = call2_ptr->id; + call_order_id_type call2_id = call2_ptr->get_id(); // 100000 / 2200 = 45.454545455 // undercollateralization price = 100000:2200 * 1250:1000 = 100000:1760 const call_order_object* call3_ptr = borrow( borrower3, asset(100000, mpa_id), asset(2200) ); BOOST_REQUIRE( call3_ptr ); - call_order_id_type call3_id = call3_ptr->id; + call_order_id_type call3_id = call3_ptr->get_id(); // 100000 / 2500 = 40 // undercollateralization price = 100000:2500 * 1250:1000 = 100000:2000 const call_order_object* call4_ptr = borrow( borrower4, asset(100000, mpa_id), asset(2500) ); BOOST_REQUIRE( call4_ptr ); - call_order_id_type call4_id = call4_ptr->id; + call_order_id_type call4_id = call4_ptr->get_id(); // 100000 / 2240 = 44.642857143 // undercollateralization price = 100000:2240 * 1250:1000 = 100000:1792 const call_order_object* call5_ptr = borrow( borrower5, asset(1000000, mpa_id), asset(22400) ); BOOST_REQUIRE( call5_ptr ); - call_order_id_type call5_id = call5_ptr->id; + call_order_id_type call5_id = call5_ptr->get_id(); // Transfer funds to sellers transfer( borrower, seller, asset(100000,mpa_id) ); @@ -191,36 +191,36 @@ BOOST_AUTO_TEST_CASE( individual_settlement_test ) // seller sells some const limit_order_object* sell_low = create_sell_order( seller, asset(10000,mpa_id), asset(190) ); BOOST_REQUIRE( sell_low ); - limit_order_id_type sell_low_id = sell_low->id; + limit_order_id_type sell_low_id = sell_low->get_id(); BOOST_CHECK_EQUAL( sell_low_id(db).for_sale.value, 10000 ); // seller sells some const limit_order_object* sell_mid = create_sell_order( seller, asset(100000,mpa_id), asset(2000) ); BOOST_REQUIRE( sell_mid ); - limit_order_id_type sell_mid_id = sell_mid->id; + limit_order_id_type sell_mid_id = sell_mid->get_id(); BOOST_CHECK_EQUAL( sell_mid_id(db).for_sale.value, 100000 ); // seller4 sells some const limit_order_object* sell_mid2 = create_sell_order( seller4, asset(20000,mpa_id), asset(439) ); BOOST_REQUIRE( sell_mid2 ); - limit_order_id_type sell_mid2_id = sell_mid2->id; + limit_order_id_type sell_mid2_id = sell_mid2->get_id(); BOOST_CHECK_EQUAL( sell_mid2_id(db).for_sale.value, 20000 ); // seller sells some const limit_order_object* sell_high = create_sell_order( seller, 
asset(100000,mpa_id), asset(2400) ); BOOST_REQUIRE( sell_high ); - limit_order_id_type sell_high_id = sell_high->id; + limit_order_id_type sell_high_id = sell_high->get_id(); BOOST_CHECK_EQUAL( sell_high_id(db).for_sale.value, 100000 ); // seller2 settles auto result = force_settle( seller2, asset(50000,mpa_id) ); - force_settlement_id_type settle_id = *result.get().value.new_objects->begin(); + force_settlement_id_type settle_id { *result.get().value.new_objects->begin() }; BOOST_REQUIRE( db.find( settle_id ) ); BOOST_CHECK_EQUAL( settle_id(db).balance.amount.value, 50000 ); // seller3 settles result = force_settle( seller3, asset(10000,mpa_id) ); - force_settlement_id_type settle2_id = *result.get().value.new_objects->begin(); + force_settlement_id_type settle2_id { *result.get().value.new_objects->begin() }; BOOST_REQUIRE( db.find( settle2_id ) ); BOOST_CHECK_EQUAL( settle2_id(db).balance.amount.value, 10000 ); @@ -529,7 +529,7 @@ BOOST_AUTO_TEST_CASE( individual_settlement_test ) BOOST_TEST_MESSAGE( "Seller4 sells some" ); const limit_order_object* sell_mid3 = create_sell_order( seller4, asset(20000,mpa_id), asset(439) ); BOOST_REQUIRE( sell_mid3 ); - limit_order_id_type sell_mid3_id = sell_mid3->id; + limit_order_id_type sell_mid3_id = sell_mid3->get_id(); auto check_result_2 = [&] { @@ -589,7 +589,7 @@ BOOST_AUTO_TEST_CASE( individual_settlement_to_fund_and_disable_force_settle_tes // Create asset asset_id_type samcoin_id = create_user_issued_asset( "SAMCOIN", sam_id(db), charge_market_fee, price(asset(1, asset_id_type(1)), asset(1)), - 2, 100 ).id; // fee 1% + 2, 100 ).get_id(); // fee 1% issue_uia( borrower, asset(init_amount, samcoin_id) ); issue_uia( borrower2, asset(init_amount, samcoin_id) ); @@ -614,7 +614,7 @@ BOOST_AUTO_TEST_CASE( individual_settlement_to_fund_and_disable_force_settle_tes trx.operations.push_back( acop ); processed_transaction ptx = PUSH_TX(db, trx, ~0); const asset_object& mpa = db.get(ptx.operation_results[0].get()); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); BOOST_CHECK( mpa.bitasset_data(db).get_black_swan_response_method() == bsrm_type::individual_settlement_to_fund ); @@ -643,12 +643,12 @@ BOOST_AUTO_TEST_CASE( individual_settlement_to_fund_and_disable_force_settle_tes // undercollateralization price = 100000:2000 * 1250:1000 = 100000:1600 const call_order_object* call_ptr = borrow( borrower, asset(100000, mpa_id), asset(2000, samcoin_id) ); BOOST_REQUIRE( call_ptr ); - call_order_id_type call_id = call_ptr->id; + call_order_id_type call_id = call_ptr->get_id(); // undercollateralization price = 100000:2500 * 1250:1000 = 100000:2000 const call_order_object* call2_ptr = borrow( borrower2, asset(100000, mpa_id), asset(2500, samcoin_id) ); BOOST_REQUIRE( call2_ptr ); - call_order_id_type call2_id = call2_ptr->id; + call_order_id_type call2_id = call2_ptr->get_id(); // Transfer funds to sellers transfer( borrower, seller, asset(100000,mpa_id) ); @@ -875,7 +875,7 @@ BOOST_AUTO_TEST_CASE( individual_settlement_to_fund_and_taking_test ) trx.operations.push_back( acop ); processed_transaction ptx = PUSH_TX(db, trx, ~0); const asset_object& mpa = db.get(ptx.operation_results[0].get()); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); BOOST_CHECK( mpa.bitasset_data(db).get_black_swan_response_method() == bsrm_type::individual_settlement_to_fund ); @@ -904,22 +904,22 @@ BOOST_AUTO_TEST_CASE( individual_settlement_to_fund_and_taking_test ) // undercollateralization price = 100000:2000 * 1250:1000 = 100000:1600 
const call_order_object* call_ptr = borrow( borrower, asset(100000, mpa_id), asset(2000) ); BOOST_REQUIRE( call_ptr ); - call_order_id_type call_id = call_ptr->id; + call_order_id_type call_id = call_ptr->get_id(); // undercollateralization price = 100000:2100 * 1250:1000 = 100000:1680 const call_order_object* call2_ptr = borrow( borrower2, asset(100000, mpa_id), asset(2100) ); BOOST_REQUIRE( call2_ptr ); - call_order_id_type call2_id = call2_ptr->id; + call_order_id_type call2_id = call2_ptr->get_id(); // undercollateralization price = 100000:2200 * 1250:1000 = 100000:1760 const call_order_object* call3_ptr = borrow( borrower3, asset(100000, mpa_id), asset(2200) ); BOOST_REQUIRE( call3_ptr ); - call_order_id_type call3_id = call3_ptr->id; + call_order_id_type call3_id = call3_ptr->get_id(); // undercollateralization price = 100000:2500 * 1250:1000 = 100000:2000 const call_order_object* call4_ptr = borrow( borrower4, asset(100000, mpa_id), asset(2500) ); BOOST_REQUIRE( call4_ptr ); - call_order_id_type call4_id = call4_ptr->id; + call_order_id_type call4_id = call4_ptr->get_id(); // Transfer funds to sellers transfer( borrower, seller, asset(100000,mpa_id) ); @@ -978,7 +978,7 @@ BOOST_AUTO_TEST_CASE( individual_settlement_to_fund_and_taking_test ) BOOST_CHECK_THROW( borrow( borrower5, asset(100000, mpa_id), asset(3135) ), fc::exception ); const call_order_object* call5_ptr = borrow( borrower5, asset(100000, mpa_id), asset(3136) ); BOOST_REQUIRE( call5_ptr ); - call_order_id_type call5_id = call5_ptr->id; + call_order_id_type call5_id = call5_ptr->get_id(); BOOST_CHECK_EQUAL( call5_id(db).debt.value, 100000 ); BOOST_CHECK_EQUAL( call5_id(db).collateral.value, 3136 ); @@ -1012,7 +1012,7 @@ BOOST_AUTO_TEST_CASE( individual_settlement_to_fund_and_taking_test ) limit_ptr = create_sell_order( seller, asset(100000,mpa_id), asset(2000) ); // the limit order is not filled BOOST_REQUIRE( limit_ptr ); - limit_order_id_type limit_id = limit_ptr->id; + limit_order_id_type limit_id = limit_ptr->get_id(); BOOST_CHECK_EQUAL( limit_ptr->for_sale.value, 100000 ); @@ -1177,7 +1177,7 @@ BOOST_AUTO_TEST_CASE( individual_settlement_to_order_and_taking_test ) trx.operations.push_back( acop ); processed_transaction ptx = PUSH_TX(db, trx, ~0); const asset_object& mpa = db.get(ptx.operation_results[0].get()); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); BOOST_CHECK( mpa.bitasset_data(db).get_black_swan_response_method() == bsrm_type::individual_settlement_to_order ); @@ -1205,22 +1205,22 @@ BOOST_AUTO_TEST_CASE( individual_settlement_to_order_and_taking_test ) // undercollateralization price = 100000:2000 * 1250:1000 = 100000:1600 const call_order_object* call_ptr = borrow( borrower, asset(100000, mpa_id), asset(2000) ); BOOST_REQUIRE( call_ptr ); - call_order_id_type call_id = call_ptr->id; + call_order_id_type call_id = call_ptr->get_id(); // undercollateralization price = 100000:2100 * 1250:1000 = 100000:1680 const call_order_object* call2_ptr = borrow( borrower2, asset(100000, mpa_id), asset(2100) ); BOOST_REQUIRE( call2_ptr ); - call_order_id_type call2_id = call2_ptr->id; + call_order_id_type call2_id = call2_ptr->get_id(); // undercollateralization price = 100000:2200 * 1250:1000 = 100000:1760 const call_order_object* call3_ptr = borrow( borrower3, asset(100000, mpa_id), asset(2200) ); BOOST_REQUIRE( call3_ptr ); - call_order_id_type call3_id = call3_ptr->id; + call_order_id_type call3_id = call3_ptr->get_id(); // undercollateralization price = 100000:2500 * 1250:1000 = 100000:2000 
const call_order_object* call4_ptr = borrow( borrower4, asset(100000, mpa_id), asset(2500) ); BOOST_REQUIRE( call4_ptr ); - call_order_id_type call4_id = call4_ptr->id; + call_order_id_type call4_id = call4_ptr->get_id(); // Transfer funds to sellers transfer( borrower, seller, asset(100000,mpa_id) ); @@ -1320,7 +1320,7 @@ BOOST_AUTO_TEST_CASE( individual_settlement_to_order_and_taking_test ) // borrower buys at higher price const limit_order_object* buy_high = create_sell_order( borrower, asset(10), asset(100,mpa_id) ); BOOST_CHECK( buy_high ); - limit_order_id_type buy_high_id = buy_high->id; + limit_order_id_type buy_high_id = buy_high->get_id(); // seller sells some, this will match buy_high, // and when it matches call4, it will be cancelled since it is too small @@ -1348,7 +1348,7 @@ BOOST_AUTO_TEST_CASE( individual_settlement_to_order_and_taking_test ) // borrower buys at higher price buy_high = create_sell_order( borrower, asset(10), asset(100,mpa_id) ); BOOST_CHECK( buy_high ); - buy_high_id = buy_high->id; + buy_high_id = buy_high->get_id(); // seller sells some, this will match buy_high, // and when it matches the settled debt, it will be cancelled since it is too small @@ -1473,7 +1473,7 @@ BOOST_AUTO_TEST_CASE( settle_order_cancel_due_to_no_debt_position ) trx.operations.push_back( acop ); processed_transaction ptx = PUSH_TX(db, trx, ~0); const asset_object& mpa = db.get(ptx.operation_results[0].get()); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); BOOST_CHECK( mpa.bitasset_data(db).get_black_swan_response_method() == bsrm_type::individual_settlement_to_order ); @@ -1484,7 +1484,7 @@ BOOST_AUTO_TEST_CASE( settle_order_cancel_due_to_no_debt_position ) trx.operations.push_back( acop ); ptx = PUSH_TX(db, trx, ~0); const asset_object& mpa2 = db.get(ptx.operation_results[0].get()); - asset_id_type mpa2_id = mpa2.id; + asset_id_type mpa2_id = mpa2.get_id(); // add a price feed publisher and publish a feed update_feed_producers( mpa_id, { feeder_id } ); @@ -1518,12 +1518,12 @@ BOOST_AUTO_TEST_CASE( settle_order_cancel_due_to_no_debt_position ) // undercollateralization price = 100000:2000 * 1250:1000 = 100000:1600 const call_order_object* call_ptr = borrow( borrower, asset(100000, mpa_id), asset(2000) ); BOOST_REQUIRE( call_ptr ); - call_order_id_type call_id = call_ptr->id; + call_order_id_type call_id = call_ptr->get_id(); // undercollateralization price = 100000:2100 * 1250:1000 = 100000:1680 const call_order_object* call2_ptr = borrow( borrower, asset(100000, mpa2_id), asset(2100) ); BOOST_REQUIRE( call2_ptr ); - call_order_id_type call2_id = call2_ptr->id; + call_order_id_type call2_id = call2_ptr->get_id(); // Transfer funds to sellers transfer( borrower, seller, asset(100000,mpa_id) ); @@ -1564,13 +1564,13 @@ BOOST_AUTO_TEST_CASE( settle_order_cancel_due_to_no_debt_position ) // seller settles some auto result = force_settle( seller, asset(11100,mpa_id) ); - force_settlement_id_type settle_id = *result.get().value.new_objects->begin(); + force_settlement_id_type settle_id { *result.get().value.new_objects->begin() }; BOOST_REQUIRE( db.find(settle_id) ); BOOST_CHECK_EQUAL( settle_id(db).balance.amount.value, 11100 ); result = force_settle( seller, asset(11100,mpa2_id) ); - force_settlement_id_type settle2_id = *result.get().value.new_objects->begin(); + force_settlement_id_type settle2_id { *result.get().value.new_objects->begin() }; BOOST_REQUIRE( db.find(settle2_id) ); BOOST_CHECK_EQUAL( settle2_id(db).balance.amount.value, 11100 ); diff --git 
a/tests/tests/bsrm_no_settlement_tests.cpp b/tests/tests/bsrm_no_settlement_tests.cpp index 4487c9050b..db7d304da9 100644 --- a/tests/tests/bsrm_no_settlement_tests.cpp +++ b/tests/tests/bsrm_no_settlement_tests.cpp @@ -76,7 +76,7 @@ BOOST_AUTO_TEST_CASE( no_settlement_maker_margin_call_test ) trx.operations.push_back( acop ); processed_transaction ptx = PUSH_TX(db, trx, ~0); const asset_object& mpa = db.get(ptx.operation_results[0].get()); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); BOOST_CHECK( mpa.bitasset_data(db).get_black_swan_response_method() == bsrm_type::no_settlement ); @@ -99,11 +99,11 @@ BOOST_AUTO_TEST_CASE( no_settlement_maker_margin_call_test ) // borrowers borrow some const call_order_object* call_ptr = borrow( borrower, asset(1000, mpa_id), asset(2000) ); BOOST_REQUIRE( call_ptr ); - call_order_id_type call_id = call_ptr->id; + call_order_id_type call_id = call_ptr->get_id(); const call_order_object* call2_ptr = borrow( borrower2, asset(1000, mpa_id), asset(2100) ); BOOST_REQUIRE( call2_ptr ); - call_order_id_type call2_id = call2_ptr->id; + call_order_id_type call2_id = call2_ptr->get_id(); // publish a new feed so that borrower's debt position is undercollateralized f.settlement_price = price( asset(10,mpa_id), asset(22) ); @@ -121,7 +121,7 @@ BOOST_AUTO_TEST_CASE( no_settlement_maker_margin_call_test ) // borrower3 create debt position right above ICR const call_order_object* call3_ptr = borrow( borrower3, asset(1000, mpa_id), asset(4181) ); BOOST_REQUIRE( call3_ptr ); - call_order_id_type call3_id = call3_ptr->id; + call_order_id_type call3_id = call3_ptr->get_id(); // borrower adjust debt position to right at MSSR // 1000 * (22/10) * 1.25 = 2750 @@ -169,19 +169,19 @@ BOOST_AUTO_TEST_CASE( no_settlement_maker_margin_call_test ) // seller2 sells some, due to MCFR, this order won't be filled in the beginning, but will be filled later const limit_order_object* sell_mid = create_sell_order( seller2, asset(100,mpa_id), asset(210) ); BOOST_REQUIRE( sell_mid ); - limit_order_id_type sell_mid_id = sell_mid->id; + limit_order_id_type sell_mid_id = sell_mid->get_id(); BOOST_CHECK_EQUAL( sell_mid_id(db).for_sale.value, 100 ); // seller2 sells more, this order won't be filled in the beginning either const limit_order_object* sell_high = create_sell_order( seller2, asset(100,mpa_id), asset(275) ); BOOST_REQUIRE( sell_high ); - limit_order_id_type sell_high_id = sell_high->id; + limit_order_id_type sell_high_id = sell_high->get_id(); BOOST_CHECK_EQUAL( sell_high_id(db).for_sale.value, 100 ); // seller2 sells more, this order won't be filled const limit_order_object* sell_highest = create_sell_order( seller2, asset(100,mpa_id), asset(285) ); BOOST_REQUIRE( sell_highest ); - limit_order_id_type sell_highest_id = sell_highest->id; + limit_order_id_type sell_highest_id = sell_highest->get_id(); BOOST_CHECK_EQUAL( sell_highest_id(db).for_sale.value, 100 ); BOOST_CHECK_EQUAL( get_balance( seller_id, mpa_id ), 2500 ); @@ -264,7 +264,7 @@ BOOST_AUTO_TEST_CASE( no_settlement_maker_margin_call_test ) // seller sells more sell_low = create_sell_order( seller, asset(1000,mpa_id), asset(100) ); BOOST_REQUIRE( sell_low ); - limit_order_id_type sell_low_id = sell_low->id; + limit_order_id_type sell_low_id = sell_low->get_id(); auto final_check = [&] { @@ -345,7 +345,7 @@ BOOST_AUTO_TEST_CASE( no_settlement_maker_small_limit_taker_test ) trx.operations.push_back( acop ); processed_transaction ptx = PUSH_TX(db, trx, ~0); const asset_object& mpa = 
db.get(ptx.operation_results[0].get()); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); BOOST_CHECK( mpa.bitasset_data(db).get_black_swan_response_method() == bsrm_type::no_settlement ); @@ -368,11 +368,11 @@ BOOST_AUTO_TEST_CASE( no_settlement_maker_small_limit_taker_test ) // borrowers borrow some const call_order_object* call_ptr = borrow( borrower, asset(100000, mpa_id), asset(2000) ); BOOST_REQUIRE( call_ptr ); - call_order_id_type call_id = call_ptr->id; + call_order_id_type call_id = call_ptr->get_id(); const call_order_object* call2_ptr = borrow( borrower2, asset(100000, mpa_id), asset(2100) ); BOOST_REQUIRE( call2_ptr ); - call_order_id_type call2_id = call2_ptr->id; + call_order_id_type call2_id = call2_ptr->get_id(); // publish a new feed so that borrower's debt position is undercollateralized f.settlement_price = price( asset(1000,mpa_id), asset(22) ); @@ -390,7 +390,7 @@ BOOST_AUTO_TEST_CASE( no_settlement_maker_small_limit_taker_test ) // borrower3 create debt position right above ICR const call_order_object* call3_ptr = borrow( borrower3, asset(100000, mpa_id), asset(4181) ); BOOST_REQUIRE( call3_ptr ); - call_order_id_type call3_id = call3_ptr->id; + call_order_id_type call3_id = call3_ptr->get_id(); // borrower adjust debt position to right at MSSR // 100000 * (22/1000) * 1.25 = 2750 @@ -438,19 +438,19 @@ BOOST_AUTO_TEST_CASE( no_settlement_maker_small_limit_taker_test ) // seller2 sells some, due to MCFR, this order won't be filled in the beginning, but will be filled later const limit_order_object* sell_mid = create_sell_order( seller2, asset(10000,mpa_id), asset(210) ); BOOST_REQUIRE( sell_mid ); - limit_order_id_type sell_mid_id = sell_mid->id; + limit_order_id_type sell_mid_id = sell_mid->get_id(); BOOST_CHECK_EQUAL( sell_mid_id(db).for_sale.value, 10000 ); // seller2 sells more, this order won't be filled in the beginning either const limit_order_object* sell_high = create_sell_order( seller2, asset(10000,mpa_id), asset(275) ); BOOST_REQUIRE( sell_high ); - limit_order_id_type sell_high_id = sell_high->id; + limit_order_id_type sell_high_id = sell_high->get_id(); BOOST_CHECK_EQUAL( sell_high_id(db).for_sale.value, 10000 ); // seller2 sells more, this order won't be filled const limit_order_object* sell_highest = create_sell_order( seller2, asset(10000,mpa_id), asset(285) ); BOOST_REQUIRE( sell_highest ); - limit_order_id_type sell_highest_id = sell_highest->id; + limit_order_id_type sell_highest_id = sell_highest->get_id(); BOOST_CHECK_EQUAL( sell_highest_id(db).for_sale.value, 10000 ); BOOST_CHECK_EQUAL( get_balance( seller_id, mpa_id ), 250000 ); @@ -610,7 +610,7 @@ BOOST_AUTO_TEST_CASE( no_settlement_maker_force_settle_test ) trx.operations.push_back( acop ); processed_transaction ptx = PUSH_TX(db, trx, ~0); const asset_object& mpa = db.get(ptx.operation_results[0].get()); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); BOOST_CHECK( mpa.bitasset_data(db).get_black_swan_response_method() == bsrm_type::no_settlement ); @@ -633,11 +633,11 @@ BOOST_AUTO_TEST_CASE( no_settlement_maker_force_settle_test ) // borrowers borrow some const call_order_object* call_ptr = borrow( borrower, asset(1000, mpa_id), asset(2000) ); BOOST_REQUIRE( call_ptr ); - call_order_id_type call_id = call_ptr->id; + call_order_id_type call_id = call_ptr->get_id(); const call_order_object* call2_ptr = borrow( borrower2, asset(1000, mpa_id), asset(2100) ); BOOST_REQUIRE( call2_ptr ); - call_order_id_type call2_id = call2_ptr->id; + call_order_id_type 
call2_id = call2_ptr->get_id(); // publish a new feed so that borrower's debt position is undercollateralized f.settlement_price = price( asset(10,mpa_id), asset(22) ); @@ -655,7 +655,7 @@ BOOST_AUTO_TEST_CASE( no_settlement_maker_force_settle_test ) // borrower3 create debt position right above ICR const call_order_object* call3_ptr = borrow( borrower3, asset(1000, mpa_id), asset(4181) ); BOOST_REQUIRE( call3_ptr ); - call_order_id_type call3_id = call3_ptr->id; + call_order_id_type call3_id = call3_ptr->get_id(); // borrower adjust debt position to right at MSSR // 1000 * (22/10) * 1.25 = 2750 @@ -703,19 +703,19 @@ BOOST_AUTO_TEST_CASE( no_settlement_maker_force_settle_test ) // seller2 sells some, due to MCFR, this order won't be filled in the beginning, but will be filled later const limit_order_object* sell_mid = create_sell_order( seller2, asset(100,mpa_id), asset(210) ); BOOST_REQUIRE( sell_mid ); - limit_order_id_type sell_mid_id = sell_mid->id; + limit_order_id_type sell_mid_id = sell_mid->get_id(); BOOST_CHECK_EQUAL( sell_mid_id(db).for_sale.value, 100 ); // seller2 sells more, this order won't be filled in the beginning either const limit_order_object* sell_high = create_sell_order( seller2, asset(100,mpa_id), asset(275) ); BOOST_REQUIRE( sell_high ); - limit_order_id_type sell_high_id = sell_high->id; + limit_order_id_type sell_high_id = sell_high->get_id(); BOOST_CHECK_EQUAL( sell_high_id(db).for_sale.value, 100 ); // seller2 sells more, this order won't be filled const limit_order_object* sell_highest = create_sell_order( seller2, asset(100,mpa_id), asset(285) ); BOOST_REQUIRE( sell_highest ); - limit_order_id_type sell_highest_id = sell_highest->id; + limit_order_id_type sell_highest_id = sell_highest->get_id(); BOOST_CHECK_EQUAL( sell_highest_id(db).for_sale.value, 100 ); BOOST_CHECK_EQUAL( get_balance( seller_id, mpa_id ), 2500 ); @@ -725,7 +725,7 @@ BOOST_AUTO_TEST_CASE( no_settlement_maker_force_settle_test ) // seller settles some auto result = force_settle( seller, asset(111,mpa_id) ); - force_settlement_id_type settle_id = *result.get().value.new_objects->begin(); + force_settlement_id_type settle_id { *result.get().value.new_objects->begin() }; BOOST_CHECK( !db.find(settle_id) ); BOOST_CHECK_EQUAL( get_balance( seller_id, mpa_id ), 2389 ); // 2500 - 111 @@ -888,7 +888,7 @@ BOOST_AUTO_TEST_CASE( no_settlement_maker_small_settle_taker_test ) trx.operations.push_back( acop ); processed_transaction ptx = PUSH_TX(db, trx, ~0); const asset_object& mpa = db.get(ptx.operation_results[0].get()); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); BOOST_CHECK( mpa.bitasset_data(db).get_black_swan_response_method() == bsrm_type::no_settlement ); @@ -911,11 +911,11 @@ BOOST_AUTO_TEST_CASE( no_settlement_maker_small_settle_taker_test ) // borrowers borrow some const call_order_object* call_ptr = borrow( borrower, asset(100000, mpa_id), asset(2000) ); BOOST_REQUIRE( call_ptr ); - call_order_id_type call_id = call_ptr->id; + call_order_id_type call_id = call_ptr->get_id(); const call_order_object* call2_ptr = borrow( borrower2, asset(100000, mpa_id), asset(2100) ); BOOST_REQUIRE( call2_ptr ); - call_order_id_type call2_id = call2_ptr->id; + call_order_id_type call2_id = call2_ptr->get_id(); // publish a new feed so that borrower's debt position is undercollateralized f.settlement_price = price( asset(1000,mpa_id), asset(22) ); @@ -933,7 +933,7 @@ BOOST_AUTO_TEST_CASE( no_settlement_maker_small_settle_taker_test ) // borrower3 create debt position right above ICR 
const call_order_object* call3_ptr = borrow( borrower3, asset(100000, mpa_id), asset(4181) ); BOOST_REQUIRE( call3_ptr ); - call_order_id_type call3_id = call3_ptr->id; + call_order_id_type call3_id = call3_ptr->get_id(); // borrower adjust debt position to right at MSSR // 100000 * (22/1000) * 1.25 = 2750 @@ -981,19 +981,19 @@ BOOST_AUTO_TEST_CASE( no_settlement_maker_small_settle_taker_test ) // seller2 sells some, due to MCFR, this order won't be filled in the beginning, but will be filled later const limit_order_object* sell_mid = create_sell_order( seller2, asset(10000,mpa_id), asset(210) ); BOOST_REQUIRE( sell_mid ); - limit_order_id_type sell_mid_id = sell_mid->id; + limit_order_id_type sell_mid_id = sell_mid->get_id(); BOOST_CHECK_EQUAL( sell_mid_id(db).for_sale.value, 10000 ); // seller2 sells more, this order won't be filled in the beginning either const limit_order_object* sell_high = create_sell_order( seller2, asset(10000,mpa_id), asset(275) ); BOOST_REQUIRE( sell_high ); - limit_order_id_type sell_high_id = sell_high->id; + limit_order_id_type sell_high_id = sell_high->get_id(); BOOST_CHECK_EQUAL( sell_high_id(db).for_sale.value, 10000 ); // seller2 sells more, this order won't be filled const limit_order_object* sell_highest = create_sell_order( seller2, asset(10000,mpa_id), asset(285) ); BOOST_REQUIRE( sell_highest ); - limit_order_id_type sell_highest_id = sell_highest->id; + limit_order_id_type sell_highest_id = sell_highest->get_id(); BOOST_CHECK_EQUAL( sell_highest_id(db).for_sale.value, 10000 ); BOOST_CHECK_EQUAL( get_balance( seller_id, mpa_id ), 250000 ); @@ -1003,7 +1003,7 @@ BOOST_AUTO_TEST_CASE( no_settlement_maker_small_settle_taker_test ) // seller settles some auto result = force_settle( seller, asset(11100,mpa_id) ); - force_settlement_id_type settle_id = *result.get().value.new_objects->begin(); + force_settlement_id_type settle_id { *result.get().value.new_objects->begin() }; auto final_check = [&] { @@ -1099,7 +1099,7 @@ BOOST_AUTO_TEST_CASE( no_settlement_taker_test ) trx.operations.push_back( acop ); processed_transaction ptx = PUSH_TX(db, trx, ~0); const asset_object& mpa = db.get(ptx.operation_results[0].get()); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); BOOST_CHECK( mpa.bitasset_data(db).get_black_swan_response_method() == bsrm_type::no_settlement ); @@ -1122,16 +1122,16 @@ BOOST_AUTO_TEST_CASE( no_settlement_taker_test ) // borrowers borrow some const call_order_object* call_ptr = borrow( borrower, asset(1000, mpa_id), asset(2750) ); BOOST_REQUIRE( call_ptr ); - call_order_id_type call_id = call_ptr->id; + call_order_id_type call_id = call_ptr->get_id(); const call_order_object* call2_ptr = borrow( borrower2, asset(1000, mpa_id), asset(2100) ); BOOST_REQUIRE( call2_ptr ); - call_order_id_type call2_id = call2_ptr->id; + call_order_id_type call2_id = call2_ptr->get_id(); // 1000 * (22/10) * 1.9 = 4180 const call_order_object* call3_ptr = borrow( borrower3, asset(1000, mpa_id), asset(4181) ); BOOST_REQUIRE( call3_ptr ); - call_order_id_type call3_id = call3_ptr->id; + call_order_id_type call3_id = call3_ptr->get_id(); // Transfer funds to sellers transfer( borrower, seller, asset(1000,mpa_id) ); @@ -1145,21 +1145,21 @@ BOOST_AUTO_TEST_CASE( no_settlement_taker_test ) // seller2 sells some const limit_order_object* sell_highest = create_sell_order( seller2, asset(100,mpa_id), asset(285) ); BOOST_REQUIRE( sell_highest ); - limit_order_id_type sell_highest_id = sell_highest->id; + limit_order_id_type sell_highest_id = 
sell_highest->get_id(); BOOST_CHECK_EQUAL( sell_highest_id(db).for_sale.value, 100 ); expected_seller2_balance_mpa -= 100; // seller2 sells more const limit_order_object* sell_high = create_sell_order( seller2, asset(100,mpa_id), asset(275) ); BOOST_REQUIRE( sell_high ); - limit_order_id_type sell_high_id = sell_high->id; + limit_order_id_type sell_high_id = sell_high->get_id(); BOOST_CHECK_EQUAL( sell_high_id(db).for_sale.value, 100 ); expected_seller2_balance_mpa -= 100; // seller2 sells more, due to MCFR, this order won't be filled if no order is selling lower const limit_order_object* sell_mid = create_sell_order( seller2, asset(100,mpa_id), asset(210) ); BOOST_REQUIRE( sell_mid ); - limit_order_id_type sell_mid_id = sell_mid->id; + limit_order_id_type sell_mid_id = sell_mid->get_id(); BOOST_CHECK_EQUAL( sell_mid_id(db).for_sale.value, 100 ); expected_seller2_balance_mpa -= 100; @@ -1176,7 +1176,7 @@ BOOST_AUTO_TEST_CASE( no_settlement_taker_test ) { sell_low = create_sell_order( seller, asset(111,mpa_id), asset(230) ); BOOST_REQUIRE( sell_low ); - sell_low_id = sell_low->id; + sell_low_id = sell_low->get_id(); BOOST_CHECK_EQUAL( sell_low_id(db).for_sale.value, 111 ); expected_seller_balance_mpa -= 111; } @@ -1184,7 +1184,7 @@ BOOST_AUTO_TEST_CASE( no_settlement_taker_test ) { sell_low = create_sell_order( seller, asset(111,mpa_id), asset(210) ); BOOST_REQUIRE( sell_low ); - sell_low_id = sell_low->id; + sell_low_id = sell_low->get_id(); BOOST_CHECK_EQUAL( sell_low_id(db).for_sale.value, 111 ); expected_seller_balance_mpa -= 111; } @@ -1192,7 +1192,7 @@ BOOST_AUTO_TEST_CASE( no_settlement_taker_test ) { sell_low = create_sell_order( seller, asset(900,mpa_id), asset(1870) ); BOOST_REQUIRE( sell_low ); - sell_low_id = sell_low->id; + sell_low_id = sell_low->get_id(); BOOST_CHECK_EQUAL( sell_low_id(db).for_sale.value, 900 ); expected_seller_balance_mpa -= 900; } @@ -1200,7 +1200,7 @@ BOOST_AUTO_TEST_CASE( no_settlement_taker_test ) { sell_low = create_sell_order( seller, asset(920,mpa_id), asset(1870) ); BOOST_REQUIRE( sell_low ); - sell_low_id = sell_low->id; + sell_low_id = sell_low->get_id(); BOOST_CHECK_EQUAL( sell_low_id(db).for_sale.value, 920 ); expected_seller_balance_mpa -= 920; } @@ -1208,7 +1208,7 @@ BOOST_AUTO_TEST_CASE( no_settlement_taker_test ) { sell_low = create_sell_order( seller, asset(1000,mpa_id), asset(1870) ); BOOST_REQUIRE( sell_low ); - sell_low_id = sell_low->id; + sell_low_id = sell_low->get_id(); BOOST_CHECK_EQUAL( sell_low_id(db).for_sale.value, 1000 ); expected_seller_balance_mpa -= 1000; } @@ -1216,7 +1216,7 @@ BOOST_AUTO_TEST_CASE( no_settlement_taker_test ) { sell_low = create_sell_order( seller, asset(1050,mpa_id), asset(1870) ); BOOST_REQUIRE( sell_low ); - sell_low_id = sell_low->id; + sell_low_id = sell_low->get_id(); BOOST_CHECK_EQUAL( sell_low_id(db).for_sale.value, 1050 ); expected_seller_balance_mpa -= 1050; } @@ -1224,7 +1224,7 @@ BOOST_AUTO_TEST_CASE( no_settlement_taker_test ) { sell_low = create_sell_order( seller, asset(1800,mpa_id), asset(1870*2) ); BOOST_REQUIRE( sell_low ); - sell_low_id = sell_low->id; + sell_low_id = sell_low->get_id(); BOOST_CHECK_EQUAL( sell_low_id(db).for_sale.value, 1800 ); expected_seller_balance_mpa -= 1800; } @@ -1232,7 +1232,7 @@ BOOST_AUTO_TEST_CASE( no_settlement_taker_test ) { sell_low = create_sell_order( seller, asset(2000,mpa_id), asset(1870) ); BOOST_REQUIRE( sell_low ); - sell_low_id = sell_low->id; + sell_low_id = sell_low->get_id(); BOOST_CHECK_EQUAL( sell_low_id(db).for_sale.value, 2000 ); 
expected_seller_balance_mpa -= 2000; } @@ -2218,7 +2218,7 @@ BOOST_AUTO_TEST_CASE( no_settlement_update_debt_test ) trx.operations.push_back( acop ); processed_transaction ptx = PUSH_TX(db, trx, ~0); const asset_object& mpa = db.get(ptx.operation_results[0].get()); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); BOOST_CHECK( mpa.bitasset_data(db).get_black_swan_response_method() == bsrm_type::no_settlement ); @@ -2241,11 +2241,11 @@ BOOST_AUTO_TEST_CASE( no_settlement_update_debt_test ) // borrowers borrow some const call_order_object* call_ptr = borrow( borrower, asset(100000, mpa_id), asset(2000) ); BOOST_REQUIRE( call_ptr ); - call_order_id_type call_id = call_ptr->id; + call_order_id_type call_id = call_ptr->get_id(); const call_order_object* call2_ptr = borrow( borrower2, asset(100000, mpa_id), asset(2100) ); BOOST_REQUIRE( call2_ptr ); - call_order_id_type call2_id = call2_ptr->id; + call_order_id_type call2_id = call2_ptr->get_id(); // publish a new feed so that borrower's debt position is undercollateralized f.settlement_price = price( asset(1000,mpa_id), asset(22) ); @@ -2263,7 +2263,7 @@ BOOST_AUTO_TEST_CASE( no_settlement_update_debt_test ) // borrower3 create debt position right above ICR const call_order_object* call3_ptr = borrow( borrower3, asset(100000, mpa_id), asset(4181) ); BOOST_REQUIRE( call3_ptr ); - call_order_id_type call3_id = call3_ptr->id; + call_order_id_type call3_id = call3_ptr->get_id(); // borrower adjust debt position to right at MSSR // 100000 * (22/1000) * 1.25 = 2750 @@ -2308,19 +2308,19 @@ BOOST_AUTO_TEST_CASE( no_settlement_update_debt_test ) // seller2 sells some, due to MCFR, this order won't be filled in the beginning, but will be filled later const limit_order_object* sell_mid = create_sell_order( seller2, asset(10000,mpa_id), asset(210) ); BOOST_REQUIRE( sell_mid ); - limit_order_id_type sell_mid_id = sell_mid->id; + limit_order_id_type sell_mid_id = sell_mid->get_id(); BOOST_CHECK_EQUAL( sell_mid_id(db).for_sale.value, 10000 ); // seller2 sells more, this order won't be filled in the beginning either const limit_order_object* sell_high = create_sell_order( seller2, asset(10000,mpa_id), asset(275) ); BOOST_REQUIRE( sell_high ); - limit_order_id_type sell_high_id = sell_high->id; + limit_order_id_type sell_high_id = sell_high->get_id(); BOOST_CHECK_EQUAL( sell_high_id(db).for_sale.value, 10000 ); // seller2 sells more, this order won't be filled const limit_order_object* sell_highest = create_sell_order( seller2, asset(10000,mpa_id), asset(285) ); BOOST_REQUIRE( sell_highest ); - limit_order_id_type sell_highest_id = sell_highest->id; + limit_order_id_type sell_highest_id = sell_highest->get_id(); BOOST_CHECK_EQUAL( sell_highest_id(db).for_sale.value, 10000 ); BOOST_CHECK_EQUAL( get_balance( seller2_id, mpa_id ), 20000 ); // 50000 - 10000 - 10000 - 10000 diff --git a/tests/tests/credit_offer_tests.cpp b/tests/tests/credit_offer_tests.cpp index 3df4e4fd10..1c3e0a11aa 100644 --- a/tests/tests/credit_offer_tests.cpp +++ b/tests/tests/credit_offer_tests.cpp @@ -54,14 +54,14 @@ BOOST_AUTO_TEST_CASE( credit_offer_hardfork_time_test ) const asset_object& core = asset_id_type()(db); const asset_object& usd = create_user_issued_asset( "MYUSD" ); - asset_id_type usd_id = usd.id; + asset_id_type usd_id = usd.get_id(); issue_uia( sam, usd.amount(init_amount) ); // Before the hard fork, unable to create a credit offer or transact against a credit offer or a credit deal, // or do any of them with proposals flat_map collateral_map; 
collateral_map[usd_id] = price( asset(1), asset(1, usd_id) ); - BOOST_CHECK_THROW( create_credit_offer( sam_id, core.id, 10000, 100, 3600, 0, false, + BOOST_CHECK_THROW( create_credit_offer( sam_id, core.get_id(), 10000, 100, 3600, 0, false, db.head_block_time() + fc::days(1), collateral_map, {} ), fc::exception ); @@ -75,8 +75,8 @@ BOOST_AUTO_TEST_CASE( credit_offer_hardfork_time_test ) BOOST_CHECK_THROW( repay_credit_deal( sam_id, tmp_cd_id, core.amount(100), core.amount(100) ), fc::exception ); - credit_offer_create_operation cop = make_credit_offer_create_op( sam_id, core.id, 10000, 100, 3600, 0, false, - db.head_block_time() + fc::days(1), collateral_map, {} ); + credit_offer_create_operation cop = make_credit_offer_create_op( sam_id, core.get_id(), 10000, 100, 3600, 0, + false, db.head_block_time() + fc::days(1), collateral_map, {} ); BOOST_CHECK_THROW( propose( cop ), fc::exception ); credit_offer_delete_operation dop = make_credit_offer_delete_op( sam_id, tmp_co_id ); @@ -130,12 +130,12 @@ BOOST_AUTO_TEST_CASE( credit_offer_crud_and_proposal_test ) asset_id_type core_id; const asset_object& usd = create_user_issued_asset( "MYUSD" ); - asset_id_type usd_id = usd.id; + asset_id_type usd_id = usd.get_id(); issue_uia( sam, usd.amount(init_amount) ); issue_uia( ted, usd.amount(init_amount) ); const asset_object& eur = create_user_issued_asset( "MYEUR", sam, white_list ); - asset_id_type eur_id = eur.id; + asset_id_type eur_id = eur.get_id(); issue_uia( sam, eur.amount(init_amount) ); issue_uia( ted, eur.amount(init_amount) ); // Make a whitelist @@ -177,8 +177,8 @@ BOOST_AUTO_TEST_CASE( credit_offer_crud_and_proposal_test ) flat_map collateral_map; collateral_map[usd_id] = price( asset(1), asset(1, usd_id) ); - credit_offer_create_operation cop = make_credit_offer_create_op( sam_id, core.id, 10000, 100, 3600, 0, false, - db.head_block_time() + fc::days(1), collateral_map, {} ); + credit_offer_create_operation cop = make_credit_offer_create_op( sam_id, core.get_id(), 10000, 100, 3600, 0, + false, db.head_block_time() + fc::days(1), collateral_map, {} ); propose( cop ); credit_offer_delete_operation dop = make_credit_offer_delete_op( sam_id, tmp_co_id ); @@ -241,9 +241,9 @@ BOOST_AUTO_TEST_CASE( credit_offer_crud_and_proposal_test ) flat_map collateral_map1; collateral_map1[usd_id] = price( asset(1), asset(2, usd_id) ); - const credit_offer_object& coo1 = create_credit_offer( sam_id, core.id, 10000, 100, 3600, 0, false, + const credit_offer_object& coo1 = create_credit_offer( sam_id, core.get_id(), 10000, 100, 3600, 0, false, disable_time1, collateral_map1, {} ); - credit_offer_id_type co1_id = coo1.id; + credit_offer_id_type co1_id = coo1.get_id(); BOOST_CHECK( coo1.owner_account == sam_id ); BOOST_CHECK( coo1.asset_type == core.id ); BOOST_CHECK( coo1.total_balance == 10000 ); @@ -274,7 +274,7 @@ BOOST_AUTO_TEST_CASE( credit_offer_crud_and_proposal_test ) const credit_offer_object& coo2 = create_credit_offer( ted_id, usd_id, 1, 10000000u, duration2, 10000, true, disable_time2, collateral_map2, borrower_map2 ); - credit_offer_id_type co2_id = coo2.id; + credit_offer_id_type co2_id = coo2.get_id(); BOOST_CHECK( coo2.owner_account == ted_id ); BOOST_CHECK( coo2.asset_type == usd_id ); BOOST_CHECK( coo2.total_balance == 1 ); @@ -299,7 +299,7 @@ BOOST_AUTO_TEST_CASE( credit_offer_crud_and_proposal_test ) const credit_offer_object& coo3 = create_credit_offer( sam_id, eur_id, 10, 1, 30, 1, false, disable_time3, collateral_map3, {} ); // Account is whitelisted - credit_offer_id_type co3_id = 
coo3.id; + credit_offer_id_type co3_id = coo3.get_id(); BOOST_CHECK( coo3.owner_account == sam_id ); BOOST_CHECK( coo3.asset_type == eur_id ); BOOST_CHECK( coo3.total_balance == 10 ); @@ -354,7 +354,7 @@ BOOST_AUTO_TEST_CASE( credit_offer_crud_and_proposal_test ) invalid_borrower_map2_3[no_account_id] = 1; // account does not exist // Non-positive balance - BOOST_CHECK_THROW( create_credit_offer( sam_id, core.id, 0, 100, 3600, 0, false, + BOOST_CHECK_THROW( create_credit_offer( sam_id, core.get_id(), 0, 100, 3600, 0, false, disable_time1, collateral_map1, {} ), fc::exception ); BOOST_CHECK_THROW( create_credit_offer( ted_id, usd_id, -1, 10000000u, duration2, 10000, true, @@ -373,15 +373,15 @@ BOOST_AUTO_TEST_CASE( credit_offer_crud_and_proposal_test ) disable_time2, collateral_map2, borrower_map2 ), fc::exception ); // Negative minimum deal amount - BOOST_CHECK_THROW( create_credit_offer( sam_id, core.id, 10000, 100, 3600, -1, false, + BOOST_CHECK_THROW( create_credit_offer( sam_id, core.get_id(), 10000, 100, 3600, -1, false, disable_time1, collateral_map1, {} ), fc::exception ); // Too big minimum deal amount - BOOST_CHECK_THROW( create_credit_offer( sam_id, core.id, 10000, 100, 3600, GRAPHENE_MAX_SHARE_SUPPLY + 1, false, - disable_time1, collateral_map1, {} ), + BOOST_CHECK_THROW( create_credit_offer( sam_id, core.get_id(), 10000, 100, 3600, GRAPHENE_MAX_SHARE_SUPPLY + 1, + false, disable_time1, collateral_map1, {} ), fc::exception ); // Auto-disable time in the past and the offer is enabled - BOOST_CHECK_THROW( create_credit_offer( sam_id, core.id, 10000, 100, 3600, 0, true, + BOOST_CHECK_THROW( create_credit_offer( sam_id, core.get_id(), 10000, 100, 3600, 0, true, disable_time1, collateral_map1, {} ), fc::exception ); // Auto-disable time too late @@ -389,29 +389,29 @@ BOOST_AUTO_TEST_CASE( credit_offer_crud_and_proposal_test ) too_late_disable_time, collateral_map2, borrower_map2 ), fc::exception ); // Empty allowed collateral map - BOOST_CHECK_THROW( create_credit_offer( sam_id, core.id, 10000, 100, 3600, 0, false, + BOOST_CHECK_THROW( create_credit_offer( sam_id, core.get_id(), 10000, 100, 3600, 0, false, disable_time1, empty_collateral_map, {} ), fc::exception ); // Invalid allowed collateral map - BOOST_CHECK_THROW( create_credit_offer( sam_id, core.id, 10000, 100, 3600, 0, false, + BOOST_CHECK_THROW( create_credit_offer( sam_id, core.get_id(), 10000, 100, 3600, 0, false, disable_time1, invalid_collateral_map1_1, {} ), fc::exception ); - BOOST_CHECK_THROW( create_credit_offer( sam_id, core.id, 10000, 100, 3600, 0, false, + BOOST_CHECK_THROW( create_credit_offer( sam_id, core.get_id(), 10000, 100, 3600, 0, false, disable_time1, invalid_collateral_map1_2, {} ), fc::exception ); - BOOST_CHECK_THROW( create_credit_offer( sam_id, core.id, 10000, 100, 3600, 0, false, + BOOST_CHECK_THROW( create_credit_offer( sam_id, core.get_id(), 10000, 100, 3600, 0, false, disable_time1, invalid_collateral_map1_3, {} ), fc::exception ); - BOOST_CHECK_THROW( create_credit_offer( sam_id, core.id, 10000, 100, 3600, 0, false, + BOOST_CHECK_THROW( create_credit_offer( sam_id, core.get_id(), 10000, 100, 3600, 0, false, disable_time1, invalid_collateral_map1_4, {} ), fc::exception ); - BOOST_CHECK_THROW( create_credit_offer( sam_id, core.id, 10000, 100, 3600, 0, false, + BOOST_CHECK_THROW( create_credit_offer( sam_id, core.get_id(), 10000, 100, 3600, 0, false, disable_time1, invalid_collateral_map1_5, {} ), fc::exception ); - BOOST_CHECK_THROW( create_credit_offer( sam_id, core.id, 10000, 100, 3600, 0, 
false, + BOOST_CHECK_THROW( create_credit_offer( sam_id, core.get_id(), 10000, 100, 3600, 0, false, disable_time1, invalid_collateral_map1_6, {} ), fc::exception ); - BOOST_CHECK_THROW( create_credit_offer( sam_id, core.id, 10000, 100, 3600, 0, false, + BOOST_CHECK_THROW( create_credit_offer( sam_id, core.get_id(), 10000, 100, 3600, 0, false, disable_time1, invalid_collateral_map1_7, {} ), fc::exception ); // Invalid acceptable borrowers map @@ -693,19 +693,19 @@ BOOST_AUTO_TEST_CASE( credit_offer_borrow_repay_test ) asset_id_type core_id; const asset_object& usd = create_user_issued_asset( "MYUSD", ted, white_list ); - asset_id_type usd_id = usd.id; + asset_id_type usd_id = usd.get_id(); issue_uia( ray, usd.amount(init_amount) ); issue_uia( sam, usd.amount(init_amount) ); issue_uia( ted, usd.amount(init_amount) ); const asset_object& eur = create_user_issued_asset( "MYEUR", sam, white_list ); - asset_id_type eur_id = eur.id; + asset_id_type eur_id = eur.get_id(); issue_uia( ray, eur.amount(init_amount) ); issue_uia( sam, eur.amount(init_amount) ); issue_uia( ted, eur.amount(init_amount) ); const asset_object& cny = create_user_issued_asset( "MYCNY" ); - asset_id_type cny_id = cny.id; + asset_id_type cny_id = cny.get_id(); issue_uia( ray, cny.amount(init_amount) ); issue_uia( sam, cny.amount(init_amount) ); issue_uia( ted, cny.amount(init_amount) ); @@ -813,9 +813,9 @@ BOOST_AUTO_TEST_CASE( credit_offer_borrow_repay_test ) collateral_map1[usd_id] = price( asset(1), asset(2, usd_id) ); collateral_map1[eur_id] = price( asset(1), asset(1, eur_id) ); - const credit_offer_object& coo1 = create_credit_offer( sam_id, core.id, 10000, 30000, 3600, 0, false, + const credit_offer_object& coo1 = create_credit_offer( sam_id, core.get_id(), 10000, 30000, 3600, 0, false, disable_time1, collateral_map1, {} ); - credit_offer_id_type co1_id = coo1.id; + credit_offer_id_type co1_id = coo1.get_id(); BOOST_CHECK( co1_id(db).owner_account == sam_id ); BOOST_CHECK( co1_id(db).asset_type == core.id ); BOOST_CHECK( co1_id(db).total_balance == 10000 ); @@ -842,7 +842,7 @@ BOOST_AUTO_TEST_CASE( credit_offer_borrow_repay_test ) // Now able to borrow BOOST_TEST_MESSAGE( "Ray borrows" ); const credit_deal_object& cdo11 = borrow_from_credit_offer( ray_id, co1_id, asset(100), asset(200, usd_id) ); - credit_deal_id_type cd11_id = cdo11.id; + credit_deal_id_type cd11_id = cdo11.get_id(); time_point_sec expected_repay_time11 = db.head_block_time() + fc::seconds(3600); // 60 minutes after init BOOST_CHECK( cd11_id(db).borrower == ray_id ); @@ -916,7 +916,7 @@ BOOST_AUTO_TEST_CASE( credit_offer_borrow_repay_test ) BOOST_TEST_MESSAGE( "Ray borrows more" ); const credit_deal_object& cdo12 = borrow_from_credit_offer( ray_id, co1_id, asset(100), asset(200, usd_id), ok_fee_rate, ok_duration ); - credit_deal_id_type cd12_id = cdo12.id; + credit_deal_id_type cd12_id = cdo12.get_id(); time_point_sec expected_repay_time12 = db.head_block_time() + fc::seconds(3600); // 60 minutes after init BOOST_CHECK( cd12_id(db).borrower == ray_id ); @@ -943,7 +943,7 @@ BOOST_AUTO_TEST_CASE( credit_offer_borrow_repay_test ) // Able to borrow the same amount with more collateral BOOST_TEST_MESSAGE( "Ray borrows even more" ); const credit_deal_object& cdo13 = borrow_from_credit_offer( ray_id, co1_id, asset(100), asset(499, usd_id) ); - credit_deal_id_type cd13_id = cdo13.id; + credit_deal_id_type cd13_id = cdo13.get_id(); time_point_sec expected_repay_time13 = db.head_block_time() + fc::seconds(3600); // 65 minutes after init BOOST_CHECK( 
cd13_id(db).borrower == ray_id ); @@ -998,7 +998,7 @@ BOOST_AUTO_TEST_CASE( credit_offer_borrow_repay_test ) // Ted is now able to borrow with CNY const credit_deal_object& cdo14 = borrow_from_credit_offer( ted_id, co1_id, asset(200), asset(200, cny_id) ); - credit_deal_id_type cd14_id = cdo14.id; + credit_deal_id_type cd14_id = cdo14.get_id(); time_point_sec expected_repay_time14 = db.head_block_time() + fc::seconds(600); // 15 minutes after init BOOST_CHECK( cd14_id(db).borrower == ted_id ); @@ -1027,7 +1027,7 @@ BOOST_AUTO_TEST_CASE( credit_offer_borrow_repay_test ) // Ted is able to borrow less with CNY const credit_deal_object& cdo15 = borrow_from_credit_offer( ted_id, co1_id, asset(50), asset(100, cny_id) ); - credit_deal_id_type cd15_id = cdo15.id; + credit_deal_id_type cd15_id = cdo15.get_id(); time_point_sec expected_repay_time15 = db.head_block_time() + fc::seconds(600); // 15 minutes after init BOOST_CHECK( cd15_id(db).borrower == ted_id ); @@ -1076,7 +1076,7 @@ BOOST_AUTO_TEST_CASE( credit_offer_borrow_repay_test ) // Now Ted is able to borrow 40 CORE with EUR const credit_deal_object& cdo16 = borrow_from_credit_offer( ted_id, co1_id, asset(40), asset(499, eur_id) ); - credit_deal_id_type cd16_id = cdo16.id; + credit_deal_id_type cd16_id = cdo16.get_id(); time_point_sec expected_repay_time16 = db.head_block_time() + fc::seconds(600); // 18 minutes after init BOOST_CHECK( cd16_id(db).borrower == ted_id ); @@ -1135,7 +1135,7 @@ BOOST_AUTO_TEST_CASE( credit_offer_borrow_repay_test ) BOOST_REQUIRE( result.updated_objects.valid() ); BOOST_CHECK( result.updated_objects->size() == 2 ); - BOOST_CHECK( *result.updated_objects == flat_set({ co1_id, cd13_id }) ); + BOOST_CHECK( *result.updated_objects == flat_set({ co1_id(db).id, cd13_id(db).id }) ); BOOST_CHECK( !result.removed_objects.valid() ); @@ -1162,7 +1162,7 @@ BOOST_AUTO_TEST_CASE( credit_offer_borrow_repay_test ) // Ted is able to borrow 2 CORE with EUR const credit_deal_object& cdo17 = borrow_from_credit_offer( ted_id, co1_id, asset(2), asset(49, eur_id) ); - credit_deal_id_type cd17_id = cdo17.id; + credit_deal_id_type cd17_id = cdo17.get_id(); time_point_sec expected_repay_time17 = db.head_block_time() + fc::seconds(600); // 22 minutes after init BOOST_CHECK( cd17_id(db).borrower == ted_id ); @@ -1192,7 +1192,7 @@ BOOST_AUTO_TEST_CASE( credit_offer_borrow_repay_test ) BOOST_REQUIRE( result.updated_objects.valid() ); BOOST_CHECK( result.updated_objects->size() == 2 ); - BOOST_CHECK( *result.updated_objects == flat_set({ co1_id, cd13_id }) ); + BOOST_CHECK( *result.updated_objects == flat_set({ co1_id(db).id, cd13_id(db).id }) ); BOOST_CHECK( !result.removed_objects.valid() ); @@ -1269,7 +1269,7 @@ BOOST_AUTO_TEST_CASE( credit_offer_borrow_repay_test ) collateral_map2[eur_id] = price( asset(10, usd_id), asset(10, eur_id) ); const credit_offer_object& coo2 = create_credit_offer( sam_id, usd_id, 10000, 70000, 1800, 0, true, disable_time2, collateral_map2, {} ); - credit_offer_id_type co2_id = coo2.id; + credit_offer_id_type co2_id = coo2.get_id(); BOOST_CHECK( co2_id(db).owner_account == sam_id ); BOOST_CHECK( co2_id(db).asset_type == usd_id ); BOOST_CHECK( co2_id(db).total_balance == 10000 ); @@ -1287,7 +1287,7 @@ BOOST_AUTO_TEST_CASE( credit_offer_borrow_repay_test ) // Ray borrows from the new credit offer const auto& cdo21 = borrow_from_credit_offer( ray_id, co2_id, asset(1000, usd_id), asset(1200, cny_id) ); - credit_deal_id_type cd21_id = cdo21.id; + credit_deal_id_type cd21_id = cdo21.get_id(); time_point_sec 
expected_repay_time21 = db.head_block_time() + fc::seconds(1800); // 42 minutes after init BOOST_CHECK( cd21_id(db).borrower == ray_id ); @@ -1317,7 +1317,7 @@ BOOST_AUTO_TEST_CASE( credit_offer_borrow_repay_test ) BOOST_REQUIRE( result.updated_objects.valid() ); BOOST_CHECK( result.updated_objects->size() == 2 ); - BOOST_CHECK( *result.updated_objects == flat_set({ co2_id, cd21_id }) ); + BOOST_CHECK( *result.updated_objects == flat_set({ co2_id(db).id, cd21_id(db).id }) ); BOOST_CHECK( !result.removed_objects.valid() ); @@ -1371,7 +1371,7 @@ BOOST_AUTO_TEST_CASE( credit_offer_borrow_repay_test ) BOOST_REQUIRE( result.updated_objects.valid() ); BOOST_CHECK( result.updated_objects->size() == 2 ); - BOOST_CHECK( *result.updated_objects == flat_set({ co1_id, cd13_id }) ); + BOOST_CHECK( *result.updated_objects == flat_set({ co1_id(db).id, cd13_id(db).id }) ); BOOST_CHECK( !result.removed_objects.valid() ); @@ -1428,7 +1428,7 @@ BOOST_AUTO_TEST_CASE( credit_offer_borrow_repay_test ) // Ted borrows from the new credit offer const auto& cdo22 = borrow_from_credit_offer( ted_id, co2_id, asset(1000, usd_id), asset(1100, eur_id) ); - credit_deal_id_type cd22_id = cdo22.id; + credit_deal_id_type cd22_id = cdo22.get_id(); time_point_sec expected_repay_time22 = db.head_block_time() + fc::seconds(1800); // 43 minutes after init BOOST_CHECK( cd22_id(db).borrower == ted_id ); @@ -1458,7 +1458,7 @@ BOOST_AUTO_TEST_CASE( credit_offer_borrow_repay_test ) BOOST_REQUIRE( result.updated_objects.valid() ); BOOST_CHECK( result.updated_objects->size() == 2 ); - BOOST_CHECK( *result.updated_objects == flat_set({ co2_id, cd22_id }) ); + BOOST_CHECK( *result.updated_objects == flat_set({ co2_id(db).id, cd22_id(db).id }) ); BOOST_CHECK( !result.removed_objects.valid() ); @@ -1572,7 +1572,7 @@ BOOST_AUTO_TEST_CASE( credit_offer_borrow_repay_test ) // Ted borrows more const credit_deal_object& cdo18 = borrow_from_credit_offer( ted_id, co1_id, asset(10), asset(30, eur_id) ); - credit_deal_id_type cd18_id = cdo18.id; + credit_deal_id_type cd18_id = cdo18.get_id(); time_point_sec expected_repay_time18 = db.head_block_time() + fc::seconds(600); // 28 minutes after init BOOST_CHECK( cd18_id(db).borrower == ted_id ); @@ -1790,14 +1790,14 @@ BOOST_AUTO_TEST_CASE( credit_offer_apis_test ) asset_id_type core_id; const asset_object& usd = create_user_issued_asset( "MYUSD" ); - asset_id_type usd_id = usd.id; + asset_id_type usd_id = usd.get_id(); issue_uia( bob, usd.amount(init_amount) ); issue_uia( ray, usd.amount(init_amount) ); issue_uia( sam, usd.amount(init_amount) ); issue_uia( ted, usd.amount(init_amount) ); const asset_object& eur = create_user_issued_asset( "MYEUR", sam, white_list ); - asset_id_type eur_id = eur.id; + asset_id_type eur_id = eur.get_id(); issue_uia( bob, eur.amount(init_amount) ); issue_uia( ray, eur.amount(init_amount) ); issue_uia( sam, eur.amount(init_amount) ); @@ -1816,27 +1816,27 @@ BOOST_AUTO_TEST_CASE( credit_offer_apis_test ) const credit_offer_object& coo1 = create_credit_offer( sam_id, core_id, 10000, 30000, 3600, 0, true, db.head_block_time() + fc::days(1), collateral_map_core, {} ); - credit_offer_id_type co1_id = coo1.id; + credit_offer_id_type co1_id = coo1.get_id(); const credit_offer_object& coo2 = create_credit_offer( ted_id, usd_id, 10000, 30000, 3600, 0, true, db.head_block_time() + fc::days(1), collateral_map_usd, {} ); - credit_offer_id_type co2_id = coo2.id; + credit_offer_id_type co2_id = coo2.get_id(); const credit_offer_object& coo3 = create_credit_offer( sam_id, eur_id, 10000, 
30000, 3600, 0, true, db.head_block_time() + fc::days(1), collateral_map_eur, {} ); - credit_offer_id_type co3_id = coo3.id; + credit_offer_id_type co3_id = coo3.get_id(); const credit_offer_object& coo4 = create_credit_offer( sam_id, eur_id, 10000, 30000, 3600, 0, true, db.head_block_time() + fc::days(1), collateral_map_eur, {} ); - credit_offer_id_type co4_id = coo4.id; + credit_offer_id_type co4_id = coo4.get_id(); const credit_offer_object& coo5 = create_credit_offer( sam_id, usd_id, 10000, 30000, 3600, 0, true, db.head_block_time() + fc::days(1), collateral_map_usd, {} ); - credit_offer_id_type co5_id = coo5.id; + credit_offer_id_type co5_id = coo5.get_id(); const credit_offer_object& coo6 = create_credit_offer( ted_id, usd_id, 10000, 30000, 3600, 0, true, db.head_block_time() + fc::days(1), collateral_map_usd, {} ); - credit_offer_id_type co6_id = coo6.id; + credit_offer_id_type co6_id = coo6.get_id(); generate_block(); @@ -1927,29 +1927,29 @@ BOOST_AUTO_TEST_CASE( credit_offer_apis_test ) // Create credit deals // Offer owner : sam const credit_deal_object& cdo11 = borrow_from_credit_offer( ray_id, co1_id, asset(100), asset(200, usd_id) ); - credit_deal_id_type cd11_id = cdo11.id; + credit_deal_id_type cd11_id = cdo11.get_id(); // Offer owner : sam const credit_deal_object& cdo12 = borrow_from_credit_offer( ray_id, co1_id, asset(150), asset(400, eur_id) ); - credit_deal_id_type cd12_id = cdo12.id; + credit_deal_id_type cd12_id = cdo12.get_id(); // Offer owner : sam const credit_deal_object& cdo13 = borrow_from_credit_offer( bob_id, co1_id, asset(200), asset(600, eur_id) ); - credit_deal_id_type cd13_id = cdo13.id; + credit_deal_id_type cd13_id = cdo13.get_id(); // Offer owner : ted const credit_deal_object& cdo21 = borrow_from_credit_offer( bob_id, co2_id, asset(500, usd_id), asset(500, eur_id) ); - credit_deal_id_type cd21_id = cdo21.id; + credit_deal_id_type cd21_id = cdo21.get_id(); // Offer owner : sam const credit_deal_object& cdo31 = borrow_from_credit_offer( bob_id, co3_id, asset(500, eur_id), asset(5000) ); - credit_deal_id_type cd31_id = cdo31.id; + credit_deal_id_type cd31_id = cdo31.get_id(); // Offer owner : sam const credit_deal_object& cdo51 = borrow_from_credit_offer( ray_id, co5_id, asset(400, usd_id), asset(800, eur_id) ); - credit_deal_id_type cd51_id = cdo51.id; + credit_deal_id_type cd51_id = cdo51.get_id(); generate_block(); diff --git a/tests/tests/custom_authority_tests.cpp b/tests/tests/custom_authority_tests.cpp index fc0867434e..278fc1debd 100644 --- a/tests/tests/custom_authority_tests.cpp +++ b/tests/tests/custom_authority_tests.cpp @@ -729,7 +729,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { PUSH_TX(db, trx); custom_authority_id_type auth_id = - db.get_index_type().indices().get().find(alice_id)->id; + db.get_index_type().indices().get().find(alice_id)->get_id(); ////// // Bob attempts to transfer 99 CORE from Alice's account @@ -915,7 +915,8 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { PUSH_TX(db, trx); custom_authority_id_type ca_bob_transfers_from_alice_to_charlie = - db.get_index_type().indices().get().find(alice_id)->id; + db.get_index_type().indices().get().find(alice_id) + ->get_id(); ////// // Bob attempts to transfer 100 CORE from Alice's account to Charlie @@ -996,8 +997,8 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { auto iter = ca_alice_range.first; custom_authority_id_type *ca_charlie_transfers_from_alice_to_diana = nullptr; while (iter != ca_index.end()) { - custom_authority_id_type ca_id = iter->id; - const custom_authority_object *ca = 
db.find(ca_id); + custom_authority_id_type ca_id = iter->get_id(); + const custom_authority_object *ca = db.find(ca_id); flat_map ca_authorities = ca->auth.account_auths; BOOST_CHECK_EQUAL(1, ca_authorities.size()); if (ca_authorities.find(charlie.get_id()) != ca_authorities.end()) { @@ -1124,7 +1125,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { generate_blocks(1); const auto& bitusd = *db.get_index_type().indices().get().find("USDBIT"); const auto &core = asset_id_type()(db); - update_feed_producers(bitusd, {feedproducer.id}); + update_feed_producers(bitusd, {feedproducer.get_id()}); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -1180,7 +1181,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { auto caa = db.get_index_type().indices().get().find(alice.get_id()); - custom_authority_id_type auth_id = caa->id; + custom_authority_id_type auth_id = caa->get_id(); custom_authority_create_operation authorize_limit_order_cancellations; authorize_limit_order_cancellations.account = alice.get_id(); @@ -1364,11 +1365,11 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // assetissuer issues A1, B1, and C1 to alice asset_issue_operation issue_a1_to_alice_op - = issue_amount_to(assetissuer.get_id(), asset(1000, acoin1.id), alice.get_id()); + = issue_amount_to(assetissuer.get_id(), asset(1000, acoin1.get_id()), alice.get_id()); asset_issue_operation issue_b1_to_alice_op - = issue_amount_to(assetissuer.get_id(), asset(2000, bcoin1.id), alice.get_id()); + = issue_amount_to(assetissuer.get_id(), asset(2000, bcoin1.get_id()), alice.get_id()); asset_issue_operation issue_c1_to_alice_op - = issue_amount_to(assetissuer.get_id(), asset(2000, ccoin1.id), alice.get_id()); + = issue_amount_to(assetissuer.get_id(), asset(2000, ccoin1.get_id()), alice.get_id()); trx.clear(); trx.operations = {issue_a1_to_alice_op, issue_b1_to_alice_op, issue_c1_to_alice_op}; sign(trx, assetissuer_private_key); @@ -1414,9 +1415,9 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // Define the two set of assets: ACOINs and BCOINs restriction is_acoin_rx = restriction(asset_id_index, FUNC(in), - flat_set{acoin1.id}); + flat_set{acoin1.get_id()}); restriction is_bcoin_rx = restriction(asset_id_index, FUNC(in), - flat_set{bcoin1.id, bcoin2.id, bcoin3.id}); + flat_set{bcoin1.get_id(), bcoin2.get_id(), bcoin3.get_id()}); // Custom Authority 1: Sell ACOINs to buy BCOINs restriction sell_acoin_rx = restriction(amount_to_sell_index, FUNC(attr), vector{is_acoin_rx}); @@ -1773,7 +1774,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { generate_blocks(1); const auto& bitusd = *db.get_index_type().indices().get().find("USDBIT"); const auto &core = asset_id_type()(db); - update_feed_producers(bitusd, {feedproducer.id}); + update_feed_producers(bitusd, {feedproducer.get_id()}); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -1827,7 +1828,8 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { PUSH_TX(db, trx); custom_authority_id_type auth_id = - db.get_index_type().indices().get().find(feedproducer.id)->id; + db.get_index_type().indices().get().find(feedproducer.get_id()) + ->get_id(); ////// // Bob attempts to publish feed of USDBIT on behalf of feedproducer @@ -1945,7 +1947,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { ACTORS((feedproducer)); const auto &bitusd = create_bitasset("USDBIT", feedproducer_id); const auto &core = asset_id_type()(db); - update_feed_producers(bitusd, {feedproducer.id}); + update_feed_producers(bitusd, {feedproducer.get_id()}); price_feed current_feed; 
current_feed.maintenance_collateral_ratio = 1750; @@ -2201,7 +2203,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { generate_blocks(1); const asset_object &alicecoin = *db.get_index_type().indices().get().find("ALICECOIN"); const asset_object &specialcoin = *db.get_index_type().indices().get().find("SPECIALCOIN"); - const asset_id_type alicecoin_id = alicecoin.id; + const asset_id_type alicecoin_id = alicecoin.get_id(); ////// @@ -2304,7 +2306,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // Bob attempts to issue the special coin to an allowed account // This should fail because Bob is not authorized to issue SPECIALCOIN to any account ////// - issue_op = issue_amount_to(alice.get_id(), asset(100, specialcoin.id), allowed3.get_id()); + issue_op = issue_amount_to(alice.get_id(), asset(100, specialcoin.get_id()), allowed3.get_id()); trx.clear(); trx.operations = {issue_op}; sign(trx, bob_private_key); @@ -2385,7 +2387,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { generate_blocks(1); const asset_object &alicecoin = *db.get_index_type().indices().get().find("ALICECOIN"); const asset_object &specialcoin = *db.get_index_type().indices().get().find("SPECIALCOIN"); - const asset_id_type alicecoin_id = alicecoin.id; + const asset_id_type alicecoin_id = alicecoin.get_id(); ////// @@ -2494,7 +2496,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // Bob attempts to issue the special coin to an allowed account // This should fail because Bob is not authorized to issue SPECIALCOIN to any account ////// - issue_op = issue_amount_to(alice.get_id(), asset(100, specialcoin.id), allowed3.get_id()); + issue_op = issue_amount_to(alice.get_id(), asset(100, specialcoin.get_id()), allowed3.get_id()); trx.clear(); trx.operations = {issue_op}; sign(trx, bob_private_key); @@ -2511,7 +2513,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // Bob attempts to issue the UIA to a banned account with the Bob's key // This should fail because Bob is not authorized to issue ALICECOIN to banned account (banned1) ////// - issue_op = issue_amount_to(alice.get_id(), asset(100, alicecoin.id), banned1.get_id()); + issue_op = issue_amount_to(alice.get_id(), asset(100, alicecoin.get_id()), banned1.get_id()); trx.clear(); trx.operations = {issue_op}; sign(trx, bob_private_key); @@ -2527,7 +2529,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // Bob attempts to issue the UIA to a banned account with the Bob's key // This should fail because Bob is not authorized to issue ALICECOIN to banned account (banned2) ////// - issue_op = issue_amount_to(alice.get_id(), asset(100, alicecoin.id), banned2.get_id()); + issue_op = issue_amount_to(alice.get_id(), asset(100, alicecoin.get_id()), banned2.get_id()); trx.clear(); trx.operations = {issue_op}; sign(trx, bob_private_key); @@ -2543,7 +2545,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // Bob attempts to issue the UIA to a banned account with the Bob's key // This should fail because Bob is not authorized to issue ALICECOIN to banned account (banned3) ////// - issue_op = issue_amount_to(alice.get_id(), asset(100, alicecoin.id), banned3.get_id()); + issue_op = issue_amount_to(alice.get_id(), asset(100, alicecoin.get_id()), banned3.get_id()); trx.clear(); trx.operations = {issue_op}; sign(trx, bob_private_key); @@ -2559,7 +2561,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // Bob attempts to issue the UIA to an allowed account // This should succeed because Bob is authorized to issue ALICECOIN to any account ////// - issue_op = issue_amount_to(alice.get_id(), asset(100, alicecoin.id), 
allowed3.get_id()); + issue_op = issue_amount_to(alice.get_id(), asset(100, alicecoin.get_id()), allowed3.get_id()); trx.clear(); trx.operations = {issue_op}; sign(trx, bob_private_key); @@ -2633,26 +2635,26 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // Initialize: Alice issues her two coins to different accounts ////// asset_issue_operation issue_alice_to_allowed1_op - = issue_amount_to(alice.get_id(), asset(100, alicecoin.id), allowed1.get_id()); + = issue_amount_to(alice.get_id(), asset(100, alicecoin.get_id()), allowed1.get_id()); asset_issue_operation issue_alice_to_allowed2_op - = issue_amount_to(alice.get_id(), asset(200, alicecoin.id), allowed2.get_id()); + = issue_amount_to(alice.get_id(), asset(200, alicecoin.get_id()), allowed2.get_id()); asset_issue_operation issue_alice_to_allowed3_op - = issue_amount_to(alice.get_id(), asset(300, alicecoin.id), allowed3.get_id()); + = issue_amount_to(alice.get_id(), asset(300, alicecoin.get_id()), allowed3.get_id()); asset_issue_operation issue_alice_to_suspicious1_op - = issue_amount_to(alice.get_id(), asset(100, alicecoin.id), suspicious1.get_id()); + = issue_amount_to(alice.get_id(), asset(100, alicecoin.get_id()), suspicious1.get_id()); asset_issue_operation issue_alice_to_suspicious2_op - = issue_amount_to(alice.get_id(), asset(200, alicecoin.id), suspicious2.get_id()); + = issue_amount_to(alice.get_id(), asset(200, alicecoin.get_id()), suspicious2.get_id()); asset_issue_operation issue_special_to_allowed1_op - = issue_amount_to(alice.get_id(), asset(1000, specialcoin.id), allowed1.get_id()); + = issue_amount_to(alice.get_id(), asset(1000, specialcoin.get_id()), allowed1.get_id()); asset_issue_operation issue_special_to_allowed2_op - = issue_amount_to(alice.get_id(), asset(2000, specialcoin.id), allowed2.get_id()); + = issue_amount_to(alice.get_id(), asset(2000, specialcoin.get_id()), allowed2.get_id()); asset_issue_operation issue_special_to_allowed3_op - = issue_amount_to(alice.get_id(), asset(3000, specialcoin.id), allowed3.get_id()); + = issue_amount_to(alice.get_id(), asset(3000, specialcoin.get_id()), allowed3.get_id()); asset_issue_operation issue_special_to_suspicious1_op - = issue_amount_to(alice.get_id(), asset(1000, specialcoin.id), suspicious1.get_id()); + = issue_amount_to(alice.get_id(), asset(1000, specialcoin.get_id()), suspicious1.get_id()); asset_issue_operation issue_special_to_suspicious2_op - = issue_amount_to(alice.get_id(), asset(2000, specialcoin.id), suspicious2.get_id()); + = issue_amount_to(alice.get_id(), asset(2000, specialcoin.get_id()), suspicious2.get_id()); trx.clear(); trx.operations = {issue_alice_to_allowed1_op, issue_alice_to_allowed2_op, issue_alice_to_allowed3_op, issue_alice_to_suspicious1_op, issue_alice_to_suspicious2_op, @@ -2667,7 +2669,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // This should succeed because Alice is the issuer ////// override_transfer_operation override_op - = create_override(alice.get_id(), allowed1.get_id(), asset(20, alicecoin.id), arbitrator.get_id()); + = create_override(alice.get_id(), allowed1.get_id(), asset(20, alicecoin.get_id()), arbitrator.get_id()); trx.clear(); trx.operations = {override_op}; sign(trx, alice_private_key); @@ -2676,7 +2678,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { BOOST_CHECK_EQUAL(allowed1_balance_alicecoin_after_override1, 80); override_op - = create_override(alice.get_id(), suspicious1.get_id(), asset(20, alicecoin.id), arbitrator.get_id()); + = create_override(alice.get_id(), suspicious1.get_id(), asset(20, alicecoin.get_id()), 
arbitrator.get_id()); trx.clear(); trx.operations = {override_op}; sign(trx, alice_private_key); @@ -2686,21 +2688,21 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { BOOST_CHECK_EQUAL(suspicious1_balance_alicecoin_after_override1, 80); override_op - = create_override(alice.get_id(), allowed1.get_id(), asset(200, specialcoin.id), arbitrator.get_id()); + = create_override(alice.get_id(), allowed1.get_id(), asset(200, specialcoin.get_id()), arbitrator.get_id()); trx.clear(); trx.operations = {override_op}; sign(trx, alice_private_key); PUSH_TX(db, trx); - int64_t allowed1_balance_specialcoin_after_override1 = get_balance(allowed1.get_id(), specialcoin.id); + int64_t allowed1_balance_specialcoin_after_override1 = get_balance(allowed1.get_id(), specialcoin.get_id()); BOOST_CHECK_EQUAL(allowed1_balance_specialcoin_after_override1, 800); override_op - = create_override(alice.get_id(), suspicious1.get_id(), asset(200, specialcoin.id), arbitrator.get_id()); + = create_override(alice.get_id(), suspicious1.get_id(), asset(200, specialcoin.get_id()), arbitrator.get_id()); trx.clear(); trx.operations = {override_op}; sign(trx, alice_private_key); PUSH_TX(db, trx); - int64_t suspicious1_balance_specialcoin_after_override1 = get_balance(suspicious1.get_id(), specialcoin.id); + int64_t suspicious1_balance_specialcoin_after_override1 = get_balance(suspicious1.get_id(), specialcoin.get_id()); BOOST_CHECK_EQUAL(suspicious1_balance_specialcoin_after_override1, 800); @@ -2708,7 +2710,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // Bob attempts to override some ALICECOIN and SPECIAL from some accounts // This should fail because Bob is not authorized to override any ALICECOIN nor SPECIALCOIN ////// - override_op = create_override(alice.get_id(), allowed1.get_id(), asset(25, alicecoin.id), arbitrator.get_id()); + override_op = create_override(alice.get_id(), allowed1.get_id(), asset(25, alicecoin.get_id()), arbitrator.get_id()); trx.clear(); trx.operations = {override_op}; sign(trx, bob_private_key); @@ -2717,7 +2719,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // "rejected_custom_auths":[] EXPECT_EXCEPTION_STRING("\"rejected_custom_auths\":[]", [&] {PUSH_TX(db, trx);}); - override_op = create_override(alice.get_id(), allowed1.get_id(), asset(25, specialcoin.id), arbitrator.get_id()); + override_op = create_override(alice.get_id(), allowed1.get_id(), asset(25, specialcoin.get_id()), arbitrator.get_id()); trx.clear(); trx.operations = {override_op}; sign(trx, bob_private_key); @@ -2798,7 +2800,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // Bob attempts to override transfer some ALICECOIN from a suspicious account // This should succeed because Bob is now authorized to override ALICECOIN from some accounts ////// - override_op = create_override(alice.get_id(), suspicious1.get_id(), asset(25, alicecoin.id), arbitrator.get_id()); + override_op = create_override(alice.get_id(), suspicious1.get_id(), asset(25, alicecoin.get_id()), arbitrator.get_id()); trx.clear(); trx.operations = {override_op}; sign(trx, bob_private_key); @@ -2812,7 +2814,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // Bob attempts to override transfer some SPECIALCOIN from a suspicious account // This should fail because Bob is not authorized to override SPECIALCOIN from any accounts ////// - override_op = create_override(alice.get_id(), suspicious1.get_id(), asset(250, specialcoin.id), arbitrator.get_id()); + override_op = create_override(alice.get_id(), suspicious1.get_id(), asset(250, specialcoin.get_id()), arbitrator.get_id()); 
trx.clear(); trx.operations = {override_op}; sign(trx, bob_private_key); @@ -2829,7 +2831,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // Bob attempts to override transfer some SPECIALCOIN from an allowed account // This should fail because Bob is not authorized to override SPECIALCOIN from any accounts ////// - override_op = create_override(alice.get_id(), allowed3.get_id(), asset(250, specialcoin.id), arbitrator.get_id()); + override_op = create_override(alice.get_id(), allowed3.get_id(), asset(250, specialcoin.get_id()), arbitrator.get_id()); trx.clear(); trx.operations = {override_op}; sign(trx, bob_private_key); @@ -2846,7 +2848,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // Bob attempts to override transfer some ALICECOIN from an allowed account // This should fail because Bob is only authorized to override ALICECOIN from suspicious accounts ////// - override_op = create_override(alice.get_id(), allowed2.get_id(), asset(20, alicecoin.id), arbitrator.get_id()); + override_op = create_override(alice.get_id(), allowed2.get_id(), asset(20, alicecoin.get_id()), arbitrator.get_id()); trx.clear(); trx.operations = {override_op}; sign(trx, bob_private_key); @@ -2868,7 +2870,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // Alice attempts to override transfer of SPECIAL COIN from an allowed account // This should succeed because Alice has not revoked her own authorities as issuer ////// - override_op = create_override(alice.get_id(), allowed3.get_id(), asset(500, specialcoin.id), arbitrator.get_id()); + override_op = create_override(alice.get_id(), allowed3.get_id(), asset(500, specialcoin.get_id()), arbitrator.get_id()); trx.clear(); trx.operations = {override_op}; sign(trx, alice_private_key); @@ -2911,12 +2913,12 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { ////// // Define core asset const auto &core = asset_id_type()(db); - asset_id_type core_id = core.id; + asset_id_type core_id = core.get_id(); // Create a smart asset const asset_object &bitusd = create_bitasset("USDBIT", feedproducer_id); - asset_id_type usd_id = bitusd.id; - update_feed_producers(bitusd, {feedproducer.id}); + asset_id_type usd_id = bitusd.get_id(); + update_feed_producers(bitusd, {feedproducer.get_id()}); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; current_feed.maximum_short_squeeze_ratio = 1100; @@ -3650,7 +3652,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { ////// flat_set new_producers = {trusted1.get_id(), trusted2.get_id()}; asset_update_feed_producers_operation producers_op - = create_producers_op(alice.get_id(), alicecoin.id, new_producers); + = create_producers_op(alice.get_id(), alicecoin.get_id(), new_producers); trx.clear(); trx.operations = {producers_op}; sign(trx, alice_private_key); @@ -3668,7 +3670,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // This should fail because Bob is not authorized to update feed producers for ALICECOIN ////// new_producers = {trusted3.get_id()}; - producers_op = create_producers_op(alice.get_id(), alicecoin.id, new_producers); + producers_op = create_producers_op(alice.get_id(), alicecoin.get_id(), new_producers); trx.clear(); trx.operations = {producers_op}; sign(trx, bob_private_key); @@ -3739,7 +3741,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // when an untrusted account is included ////// new_producers = {trusted4.get_id(), untrusted1.get_id()}; - producers_op = create_producers_op(alice.get_id(), alicecoin.id, new_producers); + producers_op = create_producers_op(alice.get_id(), alicecoin.get_id(), new_producers); 
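The pattern running through these test hunks is uniform: reads of the generic id member are replaced by the typed get_id() accessor, or by an explicit construction such as object_id_type( account_id_type(1) ) or worker_id_type worker1_id { worker1.id }. The following is a minimal sketch of the underlying idea; the types below are simplified stand-ins for illustration only, not the actual graphene::db classes, and they assume the goal of the change is to make the generic-to-typed id conversion explicit rather than implicit.

    // Simplified stand-ins for illustration only; not the actual graphene::db classes.
    #include <cassert>
    #include <cstdint>

    struct object_id_type                    // generic id, as stored in a database object
    {
       explicit object_id_type( uint64_t v = 0 ) : value(v) {}
       uint64_t value;
    };

    struct account_id_type                   // strongly typed id
    {
       account_id_type() = default;
       explicit account_id_type( const object_id_type& o ) : instance(o.value) {}  // explicit conversion
       uint64_t instance = 0;
    };

    struct account_object
    {
       object_id_type id;                                                // generic id member
       account_id_type get_id() const { return account_id_type(id); }   // typed accessor
    };

    int main()
    {
       account_object acct;
       acct.id = object_id_type(42);

       // account_id_type a = acct.id;       // copy-initialization stops compiling once the
       //                                    // converting constructor is explicit
       account_id_type b = acct.get_id();    // typed accessor, as in the .get_id() rewrites above
       account_id_type c { acct.id };        // explicit brace construction, as in
                                             // worker_id_type worker1_id { worker1.id };
       assert( b.instance == 42 && c.instance == 42 );
       return 0;
    }

Under that assumption, copy-initializing a typed id straight from the raw id member no longer compiles, which is consistent with the mechanical .id to .get_id() and brace-initialization rewrites in the surrounding hunks.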
trx.clear(); trx.operations = {producers_op}; sign(trx, bob_private_key); @@ -3757,7 +3759,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // when an untrusted account is included ////// new_producers = {trusted4.get_id(), untrusted1.get_id()}; - producers_op = create_producers_op(alice.get_id(), alicecoin.id, new_producers); + producers_op = create_producers_op(alice.get_id(), alicecoin.get_id(), new_producers); trx.clear(); trx.operations = {producers_op}; sign(trx, bob_private_key); @@ -3775,7 +3777,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // when an untrusted account is included ////// new_producers = {untrusted2.get_id(), untrusted3.get_id()}; - producers_op = create_producers_op(alice.get_id(), alicecoin.id, new_producers); + producers_op = create_producers_op(alice.get_id(), alicecoin.get_id(), new_producers); trx.clear(); trx.operations = {producers_op}; sign(trx, bob_private_key); @@ -3847,7 +3849,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { ////// flat_set new_producers = {trusted1.get_id(), trusted2.get_id(), trusted3.get_id()}; asset_update_feed_producers_operation producers_op - = create_producers_op(alice.get_id(), alicecoin.id, new_producers); + = create_producers_op(alice.get_id(), alicecoin.get_id(), new_producers); trx.clear(); trx.operations = {producers_op}; sign(trx, alice_private_key); @@ -3866,7 +3868,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // This should fail because Bob is not authorized to update feed producers for ALICECOIN ////// new_producers = {trusted1.get_id(), trusted2.get_id(), trusted3.get_id(), unknown1.get_id()}; - producers_op = create_producers_op(alice.get_id(), alicecoin.id, new_producers); + producers_op = create_producers_op(alice.get_id(), alicecoin.get_id(), new_producers); trx.clear(); trx.operations = {producers_op}; sign(trx, bob_private_key); @@ -3936,7 +3938,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // This should fail not all of the required feed producers are included ////// new_producers = {unknown2.get_id(), unknown3.get_id()}; - producers_op = create_producers_op(alice.get_id(), alicecoin.id, new_producers); + producers_op = create_producers_op(alice.get_id(), alicecoin.get_id(), new_producers); trx.clear(); trx.operations = {producers_op}; sign(trx, bob_private_key); @@ -3954,7 +3956,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // This should fail not all of the required feed producers are included ////// new_producers = {trusted1.get_id(), unknown2.get_id(), unknown3.get_id()}; - producers_op = create_producers_op(alice.get_id(), alicecoin.id, new_producers); + producers_op = create_producers_op(alice.get_id(), alicecoin.get_id(), new_producers); trx.clear(); trx.operations = {producers_op}; sign(trx, bob_private_key); @@ -3972,7 +3974,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // This should fail not all of the required feed producers are included ////// new_producers = {trusted1.get_id(), unknown2.get_id(), unknown3.get_id(), trusted2.get_id()}; - producers_op = create_producers_op(alice.get_id(), alicecoin.id, new_producers); + producers_op = create_producers_op(alice.get_id(), alicecoin.get_id(), new_producers); trx.clear(); trx.operations = {producers_op}; sign(trx, bob_private_key); @@ -3991,7 +3993,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // and because the all of the required feed producers are included ////// new_producers = {trusted1.get_id(), unknown2.get_id(), unknown3.get_id(), trusted2.get_id(), trusted3.get_id()}; - producers_op = create_producers_op(alice.get_id(), alicecoin.id, 
new_producers); + producers_op = create_producers_op(alice.get_id(), alicecoin.get_id(), new_producers); trx.clear(); trx.operations = {producers_op}; sign(trx, bob_private_key); @@ -4004,7 +4006,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // and because the all of the required feed producers are included ////// new_producers = {trusted3.get_id(), trusted2.get_id(), trusted1.get_id()}; - producers_op = create_producers_op(alice.get_id(), alicecoin.id, new_producers); + producers_op = create_producers_op(alice.get_id(), alicecoin.get_id(), new_producers); trx.clear(); trx.operations = {producers_op}; sign(trx, bob_private_key); @@ -4337,7 +4339,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { ////// generate_blocks(1); graphene::chain::htlc_id_type alice_htlc_id = - db.get_index_type().indices().get().find(alice.get_id())->id; + db.get_index_type().indices().get().find(alice.get_id())->get_id(); ////// @@ -4399,7 +4401,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { ////// generate_blocks(1); auto caa = db.get_index_type().indices().get().find(gateway.get_id()); - custom_authority_id_type caa_id = caa->id; + custom_authority_id_type caa_id = caa->get_id(); ////// @@ -4535,7 +4537,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { ////// generate_blocks(1); graphene::chain::htlc_id_type alice_htlc_id = - db.get_index_type().indices().get().find(alice.get_id())->id; + db.get_index_type().indices().get().find(alice.get_id())->get_id(); ////// @@ -4922,8 +4924,8 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { ////// generate_blocks(1); set_expiration(db, trx); - vesting_balance_id_type vesting_balance_id = - db.get_index_type().indices().get().find(alice.get_id())->id; + vesting_balance_id_type vesting_balance_id { + db.get_index_type().indices().get().find(alice.get_id())->id }; ////// @@ -5061,17 +5063,17 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { ////// // Define core asset const auto &core = asset_id_type()(db); - asset_id_type core_id = core.id; + asset_id_type core_id = core.get_id(); // Create a smart asset create_bitasset("USDBIT", feedproducer_id); generate_blocks(1); const asset_object &bitusd = *db.get_index_type().indices().get().find("USDBIT"); - asset_id_type usd_id = bitusd.id; + asset_id_type usd_id = bitusd.get_id(); // Configure the smart asset - update_feed_producers(bitusd, {feedproducer.id}); + update_feed_producers(bitusd, {feedproducer.get_id()}); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; current_feed.maximum_short_squeeze_ratio = 1100; @@ -5308,9 +5310,9 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // Initialize: assetissuer issues SPECIALCOIN to different accounts ////// asset_issue_operation issue_special_to_alice_op - = issue_amount_to(assetissuer.get_id(), asset(1000, specialcoin.id), alice.get_id()); + = issue_amount_to(assetissuer.get_id(), asset(1000, specialcoin.get_id()), alice.get_id()); asset_issue_operation issue_special_to_charlie_op - = issue_amount_to(assetissuer.get_id(), asset(2000, specialcoin.id), charlie.get_id()); + = issue_amount_to(assetissuer.get_id(), asset(2000, specialcoin.get_id()), charlie.get_id()); trx.clear(); trx.operations = {issue_special_to_alice_op, issue_special_to_charlie_op}; sign(trx, assetissuer_private_key); @@ -5320,24 +5322,24 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { ////// // Alice reserves some SPECIALCOIN from her account ////// - asset_reserve_operation reserve_op = reserve_asset(alice.get_id(), asset(200, specialcoin.id)); + asset_reserve_operation reserve_op = 
reserve_asset(alice.get_id(), asset(200, specialcoin.get_id())); trx.clear(); trx.operations = {reserve_op}; sign(trx, alice_private_key); PUSH_TX(db, trx); - int64_t allowed1_balance_specialcoin_after_override1 = get_balance(alice.get_id(), specialcoin.id); + int64_t allowed1_balance_specialcoin_after_override1 = get_balance(alice.get_id(), specialcoin.get_id()); BOOST_CHECK_EQUAL(allowed1_balance_specialcoin_after_override1, 800); ////// // Charlie reserves some SPECIALCOIN from his account ////// - reserve_op = reserve_asset(charlie.get_id(), asset(200, specialcoin.id)); + reserve_op = reserve_asset(charlie.get_id(), asset(200, specialcoin.get_id())); trx.clear(); trx.operations = {reserve_op}; sign(trx, charlie_private_key); PUSH_TX(db, trx); - int64_t charlie_balance_specialcoin_after_override1 = get_balance(charlie.get_id(), specialcoin.id); + int64_t charlie_balance_specialcoin_after_override1 = get_balance(charlie.get_id(), specialcoin.get_id()); BOOST_CHECK_EQUAL(charlie_balance_specialcoin_after_override1, 1800); @@ -5425,7 +5427,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { ////// generate_blocks(1); const proposal_object& prop = *db.get_index_type().indices().begin(); - proposal_id_type proposal_id = prop.id; + proposal_id_type proposal_id = prop.get_id(); // Alice approves the proposal proposal_update_operation approve_proposal; @@ -5450,7 +5452,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // This attempt should fail because Bob the Alice authorization is not yet active ////// { - asset_reserve_operation reserve_op = reserve_asset(alice.get_id(), asset(200, specialcoin.id)); + asset_reserve_operation reserve_op = reserve_asset(alice.get_id(), asset(200, specialcoin.get_id())); trx.clear(); trx.operations = {reserve_op}; sign(trx, bob_private_key); @@ -5473,7 +5475,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // This should succeed because the authorization is active ////// { - asset_reserve_operation reserve_op = reserve_asset(alice.get_id(), asset(200, specialcoin.id)); + asset_reserve_operation reserve_op = reserve_asset(alice.get_id(), asset(200, specialcoin.get_id())); trx.clear(); trx.operations = {reserve_op}; sign(trx, bob_private_key); @@ -5493,7 +5495,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // This should succeed because the authorization is active ////// { - asset_reserve_operation reserve_op = reserve_asset(alice.get_id(), asset(200, specialcoin.id)); + asset_reserve_operation reserve_op = reserve_asset(alice.get_id(), asset(200, specialcoin.get_id())); trx.clear(); trx.operations = {reserve_op}; sign(trx, bob_private_key); @@ -5513,7 +5515,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // This should fail because Bob the authorization has expired ////// { - asset_reserve_operation reserve_op = reserve_asset(alice.get_id(), asset(200, specialcoin.id)); + asset_reserve_operation reserve_op = reserve_asset(alice.get_id(), asset(200, specialcoin.get_id())); trx.clear(); trx.operations = {reserve_op}; sign(trx, bob_private_key); @@ -5789,7 +5791,7 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { auto itr_witnesses = witnesses.begin(); witness_id_type witness0_id = itr_witnesses[0]; const auto& idx = db.get_index_type().indices().get(); - witness_object witness0_obj = *idx.find(witness0_id); + witness_object witness0_obj = *idx.find(object_id_type(witness0_id)); ////// @@ -6132,13 +6134,13 @@ BOOST_AUTO_TEST_CASE(custom_auths) { try { // Create a new witness account (witness0) ACTORS((witness0)); // Upgrade witness account to LTM - 
upgrade_to_lifetime_member(witness0.id); + upgrade_to_lifetime_member(witness0.get_id()); generate_block(); // Create the witnesses // Get the witness0 identifier after a block has been generated // to be sure of using the most up-to-date identifier for the account - const account_id_type witness0_identifier = get_account("witness0").id; + const account_id_type witness0_identifier = get_account("witness0").get_id(); create_witness(witness0_identifier, witness0_private_key); generate_block(); diff --git a/tests/tests/custom_operations.cpp b/tests/tests/custom_operations.cpp index 37dfacc18d..e1a837cc59 100644 --- a/tests/tests/custom_operations.cpp +++ b/tests/tests/custom_operations.cpp @@ -316,7 +316,7 @@ try { BOOST_CHECK_EQUAL(storage_results[1].key, "language"); BOOST_CHECK_EQUAL(storage_results[1].value->as_string(), "en"); - account_storage_id_type storage_id = storage_results[1].id; + account_storage_id_type storage_id { storage_results[1].id }; storage_results = custom_operations_api.get_storage_info({}, "settings", {}, 2, storage_id); BOOST_REQUIRE_EQUAL(storage_results.size(), 2U ); diff --git a/tests/tests/database_api_tests.cpp b/tests/tests/database_api_tests.cpp index fe2138221d..95f7a88b62 100644 --- a/tests/tests/database_api_tests.cpp +++ b/tests/tests/database_api_tests.cpp @@ -148,9 +148,9 @@ BOOST_AUTO_TEST_CASE( get_signatures_non_immediate_owner ) const account_object& nathan = create_account("nathan", nathan_key1.get_public_key() ); const account_object& ashley = create_account("ashley", ashley_key1.get_public_key() ); const account_object& oliver = create_account("oliver", oliver_key1.get_public_key() ); - account_id_type nathan_id = nathan.id; - account_id_type ashley_id = ashley.id; - account_id_type oliver_id = oliver.id; + account_id_type nathan_id = nathan.get_id(); + account_id_type ashley_id = ashley.get_id(); + account_id_type oliver_id = oliver.get_id(); try { account_update_operation op; @@ -759,8 +759,8 @@ BOOST_AUTO_TEST_CASE( get_required_signatures_partially_signed_or_not ) } BOOST_AUTO_TEST_CASE( subscription_key_collision_test ) -{ - object_id_type uia_object_id = create_user_issued_asset( "UIATEST" ).get_id(); +{ try { + object_id_type uia_object_id = create_user_issued_asset( "UIATEST" ).id; uint32_t objects_changed = 0; auto callback = [&]( const variant& v ) @@ -773,7 +773,7 @@ BOOST_AUTO_TEST_CASE( subscription_key_collision_test ) // subscribe to an account which has same instance ID as UIATEST vector collision_ids; - collision_ids.push_back( string( object_id_type( account_id_type( uia_object_id ) ) ) ); + collision_ids.push_back( string( object_id_type( account_id_type( uia_object_id.instance() ) ) ) ); db_api.get_accounts( collision_ids ); generate_block(); @@ -789,7 +789,7 @@ BOOST_AUTO_TEST_CASE( subscription_key_collision_test ) fc::usleep(fc::milliseconds(200)); // sleep a while to execute callback in another thread BOOST_CHECK_EQUAL( objects_changed, 0 ); // UIATEST did not change in this block, so no notification -} +} FC_CAPTURE_LOG_AND_RETHROW( (0) ) } BOOST_AUTO_TEST_CASE( subscription_notification_test ) { @@ -901,7 +901,7 @@ BOOST_AUTO_TEST_CASE( subscription_notification_test ) #undef SUB_NOTIF_TEST_START_ID_DISABLE_AUTO_SUB vector account_ids; - account_ids.push_back( alice_id ); + account_ids.push_back( alice.id ); db_api1.get_objects( account_ids ); // db_api1 subscribe to Alice db_api11.get_objects( account_ids, true ); // db_api11 subscribe to Alice db_api21.get_objects( account_ids, false ); // db_api21 doesn't subscribe to 
Alice @@ -1105,7 +1105,7 @@ BOOST_AUTO_TEST_CASE( get_all_workers ) vector results; const auto& worker1 = create_worker( connie_id, 1000, fc::days(10) ); - worker_id_type worker1_id = worker1.id; + worker_id_type worker1_id { worker1.id }; BOOST_REQUIRE_EQUAL( db_api.get_all_workers().size(), 1 ); BOOST_REQUIRE_EQUAL( db_api.get_all_workers(true).size(), 0 ); @@ -1123,7 +1123,7 @@ BOOST_AUTO_TEST_CASE( get_all_workers ) BOOST_CHECK( db_api.get_all_workers(true).front().id == worker1_id ); const auto& worker2 = create_worker( whitney_id, 1000, fc::days(50) ); - worker_id_type worker2_id = worker2.id; + worker_id_type worker2_id { worker2.id }; BOOST_REQUIRE_EQUAL( db_api.get_all_workers().size(), 2 ); BOOST_REQUIRE_EQUAL( db_api.get_all_workers(true).size(), 1 ); @@ -1134,7 +1134,7 @@ BOOST_AUTO_TEST_CASE( get_all_workers ) BOOST_CHECK( db_api.get_all_workers(false).front().id == worker2_id ); const auto& worker3 = create_worker( wolverine_id, 1000, fc::days(100) ); - worker_id_type worker3_id = worker3.id; + worker_id_type worker3_id { worker3.id }; BOOST_REQUIRE_EQUAL( db_api.get_all_workers().size(), 3 ); BOOST_REQUIRE_EQUAL( db_api.get_all_workers(true).size(), 1 ); @@ -1185,13 +1185,13 @@ BOOST_AUTO_TEST_CASE( get_workers_by_account ) vector results; const auto& worker1 = create_worker( connie_id ); - worker_id_type worker1_id = worker1.id; + worker_id_type worker1_id { worker1.id }; const auto& worker2 = create_worker( whitney_id, 1000, fc::days(50) ); - worker_id_type worker2_id = worker2.id; + worker_id_type worker2_id { worker2.id }; const auto& worker3 = create_worker( whitney_id, 1000, fc::days(100) ); - worker_id_type worker3_id = worker3.id; + worker_id_type worker3_id { worker3.id }; BOOST_REQUIRE_EQUAL( db_api.get_workers_by_account("connie").size(), 1 ); BOOST_CHECK( db_api.get_workers_by_account("connie").front().id == worker1_id ); @@ -1290,7 +1290,7 @@ BOOST_AUTO_TEST_CASE(get_limit_orders_by_account) o = results.back(); // Get the No. 101-201 orders - results = db_api.get_limit_orders_by_account( seller.name, {}, o.id ); + results = db_api.get_limit_orders_by_account( seller.name, {}, o.get_id() ); BOOST_CHECK_EQUAL( results.size(), 101 ); for (size_t i = 0 ; i < results.size() - 1 ; ++i) { @@ -1301,7 +1301,7 @@ BOOST_AUTO_TEST_CASE(get_limit_orders_by_account) o = results.back(); // Get the No. 201- orders - results = db_api.get_limit_orders_by_account( seller.name, {}, o.id ); + results = db_api.get_limit_orders_by_account( seller.name, {}, o.get_id() ); BOOST_CHECK_EQUAL( results.size(), 50 ); for (size_t i = 0 ; i < results.size() - 1 ; ++i) { @@ -1311,7 +1311,7 @@ BOOST_AUTO_TEST_CASE(get_limit_orders_by_account) BOOST_CHECK(results.back().sell_price == price(core.amount(100), bitcny.amount(150))); // Get the No. 
201-210 orders - results2 = db_api.get_limit_orders_by_account( seller.name, 10, o.id ); + results2 = db_api.get_limit_orders_by_account( seller.name, 10, o.get_id() ); BOOST_CHECK_EQUAL( results2.size(), 10 ); for (size_t i = 0 ; i < results2.size() - 1 ; ++i) { @@ -1322,12 +1322,12 @@ BOOST_AUTO_TEST_CASE(get_limit_orders_by_account) BOOST_CHECK(results2.back().sell_price == price(core.amount(100), bitcny.amount(170))); // Buyer has 70 orders, all IDs are greater than sellers - results = db_api.get_limit_orders_by_account( buyer.name, 90, o.id ); + results = db_api.get_limit_orders_by_account( buyer.name, 90, o.get_id() ); BOOST_CHECK_EQUAL( results.size(), 70 ); o = results.back(); // All seller's order IDs are smaller, so querying with a buyer's ID will get nothing - results = db_api.get_limit_orders_by_account( seller.name, 90, o.id ); + results = db_api.get_limit_orders_by_account( seller.name, 90, o.get_id() ); BOOST_CHECK_EQUAL( results.size(), 0 ); // Watcher has no order @@ -1692,7 +1692,7 @@ BOOST_AUTO_TEST_CASE( verify_authority_multiple_accounts ) try { account_update_operation op; op.account = nathan.id; - op.active = authority(3, nathan_public_key, 1, alice.id, 1, bob.id, 1); + op.active = authority(3, nathan_public_key, 1, alice.get_id(), 1, bob.get_id(), 1); op.owner = *op.active; trx.operations.push_back(op); sign(trx, nathan_private_key); @@ -1764,8 +1764,8 @@ BOOST_AUTO_TEST_CASE( get_call_orders_by_account ) { int64_t init_balance(1000000); transfer(committee_account, caller_id, asset(init_balance)); - update_feed_producers(usd, {feedproducer.id}); - update_feed_producers(cny, {feedproducer.id}); + update_feed_producers(usd, {feedproducer.get_id()}); + update_feed_producers(cny, {feedproducer.get_id()}); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -1803,13 +1803,13 @@ BOOST_AUTO_TEST_CASE( get_settle_orders_by_account ) { const auto &usd = create_bitasset("USD", creator_id); const auto &core = asset_id_type()(db); - asset_id_type usd_id = usd.id; + asset_id_type usd_id = usd.get_id(); int64_t init_balance(1000000); transfer(committee_account, settler_id, asset(init_balance)); transfer(committee_account, caller_id, asset(init_balance)); - update_feed_producers(usd, {feedproducer.id}); + update_feed_producers(usd, {feedproducer.get_id()}); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -1820,7 +1820,7 @@ BOOST_AUTO_TEST_CASE( get_settle_orders_by_account ) { borrow(caller, usd.amount(1000), asset(15000)); generate_block(); - transfer(caller.id, settler.id, asset(200, usd_id)); + transfer(caller.get_id(), settler.get_id(), asset(200, usd_id)); auto result = force_settle( settler, usd_id(db).amount(4)); generate_block(); @@ -1854,11 +1854,11 @@ BOOST_AUTO_TEST_CASE( asset_in_collateral ) BOOST_CHECK_EQUAL( 0, oassets[0]->total_in_collateral->value ); BOOST_CHECK( !oassets[0]->total_backing_collateral.valid() ); - asset_id_type bitusd_id = create_bitasset( "USDBIT", nathan_id, 100, charge_market_fee ).id; + asset_id_type bitusd_id = create_bitasset( "USDBIT", nathan_id, 100, charge_market_fee ).get_id(); update_feed_producers( bitusd_id, { nathan_id } ); - asset_id_type bitdan_id = create_bitasset( "DANBIT", dan_id, 100, charge_market_fee ).id; + asset_id_type bitdan_id = create_bitasset( "DANBIT", dan_id, 100, charge_market_fee ).get_id(); update_feed_producers( bitdan_id, { nathan_id } ); - asset_id_type btc_id = create_bitasset( "BTC", nathan_id, 100, charge_market_fee, 8, bitusd_id ).id; + asset_id_type 
btc_id = create_bitasset( "BTC", nathan_id, 100, charge_market_fee, 8, bitusd_id ).get_id(); update_feed_producers( btc_id, { nathan_id } ); oassets = db_api.get_assets( { GRAPHENE_SYMBOL, "USDBIT", "DANBIT", "BTC" } ); @@ -1998,9 +1998,9 @@ BOOST_AUTO_TEST_CASE( get_trade_history ) ACTORS((bob)(alice)); const auto& eur = create_user_issued_asset("EUR"); - asset_id_type eur_id = eur.id; + asset_id_type eur_id = eur.get_id(); const auto& usd = create_user_issued_asset("USD"); - asset_id_type usd_id = usd.id; + asset_id_type usd_id = usd.get_id(); issue_uia( bob_id, usd.amount(1000000) ); issue_uia( alice_id, eur.amount(1000000) ); diff --git a/tests/tests/database_tests.cpp b/tests/tests/database_tests.cpp index dd072fc1d5..955abc095a 100644 --- a/tests/tests/database_tests.cpp +++ b/tests/tests/database_tests.cpp @@ -73,7 +73,7 @@ BOOST_AUTO_TEST_CASE(failed_modify_test) const auto& obj = db.create([](account_balance_object& obj) { obj.owner = account_id_type(123); }); - account_balance_id_type obj_id = obj.id; + account_balance_id_type obj_id { obj.id }; BOOST_CHECK_EQUAL(obj.owner.instance.value, 123u); // Modify dummy object, check that changes stick @@ -86,15 +86,15 @@ BOOST_AUTO_TEST_CASE(failed_modify_test) BOOST_CHECK_THROW(db.modify(obj, [](account_balance_object& obj) { throw 5; }), int); - BOOST_CHECK(db.find_object(obj_id)); + BOOST_CHECK(db.find(obj_id)); } FC_LOG_AND_RETHROW() } BOOST_AUTO_TEST_CASE( flat_index_test ) { try { ACTORS((sam)); - const auto& bitusd = create_bitasset("USDBIT", sam.id); - const asset_id_type bitusd_id = bitusd.id; - update_feed_producers(bitusd, {sam.id}); + const auto& bitusd = create_bitasset("USDBIT", sam.get_id()); + const asset_id_type bitusd_id = bitusd.get_id(); + update_feed_producers(bitusd, {sam.get_id()}); price_feed current_feed; current_feed.settlement_price = bitusd.amount(100) / asset(100); publish_feed(bitusd, sam, current_feed); @@ -113,7 +113,7 @@ BOOST_AUTO_TEST_CASE( flat_index_test ) } // force maintenance - const auto& dynamic_global_props = db.get(dynamic_global_property_id_type()); + const auto& dynamic_global_props = db.get(dynamic_global_property_id_type()); generate_blocks(dynamic_global_props.next_maintenance_time, true); BOOST_CHECK( !(*bitusd_id(db).bitasset_data_id)(db).current_feed.settlement_price.is_null() ); @@ -154,7 +154,7 @@ BOOST_AUTO_TEST_CASE( direct_index_test ) BOOST_CHECK_THROW( direct.get( account_id_type( 1 ) ), fc::assert_exception ); account_object test_account; - test_account.id = account_id_type(1); + test_account.id = object_id_type( account_id_type(1) ); test_account.name = "account1"; my_accounts.load( fc::raw::pack( test_account ) ); @@ -166,7 +166,7 @@ BOOST_AUTO_TEST_CASE( direct_index_test ) BOOST_CHECK_EQUAL( test_account.name, direct.get( test_account.id ).name ); // The following assumes that MAX_HOLE = 100 - test_account.id = account_id_type(102); + test_account.id = object_id_type( account_id_type(102) ); test_account.name = "account102"; // highest insert was 1, direct.next is 2 => 102 is highest allowed instance my_accounts.load( fc::raw::pack( test_account ) ); @@ -179,7 +179,7 @@ BOOST_AUTO_TEST_CASE( direct_index_test ) acct.name = "account0"; } ); - test_account.id = account_id_type(50); + test_account.id = object_id_type( account_id_type(50) ); test_account.name = "account50"; my_accounts.load( fc::raw::pack( test_account ) ); @@ -194,7 +194,7 @@ BOOST_AUTO_TEST_CASE( direct_index_test ) }); // direct.next is still 103, so 204 is not allowed - test_account.id = 
account_id_type(204); + test_account.id = object_id_type( account_id_type(204) ); test_account.name = "account204"; GRAPHENE_REQUIRE_THROW( my_accounts.load( fc::raw::pack( test_account ) ), fc::assert_exception ); // This is actually undefined behaviour. The object has been inserted into @@ -204,7 +204,8 @@ BOOST_AUTO_TEST_CASE( direct_index_test ) uint32_t count = 0; for( uint32_t i = 0; i < 250; i++ ) { - const account_object* aptr = dynamic_cast< const account_object* >( my_accounts.find( account_id_type( i ) ) ); + const account_object* aptr = dynamic_cast< const account_object* >( + my_accounts.find( object_id_type( account_id_type( i ) ) ) ); if( aptr ) { count++; @@ -217,7 +218,7 @@ BOOST_AUTO_TEST_CASE( direct_index_test ) BOOST_CHECK_EQUAL( count, my_accounts.indices().size() - 1 ); GRAPHENE_REQUIRE_THROW( my_accounts.modify( direct.get( account_id_type( 1 ) ), [] ( object& acct ) { - acct.id = account_id_type(2); + acct.id = object_id_type( account_id_type(2) ); }), fc::assert_exception ); // This is actually undefined behaviour. The object has been modified, but // but the secondary has not updated its representation diff --git a/tests/tests/fee_tests.cpp b/tests/tests/fee_tests.cpp index 2f26c2b151..f8d13323c7 100644 --- a/tests/tests/fee_tests.cpp +++ b/tests/tests/fee_tests.cpp @@ -99,8 +99,10 @@ BOOST_AUTO_TEST_CASE(asset_claim_fees_test) transfer( committee_account, izzy_id, _core(1000000) ); transfer( committee_account, jill_id, _core(1000000) ); - asset_id_type izzycoin_id = create_bitasset( "IZZYCOIN", izzy_id, GRAPHENE_1_PERCENT, charge_market_fee ).id; - asset_id_type jillcoin_id = create_bitasset( "JILLCOIN", jill_id, 2*GRAPHENE_1_PERCENT, charge_market_fee ).id; + asset_id_type izzycoin_id = create_bitasset( "IZZYCOIN", izzy_id, GRAPHENE_1_PERCENT, charge_market_fee ) + .get_id(); + asset_id_type jillcoin_id = create_bitasset( "JILLCOIN", jill_id, 2*GRAPHENE_1_PERCENT, charge_market_fee ) + .get_id(); const share_type izzy_prec = asset::scaled_precision( asset_id_type(izzycoin_id)(db).precision ); const share_type jill_prec = asset::scaled_precision( asset_id_type(jillcoin_id)(db).precision ); @@ -219,9 +221,9 @@ BOOST_AUTO_TEST_CASE(asset_claim_pool_test) const asset_object& alicecoin = create_user_issued_asset( "ALICECOIN", alice, 0 ); const asset_object& aliceusd = create_user_issued_asset( "ALICEUSD", alice, 0 ); - asset_id_type alicecoin_id = alicecoin.id; - asset_id_type aliceusd_id = aliceusd.id; - asset_id_type bobcoin_id = create_user_issued_asset( "BOBCOIN", bob, 0).id; + asset_id_type alicecoin_id = alicecoin.get_id(); + asset_id_type aliceusd_id = aliceusd.get_id(); + asset_id_type bobcoin_id = create_user_issued_asset( "BOBCOIN", bob, 0).get_id(); // prepare users' balance issue_uia( alice, aliceusd.amount( 20000000 ) ); @@ -726,7 +728,7 @@ BOOST_AUTO_TEST_CASE( fee_refund_test ) transfer( account_id_type(), bob_id, asset(bob_b0) ); asset_id_type core_id = asset_id_type(); - asset_id_type usd_id = create_user_issued_asset( "IZZYUSD", izzy_id(db), charge_market_fee ).id; + asset_id_type usd_id = create_user_issued_asset( "IZZYUSD", izzy_id(db), charge_market_fee ).get_id(); issue_uia( alice_id, asset( alice_b0, usd_id ) ); issue_uia( bob_id, asset( bob_b0, usd_id ) ); @@ -785,8 +787,8 @@ BOOST_AUTO_TEST_CASE( fee_refund_test ) // Check non-overlapping - limit_order_id_type ao1_id = create_sell_order( alice_id, asset(1000), asset(1000, usd_id) )->id; - limit_order_id_type bo1_id = create_sell_order( bob_id, asset(500, usd_id), asset(1000) )->id; + 
limit_order_id_type ao1_id = create_sell_order( alice_id, asset(1000), asset(1000, usd_id) )->get_id(); + limit_order_id_type bo1_id = create_sell_order( bob_id, asset(500, usd_id), asset(1000) )->get_id(); BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_b0 - 1000 - order_create_fee ); BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_b0 ); @@ -861,7 +863,7 @@ BOOST_AUTO_TEST_CASE( non_core_fee_refund_test ) asset_id_type core_id = asset_id_type(); const auto& usd_obj = create_user_issued_asset( "IZZYUSD", izzy_id(db), charge_market_fee ); - asset_id_type usd_id = usd_obj.id; + asset_id_type usd_id = usd_obj.get_id(); issue_uia( alice_id, asset( alice_b0, usd_id ) ); issue_uia( bob_id, asset( bob_b0, usd_id ) ); @@ -951,8 +953,9 @@ BOOST_AUTO_TEST_CASE( non_core_fee_refund_test ) // Check non-overlapping // Alice creates order // Bob creates order which doesn't match - limit_order_id_type ao1_id = create_sell_order( alice_id, asset(1000), asset(1000, usd_id) )->id; - limit_order_id_type bo1_id = create_sell_order( bob_id, asset(500, usd_id), asset(1000), exp, cer )->id; + limit_order_id_type ao1_id = create_sell_order( alice_id, asset(1000), asset(1000, usd_id) )->get_id(); + limit_order_id_type bo1_id = create_sell_order( bob_id, asset(500, usd_id), asset(1000), exp, cer ) + ->get_id(); alice_bc -= order_create_fee; alice_bc -= 1000; @@ -1025,10 +1028,10 @@ BOOST_AUTO_TEST_CASE( non_core_fee_refund_test ) // Check partial fill const limit_order_object* ao2 = create_sell_order( alice_id, asset(1000), asset(200, usd_id), exp, cer ); - const limit_order_id_type ao2id = ao2->id; + const limit_order_id_type ao2id = ao2->get_id(); const limit_order_object* bo2 = create_sell_order( bob_id, asset(100, usd_id), asset(500) ); - BOOST_CHECK( db.find( ao2id ) != nullptr ); + BOOST_CHECK( db.find( ao2id ) != nullptr ); BOOST_CHECK( bo2 == nullptr ); // data after order created @@ -1098,11 +1101,11 @@ BOOST_AUTO_TEST_CASE( non_core_fee_refund_test ) const limit_order_object* ao34 = create_sell_order( alice_id, asset(1000), asset(200, usd_id), max_exp, cer ); const limit_order_object* ao35 = create_sell_order( alice_id, asset(1000), asset(200, usd_id), max_exp, cer ); - const limit_order_id_type ao31id = ao31->id; - const limit_order_id_type ao32id = ao32->id; - const limit_order_id_type ao33id = ao33->id; - const limit_order_id_type ao34id = ao34->id; - const limit_order_id_type ao35id = ao35->id; + const limit_order_id_type ao31id = ao31->get_id(); + const limit_order_id_type ao32id = ao32->get_id(); + const limit_order_id_type ao33id = ao33->get_id(); + const limit_order_id_type ao34id = ao34->get_id(); + const limit_order_id_type ao35id = ao35->get_id(); alice_bc -= 1000 * 5; alice_bu -= order_create_fee * 5; @@ -1119,11 +1122,11 @@ BOOST_AUTO_TEST_CASE( non_core_fee_refund_test ) // Bob creating an order matching multiple Alice's orders const limit_order_object* bo31 = create_sell_order( bob_id, asset(500, usd_id), asset(2500), exp ); - BOOST_CHECK( db.find( ao31id ) == nullptr ); - BOOST_CHECK( db.find( ao32id ) != nullptr ); - BOOST_CHECK( db.find( ao33id ) == nullptr ); - BOOST_CHECK( db.find( ao34id ) != nullptr ); - BOOST_CHECK( db.find( ao35id ) != nullptr ); + BOOST_CHECK( db.find( ao31id ) == nullptr ); + BOOST_CHECK( db.find( ao32id ) != nullptr ); + BOOST_CHECK( db.find( ao33id ) == nullptr ); + BOOST_CHECK( db.find( ao34id ) != nullptr ); + BOOST_CHECK( db.find( ao35id ) != nullptr ); BOOST_CHECK( bo31 == nullptr ); // data after order created @@ -1144,11 +1147,11 @@ 
BOOST_AUTO_TEST_CASE( non_core_fee_refund_test ) // Bob creating an order matching multiple Alice's orders const limit_order_object* bo32 = create_sell_order( bob_id, asset(500, usd_id), asset(2500), exp ); - BOOST_CHECK( db.find( ao31id ) == nullptr ); - BOOST_CHECK( db.find( ao32id ) != nullptr ); - BOOST_CHECK( db.find( ao33id ) == nullptr ); - BOOST_CHECK( db.find( ao34id ) == nullptr ); - BOOST_CHECK( db.find( ao35id ) == nullptr ); + BOOST_CHECK( db.find( ao31id ) == nullptr ); + BOOST_CHECK( db.find( ao32id ) != nullptr ); + BOOST_CHECK( db.find( ao33id ) == nullptr ); + BOOST_CHECK( db.find( ao34id ) == nullptr ); + BOOST_CHECK( db.find( ao35id ) == nullptr ); BOOST_CHECK( bo32 != nullptr ); // data after order created @@ -1247,7 +1250,7 @@ BOOST_AUTO_TEST_CASE( hf445_fee_refund_cross_test ) asset_id_type core_id = asset_id_type(); const auto& usd_obj = create_user_issued_asset( "IZZYUSD", izzy_id(db), charge_market_fee ); - asset_id_type usd_id = usd_obj.id; + asset_id_type usd_id = usd_obj.get_id(); issue_uia( alice_id, asset( alice_b0, usd_id ) ); issue_uia( bob_id, asset( bob_b0, usd_id ) ); @@ -1308,16 +1311,17 @@ BOOST_AUTO_TEST_CASE( hf445_fee_refund_cross_test ) // prepare orders BOOST_TEST_MESSAGE( "Creating orders those will never match: ao1, ao2, bo1, bo2 .." ); // ao1: won't expire, won't match, fee in core - limit_order_id_type ao1_id = create_sell_order( alice_id, asset(1000), asset(100000, usd_id) )->id; + limit_order_id_type ao1_id = create_sell_order( alice_id, asset(1000), asset(100000, usd_id) )->get_id(); BOOST_CHECK( db.find( ao1_id ) != nullptr ); // ao2: will expire, won't match, fee in core - limit_order_id_type ao2_id = create_sell_order( alice_id, asset(800), asset(100000, usd_id), exp )->id; + limit_order_id_type ao2_id = create_sell_order( alice_id, asset(800), asset(100000, usd_id), exp )->get_id(); BOOST_CHECK( db.find( ao2_id ) != nullptr ); // bo1: won't expire, won't match, fee in usd - limit_order_id_type bo1_id = create_sell_order( bob_id, asset(1000, usd_id), asset(100000), max_exp, cer )->id; + limit_order_id_type bo1_id = create_sell_order( bob_id, asset(1000, usd_id), asset(100000), max_exp, cer ) + ->get_id(); BOOST_CHECK( db.find( bo1_id ) != nullptr ); // bo2: will expire, won't match, fee in usd - limit_order_id_type bo2_id = create_sell_order( bob_id, asset(800, usd_id), asset(100000), exp, cer )->id; + limit_order_id_type bo2_id = create_sell_order( bob_id, asset(800, usd_id), asset(100000), exp, cer )->get_id(); BOOST_CHECK( db.find( bo2_id ) != nullptr ); alice_bc -= order_create_fee * 2; @@ -1342,7 +1346,7 @@ BOOST_AUTO_TEST_CASE( hf445_fee_refund_cross_test ) // ao3: won't expire, partially match before hard fork 445, fee in core BOOST_TEST_MESSAGE( "Creating order ao3 .." ); - limit_order_id_type ao3_id = create_sell_order( alice_id, asset(900), asset(2700, usd_id) )->id; + limit_order_id_type ao3_id = create_sell_order( alice_id, asset(900), asset(2700, usd_id) )->get_id(); BOOST_CHECK( db.find( ao3_id ) != nullptr ); create_sell_order( bob_id, asset(600, usd_id), asset(200) ); @@ -1363,7 +1367,7 @@ BOOST_AUTO_TEST_CASE( hf445_fee_refund_cross_test ) // ao4: will expire, will partially match before hard fork 445, fee in core BOOST_TEST_MESSAGE( "Creating order ao4 .." 
); - limit_order_id_type ao4_id = create_sell_order( alice_id, asset(700), asset(1400, usd_id), exp )->id; + limit_order_id_type ao4_id = create_sell_order( alice_id, asset(700), asset(1400, usd_id), exp )->get_id(); BOOST_CHECK( db.find( ao4_id ) != nullptr ); create_sell_order( bob_id, asset(200, usd_id), asset(100) ); @@ -1384,7 +1388,8 @@ BOOST_AUTO_TEST_CASE( hf445_fee_refund_cross_test ) // bo3: won't expire, will partially match before hard fork 445, fee in usd BOOST_TEST_MESSAGE( "Creating order bo3 .." ); - limit_order_id_type bo3_id = create_sell_order( bob_id, asset(500, usd_id), asset(1500), max_exp, cer )->id; + limit_order_id_type bo3_id = create_sell_order( bob_id, asset(500, usd_id), asset(1500), max_exp, cer ) + ->get_id(); BOOST_CHECK( db.find( bo3_id ) != nullptr ); create_sell_order( alice_id, asset(450), asset(150, usd_id) ); @@ -1407,7 +1412,7 @@ BOOST_AUTO_TEST_CASE( hf445_fee_refund_cross_test ) // bo4: will expire, will partially match before hard fork 445, fee in usd BOOST_TEST_MESSAGE( "Creating order bo4 .." ); - limit_order_id_type bo4_id = create_sell_order( bob_id, asset(300, usd_id), asset(600), exp, cer )->id; + limit_order_id_type bo4_id = create_sell_order( bob_id, asset(300, usd_id), asset(600), exp, cer )->get_id(); BOOST_CHECK( db.find( bo4_id ) != nullptr ); create_sell_order( alice_id, asset(140), asset(70, usd_id) ); @@ -1430,7 +1435,7 @@ BOOST_AUTO_TEST_CASE( hf445_fee_refund_cross_test ) // ao5: won't expire, partially match after hard fork 445, fee in core BOOST_TEST_MESSAGE( "Creating order ao5 .." ); - limit_order_id_type ao5_id = create_sell_order( alice_id, asset(606), asset(909, usd_id) )->id; + limit_order_id_type ao5_id = create_sell_order( alice_id, asset(606), asset(909, usd_id) )->get_id(); BOOST_CHECK( db.find( ao5_id ) != nullptr ); alice_bc -= order_create_fee; @@ -1446,7 +1451,7 @@ BOOST_AUTO_TEST_CASE( hf445_fee_refund_cross_test ) // ao6: will expire, partially match after hard fork 445, fee in core BOOST_TEST_MESSAGE( "Creating order ao6 .." ); - limit_order_id_type ao6_id = create_sell_order( alice_id, asset(333), asset(444, usd_id), exp2 )->id; + limit_order_id_type ao6_id = create_sell_order( alice_id, asset(333), asset(444, usd_id), exp2 )->get_id(); BOOST_CHECK( db.find( ao6_id ) != nullptr ); alice_bc -= order_create_fee; @@ -1462,7 +1467,8 @@ BOOST_AUTO_TEST_CASE( hf445_fee_refund_cross_test ) // bo5: won't expire, partially match after hard fork 445, fee in usd BOOST_TEST_MESSAGE( "Creating order bo5 .." ); - limit_order_id_type bo5_id = create_sell_order( bob_id, asset(255, usd_id), asset(408), max_exp, cer )->id; + limit_order_id_type bo5_id = create_sell_order( bob_id, asset(255, usd_id), asset(408), max_exp, cer ) + ->get_id(); BOOST_CHECK( db.find( bo5_id ) != nullptr ); bob_bu -= order_create_fee; @@ -1480,7 +1486,7 @@ BOOST_AUTO_TEST_CASE( hf445_fee_refund_cross_test ) // bo6: will expire, partially match after hard fork 445, fee in usd BOOST_TEST_MESSAGE( "Creating order bo6 .." 
); - limit_order_id_type bo6_id = create_sell_order( bob_id, asset(127, usd_id), asset(127), exp2, cer )->id; + limit_order_id_type bo6_id = create_sell_order( bob_id, asset(127, usd_id), asset(127), exp2, cer )->get_id(); BOOST_CHECK( db.find( bo6_id ) != nullptr ); bob_bu -= order_create_fee; @@ -1751,7 +1757,7 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_test ) int64_t cer_usd_amount = 3; price tmp_cer( asset( cer_core_amount ), asset( cer_usd_amount, asset_id_type(1) ) ); const auto& usd_obj = create_user_issued_asset( "IZZYUSD", izzy_id(db), charge_market_fee, tmp_cer ); - asset_id_type usd_id = usd_obj.id; + asset_id_type usd_id = usd_obj.get_id(); issue_uia( alice_id, asset( alice_b0, usd_id ) ); issue_uia( bob_id, asset( bob_b0, usd_id ) ); @@ -1904,7 +1910,7 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_test ) // Bob creates order which doesn't match BOOST_TEST_MESSAGE( "Creating non-overlapping orders" ); BOOST_TEST_MESSAGE( "Creating ao1" ); - limit_order_id_type ao1_id = create_sell_order( alice_id, asset(1000), asset(1000, usd_id), exp )->id; + limit_order_id_type ao1_id = create_sell_order( alice_id, asset(1000), asset(1000, usd_id), exp )->get_id(); alice_bc -= order_create_fee; alice_bc -= 1000; @@ -1973,7 +1979,8 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_test ) BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); BOOST_TEST_MESSAGE( "Creating bo1" ); - limit_order_id_type bo1_id = create_sell_order( bob_id, asset(500, usd_id), asset(1000), exp, cer )->id; + limit_order_id_type bo1_id = create_sell_order( bob_id, asset(500, usd_id), asset(1000), exp, cer ) + ->get_id(); bob_bu -= usd_create_fee; bob_bu -= 500; @@ -2061,10 +2068,10 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_test ) // Check partial fill BOOST_TEST_MESSAGE( "Creating ao2, then be partially filled by bo2" ); const limit_order_object* ao2 = create_sell_order( alice_id, asset(1000), asset(200, usd_id), exp, cer ); - const limit_order_id_type ao2id = ao2->id; + const limit_order_id_type ao2id = ao2->get_id(); const limit_order_object* bo2 = create_sell_order( bob_id, asset(100, usd_id), asset(500) ); - BOOST_CHECK( db.find( ao2id ) != nullptr ); + BOOST_CHECK( db.find( ao2id ) != nullptr ); BOOST_CHECK( bo2 == nullptr ); // data after order created @@ -2140,11 +2147,11 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_test ) const limit_order_object* ao34 = create_sell_order( alice_id, asset(1000), asset(200, usd_id), max_exp, cer ); const limit_order_object* ao35 = create_sell_order( alice_id, asset(1000), asset(200, usd_id), max_exp, cer ); - const limit_order_id_type ao31id = ao31->id; - const limit_order_id_type ao32id = ao32->id; - const limit_order_id_type ao33id = ao33->id; - const limit_order_id_type ao34id = ao34->id; - const limit_order_id_type ao35id = ao35->id; + const limit_order_id_type ao31id = ao31->get_id(); + const limit_order_id_type ao32id = ao32->get_id(); + const limit_order_id_type ao33id = ao33->get_id(); + const limit_order_id_type ao34id = ao34->get_id(); + const limit_order_id_type ao35id = ao35->get_id(); alice_bc -= 1000 * 5; alice_bu -= usd_create_fee * 5; @@ -2162,11 +2169,11 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_test ) BOOST_TEST_MESSAGE( "Creating bo31, completely fill ao31 and ao33, partially fill ao34" ); const limit_order_object* bo31 = create_sell_order( bob_id, asset(500, usd_id), asset(2500), exp ); - BOOST_CHECK( db.find( ao31id ) == nullptr ); - BOOST_CHECK( db.find( ao32id ) != nullptr ); - BOOST_CHECK( db.find( ao33id ) == nullptr ); - BOOST_CHECK( db.find( ao34id ) 
!= nullptr ); - BOOST_CHECK( db.find( ao35id ) != nullptr ); + BOOST_CHECK( db.find( ao31id ) == nullptr ); + BOOST_CHECK( db.find( ao32id ) != nullptr ); + BOOST_CHECK( db.find( ao33id ) == nullptr ); + BOOST_CHECK( db.find( ao34id ) != nullptr ); + BOOST_CHECK( db.find( ao35id ) != nullptr ); BOOST_CHECK( bo31 == nullptr ); // data after order created @@ -2189,11 +2196,11 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_test ) BOOST_TEST_MESSAGE( "Creating bo32, completely fill partially filled ao34 and new ao35, leave on market" ); const limit_order_object* bo32 = create_sell_order( bob_id, asset(500, usd_id), asset(2500), exp ); - BOOST_CHECK( db.find( ao31id ) == nullptr ); - BOOST_CHECK( db.find( ao32id ) != nullptr ); - BOOST_CHECK( db.find( ao33id ) == nullptr ); - BOOST_CHECK( db.find( ao34id ) == nullptr ); - BOOST_CHECK( db.find( ao35id ) == nullptr ); + BOOST_CHECK( db.find( ao31id ) == nullptr ); + BOOST_CHECK( db.find( ao32id ) != nullptr ); + BOOST_CHECK( db.find( ao33id ) == nullptr ); + BOOST_CHECK( db.find( ao34id ) == nullptr ); + BOOST_CHECK( db.find( ao35id ) == nullptr ); BOOST_CHECK( bo32 != nullptr ); // data after order created @@ -2303,7 +2310,7 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_cross_test ) int64_t cer_usd_amount = 3; price tmp_cer( asset( cer_core_amount ), asset( cer_usd_amount, asset_id_type(1) ) ); const auto& usd_obj = create_user_issued_asset( "IZZYUSD", izzy_id(db), charge_market_fee, tmp_cer ); - asset_id_type usd_id = usd_obj.id; + asset_id_type usd_id = usd_obj.get_id(); issue_uia( alice_id, asset( alice_b0, usd_id ) ); issue_uia( bob_id, asset( bob_b0, usd_id ) ); @@ -2372,16 +2379,18 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_cross_test ) // prepare orders BOOST_TEST_MESSAGE( "Creating orders those will never match: ao1, ao2, bo1, bo2 .." ); // ao1: won't expire, won't match, fee in core - limit_order_id_type ao1_id = create_sell_order( alice_id, asset(1000), asset(100000, usd_id) )->id; + limit_order_id_type ao1_id = create_sell_order( alice_id, asset(1000), asset(100000, usd_id) )->get_id(); BOOST_CHECK( db.find( ao1_id ) != nullptr ); // ao2: will expire, won't match, fee in core - limit_order_id_type ao2_id = create_sell_order( alice_id, asset(800), asset(100000, usd_id), exp )->id; + limit_order_id_type ao2_id = create_sell_order( alice_id, asset(800), asset(100000, usd_id), exp )->get_id(); BOOST_CHECK( db.find( ao2_id ) != nullptr ); // bo1: won't expire, won't match, fee in usd - limit_order_id_type bo1_id = create_sell_order( bob_id, asset(1000, usd_id), asset(100000), max_exp, cer )->id; + limit_order_id_type bo1_id = create_sell_order( bob_id, asset(1000, usd_id), asset(100000), max_exp, cer ) + ->get_id(); BOOST_CHECK( db.find( bo1_id ) != nullptr ); // bo2: will expire, won't match, fee in usd - limit_order_id_type bo2_id = create_sell_order( bob_id, asset(800, usd_id), asset(100000), exp, cer )->id; + limit_order_id_type bo2_id = create_sell_order( bob_id, asset(800, usd_id), asset(100000), exp, cer ) + ->get_id(); BOOST_CHECK( db.find( bo2_id ) != nullptr ); alice_bc -= order_create_fee * 2; @@ -2406,7 +2415,7 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_cross_test ) // ao3: won't expire, partially match before hard fork 445, fee in core BOOST_TEST_MESSAGE( "Creating order ao3 .." 
); // 1:30 - limit_order_id_type ao3_id = create_sell_order( alice_id, asset(900), asset(27000, usd_id) )->id; + limit_order_id_type ao3_id = create_sell_order( alice_id, asset(900), asset(27000, usd_id) )->get_id(); BOOST_CHECK( db.find( ao3_id ) != nullptr ); create_sell_order( bob_id, asset(6000, usd_id), asset(200) ); @@ -2427,7 +2436,7 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_cross_test ) // ao4: will expire, will partially match before hard fork 445, fee in core BOOST_TEST_MESSAGE( "Creating order ao4 .." ); // 1:20 - limit_order_id_type ao4_id = create_sell_order( alice_id, asset(700), asset(14000, usd_id), exp )->id; + limit_order_id_type ao4_id = create_sell_order( alice_id, asset(700), asset(14000, usd_id), exp )->get_id(); BOOST_CHECK( db.find( ao4_id ) != nullptr ); create_sell_order( bob_id, asset(2000, usd_id), asset(100) ); @@ -2448,7 +2457,8 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_cross_test ) // bo3: won't expire, will partially match before hard fork 445, fee in usd BOOST_TEST_MESSAGE( "Creating order bo3 .." ); // 1:30 - limit_order_id_type bo3_id = create_sell_order( bob_id, asset(500, usd_id), asset(15000), max_exp, cer )->id; + limit_order_id_type bo3_id = create_sell_order( bob_id, asset(500, usd_id), asset(15000), max_exp, cer ) + ->get_id(); BOOST_CHECK( db.find( bo3_id ) != nullptr ); create_sell_order( alice_id, asset(4500), asset(150, usd_id) ); @@ -2471,7 +2481,7 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_cross_test ) // bo4: will expire, will partially match before hard fork 445, fee in usd BOOST_TEST_MESSAGE( "Creating order bo4 .." ); // 1:20 - limit_order_id_type bo4_id = create_sell_order( bob_id, asset(300, usd_id), asset(6000), exp, cer )->id; + limit_order_id_type bo4_id = create_sell_order( bob_id, asset(300, usd_id), asset(6000), exp, cer )->get_id(); BOOST_CHECK( db.find( bo4_id ) != nullptr ); create_sell_order( alice_id, asset(1400), asset(70, usd_id) ); @@ -2495,7 +2505,7 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_cross_test ) // ao11: won't expire, partially match after hard fork core-604, fee in core BOOST_TEST_MESSAGE( "Creating order ao11 .." ); // 1:18 - limit_order_id_type ao11_id = create_sell_order( alice_id, asset(510), asset(9180, usd_id) )->id; + limit_order_id_type ao11_id = create_sell_order( alice_id, asset(510), asset(9180, usd_id) )->get_id(); BOOST_CHECK( db.find( ao11_id ) != nullptr ); alice_bc -= order_create_fee; @@ -2511,7 +2521,7 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_cross_test ) // ao12: will expire, partially match after hard fork core-604, fee in core BOOST_TEST_MESSAGE( "Creating order ao12 .." ); // 1:16 - limit_order_id_type ao12_id = create_sell_order( alice_id, asset(256), asset(4096, usd_id), exp2 )->id; + limit_order_id_type ao12_id = create_sell_order( alice_id, asset(256), asset(4096, usd_id), exp2 )->get_id(); BOOST_CHECK( db.find( ao12_id ) != nullptr ); alice_bc -= order_create_fee; @@ -2527,7 +2537,8 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_cross_test ) // bo11: won't expire, partially match after hard fork core-604, fee in usd BOOST_TEST_MESSAGE( "Creating order bo11 .." 
); // 1:18 - limit_order_id_type bo11_id = create_sell_order( bob_id, asset(388, usd_id), asset(6984), max_exp, cer )->id; + limit_order_id_type bo11_id = create_sell_order( bob_id, asset(388, usd_id), asset(6984), max_exp, cer ) + ->get_id(); BOOST_CHECK( db.find( bo11_id ) != nullptr ); bob_bu -= usd_create_fee; @@ -2545,7 +2556,8 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_cross_test ) // bo12: will expire, partially match after hard fork core-604, fee in usd BOOST_TEST_MESSAGE( "Creating order bo12 .." ); // 1:17 - limit_order_id_type bo12_id = create_sell_order( bob_id, asset(213, usd_id), asset(3621), exp2, cer )->id; + limit_order_id_type bo12_id = create_sell_order( bob_id, asset(213, usd_id), asset(3621), exp2, cer ) + ->get_id(); BOOST_CHECK( db.find( bo12_id ) != nullptr ); bob_bu -= usd_create_fee; @@ -2563,7 +2575,7 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_cross_test ) // ao5: won't expire, partially match after hard fork 445, fee in core BOOST_TEST_MESSAGE( "Creating order ao5 .." ); // 1:15 - limit_order_id_type ao5_id = create_sell_order( alice_id, asset(606), asset(9090, usd_id) )->id; + limit_order_id_type ao5_id = create_sell_order( alice_id, asset(606), asset(9090, usd_id) )->get_id(); BOOST_CHECK( db.find( ao5_id ) != nullptr ); alice_bc -= order_create_fee; @@ -2580,7 +2592,7 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_cross_test ) // ao6: will expire, partially match after hard fork 445, fee in core if( false ) { // only can have either ao5 or ao6, can't have both BOOST_TEST_MESSAGE( "Creating order ao6 .." ); // 3:40 = 1:13.33333 - limit_order_id_type ao6_id = create_sell_order( alice_id, asset(333), asset(4440, usd_id), exp )->id; + limit_order_id_type ao6_id = create_sell_order( alice_id, asset(333), asset(4440, usd_id), exp )->get_id(); BOOST_CHECK( db.find( ao6_id ) != nullptr ); alice_bc -= order_create_fee; @@ -2598,7 +2610,8 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_cross_test ) // bo5: won't expire, partially match after hard fork 445, fee in usd if( false ) { // only can have either bo5 or bo6, can't have both BOOST_TEST_MESSAGE( "Creating order bo5 .." ); // 1:16 - limit_order_id_type bo5_id = create_sell_order( bob_id, asset(255, usd_id), asset(4080), max_exp, cer )->id; + limit_order_id_type bo5_id = create_sell_order( bob_id, asset(255, usd_id), asset(4080), max_exp, cer ) + ->get_id(); BOOST_CHECK( db.find( bo5_id ) != nullptr ); bob_bu -= usd_create_fee; @@ -2617,7 +2630,7 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_cross_test ) // bo6: will expire, partially match after hard fork 445, fee in usd BOOST_TEST_MESSAGE( "Creating order bo6 .." ); // 1:10 - limit_order_id_type bo6_id = create_sell_order( bob_id, asset(127, usd_id), asset(1270), exp, cer )->id; + limit_order_id_type bo6_id = create_sell_order( bob_id, asset(127, usd_id), asset(1270), exp, cer )->get_id(); BOOST_CHECK( db.find( bo6_id ) != nullptr ); BOOST_CHECK( db.find( bo6_id ) != nullptr ); @@ -2731,16 +2744,18 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_cross_test ) // prepare more orders BOOST_TEST_MESSAGE( "Creating more orders those will never match: ao7, ao8, bo7, bo8 .." 
); // ~ 1:100 // ao7: won't expire, won't match, fee in core - limit_order_id_type ao7_id = create_sell_order( alice_id, asset(1003), asset(100000, usd_id) )->id; + limit_order_id_type ao7_id = create_sell_order( alice_id, asset(1003), asset(100000, usd_id) )->get_id(); BOOST_CHECK( db.find( ao7_id ) != nullptr ); // ao8: will expire, won't match, fee in core - limit_order_id_type ao8_id = create_sell_order( alice_id, asset(803), asset(100000, usd_id), exp1 )->id; + limit_order_id_type ao8_id = create_sell_order( alice_id, asset(803), asset(100000, usd_id), exp1 )->get_id(); BOOST_CHECK( db.find( ao8_id ) != nullptr ); // bo7: won't expire, won't match, fee in usd - limit_order_id_type bo7_id = create_sell_order( bob_id, asset(1003, usd_id), asset(100000), max_exp, cer )->id; + limit_order_id_type bo7_id = create_sell_order( bob_id, asset(1003, usd_id), asset(100000), max_exp, cer ) + ->get_id(); BOOST_CHECK( db.find( bo7_id ) != nullptr ); // bo8: will expire, won't match, fee in usd - limit_order_id_type bo8_id = create_sell_order( bob_id, asset(803, usd_id), asset(100000), exp1, cer )->id; + limit_order_id_type bo8_id = create_sell_order( bob_id, asset(803, usd_id), asset(100000), exp1, cer ) + ->get_id(); BOOST_CHECK( db.find( bo8_id ) != nullptr ); alice_bc -= order_create_fee * 2; @@ -2765,7 +2780,7 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_cross_test ) // ao9: won't expire, partially match before hard fork core-604, fee in core BOOST_TEST_MESSAGE( "Creating order ao9 .." ); // 1:3 - limit_order_id_type ao9_id = create_sell_order( alice_id, asset(909), asset(2727, usd_id) )->id; + limit_order_id_type ao9_id = create_sell_order( alice_id, asset(909), asset(2727, usd_id) )->get_id(); BOOST_CHECK( db.find( ao9_id ) != nullptr ); create_sell_order( bob_id, asset(606, usd_id), asset(202) ); @@ -2786,7 +2801,7 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_cross_test ) // ao10: will expire, will partially match before hard fork core-604, fee in core BOOST_TEST_MESSAGE( "Creating order ao10 .." ); // 1:2 - limit_order_id_type ao10_id = create_sell_order( alice_id, asset(707), asset(1414, usd_id), exp )->id; + limit_order_id_type ao10_id = create_sell_order( alice_id, asset(707), asset(1414, usd_id), exp )->get_id(); BOOST_CHECK( db.find( ao10_id ) != nullptr ); create_sell_order( bob_id, asset(202, usd_id), asset(101) ); @@ -2807,7 +2822,8 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_cross_test ) // bo9: won't expire, will partially match before hard fork core-604, fee in usd BOOST_TEST_MESSAGE( "Creating order bo9 .." ); // 1:3 - limit_order_id_type bo9_id = create_sell_order( bob_id, asset(505, usd_id), asset(1515), max_exp, cer )->id; + limit_order_id_type bo9_id = create_sell_order( bob_id, asset(505, usd_id), asset(1515), max_exp, cer ) + ->get_id(); BOOST_CHECK( db.find( bo9_id ) != nullptr ); create_sell_order( alice_id, asset(453), asset(151, usd_id) ); @@ -2830,7 +2846,7 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_cross_test ) // bo10: will expire, will partially match before hard fork core-604, fee in usd BOOST_TEST_MESSAGE( "Creating order bo10 .." 
); // 1:2 - limit_order_id_type bo10_id = create_sell_order( bob_id, asset(302, usd_id), asset(604), exp, cer )->id; + limit_order_id_type bo10_id = create_sell_order( bob_id, asset(302, usd_id), asset(604), exp, cer )->get_id(); BOOST_CHECK( db.find( bo10_id ) != nullptr ); create_sell_order( alice_id, asset(142), asset(71, usd_id) ); @@ -2853,7 +2869,7 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_cross_test ) // ao13: won't expire, partially match after hard fork core-604, fee in core BOOST_TEST_MESSAGE( "Creating order ao13 .." ); // 1:1.5 - limit_order_id_type ao13_id = create_sell_order( alice_id, asset(424), asset(636, usd_id) )->id; + limit_order_id_type ao13_id = create_sell_order( alice_id, asset(424), asset(636, usd_id) )->get_id(); BOOST_CHECK( db.find( ao13_id ) != nullptr ); alice_bc -= order_create_fee; @@ -2869,7 +2885,7 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_cross_test ) // ao14: will expire, partially match after hard fork core-604, fee in core BOOST_TEST_MESSAGE( "Creating order ao14 .." ); // 1:1.2 - limit_order_id_type ao14_id = create_sell_order( alice_id, asset(525), asset(630, usd_id), exp )->id; + limit_order_id_type ao14_id = create_sell_order( alice_id, asset(525), asset(630, usd_id), exp )->get_id(); BOOST_CHECK( db.find( ao14_id ) != nullptr ); alice_bc -= order_create_fee; @@ -2885,7 +2901,8 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_cross_test ) // bo13: won't expire, partially match after hard fork core-604, fee in usd BOOST_TEST_MESSAGE( "Creating order bo13 .." ); // 1:1.5 - limit_order_id_type bo13_id = create_sell_order( bob_id, asset(364, usd_id), asset(546), max_exp, cer )->id; + limit_order_id_type bo13_id = create_sell_order( bob_id, asset(364, usd_id), asset(546), max_exp, cer ) + ->get_id(); BOOST_CHECK( db.find( bo13_id ) != nullptr ); bob_bu -= usd_create_fee; @@ -2903,7 +2920,7 @@ BOOST_AUTO_TEST_CASE( bsip26_fee_refund_cross_test ) // bo14: will expire, partially match after hard fork core-604, fee in usd BOOST_TEST_MESSAGE( "Creating order bo14 .." 
); // 1:1.2 - limit_order_id_type bo14_id = create_sell_order( bob_id, asset(365, usd_id), asset(438), exp, cer )->id; + limit_order_id_type bo14_id = create_sell_order( bob_id, asset(365, usd_id), asset(438), exp, cer )->get_id(); BOOST_CHECK( db.find( bo14_id ) != nullptr ); bob_bu -= usd_create_fee; @@ -3435,7 +3452,7 @@ BOOST_AUTO_TEST_CASE( stealth_fba_test ) // Izzy creates STEALTH asset_id_type stealth_id = create_user_issued_asset( "STEALTH", izzy_id(db), - disable_confidential | transfer_restricted | override_authority | white_list | charge_market_fee ).id; + disable_confidential | transfer_restricted | override_authority | white_list | charge_market_fee ).get_id(); /* // this is disabled because it doesn't work, our modify() is probably being overwritten by undo diff --git a/tests/tests/force_settle_fee_tests.cpp b/tests/tests/force_settle_fee_tests.cpp index ea2f732ab5..9d3bc42199 100644 --- a/tests/tests/force_settle_fee_tests.cpp +++ b/tests/tests/force_settle_fee_tests.cpp @@ -175,15 +175,15 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) // Fund actors uint64_t initial_balance_core = 10000000; - transfer(committee_account, assetowner.id, asset(initial_balance_core)); - transfer(committee_account, feedproducer.id, asset(initial_balance_core)); + transfer(committee_account, assetowner.get_id(), asset(initial_balance_core)); + transfer(committee_account, feedproducer.get_id(), asset(initial_balance_core)); transfer(committee_account, michael_id, asset(initial_balance_core)); - transfer(committee_account, paul.id, asset(initial_balance_core)); + transfer(committee_account, paul.get_id(), asset(initial_balance_core)); // 1. Create assets const uint16_t usd_fso_percent = 5 * GRAPHENE_1_PERCENT; // 5% Force-settlement offset fee % const uint16_t usd_fsf_percent = 3 * GRAPHENE_1_PERCENT; // 3% Force-settlement fee % (BSIP87) - create_smart_asset("USDBIT", assetowner.id, usd_fso_percent, usd_fsf_percent); + create_smart_asset("USDBIT", assetowner.get_id(), usd_fso_percent, usd_fsf_percent); generate_block(); set_expiration(db, trx); @@ -191,8 +191,8 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) const auto &bitusd = get_asset("USDBIT"); const auto &core = asset_id_type()(db); - asset_id_type bitusd_id = bitusd.id; - asset_id_type core_id = core.id; + asset_id_type bitusd_id = bitusd.get_id(); + asset_id_type core_id = core.get_id(); const int64_t bitusd_unit = asset::scaled_precision(bitusd.precision).value; // 100 satoshi USDBIT in 1 USDBIT // 2. 
Publish a feed for the smart asset @@ -212,7 +212,7 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) int64_t michael_initial_core = 8; const call_order_object &call_michael = *borrow(michael, bitusd.amount(michael_initial_usd), core.amount(michael_initial_core)); - call_order_id_type call_michael_id = call_michael.id; + call_order_id_type call_michael_id = call_michael.get_id(); BOOST_CHECK_EQUAL(get_balance(michael, bitusd), michael_initial_usd); BOOST_CHECK_EQUAL(get_balance(michael, core), initial_balance_core - michael_initial_core); @@ -226,7 +226,7 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) int64_t paul_initial_core = paul_initial_usd * 2 / 20; // 10000 const call_order_object &call_paul = *borrow(paul, bitusd.amount(paul_initial_usd), core.amount(paul_initial_core)); - call_order_id_type call_paul_id = call_paul.id; + call_order_id_type call_paul_id = call_paul.get_id(); BOOST_REQUIRE_EQUAL(get_balance(paul, bitusd), paul_initial_usd); BOOST_CHECK_EQUAL(get_balance(paul, bitusd), paul_initial_usd); @@ -237,7 +237,7 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) // 5. Paul transfers 200 bitUSD to Rachel /////// int64_t rachel_initial_usd = 200 * bitusd_unit; - transfer(paul.id, rachel.id, asset(rachel_initial_usd, bitusd.id)); + transfer(paul.get_id(), rachel.get_id(), asset(rachel_initial_usd, bitusd.get_id())); BOOST_CHECK_EQUAL(get_balance(rachel, core), 0); BOOST_CHECK_EQUAL(get_balance(rachel, bitusd), rachel_initial_usd); @@ -252,8 +252,8 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) const int64_t rachel_settle_amount = 20 * bitusd_unit; operation_result result = force_settle(rachel, bitusd.amount(rachel_settle_amount)); - force_settlement_id_type rachel_settle_id = *result.get() - .value.new_objects->begin(); + force_settlement_id_type rachel_settle_id { *result.get() + .value.new_objects->begin() }; BOOST_CHECK_EQUAL(rachel_settle_id(db).balance.amount.value, rachel_settle_amount); // Check Rachel's balance @@ -420,10 +420,10 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) // Fund actors uint64_t initial_balance_core = 10000000; - transfer(committee_account, assetowner.id, asset(initial_balance_core)); - transfer(committee_account, feedproducer.id, asset(initial_balance_core)); + transfer(committee_account, assetowner.get_id(), asset(initial_balance_core)); + transfer(committee_account, feedproducer.get_id(), asset(initial_balance_core)); transfer(committee_account, michael_id, asset(initial_balance_core)); - transfer(committee_account, paul.id, asset(initial_balance_core)); + transfer(committee_account, paul.get_id(), asset(initial_balance_core)); /////// // 1. 
Create assets @@ -433,13 +433,13 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) // Attempt and fail to create the smart asset with a force-settlement fee % before HARDFORK_CORE_BSIP87_TIME trx.clear(); - REQUIRE_EXCEPTION_WITH_TEXT(create_smart_asset("USDBIT", assetowner.id, usd_fso_percent, usd_fsf_percent_0), + REQUIRE_EXCEPTION_WITH_TEXT(create_smart_asset("USDBIT", assetowner_id, usd_fso_percent, usd_fsf_percent_0), "cannot be set before Hardfork BSIP87"); // Create the smart asset without a force-settlement fee % trx.clear(); - create_smart_asset("USDBIT", assetowner.id, usd_fso_percent); + create_smart_asset("USDBIT", assetowner_id, usd_fso_percent); generate_block(); set_expiration(db, trx); @@ -454,7 +454,7 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) /////// // 2. Publish a feed for the smart asset /////// - update_feed_producers(bitusd.id, {feedproducer_id}); + update_feed_producers(bitusd.get_id(), {feedproducer_id}); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; current_feed.maximum_short_squeeze_ratio = 1100; @@ -472,7 +472,7 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) int64_t paul_initial_core = paul_initial_usd * 2 * 20; // 400000 const call_order_object &call_paul = *borrow(paul, bitusd.amount(paul_initial_usd), core.amount(paul_initial_core)); - call_order_id_type call_paul_id = call_paul.id; + call_order_id_type call_paul_id = call_paul.get_id(); BOOST_REQUIRE_EQUAL(get_balance(paul, bitusd), paul_initial_usd); BOOST_CHECK_EQUAL(get_balance(paul, bitusd), paul_initial_usd); @@ -483,7 +483,7 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) // 4. Paul gives Rachel 20 bitUSD and retains 80 bitUSD /////// int64_t rachel_initial_usd = 20 * bitusd_unit; - transfer(paul.id, rachel.id, asset(rachel_initial_usd, bitusd.id)); + transfer(paul.get_id(), rachel.get_id(), asset(rachel_initial_usd, bitusd.get_id())); BOOST_CHECK_EQUAL(get_balance(rachel, bitusd), rachel_initial_usd); BOOST_CHECK_EQUAL(get_balance(rachel, core), 0); @@ -498,8 +498,8 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) const int64_t rachel_settle_amount = 2 * bitusd_unit; operation_result result = force_settle(rachel, bitusd.amount(rachel_settle_amount)); - force_settlement_id_type rachel_settle_id = *result.get() - .value.new_objects->begin(); + force_settlement_id_type rachel_settle_id { *result.get() + .value.new_objects->begin() }; BOOST_CHECK_EQUAL(rachel_settle_id(db).balance.amount.value, rachel_settle_amount); // Advance time to complete the force settlement and to update the price feed @@ -581,7 +581,7 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) // 8. 
Paul gives Michael 30 bitUSD and retains 50 bitUSD /////// int64_t michael_initial_usd = 30 * bitusd_unit; - transfer(paul.id, michael.id, asset(michael_initial_usd, bitusd.id)); + transfer(paul.get_id(), michael.get_id(), asset(michael_initial_usd, bitusd.get_id())); // Check Michael's balance BOOST_CHECK_EQUAL(get_balance(michael, bitusd), michael_initial_usd); @@ -598,8 +598,8 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) const int64_t michael_settle_amount = 5 * bitusd_unit; result = force_settle(michael, bitusd.amount(michael_settle_amount)); - force_settlement_id_type michael_settle_id = *result.get() - .value.new_objects->begin(); + force_settlement_id_type michael_settle_id { *result.get() + .value.new_objects->begin() }; BOOST_CHECK_EQUAL(michael_settle_id(db).balance.amount.value, michael_settle_amount); // Advance time to complete the force settlement and to update the price feed @@ -668,7 +668,7 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) // 11. Paul gives Yanna 40 bitUSD and retains 10 bitUSD /////// int64_t yanna_initial_usd = 40 * bitusd_unit; - transfer(paul.id, yanna.id, asset(yanna_initial_usd, bitusd.id)); + transfer(paul.get_id(), yanna.get_id(), asset(yanna_initial_usd, bitusd.get_id())); // Check Yanna's balance BOOST_CHECK_EQUAL(get_balance(yanna, bitusd), yanna_initial_usd); @@ -686,8 +686,8 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) const int64_t yanna_settle_amount = 10 * bitusd_unit; result = force_settle(yanna, bitusd.amount(yanna_settle_amount)); - force_settlement_id_type yanna_settle_id = *result.get() - .value.new_objects->begin(); + force_settlement_id_type yanna_settle_id { *result.get() + .value.new_objects->begin() }; BOOST_CHECK_EQUAL(yanna_settle_id(db).balance.amount.value, yanna_settle_amount); // Advance time to complete the force settlement and to update the price feed @@ -757,7 +757,7 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) // 14. 
Paul gives Vikram 10 bitUSD and retains 0 bitUSD /////// int64_t vikram_initial_usd = 10 * bitusd_unit; - transfer(paul.id, vikram.id, asset(vikram_initial_usd, bitusd.id)); + transfer(paul.get_id(), vikram.get_id(), asset(vikram_initial_usd, bitusd.get_id())); // Check Yanna's balance BOOST_CHECK_EQUAL(get_balance(vikram, bitusd), vikram_initial_usd); @@ -776,8 +776,8 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) const int64_t vikram_settle_amount = 10 * bitusd_unit; result = force_settle(vikram, bitusd.amount(vikram_settle_amount)); - force_settlement_id_type vikram_settle_id = *result.get() - .value.new_objects->begin(); + force_settlement_id_type vikram_settle_id { *result.get() + .value.new_objects->begin() }; BOOST_CHECK_EQUAL(vikram_settle_id(db).balance.amount.value, vikram_settle_amount); // Advance time to complete the force settlement and to update the price feed @@ -860,9 +860,9 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) /////// // Rachel, Michael, and Yanna return their remaining bitUSD to Paul trx.clear(); - transfer(rachel.id, paul.id, bitusd.amount(get_balance(rachel, bitusd))); - transfer(michael.id, paul.id, bitusd.amount(get_balance(michael, bitusd))); - transfer(yanna.id, paul.id, bitusd.amount(get_balance(yanna, bitusd))); + transfer(rachel.get_id(), paul.get_id(), bitusd.amount(get_balance(rachel, bitusd))); + transfer(michael.get_id(), paul.get_id(), bitusd.amount(get_balance(michael, bitusd))); + transfer(yanna.get_id(), paul.get_id(), bitusd.amount(get_balance(yanna, bitusd))); // Vikram has no bitUSD to transfer BOOST_CHECK_EQUAL(get_balance(vikram, bitusd), 0); @@ -1059,14 +1059,14 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) // Fund actors uint64_t initial_balance_core = 10000000; - transfer(committee_account, assetowner.id, asset(initial_balance_core)); - transfer(committee_account, feedproducer.id, asset(initial_balance_core)); - transfer(committee_account, paul.id, asset(initial_balance_core)); + transfer(committee_account, assetowner.get_id(), asset(initial_balance_core)); + transfer(committee_account, feedproducer.get_id(), asset(initial_balance_core)); + transfer(committee_account, paul.get_id(), asset(initial_balance_core)); // 1. Create assets const uint16_t usd_fso_percent = 5 * GRAPHENE_1_PERCENT; // 5% Force-settlement offset fee % const uint16_t usd_fsf_percent = 100 * GRAPHENE_1_PERCENT; // 100% Force-settlement fee % (BSIP87) - create_smart_asset("USDBIT", assetowner.id, usd_fso_percent, usd_fsf_percent); + create_smart_asset("USDBIT", assetowner.get_id(), usd_fso_percent, usd_fsf_percent); generate_block(); set_expiration(db, trx); @@ -1080,7 +1080,7 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) /////// // 2. 
Publish a feed for the smart asset /////// - update_feed_producers(bitusd.id, {feedproducer_id}); + update_feed_producers(bitusd.get_id(), {feedproducer_id}); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; current_feed.maximum_short_squeeze_ratio = 1100; @@ -1098,7 +1098,7 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) int64_t paul_initial_core = paul_initial_usd * 2 * 20; // 400000 const call_order_object &call_paul = *borrow(paul, bitusd.amount(paul_initial_usd), core.amount(paul_initial_core)); - call_order_id_type call_paul_id = call_paul.id; + call_order_id_type call_paul_id = call_paul.get_id(); BOOST_REQUIRE_EQUAL(get_balance(paul, bitusd), paul_initial_usd); BOOST_CHECK_EQUAL(get_balance(paul, bitusd), paul_initial_usd); @@ -1109,7 +1109,7 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) // 4. Paul gives Rachel 20 bitUSD and retains 80 bitUSD /////// int64_t rachel_initial_usd = 20 * bitusd_unit; - transfer(paul.id, rachel.id, asset(rachel_initial_usd, bitusd.id)); + transfer(paul.get_id(), rachel.get_id(), asset(rachel_initial_usd, bitusd.get_id())); BOOST_CHECK_EQUAL(get_balance(rachel, bitusd), rachel_initial_usd); BOOST_CHECK_EQUAL(get_balance(rachel, core), 0); @@ -1124,8 +1124,8 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) const int64_t rachel_settle_amount = 2 * bitusd_unit; // 200 satoshi bitusd operation_result result = force_settle(rachel, bitusd.amount(rachel_settle_amount)); - force_settlement_id_type rachel_settle_id = *result.get() - .value.new_objects->begin(); + force_settlement_id_type rachel_settle_id { *result.get() + .value.new_objects->begin() }; BOOST_CHECK_EQUAL(rachel_settle_id(db).balance.amount.value, rachel_settle_amount); // Advance time to complete the force settlement and to update the price feed @@ -1213,7 +1213,7 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) // Fund actors uint64_t initial_balance_core = 10000000; - transfer(committee_account, assetowner.id, asset(initial_balance_core)); + transfer(committee_account, assetowner.get_id(), asset(initial_balance_core)); // Confirm before hardfork activation BOOST_CHECK(db.head_block_time() < HARDFORK_CORE_BSIP87_TIME); @@ -1228,7 +1228,7 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) // Attempt to create the smart asset with a force-settlement fee % // The attempt should fail because it is before HARDFORK_CORE_BSIP87_TIME trx.clear(); - REQUIRE_EXCEPTION_WITH_TEXT(create_smart_asset("USDBIT", assetowner.id, usd_fso_percent, usd_fsf_percent_0), + REQUIRE_EXCEPTION_WITH_TEXT(create_smart_asset("USDBIT", assetowner_id, usd_fso_percent, usd_fsf_percent_0), "cannot be set before Hardfork BSIP87"); @@ -1236,7 +1236,7 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) // 2. Asset owner fails to create the smart coin called bitUSD with a force-settlement fee % in a proposal /////// { - asset_create_operation create_op = create_smart_asset_op("USDBIT", assetowner.id, usd_fso_percent, + asset_create_operation create_op = create_smart_asset_op("USDBIT", assetowner_id, usd_fso_percent, usd_fsf_percent_0); proposal_create_operation cop; cop.review_period_seconds = 86400; @@ -1256,7 +1256,7 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) // 3. 
Asset owner succeeds to create the smart coin called bitUSD without a force-settlement fee % /////// trx.clear(); - create_smart_asset("USDBIT", assetowner.id, usd_fso_percent); + create_smart_asset("USDBIT", assetowner_id, usd_fso_percent); generate_block(); set_expiration(db, trx); @@ -1356,7 +1356,7 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) const uint16_t fsf_percent_1 = 1 * GRAPHENE_1_PERCENT; // 1% Force-settlement fee % (BSIP87) const uint16_t fsf_percent_5 = 1 * GRAPHENE_1_PERCENT; // 5% Force-settlement fee % (BSIP87) trx.clear(); - create_smart_asset("CNYBIT", assetowner.id, usd_fso_percent, fsf_percent_1); + create_smart_asset("CNYBIT", assetowner_id, usd_fso_percent, fsf_percent_1); generate_block(); set_expiration(db, trx); @@ -1374,7 +1374,7 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) /////// { // Create the proposal - asset_create_operation create_op = create_smart_asset_op("RUBBIT", assetowner.id, usd_fso_percent, + asset_create_operation create_op = create_smart_asset_op("RUBBIT", assetowner_id, usd_fso_percent, fsf_percent_1); proposal_create_operation cop; cop.review_period_seconds = 86400; @@ -1390,7 +1390,7 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) // Approve the proposal - proposal_id_type pid = processed.operation_results[0].get(); + proposal_id_type pid { processed.operation_results[0].get() }; proposal_update_operation pup; pup.fee_paying_account = assetowner_id; @@ -1460,7 +1460,7 @@ BOOST_FIXTURE_TEST_SUITE(force_settle_tests, force_settle_database_fixture) // Approve the proposal - proposal_id_type pid = processed.operation_results[0].get(); + proposal_id_type pid { processed.operation_results[0].get() }; proposal_update_operation pup; pup.fee_paying_account = assetowner_id; diff --git a/tests/tests/force_settle_match_tests.cpp b/tests/tests/force_settle_match_tests.cpp index a0b030a2e9..6b2023e75d 100644 --- a/tests/tests/force_settle_match_tests.cpp +++ b/tests/tests/force_settle_match_tests.cpp @@ -53,7 +53,7 @@ BOOST_AUTO_TEST_CASE(tcr_test_hf2481_settle_call) const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); const auto& core = asset_id_type()(db); - asset_id_type usd_id = bitusd.id; + asset_id_type usd_id = bitusd.get_id(); { // set margin call fee ratio @@ -76,7 +76,7 @@ BOOST_AUTO_TEST_CASE(tcr_test_hf2481_settle_call) transfer(committee_account, borrower_id, asset(init_balance)); transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -85,10 +85,10 @@ BOOST_AUTO_TEST_CASE(tcr_test_hf2481_settle_call) publish_feed( bitusd, feedproducer, current_feed ); // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7, tcr 170% is lower than 175% const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000), 1700); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7, tcr 200% > 175% const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500), 2000); - call_order_id_type call2_id = call2.id; + call_order_id_type call2_id = call2.get_id(); // create yet another position with 500% collateral, call price 
is 25/1.75 CORE/USD = 100/7, no tcr const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(25000)); transfer(borrower, seller, bitusd.amount(1000)); @@ -117,18 +117,18 @@ BOOST_AUTO_TEST_CASE(tcr_test_hf2481_settle_call) price mc( asset(10*175), asset(1*100, usd_id) ); // This sell order above MSSP will not be matched with a call - limit_order_id_type sell_high = create_sell_order(seller, bitusd.amount(7), core.amount(78))->id; - BOOST_CHECK_EQUAL( db.find( sell_high )->for_sale.value, 7 ); + limit_order_id_type sell_high = create_sell_order(seller, bitusd.amount(7), core.amount(78))->get_id(); + BOOST_CHECK_EQUAL( db.find( sell_high )->for_sale.value, 7 ); BOOST_CHECK_EQUAL( 2993, get_balance(seller, bitusd) ); BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); // This buy order is too low will not be matched with a sell order - limit_order_id_type buy_low = create_sell_order(buyer, asset(80), bitusd.amount(10))->id; + limit_order_id_type buy_low = create_sell_order(buyer, asset(80), bitusd.amount(10))->get_id(); // This buy order at MSSP will be matched only if no margin call (margin call takes precedence) - limit_order_id_type buy_med = create_sell_order(buyer2, asset(33000), bitusd.amount(3000))->id; + limit_order_id_type buy_med = create_sell_order(buyer2, asset(33000), bitusd.amount(3000))->get_id(); // This buy order above MSSP will be matched with a sell order (limit order with better price takes precedence) - limit_order_id_type buy_high = create_sell_order(buyer3, asset(111), bitusd.amount(10))->id; + limit_order_id_type buy_high = create_sell_order(buyer3, asset(111), bitusd.amount(10))->get_id(); BOOST_CHECK_EQUAL( 0, get_balance(buyer, bitusd) ); BOOST_CHECK_EQUAL( 0, get_balance(buyer2, bitusd) ); @@ -155,16 +155,16 @@ BOOST_AUTO_TEST_CASE(tcr_test_hf2481_settle_call) BOOST_REQUIRE( result.is_type() ); BOOST_REQUIRE( result.get().value.new_objects.valid() ); BOOST_REQUIRE( !result.get().value.new_objects->empty() ); - force_settlement_id_type settle_id = *result.get().value.new_objects->begin(); + force_settlement_id_type settle_id { *result.get().value.new_objects->begin() }; BOOST_CHECK( db.find( settle_id ) != nullptr ); // buy orders won't change - BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 ); - BOOST_CHECK_EQUAL( db.find( buy_med )->for_sale.value, 33000 ); - BOOST_CHECK_EQUAL( db.find( buy_high )->for_sale.value, 111 ); + BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 ); + BOOST_CHECK_EQUAL( db.find( buy_med )->for_sale.value, 33000 ); + BOOST_CHECK_EQUAL( db.find( buy_high )->for_sale.value, 111 ); // the settle order will match with call, at mssp: 1/11 = 1000/11000 - const call_order_object* tmp_call = db.find( call_id ); + const call_order_object* tmp_call = db.find( call_id ); BOOST_CHECK( tmp_call != nullptr ); // call will receive call_to_cover, pay 11*call_to_cover @@ -180,7 +180,7 @@ BOOST_AUTO_TEST_CASE(tcr_test_hf2481_settle_call) BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); // the settle order then will match with call2, at mssp: 1/11 = 1000/11000 - const call_order_object* tmp_call2 = db.find( call2_id ); + const call_order_object* tmp_call2 = db.find( call2_id ); BOOST_CHECK( tmp_call2 != nullptr ); // call2 will receive call2_to_cover, pay 11*call2_to_cover @@ -233,7 +233,7 @@ BOOST_AUTO_TEST_CASE(hf2481_small_settle_call) const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); const auto& core = asset_id_type()(db); - asset_id_type usd_id = bitusd.id; + asset_id_type usd_id = 
bitusd.get_id(); { // set margin call fee ratio @@ -253,7 +253,7 @@ BOOST_AUTO_TEST_CASE(hf2481_small_settle_call) transfer(committee_account, borrower_id, asset(init_balance)); transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -262,13 +262,13 @@ BOOST_AUTO_TEST_CASE(hf2481_small_settle_call) publish_feed( bitusd, feedproducer, current_feed ); // start out with 300% collateral, call price is 15/175 CORE/USD = 6/70, tcr 170% is lower than 175% const call_order_object& call = *borrow( borrower, bitusd.amount(100000), asset(15000), 1700); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create another position with 285% collateral const call_order_object& call2 = *borrow( borrower2, bitusd.amount(7), asset(1), 1700); - call_order_id_type call2_id = call2.id; + call_order_id_type call2_id = call2.get_id(); // create yet another position with 285% collateral const call_order_object& call3 = *borrow( borrower3, bitusd.amount(14), asset(2), 1700); - call_order_id_type call3_id = call3.id; + call_order_id_type call3_id = call3.get_id(); transfer(borrower, seller, bitusd.amount(100000)); // adjust price feed to get call orders into margin call territory @@ -300,7 +300,7 @@ BOOST_AUTO_TEST_CASE(hf2481_small_settle_call) BOOST_REQUIRE( result.is_type() ); BOOST_REQUIRE( result.get().value.new_objects.valid() ); BOOST_REQUIRE( !result.get().value.new_objects->empty() ); - force_settlement_id_type settle_id = *result.get().value.new_objects->begin(); + force_settlement_id_type settle_id { *result.get().value.new_objects->begin() }; BOOST_CHECK( db.find( settle_id ) == nullptr ); // the settle order will match with call2, at mssp: 100/11, @@ -345,7 +345,7 @@ BOOST_AUTO_TEST_CASE(hf2481_small_settle_call) BOOST_REQUIRE( result.is_type() ); BOOST_REQUIRE( result.get().value.new_objects.valid() ); BOOST_REQUIRE( !result.get().value.new_objects->empty() ); - force_settlement_id_type settle2_id = *result.get().value.new_objects->begin(); + force_settlement_id_type settle2_id { *result.get().value.new_objects->begin() }; BOOST_CHECK( db.find( settle2_id ) == nullptr ); // the settle order will match with call, at mssp: 100/11 @@ -391,7 +391,7 @@ BOOST_AUTO_TEST_CASE(hf2481_small_settle_call) BOOST_REQUIRE( result.is_type() ); BOOST_REQUIRE( result.get().value.new_objects.valid() ); BOOST_REQUIRE( !result.get().value.new_objects->empty() ); - force_settlement_id_type settle3_id = *result.get().value.new_objects->begin(); + force_settlement_id_type settle3_id { *result.get().value.new_objects->begin() }; BOOST_CHECK( db.find( settle3_id ) == nullptr ); // the settle order will match with call, at mssp @@ -422,7 +422,7 @@ BOOST_AUTO_TEST_CASE(hf2481_small_settle_call) BOOST_REQUIRE( result.is_type() ); BOOST_REQUIRE( result.get().value.new_objects.valid() ); BOOST_REQUIRE( !result.get().value.new_objects->empty() ); - force_settlement_id_type settle4_id = *result.get().value.new_objects->begin(); + force_settlement_id_type settle4_id { *result.get().value.new_objects->begin() }; BOOST_CHECK( db.find( settle4_id ) == nullptr ); // the settle order will match with call, at mssp @@ -465,7 +465,7 @@ BOOST_AUTO_TEST_CASE(tcr_test_hf2481_call_settle) const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); 
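The comments just above compress several ratios into single figures ("start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7, tcr 170% is lower than 175%"). The sketch below spells out that arithmetic for a borrow of 1000 USD against 15000 CORE; the 5 CORE per USD feed price is an assumption chosen only because it is consistent with the 300% figure, not a value read from the fixture:

    // Worked numbers for a borrow of 1000 USD against 15000 CORE, with an
    // assumed feed of 5 CORE per USD and maintenance collateral ratio 1750.
    #include <cstdio>

    int main()
    {
       const double debt       = 1000;    // USD issued to the borrower
       const double collateral = 15000;   // CORE locked in the call order
       const double feed       = 5.0;     // CORE per USD (assumed feed price)
       const double mcr        = 1.75;    // maintenance collateral ratio 1750/1000

       const double cr         = collateral / ( debt * feed );   // 3.0 -> 300%
       const double call_price = collateral / ( debt * mcr );    // 60/7, about 8.571

       std::printf( "collateral ratio = %.0f%%\n", cr * 100 );
       std::printf( "call price       = %.3f CORE/USD\n", call_price );
       return 0;
    }

The tests then contrast this position (target collateral ratio 1700, below the 1750 maintenance ratio) with one created at 2000, above it, which is the distinction the surrounding comments call out.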
const auto& core = asset_id_type()(db); - asset_id_type usd_id = bitusd.id; + asset_id_type usd_id = bitusd.get_id(); { // set margin call fee ratio @@ -487,7 +487,7 @@ BOOST_AUTO_TEST_CASE(tcr_test_hf2481_call_settle) transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); transfer(committee_account, borrower4_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -496,15 +496,15 @@ BOOST_AUTO_TEST_CASE(tcr_test_hf2481_call_settle) publish_feed( bitusd, feedproducer, current_feed ); // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7, tcr 170% is lower than 175% const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000), 1700); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7, tcr 200% > 175% const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500), 2000); - call_order_id_type call2_id = call2.id; + call_order_id_type call2_id = call2.get_id(); // create yet another position with 500% collateral, call price is 25/1.75 CORE/USD = 100/7, no tcr const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(25000)); // create a small position with 320% collateral, call price is 16/1.75 CORE/USD = 64/7, no tcr const call_order_object& call4 = *borrow( borrower4, bitusd.amount(10), asset(160) ); - call_order_id_type call4_id = call4.id; + call_order_id_type call4_id = call4.get_id(); transfer(borrower, seller, bitusd.amount(1000)); transfer(borrower2, seller, bitusd.amount(1000)); @@ -528,28 +528,28 @@ BOOST_AUTO_TEST_CASE(tcr_test_hf2481_call_settle) BOOST_CHECK_EQUAL( 10, get_balance(borrower4, bitusd) ); // This sell order above MSSP will not be matched with a call - limit_order_id_type sell_high = create_sell_order(seller, bitusd.amount(7), core.amount(78))->id; - BOOST_CHECK_EQUAL( db.find( sell_high )->for_sale.value, 7 ); + limit_order_id_type sell_high = create_sell_order(seller, bitusd.amount(7), core.amount(78))->get_id(); + BOOST_CHECK_EQUAL( db.find( sell_high )->for_sale.value, 7 ); BOOST_CHECK_EQUAL( 2993, get_balance(seller, bitusd) ); BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); // This buy order is too low will not be matched with a sell order - limit_order_id_type buy_low = create_sell_order(buyer, asset(80), bitusd.amount(10))->id; + limit_order_id_type buy_low = create_sell_order(buyer, asset(80), bitusd.amount(10))->get_id(); BOOST_CHECK_EQUAL( 0, get_balance(buyer, bitusd) ); BOOST_CHECK_EQUAL( init_balance - 80, get_balance(buyer, core) ); // Create a sell order which will be matched with several call orders later, price 1/9 - limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(500), core.amount(4500) )->id; - BOOST_CHECK_EQUAL( db.find( sell_id )->for_sale.value, 500 ); + limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(500), core.amount(4500) )->get_id(); + BOOST_CHECK_EQUAL( db.find( sell_id )->for_sale.value, 500 ); // Create a force settlement, will be matched with several call orders later auto result = force_settle( seller, bitusd.amount(2400) ); BOOST_REQUIRE( result.is_type() ); BOOST_REQUIRE( result.get().value.new_objects.valid() ); BOOST_REQUIRE( 
!result.get().value.new_objects->empty() ); - force_settlement_id_type settle_id = *result.get().value.new_objects->begin(); + force_settlement_id_type settle_id { *result.get().value.new_objects->begin() }; BOOST_CHECK( db.find( settle_id ) != nullptr ); // prepare price feed to get call and call2 (but not call3) into margin call territory @@ -590,7 +590,7 @@ BOOST_AUTO_TEST_CASE(tcr_test_hf2481_call_settle) BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); // the limit order then will match with call2, at limit order's price: 1/9 - const call_order_object* tmp_call2 = db.find( call2_id ); + const call_order_object* tmp_call2 = db.find( call2_id ); BOOST_CHECK( tmp_call2 != nullptr ); // if the limit is big enough, call2 will receive call2_to_cover, pay 9*call2_to_cover @@ -604,7 +604,7 @@ BOOST_AUTO_TEST_CASE(tcr_test_hf2481_call_settle) // call4 will match with the settle order, since it has no tcr, it will be fully closed // match price is 1/11 - const call_order_object* tmp_call4 = db.find( call4_id ); + const call_order_object* tmp_call4 = db.find( call4_id ); BOOST_CHECK( tmp_call4 == nullptr ); // borrower4 balance changes @@ -627,7 +627,7 @@ BOOST_AUTO_TEST_CASE(tcr_test_hf2481_call_settle) BOOST_CHECK_EQUAL( 25000, call3.collateral.value ); // sell_id is completely filled - BOOST_CHECK( !db.find( sell_id ) ); + BOOST_CHECK( !db.find( sell_id ) ); // settle order is not fully filled BOOST_CHECK( db.find( settle_id ) != nullptr ); @@ -638,7 +638,7 @@ BOOST_AUTO_TEST_CASE(tcr_test_hf2481_call_settle) get_balance(seller, core) ); // 500*9 + 10*10.7 + call2_cover2 * 10.7 // buy_low's price is too low that won't be matched - BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 ); + BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 ); // Can not reduce CR of a call order to trigger a margin call but not get fully filled and final CR <= ICR BOOST_CHECK_THROW( borrow( borrower_id(db), asset(10000, usd_id), asset(160000), 1700), fc::exception ); @@ -687,7 +687,7 @@ BOOST_AUTO_TEST_CASE(hf2481_cross_test) const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); const auto& core = asset_id_type()(db); - asset_id_type usd_id = bitusd.id; + asset_id_type usd_id = bitusd.get_id(); { // set margin call fee ratio @@ -711,7 +711,7 @@ BOOST_AUTO_TEST_CASE(hf2481_cross_test) transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); transfer(committee_account, borrower4_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -720,16 +720,16 @@ BOOST_AUTO_TEST_CASE(hf2481_cross_test) publish_feed( bitusd, feedproducer, current_feed ); // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7, tcr 170% is lower than 175% const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000), 1700); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7, tcr 200% > 175% const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500), 2000); - call_order_id_type call2_id = call2.id; + call_order_id_type call2_id = call2.get_id(); // create yet another position with 500% collateral, call price is 25/1.75 CORE/USD = 100/7, no tcr const call_order_object& call3 = *borrow( borrower3, 
bitusd.amount(1000), asset(25000)); - call_order_id_type call3_id = call3.id; + call_order_id_type call3_id = call3.get_id(); // create a small position with 320% collateral, call price is 16/1.75 CORE/USD = 64/7, no tcr const call_order_object& call4 = *borrow( borrower4, bitusd.amount(10), asset(160) ); - call_order_id_type call4_id = call4.id; + call_order_id_type call4_id = call4.get_id(); transfer(borrower, seller, bitusd.amount(1000)); transfer(borrower2, seller, bitusd.amount(1000)); @@ -753,28 +753,28 @@ BOOST_AUTO_TEST_CASE(hf2481_cross_test) BOOST_CHECK_EQUAL( 10, get_balance(borrower4, bitusd) ); // This sell order above MSSP will not be matched with a call - limit_order_id_type sell_high = create_sell_order(seller, bitusd.amount(7), core.amount(78))->id; - BOOST_CHECK_EQUAL( db.find( sell_high )->for_sale.value, 7 ); + limit_order_id_type sell_high = create_sell_order(seller, bitusd.amount(7), core.amount(78))->get_id(); + BOOST_CHECK_EQUAL( db.find( sell_high )->for_sale.value, 7 ); BOOST_CHECK_EQUAL( 2993, get_balance(seller, bitusd) ); BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); // This buy order is too low will not be matched with a sell order - limit_order_id_type buy_low = create_sell_order(buyer, asset(80), bitusd.amount(10))->id; + limit_order_id_type buy_low = create_sell_order(buyer, asset(80), bitusd.amount(10))->get_id(); BOOST_CHECK_EQUAL( 0, get_balance(buyer, bitusd) ); BOOST_CHECK_EQUAL( init_balance - 80, get_balance(buyer, core) ); // Create a sell order which will be matched with several call orders later, price 1/9 - limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(500), core.amount(4500) )->id; - BOOST_CHECK_EQUAL( db.find( sell_id )->for_sale.value, 500 ); + limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(500), core.amount(4500) )->get_id(); + BOOST_CHECK_EQUAL( db.find( sell_id )->for_sale.value, 500 ); // Create a force settlement, will be matched with several call orders later auto result = force_settle( seller, bitusd.amount(2400) ); BOOST_REQUIRE( result.is_type() ); BOOST_REQUIRE( result.get().value.new_objects.valid() ); BOOST_REQUIRE( !result.get().value.new_objects->empty() ); - force_settlement_id_type settle_id = *result.get().value.new_objects->begin(); + force_settlement_id_type settle_id { *result.get().value.new_objects->begin() }; BOOST_CHECK( db.find( settle_id ) != nullptr ); BOOST_CHECK_EQUAL( 2400, settle_id(db).balance.amount.value ); @@ -805,7 +805,7 @@ BOOST_AUTO_TEST_CASE(hf2481_cross_test) generate_block(); // firstly the limit order will match with call, at limit order's price: 1/9 - const call_order_object* tmp_call = db.find( call_id ); + const call_order_object* tmp_call = db.find( call_id ); BOOST_CHECK( tmp_call != nullptr ); // call will receive call_to_cover, pay 9*call_to_cover @@ -820,7 +820,7 @@ BOOST_AUTO_TEST_CASE(hf2481_cross_test) BOOST_CHECK_EQUAL( 0, get_balance(borrower_id, usd_id) ); // the limit order then will match with call2, at limit order's price: 1/9 - const call_order_object* tmp_call2 = db.find( call2_id ); + const call_order_object* tmp_call2 = db.find( call2_id ); BOOST_CHECK( tmp_call2 != nullptr ); // if the limit is big enough, call2 will receive call2_to_cover, pay 9*call2_to_cover @@ -833,7 +833,7 @@ BOOST_AUTO_TEST_CASE(hf2481_cross_test) BOOST_CHECK_EQUAL( 0, get_balance(borrower2_id, usd_id) ); // sell_id is completely filled - BOOST_CHECK( !db.find( sell_id ) ); + BOOST_CHECK( !db.find( sell_id ) ); // all call orders are still there 
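The match prices quoted in the comments of these tests come from two different rules: a maker limit order fills at its own offered price (500 USD for 4500 CORE gives the "1/9"), while a settle order matching a call fills at the maximum short squeeze price, the feed stretched by the maximum_short_squeeze_ratio of 1100 (the "1/11 = 1000/11000"). The sketch below redoes that arithmetic; the 10 CORE per USD feed is an assumed value chosen to reproduce the 1/11 figure, since the adjusted feed itself is not visible in this hunk:

    // Match prices quoted in the comments, under an assumed feed of
    // 10 CORE per USD and maximum_short_squeeze_ratio = 1100 (1.1x).
    #include <cstdio>

    int main()
    {
       const double feed_core_per_usd = 10.0;   // assumed adjusted feed price
       const double mssr              = 1.1;    // maximum_short_squeeze_ratio 1100/1000

       // A maker limit order selling 500 USD for 4500 CORE fills at its own price:
       const double limit_price = 4500.0 / 500.0;       // 9 CORE per USD, the "1/9"

       // A settle order matching a call fills at the maximum short squeeze
       // price, i.e. the call pays up to 10% more collateral than the feed implies:
       const double mssp = feed_core_per_usd * mssr;    // 11 CORE per USD, the "1/11"

       std::printf( "limit order match price: %.1f CORE per USD\n", limit_price );
       std::printf( "max short squeeze price: %.1f CORE per USD\n", mssp );
       return 0;
    }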
BOOST_CHECK( db.find( call_id ) != nullptr ); @@ -863,14 +863,14 @@ BOOST_AUTO_TEST_CASE(hf2481_cross_test) BOOST_CHECK_EQUAL( 25000, call3_id(db).collateral.value ); // buy_low's price is too low that won't be matched - BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 ); + BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 ); // pass the hard fork time generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); // call4 will match with the settle order, since it has no tcr, it will be fully closed // match price is 1/11 - const call_order_object* tmp_call4 = db.find( call4_id ); + const call_order_object* tmp_call4 = db.find( call4_id ); BOOST_CHECK( tmp_call4 == nullptr ); // borrower4 balance changes @@ -902,7 +902,7 @@ BOOST_AUTO_TEST_CASE(hf2481_cross_test) get_balance(seller_id, asset_id_type()) ); // 500*9 + 10*10.7 + call2_cover2 * 10.7 // buy_low's price is too low that won't be matched - BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 ); + BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 ); // generate a block generate_block(); @@ -930,7 +930,7 @@ BOOST_AUTO_TEST_CASE(call_settle_blackswan) const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); const auto& core = asset_id_type()(db); - asset_id_type usd_id = bitusd.id; + asset_id_type usd_id = bitusd.get_id(); { // set margin call fee ratio @@ -953,7 +953,7 @@ BOOST_AUTO_TEST_CASE(call_settle_blackswan) transfer(committee_account, borrower3_id, asset(init_balance)); transfer(committee_account, borrower4_id, asset(init_balance)); transfer(committee_account, borrower5_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -962,19 +962,19 @@ BOOST_AUTO_TEST_CASE(call_settle_blackswan) publish_feed( bitusd, feedproducer, current_feed ); // start out with 300% collateral, call price is 15/175 CORE/USD = 60/700, tcr 170% is lower than 175% const call_order_object& call = *borrow( borrower, bitusd.amount(100000), asset(15000), 1700); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create another position with 310% collateral, call price is 15.5/175 CORE/USD = 62/700, tcr 200% > 175% const call_order_object& call2 = *borrow( borrower2, bitusd.amount(100000), asset(15500), 2000); - call_order_id_type call2_id = call2.id; + call_order_id_type call2_id = call2.get_id(); // create yet another position with 500% collateral, call price is 25/175 CORE/USD = 100/700, no tcr const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(25000)); - call_order_id_type call3_id = call3.id; + call_order_id_type call3_id = call3.get_id(); // create a small position with 320% collateral, call price is 16/175 CORE/USD = 64/700, no tcr const call_order_object& call4 = *borrow( borrower4, bitusd.amount(1000), asset(160) ); - call_order_id_type call4_id = call4.id; + call_order_id_type call4_id = call4.get_id(); // create yet another position with 900% collateral, call price is 45/175 CORE/USD = 180/700, no tcr const call_order_object& call5 = *borrow( borrower5, bitusd.amount(100000), asset(45000)); - call_order_id_type call5_id = call5.id; + call_order_id_type call5_id = call5.get_id(); transfer(borrower, seller, bitusd.amount(100000)); transfer(borrower2, seller, bitusd.amount(100000)); @@ -1002,36 +1002,36 @@ BOOST_AUTO_TEST_CASE(call_settle_blackswan) share_type 
expected_seller_usd_balance = 300000; // This sell order above MCOP will not be matched with a call - limit_order_id_type sell_high = create_sell_order(seller, bitusd.amount(700), core.amount(150))->id; - BOOST_CHECK_EQUAL( db.find( sell_high )->for_sale.value, 700 ); + limit_order_id_type sell_high = create_sell_order(seller, bitusd.amount(700), core.amount(150))->get_id(); + BOOST_CHECK_EQUAL( db.find( sell_high )->for_sale.value, 700 ); expected_seller_usd_balance -= 700; BOOST_CHECK_EQUAL( expected_seller_usd_balance.value, get_balance(seller, bitusd) ); BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); // This buy order is too low will not be matched with a sell order - limit_order_id_type buy_low = create_sell_order(buyer, asset(80), bitusd.amount(1000))->id; + limit_order_id_type buy_low = create_sell_order(buyer, asset(80), bitusd.amount(1000))->get_id(); BOOST_CHECK_EQUAL( 0, get_balance(buyer, bitusd) ); BOOST_CHECK_EQUAL( init_balance - 80, get_balance(buyer, core) ); // Create a sell order which will be matched with several call orders later, price 100/9 - limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(100000), core.amount(9000) )->id; - BOOST_CHECK_EQUAL( db.find( sell_id )->for_sale.value, 100000 ); + limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(100000), core.amount(9000) )->get_id(); + BOOST_CHECK_EQUAL( db.find( sell_id )->for_sale.value, 100000 ); expected_seller_usd_balance -= 100000; // Create another sell order which will trigger a blackswan event if matched, price 100/21 limit_order_id_type sell_swan; if( i == 1 ) { - sell_swan = create_sell_order(seller, bitusd.amount(100), core.amount(21) )->id; - BOOST_CHECK_EQUAL( db.find( sell_swan )->for_sale.value, 100 ); + sell_swan = create_sell_order(seller, bitusd.amount(100), core.amount(21) )->get_id(); + BOOST_CHECK_EQUAL( db.find( sell_swan )->for_sale.value, 100 ); expected_seller_usd_balance -= 100; } else if( i == 2 ) { - sell_swan = create_sell_order(seller, bitusd.amount(10000), core.amount(2100) )->id; - BOOST_CHECK_EQUAL( db.find( sell_swan )->for_sale.value, 10000 ); + sell_swan = create_sell_order(seller, bitusd.amount(10000), core.amount(2100) )->get_id(); + BOOST_CHECK_EQUAL( db.find( sell_swan )->for_sale.value, 10000 ); expected_seller_usd_balance -= 10000; } @@ -1040,7 +1040,7 @@ BOOST_AUTO_TEST_CASE(call_settle_blackswan) BOOST_REQUIRE( result.is_type() ); BOOST_REQUIRE( result.get().value.new_objects.valid() ); BOOST_REQUIRE( !result.get().value.new_objects->empty() ); - force_settlement_id_type settle_id = *result.get().value.new_objects->begin(); + force_settlement_id_type settle_id { *result.get().value.new_objects->begin() }; BOOST_CHECK( db.find( settle_id ) != nullptr ); expected_seller_usd_balance -= 40000; @@ -1049,7 +1049,7 @@ BOOST_AUTO_TEST_CASE(call_settle_blackswan) BOOST_REQUIRE( result.is_type() ); BOOST_REQUIRE( result.get().value.new_objects.valid() ); BOOST_REQUIRE( !result.get().value.new_objects->empty() ); - force_settlement_id_type settle2_id = *result.get().value.new_objects->begin(); + force_settlement_id_type settle2_id { *result.get().value.new_objects->begin() }; BOOST_CHECK( db.find( settle2_id ) != nullptr ); expected_seller_usd_balance -= 10000; @@ -1058,7 +1058,7 @@ BOOST_AUTO_TEST_CASE(call_settle_blackswan) BOOST_REQUIRE( result.is_type() ); BOOST_REQUIRE( result.get().value.new_objects.valid() ); BOOST_REQUIRE( !result.get().value.new_objects->empty() ); - force_settlement_id_type settle3_id = 
*result.get().value.new_objects->begin(); + force_settlement_id_type settle3_id { *result.get().value.new_objects->begin() }; BOOST_CHECK( db.find( settle3_id ) != nullptr ); expected_seller_usd_balance -= 3; @@ -1072,7 +1072,7 @@ BOOST_AUTO_TEST_CASE(call_settle_blackswan) BOOST_REQUIRE( result.is_type() ); BOOST_REQUIRE( result.get().value.new_objects.valid() ); BOOST_REQUIRE( !result.get().value.new_objects->empty() ); - force_settlement_id_type settle4_id = *result.get().value.new_objects->begin(); + force_settlement_id_type settle4_id { *result.get().value.new_objects->begin() }; BOOST_CHECK( db.find( settle4_id ) != nullptr ); expected_seller_usd_balance -= 5; @@ -1139,7 +1139,7 @@ BOOST_AUTO_TEST_CASE(call_settle_blackswan) expected_margin_call_fees += margin_call_fee_limit_2; // sell_id is completely filled - BOOST_CHECK( !db.find( sell_id ) ); + BOOST_CHECK( !db.find( sell_id ) ); // now call4 has the lowest CR // call4 will match with the settle order, since it is small and has too few collateral, it will be fully closed @@ -1270,16 +1270,16 @@ BOOST_AUTO_TEST_CASE(call_settle_blackswan) BOOST_CHECK_EQUAL( expected_seller_core_balance.value, get_balance(seller, core) ); // buy_low's price is too low that won't be matched - BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 ); + BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 ); // sell_high is not matched - BOOST_CHECK_EQUAL( db.find( sell_high )->for_sale.value, 700 ); + BOOST_CHECK_EQUAL( db.find( sell_high )->for_sale.value, 700 ); // sell_swan is not matched if( i == 1 ) - BOOST_CHECK_EQUAL( db.find( sell_swan )->for_sale.value, 100 ); + BOOST_CHECK_EQUAL( db.find( sell_swan )->for_sale.value, 100 ); else if( i == 2 ) - BOOST_CHECK_EQUAL( db.find( sell_swan )->for_sale.value, 10000 ); + BOOST_CHECK_EQUAL( db.find( sell_swan )->for_sale.value, 10000 ); // check gs fund BOOST_CHECK_EQUAL( usd_id(db).bitasset_data(db).settlement_fund.value, expected_gs_fund.value ); @@ -1296,16 +1296,16 @@ BOOST_AUTO_TEST_CASE(call_settle_blackswan) BOOST_TEST_MESSAGE( "Check again" ); // buy_low's price is too low that won't be matched - BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 ); + BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 ); // sell_high is not matched - BOOST_CHECK_EQUAL( db.find( sell_high )->for_sale.value, 700 ); + BOOST_CHECK_EQUAL( db.find( sell_high )->for_sale.value, 700 ); // sell_swan is not matched if( i == 1 ) - BOOST_CHECK_EQUAL( db.find( sell_swan )->for_sale.value, 100 ); + BOOST_CHECK_EQUAL( db.find( sell_swan )->for_sale.value, 100 ); else if( i == 2 ) - BOOST_CHECK_EQUAL( db.find( sell_swan )->for_sale.value, 10000 ); + BOOST_CHECK_EQUAL( db.find( sell_swan )->for_sale.value, 10000 ); // check gs fund BOOST_CHECK_EQUAL( usd_id(db).bitasset_data(db).settlement_fund.value, expected_gs_fund.value ); @@ -1340,7 +1340,7 @@ BOOST_AUTO_TEST_CASE(call_settle_limit_settle) const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); const auto& core = asset_id_type()(db); - asset_id_type usd_id = bitusd.id; + asset_id_type usd_id = bitusd.get_id(); asset_id_type core_id; { @@ -1362,7 +1362,7 @@ BOOST_AUTO_TEST_CASE(call_settle_limit_settle) transfer(committee_account, borrower_id, asset(init_balance)); transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; 
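Throughout these hunks, typed ids taken from an operation result switch from copy-initialization (= *result...begin()) to direct brace-initialization ({ *result...begin() }). That is the behaviour C++ mandates when the converting constructor of the typed id is explicit, which is presumably the motivation for the change: copy-initialization may not call an explicit constructor, direct-initialization may. A minimal sketch with simplified stand-in types (not the real graphene classes):

    // Copy-initialization vs direct-initialization with an explicit
    // converting constructor; the types are simplified stand-ins.
    #include <cstdint>

    struct object_id_type
    {
       std::uint64_t number = 0;
    };

    struct force_settlement_id_type
    {
       explicit force_settlement_id_type( const object_id_type& generic )
          : instance( generic.number ) {}
       std::uint64_t instance = 0;
    };

    int main()
    {
       object_id_type raw;
       raw.number = 7;

       // force_settlement_id_type a = raw;  // ill-formed: copy-initialization
       //                                    // cannot use an explicit constructor
       force_settlement_id_type b { raw };   // OK: direct (brace) initialization

       return ( b.instance == 7 ) ? 0 : 1;
    }

The same rule accounts for the proposal_id_type, htlc_id_type and operation_history_id_type initializations rewritten elsewhere in these tests.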
current_feed.maintenance_collateral_ratio = 1750; @@ -1371,13 +1371,13 @@ BOOST_AUTO_TEST_CASE(call_settle_limit_settle) publish_feed( bitusd, feedproducer, current_feed ); // start out with 300% collateral, call price is 15/175 CORE/USD = 60/700, tcr 170% is lower than 175% const call_order_object& call = *borrow( borrower, bitusd.amount(100000), asset(15000), 1700); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create another position with 360% collateral, call price is 18/175 CORE/USD = 72/700, no tcr const call_order_object& call2 = *borrow( borrower2, bitusd.amount(100000), asset(18000) ); - call_order_id_type call2_id = call2.id; + call_order_id_type call2_id = call2.get_id(); // create yet another position with 800% collateral, call price is 40/175 CORE/USD = 160/700, no tcr const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(40000) ); - call_order_id_type call3_id = call3.id; + call_order_id_type call3_id = call3.get_id(); transfer(borrower, seller, bitusd.amount(100000)); transfer(borrower2, seller, bitusd.amount(100000)); @@ -1401,15 +1401,15 @@ BOOST_AUTO_TEST_CASE(call_settle_limit_settle) BOOST_CHECK_EQUAL( 0, get_balance(borrower3, bitusd) ); // Create a sell order which will trigger a blackswan event if matched, price 100/16 - limit_order_id_type sell_swan = create_sell_order(seller2, bitusd.amount(10000), core.amount(1600) )->id; - BOOST_CHECK_EQUAL( db.find( sell_swan )->for_sale.value, 10000 ); + limit_order_id_type sell_swan = create_sell_order(seller2, bitusd.amount(10000), core.amount(1600) )->get_id(); + BOOST_CHECK_EQUAL( db.find( sell_swan )->for_sale.value, 10000 ); // Create a force settlement, will be matched with several call orders later auto result = force_settle( seller, bitusd.amount(200000) ); BOOST_REQUIRE( result.is_type() ); BOOST_REQUIRE( result.get().value.new_objects.valid() ); BOOST_REQUIRE( !result.get().value.new_objects->empty() ); - force_settlement_id_type settle_id = *result.get().value.new_objects->begin(); + force_settlement_id_type settle_id { *result.get().value.new_objects->begin() }; BOOST_CHECK( db.find( settle_id ) != nullptr ); BOOST_CHECK_EQUAL( 200000, settle_id(db).balance.amount.value ); diff --git a/tests/tests/grouped_orders_api_tests.cpp b/tests/tests/grouped_orders_api_tests.cpp index 3d8b308055..42a1fbdec3 100644 --- a/tests/tests/grouped_orders_api_tests.cpp +++ b/tests/tests/grouped_orders_api_tests.cpp @@ -45,7 +45,7 @@ BOOST_AUTO_TEST_CASE(api_limit_get_grouped_limit_orders) { create_bitasset("USD", account_id_type()); create_account("dan"); create_account("bob"); - asset_id_type bit_jmj_id = create_bitasset("JMJBIT").id; + asset_id_type bit_jmj_id = create_bitasset("JMJBIT").get_id(); generate_block(); fc::usleep(fc::milliseconds(100)); auto core = std::string( asset_id_type() ); diff --git a/tests/tests/history_api_tests.cpp b/tests/tests/history_api_tests.cpp index 143d2e3369..c1a5f31a75 100644 --- a/tests/tests/history_api_tests.cpp +++ b/tests/tests/history_api_tests.cpp @@ -142,7 +142,7 @@ BOOST_AUTO_TEST_CASE(get_account_history_virtual_operation_test) { try { graphene::app::history_api hist_api(app); - asset_id_type usd_id = create_user_issued_asset("USD").id; + asset_id_type usd_id = create_user_issued_asset("USD").get_id(); ACTORS( (dan)(bob) ); fund( dan, asset(100) ); @@ -251,11 +251,11 @@ BOOST_AUTO_TEST_CASE(get_account_history_additional) { const account_object& dan = create_account("dan"); // create op 1 - create_bitasset("CNY", 
dan.id); // create op 2 + create_bitasset("CNY", dan.get_id()); // create op 2 create_bitasset("BTC", account_id_type()); // create op 3 - create_bitasset("XMR", dan.id); // create op 4 + create_bitasset("XMR", dan.get_id()); // create op 4 create_bitasset("EUR", account_id_type()); // create op 5 - create_bitasset("OIL", dan.id); // create op 6 + create_bitasset("OIL", dan.get_id()); // create op 6 generate_block(); fc::usleep(fc::milliseconds(100)); @@ -605,11 +605,11 @@ BOOST_AUTO_TEST_CASE(get_account_history_by_time) { const account_object& dan = create_account("dan"); // create op 1 - create_bitasset("CNY", dan.id); // create op 2 + create_bitasset("CNY", dan.get_id()); // create op 2 create_bitasset("BTC", account_id_type()); // create op 3 - create_bitasset("XMR", dan.id); // create op 4 + create_bitasset("XMR", dan.get_id()); // create op 4 create_bitasset("EUR", account_id_type()); // create op 5 - create_bitasset("OIL", dan.id); // create op 6 + create_bitasset("OIL", dan.get_id()); // create op 6 generate_block(); fc::usleep(fc::milliseconds(100)); @@ -712,7 +712,7 @@ BOOST_AUTO_TEST_CASE(track_account) { // account_id_type() creates dan(account tracked) const account_object& dan = create_account("dan"); - auto dan_id = dan.id; + auto dan_id = dan.get_id(); // dan makes 1 op create_bitasset("EUR", dan_id); @@ -786,7 +786,7 @@ BOOST_AUTO_TEST_CASE(track_account2) { // account_id_type() creates alice(tracked account) const account_object& alice = create_account("alice"); - auto alice_id = alice.id; + auto alice_id = alice.get_id(); //account_id_type() creates some ops create_bitasset("CNY", account_id_type()); @@ -870,7 +870,7 @@ BOOST_AUTO_TEST_CASE(min_blocks_to_keep_test) { histories = hist_api.get_account_history("1.2.0", operation_history_id_type(0), 10, operation_history_id_type(0)); BOOST_REQUIRE_EQUAL(histories.size(), 3u); - operation_history_id_type oldest_op_hist_id = histories.back().id; + operation_history_id_type oldest_op_hist_id { histories.back().id }; BOOST_CHECK( db.find(oldest_op_hist_id) ); generate_block(); diff --git a/tests/tests/htlc_tests.cpp b/tests/tests/htlc_tests.cpp index 15ad4caa8c..833b7feebf 100644 --- a/tests/tests/htlc_tests.cpp +++ b/tests/tests/htlc_tests.cpp @@ -115,7 +115,7 @@ try { // make sure Bob (or anyone) can see the details of the transaction graphene::app::database_api db_api(db); - auto obj = db_api.get_objects( {alice_htlc_id }).front(); + auto obj = db_api.get_objects( { object_id_type(alice_htlc_id) }).front(); graphene::chain::htlc_object htlc = obj.template as(GRAPHENE_MAX_NESTED_OBJECTS); // someone else attempts to extend it (bob says he's alice, but he's not) @@ -304,7 +304,7 @@ try { asset(3 * GRAPHENE_BLOCKCHAIN_PRECISION), hash_it(pre_image), 0, 60); trx.operations.push_back(create_operation); sign(trx, alice_private_key); - htlc_id_type htlc_id = PUSH_TX(db, trx, ~0).operation_results[0].get(); + htlc_id_type htlc_id { PUSH_TX(db, trx, ~0).operation_results[0].get() }; trx.clear(); BOOST_TEST_MESSAGE("Bob attempts to redeem, but can't because preimage size is 0 (should fail)"); graphene::chain::htlc_redeem_operation redeem; @@ -319,7 +319,7 @@ try { // Alice creates an asset BOOST_TEST_MESSAGE("Create ALICECOIN so transfer_restricted can be controlled"); - const asset_id_type uia_id = create_user_issued_asset( "ALICECOIN", alice, transfer_restricted).id; + const asset_id_type uia_id = create_user_issued_asset( "ALICECOIN", alice, transfer_restricted).get_id(); BOOST_TEST_MESSAGE("Issuing ALICECOIN to Bob"); 
issue_uia(bob, asset(10000, uia_id) ); // verify transfer restrictions are in place @@ -462,7 +462,7 @@ try { sign(trx, alice_private_key); graphene::protocol::processed_transaction results = PUSH_TX(db, trx, ~0); trx.operations.clear(); - htlc_id_type htlc_id = results.operation_results[0].get(); + htlc_id_type htlc_id { results.operation_results[0].get() }; BOOST_TEST_MESSAGE("Attempt to redeem HTLC that has no preimage, but include one anyway (should fail)"); htlc_redeem_operation redeem; redeem.htlc_id = htlc_id; @@ -489,7 +489,7 @@ try { sign(trx, alice_private_key); graphene::protocol::processed_transaction results = PUSH_TX(db, trx, ~0); trx.operations.clear(); - htlc_id_type htlc_id = results.operation_results[0].get(); + htlc_id_type htlc_id { results.operation_results[0].get() }; BOOST_TEST_MESSAGE("Attempt to redeem with no preimage (should fail)"); htlc_redeem_operation redeem; redeem.htlc_id = htlc_id; @@ -962,7 +962,7 @@ try { upgrade_to_lifetime_member( nathan ); // create a UIA - const asset_id_type uia_id = create_user_issued_asset( "NATHANCOIN", nathan, white_list ).id; + const asset_id_type uia_id = create_user_issued_asset( "NATHANCOIN", nathan, white_list ).get_id(); // Make a whitelist authority { BOOST_TEST_MESSAGE( "Changing the whitelist authority" ); diff --git a/tests/tests/liquidity_pool_tests.cpp b/tests/tests/liquidity_pool_tests.cpp index 770851cee8..d0c9d1c085 100644 --- a/tests/tests/liquidity_pool_tests.cpp +++ b/tests/tests/liquidity_pool_tests.cpp @@ -57,7 +57,8 @@ BOOST_AUTO_TEST_CASE( liquidity_pool_hardfork_time_test ) // Before the hard fork, unable to create a liquidity pool or transact against a liquidity pool, // or do any of them with proposals - BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.id, usd.id, lpa.id, 0, 0 ), fc::exception ); + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.get_id(), usd.get_id(), lpa.get_id(), 0, 0 ), + fc::exception ); liquidity_pool_id_type tmp_lp_id; BOOST_CHECK_THROW( delete_liquidity_pool( sam_id, tmp_lp_id ), fc::exception ); @@ -69,7 +70,7 @@ BOOST_AUTO_TEST_CASE( liquidity_pool_hardfork_time_test ) fc::exception ); liquidity_pool_create_operation cop = - make_liquidity_pool_create_op( sam_id, core.id, usd.id, lpa.id, 0, 0 ); + make_liquidity_pool_create_op( sam_id, core.get_id(), usd.get_id(), lpa.get_id(), 0, 0 ); BOOST_CHECK_THROW( propose( cop ), fc::exception ); liquidity_pool_delete_operation delop = make_liquidity_pool_delete_op( sam_id, tmp_lp_id ); @@ -131,7 +132,7 @@ BOOST_AUTO_TEST_CASE( liquidity_pool_create_delete_proposal_test ) // Able to propose { liquidity_pool_create_operation cop = - make_liquidity_pool_create_op( sam_id, core.id, usd.id, lpa.id, 0, 0 ); + make_liquidity_pool_create_op( sam_id, core.get_id(), usd.get_id(), lpa.get_id(), 0, 0 ); propose( cop ); liquidity_pool_id_type tmp_lp_id; @@ -153,7 +154,8 @@ BOOST_AUTO_TEST_CASE( liquidity_pool_create_delete_proposal_test ) } // Able to create liquidity pools with valid data - const liquidity_pool_object& lpo1 = create_liquidity_pool( sam_id, core.id, usd.id, lpa1.id, 0, 0 ); + const liquidity_pool_object& lpo1 = create_liquidity_pool( sam_id, core.get_id(), usd.get_id(), lpa1.get_id(), + 0, 0 ); BOOST_CHECK( lpo1.asset_a == core.id ); BOOST_CHECK( lpo1.asset_b == usd.id ); BOOST_CHECK( lpo1.balance_a == 0 ); @@ -163,11 +165,12 @@ BOOST_AUTO_TEST_CASE( liquidity_pool_create_delete_proposal_test ) BOOST_CHECK( lpo1.withdrawal_fee_percent == 0 ); BOOST_CHECK( lpo1.virtual_value == 0 ); - liquidity_pool_id_type lp_id1 = 
lpo1.id; + liquidity_pool_id_type lp_id1 = lpo1.get_id(); BOOST_CHECK( lpa1.is_liquidity_pool_share_asset() ); BOOST_CHECK( *lpa1.for_liquidity_pool == lp_id1 ); - const liquidity_pool_object& lpo2 = create_liquidity_pool( sam_id, core.id, usd.id, lpa2.id, 200, 300 ); + const liquidity_pool_object& lpo2 = create_liquidity_pool( sam_id, core.get_id(), usd.get_id(), lpa2.get_id(), + 200, 300 ); BOOST_CHECK( lpo2.asset_a == core.id ); BOOST_CHECK( lpo2.asset_b == usd.id ); BOOST_CHECK( lpo2.balance_a == 0 ); @@ -177,11 +180,12 @@ BOOST_AUTO_TEST_CASE( liquidity_pool_create_delete_proposal_test ) BOOST_CHECK( lpo2.withdrawal_fee_percent == 300 ); BOOST_CHECK( lpo2.virtual_value == 0 ); - liquidity_pool_id_type lp_id2 = lpo2.id; + liquidity_pool_id_type lp_id2 = lpo2.get_id(); BOOST_CHECK( lpa2.is_liquidity_pool_share_asset() ); BOOST_CHECK( *lpa2.for_liquidity_pool == lp_id2 ); - const liquidity_pool_object& lpo3 = create_liquidity_pool( sam_id, usd.id, mpa.id, lpa3.id, 50, 50 ); + const liquidity_pool_object& lpo3 = create_liquidity_pool( sam_id, usd.get_id(), mpa.get_id(), lpa3.get_id(), + 50, 50 ); BOOST_CHECK( lpo3.asset_a == usd.id ); BOOST_CHECK( lpo3.asset_b == mpa.id ); @@ -192,42 +196,58 @@ BOOST_AUTO_TEST_CASE( liquidity_pool_create_delete_proposal_test ) BOOST_CHECK( lpo3.withdrawal_fee_percent == 50 ); BOOST_CHECK( lpo3.virtual_value == 0 ); - liquidity_pool_id_type lp_id3 = lpo3.id; + liquidity_pool_id_type lp_id3 = lpo3.get_id(); BOOST_CHECK( lpa3.is_liquidity_pool_share_asset() ); BOOST_CHECK( *lpa3.for_liquidity_pool == lp_id3 ); // Unable to create a liquidity pool with invalid data // the same assets in pool - BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.id, core.id, lpa.id, 0, 0 ), fc::exception ); - BOOST_CHECK_THROW( create_liquidity_pool( sam_id, usd.id, usd.id, lpa.id, 0, 0 ), fc::exception ); + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.get_id(), core.get_id(), lpa.get_id(), 0, 0 ), + fc::exception ); + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, usd.get_id(), usd.get_id(), lpa.get_id(), 0, 0 ), + fc::exception ); // ID of the first asset is greater - BOOST_CHECK_THROW( create_liquidity_pool( sam_id, usd.id, core.id, lpa.id, 0, 0 ), fc::exception ); + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, usd.get_id(), core.get_id(), lpa.get_id(), 0, 0 ), + fc::exception ); // the share asset is one of the assets in pool - BOOST_CHECK_THROW( create_liquidity_pool( sam_id, usd.id, lpa.id, lpa.id, 0, 0 ), fc::exception ); - BOOST_CHECK_THROW( create_liquidity_pool( sam_id, lpa.id, pm.id, lpa.id, 0, 0 ), fc::exception ); + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, usd.get_id(), lpa.get_id(), lpa.get_id(), 0, 0 ), + fc::exception ); + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, lpa.get_id(), pm.get_id(), lpa.get_id(), 0, 0 ), + fc::exception ); // percentage too big - BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.id, usd.id, lpa.id, 10001, 0 ), fc::exception ); - BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.id, usd.id, lpa.id, 0, 10001 ), fc::exception ); - BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.id, usd.id, lpa.id, 10001, 10001 ), fc::exception ); + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.get_id(), usd.get_id(), lpa.get_id(), 10001, 0 ), + fc::exception ); + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.get_id(), usd.get_id(), lpa.get_id(), 0, 10001 ), + fc::exception ); + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.get_id(), usd.get_id(), lpa.get_id(), 10001, 10001 ), 
+ fc::exception ); // asset does not exist - BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.id, usd.id, no_asset_id1, 0, 0 ), fc::exception ); - BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.id, no_asset_id1, lpa.id, 0, 0 ), fc::exception ); - BOOST_CHECK_THROW( create_liquidity_pool( sam_id, no_asset_id1, no_asset_id2, lpa.id, 0, 0 ), fc::exception ); + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.get_id(), usd.get_id(), no_asset_id1, 0, 0 ), + fc::exception ); + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.get_id(), no_asset_id1, lpa.get_id(), 0, 0 ), + fc::exception ); + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, no_asset_id1, no_asset_id2, lpa.get_id(), 0, 0 ), + fc::exception ); // the account does not own the share asset - BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.id, usd.id, ted_lpa.id, 0, 0 ), fc::exception ); + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.get_id(), usd.get_id(), ted_lpa.get_id(), 0, 0 ), + fc::exception ); // the share asset is a MPA or a PM - BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.id, usd.id, mpa.id, 0, 0 ), fc::exception ); - BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.id, usd.id, pm.id, 0, 0 ), fc::exception ); + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.get_id(), usd.get_id(), mpa.get_id(), 0, 0 ), + fc::exception ); + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.get_id(), usd.get_id(), pm.get_id(), 0, 0 ), + fc::exception ); // the share asset is already bound to a liquidity pool - BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.id, usd.id, lpa1.id, 0, 0 ), fc::exception ); + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.get_id(), usd.get_id(), lpa1.get_id(), 0, 0 ), + fc::exception ); // current supply of the share asset is not zero - BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.id, lpa.id, usd.id, 0, 0 ), fc::exception ); + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.get_id(), lpa.get_id(), usd.get_id(), 0, 0 ), + fc::exception ); // Unable to issue a liquidity pool share asset BOOST_CHECK_THROW( issue_uia( sam, lpa1.amount(1) ), fc::exception ); // Sam is able to delete an empty pool owned by him - generic_operation_result result = delete_liquidity_pool( sam_id, lpo1.id ); + generic_operation_result result = delete_liquidity_pool( sam_id, lpo1.get_id() ); BOOST_CHECK( !db.find( lp_id1 ) ); BOOST_CHECK( !lpa1.is_liquidity_pool_share_asset() ); BOOST_CHECK_EQUAL( result.new_objects.size(), 0u ); @@ -278,9 +298,9 @@ BOOST_AUTO_TEST_CASE( liquidity_pool_deposit_withdrawal_test ) const asset_object& lpa = create_user_issued_asset( "LPATEST", sam, charge_market_fee ); asset_id_type core_id = asset_id_type(); - asset_id_type eur_id = eur.id; - asset_id_type usd_id = usd.id; - asset_id_type lpa_id = lpa.id; + asset_id_type eur_id = eur.get_id(); + asset_id_type usd_id = usd.get_id(); + asset_id_type lpa_id = lpa.get_id(); int64_t init_amount = 10000000 * GRAPHENE_BLOCKCHAIN_PRECISION; fund( sam, asset(init_amount) ); @@ -313,8 +333,9 @@ BOOST_AUTO_TEST_CASE( liquidity_pool_deposit_withdrawal_test ) int64_t expected_lp_supply = 0; // create a liquidity pool - const liquidity_pool_object& lpo = create_liquidity_pool( sam_id, eur.id, usd.id, lpa.id, 200, 300 ); - liquidity_pool_id_type lp_id = lpo.id; + const liquidity_pool_object& lpo = create_liquidity_pool( sam_id, eur.get_id(), usd.get_id(), lpa.get_id(), + 200, 300 ); + liquidity_pool_id_type lp_id = lpo.get_id(); BOOST_CHECK( lpo.asset_a == eur_id ); 
BOOST_CHECK( lpo.asset_b == usd_id ); @@ -672,7 +693,7 @@ BOOST_AUTO_TEST_CASE( liquidity_pool_deposit_withdrawal_test ) generate_block(); graphene::market_history::liquidity_pool_ticker_id_type ticker_id( lp_id.instance ); - const auto& ticker = db.get< graphene::market_history::liquidity_pool_ticker_object >( ticker_id ); + const auto& ticker = db.get( ticker_id ); BOOST_CHECK_EQUAL( ticker._24h_deposit_count, 7u ); BOOST_CHECK_EQUAL( ticker.total_deposit_count, 7u ); BOOST_CHECK_EQUAL( ticker._24h_withdrawal_count, 2u ); @@ -713,9 +734,9 @@ BOOST_AUTO_TEST_CASE( liquidity_pool_exchange_test ) const asset_object& lpa = create_user_issued_asset( "LPATEST", sam, charge_market_fee ); asset_id_type core_id = asset_id_type(); - asset_id_type eur_id = eur.id; - asset_id_type usd_id = usd.id; - asset_id_type lpa_id = lpa.id; + asset_id_type eur_id = eur.get_id(); + asset_id_type usd_id = usd.get_id(); + asset_id_type lpa_id = lpa.get_id(); int64_t init_amount = 10000000 * GRAPHENE_BLOCKCHAIN_PRECISION; fund( sam, asset(init_amount) ); @@ -751,8 +772,9 @@ BOOST_AUTO_TEST_CASE( liquidity_pool_exchange_test ) int64_t expected_lp_supply = 0; // create a liquidity pool - const liquidity_pool_object& lpo = create_liquidity_pool( sam_id, eur.id, usd.id, lpa.id, 200, 300 ); - liquidity_pool_id_type lp_id = lpo.id; + const liquidity_pool_object& lpo = create_liquidity_pool( sam_id, eur.get_id(), usd.get_id(), lpa.get_id(), + 200, 300 ); + liquidity_pool_id_type lp_id = lpo.get_id(); BOOST_CHECK( lpo.asset_a == eur_id ); BOOST_CHECK( lpo.asset_b == usd_id ); @@ -974,7 +996,7 @@ BOOST_AUTO_TEST_CASE( liquidity_pool_exchange_test ) BOOST_CHECK_EQUAL( eur_id(db).dynamic_data(db).accumulated_fees.value, expected_accumulated_fees_eur ); graphene::market_history::liquidity_pool_ticker_id_type ticker_id( lp_id.instance ); - const auto& ticker = db.get< graphene::market_history::liquidity_pool_ticker_object >( ticker_id ); + const auto& ticker = db.get( ticker_id ); BOOST_CHECK_EQUAL( ticker._24h_exchange_a2b_count, 1u ); BOOST_CHECK_EQUAL( ticker.total_exchange_a2b_count, 1u ); BOOST_CHECK_EQUAL( ticker._24h_exchange_b2a_count, 1u ); @@ -1368,17 +1390,16 @@ BOOST_AUTO_TEST_CASE( liquidity_pool_apis_test ) const asset_object ted_lp3 = create_user_issued_asset( "TEDLP3", ted, charge_market_fee ); // create liquidity pools - const liquidity_pool_object sam_lpo1 = create_liquidity_pool( sam_id, sam_eur.id, sam_usd.id, - sam_lp1.id, 100, 310 ); - const liquidity_pool_object sam_lpo2 = create_liquidity_pool( sam_id, sam_usd.id, ted_usd.id, - sam_lp2.id, 200, 320 ); - const liquidity_pool_object ted_lpo1 = create_liquidity_pool( ted_id, sam_usd.id, ted_usd.id, - ted_lp1.id, 300, 330 ); - const liquidity_pool_object ted_lpo2 = create_liquidity_pool( ted_id, sam_usd.id, ted_eur.id, - ted_lp2.id, 400, 340 ); - const liquidity_pool_object ted_lpo3 = create_liquidity_pool( ted_id, ted_eur.id, ted_usd.id, - ted_lp3.id, 500, 350 ); - + const liquidity_pool_object sam_lpo1 = create_liquidity_pool( sam_id, sam_eur.get_id(), sam_usd.get_id(), + sam_lp1.get_id(), 100, 310 ); + const liquidity_pool_object sam_lpo2 = create_liquidity_pool( sam_id, sam_usd.get_id(), ted_usd.get_id(), + sam_lp2.get_id(), 200, 320 ); + const liquidity_pool_object ted_lpo1 = create_liquidity_pool( ted_id, sam_usd.get_id(), ted_usd.get_id(), + ted_lp1.get_id(), 300, 330 ); + const liquidity_pool_object ted_lpo2 = create_liquidity_pool( ted_id, sam_usd.get_id(), ted_eur.get_id(), + ted_lp2.get_id(), 400, 340 ); + const liquidity_pool_object ted_lpo3 = 
create_liquidity_pool( ted_id, ted_eur.get_id(), ted_usd.get_id(), + ted_lp3.get_id(), 500, 350 ); generate_block(); // Check database API @@ -1388,73 +1409,73 @@ BOOST_AUTO_TEST_CASE( liquidity_pool_apis_test ) auto pools = db_api.list_liquidity_pools(); BOOST_REQUIRE_EQUAL( pools.size(), 5u ); BOOST_CHECK( !pools.front().statistics.valid() ); - BOOST_CHECK( pools.front().id == sam_lpo1.id ); - BOOST_CHECK( pools.back().id == ted_lpo3.id ); + BOOST_CHECK( pools.front().id == sam_lpo1.get_id() ); + BOOST_CHECK( pools.back().id == ted_lpo3.get_id() ); // pagination - pools = db_api.list_liquidity_pools( 5, sam_lpo2.id ); + pools = db_api.list_liquidity_pools( 5, sam_lpo2.get_id() ); BOOST_REQUIRE_EQUAL( pools.size(), 4u ); BOOST_CHECK( !pools.front().statistics.valid() ); - BOOST_CHECK( pools.front().id == sam_lpo2.id ); - BOOST_CHECK( pools.back().id == ted_lpo3.id ); + BOOST_CHECK( pools.front().id == sam_lpo2.get_id() ); + BOOST_CHECK( pools.back().id == ted_lpo3.get_id() ); // with statistics - pools = db_api.list_liquidity_pools( 2, sam_lpo2.id, true ); + pools = db_api.list_liquidity_pools( 2, sam_lpo2.get_id(), true ); BOOST_REQUIRE_EQUAL( pools.size(), 2u ); BOOST_CHECK( pools.front().statistics.valid() ); - BOOST_CHECK( pools.front().id == sam_lpo2.id ); - BOOST_CHECK( pools.back().id == ted_lpo1.id ); + BOOST_CHECK( pools.front().id == sam_lpo2.get_id() ); + BOOST_CHECK( pools.back().id == ted_lpo1.get_id() ); // get_liquidity_pools_by_asset_a pools = db_api.get_liquidity_pools_by_asset_a( "SAMUSD" ); BOOST_REQUIRE_EQUAL( pools.size(), 3u ); BOOST_CHECK( !pools.front().statistics.valid() ); - BOOST_CHECK( pools.front().id == sam_lpo2.id ); - BOOST_CHECK( pools.back().id == ted_lpo2.id ); + BOOST_CHECK( pools.front().id == sam_lpo2.get_id() ); + BOOST_CHECK( pools.back().id == ted_lpo2.get_id() ); // pagination and with statistics - pools = db_api.get_liquidity_pools_by_asset_a( "SAMUSD", 2, ted_lpo2.id, true ); + pools = db_api.get_liquidity_pools_by_asset_a( "SAMUSD", 2, ted_lpo2.get_id(), true ); BOOST_REQUIRE_EQUAL( pools.size(), 1u ); BOOST_CHECK( pools.front().statistics.valid() ); - BOOST_CHECK( pools.front().id == ted_lpo2.id ); + BOOST_CHECK( pools.front().id == ted_lpo2.get_id() ); // get_liquidity_pools_by_asset_b pools = db_api.get_liquidity_pools_by_asset_b( "TEDUSD" ); BOOST_REQUIRE_EQUAL( pools.size(), 3u ); BOOST_CHECK( !pools.front().statistics.valid() ); - BOOST_CHECK( pools.front().id == sam_lpo2.id ); - BOOST_CHECK( pools.back().id == ted_lpo3.id ); + BOOST_CHECK( pools.front().id == sam_lpo2.get_id() ); + BOOST_CHECK( pools.back().id == ted_lpo3.get_id() ); // pagination and with statistics - pools = db_api.get_liquidity_pools_by_asset_b( "TEDUSD", 2, sam_lpo1.id, true ); + pools = db_api.get_liquidity_pools_by_asset_b( "TEDUSD", 2, sam_lpo1.get_id(), true ); BOOST_REQUIRE_EQUAL( pools.size(), 2u ); BOOST_CHECK( pools.front().statistics.valid() ); - BOOST_CHECK( pools.front().id == sam_lpo2.id ); - BOOST_CHECK( pools.back().id == ted_lpo1.id ); + BOOST_CHECK( pools.front().id == sam_lpo2.get_id() ); + BOOST_CHECK( pools.back().id == ted_lpo1.get_id() ); // get_liquidity_pools_by_one_asset pools = db_api.get_liquidity_pools_by_one_asset( "SAMUSD" ); BOOST_REQUIRE_EQUAL( pools.size(), 4u ); BOOST_CHECK( !pools.front().statistics.valid() ); - BOOST_CHECK( pools.front().id == sam_lpo1.id ); - BOOST_CHECK( pools.back().id == ted_lpo2.id ); + BOOST_CHECK( pools.front().id == sam_lpo1.get_id() ); + BOOST_CHECK( pools.back().id == ted_lpo2.get_id() ); // pagination and 
with statistics pools = db_api.get_liquidity_pools_by_one_asset( "SAMUSD", 3, liquidity_pool_id_type(), true ); BOOST_REQUIRE_EQUAL( pools.size(), 3u ); BOOST_CHECK( pools.front().statistics.valid() ); - BOOST_CHECK( pools.front().id == sam_lpo1.id ); - BOOST_CHECK( pools.back().id == ted_lpo1.id ); + BOOST_CHECK( pools.front().id == sam_lpo1.get_id() ); + BOOST_CHECK( pools.back().id == ted_lpo1.get_id() ); // get_liquidity_pools_by_both_asset pools = db_api.get_liquidity_pools_by_both_assets( "SAMUSD", "TEDUSD" ); BOOST_REQUIRE_EQUAL( pools.size(), 2u ); BOOST_CHECK( !pools.front().statistics.valid() ); - BOOST_CHECK( pools.front().id == sam_lpo2.id ); - BOOST_CHECK( pools.back().id == ted_lpo1.id ); + BOOST_CHECK( pools.front().id == sam_lpo2.get_id() ); + BOOST_CHECK( pools.back().id == ted_lpo1.get_id() ); // pagination and with statistics - pools = db_api.get_liquidity_pools_by_both_assets( "SAMUSD", "TEDUSD", 3, ted_lpo2.id, true ); + pools = db_api.get_liquidity_pools_by_both_assets( "SAMUSD", "TEDUSD", 3, ted_lpo2.get_id(), true ); BOOST_REQUIRE_EQUAL( pools.size(), 0u ); // get_liquidity_pools_by_share_asset @@ -1462,27 +1483,23 @@ BOOST_AUTO_TEST_CASE( liquidity_pool_apis_test ) BOOST_REQUIRE_EQUAL( opools.size(), 2u ); BOOST_CHECK( opools.front().valid() ); BOOST_CHECK( opools.front()->statistics.valid() ); - BOOST_CHECK( opools.front()->id == sam_lpo1.id ); + BOOST_CHECK( opools.front()->id == sam_lpo1.get_id() ); BOOST_CHECK( !opools.back().valid() ); // get_liquidity_pools_by_owner pools = db_api.get_liquidity_pools_by_owner( "sam" ); BOOST_REQUIRE_EQUAL( pools.size(), 2u ); BOOST_CHECK( !pools.front().statistics.valid() ); - BOOST_CHECK( pools.front().id == sam_lpo1.id ); - BOOST_CHECK( pools.back().id == sam_lpo2.id ); + BOOST_CHECK( pools.front().id == sam_lpo1.get_id() ); + BOOST_CHECK( pools.back().id == sam_lpo2.get_id() ); // pagination and with statistics - pools = db_api.get_liquidity_pools_by_owner( "ted", 5, sam_lpo1.id, true ); - BOOST_REQUIRE_EQUAL( pools.size(), 3u ); + pools = db_api.get_liquidity_pools_by_owner( "ted", 5, ted_lp2.get_id(), true ); + BOOST_REQUIRE_EQUAL( pools.size(), 2u ); BOOST_CHECK( pools.front().statistics.valid() ); - BOOST_CHECK( pools.front().id == ted_lpo1.id ); - BOOST_CHECK( pools.back().id == ted_lpo3.id ); + BOOST_CHECK( pools.front().id == ted_lpo2.get_id() ); + BOOST_CHECK( pools.back().id == ted_lpo3.get_id() ); - } catch (fc::exception& e) { - edump((e.to_detail_string())); - throw; - } -} +} FC_CAPTURE_LOG_AND_RETHROW( (0) ) } BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/margin_call_fee_tests.cpp b/tests/tests/margin_call_fee_tests.cpp index f86f3ce09f..acb8592ea6 100644 --- a/tests/tests/margin_call_fee_tests.cpp +++ b/tests/tests/margin_call_fee_tests.cpp @@ -320,7 +320,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // Initialize tokens // CORE asset exists by default const asset_object &core = asset_id_type()(db); - const asset_id_type core_id = core.id; + const asset_id_type core_id = core.get_id(); const int64_t CORE_UNIT = asset::scaled_precision(core.precision).value; // 100000 satoshi CORE in 1 CORE // Create the SMARTBIT asset @@ -333,7 +333,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // Obtain asset object after a block is generated to obtain the final object that is commited to the database generate_block(); const asset_object smartbit = get_asset("SMARTBIT"); - const asset_id_type smartbit_id = smartbit.id; + const asset_id_type 
smartbit_id = smartbit.get_id(); update_feed_producers(smartbit, {feedproducer_id}); // Initialize token balance of actors @@ -371,7 +371,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // Bob retains the asset in his own balances, or transfers it, or sells it is not critical // because his debt position is what will be tracked. ////// - call_order_id_type bob_call_id = (*borrow(bob, bob_initial_smart, bob_initial_core)).id; + call_order_id_type bob_call_id = (*borrow(bob, bob_initial_smart, bob_initial_core)).get_id(); BOOST_REQUIRE_EQUAL(get_balance(bob, smartbit), 200 * SMARTBIT_UNIT); BOOST_CHECK(!smartbit.bitasset_data(db).has_settlement()); // No global settlement const price bob_initial_cr = bob_call_id(db).collateralization(); // Units of collateral / debt @@ -441,7 +441,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // asset alice_sell_fee = db.current_fee_schedule().set_fee(trx.operations.back()); sign(trx, alice_private_key); processed_transaction ptx = PUSH_TX(db, trx); // No exception should be thrown - limit_order_id_type alice_order_id = ptx.operation_results[0].get(); + limit_order_id_type alice_order_id { ptx.operation_results[0].get() }; // Margin call should exchange all of the available debt (X) for X*(MSSR-MCFR)/settlement_price // The match price should be the settlement_price/(MSSR-MCFR) = settlement_price/(MSSR-MCFR) @@ -589,7 +589,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // Initialize tokens // CORE asset exists by default const asset_object &core = asset_id_type()(db); - const asset_id_type core_id = core.id; + const asset_id_type core_id = core.get_id(); const int64_t CORE_UNIT = asset::scaled_precision(core.precision).value; // 100000 satoshi CORE in 1 CORE // Create the SMARTBIT asset @@ -602,7 +602,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // Obtain asset object after a block is generated to obtain the final object that is commited to the database generate_block(); const asset_object smartbit = get_asset("SMARTBIT"); - const asset_id_type smartbit_id = smartbit.id; + const asset_id_type smartbit_id = smartbit.get_id(); update_feed_producers(smartbit, {feedproducer_id}); // Initialize token balance of actors @@ -650,7 +650,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // Bob retains the asset in his own balances, or transfers it, or sells it is not critical // because his debt position is what will be tracked. 
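The match-price comments in the hunks above use the margin call fee relation these tests assert: for X units of debt bought off a margin call, the matched limit order receives X*(MSSR-MCFR)/settlement_price in collateral, the call gives up X*MSSR/settlement_price, and the difference is the margin call fee. A worked illustration with hypothetical numbers (not the values used in these test cases), written as C++ comments so it reads alongside the test code:

// Hypothetical example (illustrative values only, not taken from this test):
//   settlement_price = 1 debt per 20 collateral = 0.05 debt / collateral
//   MSSR = 1.10, MCFR = 0.05  =>  MSSR - MCFR = 1.05
// For X = 100 debt taken from the margin call:
//   paid to the limit order = X * (MSSR - MCFR) / settlement_price = 100 * 1.05 / 0.05 = 2100 collateral
//   paid by the call        = X * MSSR / settlement_price          = 100 * 1.10 / 0.05 = 2200 collateral
//   margin call fee         = 2200 - 2100                          = 100 collateral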
////// - call_order_id_type bob_call_id = (*borrow(bob, bob_initial_smart, bob_initial_core)).id; + call_order_id_type bob_call_id = (*borrow(bob, bob_initial_smart, bob_initial_core)).get_id(); BOOST_REQUIRE_EQUAL(get_balance(bob, smartbit), 200 * SMARTBIT_UNIT); BOOST_CHECK(!smartbit.bitasset_data(db).has_settlement()); // No global settlement const price bob_initial_cr = bob_call_id(db).collateralization(); // Units of collateral / debt @@ -697,7 +697,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // **Bob's margin call SHOULD NOT be affected.** ////// // Charlie obtains his SMARTBIT by borrowing it from the blockchain - call_order_id_type charlie_call_id = (*borrow(charlie, charlie_initial_smart, charlie_initial_core)).id; + call_order_id_type charlie_call_id = (*borrow(charlie, charlie_initial_smart, charlie_initial_core)).get_id(); BOOST_REQUIRE_EQUAL(get_balance(charlie, smartbit), 200 * SMARTBIT_UNIT); BOOST_CHECK(!smartbit.bitasset_data(db).has_settlement()); // No global settlement const price charlie_initial_cr = charlie_call_id(db).collateralization(); // Units of collateral / debt @@ -743,7 +743,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // asset charlie_sell_fee = db.current_fee_schedule().set_fee(trx.operations.back()); sign(trx, charlie_private_key); processed_transaction ptx = PUSH_TX(db, trx); // No exception should be thrown - limit_order_id_type charlie_order_id = ptx.operation_results[0].get(); + limit_order_id_type charlie_order_id { ptx.operation_results[0].get() }; // Check Charlies's limit order is still open BOOST_CHECK(db.find(charlie_order_id)); @@ -792,7 +792,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // asset alice_sell_fee = db.current_fee_schedule().set_fee(trx.operations.back()); sign(trx, alice_private_key); ptx = PUSH_TX(db, trx); // No exception should be thrown - limit_order_id_type alice_order_id = ptx.operation_results[0].get(); + limit_order_id_type alice_order_id { ptx.operation_results[0].get() }; // Margin call should exchange all of the available debt (X) for X*(MSSR-MCFR)/settlement_price // Payment to limit order = X*(MSSR-MCFR)/settlement_price @@ -920,7 +920,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // Initialize tokens // CORE asset exists by default const asset_object &core = asset_id_type()(db); - const asset_id_type core_id = core.id; + const asset_id_type core_id = core.get_id(); const int64_t CORE_UNIT = asset::scaled_precision(core.precision).value; // 100000 satoshi CORE in 1 CORE // Create the SMARTBIT asset @@ -933,7 +933,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // Obtain asset object after a block is generated to obtain the final object that is commited to the database generate_block(); const asset_object smartbit = get_asset("SMARTBIT"); - const asset_id_type smartbit_id = smartbit.id; + const asset_id_type smartbit_id = smartbit.get_id(); update_feed_producers(smartbit, {feedproducer_id}); // Initialize token balance of actors @@ -972,7 +972,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // because his debt position is what will be tracked. 
////// const uint16_t tcr = 2200; // Bob's target collateral ratio (TCR) 220% expressed in terms of GRAPHENE_COLLATERAL_RATIO_DENOM - call_order_id_type bob_call_id = (*borrow(bob, bob_initial_smart, bob_initial_core, tcr)).id; + call_order_id_type bob_call_id = (*borrow(bob, bob_initial_smart, bob_initial_core, tcr)).get_id(); BOOST_REQUIRE_EQUAL(get_balance(bob, smartbit), 200 * SMARTBIT_UNIT); BOOST_CHECK(!smartbit.bitasset_data(db).has_settlement()); // No global settlement const price bob_initial_cr = bob_call_id(db).collateralization(); // Units of collateral / debt @@ -1044,7 +1044,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // asset alice_sell_fee = db.current_fee_schedule().set_fee(trx.operations.back()); sign(trx, alice_private_key); processed_transaction ptx = PUSH_TX(db, trx); // No exception should be thrown - limit_order_id_type alice_order_id = ptx.operation_results[0].get(); + limit_order_id_type alice_order_id { ptx.operation_results[0].get() }; // The match price **as maker** should be the settlement_price/(MSSR-MCFR) = settlement_price/(MSSR-MCFR) const uint16_t ratio_numerator = current_feed.maximum_short_squeeze_ratio - smartbit_margin_call_fee_ratio; @@ -1267,7 +1267,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // Initialize tokens // CORE asset exists by default const asset_object &core = asset_id_type()(db); - const asset_id_type core_id = core.id; + const asset_id_type core_id = core.get_id(); const int64_t CORE_UNIT = asset::scaled_precision(core.precision).value; // 100000 satoshi CORE in 1 CORE // Create the SMARTBIT asset @@ -1280,7 +1280,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // Obtain asset object after a block is generated to obtain the final object that is commited to the database generate_block(); const asset_object smartbit = get_asset("SMARTBIT"); - const asset_id_type smartbit_id = smartbit.id; + const asset_id_type smartbit_id = smartbit.get_id(); update_feed_producers(smartbit, {feedproducer_id}); // Initialize token balance of actors @@ -1340,7 +1340,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // 3. (Order 1: Limit order) Alice places a **"large"** limit order to sell SMARTBIT. 
////// // Alice borrows SMARTBIT - call_order_id_type alice_call_id = (*borrow(alice, alice_initial_smart, alice_initial_core)).id; + call_order_id_type alice_call_id = (*borrow(alice, alice_initial_smart, alice_initial_core)).get_id(); BOOST_CHECK_EQUAL(get_balance(alice_id(db), smartbit_id(db)), 500 * SMARTBIT_UNIT); BOOST_CHECK_EQUAL(get_balance(alice_id, core_id), 0 * CORE_UNIT); BOOST_CHECK(!smartbit.bitasset_data(db).has_settlement()); // No global settlement @@ -1363,7 +1363,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // asset alice_sell_fee = db.current_fee_schedule().set_fee(trx.operations.back()); sign(trx, alice_private_key); processed_transaction ptx = PUSH_TX(db, trx); // No exception should be thrown - limit_order_id_type alice_order_id = ptx.operation_results[0].get(); + limit_order_id_type alice_order_id { ptx.operation_results[0].get() }; // Alice should have no balance BOOST_CHECK_EQUAL(get_balance(alice_id(db), smartbit_id(db)), 0 * SMARTBIT_UNIT); @@ -1377,7 +1377,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) ////// const asset bob_initial_debt_smart = bob_initial_smart; const asset bob_initial_debt_collateral = bob_initial_core; - call_order_id_type bob_call_id = (*borrow(bob, bob_initial_debt_smart, bob_initial_debt_collateral)).id; + call_order_id_type bob_call_id = (*borrow(bob, bob_initial_debt_smart, bob_initial_debt_collateral)).get_id(); // Bobs's balances should reflect that CORE was used to create SMARTBIT BOOST_CHECK_EQUAL(get_balance(bob_id, smartbit_id), 200 * SMARTBIT_UNIT); @@ -1565,7 +1565,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // Initialize tokens // CORE asset exists by default const asset_object &core = asset_id_type()(db); - const asset_id_type core_id = core.id; + const asset_id_type core_id = core.get_id(); const int64_t CORE_UNIT = asset::scaled_precision(core.precision).value; // 100000 satoshi CORE in 1 CORE // Create the SMARTBIT asset @@ -1578,7 +1578,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // Obtain asset object after a block is generated to obtain the final object that is commited to the database generate_block(); const asset_object smartbit = get_asset("SMARTBIT"); - const asset_id_type smartbit_id = smartbit.id; + const asset_id_type smartbit_id = smartbit.get_id(); update_feed_producers(smartbit, {feedproducer_id}); // Initialize token balance of actors @@ -1664,7 +1664,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // asset alice_sell_fee = db.current_fee_schedule().set_fee(trx.operations.back()); sign(trx, alice_private_key); processed_transaction ptx = PUSH_TX(db, trx); // No exception should be thrown - limit_order_id_type alice_order_id = ptx.operation_results[0].get(); + limit_order_id_type alice_order_id { ptx.operation_results[0].get() }; // Alice should have no balance BOOST_CHECK_EQUAL(get_balance(alice_id(db), smartbit_id(db)), 0 * SMARTBIT_UNIT); @@ -1677,7 +1677,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // because his debt position is what will be tracked. 
////// const uint16_t tcr = 2200; // Bob's target collateral ratio (TCR) 220% expressed in terms of GRAPHENE_COLLATERAL_RATIO_DENOM - call_order_id_type bob_call_id = (*borrow(bob, bob_initial_smart, bob_initial_core, tcr)).id; + call_order_id_type bob_call_id = (*borrow(bob, bob_initial_smart, bob_initial_core, tcr)).get_id(); BOOST_REQUIRE_EQUAL(get_balance(bob, smartbit), 200 * SMARTBIT_UNIT); BOOST_CHECK(!smartbit.bitasset_data(db).has_settlement()); // No global settlement const price bob_initial_cr = bob_call_id(db).collateralization(); // Units of collateral / debt @@ -1934,7 +1934,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // Initialize tokens // CORE asset exists by default const asset_object &core = asset_id_type()(db); - const asset_id_type core_id = core.id; + const asset_id_type core_id = core.get_id(); const int64_t CORE_UNIT = asset::scaled_precision(core.precision).value; // 100000 satoshi CORE in 1 CORE // Create the SMARTBIT asset @@ -1948,7 +1948,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // Obtain asset object after a block is generated to obtain the final object that is commited to the database generate_block(); const asset_object smartbit = get_asset("SMARTBIT"); - const asset_id_type smartbit_id = smartbit.id; + const asset_id_type smartbit_id = smartbit.get_id(); update_feed_producers(smartbit, {feedproducer_id}); // Initialize token balance of actors @@ -2037,7 +2037,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // 3. (Order 1: Limit order) Alice places a **"large"** limit order to sell SMARTBIT. ////// // Alice borrows SMARTBIT - call_order_id_type alice_call_id = (*borrow(alice, alice_initial_smart, alice_initial_core)).id; + call_order_id_type alice_call_id = (*borrow(alice, alice_initial_smart, alice_initial_core)).get_id(); BOOST_CHECK_EQUAL(get_balance(alice_id(db), smartbit_id(db)), 500 * SMARTBIT_UNIT); BOOST_CHECK_EQUAL(get_balance(alice_id, core_id), 0 * CORE_UNIT); BOOST_CHECK(!smartbit.bitasset_data(db).has_settlement()); // No global settlement @@ -2065,7 +2065,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // asset alice_sell_fee = db.current_fee_schedule().set_fee(trx.operations.back()); sign(trx, alice_private_key); processed_transaction ptx = PUSH_TX(db, trx); // No exception should be thrown - limit_order_id_type alice_order_id = ptx.operation_results[0].get(); + limit_order_id_type alice_order_id { ptx.operation_results[0].get() }; // Alice should have no balance BOOST_CHECK_EQUAL(get_balance(alice_id(db), smartbit_id(db)), 0 * SMARTBIT_UNIT); @@ -2080,7 +2080,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) ////// const asset bob_initial_debt_smart = bob_initial_smart; const asset bob_initial_debt_collateral = bob_initial_core; - call_order_id_type bob_call_id = (*borrow(bob, bob_initial_debt_smart, bob_initial_debt_collateral)).id; + call_order_id_type bob_call_id = (*borrow(bob, bob_initial_debt_smart, bob_initial_debt_collateral)).get_id(); // Bobs's balances should reflect that CORE was used to create SMARTBIT BOOST_CHECK_EQUAL(get_balance(bob_id, smartbit_id), 200 * SMARTBIT_UNIT); @@ -2313,7 +2313,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // CORE asset exists by default asset_object core = asset_id_type()(db); - const asset_id_type core_id = core.id; + const asset_id_type core_id = core.get_id(); // Fund actors uint64_t 
initial_balance_core = 10000000; @@ -2493,7 +2493,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // Approve the proposal - proposal_id_type pid = processed.operation_results[0].get(); + proposal_id_type pid { processed.operation_results[0].get() }; proposal_update_operation pup; pup.fee_paying_account = assetowner_id; @@ -2563,7 +2563,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // Approve the proposal - proposal_id_type pid = processed.operation_results[0].get(); + proposal_id_type pid { processed.operation_results[0].get() }; proposal_update_operation pup; pup.fee_paying_account = assetowner_id; @@ -2635,7 +2635,7 @@ BOOST_FIXTURE_TEST_SUITE(margin_call_fee_tests, bitasset_database_fixture) // Approve the proposal - proposal_id_type pid = processed.operation_results[0].get(); + proposal_id_type pid { processed.operation_results[0].get() }; proposal_update_operation pup; pup.fee_paying_account = assetowner_id; diff --git a/tests/tests/market_fee_sharing_tests.cpp b/tests/tests/market_fee_sharing_tests.cpp index 4e6b12b35f..6e980c4689 100644 --- a/tests/tests/market_fee_sharing_tests.cpp +++ b/tests/tests/market_fee_sharing_tests.cpp @@ -35,7 +35,7 @@ struct reward_database_fixture : database_fixture const fc::ecc::private_key& private_key, const asset_id_type& asset_id, uint16_t reward_percent, - const whitelist_market_fee_sharing_t &whitelist_market_fee_sharing = whitelist_market_fee_sharing_t{}, + const whitelist_market_fee_sharing_t &whitelist_market_fee_sharing = {}, const flat_set &blacklist = flat_set()) { asset_update_operation op; @@ -286,8 +286,8 @@ BOOST_AUTO_TEST_CASE(asset_rewards_test) auto alice = register_account("alice", alicereferrer); auto bob = register_account("bob", bobreferrer); - transfer( committee_account, alice.id, core_asset(1000000) ); - transfer( committee_account, bob.id, core_asset(1000000) ); + transfer( committee_account, alice.get_id(), core_asset(1000000) ); + transfer( committee_account, bob.get_id(), core_asset(1000000) ); transfer( committee_account, izzy_id, core_asset(1000000) ); transfer( committee_account, jill_id, core_asset(1000000) ); @@ -297,8 +297,8 @@ BOOST_AUTO_TEST_CASE(asset_rewards_test) constexpr auto izzycoin_market_percent = 10*GRAPHENE_1_PERCENT; constexpr auto jillcoin_market_percent = 20*GRAPHENE_1_PERCENT; - asset_id_type izzycoin_id = create_bitasset( "IZZYCOIN", izzy_id, izzycoin_market_percent ).id; - asset_id_type jillcoin_id = create_bitasset( "JILLCOIN", jill_id, jillcoin_market_percent ).id; + asset_id_type izzycoin_id = create_bitasset( "IZZYCOIN", izzy_id, izzycoin_market_percent ).get_id(); + asset_id_type jillcoin_id = create_bitasset( "JILLCOIN", jill_id, jillcoin_market_percent ).get_id(); update_asset(izzy_id, izzy_private_key, izzycoin_id, izzycoin_reward_percent); update_asset(jill_id, jill_private_key, jillcoin_id, jillcoin_reward_percent); @@ -330,12 +330,14 @@ BOOST_AUTO_TEST_CASE(asset_rewards_test) enable_fees(); // Alice and Bob create some coins - borrow( alice.id, _izzy( 1500), core_asset( 600000) ); - borrow( bob.id, _jill(2000), core_asset(180000) ); + borrow( alice.get_id(), _izzy( 1500), core_asset( 600000) ); + borrow( bob.get_id(), _jill(2000), core_asset(180000) ); // Alice and Bob place orders which match - create_sell_order( alice.id, _izzy(1000), _jill(1500) ); // Alice is willing to sell her 1000 Izzy's for 1.5 Jill - create_sell_order( bob.id, _jill(1500), _izzy(1000) ); // Bob is buying up to 1500 Izzy's for up to 0.6 Jill + 
create_sell_order( alice.get_id(), _izzy(1000), _jill(1500) ); // Alice is willing to sell her 1000 Izzy's + // for 1.5 Jill + create_sell_order( bob.get_id(), _jill(1500), _izzy(1000) ); // Bob is buying up to 1500 Izzy's + // for up to 0.6 Jill // 1000 Izzys and 1500 Jills are matched, so the fees should be // 100 Izzy (10%) and 300 Jill (20%). @@ -368,7 +370,8 @@ BOOST_AUTO_TEST_CASE(asset_rewards_test) const auto jillcoin_market_fee = calculate_percent(_jill(1500).amount, jillcoin_market_percent); const auto jillcoin_reward = calculate_percent(jillcoin_market_fee, jillcoin_reward_percent); BOOST_CHECK_EQUAL( jillcoin_reward, alice_refereer_reward + alice_registrar_reward ); - BOOST_CHECK_EQUAL( calculate_percent(jillcoin_reward, alice.referrer_rewards_percentage), alice_refereer_reward ); + BOOST_CHECK_EQUAL( calculate_percent(jillcoin_reward, alice.referrer_rewards_percentage), + alice_refereer_reward ); } FC_LOG_AND_RETHROW() } @@ -384,7 +387,8 @@ BOOST_AUTO_TEST_CASE(asset_claim_reward_test) price price(asset(1, asset_id_type(1)), asset(1)); uint16_t market_fee_percent = 20 * GRAPHENE_1_PERCENT; - const asset_object jillcoin = create_user_issued_asset( "JCOIN", jill, charge_market_fee, price, 2, market_fee_percent ); + const asset_object jillcoin = create_user_issued_asset( "JCOIN", jill, charge_market_fee, price, + 2, market_fee_percent ); const account_object alice = create_account("alice", izzy, izzy, 50/*0.5%*/); const account_object bob = create_account("bob", izzy, izzy, 50/*0.5%*/); @@ -739,7 +743,7 @@ BOOST_AUTO_TEST_CASE(create_asset_via_proposal_test) price core_exchange_rate(asset(1, asset_id_type(1)), asset(1)); asset_create_operation create_op; - create_op.issuer = issuer.id; + create_op.issuer = issuer.get_id(); create_op.fee = asset(); create_op.symbol = "ASSET"; create_op.common_options.max_supply = 0; @@ -827,10 +831,12 @@ BOOST_AUTO_TEST_CASE(issue_asset){ price price(asset(1, asset_id_type(1)), asset(1)); constexpr auto izzycoin_market_percent = 10*GRAPHENE_1_PERCENT; - asset_object izzycoin = create_user_issued_asset( "IZZYCOIN", izzy, charge_market_fee, price, 2, izzycoin_market_percent ); + asset_object izzycoin = create_user_issued_asset( "IZZYCOIN", izzy, charge_market_fee, price, + 2, izzycoin_market_percent ); constexpr auto jillcoin_market_percent = 20*GRAPHENE_1_PERCENT; - asset_object jillcoin = create_user_issued_asset( "JILLCOIN", jill, charge_market_fee, price, 2, jillcoin_market_percent ); + asset_object jillcoin = create_user_issued_asset( "JILLCOIN", jill, charge_market_fee, price, + 2, jillcoin_market_percent ); // Alice and Bob create some coins issue_uia( alice, izzycoin.amount( 100000 ) ); @@ -852,8 +858,10 @@ BOOST_AUTO_TEST_CASE(accumulated_fees_before_hf_test) GET_ACTOR(bob); // Alice and Bob place orders which match - create_sell_order( alice_id, izzycoin.amount(100), jillcoin.amount(300) ); // Alice is willing to sell her Izzy's for 3 Jill - create_sell_order( bob_id, jillcoin.amount(700), izzycoin.amount(200) ); // Bob is buying up to 200 Izzy's for up to 3.5 Jill + create_sell_order( alice_id, izzycoin.amount(100), jillcoin.amount(300) ); // Alice is willing to sell + // her Izzy's for 3 Jill + create_sell_order( bob_id, jillcoin.amount(700), izzycoin.amount(200) ); // Bob is buying up to 200 Izzy's + // for up to 3.5 Jill // 100 Izzys and 300 Jills are matched, so the fees should be // 10 Izzy (10%) and 60 Jill (20%). 
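The fee amounts asserted in the comment above follow from how graphene encodes percentages: GRAPHENE_100_PERCENT is 10000 and GRAPHENE_1_PERCENT is 100, so a market fee of 10*GRAPHENE_1_PERCENT is stored as 1000. A quick check of those numbers, as C++ comments (a sketch of the usual amount * percent / GRAPHENE_100_PERCENT integer math, not a quote of the fixture helpers):

// izzycoin_market_percent = 10 * GRAPHENE_1_PERCENT = 1000
// jillcoin_market_percent = 20 * GRAPHENE_1_PERCENT = 2000
// fee on 100 IZZY matched: 100 * 1000 / GRAPHENE_100_PERCENT = 100 * 1000 / 10000 = 10 IZZY
// fee on 300 JILL matched: 300 * 2000 / GRAPHENE_100_PERCENT = 300 * 2000 / 10000 = 60 JILL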
@@ -876,8 +884,10 @@ BOOST_AUTO_TEST_CASE(accumulated_fees_after_hf_test) GET_ACTOR(bob); // Alice and Bob place orders which match - create_sell_order( alice_id, izzycoin.amount(100), jillcoin.amount(300) ); // Alice is willing to sell her Izzy's for 3 Jill - create_sell_order( bob_id, jillcoin.amount(700), izzycoin.amount(200) ); // Bob is buying up to 200 Izzy's for up to 3.5 Jill + create_sell_order( alice_id, izzycoin.amount(100), jillcoin.amount(300) ); // Alice is willing to sell + // her Izzy's for 3 Jill + create_sell_order( bob_id, jillcoin.amount(700), izzycoin.amount(200) ); // Bob is buying up to 200 Izzy's + // for up to 3.5 Jill // 100 Izzys and 300 Jills are matched, so the fees should be // 10 Izzy (10%) and 60 Jill (20%). @@ -907,8 +917,10 @@ BOOST_AUTO_TEST_CASE(accumulated_fees_with_additional_options_after_hf_test) GET_ACTOR(bob); // Alice and Bob place orders which match - create_sell_order( alice_id, izzycoin.amount(100), jillcoin.amount(300) ); // Alice is willing to sell her Izzy's for 3 Jill - create_sell_order( bob_id, jillcoin.amount(700), izzycoin.amount(200) ); // Bob is buying up to 200 Izzy's for up to 3.5 Jill + create_sell_order( alice_id, izzycoin.amount(100), jillcoin.amount(300) ); // Alice is willing to sell + // her Izzy's for 3 Jill + create_sell_order( bob_id, jillcoin.amount(700), izzycoin.amount(200) ); // Bob is buying up to 200 Izzy's + // for up to 3.5 Jill // 100 Izzys and 300 Jills are matched, so the fees should be // 10 Izzy (10%) and 60 Jill (20%). @@ -937,7 +949,7 @@ BOOST_AUTO_TEST_CASE( create_vesting_balance_with_instant_vesting_policy_test ) set_expiration( db, trx ); processed_transaction ptx = PUSH_TX( db, trx, ~0 ); - const vesting_balance_id_type& vbid = ptx.operation_results.back().get(); + const vesting_balance_id_type vbid { ptx.operation_results.back().get() }; auto withdraw = [&](const asset& amount) { vesting_balance_withdraw_operation withdraw_op; @@ -1023,8 +1035,10 @@ BOOST_AUTO_TEST_CASE(white_list_asset_rewards_test) price price(asset(1, asset_id_type(1)), asset(1)); constexpr auto izzycoin_market_percent = 10*GRAPHENE_1_PERCENT; constexpr auto jillcoin_market_percent = 20*GRAPHENE_1_PERCENT; - const asset_id_type izzycoin_id = create_user_issued_asset( "IZZYCOIN", izzy, charge_market_fee|white_list, price, 0, izzycoin_market_percent ).id; - const asset_id_type jillcoin_id = create_user_issued_asset( "JILLCOIN", jill, charge_market_fee|white_list, price, 0, jillcoin_market_percent ).id; + const asset_id_type izzycoin_id = create_user_issued_asset( "IZZYCOIN", izzy, charge_market_fee|white_list, + price, 0, izzycoin_market_percent ).get_id(); + const asset_id_type jillcoin_id = create_user_issued_asset( "JILLCOIN", jill, charge_market_fee|white_list, + price, 0, jillcoin_market_percent ).get_id(); // Alice and Bob create some coins issue_uia( alice, izzycoin_id(db).amount( 200000 ) ); @@ -1047,8 +1061,10 @@ BOOST_AUTO_TEST_CASE(white_list_asset_rewards_test) BOOST_CHECK( !(is_authorized_asset( db, aliceregistrar_id(db), jillcoin_id(db) )) ); // Alice and Bob place orders which match - create_sell_order( alice.id, izzycoin_id(db).amount(1000), jillcoin_id(db).amount(1500) ); // Alice is willing to sell her 1000 Izzy's for 1.5 Jill - create_sell_order( bob.id, jillcoin_id(db).amount(1500), izzycoin_id(db).amount(1000) ); // Bob is buying up to 1500 Izzy's for up to 0.6 Jill + // Alice is willing to sell her 1000 Izzy's for 1.5 Jill + create_sell_order( alice.get_id(), izzycoin_id(db).amount(1000), 
jillcoin_id(db).amount(1500) ); + // Bob is buying up to 1500 Izzy's for up to 0.6 Jill + create_sell_order( bob.get_id(), jillcoin_id(db).amount(1500), izzycoin_id(db).amount(1000) ); // 1000 Izzys and 1500 Jills are matched, so the fees should be // 100 Izzy (10%) and 300 Jill (20%). @@ -1087,7 +1103,8 @@ BOOST_AUTO_TEST_CASE( create_vesting_balance_object_test ) create_vesting_balance_object(actor_id, vesting_balance_type::worker); create_vesting_balance_object(actor_id, vesting_balance_type::market_fee_sharing); - GRAPHENE_CHECK_THROW(create_vesting_balance_object(actor_id, vesting_balance_type::market_fee_sharing), fc::exception); + GRAPHENE_CHECK_THROW(create_vesting_balance_object(actor_id, vesting_balance_type::market_fee_sharing), + fc::exception); } FC_LOG_AND_RETHROW() } diff --git a/tests/tests/market_rounding_tests.cpp b/tests/tests/market_rounding_tests.cpp index 66698bee45..c3e55d24c0 100644 --- a/tests/tests/market_rounding_tests.cpp +++ b/tests/tests/market_rounding_tests.cpp @@ -49,9 +49,9 @@ BOOST_AUTO_TEST_CASE( trade_amount_equals_zero ) set_expiration( db, trx ); const asset_object& test = create_user_issued_asset( "UIATEST" ); - const asset_id_type test_id = test.id; + const asset_id_type test_id = test.get_id(); const asset_object& core = get_asset( GRAPHENE_SYMBOL ); - const asset_id_type core_id = core.id; + const asset_id_type core_id = core.get_id(); const account_object& core_seller = create_account( "seller1" ); const account_object& core_buyer = create_account("buyer1"); @@ -105,9 +105,9 @@ BOOST_AUTO_TEST_CASE( trade_amount_equals_zero_after_hf_184 ) set_expiration( db, trx ); const asset_object& test = create_user_issued_asset( "UIATEST" ); - const asset_id_type test_id = test.id; + const asset_id_type test_id = test.get_id(); const asset_object& core = get_asset( GRAPHENE_SYMBOL ); - const asset_id_type core_id = core.id; + const asset_id_type core_id = core.get_id(); const account_object& core_seller = create_account( "seller1" ); const account_object& core_buyer = create_account("buyer1"); @@ -158,9 +158,9 @@ BOOST_AUTO_TEST_CASE( limit_limit_rounding_test1 ) ACTORS( (seller)(buyer) ); const asset_object& test = create_user_issued_asset( "UIATEST" ); - const asset_id_type test_id = test.id; + const asset_id_type test_id = test.get_id(); const asset_object& core = get_asset( GRAPHENE_SYMBOL ); - const asset_id_type core_id = core.id; + const asset_id_type core_id = core.get_id(); transfer( committee_account(db), seller, asset( 100000000 ) ); @@ -172,7 +172,7 @@ BOOST_AUTO_TEST_CASE( limit_limit_rounding_test1 ) BOOST_CHECK_EQUAL(get_balance(seller, core), 100000000); // seller sells 3 core for 31 test, price 10.33 test per core - limit_order_id_type sell_id = create_sell_order( seller, core.amount(3), test.amount(31) )->id; + limit_order_id_type sell_id = create_sell_order( seller, core.amount(3), test.amount(31) )->get_id(); // buyer buys 2 core with 25 test, price 12.5 test per core // the order is filled immediately @@ -189,11 +189,11 @@ BOOST_AUTO_TEST_CASE( limit_limit_rounding_test1 ) generate_block(); // buyer buys 2 core with 25 test, price 12.5 test per core - limit_order_id_type buy_id = create_sell_order( buyer_id, asset(25,test_id), asset(2,core_id) )->id; + limit_order_id_type buy_id = create_sell_order( buyer_id, asset(25,test_id), asset(2,core_id) )->get_id(); generate_block(); - BOOST_CHECK( !db.find_object( sell_id ) ); // sell order is filled + BOOST_CHECK( !db.find( sell_id ) ); // sell order is filled BOOST_CHECK_EQUAL( 
buy_id(db).for_sale.value, 15 ); // 10 test sold, 15 remaining BOOST_CHECK_EQUAL(get_balance(seller_id, core_id), 99999997); @@ -224,9 +224,9 @@ BOOST_AUTO_TEST_CASE( limit_limit_rounding_test1_after_hf_342 ) ACTORS( (seller)(buyer) ); const asset_object& test = create_user_issued_asset( "UIATEST" ); - const asset_id_type test_id = test.id; + const asset_id_type test_id = test.get_id(); const asset_object& core = get_asset( GRAPHENE_SYMBOL ); - const asset_id_type core_id = core.id; + const asset_id_type core_id = core.get_id(); transfer( committee_account(db), seller, asset( 100000000 ) ); @@ -238,7 +238,7 @@ BOOST_AUTO_TEST_CASE( limit_limit_rounding_test1_after_hf_342 ) BOOST_CHECK_EQUAL(get_balance(seller, core), 100000000); // seller sells 3 core for 31 test, price 10.33 test per core - limit_order_id_type sell_id = create_sell_order( seller, core.amount(3), test.amount(31) )->id; + limit_order_id_type sell_id = create_sell_order( seller, core.amount(3), test.amount(31) )->get_id(); // buyer buys 2 core with 25 test, price 12.5 test per core // the order is filled immediately @@ -255,11 +255,11 @@ BOOST_AUTO_TEST_CASE( limit_limit_rounding_test1_after_hf_342 ) set_expiration( db, trx ); // buyer buys 2 core with 25 test, price 12.5 test per core - limit_order_id_type buy_id = create_sell_order( buyer_id, asset(25,test_id), asset(2,core_id) )->id; + limit_order_id_type buy_id = create_sell_order( buyer_id, asset(25,test_id), asset(2,core_id) )->get_id(); generate_block(); - BOOST_CHECK( !db.find_object( sell_id ) ); // sell order is filled + BOOST_CHECK( !db.find( sell_id ) ); // sell order is filled BOOST_CHECK_EQUAL( buy_id(db).for_sale.value, 15 ); // 10 test sold according to price 10.33, and 15 remaining BOOST_CHECK_EQUAL(get_balance(buyer_id, core_id), 3); // buyer got 1 more core @@ -287,9 +287,9 @@ BOOST_AUTO_TEST_CASE( limit_limit_rounding_test2 ) ACTORS( (seller)(buyer) ); const asset_object& test = create_user_issued_asset( "UIATEST" ); - const asset_id_type test_id = test.id; + const asset_id_type test_id = test.get_id(); const asset_object& core = get_asset( GRAPHENE_SYMBOL ); - const asset_id_type core_id = core.id; + const asset_id_type core_id = core.get_id(); transfer( committee_account(db), seller, asset( 100000000 ) ); @@ -301,11 +301,11 @@ BOOST_AUTO_TEST_CASE( limit_limit_rounding_test2 ) BOOST_CHECK_EQUAL(get_balance(seller, core), 100000000); // buyer buys 17 core with 3 test, price 3/17 = 0.176 test per core - limit_order_id_type tmp_buy_id = create_sell_order( buyer, test.amount(3), core.amount(17) )->id; + limit_order_id_type tmp_buy_id = create_sell_order( buyer, test.amount(3), core.amount(17) )->get_id(); // seller sells 33 core for 5 test, price 5/33 = 0.1515 test per core - limit_order_id_type sell_id = create_sell_order( seller, core.amount(33), test.amount(5) )->id; + limit_order_id_type sell_id = create_sell_order( seller, core.amount(33), test.amount(5) )->get_id(); - BOOST_CHECK( !db.find_object( tmp_buy_id ) ); // buy order is filled + BOOST_CHECK( !db.find( tmp_buy_id ) ); // buy order is filled BOOST_CHECK_EQUAL( sell_id(db).for_sale.value, 16 ); // 17 core sold, 16 remaining BOOST_CHECK_EQUAL(get_balance(seller, core), 99999967); @@ -318,11 +318,11 @@ BOOST_AUTO_TEST_CASE( limit_limit_rounding_test2 ) // buyer buys 15 core with 3 test, price 3/15 = 0.2 test per core // even 15 < 16, since it's taker, we'll check with maker's price, then turns out the buy order is bigger - limit_order_id_type buy_id = create_sell_order( buyer_id, asset(3,test_id), 
asset(15,core_id) )->id; + limit_order_id_type buy_id = create_sell_order( buyer_id, asset(3,test_id), asset(15,core_id) )->get_id(); generate_block(); - BOOST_CHECK( !db.find_object( sell_id ) ); // sell order is filled + BOOST_CHECK( !db.find( sell_id ) ); // sell order is filled BOOST_CHECK_EQUAL( buy_id(db).for_sale.value, 1 ); // 2 test sold, 1 remaining BOOST_CHECK_EQUAL(get_balance(seller_id, core_id), 99999967); // seller paid the 16 core which was remaining in the order @@ -354,9 +354,9 @@ BOOST_AUTO_TEST_CASE( limit_limit_rounding_test2_after_hf_342 ) ACTORS( (seller)(buyer) ); const asset_object& test = create_user_issued_asset( "UIATEST" ); - const asset_id_type test_id = test.id; + const asset_id_type test_id = test.get_id(); const asset_object& core = get_asset( GRAPHENE_SYMBOL ); - const asset_id_type core_id = core.id; + const asset_id_type core_id = core.get_id(); transfer( committee_account(db), seller, asset( 100000000 ) ); @@ -368,11 +368,11 @@ BOOST_AUTO_TEST_CASE( limit_limit_rounding_test2_after_hf_342 ) BOOST_CHECK_EQUAL(get_balance(seller, core), 100000000); // buyer buys 17 core with 3 test, price 3/17 = 0.176 test per core - limit_order_id_type tmp_buy_id = create_sell_order( buyer, test.amount(3), core.amount(17) )->id; + limit_order_id_type tmp_buy_id = create_sell_order( buyer, test.amount(3), core.amount(17) )->get_id(); // seller sells 33 core for 5 test, price 5/33 = 0.1515 test per core - limit_order_id_type sell_id = create_sell_order( seller, core.amount(33), test.amount(5) )->id; + limit_order_id_type sell_id = create_sell_order( seller, core.amount(33), test.amount(5) )->get_id(); - BOOST_CHECK( !db.find_object( tmp_buy_id ) ); // buy order is filled + BOOST_CHECK( !db.find( tmp_buy_id ) ); // buy order is filled BOOST_CHECK_EQUAL( sell_id(db).for_sale.value, 16 ); // 17 core sold, 16 remaining BOOST_CHECK_EQUAL(get_balance(seller, core), 99999967); @@ -385,11 +385,11 @@ BOOST_AUTO_TEST_CASE( limit_limit_rounding_test2_after_hf_342 ) // buyer buys 15 core with 3 test, price 3/15 = 0.2 test per core // even 15 < 16, since it's taker, we'll check with maker's price, then turns out the buy order is bigger - limit_order_id_type buy_id = create_sell_order( buyer_id, asset(3,test_id), asset(15,core_id) )->id; + limit_order_id_type buy_id = create_sell_order( buyer_id, asset(3,test_id), asset(15,core_id) )->get_id(); generate_block(); - BOOST_CHECK( !db.find_object( sell_id ) ); // sell order is filled + BOOST_CHECK( !db.find( sell_id ) ); // sell order is filled BOOST_CHECK_EQUAL( buy_id(db).for_sale.value, 1 ); // 2 test sold, 1 remaining BOOST_CHECK_EQUAL(get_balance(buyer_id, core_id), 31); // buyer got 14 more core according to price 0.1515 @@ -426,7 +426,7 @@ BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test1 ) transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); transfer(committee_account, borrower4_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -435,7 +435,7 @@ BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test1 ) publish_feed( bitusd, feedproducer, current_feed ); // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 const call_order_object& call = *borrow( borrower, bitusd.amount(10), asset(1)); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // 
create another position with 310% collateral, call price is 15.5/175 CORE/USD = 62/700 const call_order_object& call2 = *borrow( borrower2, bitusd.amount(100000), asset(15500)); // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 @@ -465,7 +465,7 @@ BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test1 ) // so the seller will pay 10 USD but get nothing. // The remaining 1 USD is too little to get any CORE, so the limit order will be cancelled BOOST_CHECK( !create_sell_order(seller, bitusd.amount(11), core.amount(1)) ); - BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled + BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled BOOST_CHECK_EQUAL( 200000, get_balance(seller, bitusd) ); // the seller paid 10 USD BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); // the seller got nothing BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); @@ -501,7 +501,7 @@ BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test2 ) transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); transfer(committee_account, borrower4_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -510,7 +510,7 @@ BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test2 ) publish_feed( bitusd, feedproducer, current_feed ); // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 const call_order_object& call = *borrow( borrower, bitusd.amount(10), asset(1)); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(17500)); transfer(borrower, seller, bitusd.amount(10)); @@ -534,8 +534,8 @@ BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test2 ) // This would match with call at price 33 USD / 3 CORE, but call only owes 10 USD, // so the seller will pay 10 USD but get nothing. 
// The remaining USD will be left in the order on the market - limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(33), core.amount(3))->id; - BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled + limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(33), core.amount(3))->get_id(); + BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled BOOST_CHECK_EQUAL( 100010-33, get_balance(seller, bitusd) ); // the seller paid 33 USD BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); // the seller got nothing BOOST_CHECK_EQUAL( 33-10, sell_id(db).for_sale.value ); // the sell order has some USD left @@ -564,8 +564,8 @@ BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test3 ) const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); const auto& core = asset_id_type()(db); - const asset_id_type bitusd_id = bitusd.id; - const asset_id_type core_id = core.id; + const asset_id_type bitusd_id = bitusd.get_id(); + const asset_id_type core_id = core.get_id(); int64_t init_balance(1000000); @@ -574,7 +574,7 @@ BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test3 ) transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); transfer(committee_account, borrower4_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -583,7 +583,7 @@ BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test3 ) publish_feed( bitusd, feedproducer, current_feed ); // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 const call_order_object& call = *borrow( borrower, bitusd.amount(10), asset(1)); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(17500)); transfer(borrower, seller, bitusd.amount(10)); @@ -600,7 +600,7 @@ BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test3 ) BOOST_CHECK_EQUAL( init_balance-1, get_balance(borrower, core) ); // create a limit order which will be matched later - limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(33), core.amount(3))->id; + limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(33), core.amount(3))->get_id(); BOOST_CHECK_EQUAL( 33, sell_id(db).for_sale.value ); BOOST_CHECK_EQUAL( 100010-33, get_balance(seller, bitusd) ); BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); @@ -615,7 +615,7 @@ BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test3 ) // the limit order will match with call at price 33 USD / 3 CORE, but call only owes 10 USD, // so the seller will pay 10 USD but get nothing. 
// The remaining USD will be in the order on the market - BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled + BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled BOOST_CHECK_EQUAL( 100010-33, get_balance(seller_id, bitusd_id) ); // the seller paid 33 USD BOOST_CHECK_EQUAL( 0, get_balance(seller_id, core_id) ); // the seller got nothing BOOST_CHECK_EQUAL( 33-10, sell_id(db).for_sale.value ); // the sell order has some USD left @@ -649,7 +649,7 @@ BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test1_after_hardfork ) transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); transfer(committee_account, borrower4_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -658,7 +658,7 @@ BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test1_after_hardfork ) publish_feed( bitusd, feedproducer, current_feed ); // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 const call_order_object& call = *borrow( borrower, bitusd.amount(10), asset(1)); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create another position with 310% collateral, call price is 15.5/175 CORE/USD = 62/700 const call_order_object& call2 = *borrow( borrower2, bitusd.amount(100000), asset(15500)); // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 @@ -689,7 +689,7 @@ BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test1_after_hardfork ) // Since the call would pay off all debt, let it pay 1 CORE from collateral // The remaining 1 USD is too little to get any CORE, so the limit order will be cancelled BOOST_CHECK( !create_sell_order(seller, bitusd.amount(11), core.amount(1)) ); - BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled + BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled BOOST_CHECK_EQUAL( 200000, get_balance(seller, bitusd) ); // the seller paid 10 USD BOOST_CHECK_EQUAL( 1, get_balance(seller, core) ); // the seller got 1 CORE BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); @@ -725,7 +725,7 @@ BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test2_after_hardfork ) transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); transfer(committee_account, borrower4_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -734,7 +734,7 @@ BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test2_after_hardfork ) publish_feed( bitusd, feedproducer, current_feed ); // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 const call_order_object& call = *borrow( borrower, bitusd.amount(10), asset(1)); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(17500)); transfer(borrower, seller, bitusd.amount(10)); @@ -759,8 +759,8 @@ BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test2_after_hardfork ) // but call only owes 10 USD, // Since the call would pay off all debt, let it pay 
1 CORE from collateral // The remaining USD will be left in the order on the market - limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(33), core.amount(3))->id; - BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled + limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(33), core.amount(3))->get_id(); + BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled BOOST_CHECK_EQUAL( 100010-33, get_balance(seller, bitusd) ); // the seller paid 33 USD BOOST_CHECK_EQUAL( 1, get_balance(seller, core) ); // the seller got 1 CORE BOOST_CHECK_EQUAL( 33-10, sell_id(db).for_sale.value ); // the sell order has some USD left @@ -790,8 +790,8 @@ BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test3_after_hardfork ) const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); const auto& core = asset_id_type()(db); - const asset_id_type bitusd_id = bitusd.id; - const asset_id_type core_id = core.id; + const asset_id_type bitusd_id = bitusd.get_id(); + const asset_id_type core_id = core.get_id(); int64_t init_balance(1000000); @@ -800,7 +800,7 @@ BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test3_after_hardfork ) transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); transfer(committee_account, borrower4_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -809,7 +809,7 @@ BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test3_after_hardfork ) publish_feed( bitusd, feedproducer, current_feed ); // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 const call_order_object& call = *borrow( borrower, bitusd.amount(10), asset(1)); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(17500)); transfer(borrower, seller, bitusd.amount(10)); @@ -826,7 +826,7 @@ BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test3_after_hardfork ) BOOST_CHECK_EQUAL( init_balance-1, get_balance(borrower, core) ); // create a limit order which will be matched later - limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(33), core.amount(3))->id; + limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(33), core.amount(3))->get_id(); BOOST_CHECK_EQUAL( 33, sell_id(db).for_sale.value ); BOOST_CHECK_EQUAL( 100010-33, get_balance(seller, bitusd) ); BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); @@ -841,7 +841,7 @@ BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test3_after_hardfork ) // the limit order will match with call at price 33 USD / 3 CORE, but call only owes 10 USD, // Since the call would pay off all debt, let it pay 1 CORE from collateral // The remaining USD will be in the order on the market - BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled + BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled BOOST_CHECK_EQUAL( 100010-33, get_balance(seller_id, bitusd_id) ); // the seller paid 33 USD BOOST_CHECK_EQUAL( 1, get_balance(seller_id, core_id) ); // the seller got 1 CORE BOOST_CHECK_EQUAL( 33-10, sell_id(db).for_sale.value ); // the sell order has some USD left @@ -876,7 +876,7 @@ BOOST_AUTO_TEST_CASE( limit_call_rounding_test1 
) transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); transfer(committee_account, borrower4_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -885,7 +885,7 @@ BOOST_AUTO_TEST_CASE( limit_call_rounding_test1 ) publish_feed( bitusd, feedproducer, current_feed ); // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 const call_order_object& call = *borrow( borrower, bitusd.amount(20), asset(2)); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(17500)); transfer(borrower, seller, bitusd.amount(20)); @@ -910,8 +910,8 @@ BOOST_AUTO_TEST_CASE( limit_call_rounding_test1 ) // so the seller will pay the whole 20 USD and get 1 CORE, since 20 USD doesn't worth 2 CORE according to price 33/3, // effective price is 20/1 which is worse than the limit order's desired 33/3. // The remaining USD will be left in the order on the market - limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(33), core.amount(3))->id; - BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled + limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(33), core.amount(3))->get_id(); + BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled BOOST_CHECK_EQUAL( 100020-33, get_balance(seller, bitusd) ); // the seller paid 33 USD BOOST_CHECK_EQUAL( 1, get_balance(seller, core) ); // the seller got 1 CORE BOOST_CHECK_EQUAL( 33-20, sell_id(db).for_sale.value ); // the sell order has some USD left @@ -947,7 +947,7 @@ BOOST_AUTO_TEST_CASE( limit_call_rounding_test1_after_hf_342 ) transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); transfer(committee_account, borrower4_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -956,7 +956,7 @@ BOOST_AUTO_TEST_CASE( limit_call_rounding_test1_after_hf_342 ) publish_feed( bitusd, feedproducer, current_feed ); // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 const call_order_object& call = *borrow( borrower, bitusd.amount(20), asset(2)); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(17500)); transfer(borrower, seller, bitusd.amount(20)); @@ -982,8 +982,8 @@ BOOST_AUTO_TEST_CASE( limit_call_rounding_test1_after_hf_342 ) // so the seller will pay 20 USD and get 2 CORE, since 20 USD worths a little more than 1 CORE according to price 120/11, // effective price is 20/2 which is not worse than the limit order's desired 33/3. 
// The remaining USD will be left in the order on the market - limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(33), core.amount(3))->id; - BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled + limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(33), core.amount(3))->get_id(); + BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled BOOST_CHECK_EQUAL( 100020-33, get_balance(seller, bitusd) ); // the seller paid 33 USD BOOST_CHECK_EQUAL( 2, get_balance(seller, core) ); // the seller got 2 CORE BOOST_CHECK_EQUAL( 33-20, sell_id(db).for_sale.value ); // the sell order has some USD left @@ -1021,7 +1021,7 @@ BOOST_AUTO_TEST_CASE( limit_call_rounding_test2 ) transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); transfer(committee_account, borrower4_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -1030,7 +1030,7 @@ BOOST_AUTO_TEST_CASE( limit_call_rounding_test2 ) publish_feed( bitusd, feedproducer, current_feed ); // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 const call_order_object& call = *borrow( borrower, bitusd.amount(20), asset(2)); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(17500)); transfer(borrower, seller, bitusd.amount(20)); @@ -1055,7 +1055,7 @@ BOOST_AUTO_TEST_CASE( limit_call_rounding_test2 ) // so the seller will pay 15 USD and get 1 CORE, // effective price is 15/1. 
BOOST_CHECK( !create_sell_order(seller, bitusd.amount(15), core.amount(1)) ); // the sell order is filled - BOOST_CHECK( db.find( call_id ) != nullptr ); // the first call order did not get filled + BOOST_CHECK( db.find( call_id ) != nullptr ); // the first call order did not get filled BOOST_CHECK_EQUAL( 20-15, call.debt.value ); // call paid 15 USD BOOST_CHECK_EQUAL( 2-1, call.collateral.value ); // call got 1 CORE BOOST_CHECK_EQUAL( 100020-15, get_balance(seller, bitusd) ); // the seller paid 15 USD @@ -1093,7 +1093,7 @@ BOOST_AUTO_TEST_CASE( limit_call_rounding_test2_after_hf_342 ) transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); transfer(committee_account, borrower4_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -1102,7 +1102,7 @@ BOOST_AUTO_TEST_CASE( limit_call_rounding_test2_after_hf_342 ) publish_feed( bitusd, feedproducer, current_feed ); // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 const call_order_object& call = *borrow( borrower, bitusd.amount(20), asset(2)); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(17500)); transfer(borrower, seller, bitusd.amount(20)); @@ -1128,7 +1128,7 @@ BOOST_AUTO_TEST_CASE( limit_call_rounding_test2_after_hf_342 ) // and the extra 4 USD will be returned but not overpaid, // effective price is 11/1 which is close to 120/11. 
BOOST_CHECK( !create_sell_order(seller, bitusd.amount(15), core.amount(1)) ); // the sell order is filled - BOOST_CHECK( db.find( call_id ) != nullptr ); // the first call order did not get filled + BOOST_CHECK( db.find( call_id ) != nullptr ); // the first call order did not get filled BOOST_CHECK_EQUAL( 20-11, call.debt.value ); // call paid 11 USD BOOST_CHECK_EQUAL( 2-1, call.collateral.value ); // call got 1 CORE BOOST_CHECK_EQUAL( 100020-11, get_balance(seller, bitusd) ); // the seller paid 11 USD @@ -1156,8 +1156,8 @@ BOOST_AUTO_TEST_CASE( call_limit_rounding_test1 ) const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); const auto& core = asset_id_type()(db); - const asset_id_type bitusd_id = bitusd.id; - const asset_id_type core_id = core.id; + const asset_id_type bitusd_id = bitusd.get_id(); + const asset_id_type core_id = core.get_id(); int64_t init_balance(1000000); @@ -1166,7 +1166,7 @@ BOOST_AUTO_TEST_CASE( call_limit_rounding_test1 ) transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); transfer(committee_account, borrower4_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -1175,7 +1175,7 @@ BOOST_AUTO_TEST_CASE( call_limit_rounding_test1 ) publish_feed( bitusd, feedproducer, current_feed ); // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 const call_order_object& call = *borrow( borrower, bitusd.amount(20), asset(2)); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(17500)); transfer(borrower, seller, bitusd.amount(20)); @@ -1192,7 +1192,7 @@ BOOST_AUTO_TEST_CASE( call_limit_rounding_test1 ) BOOST_CHECK_EQUAL( init_balance-2, get_balance(borrower, core) ); // create a limit order which will be matched later - limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(33), core.amount(3))->id; + limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(33), core.amount(3))->get_id(); BOOST_CHECK_EQUAL( 33, sell_id(db).for_sale.value ); BOOST_CHECK_EQUAL( 100020-33, get_balance(seller, bitusd) ); BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); @@ -1208,7 +1208,7 @@ BOOST_AUTO_TEST_CASE( call_limit_rounding_test1 ) // so the seller will pay the whole 20 USD and get 1 CORE, since 20 USD doesn't worth 2 CORE according to price 33/3, // effective price is 20/1 which is worse than the limit order's desired 33/3. 
// The remaining USD will be left in the order on the market - BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled + BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled BOOST_CHECK_EQUAL( 100020-33, get_balance(seller_id, bitusd_id) ); // the seller paid 33 USD BOOST_CHECK_EQUAL( 1, get_balance(seller_id, core_id) ); // the seller got 1 CORE BOOST_CHECK_EQUAL( 33-20, sell_id(db).for_sale.value ); // the sell order has some USD left @@ -1236,8 +1236,8 @@ BOOST_AUTO_TEST_CASE( call_limit_rounding_test1_after_hf_342 ) const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); const auto& core = asset_id_type()(db); - const asset_id_type bitusd_id = bitusd.id; - const asset_id_type core_id = core.id; + const asset_id_type bitusd_id = bitusd.get_id(); + const asset_id_type core_id = core.get_id(); int64_t init_balance(1000000); @@ -1246,7 +1246,7 @@ BOOST_AUTO_TEST_CASE( call_limit_rounding_test1_after_hf_342 ) transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); transfer(committee_account, borrower4_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -1255,7 +1255,7 @@ BOOST_AUTO_TEST_CASE( call_limit_rounding_test1_after_hf_342 ) publish_feed( bitusd, feedproducer, current_feed ); // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 const call_order_object& call = *borrow( borrower, bitusd.amount(20), asset(2)); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(17500)); transfer(borrower, seller, bitusd.amount(20)); @@ -1272,7 +1272,7 @@ BOOST_AUTO_TEST_CASE( call_limit_rounding_test1_after_hf_342 ) BOOST_CHECK_EQUAL( init_balance-2, get_balance(borrower, core) ); // create a limit order which will be matched later - limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(33), core.amount(3))->id; + limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(33), core.amount(3))->get_id(); BOOST_CHECK_EQUAL( 33, sell_id(db).for_sale.value ); BOOST_CHECK_EQUAL( 100020-33, get_balance(seller, bitusd) ); BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); @@ -1288,7 +1288,7 @@ BOOST_AUTO_TEST_CASE( call_limit_rounding_test1_after_hf_342 ) // so the seller will pay 20 USD and get 2 CORE, since 20 USD worths a little more than 1 CORE according to price 33/3, // effective price is 20/2 which is not worse than the limit order's desired 33/3. 
// The remaining USD will be left in the order on the market - BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled + BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled BOOST_CHECK_EQUAL( 100020-33, get_balance(seller_id, bitusd_id) ); // the seller paid 33 USD BOOST_CHECK_EQUAL( 2, get_balance(seller_id, core_id) ); // the seller got 2 CORE BOOST_CHECK_EQUAL( 33-20, sell_id(db).for_sale.value ); // the sell order has some USD left @@ -1315,8 +1315,8 @@ BOOST_AUTO_TEST_CASE( call_limit_rounding_test2 ) const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); const auto& core = asset_id_type()(db); - const asset_id_type bitusd_id = bitusd.id; - const asset_id_type core_id = core.id; + const asset_id_type bitusd_id = bitusd.get_id(); + const asset_id_type core_id = core.get_id(); int64_t init_balance(1000000); @@ -1325,7 +1325,7 @@ BOOST_AUTO_TEST_CASE( call_limit_rounding_test2 ) transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); transfer(committee_account, borrower4_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -1334,7 +1334,7 @@ BOOST_AUTO_TEST_CASE( call_limit_rounding_test2 ) publish_feed( bitusd, feedproducer, current_feed ); // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 const call_order_object& call = *borrow( borrower, bitusd.amount(50), asset(5)); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(17500)); transfer(borrower, seller, bitusd.amount(50)); @@ -1352,14 +1352,14 @@ BOOST_AUTO_TEST_CASE( call_limit_rounding_test2 ) BOOST_CHECK_EQUAL( init_balance-5, get_balance(borrower, core) ); // create a buy order which will be matched - limit_order_id_type buy_id = create_sell_order(buyer, core.amount(1), bitusd.amount(10))->id; + limit_order_id_type buy_id = create_sell_order(buyer, core.amount(1), bitusd.amount(10))->get_id(); BOOST_CHECK_EQUAL( 1, buy_id(db).for_sale.value ); BOOST_CHECK_EQUAL( 1000000-1, get_balance(buyer, core) ); BOOST_CHECK_EQUAL( 0, get_balance(buyer, bitusd) ); // create a limit order to fill the buy order, and remaining amounts will be matched later - limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(31), core.amount(2))->id; - BOOST_CHECK( !db.find( buy_id ) ); // the buy order is filled + limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(31), core.amount(2))->get_id(); + BOOST_CHECK( !db.find( buy_id ) ); // the buy order is filled BOOST_CHECK_EQUAL( 1000000-1, get_balance(buyer, core) ); BOOST_CHECK_EQUAL( 10, get_balance(buyer, bitusd) ); // buyer got 10 usd BOOST_CHECK_EQUAL( 21, sell_id(db).for_sale.value ); // remaining amount of sell order is 21 @@ -1367,7 +1367,7 @@ BOOST_AUTO_TEST_CASE( call_limit_rounding_test2 ) BOOST_CHECK_EQUAL( 1, get_balance(seller, core) ); // seller got 1 core // create another limit order which will be matched later - limit_order_id_type sell_id2 = create_sell_order(seller2, bitusd.amount(14), core.amount(1))->id; + limit_order_id_type sell_id2 = create_sell_order(seller2, bitusd.amount(14), core.amount(1))->get_id(); BOOST_CHECK_EQUAL( 14, 
sell_id2(db).for_sale.value ); BOOST_CHECK_EQUAL( 100000-14, get_balance(seller2, bitusd) ); BOOST_CHECK_EQUAL( 0, get_balance(seller2, core) ); @@ -1385,9 +1385,9 @@ BOOST_AUTO_TEST_CASE( call_limit_rounding_test2 ) // effective price is 21/1 which is much bigger than 31/2; // then, call will match with sell_id2, which has 14 USD remaining, with price 14 USD / 1 CORE, // so the seller will pay 14 USD, get 1 CORE since 14 USD worths just 1 CORE according to price 14/1 - BOOST_CHECK( !db.find( sell_id ) ); // the sell order is filled - BOOST_CHECK( !db.find( sell_id2 ) ); // the other sell order is filled - BOOST_CHECK( db.find( call_id ) != nullptr ); // the first call order did not get filled + BOOST_CHECK( !db.find( sell_id ) ); // the sell order is filled + BOOST_CHECK( !db.find( sell_id2 ) ); // the other sell order is filled + BOOST_CHECK( db.find( call_id ) != nullptr ); // the first call order did not get filled BOOST_CHECK_EQUAL( 50-14-21, call_id(db).debt.value ); // call paid 14 USD and 21 USD BOOST_CHECK_EQUAL( 5-1-1, call_id(db).collateral.value ); // call got 1 CORE and 1 CORE BOOST_CHECK_EQUAL( 50-31, get_balance(seller_id, bitusd_id) ); // seller paid 31 USD in total @@ -1418,8 +1418,8 @@ BOOST_AUTO_TEST_CASE( call_limit_rounding_test2_after_hf_342 ) const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); const auto& core = asset_id_type()(db); - const asset_id_type bitusd_id = bitusd.id; - const asset_id_type core_id = core.id; + const asset_id_type bitusd_id = bitusd.get_id(); + const asset_id_type core_id = core.get_id(); int64_t init_balance(1000000); @@ -1428,7 +1428,7 @@ BOOST_AUTO_TEST_CASE( call_limit_rounding_test2_after_hf_342 ) transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); transfer(committee_account, borrower4_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -1437,7 +1437,7 @@ BOOST_AUTO_TEST_CASE( call_limit_rounding_test2_after_hf_342 ) publish_feed( bitusd, feedproducer, current_feed ); // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 const call_order_object& call = *borrow( borrower, bitusd.amount(50), asset(5)); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(17500)); transfer(borrower, seller, bitusd.amount(50)); @@ -1455,14 +1455,14 @@ BOOST_AUTO_TEST_CASE( call_limit_rounding_test2_after_hf_342 ) BOOST_CHECK_EQUAL( init_balance-5, get_balance(borrower, core) ); // create a buy order which will be matched - limit_order_id_type buy_id = create_sell_order(buyer, core.amount(1), bitusd.amount(10))->id; + limit_order_id_type buy_id = create_sell_order(buyer, core.amount(1), bitusd.amount(10))->get_id(); BOOST_CHECK_EQUAL( 1, buy_id(db).for_sale.value ); BOOST_CHECK_EQUAL( 1000000-1, get_balance(buyer, core) ); BOOST_CHECK_EQUAL( 0, get_balance(buyer, bitusd) ); // create a limit order to fill the buy order, and remaining amounts will be matched later - limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(31), core.amount(2))->id; - BOOST_CHECK( !db.find( buy_id ) ); // the buy order is filled + limit_order_id_type sell_id = create_sell_order(seller, 
bitusd.amount(31), core.amount(2))->get_id(); + BOOST_CHECK( !db.find( buy_id ) ); // the buy order is filled BOOST_CHECK_EQUAL( 1000000-1, get_balance(buyer, core) ); BOOST_CHECK_EQUAL( 10, get_balance(buyer, bitusd) ); // buyer got 10 usd BOOST_CHECK_EQUAL( 21, sell_id(db).for_sale.value ); // remaining amount of sell order is 21 @@ -1470,7 +1470,7 @@ BOOST_AUTO_TEST_CASE( call_limit_rounding_test2_after_hf_342 ) BOOST_CHECK_EQUAL( 1, get_balance(seller, core) ); // seller got 1 core // create another limit order which will be matched later - limit_order_id_type sell_id2 = create_sell_order(seller2, bitusd.amount(14), core.amount(1))->id; + limit_order_id_type sell_id2 = create_sell_order(seller2, bitusd.amount(14), core.amount(1))->get_id(); BOOST_CHECK_EQUAL( 14, sell_id2(db).for_sale.value ); BOOST_CHECK_EQUAL( 100000-14, get_balance(seller2, bitusd) ); BOOST_CHECK_EQUAL( 0, get_balance(seller2, core) ); @@ -1490,9 +1490,9 @@ BOOST_AUTO_TEST_CASE( call_limit_rounding_test2_after_hf_342 ) // effective price is 16/1 which is close to 31/2; // secondly, call will match with sell_id2, which has 14 USD remaining, with price 14 USD / 1 CORE, // so the seller will get 1 CORE and pay 14 USD since 14 USD just worths 1 CORE according to price 14/1 - BOOST_CHECK( !db.find( sell_id ) ); // the sell order is filled - BOOST_CHECK( !db.find( sell_id2 ) ); // the other sell order is filled - BOOST_CHECK( db.find( call_id ) != nullptr ); // the first call order did not get filled + BOOST_CHECK( !db.find( sell_id ) ); // the sell order is filled + BOOST_CHECK( !db.find( sell_id2 ) ); // the other sell order is filled + BOOST_CHECK( db.find( call_id ) != nullptr ); // the first call order did not get filled BOOST_CHECK_EQUAL( 50-14-16, call_id(db).debt.value ); // call paid 14 USD and 16 USD BOOST_CHECK_EQUAL( 5-1-1, call_id(db).collateral.value ); // call got 1 CORE and 1 CORE BOOST_CHECK_EQUAL( 50-31+(21-16), get_balance(seller_id, bitusd_id) ); // seller paid 31 USD then get refunded 5 USD diff --git a/tests/tests/market_tests.cpp b/tests/tests/market_tests.cpp index 3cbfcb84bf..aa0be76841 100644 --- a/tests/tests/market_tests.cpp +++ b/tests/tests/market_tests.cpp @@ -50,8 +50,8 @@ BOOST_AUTO_TEST_CASE(issue_338_etc) const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); const auto& core = asset_id_type()(db); - asset_id_type usd_id = bitusd.id; - asset_id_type core_id = core.id; + asset_id_type usd_id = bitusd.get_id(); + asset_id_type core_id = core.get_id(); int64_t init_balance(1000000); @@ -59,7 +59,7 @@ BOOST_AUTO_TEST_CASE(issue_338_etc) transfer(committee_account, borrower_id, asset(init_balance)); transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -68,13 +68,13 @@ BOOST_AUTO_TEST_CASE(issue_338_etc) publish_feed( bitusd, feedproducer, current_feed ); // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7 const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000)); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7 const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500)); - call_order_id_type call2_id = call2.id; + 
call_order_id_type call2_id = call2.get_id(); // create yet another position with 320% collateral, call price is 16/1.75 CORE/USD = 64/7 const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(16000)); - call_order_id_type call3_id = call3.id; + call_order_id_type call3_id = call3.get_id(); transfer(borrower, seller, bitusd.amount(1000)); BOOST_CHECK_EQUAL( 1000, call.debt.value ); @@ -88,11 +88,11 @@ BOOST_AUTO_TEST_CASE(issue_338_etc) // settlement price = 1/10, mssp = 1/11 // This order slightly below the call price will not be matched #606 - limit_order_id_type sell_low = create_sell_order(seller, bitusd.amount(7), core.amount(59))->id; + limit_order_id_type sell_low = create_sell_order(seller, bitusd.amount(7), core.amount(59))->get_id(); // This order above the MSSP will not be matched - limit_order_id_type sell_high = create_sell_order(seller, bitusd.amount(7), core.amount(78))->id; + limit_order_id_type sell_high = create_sell_order(seller, bitusd.amount(7), core.amount(78))->get_id(); // This would match but is blocked by sell_low?! #606 - limit_order_id_type sell_med = create_sell_order(seller, bitusd.amount(7), core.amount(60))->id; + limit_order_id_type sell_med = create_sell_order(seller, bitusd.amount(7), core.amount(60))->get_id(); cancel_limit_order( sell_med(db) ); cancel_limit_order( sell_high(db) ); @@ -106,7 +106,7 @@ BOOST_AUTO_TEST_CASE(issue_338_etc) BOOST_CHECK_EQUAL( 993, call.debt.value ); BOOST_CHECK_EQUAL( 14940, call.collateral.value ); - limit_order_id_type buy_low = create_sell_order(buyer, asset(90), bitusd.amount(10))->id; + limit_order_id_type buy_low = create_sell_order(buyer, asset(90), bitusd.amount(10))->get_id(); // margin call takes precedence BOOST_CHECK( !create_sell_order(seller, bitusd.amount(7), core.amount(60)) ); BOOST_CHECK_EQUAL( 986, get_balance(seller, bitusd) ); @@ -114,7 +114,7 @@ BOOST_AUTO_TEST_CASE(issue_338_etc) BOOST_CHECK_EQUAL( 986, call.debt.value ); BOOST_CHECK_EQUAL( 14880, call.collateral.value ); - limit_order_id_type buy_med = create_sell_order(buyer, asset(105), bitusd.amount(10))->id; + limit_order_id_type buy_med = create_sell_order(buyer, asset(105), bitusd.amount(10))->get_id(); // margin call takes precedence BOOST_CHECK( !create_sell_order(seller, bitusd.amount(7), core.amount(70)) ); BOOST_CHECK_EQUAL( 979, get_balance(seller, bitusd) ); @@ -122,7 +122,7 @@ BOOST_AUTO_TEST_CASE(issue_338_etc) BOOST_CHECK_EQUAL( 979, call.debt.value ); BOOST_CHECK_EQUAL( 14810, call.collateral.value ); - limit_order_id_type buy_high = create_sell_order(buyer, asset(115), bitusd.amount(10))->id; + limit_order_id_type buy_high = create_sell_order(buyer, asset(115), bitusd.amount(10))->get_id(); // margin call still has precedence (!) 
#625 BOOST_CHECK( !create_sell_order(seller, bitusd.amount(7), core.amount(77)) ); BOOST_CHECK_EQUAL( 972, get_balance(seller, bitusd) ); @@ -203,26 +203,26 @@ BOOST_AUTO_TEST_CASE(issue_338_etc) transfer(borrower3_id, seller_id, asset(1000, usd_id)); // Re-create sell_low, slightly below the call price, will not be matched, will expire soon - sell_low = create_sell_order(seller_id(db), asset(7, usd_id), asset(59), db.head_block_time()+fc::seconds(300) )->id; + sell_low = create_sell_order(seller_id(db), asset(7, usd_id), asset(59), db.head_block_time()+fc::seconds(300) )->get_id(); // This would match but is blocked by sell_low, it has an amount same as call's debt which will be full filled later - sell_med = create_sell_order(seller_id(db), asset(262, usd_id), asset(2620))->id; // 1/10 + sell_med = create_sell_order(seller_id(db), asset(262, usd_id), asset(2620))->get_id(); // 1/10 // Another big order above sell_med, blocked - limit_order_id_type sell_med2 = create_sell_order(seller_id(db), asset(1200, usd_id), asset(12120))->id; // 1/10.1 + limit_order_id_type sell_med2 = create_sell_order(seller_id(db), asset(1200, usd_id), asset(12120))->get_id(); // 1/10.1 // Another small order above sell_med2, blocked - limit_order_id_type sell_med3 = create_sell_order(seller_id(db), asset(120, usd_id), asset(1224))->id; // 1/10.2 + limit_order_id_type sell_med3 = create_sell_order(seller_id(db), asset(120, usd_id), asset(1224))->get_id(); // 1/10.2 // generate a block, sell_low will expire BOOST_TEST_MESSAGE( "Expire sell_low" ); generate_blocks( HARDFORK_615_TIME + fc::hours(26) ); - BOOST_CHECK( db.find( sell_low ) == nullptr ); + BOOST_CHECK( db.find( sell_low ) == nullptr ); // #453 multiple order matching issue occurs - BOOST_CHECK( db.find( sell_med ) == nullptr ); // sell_med get filled - BOOST_CHECK( db.find( sell_med2 ) != nullptr ); // sell_med2 is still there - BOOST_CHECK( db.find( sell_med3 ) == nullptr ); // sell_med3 get filled - BOOST_CHECK( db.find( call_id ) == nullptr ); // the first call order get filled - BOOST_CHECK( db.find( call2_id ) == nullptr ); // the second call order get filled - BOOST_CHECK( db.find( call3_id ) != nullptr ); // the third call order is still there + BOOST_CHECK( db.find( sell_med ) == nullptr ); // sell_med get filled + BOOST_CHECK( db.find( sell_med2 ) != nullptr ); // sell_med2 is still there + BOOST_CHECK( db.find( sell_med3 ) == nullptr ); // sell_med3 get filled + BOOST_CHECK( db.find( call_id ) == nullptr ); // the first call order get filled + BOOST_CHECK( db.find( call2_id ) == nullptr ); // the second call order get filled + BOOST_CHECK( db.find( call3_id ) != nullptr ); // the third call order is still there } FC_LOG_AND_RETHROW() } @@ -250,8 +250,8 @@ BOOST_AUTO_TEST_CASE(hardfork_core_338_test) const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); const auto& core = asset_id_type()(db); - asset_id_type usd_id = bitusd.id; - asset_id_type core_id = core.id; + asset_id_type usd_id = bitusd.get_id(); + asset_id_type core_id = core.get_id(); int64_t init_balance(1000000); @@ -259,7 +259,7 @@ BOOST_AUTO_TEST_CASE(hardfork_core_338_test) transfer(committee_account, borrower_id, asset(init_balance)); transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -268,13 +268,13 @@ 
BOOST_AUTO_TEST_CASE(hardfork_core_338_test) publish_feed( bitusd, feedproducer, current_feed ); // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7 const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000)); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7 const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500)); - call_order_id_type call2_id = call2.id; + call_order_id_type call2_id = call2.get_id(); // create yet another position with 320% collateral, call price is 16/1.75 CORE/USD = 64/7 const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(16000)); - call_order_id_type call3_id = call3.id; + call_order_id_type call3_id = call3.get_id(); transfer(borrower, seller, bitusd.amount(1000)); transfer(borrower2, seller, bitusd.amount(1000)); transfer(borrower3, seller, bitusd.amount(1000)); @@ -300,11 +300,11 @@ BOOST_AUTO_TEST_CASE(hardfork_core_338_test) BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); // This buy order is too low will not be matched with a sell order - limit_order_id_type buy_low = create_sell_order(buyer, asset(90), bitusd.amount(10))->id; + limit_order_id_type buy_low = create_sell_order(buyer, asset(90), bitusd.amount(10))->get_id(); // This buy order at MSSP will be matched only if no margin call (margin call takes precedence) - limit_order_id_type buy_med = create_sell_order(buyer, asset(110), bitusd.amount(10))->id; + limit_order_id_type buy_med = create_sell_order(buyer, asset(110), bitusd.amount(10))->get_id(); // This buy order above MSSP will be matched with a sell order (limit order with better price takes precedence) - limit_order_id_type buy_high = create_sell_order(buyer, asset(111), bitusd.amount(10))->id; + limit_order_id_type buy_high = create_sell_order(buyer, asset(111), bitusd.amount(10))->get_id(); BOOST_CHECK_EQUAL( 0, get_balance(buyer, bitusd) ); BOOST_CHECK_EQUAL( init_balance - 90 - 110 - 111, get_balance(buyer, core) ); @@ -313,9 +313,9 @@ BOOST_AUTO_TEST_CASE(hardfork_core_338_test) BOOST_CHECK( !create_sell_order(seller, bitusd.amount(700), core.amount(5900) ) ); // firstly it will match with buy_high, at buy_high's price: #625 fixed - BOOST_CHECK( !db.find( buy_high ) ); - BOOST_CHECK_EQUAL( db.find( buy_med )->for_sale.value, 110 ); - BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 90 ); + BOOST_CHECK( !db.find( buy_high ) ); + BOOST_CHECK_EQUAL( db.find( buy_med )->for_sale.value, 110 ); + BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 90 ); // buy_high pays 111 CORE, receives 10 USD goes to buyer's balance BOOST_CHECK_EQUAL( 10, get_balance(buyer, bitusd) ); @@ -339,8 +339,8 @@ BOOST_AUTO_TEST_CASE(hardfork_core_338_test) // This would match with call before, but would match with call2 after #343 fixed BOOST_CHECK( !create_sell_order(seller, bitusd.amount(700), core.amount(6000) ) ); - BOOST_CHECK_EQUAL( db.find( buy_med )->for_sale.value, 110 ); - BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 90 ); + BOOST_CHECK_EQUAL( db.find( buy_med )->for_sale.value, 110 ); + BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 90 ); // fill price would be mssp: 1/11 = 700/7700 : #338 fixed BOOST_CHECK_EQUAL( 1593, get_balance(seller, bitusd) ); @@ -420,9 +420,9 @@ BOOST_AUTO_TEST_CASE(hardfork_core_338_test) // black swan event will occur: #649 fixed BOOST_CHECK( 
usd_id(db).bitasset_data(db).has_settlement() ); // short positions will be closed - BOOST_CHECK( !db.find( call_id ) ); - BOOST_CHECK( !db.find( call2_id ) ); - BOOST_CHECK( !db.find( call3_id ) ); + BOOST_CHECK( !db.find( call_id ) ); + BOOST_CHECK( !db.find( call2_id ) ); + BOOST_CHECK( !db.find( call3_id ) ); // generate a block generate_block(); @@ -453,7 +453,7 @@ BOOST_AUTO_TEST_CASE(hardfork_core_453_test) const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); const auto& core = asset_id_type()(db); - asset_id_type usd_id = bitusd.id; + asset_id_type usd_id = bitusd.get_id(); int64_t init_balance(1000000); @@ -461,7 +461,7 @@ BOOST_AUTO_TEST_CASE(hardfork_core_453_test) transfer(committee_account, borrower_id, asset(init_balance)); transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -470,13 +470,13 @@ BOOST_AUTO_TEST_CASE(hardfork_core_453_test) publish_feed( bitusd, feedproducer, current_feed ); // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7 const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000)); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7 const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500)); - call_order_id_type call2_id = call2.id; + call_order_id_type call2_id = call2.get_id(); // create yet another position with 320% collateral, call price is 16/1.75 CORE/USD = 64/7 const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(16000)); - call_order_id_type call3_id = call3.id; + call_order_id_type call3_id = call3.get_id(); transfer(borrower, seller, bitusd.amount(1000)); transfer(borrower2, seller, bitusd.amount(1000)); transfer(borrower3, seller, bitusd.amount(1000)); @@ -493,11 +493,11 @@ BOOST_AUTO_TEST_CASE(hardfork_core_453_test) // no margin call so far // This order would match call when it's margin called, it has an amount same as call's debt which will be full filled later - limit_order_id_type sell_med = create_sell_order(seller_id(db), asset(1000, usd_id), asset(10000))->id; // 1/10 + limit_order_id_type sell_med = create_sell_order(seller_id(db), asset(1000, usd_id), asset(10000))->get_id(); // 1/10 // Another big order above sell_med, amount bigger than call2's debt - limit_order_id_type sell_med2 = create_sell_order(seller_id(db), asset(1200, usd_id), asset(12120))->id; // 1/10.1 + limit_order_id_type sell_med2 = create_sell_order(seller_id(db), asset(1200, usd_id), asset(12120))->get_id(); // 1/10.1 // Another small order above sell_med2 - limit_order_id_type sell_med3 = create_sell_order(seller_id(db), asset(120, usd_id), asset(1224))->id; // 1/10.2 + limit_order_id_type sell_med3 = create_sell_order(seller_id(db), asset(120, usd_id), asset(1224))->get_id(); // 1/10.2 // adjust price feed to get the call orders into margin call territory current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(10); @@ -505,12 +505,12 @@ BOOST_AUTO_TEST_CASE(hardfork_core_453_test) // settlement price = 1/10, mssp = 1/11 // Fixed #453 multiple order matching issue occurs - BOOST_CHECK( !db.find( sell_med ) ); // sell_med get filled - BOOST_CHECK( !db.find( 
sell_med2 ) ); // sell_med2 get filled - BOOST_CHECK( !db.find( sell_med3 ) ); // sell_med3 get filled - BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled - BOOST_CHECK( !db.find( call2_id ) ); // the second call order get filled - BOOST_CHECK( db.find( call3_id ) ); // the third call order is still there + BOOST_CHECK( !db.find( sell_med ) ); // sell_med get filled + BOOST_CHECK( !db.find( sell_med2 ) ); // sell_med2 get filled + BOOST_CHECK( !db.find( sell_med3 ) ); // sell_med3 get filled + BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled + BOOST_CHECK( !db.find( call2_id ) ); // the second call order get filled + BOOST_CHECK( db.find( call3_id ) ); // the third call order is still there // generate a block generate_block(); @@ -549,7 +549,7 @@ BOOST_AUTO_TEST_CASE(hardfork_core_625_big_limit_order_test) transfer(committee_account, borrower_id, asset(init_balance)); transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -558,10 +558,10 @@ BOOST_AUTO_TEST_CASE(hardfork_core_625_big_limit_order_test) publish_feed( bitusd, feedproducer, current_feed ); // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7 const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000)); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7 const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500)); - call_order_id_type call2_id = call2.id; + call_order_id_type call2_id = call2.get_id(); // create yet another position with 500% collateral, call price is 25/1.75 CORE/USD = 100/7 const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(25000)); transfer(borrower, seller, bitusd.amount(1000)); @@ -590,18 +590,18 @@ BOOST_AUTO_TEST_CASE(hardfork_core_625_big_limit_order_test) // settlement price = 1/10, mssp = 1/11 // This sell order above MSSP will not be matched with a call - limit_order_id_type sell_high = create_sell_order(seller, bitusd.amount(7), core.amount(78))->id; - BOOST_CHECK_EQUAL( db.find( sell_high )->for_sale.value, 7 ); + limit_order_id_type sell_high = create_sell_order(seller, bitusd.amount(7), core.amount(78))->get_id(); + BOOST_CHECK_EQUAL( db.find( sell_high )->for_sale.value, 7 ); BOOST_CHECK_EQUAL( 2993, get_balance(seller, bitusd) ); BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); // This buy order is too low will not be matched with a sell order - limit_order_id_type buy_low = create_sell_order(buyer, asset(80), bitusd.amount(10))->id; + limit_order_id_type buy_low = create_sell_order(buyer, asset(80), bitusd.amount(10))->get_id(); // This buy order at MSSP will be matched only if no margin call (margin call takes precedence) - limit_order_id_type buy_med = create_sell_order(buyer2, asset(11000), bitusd.amount(1000))->id; + limit_order_id_type buy_med = create_sell_order(buyer2, asset(11000), bitusd.amount(1000))->get_id(); // This buy order above MSSP will be matched with a sell order (limit order with better price takes precedence) - limit_order_id_type buy_high = create_sell_order(buyer3, asset(111), bitusd.amount(10))->id; + limit_order_id_type buy_high = 
create_sell_order(buyer3, asset(111), bitusd.amount(10))->get_id(); BOOST_CHECK_EQUAL( 0, get_balance(buyer, bitusd) ); BOOST_CHECK_EQUAL( 0, get_balance(buyer2, bitusd) ); @@ -614,19 +614,19 @@ BOOST_AUTO_TEST_CASE(hardfork_core_625_big_limit_order_test) BOOST_CHECK( !create_sell_order(seller, bitusd.amount(700*4), core.amount(5900*4) ) ); // firstly it will match with buy_high, at buy_high's price - BOOST_CHECK( !db.find( buy_high ) ); + BOOST_CHECK( !db.find( buy_high ) ); // buy_high pays 111 CORE, receives 10 USD goes to buyer3's balance BOOST_CHECK_EQUAL( 10, get_balance(buyer3, bitusd) ); BOOST_CHECK_EQUAL( init_balance - 111, get_balance(buyer3, core) ); // then it will match with call, at mssp: 1/11 = 1000/11000 - BOOST_CHECK( !db.find( call_id ) ); + BOOST_CHECK( !db.find( call_id ) ); // call pays 11000 CORE, receives 1000 USD to cover borrower's position, remaining CORE goes to borrower's balance BOOST_CHECK_EQUAL( init_balance - 11000, get_balance(borrower, core) ); BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); // then it will match with call2, at mssp: 1/11 = 1000/11000 - BOOST_CHECK( !db.find( call2_id ) ); + BOOST_CHECK( !db.find( call2_id ) ); // call2 pays 11000 CORE, receives 1000 USD to cover borrower2's position, remaining CORE goes to borrower2's balance BOOST_CHECK_EQUAL( init_balance - 11000, get_balance(borrower2, core) ); BOOST_CHECK_EQUAL( 0, get_balance(borrower2, bitusd) ); @@ -636,14 +636,14 @@ BOOST_AUTO_TEST_CASE(hardfork_core_625_big_limit_order_test) BOOST_CHECK_EQUAL( 783, get_balance(buyer2, bitusd) ); // 700*4-10-1000-1000=790, minus 1% market fee 790*100/10000=7 BOOST_CHECK_EQUAL( init_balance - 11000, get_balance(buyer2, core) ); // buy_med pays at 1/11 = 790/8690 - BOOST_CHECK_EQUAL( db.find( buy_med )->for_sale.value, 11000-8690 ); + BOOST_CHECK_EQUAL( db.find( buy_med )->for_sale.value, 11000-8690 ); // call3 is not in margin call territory so won't be matched BOOST_CHECK_EQUAL( 1000, call3.debt.value ); BOOST_CHECK_EQUAL( 25000, call3.collateral.value ); // buy_low's price is too low that won't be matched - BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 ); + BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 ); // check seller balance BOOST_CHECK_EQUAL( 193, get_balance(seller, bitusd) ); // 3000 - 7 - 700*4 @@ -651,13 +651,13 @@ BOOST_AUTO_TEST_CASE(hardfork_core_625_big_limit_order_test) // Cancel buy_med cancel_limit_order( buy_med(db) ); - BOOST_CHECK( !db.find( buy_med ) ); + BOOST_CHECK( !db.find( buy_med ) ); BOOST_CHECK_EQUAL( 783, get_balance(buyer2, bitusd) ); BOOST_CHECK_EQUAL( init_balance - 8690, get_balance(buyer2, core) ); // Create another sell order slightly below the call price, won't fill - limit_order_id_type sell_med = create_sell_order( seller, bitusd.amount(7), core.amount(59) )->id; - BOOST_CHECK_EQUAL( db.find( sell_med )->for_sale.value, 7 ); + limit_order_id_type sell_med = create_sell_order( seller, bitusd.amount(7), core.amount(59) )->get_id(); + BOOST_CHECK_EQUAL( db.find( sell_med )->for_sale.value, 7 ); // check seller balance BOOST_CHECK_EQUAL( 193-7, get_balance(seller, bitusd) ); BOOST_CHECK_EQUAL( 30801, get_balance(seller, core) ); @@ -667,7 +667,7 @@ BOOST_AUTO_TEST_CASE(hardfork_core_625_big_limit_order_test) BOOST_CHECK_EQUAL( 25000, call3.collateral.value ); // buy_low's price is too low that won't be matched - BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 ); + BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 ); // generate a block generate_block(); @@ -691,10 
+691,10 @@ BOOST_AUTO_TEST_CASE(hard_fork_453_cross_test) const auto& biteur = create_bitasset("EURBIT", feedproducer_id); const auto& bitcny = create_bitasset("CNYBIT", feedproducer_id); const auto& core = asset_id_type()(db); - asset_id_type usd_id = bitusd.id; - asset_id_type eur_id = biteur.id; - asset_id_type cny_id = bitcny.id; - asset_id_type core_id = core.id; + asset_id_type usd_id = bitusd.get_id(); + asset_id_type eur_id = biteur.get_id(); + asset_id_type cny_id = bitcny.get_id(); + asset_id_type core_id = core.get_id(); int64_t init_balance(1000000); @@ -702,9 +702,9 @@ BOOST_AUTO_TEST_CASE(hard_fork_453_cross_test) transfer(committee_account, borrower_id, asset(init_balance)); transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); - update_feed_producers( biteur, {feedproducer.id} ); - update_feed_producers( bitcny, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); + update_feed_producers( biteur, {feedproducer.get_id()} ); + update_feed_producers( bitcny, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -717,25 +717,25 @@ BOOST_AUTO_TEST_CASE(hard_fork_453_cross_test) publish_feed( bitcny, feedproducer, current_feed ); // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7 const call_order_object& call_usd = *borrow( borrower, bitusd.amount(1000), asset(15000)); - call_order_id_type call_usd_id = call_usd.id; + call_order_id_type call_usd_id = call_usd.get_id(); const call_order_object& call_eur = *borrow( borrower, biteur.amount(1000), asset(15000)); - call_order_id_type call_eur_id = call_eur.id; + call_order_id_type call_eur_id = call_eur.get_id(); const call_order_object& call_cny = *borrow( borrower, bitcny.amount(1000), asset(15000)); - call_order_id_type call_cny_id = call_cny.id; + call_order_id_type call_cny_id = call_cny.get_id(); // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7 const call_order_object& call_usd2 = *borrow( borrower2, bitusd.amount(1000), asset(15500)); - call_order_id_type call_usd2_id = call_usd2.id; + call_order_id_type call_usd2_id = call_usd2.get_id(); const call_order_object& call_eur2 = *borrow( borrower2, biteur.amount(1000), asset(15500)); - call_order_id_type call_eur2_id = call_eur2.id; + call_order_id_type call_eur2_id = call_eur2.get_id(); const call_order_object& call_cny2 = *borrow( borrower2, bitcny.amount(1000), asset(15500)); - call_order_id_type call_cny2_id = call_cny2.id; + call_order_id_type call_cny2_id = call_cny2.get_id(); // create yet another position with 320% collateral, call price is 16/1.75 CORE/USD = 64/7 const call_order_object& call_usd3 = *borrow( borrower3, bitusd.amount(1000), asset(16000)); - call_order_id_type call_usd3_id = call_usd3.id; + call_order_id_type call_usd3_id = call_usd3.get_id(); const call_order_object& call_eur3 = *borrow( borrower3, biteur.amount(1000), asset(16000)); - call_order_id_type call_eur3_id = call_eur3.id; + call_order_id_type call_eur3_id = call_eur3.get_id(); const call_order_object& call_cny3 = *borrow( borrower3, bitcny.amount(1000), asset(16000)); - call_order_id_type call_cny3_id = call_cny3.id; + call_order_id_type call_cny3_id = call_cny3.get_id(); transfer(borrower, seller, bitusd.amount(1000)); transfer(borrower2, seller, bitusd.amount(1000)); transfer(borrower3, seller, bitusd.amount(1000)); @@ -779,37 
+779,37 @@ BOOST_AUTO_TEST_CASE(hard_fork_453_cross_test) // settlement price = 1/10, mssp = 1/11 // This order below the call price will not be matched before hard fork: 1/8 #606 - limit_order_id_type sell_usd_low = create_sell_order(seller, bitusd.amount(1000), core.amount(7000))->id; + limit_order_id_type sell_usd_low = create_sell_order(seller, bitusd.amount(1000), core.amount(7000))->get_id(); // This is a big order, price below the call price will not be matched before hard fork: 1007/9056 = 1/8 #606 - limit_order_id_type sell_usd_low2 = create_sell_order(seller, bitusd.amount(1007), core.amount(8056))->id; + limit_order_id_type sell_usd_low2 = create_sell_order(seller, bitusd.amount(1007), core.amount(8056))->get_id(); // This order above the MSSP will not be matched before hard fork - limit_order_id_type sell_usd_high = create_sell_order(seller, bitusd.amount(7), core.amount(78))->id; + limit_order_id_type sell_usd_high = create_sell_order(seller, bitusd.amount(7), core.amount(78))->get_id(); // This would match but is blocked by sell_low?! #606 - limit_order_id_type sell_usd_med = create_sell_order(seller, bitusd.amount(700), core.amount(6400))->id; + limit_order_id_type sell_usd_med = create_sell_order(seller, bitusd.amount(700), core.amount(6400))->get_id(); // This would match but is blocked by sell_low?! #606 - limit_order_id_type sell_usd_med2 = create_sell_order(seller, bitusd.amount(7), core.amount(65))->id; + limit_order_id_type sell_usd_med2 = create_sell_order(seller, bitusd.amount(7), core.amount(65))->get_id(); // This order below the call price will not be matched before hard fork: 1/8 #606 - limit_order_id_type sell_eur_low = create_sell_order(seller, biteur.amount(1000), core.amount(7000))->id; + limit_order_id_type sell_eur_low = create_sell_order(seller, biteur.amount(1000), core.amount(7000))->get_id(); // This is a big order, price below the call price will not be matched before hard fork: 1007/9056 = 1/8 #606 - limit_order_id_type sell_eur_low2 = create_sell_order(seller, biteur.amount(1007), core.amount(8056))->id; + limit_order_id_type sell_eur_low2 = create_sell_order(seller, biteur.amount(1007), core.amount(8056))->get_id(); // This order above the MSSP will not be matched before hard fork - limit_order_id_type sell_eur_high = create_sell_order(seller, biteur.amount(7), core.amount(78))->id; + limit_order_id_type sell_eur_high = create_sell_order(seller, biteur.amount(7), core.amount(78))->get_id(); // This would match but is blocked by sell_low?! #606 - limit_order_id_type sell_eur_med = create_sell_order(seller, biteur.amount(700), core.amount(6400))->id; + limit_order_id_type sell_eur_med = create_sell_order(seller, biteur.amount(700), core.amount(6400))->get_id(); // This would match but is blocked by sell_low?! 
#606 - limit_order_id_type sell_eur_med2 = create_sell_order(seller, biteur.amount(7), core.amount(65))->id; + limit_order_id_type sell_eur_med2 = create_sell_order(seller, biteur.amount(7), core.amount(65))->get_id(); // This order below the call price will not be matched before hard fork: 1/8 #606 - limit_order_id_type sell_cny_low = create_sell_order(seller, bitcny.amount(1000), core.amount(7000))->id; + limit_order_id_type sell_cny_low = create_sell_order(seller, bitcny.amount(1000), core.amount(7000))->get_id(); // This is a big order, price below the call price will not be matched before hard fork: 1007/9056 = 1/8 #606 - limit_order_id_type sell_cny_low2 = create_sell_order(seller, bitcny.amount(1007), core.amount(8056))->id; + limit_order_id_type sell_cny_low2 = create_sell_order(seller, bitcny.amount(1007), core.amount(8056))->get_id(); // This order above the MSSP will not be matched before hard fork - limit_order_id_type sell_cny_high = create_sell_order(seller, bitcny.amount(7), core.amount(78))->id; + limit_order_id_type sell_cny_high = create_sell_order(seller, bitcny.amount(7), core.amount(78))->get_id(); // This would match but is blocked by sell_low?! #606 - limit_order_id_type sell_cny_med = create_sell_order(seller, bitcny.amount(700), core.amount(6400))->id; + limit_order_id_type sell_cny_med = create_sell_order(seller, bitcny.amount(700), core.amount(6400))->get_id(); // This would match but is blocked by sell_low?! #606 - limit_order_id_type sell_cny_med2 = create_sell_order(seller, bitcny.amount(7), core.amount(65))->id; + limit_order_id_type sell_cny_med2 = create_sell_order(seller, bitcny.amount(7), core.amount(65))->get_id(); BOOST_CHECK_EQUAL( 3000-1000-1007-7-700-7, get_balance(seller_id, usd_id) ); BOOST_CHECK_EQUAL( 3000-1000-1007-7-700-7, get_balance(seller_id, eur_id) ); @@ -822,46 +822,46 @@ BOOST_AUTO_TEST_CASE(hard_fork_453_cross_test) generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); // sell_low and call should get matched first - BOOST_CHECK( !db.find( sell_usd_low ) ); - BOOST_CHECK( !db.find( call_usd_id ) ); + BOOST_CHECK( !db.find( sell_usd_low ) ); + BOOST_CHECK( !db.find( call_usd_id ) ); // sell_low2 and call2 should get matched - BOOST_CHECK( !db.find( call_usd2_id ) ); + BOOST_CHECK( !db.find( call_usd2_id ) ); // sell_low2 and call3 should get matched: fixed #453 - BOOST_CHECK( !db.find( sell_usd_low2 ) ); + BOOST_CHECK( !db.find( sell_usd_low2 ) ); // sell_med and call3 should get matched - BOOST_CHECK( !db.find( sell_usd_med ) ); + BOOST_CHECK( !db.find( sell_usd_med ) ); // call3 now is not at margin call state, so sell_med2 won't get matched - BOOST_CHECK_EQUAL( db.find( sell_usd_med2 )->for_sale.value, 7 ); + BOOST_CHECK_EQUAL( db.find( sell_usd_med2 )->for_sale.value, 7 ); // sell_high should still be there, didn't match anything - BOOST_CHECK_EQUAL( db.find( sell_usd_high )->for_sale.value, 7 ); + BOOST_CHECK_EQUAL( db.find( sell_usd_high )->for_sale.value, 7 ); // sell_low and call should get matched first - BOOST_CHECK( !db.find( sell_eur_low ) ); - BOOST_CHECK( !db.find( call_eur_id ) ); + BOOST_CHECK( !db.find( sell_eur_low ) ); + BOOST_CHECK( !db.find( call_eur_id ) ); // sell_low2 and call2 should get matched - BOOST_CHECK( !db.find( call_eur2_id ) ); + BOOST_CHECK( !db.find( call_eur2_id ) ); // sell_low2 and call3 should get matched: fixed #453 - BOOST_CHECK( !db.find( sell_eur_low2 ) ); + BOOST_CHECK( !db.find( sell_eur_low2 ) ); // sell_med and call3 should get matched - BOOST_CHECK( !db.find( 
sell_eur_med ) ); + BOOST_CHECK( !db.find( sell_eur_med ) ); // call3 now is not at margin call state, so sell_med2 won't get matched - BOOST_CHECK_EQUAL( db.find( sell_eur_med2 )->for_sale.value, 7 ); + BOOST_CHECK_EQUAL( db.find( sell_eur_med2 )->for_sale.value, 7 ); // sell_high should still be there, didn't match anything - BOOST_CHECK_EQUAL( db.find( sell_eur_high )->for_sale.value, 7 ); + BOOST_CHECK_EQUAL( db.find( sell_eur_high )->for_sale.value, 7 ); // sell_low and call should get matched first - BOOST_CHECK( !db.find( sell_cny_low ) ); - BOOST_CHECK( !db.find( call_cny_id ) ); + BOOST_CHECK( !db.find( sell_cny_low ) ); + BOOST_CHECK( !db.find( call_cny_id ) ); // sell_low2 and call2 should get matched - BOOST_CHECK( !db.find( call_cny2_id ) ); + BOOST_CHECK( !db.find( call_cny2_id ) ); // sell_low2 and call3 should get matched: fixed #453 - BOOST_CHECK( !db.find( sell_cny_low2 ) ); + BOOST_CHECK( !db.find( sell_cny_low2 ) ); // sell_med and call3 should get matched - BOOST_CHECK( !db.find( sell_cny_med ) ); + BOOST_CHECK( !db.find( sell_cny_med ) ); // call3 now is not at margin call state, so sell_med2 won't get matched - BOOST_CHECK_EQUAL( db.find( sell_cny_med2 )->for_sale.value, 7 ); + BOOST_CHECK_EQUAL( db.find( sell_cny_med2 )->for_sale.value, 7 ); // sell_high should still be there, didn't match anything - BOOST_CHECK_EQUAL( db.find( sell_cny_high )->for_sale.value, 7 ); + BOOST_CHECK_EQUAL( db.find( sell_cny_high )->for_sale.value, 7 ); // all match price would be limit order price BOOST_CHECK_EQUAL( 3000-1000-1007-7-700-7, get_balance(seller_id, usd_id) ); @@ -898,8 +898,8 @@ BOOST_AUTO_TEST_CASE(hard_fork_338_cross_test) const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); const auto& core = asset_id_type()(db); - asset_id_type usd_id = bitusd.id; - asset_id_type core_id = core.id; + asset_id_type usd_id = bitusd.get_id(); + asset_id_type core_id = core.get_id(); int64_t init_balance(1000000); @@ -908,7 +908,7 @@ BOOST_AUTO_TEST_CASE(hard_fork_338_cross_test) transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); transfer(committee_account, borrower4_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -917,16 +917,16 @@ BOOST_AUTO_TEST_CASE(hard_fork_338_cross_test) publish_feed( bitusd, feedproducer, current_feed ); // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7 const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000)); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7 const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500)); - call_order_id_type call2_id = call2.id; + call_order_id_type call2_id = call2.get_id(); // create yet another position with 320% collateral, call price is 16/1.75 CORE/USD = 64/7 const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(16000)); - call_order_id_type call3_id = call3.id; + call_order_id_type call3_id = call3.get_id(); // create yet another position with 400% collateral, call price is 20/1.75 CORE/USD = 80/7 const call_order_object& call4 = *borrow( borrower4, bitusd.amount(1000), asset(20000)); - call_order_id_type call4_id = call4.id; + 
call_order_id_type call4_id = call4.get_id(); transfer(borrower, seller, bitusd.amount(1000)); transfer(borrower2, seller, bitusd.amount(1000)); transfer(borrower3, seller, bitusd.amount(1000)); @@ -946,11 +946,11 @@ BOOST_AUTO_TEST_CASE(hard_fork_338_cross_test) // settlement price = 1/10, mssp = 1/11 // This order below the call price will not be matched before hard fork: 1/8 #606 - limit_order_id_type sell_low = create_sell_order(seller, bitusd.amount(1000), core.amount(7000))->id; + limit_order_id_type sell_low = create_sell_order(seller, bitusd.amount(1000), core.amount(7000))->get_id(); // This is a big order, price below the call price will not be matched before hard fork: 1007/9056 = 1/8 #606 - limit_order_id_type sell_low2 = create_sell_order(seller, bitusd.amount(1007), core.amount(8056))->id; + limit_order_id_type sell_low2 = create_sell_order(seller, bitusd.amount(1007), core.amount(8056))->get_id(); // This would match but is blocked by sell_low?! #606 - limit_order_id_type sell_med = create_sell_order(seller, bitusd.amount(7), core.amount(64))->id; + limit_order_id_type sell_med = create_sell_order(seller, bitusd.amount(7), core.amount(64))->get_id(); // adjust price feed to get call_order into black swan territory current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(16); @@ -969,22 +969,22 @@ BOOST_AUTO_TEST_CASE(hard_fork_338_cross_test) generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); // sell_low and call should get matched first - BOOST_CHECK( !db.find( sell_low ) ); - BOOST_CHECK( !db.find( call_id ) ); + BOOST_CHECK( !db.find( sell_low ) ); + BOOST_CHECK( !db.find( call_id ) ); // sell_low2 and call2 should get matched - BOOST_CHECK( !db.find( call2_id ) ); + BOOST_CHECK( !db.find( call2_id ) ); // sell_low2 and call3 should get matched: fixed #453 - BOOST_CHECK( !db.find( sell_low2 ) ); + BOOST_CHECK( !db.find( sell_low2 ) ); // sell_med and call3 should get matched - BOOST_CHECK( !db.find( sell_med ) ); + BOOST_CHECK( !db.find( sell_med ) ); // at this moment, // collateralization of call3 is (16000-56-64) / (1000-7-7) = 15880/986 = 16.1, it's > 16 but < 17.6 // although there is no sell order, it should trigger a black swan event right away, // because after hard fork new limit order won't trigger black swan event BOOST_CHECK( usd_id(db).bitasset_data(db).has_settlement() ); - BOOST_CHECK( !db.find( call3_id ) ); - BOOST_CHECK( !db.find( call4_id ) ); + BOOST_CHECK( !db.find( call3_id ) ); + BOOST_CHECK( !db.find( call4_id ) ); // since 16.1 > 16, global settlement should at feed price 16/1 // so settlement fund should be 986*16 + 1000*16 @@ -1018,8 +1018,8 @@ BOOST_AUTO_TEST_CASE(hard_fork_649_cross_test) const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); const auto& core = asset_id_type()(db); - asset_id_type usd_id = bitusd.id; - asset_id_type core_id = core.id; + asset_id_type usd_id = bitusd.get_id(); + asset_id_type core_id = core.get_id(); int64_t init_balance(1000000); @@ -1028,7 +1028,7 @@ BOOST_AUTO_TEST_CASE(hard_fork_649_cross_test) transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); transfer(committee_account, borrower4_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -1037,13 +1037,13 @@ BOOST_AUTO_TEST_CASE(hard_fork_649_cross_test) publish_feed( bitusd, 
feedproducer, current_feed ); // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7 const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000)); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7 const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500)); - call_order_id_type call2_id = call2.id; + call_order_id_type call2_id = call2.get_id(); // create yet another position with 320% collateral, call price is 16/1.75 CORE/USD = 64/7 const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(16000)); - call_order_id_type call3_id = call3.id; + call_order_id_type call3_id = call3.get_id(); transfer(borrower, seller, bitusd.amount(1000)); transfer(borrower2, seller, bitusd.amount(1000)); transfer(borrower3, seller, bitusd.amount(1000)); @@ -1094,9 +1094,9 @@ BOOST_AUTO_TEST_CASE(hard_fork_649_cross_test) // a black swan event should occur BOOST_CHECK( usd_id(db).bitasset_data(db).has_settlement() ); - BOOST_CHECK( !db.find( call_id ) ); - BOOST_CHECK( !db.find( call2_id ) ); - BOOST_CHECK( !db.find( call3_id ) ); + BOOST_CHECK( !db.find( call_id ) ); + BOOST_CHECK( !db.find( call2_id ) ); + BOOST_CHECK( !db.find( call3_id ) ); // since least collateral ratio 15.5 < 20, global settlement should execute at price = least collateral ratio 15.5/1 // so settlement fund should be 15500 + 15500 + round_up(15.5 * 293) @@ -1132,8 +1132,8 @@ BOOST_AUTO_TEST_CASE(hard_fork_343_cross_test) const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); const auto& core = asset_id_type()(db); - asset_id_type usd_id = bitusd.id; - asset_id_type core_id = core.id; + asset_id_type usd_id = bitusd.get_id(); + asset_id_type core_id = core.get_id(); int64_t init_balance(1000000); @@ -1142,7 +1142,7 @@ BOOST_AUTO_TEST_CASE(hard_fork_343_cross_test) transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); transfer(committee_account, borrower4_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -1151,13 +1151,13 @@ BOOST_AUTO_TEST_CASE(hard_fork_343_cross_test) publish_feed( bitusd, feedproducer, current_feed ); // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7 const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000)); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7 const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500)); - call_order_id_type call2_id = call2.id; + call_order_id_type call2_id = call2.get_id(); // create yet another position with 350% collateral, call price is 17.5/1.75 CORE/USD = 77/7 const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(17500)); - call_order_id_type call3_id = call3.id; + call_order_id_type call3_id = call3.get_id(); transfer(borrower, seller, bitusd.amount(1000)); transfer(borrower2, seller, bitusd.amount(1000)); transfer(borrower3, seller, bitusd.amount(1000)); @@ -1248,7 +1248,7 @@ BOOST_AUTO_TEST_CASE(mcfr_blackswan_test) const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); const auto& 
core = asset_id_type()(db); - asset_id_type usd_id = bitusd.id; + asset_id_type usd_id = bitusd.get_id(); int64_t init_balance(1000000); @@ -1268,7 +1268,7 @@ BOOST_AUTO_TEST_CASE(mcfr_blackswan_test) PUSH_TX(db, trx, ~0); } - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -1278,10 +1278,10 @@ BOOST_AUTO_TEST_CASE(mcfr_blackswan_test) // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7 const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000)); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create another position with 400% collateral, call price is 20/1.75 CORE/USD = 80/7 const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(20000)); - call_order_id_type call2_id = call2.id; + call_order_id_type call2_id = call2.get_id(); transfer(borrower, seller, bitusd.amount(1000)); transfer(borrower2, seller, bitusd.amount(1000)); @@ -1295,7 +1295,7 @@ BOOST_AUTO_TEST_CASE(mcfr_blackswan_test) // No margin call at this moment // This order is sufficient to close the first debt position and no GS if margin call fee ratio is 0 - limit_order_id_type sell_mid = create_sell_order(seller, bitusd.amount(1000), core.amount(14900))->id; + limit_order_id_type sell_mid = create_sell_order(seller, bitusd.amount(1000), core.amount(14900))->get_id(); BOOST_CHECK_EQUAL( 1000, sell_mid(db).for_sale.value ); @@ -1314,8 +1314,8 @@ BOOST_AUTO_TEST_CASE(mcfr_blackswan_test) // GS occurs even when there is a good sell order BOOST_CHECK( usd_id(db).bitasset_data(db).has_settlement() ); - BOOST_CHECK( !db.find( call_id ) ); - BOOST_CHECK( !db.find( call2_id ) ); + BOOST_CHECK( !db.find( call_id ) ); + BOOST_CHECK( !db.find( call2_id ) ); // GS price is 1/18, but the first call order has only 15000 thus capped BOOST_CHECK_EQUAL( 15000 + 18000, usd_id(db).bitasset_data(db).settlement_fund.value ); @@ -1345,7 +1345,7 @@ BOOST_AUTO_TEST_CASE(mcfr_blackswan_test_after_hf_core_2481) const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); const auto& core = asset_id_type()(db); - asset_id_type usd_id = bitusd.id; + asset_id_type usd_id = bitusd.get_id(); int64_t init_balance(1000000); @@ -1366,7 +1366,7 @@ BOOST_AUTO_TEST_CASE(mcfr_blackswan_test_after_hf_core_2481) PUSH_TX(db, trx, ~0); } - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -1376,13 +1376,13 @@ BOOST_AUTO_TEST_CASE(mcfr_blackswan_test_after_hf_core_2481) // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7 const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000)); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create another position with 400% collateral, call price is 20/1.75 CORE/USD = 80/7 const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(20000)); - call_order_id_type call2_id = call2.id; + call_order_id_type call2_id = call2.get_id(); // create yet another position with 800% collateral, call price is 40/1.75 CORE/USD = 160/7 const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(40000)); - call_order_id_type call3_id = call3.id; + call_order_id_type call3_id = call3.get_id(); transfer(borrower, seller, 
bitusd.amount(1000)); transfer(borrower2, seller, bitusd.amount(1000)); transfer(borrower3, seller, bitusd.amount(1000)); @@ -1399,7 +1399,7 @@ BOOST_AUTO_TEST_CASE(mcfr_blackswan_test_after_hf_core_2481) // No margin call at this moment // This order is sufficient to close the first debt position and no GS if margin call fee ratio is 0 - limit_order_id_type sell_mid = create_sell_order(seller, bitusd.amount(1000), core.amount(14900))->id; + limit_order_id_type sell_mid = create_sell_order(seller, bitusd.amount(1000), core.amount(14900))->get_id(); BOOST_CHECK_EQUAL( 1000, sell_mid(db).for_sale.value ); @@ -1420,9 +1420,9 @@ BOOST_AUTO_TEST_CASE(mcfr_blackswan_test_after_hf_core_2481) // GS occurs even when there is a good sell order BOOST_CHECK( usd_id(db).bitasset_data(db).has_settlement() ); - BOOST_CHECK( !db.find( call_id ) ); - BOOST_CHECK( !db.find( call2_id ) ); - BOOST_CHECK( !db.find( call3_id ) ); + BOOST_CHECK( !db.find( call_id ) ); + BOOST_CHECK( !db.find( call2_id ) ); + BOOST_CHECK( !db.find( call3_id ) ); // after the core-2481 hard fork, GS price is not 1/18. // * the first call order would pay all collateral. @@ -1464,14 +1464,14 @@ BOOST_AUTO_TEST_CASE(gs_price_test) const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); const auto& core = asset_id_type()(db); - asset_id_type usd_id = bitusd.id; + asset_id_type usd_id = bitusd.get_id(); int64_t init_balance(1000000); transfer(committee_account, borrower_id, asset(init_balance)); transfer(committee_account, borrower2_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -1481,10 +1481,10 @@ BOOST_AUTO_TEST_CASE(gs_price_test) // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7 const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000)); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create another position with 800% collateral, call price is 40/1.75 CORE/USD = 160/7 const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(40000)); - call_order_id_type call2_id = call2.id; + call_order_id_type call2_id = call2.get_id(); transfer(borrower, seller, bitusd.amount(1000)); transfer(borrower2, seller, bitusd.amount(1000)); @@ -1498,7 +1498,7 @@ BOOST_AUTO_TEST_CASE(gs_price_test) // No margin call at this moment // This order is right at MSSP of the first debt position - limit_order_id_type sell_mid = create_sell_order(seller, bitusd.amount(2000), core.amount(30000))->id; + limit_order_id_type sell_mid = create_sell_order(seller, bitusd.amount(2000), core.amount(30000))->get_id(); BOOST_CHECK_EQUAL( 2000, sell_mid(db).for_sale.value ); @@ -1518,8 +1518,8 @@ BOOST_AUTO_TEST_CASE(gs_price_test) { // GS occurs BOOST_CHECK( usd_id(db).bitasset_data(db).has_settlement() ); - BOOST_CHECK( !db.find( call_id ) ); - BOOST_CHECK( !db.find( call2_id ) ); + BOOST_CHECK( !db.find( call_id ) ); + BOOST_CHECK( !db.find( call2_id ) ); // sell order did not change BOOST_CHECK_EQUAL( 2000, sell_mid(db).for_sale.value ); } @@ -1527,7 +1527,7 @@ BOOST_AUTO_TEST_CASE(gs_price_test) { // GS does not occur, call got filled BOOST_CHECK( !usd_id(db).bitasset_data(db).has_settlement() ); - BOOST_CHECK( !db.find( call_id ) ); + BOOST_CHECK( !db.find( call_id ) ); // sell order got half-filled BOOST_CHECK_EQUAL( 1000, sell_mid(db).for_sale.value ); @@ -1572,7 +1572,7 @@ 
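// Editor's note (not part of the upstream patch): the recurring edit in these
// test hunks replaces `obj.id` / `obj->id` with `obj.get_id()` / `obj->get_id()`.
// A self-contained sketch of the assumed rationale, in line with the "explicit
// constructors/conversions" theme of this series: the raw `id` member is the
// generic handle, while get_id() returns the strongly typed id, so a typed id
// variable no longer depends on an implicit conversion. All names below are
// illustrative stand-ins, not the Graphene types themselves.
#include <cstdint>

struct generic_id { uint64_t number = 0; };   // stand-in for object_id_type
struct typed_id   { uint64_t number = 0; };   // stand-in for e.g. call_order_id_type

struct demo_object
{
   generic_id id;                                              // generic handle stored on the object
   typed_id get_id() const { return typed_id{ id.number }; }   // explicit, strongly typed accessor
};

int main()
{
   demo_object o;
   // typed_id before = o.id;     // relied on an implicit generic->typed conversion
   typed_id after = o.get_id();   // explicit accessor, mirroring `call.get_id()` etc. in the hunks above
   return static_cast<int>( after.number );
}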
BOOST_AUTO_TEST_CASE(mcfr_rounding_test) const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); const auto& core = asset_id_type()(db); - asset_id_type usd_id = bitusd.id; + asset_id_type usd_id = bitusd.get_id(); int64_t init_balance(1000000); @@ -1604,10 +1604,10 @@ BOOST_AUTO_TEST_CASE(mcfr_rounding_test) // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7 const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000)); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create another position with 800% collateral, call price is 40/1.75 CORE/USD = 160/7 const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(40000)); - call_order_id_type call2_id = call2.id; + call_order_id_type call2_id = call2.get_id(); transfer(borrower, seller, bitusd.amount(1000)); transfer(borrower2, seller, bitusd.amount(1000)); @@ -1623,7 +1623,7 @@ BOOST_AUTO_TEST_CASE(mcfr_rounding_test) // No margin call at this moment // This order would be matched later - limit_order_id_type sell_mid = create_sell_order(seller, bitusd.amount(1100), core.amount(15451))->id; + limit_order_id_type sell_mid = create_sell_order(seller, bitusd.amount(1100), core.amount(15451))->get_id(); // call_pays_price = (15451 / 1100) * 1100 / (1100-70) = 15451 / 1030 // debt * call_pays_price = 1000 * 15451 / 1030 = 15000.9 @@ -1649,8 +1649,8 @@ BOOST_AUTO_TEST_CASE(mcfr_rounding_test) // blackswan BOOST_CHECK( usd_id(db).bitasset_data(db).has_settlement() ); - BOOST_CHECK( !db.find( call_id ) ); - BOOST_CHECK( !db.find( call2_id ) ); + BOOST_CHECK( !db.find( call_id ) ); + BOOST_CHECK( !db.find( call2_id ) ); int64_t call_pays_to_fund = (15000 * 10 + 10) / 11; BOOST_CHECK_EQUAL( usd_id(db).bitasset_data(db).settlement_fund.value, call_pays_to_fund * 2 ); @@ -1697,8 +1697,8 @@ BOOST_AUTO_TEST_CASE(mcfr_rounding_test) // The first call order should have been filled BOOST_CHECK( !usd_id(db).bitasset_data(db).has_settlement() ); - BOOST_CHECK( !db.find( call_id ) ); - BOOST_REQUIRE( db.find( call2_id ) ); + BOOST_CHECK( !db.find( call_id ) ); + BOOST_REQUIRE( db.find( call2_id ) ); BOOST_CHECK_EQUAL( 100, sell_mid(db).for_sale.value ); @@ -1746,7 +1746,7 @@ BOOST_AUTO_TEST_CASE(target_cr_test_limit_call) transfer(committee_account, borrower_id, asset(init_balance)); transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -1755,10 +1755,10 @@ BOOST_AUTO_TEST_CASE(target_cr_test_limit_call) publish_feed( bitusd, feedproducer, current_feed ); // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7, tcr 170% is lower than 175% const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000), 1700); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7, tcr 200% is higher than 175% const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500), 2000); - call_order_id_type call2_id = call2.id; + call_order_id_type call2_id = call2.get_id(); // create yet another position with 500% collateral, call price is 25/1.75 CORE/USD = 100/7, no tcr const call_order_object& call3 = *borrow( borrower3, 
bitusd.amount(1000), asset(25000)); transfer(borrower, seller, bitusd.amount(1000)); @@ -1787,18 +1787,18 @@ BOOST_AUTO_TEST_CASE(target_cr_test_limit_call) // settlement price = 1/10, mssp = 1/11 // This sell order above MSSP will not be matched with a call - limit_order_id_type sell_high = create_sell_order(seller, bitusd.amount(7), core.amount(78))->id; - BOOST_CHECK_EQUAL( db.find( sell_high )->for_sale.value, 7 ); + limit_order_id_type sell_high = create_sell_order(seller, bitusd.amount(7), core.amount(78))->get_id(); + BOOST_CHECK_EQUAL( db.find( sell_high )->for_sale.value, 7 ); BOOST_CHECK_EQUAL( 2993, get_balance(seller, bitusd) ); BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); // This buy order is too low will not be matched with a sell order - limit_order_id_type buy_low = create_sell_order(buyer, asset(80), bitusd.amount(10))->id; + limit_order_id_type buy_low = create_sell_order(buyer, asset(80), bitusd.amount(10))->get_id(); // This buy order at MSSP will be matched only if no margin call (margin call takes precedence) - limit_order_id_type buy_med = create_sell_order(buyer2, asset(33000), bitusd.amount(3000))->id; + limit_order_id_type buy_med = create_sell_order(buyer2, asset(33000), bitusd.amount(3000))->get_id(); // This buy order above MSSP will be matched with a sell order (limit order with better price takes precedence) - limit_order_id_type buy_high = create_sell_order(buyer3, asset(111), bitusd.amount(10))->id; + limit_order_id_type buy_high = create_sell_order(buyer3, asset(111), bitusd.amount(10))->get_id(); BOOST_CHECK_EQUAL( 0, get_balance(buyer, bitusd) ); BOOST_CHECK_EQUAL( 0, get_balance(buyer2, bitusd) ); @@ -1820,13 +1820,13 @@ BOOST_AUTO_TEST_CASE(target_cr_test_limit_call) BOOST_CHECK( !create_sell_order(seller, bitusd.amount(700*4), core.amount(5900*4) ) ); // firstly it will match with buy_high, at buy_high's price - BOOST_CHECK( !db.find( buy_high ) ); + BOOST_CHECK( !db.find( buy_high ) ); // buy_high pays 111 CORE, receives 10 USD goes to buyer3's balance BOOST_CHECK_EQUAL( 10, get_balance(buyer3, bitusd) ); BOOST_CHECK_EQUAL( init_balance - 111, get_balance(buyer3, core) ); // then it will match with call, at mssp: 1/11 = 1000/11000 - const call_order_object* tmp_call = db.find( call_id ); + const call_order_object* tmp_call = db.find( call_id ); BOOST_CHECK( tmp_call != nullptr ); // call will receive call_to_cover, pay 11*call_to_cover @@ -1841,7 +1841,7 @@ BOOST_AUTO_TEST_CASE(target_cr_test_limit_call) BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); // the limit order then will match with call2, at mssp: 1/11 = 1000/11000 - const call_order_object* tmp_call2 = db.find( call2_id ); + const call_order_object* tmp_call2 = db.find( call2_id ); BOOST_CHECK( tmp_call2 != nullptr ); // call2 will receive call2_to_cover, pay 11*call2_to_cover @@ -1862,14 +1862,14 @@ BOOST_AUTO_TEST_CASE(target_cr_test_limit_call) buy_med_get -= (buy_med_get/100); // minus 1% market fee BOOST_CHECK_EQUAL( buy_med_get.value, get_balance(buyer2, bitusd) ); BOOST_CHECK_EQUAL( init_balance - 33000, get_balance(buyer2, core) ); - BOOST_CHECK_EQUAL( db.find( buy_med )->for_sale.value, 33000-buy_med_pay.value ); + BOOST_CHECK_EQUAL( db.find( buy_med )->for_sale.value, 33000-buy_med_pay.value ); // call3 is not in margin call territory so won't be matched BOOST_CHECK_EQUAL( 1000, call3.debt.value ); BOOST_CHECK_EQUAL( 25000, call3.collateral.value ); // buy_low's price is too low that won't be matched - BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 ); + 
BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 ); // check seller balance BOOST_CHECK_EQUAL( 193, get_balance(seller, bitusd) ); // 3000 - 7 - 700*4 @@ -1877,13 +1877,13 @@ BOOST_AUTO_TEST_CASE(target_cr_test_limit_call) // Cancel buy_med cancel_limit_order( buy_med(db) ); - BOOST_CHECK( !db.find( buy_med ) ); + BOOST_CHECK( !db.find( buy_med ) ); BOOST_CHECK_EQUAL( buy_med_get.value, get_balance(buyer2, bitusd) ); BOOST_CHECK_EQUAL( init_balance - buy_med_pay.value, get_balance(buyer2, core) ); // Create another sell order slightly below the call price, won't fill - limit_order_id_type sell_med = create_sell_order( seller, bitusd.amount(7), core.amount(59) )->id; - BOOST_CHECK_EQUAL( db.find( sell_med )->for_sale.value, 7 ); + limit_order_id_type sell_med = create_sell_order( seller, bitusd.amount(7), core.amount(59) )->get_id(); + BOOST_CHECK_EQUAL( db.find( sell_med )->for_sale.value, 7 ); // check seller balance BOOST_CHECK_EQUAL( 193-7, get_balance(seller, bitusd) ); BOOST_CHECK_EQUAL( 30801, get_balance(seller, core) ); @@ -1893,7 +1893,7 @@ BOOST_AUTO_TEST_CASE(target_cr_test_limit_call) BOOST_CHECK_EQUAL( 25000, call3.collateral.value ); // buy_low's price is too low that won't be matched - BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 ); + BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 ); // generate a block generate_block(); @@ -1930,7 +1930,7 @@ BOOST_AUTO_TEST_CASE(target_cr_test_call_limit) transfer(committee_account, borrower_id, asset(init_balance)); transfer(committee_account, borrower2_id, asset(init_balance)); transfer(committee_account, borrower3_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; @@ -1939,10 +1939,10 @@ BOOST_AUTO_TEST_CASE(target_cr_test_call_limit) publish_feed( bitusd, feedproducer, current_feed ); // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7, tcr 170% is lower than 175% const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000), 1700); - call_order_id_type call_id = call.id; + call_order_id_type call_id = call.get_id(); // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7, tcr 200% is higher than 175% const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500), 2000); - call_order_id_type call2_id = call2.id; + call_order_id_type call2_id = call2.get_id(); // create yet another position with 500% collateral, call price is 25/1.75 CORE/USD = 100/7, no tcr const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(25000)); transfer(borrower, seller, bitusd.amount(1000)); @@ -1966,21 +1966,21 @@ BOOST_AUTO_TEST_CASE(target_cr_test_call_limit) BOOST_CHECK_EQUAL( 0, get_balance(borrower3, bitusd) ); // This sell order above MSSP will not be matched with a call - limit_order_id_type sell_high = create_sell_order(seller, bitusd.amount(7), core.amount(78))->id; - BOOST_CHECK_EQUAL( db.find( sell_high )->for_sale.value, 7 ); + limit_order_id_type sell_high = create_sell_order(seller, bitusd.amount(7), core.amount(78))->get_id(); + BOOST_CHECK_EQUAL( db.find( sell_high )->for_sale.value, 7 ); BOOST_CHECK_EQUAL( 2993, get_balance(seller, bitusd) ); BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); // This buy order is too low will not be matched with a sell order - limit_order_id_type buy_low = create_sell_order(buyer, asset(80), 
bitusd.amount(10))->id; + limit_order_id_type buy_low = create_sell_order(buyer, asset(80), bitusd.amount(10))->get_id(); BOOST_CHECK_EQUAL( 0, get_balance(buyer, bitusd) ); BOOST_CHECK_EQUAL( init_balance - 80, get_balance(buyer, core) ); // Create a sell order which will be matched with several call orders later, price 1/9 - limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(500), core.amount(4500) )->id; - BOOST_CHECK_EQUAL( db.find( sell_id )->for_sale.value, 500 ); + limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(500), core.amount(4500) )->get_id(); + BOOST_CHECK_EQUAL( db.find( sell_id )->for_sale.value, 500 ); // prepare price feed to get call and call2 (but not call3) into margin call territory current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(10); @@ -1999,7 +1999,7 @@ BOOST_AUTO_TEST_CASE(target_cr_test_call_limit) // settlement price = 1/10, mssp = 1/11 // firstly the limit order will match with call, at limit order's price: 1/9 - const call_order_object* tmp_call = db.find( call_id ); + const call_order_object* tmp_call = db.find( call_id ); BOOST_CHECK( tmp_call != nullptr ); // call will receive call_to_cover, pay 9*call_to_cover @@ -2014,7 +2014,7 @@ BOOST_AUTO_TEST_CASE(target_cr_test_call_limit) BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); // the limit order then will match with call2, at limit order's price: 1/9 - const call_order_object* tmp_call2 = db.find( call2_id ); + const call_order_object* tmp_call2 = db.find( call2_id ); BOOST_CHECK( tmp_call2 != nullptr ); // if the limit is big enough, call2 will receive call2_to_cover, pay 11*call2_to_cover @@ -2033,14 +2033,14 @@ BOOST_AUTO_TEST_CASE(target_cr_test_call_limit) BOOST_CHECK_EQUAL( 25000, call3.collateral.value ); // sell_id is completely filled - BOOST_CHECK( !db.find( sell_id ) ); + BOOST_CHECK( !db.find( sell_id ) ); // check seller balance BOOST_CHECK_EQUAL( 2493, get_balance(seller, bitusd) ); // 3000 - 7 - 500 BOOST_CHECK_EQUAL( 4500, get_balance(seller, core) ); // 500*9 // buy_low's price is too low that won't be matched - BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 ); + BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 ); // generate a block generate_block(); @@ -2066,7 +2066,7 @@ BOOST_AUTO_TEST_CASE(mcr_bug_increase_before1270) transfer(committee_account, borrower_id, asset(init_balance)); transfer(committee_account, borrower2_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.settlement_price = bitusd.amount( 100 ) / core.amount(100); @@ -2075,9 +2075,9 @@ BOOST_AUTO_TEST_CASE(mcr_bug_increase_before1270) publish_feed( bitusd, feedproducer, current_feed ); const call_order_object& b1 = *borrow( borrower, bitusd.amount(1000), asset(1800)); - auto b1_id = b1.id; + auto b1_id = b1.get_id(); const call_order_object& b2 = *borrow( borrower2, bitusd.amount(1000), asset(2000) ); - auto b2_id = b2.id; + auto b2_id = b2.get_id(); BOOST_CHECK_EQUAL( get_balance( borrower, bitusd ), 1000 ); BOOST_CHECK_EQUAL( get_balance( borrower2, bitusd ), 1000 ); @@ -2093,8 +2093,8 @@ BOOST_AUTO_TEST_CASE(mcr_bug_increase_before1270) BOOST_CHECK_EQUAL( get_balance( borrower , core ), 998200 ); BOOST_CHECK_EQUAL( get_balance( borrower2, core ), 998000 ); - BOOST_CHECK( db.find( b1_id ) ); - BOOST_CHECK( db.find( b2_id ) ); + BOOST_CHECK( db.find( b1_id ) ); + BOOST_CHECK( db.find( b2_id ) ); // attempt to trade 
the margin call create_sell_order( borrower2, bitusd.amount(1000), core.amount(1100) ); @@ -2107,8 +2107,8 @@ BOOST_AUTO_TEST_CASE(mcr_bug_increase_before1270) print_market(bitusd.symbol, core.symbol); // both calls are still there, no margin call, mcr bug - BOOST_CHECK( db.find( b1_id ) ); - BOOST_CHECK( db.find( b2_id ) ); + BOOST_CHECK( db.find( b1_id ) ); + BOOST_CHECK( db.find( b2_id ) ); } FC_LOG_AND_RETHROW() } @@ -2134,7 +2134,7 @@ BOOST_AUTO_TEST_CASE(mcr_bug_increase_after1270) transfer(committee_account, borrower_id, asset(init_balance)); transfer(committee_account, borrower2_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.settlement_price = bitusd.amount( 100 ) / core.amount(100); @@ -2143,9 +2143,9 @@ BOOST_AUTO_TEST_CASE(mcr_bug_increase_after1270) publish_feed( bitusd, feedproducer, current_feed ); const call_order_object& b1 = *borrow( borrower, bitusd.amount(1000), asset(1800)); - auto b1_id = b1.id; + auto b1_id = b1.get_id(); const call_order_object& b2 = *borrow( borrower2, bitusd.amount(1000), asset(2000) ); - auto b2_id = b2.id; + auto b2_id = b2.get_id(); BOOST_CHECK_EQUAL( get_balance( borrower, bitusd ), 1000 ); BOOST_CHECK_EQUAL( get_balance( borrower2, bitusd ), 1000 ); @@ -2161,8 +2161,8 @@ BOOST_AUTO_TEST_CASE(mcr_bug_increase_after1270) BOOST_CHECK_EQUAL( get_balance( borrower , core ), 998200 ); BOOST_CHECK_EQUAL( get_balance( borrower2, core ), 998000 ); - BOOST_CHECK( db.find( b1_id ) ); - BOOST_CHECK( db.find( b2_id ) ); + BOOST_CHECK( db.find( b1_id ) ); + BOOST_CHECK( db.find( b2_id ) ); // attempt to trade the margin call create_sell_order( borrower2, bitusd.amount(1000), core.amount(1100) ); @@ -2175,8 +2175,8 @@ BOOST_AUTO_TEST_CASE(mcr_bug_increase_after1270) print_market(bitusd.symbol, core.symbol); // b1 is margin called - BOOST_CHECK( ! db.find( b1_id ) ); - BOOST_CHECK( db.find( b2_id ) ); + BOOST_CHECK( ! 
db.find( b1_id ) ); + BOOST_CHECK( db.find( b2_id ) ); } FC_LOG_AND_RETHROW() } @@ -2200,7 +2200,7 @@ BOOST_AUTO_TEST_CASE(mcr_bug_decrease_before1270) transfer(committee_account, borrower_id, asset(init_balance)); transfer(committee_account, borrower2_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.settlement_price = bitusd.amount( 100 ) / core.amount(100); @@ -2209,9 +2209,9 @@ BOOST_AUTO_TEST_CASE(mcr_bug_decrease_before1270) publish_feed( bitusd, feedproducer, current_feed ); const call_order_object& b1 = *borrow( borrower, bitusd.amount(1000), asset(1800)); - auto b1_id = b1.id; + auto b1_id = b1.get_id(); const call_order_object& b2 = *borrow( borrower2, bitusd.amount(1000), asset(2000) ); - auto b2_id = b2.id; + auto b2_id = b2.get_id(); BOOST_CHECK_EQUAL( get_balance( borrower, bitusd ), 1000 ); BOOST_CHECK_EQUAL( get_balance( borrower2, bitusd ), 1000 ); @@ -2231,8 +2231,8 @@ BOOST_AUTO_TEST_CASE(mcr_bug_decrease_before1270) BOOST_CHECK_EQUAL( get_balance( borrower , core ), 998200 ); BOOST_CHECK_EQUAL( get_balance( borrower2, core ), 998000 ); - BOOST_CHECK( db.find( b1_id ) ); - BOOST_CHECK( db.find( b2_id ) ); + BOOST_CHECK( db.find( b1_id ) ); + BOOST_CHECK( db.find( b2_id ) ); // attempt to trade the margin call create_sell_order( borrower2, bitusd.amount(1000), core.amount(1100) ); @@ -2245,8 +2245,8 @@ BOOST_AUTO_TEST_CASE(mcr_bug_decrease_before1270) print_market(bitusd.symbol, core.symbol); // margin call at b1, mcr bug - BOOST_CHECK( !db.find( b1_id ) ); - BOOST_CHECK( db.find( b2_id ) ); + BOOST_CHECK( !db.find( b1_id ) ); + BOOST_CHECK( db.find( b2_id ) ); } FC_LOG_AND_RETHROW() } @@ -2273,7 +2273,7 @@ BOOST_AUTO_TEST_CASE(mcr_bug_decrease_after1270) transfer(committee_account, borrower_id, asset(init_balance)); transfer(committee_account, borrower2_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.settlement_price = bitusd.amount( 100 ) / core.amount(100); @@ -2282,9 +2282,9 @@ BOOST_AUTO_TEST_CASE(mcr_bug_decrease_after1270) publish_feed( bitusd, feedproducer, current_feed ); const call_order_object& b1 = *borrow( borrower, bitusd.amount(1000), asset(1800)); - auto b1_id = b1.id; + auto b1_id = b1.get_id(); const call_order_object& b2 = *borrow( borrower2, bitusd.amount(1000), asset(2000) ); - auto b2_id = b2.id; + auto b2_id = b2.get_id(); BOOST_CHECK_EQUAL( get_balance( borrower, bitusd ), 1000 ); BOOST_CHECK_EQUAL( get_balance( borrower2, bitusd ), 1000 ); @@ -2304,8 +2304,8 @@ BOOST_AUTO_TEST_CASE(mcr_bug_decrease_after1270) BOOST_CHECK_EQUAL( get_balance( borrower , core ), 998200 ); BOOST_CHECK_EQUAL( get_balance( borrower2, core ), 998000 ); - BOOST_CHECK( db.find( b1_id ) ); - BOOST_CHECK( db.find( b2_id ) ); + BOOST_CHECK( db.find( b1_id ) ); + BOOST_CHECK( db.find( b2_id ) ); // attempt to trade the margin call create_sell_order( borrower2, bitusd.amount(1000), core.amount(1100) ); @@ -2318,8 +2318,8 @@ BOOST_AUTO_TEST_CASE(mcr_bug_decrease_after1270) print_market(bitusd.symbol, core.symbol); // both calls are there, no margin call, good - BOOST_CHECK( db.find( b1_id ) ); - BOOST_CHECK( db.find( b2_id ) ); + BOOST_CHECK( db.find( b1_id ) ); + BOOST_CHECK( db.find( b2_id ) ); } FC_LOG_AND_RETHROW() } @@ -2334,7 +2334,7 @@ BOOST_AUTO_TEST_CASE(mcr_bug_cross1270) const asset_object& core = 
get_asset(GRAPHENE_SYMBOL); const asset_object& bitusd = get_asset("USDBIT"); - const asset_id_type bitusd_id = bitusd.id; + const asset_id_type bitusd_id = bitusd.get_id(); const account_object& feedproducer = get_account("feedproducer"); // feed is expired @@ -2363,8 +2363,8 @@ BOOST_AUTO_TEST_CASE(mcr_bug_cross1270) print_market(asset_id_type(1)(db).symbol, asset_id_type()(db).symbol); // call b1 not there anymore - BOOST_CHECK( !db.find( call_order_id_type() ) ); - BOOST_CHECK( db.find( call_order_id_type(1) ) ); + BOOST_CHECK( !db.find( call_order_id_type() ) ); + BOOST_CHECK( db.find( call_order_id_type(1) ) ); } FC_LOG_AND_RETHROW() } diff --git a/tests/tests/network_broadcast_api_tests.cpp b/tests/tests/network_broadcast_api_tests.cpp index 17dd7e8857..0a80d1409b 100644 --- a/tests/tests/network_broadcast_api_tests.cpp +++ b/tests/tests/network_broadcast_api_tests.cpp @@ -49,7 +49,7 @@ BOOST_AUTO_TEST_CASE( broadcast_transaction_with_callback_test ) { }; fc::ecc::private_key cid_key = fc::ecc::private_key::regenerate( fc::digest("key") ); - const account_id_type cid_id = create_account( "cid", cid_key.get_public_key() ).id; + const account_id_type cid_id = create_account( "cid", cid_key.get_public_key() ).get_id(); fund( cid_id(db) ); auto nb_api = std::make_shared< graphene::app::network_broadcast_api >( app ); @@ -85,7 +85,7 @@ BOOST_AUTO_TEST_CASE( broadcast_transaction_disabled_p2p_test ) { }; fc::ecc::private_key cid_key = fc::ecc::private_key::regenerate( fc::digest("key") ); - const account_id_type cid_id = create_account( "cid", cid_key.get_public_key() ).id; + const account_id_type cid_id = create_account( "cid", cid_key.get_public_key() ).get_id(); fund( cid_id(db) ); auto nb_api = std::make_shared< graphene::app::network_broadcast_api >( app ); @@ -109,7 +109,7 @@ BOOST_AUTO_TEST_CASE( broadcast_transaction_too_large ) { try { fc::ecc::private_key cid_key = fc::ecc::private_key::regenerate( fc::digest("key") ); - const account_id_type cid_id = create_account( "cid", cid_key.get_public_key() ).id; + const account_id_type cid_id = create_account( "cid", cid_key.get_public_key() ).get_id(); fund( cid_id(db) ); auto nb_api = std::make_shared< graphene::app::network_broadcast_api >( app ); diff --git a/tests/tests/operation_tests.cpp b/tests/tests/operation_tests.cpp index 81f678d3c2..88f3d9e012 100644 --- a/tests/tests/operation_tests.cpp +++ b/tests/tests/operation_tests.cpp @@ -85,12 +85,12 @@ BOOST_AUTO_TEST_CASE( call_order_update_test ) try { ACTORS((dan)(sam)); - const auto& bitusd = create_bitasset("USDBIT", sam.id); + const auto& bitusd = create_bitasset("USDBIT", sam.get_id()); const auto& core = asset_id_type()(db); transfer(committee_account, dan_id, asset(10000000)); transfer(committee_account, sam_id, asset(10000000)); - update_feed_producers( bitusd, {sam.id} ); + update_feed_producers( bitusd, {sam.get_id()} ); price_feed current_feed; current_feed.settlement_price = bitusd.amount( 100 ) / core.amount(100); current_feed.maintenance_collateral_ratio = 1750; // need to set this explicitly, testnet has a different default @@ -192,12 +192,12 @@ BOOST_AUTO_TEST_CASE( old_call_order_update_test_after_hardfork_583 ) set_expiration( db, trx ); ACTORS((dan)(sam)); - const auto& bitusd = create_bitasset("USDBIT", sam.id); + const auto& bitusd = create_bitasset("USDBIT", sam.get_id()); const auto& core = asset_id_type()(db); transfer(committee_account, dan_id, asset(10000000)); transfer(committee_account, sam_id, asset(10000000)); - update_feed_producers( bitusd, 
{sam.id} ); + update_feed_producers( bitusd, {sam.get_id()} ); price_feed current_feed; current_feed.settlement_price = bitusd.amount( 100 ) / core.amount(100); current_feed.maintenance_collateral_ratio = 1750; // need to set this explicitly, testnet has a different default @@ -296,15 +296,15 @@ BOOST_AUTO_TEST_CASE( call_order_update_asset_auth_test ) ACTORS((dan)(sam)); const auto& backasset = create_user_issued_asset("BACK", sam, white_list | charge_market_fee); - asset_id_type back_id = backasset.id; + asset_id_type back_id = backasset.get_id(); - const auto& bitusd = create_bitasset("USDBIT", sam.id, 10, white_list | charge_market_fee, 3, back_id); - asset_id_type usd_id = bitusd.id; + const auto& bitusd = create_bitasset("USDBIT", sam.get_id(), 10, white_list | charge_market_fee, 3, back_id); + asset_id_type usd_id = bitusd.get_id(); issue_uia( dan_id, backasset.amount(10000000) ); issue_uia( sam_id, backasset.amount(10000000) ); - update_feed_producers( bitusd, {sam.id} ); + update_feed_producers( bitusd, {sam.get_id()} ); price_feed current_feed; current_feed.core_exchange_rate = bitusd.amount( 100 ) / asset( 100 ); @@ -453,15 +453,15 @@ BOOST_AUTO_TEST_CASE( asset_settle_operation_asset_auth_test ) ACTORS((dan)(sam)); const auto& backasset = create_user_issued_asset("BACK", sam, white_list | charge_market_fee); - asset_id_type back_id = backasset.id; + asset_id_type back_id = backasset.get_id(); - const auto& bitusd = create_bitasset("USDBIT", sam.id, 10, white_list | charge_market_fee, 3, back_id); - asset_id_type usd_id = bitusd.id; + const auto& bitusd = create_bitasset("USDBIT", sam.get_id(), 10, white_list | charge_market_fee, 3, back_id); + asset_id_type usd_id = bitusd.get_id(); issue_uia( dan_id, backasset.amount(10000000) ); issue_uia( sam_id, backasset.amount(10000000) ); - update_feed_producers( bitusd, {sam.id} ); + update_feed_producers( bitusd, {sam.get_id()} ); price_feed current_feed; current_feed.core_exchange_rate = bitusd.amount( 100 ) / asset( 100 ); @@ -619,15 +619,15 @@ BOOST_AUTO_TEST_CASE( bid_collateral_operation_asset_auth_test ) ACTORS((dan)(sam)); const auto& backasset = create_user_issued_asset("BACK", sam, white_list | charge_market_fee); - asset_id_type back_id = backasset.id; + asset_id_type back_id = backasset.get_id(); - const auto& bitusd = create_bitasset("USDBIT", sam.id, 10, white_list | charge_market_fee, 3, back_id); - asset_id_type usd_id = bitusd.id; + const auto& bitusd = create_bitasset("USDBIT", sam.get_id(), 10, white_list | charge_market_fee, 3, back_id); + asset_id_type usd_id = bitusd.get_id(); issue_uia( dan_id, backasset.amount(10000000) ); issue_uia( sam_id, backasset.amount(10000000) ); - update_feed_producers( bitusd, {sam.id} ); + update_feed_producers( bitusd, {sam.get_id()} ); price_feed current_feed; current_feed.core_exchange_rate = bitusd.amount( 100 ) / asset( 100 ); @@ -859,9 +859,9 @@ BOOST_AUTO_TEST_CASE( bsip77_hardfork_time_and_param_valid_range_test ) GRAPHENE_MAX_SHARE_SUPPLY, 32001 ), fc::exception ); // Can create a bitasset without ICR - const auto& bitusd = create_bitasset( "USDBIT", sam.id, 100, charge_market_fee, 2, {}, + const auto& bitusd = create_bitasset( "USDBIT", sam.get_id(), 100, charge_market_fee, 2, {}, GRAPHENE_MAX_SHARE_SUPPLY ); - asset_id_type usd_id = bitusd.id; + asset_id_type usd_id = bitusd.get_id(); // helper function for setting ICR for an asset auto set_icr_for_asset = [&](asset_id_type aid, optional icr) { @@ -958,15 +958,15 @@ BOOST_AUTO_TEST_CASE( 
bsip77_hardfork_time_and_param_valid_range_test ) BOOST_CHECK_THROW( create_bitasset( "USDBITB", sam_id, 32001, charge_market_fee, 2, {}, GRAPHENE_MAX_SHARE_SUPPLY, 0 ), fc::exception ); // Able to create a bitasset with a valid ICR - asset_id_type usdc_id = create_bitasset( "USDBITC", sam.id, 100, charge_market_fee, 2, {}, - GRAPHENE_MAX_SHARE_SUPPLY, 1001 ).id; - asset_id_type usdd_id = create_bitasset( "USDBITD", sam.id, 100, charge_market_fee, 2, {}, - GRAPHENE_MAX_SHARE_SUPPLY, 1750 ).id; - asset_id_type usde_id = create_bitasset( "USDBITE", sam.id, 100, charge_market_fee, 2, {}, - GRAPHENE_MAX_SHARE_SUPPLY, 32000 ).id; + asset_id_type usdc_id = create_bitasset( "USDBITC", sam.get_id(), 100, charge_market_fee, 2, {}, + GRAPHENE_MAX_SHARE_SUPPLY, 1001 ).get_id(); + asset_id_type usdd_id = create_bitasset( "USDBITD", sam.get_id(), 100, charge_market_fee, 2, {}, + GRAPHENE_MAX_SHARE_SUPPLY, 1750 ).get_id(); + asset_id_type usde_id = create_bitasset( "USDBITE", sam.get_id(), 100, charge_market_fee, 2, {}, + GRAPHENE_MAX_SHARE_SUPPLY, 32000 ).get_id(); // Able to create a bitasset without ICR - asset_id_type usdf_id = create_bitasset( "USDBITF", sam.id, 100, charge_market_fee, 2, {}, - GRAPHENE_MAX_SHARE_SUPPLY, {} ).id; + asset_id_type usdf_id = create_bitasset( "USDBITF", sam.get_id(), 100, charge_market_fee, 2, {}, + GRAPHENE_MAX_SHARE_SUPPLY, {} ).get_id(); BOOST_CHECK( usdc_id(db).bitasset_data(db).options.extensions.value.initial_collateral_ratio == 1001 ); BOOST_CHECK( usdd_id(db).bitasset_data(db).options.extensions.value.initial_collateral_ratio == 1750 ); @@ -1030,14 +1030,14 @@ BOOST_AUTO_TEST_CASE( more_call_order_update_test ) try { ACTORS((dan)(sam)(alice)(bob)); - const auto& bitusd = create_bitasset("USDBIT", sam.id); + const auto& bitusd = create_bitasset("USDBIT", sam.get_id()); const auto& core = asset_id_type()(db); transfer(committee_account, dan_id, asset(10000000)); transfer(committee_account, sam_id, asset(10000000)); transfer(committee_account, alice_id, asset(10000000)); transfer(committee_account, bob_id, asset(10000000)); - update_feed_producers( bitusd, {sam.id} ); + update_feed_producers( bitusd, {sam.get_id()} ); price_feed current_feed; current_feed.settlement_price = bitusd.amount( 100 ) / core.amount(100); current_feed.maintenance_collateral_ratio = 1750; // need to set this explicitly, testnet has a different default @@ -1055,7 +1055,7 @@ BOOST_AUTO_TEST_CASE( more_call_order_update_test ) BOOST_REQUIRE_EQUAL( get_balance( alice, core ), 10000000 - 400000 ); BOOST_TEST_MESSAGE( "alice place an order to sell usd at 1.05" ); - const limit_order_id_type alice_sell_id = create_sell_order( alice, bitusd.amount(1000), core.amount(1050) )->id; + const limit_order_id_type alice_sell_id = create_sell_order( alice, bitusd.amount(1000), core.amount(1050) )->get_id(); BOOST_REQUIRE_EQUAL( get_balance( alice, bitusd ), 100000 - 1000 ); BOOST_REQUIRE_EQUAL( get_balance( alice, core ), 10000000 - 400000 ); @@ -1070,7 +1070,7 @@ BOOST_AUTO_TEST_CASE( more_call_order_update_test ) BOOST_REQUIRE_EQUAL( get_balance( alice, core ), 10000000 - 400000 + 105 ); BOOST_TEST_MESSAGE( "bob attempting to borrow using 2x collateral at 1:1 price now that there is a valid order" ); - const call_order_id_type bob_call_id = borrow( bob, bitusd.amount(100), asset(200))->id; + const call_order_id_type bob_call_id = borrow( bob, bitusd.amount(100), asset(200))->get_id(); BOOST_REQUIRE_EQUAL( get_balance( bob, bitusd ), 100 + 100 ); BOOST_REQUIRE_EQUAL( get_balance( bob, core ), 10000000 - 
105 - 200 ); @@ -1083,7 +1083,7 @@ BOOST_AUTO_TEST_CASE( more_call_order_update_test ) BOOST_REQUIRE_EQUAL( get_balance( bob, core ), 10000000 - 105 - 105 ); BOOST_REQUIRE_EQUAL( get_balance( alice, bitusd ), 100000 - 1000 ); BOOST_REQUIRE_EQUAL( get_balance( alice, core ), 10000000 - 400000 + 105 + 105 ); - BOOST_CHECK( !db.find( bob_call_id ) ); + BOOST_CHECK( !db.find( bob_call_id ) ); BOOST_TEST_MESSAGE( "alice cancel sell order" ); cancel_limit_order( alice_sell_id(db) ); @@ -1140,14 +1140,14 @@ BOOST_AUTO_TEST_CASE( more_call_order_update_test_after_hardfork_583 ) set_expiration( db, trx ); ACTORS((dan)(sam)(alice)(bob)); - const auto& bitusd = create_bitasset("USDBIT", sam.id); + const auto& bitusd = create_bitasset("USDBIT", sam.get_id()); const auto& core = asset_id_type()(db); transfer(committee_account, dan_id, asset(10000000)); transfer(committee_account, sam_id, asset(10000000)); transfer(committee_account, alice_id, asset(10000000)); transfer(committee_account, bob_id, asset(10000000)); - update_feed_producers( bitusd, {sam.id} ); + update_feed_producers( bitusd, {sam.get_id()} ); price_feed current_feed; current_feed.settlement_price = bitusd.amount( 100 ) / core.amount(100); current_feed.maintenance_collateral_ratio = 1750; // need to set this explicitly, testnet has a different default @@ -1165,7 +1165,7 @@ BOOST_AUTO_TEST_CASE( more_call_order_update_test_after_hardfork_583 ) BOOST_REQUIRE_EQUAL( get_balance( alice, core ), 10000000 - 400000 ); BOOST_TEST_MESSAGE( "alice place an order to sell usd at 1.05" ); - const limit_order_id_type alice_sell_id = create_sell_order( alice, bitusd.amount(1000), core.amount(1050) )->id; + const limit_order_id_type alice_sell_id = create_sell_order( alice, bitusd.amount(1000), core.amount(1050) )->get_id(); BOOST_REQUIRE_EQUAL( get_balance( alice, bitusd ), 100000 - 1000 ); BOOST_REQUIRE_EQUAL( get_balance( alice, core ), 10000000 - 400000 ); @@ -1180,7 +1180,7 @@ BOOST_AUTO_TEST_CASE( more_call_order_update_test_after_hardfork_583 ) BOOST_REQUIRE_EQUAL( get_balance( alice, core ), 10000000 - 400000 + 105 ); BOOST_TEST_MESSAGE( "bob attempting to borrow using 2x collateral at 1:1 price now that there is a valid order" ); - const call_order_id_type bob_call_id = borrow( bob, bitusd.amount(100), asset(200))->id; + const call_order_id_type bob_call_id = borrow( bob, bitusd.amount(100), asset(200))->get_id(); BOOST_REQUIRE_EQUAL( get_balance( bob, bitusd ), 100 + 100 ); BOOST_REQUIRE_EQUAL( get_balance( bob, core ), 10000000 - 105 - 200 ); @@ -1193,7 +1193,7 @@ BOOST_AUTO_TEST_CASE( more_call_order_update_test_after_hardfork_583 ) BOOST_REQUIRE_EQUAL( get_balance( bob, core ), 10000000 - 105 - 105 ); BOOST_REQUIRE_EQUAL( get_balance( alice, bitusd ), 100000 - 1000 ); BOOST_REQUIRE_EQUAL( get_balance( alice, core ), 10000000 - 400000 + 105 + 105 ); - BOOST_CHECK( !db.find( bob_call_id ) ); + BOOST_CHECK( !db.find( bob_call_id ) ); BOOST_TEST_MESSAGE( "alice cancel sell order" ); cancel_limit_order( alice_sell_id(db) ); @@ -1260,11 +1260,11 @@ BOOST_AUTO_TEST_CASE( more_call_order_update_test_after_hardfork_bsip77_when_icr set_expiration( db, trx ); ACTORS((dan)(sam)(alice)(bob)); - const auto& bitusd = create_bitasset( "USDBIT", sam.id, 100, charge_market_fee, 2, {}, + const auto& bitusd = create_bitasset( "USDBIT", sam.get_id(), 100, charge_market_fee, 2, {}, GRAPHENE_MAX_SHARE_SUPPLY, 1050 ); // ICR = 1.05 const auto& core = asset_id_type()(db); - asset_id_type usd_id = bitusd.id; + asset_id_type usd_id = bitusd.get_id(); // helper 
function for setting ICR for an asset auto set_icr_for_asset = [&](asset_id_type aid, optional icr) { @@ -1286,7 +1286,7 @@ BOOST_AUTO_TEST_CASE( more_call_order_update_test_after_hardfork_bsip77_when_icr transfer(committee_account, sam_id, asset(10000000)); transfer(committee_account, alice_id, asset(10000000)); transfer(committee_account, bob_id, asset(10000000)); - update_feed_producers( bitusd, {sam.id} ); + update_feed_producers( bitusd, {sam.get_id()} ); price_feed current_feed; current_feed.settlement_price = bitusd.amount( 100 ) / core.amount(100); current_feed.maintenance_collateral_ratio = 1750; // need to set this explicitly, testnet has a different default @@ -1342,7 +1342,7 @@ BOOST_AUTO_TEST_CASE( more_call_order_update_test_after_hardfork_bsip77_when_icr BOOST_TEST_MESSAGE( "ICR 1.85, MCR 1.75, Alice CR 4.0000" ); BOOST_TEST_MESSAGE( "alice place an order to sell usd at 1.05" ); - const limit_order_id_type alice_sell_id = create_sell_order( alice, bitusd.amount(1000), core.amount(1050) )->id; + const limit_order_id_type alice_sell_id = create_sell_order( alice, bitusd.amount(1000), core.amount(1050) )->get_id(); BOOST_REQUIRE_EQUAL( get_balance( alice, bitusd ), 100000 - 1000 ); BOOST_REQUIRE_EQUAL( get_balance( alice, core ), 10000000 - 400000 ); @@ -1357,7 +1357,7 @@ BOOST_AUTO_TEST_CASE( more_call_order_update_test_after_hardfork_bsip77_when_icr BOOST_REQUIRE_EQUAL( get_balance( alice, core ), 10000000 - 400000 + 105 ); BOOST_TEST_MESSAGE( "bob attempting to borrow using 2x collateral at 1:1 price now that there is a valid order" ); - const call_order_id_type bob_call_id = borrow( bob, bitusd.amount(100), asset(200))->id; + const call_order_id_type bob_call_id = borrow( bob, bitusd.amount(100), asset(200))->get_id(); BOOST_REQUIRE_EQUAL( get_balance( bob, bitusd ), 100 + 100 ); BOOST_REQUIRE_EQUAL( get_balance( bob, core ), 10000000 - 105 - 200 ); @@ -1370,7 +1370,7 @@ BOOST_AUTO_TEST_CASE( more_call_order_update_test_after_hardfork_bsip77_when_icr BOOST_REQUIRE_EQUAL( get_balance( bob, core ), 10000000 - 105 - 105 ); BOOST_REQUIRE_EQUAL( get_balance( alice, bitusd ), 100000 - 1000 ); BOOST_REQUIRE_EQUAL( get_balance( alice, core ), 10000000 - 400000 + 105 + 105 ); - BOOST_CHECK( !db.find( bob_call_id ) ); + BOOST_CHECK( !db.find( bob_call_id ) ); BOOST_TEST_MESSAGE( "alice cancel sell order" ); cancel_limit_order( alice_sell_id(db) ); @@ -1419,7 +1419,7 @@ BOOST_AUTO_TEST_CASE( more_call_order_update_test_after_hardfork_bsip77_when_icr BOOST_TEST_MESSAGE( "ICR 1.85, MCR 1.75, Alice CR 2.222222" ); BOOST_TEST_MESSAGE( "alice adding more collateral should be allowed" ); - const call_order_id_type alice_call_id = borrow( alice, bitusd.amount(0), asset(1))->id; + const call_order_id_type alice_call_id = borrow( alice, bitusd.amount(0), asset(1))->get_id(); BOOST_CHECK_EQUAL( alice_call_id(db).collateral.value, 400000 + 1 ); BOOST_CHECK_EQUAL( alice_call_id(db).debt.value, 100000 ); BOOST_TEST_MESSAGE( "ICR 1.85, MCR 1.75, Alice CR 2.222228" ); @@ -1461,7 +1461,7 @@ BOOST_AUTO_TEST_CASE( more_call_order_update_test_after_hardfork_bsip77_when_icr set_expiration( db, trx ); ACTORS((dan)(sam)(alice)(bob)); - const auto& bitusd = create_bitasset( "USDBIT", sam.id, 100, charge_market_fee, 2, {}, + const auto& bitusd = create_bitasset( "USDBIT", sam.get_id(), 100, charge_market_fee, 2, {}, GRAPHENE_MAX_SHARE_SUPPLY, {} ); // ICR is not set const auto& core = asset_id_type()(db); @@ -1469,7 +1469,7 @@ BOOST_AUTO_TEST_CASE( more_call_order_update_test_after_hardfork_bsip77_when_icr 
transfer(committee_account, sam_id, asset(10000000)); transfer(committee_account, alice_id, asset(10000000)); transfer(committee_account, bob_id, asset(10000000)); - update_feed_producers( bitusd, {sam.id} ); + update_feed_producers( bitusd, {sam.get_id()} ); price_feed current_feed; current_feed.settlement_price = bitusd.amount( 100 ) / core.amount(100); current_feed.maintenance_collateral_ratio = 1750; // need to set this explicitly, testnet has a different default @@ -1525,7 +1525,7 @@ BOOST_AUTO_TEST_CASE( more_call_order_update_test_after_hardfork_bsip77_when_icr BOOST_TEST_MESSAGE( "ICR 1.85, MCR 1.75, Alice CR 4.0000" ); BOOST_TEST_MESSAGE( "alice place an order to sell usd at 1.05" ); - const limit_order_id_type alice_sell_id = create_sell_order( alice, bitusd.amount(1000), core.amount(1050) )->id; + const limit_order_id_type alice_sell_id = create_sell_order( alice, bitusd.amount(1000), core.amount(1050) )->get_id(); BOOST_REQUIRE_EQUAL( get_balance( alice, bitusd ), 100000 - 1000 ); BOOST_REQUIRE_EQUAL( get_balance( alice, core ), 10000000 - 400000 ); @@ -1540,7 +1540,7 @@ BOOST_AUTO_TEST_CASE( more_call_order_update_test_after_hardfork_bsip77_when_icr BOOST_REQUIRE_EQUAL( get_balance( alice, core ), 10000000 - 400000 + 105 ); BOOST_TEST_MESSAGE( "bob attempting to borrow using 2x collateral at 1:1 price now that there is a valid order" ); - const call_order_id_type bob_call_id = borrow( bob, bitusd.amount(100), asset(200))->id; + const call_order_id_type bob_call_id = borrow( bob, bitusd.amount(100), asset(200))->get_id(); BOOST_REQUIRE_EQUAL( get_balance( bob, bitusd ), 100 + 100 ); BOOST_REQUIRE_EQUAL( get_balance( bob, core ), 10000000 - 105 - 200 ); @@ -1553,7 +1553,7 @@ BOOST_AUTO_TEST_CASE( more_call_order_update_test_after_hardfork_bsip77_when_icr BOOST_REQUIRE_EQUAL( get_balance( bob, core ), 10000000 - 105 - 105 ); BOOST_REQUIRE_EQUAL( get_balance( alice, bitusd ), 100000 - 1000 ); BOOST_REQUIRE_EQUAL( get_balance( alice, core ), 10000000 - 400000 + 105 + 105 ); - BOOST_CHECK( !db.find( bob_call_id ) ); + BOOST_CHECK( !db.find( bob_call_id ) ); BOOST_TEST_MESSAGE( "alice cancel sell order" ); cancel_limit_order( alice_sell_id(db) ); @@ -1602,7 +1602,7 @@ BOOST_AUTO_TEST_CASE( more_call_order_update_test_after_hardfork_bsip77_when_icr BOOST_TEST_MESSAGE( "ICR 1.85, MCR 1.75, Alice CR 2.222222" ); BOOST_TEST_MESSAGE( "alice adding more collateral should be allowed" ); - const call_order_id_type alice_call_id = borrow( alice, bitusd.amount(0), asset(1))->id; + const call_order_id_type alice_call_id = borrow( alice, bitusd.amount(0), asset(1))->get_id(); BOOST_CHECK_EQUAL( alice_call_id(db).collateral.value, 400000 + 1 ); BOOST_CHECK_EQUAL( alice_call_id(db).debt.value, 100000 ); BOOST_TEST_MESSAGE( "ICR 1.85, MCR 1.75, Alice CR 2.222228" ); @@ -1694,7 +1694,7 @@ BOOST_AUTO_TEST_CASE( margin_call_limit_test ) transfer(committee_account, buyer_id, asset(init_balance)); transfer(committee_account, borrower_id, asset(init_balance)); transfer(committee_account, borrower2_id, asset(init_balance)); - update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( bitusd, {feedproducer.get_id()} ); price_feed current_feed; current_feed.settlement_price = bitusd.amount( 100 ) / core.amount(100); @@ -1765,7 +1765,7 @@ BOOST_AUTO_TEST_CASE( prediction_market ) update_feed_producers( pmark, { judge_id }); price_feed feed; - feed.settlement_price = asset( 1, pmark.id ) / asset( 1 ); + feed.settlement_price = asset( 1, pmark.get_id() ) / asset( 1 ); publish_feed( pmark, 
judge, feed ); BOOST_TEST_MESSAGE( "Require throw for mismatch collateral amounts" ); @@ -1823,7 +1823,7 @@ BOOST_AUTO_TEST_CASE( prediction_market_resolves_to_0 ) update_feed_producers( pmark, { judge_id }); price_feed feed; - feed.settlement_price = asset( 1, pmark.id ) / asset( 1 ); + feed.settlement_price = asset( 1, pmark.get_id() ) / asset( 1 ); publish_feed( pmark, judge, feed ); borrow( dan, pmark.amount(1000), asset(1000) ); @@ -1866,13 +1866,13 @@ BOOST_AUTO_TEST_CASE( prediction_market_black_swan ) update_feed_producers( pmark, { judge_id }); price_feed feed; - feed.settlement_price = asset( 1, pmark.id ) / asset( 1 ); + feed.settlement_price = asset( 1, pmark.get_id() ) / asset( 1 ); publish_feed( pmark, judge, feed ); borrow( dan, pmark.amount(1000), asset(1000) ); // feed a price that will cause a black swan - feed.settlement_price = asset( 1, pmark.id ) / asset( 1000 ); + feed.settlement_price = asset( 1, pmark.get_id() ) / asset( 1000 ); publish_feed( pmark, judge, feed ); // verify a black swan happened @@ -1887,13 +1887,13 @@ BOOST_AUTO_TEST_CASE( prediction_market_black_swan ) const auto& pmark2 = create_prediction_market("PMARKII", judge_id); update_feed_producers( pmark2, { judge_id }); price_feed feed2; - feed2.settlement_price = asset( 1, pmark2.id ) / asset( 1 ); + feed2.settlement_price = asset( 1, pmark2.get_id() ) / asset( 1 ); publish_feed( pmark2, judge, feed2 ); borrow( dan, pmark2.amount(1000), asset(1000) ); // feed a price that would have caused a black swan - feed2.settlement_price = asset( 1, pmark2.id ) / asset( 1000 ); + feed2.settlement_price = asset( 1, pmark2.get_id() ) / asset( 1000 ); publish_feed( pmark2, judge, feed2 ); // verify a black swan did not happen @@ -1975,7 +1975,7 @@ BOOST_AUTO_TEST_CASE( create_account_test ) BOOST_CHECK(statistics.id.space() == implementation_ids); BOOST_CHECK(statistics.id.type() == impl_account_statistics_object_type); - account_id_type nathan_id = nathan_account.id; + account_id_type nathan_id = nathan_account.get_id(); generate_block(); @@ -2098,7 +2098,7 @@ BOOST_AUTO_TEST_CASE( create_committee_member ) REQUIRE_THROW_WITH_VALUE(op, fee, asset(-600)); trx.operations.back() = op; - committee_member_id_type committee_member_id = db.get_index_type().get_next_id(); + committee_member_id_type committee_member_id { db.get_index_type().get_next_id() }; PUSH_TX( db, trx, ~0 ); const committee_member_object& d = committee_member_id(db); @@ -2182,7 +2182,7 @@ BOOST_AUTO_TEST_CASE( update_mia ) BOOST_AUTO_TEST_CASE( create_uia ) { try { - asset_id_type test_asset_id = db.get_index().get_next_id(); + asset_id_type test_asset_id { db.get_index().get_next_id() }; asset_create_operation creator; creator.issuer = account_id_type(); creator.fee = asset(); @@ -2413,12 +2413,12 @@ BOOST_AUTO_TEST_CASE( update_uia_issuer ) // Create accounts const auto& alice = create_account_2_keys("alice", alice_active, alice_owner); const auto& bob = create_account_2_keys("bob", bob_active, bob_owner); - const account_id_type alice_id = alice.id; - const account_id_type bob_id = bob.id; + const account_id_type alice_id = alice.get_id(); + const account_id_type bob_id = bob.get_id(); // Create asset const auto& test = create_user_issued_asset("UPDATEISSUER", alice_id(db), 0); - const asset_id_type test_id = test.id; + const asset_id_type test_id = test.get_id(); // Fast Forward to Hardfork time generate_blocks( HARDFORK_CORE_199_TIME ); @@ -2532,9 +2532,9 @@ BOOST_AUTO_TEST_CASE( create_buy_uia_multiple_match_new ) BOOST_CHECK_EQUAL( 
get_balance( buyer_account, test_asset ), 10000 ); - limit_order_id_type first_id = create_sell_order( buyer_account, test_asset.amount(100), core_asset.amount(100) )->id; - limit_order_id_type second_id = create_sell_order( buyer_account, test_asset.amount(100), core_asset.amount(200) )->id; - limit_order_id_type third_id = create_sell_order( buyer_account, test_asset.amount(100), core_asset.amount(300) )->id; + limit_order_id_type first_id = create_sell_order( buyer_account, test_asset.amount(100), core_asset.amount(100) )->get_id(); + limit_order_id_type second_id = create_sell_order( buyer_account, test_asset.amount(100), core_asset.amount(200) )->get_id(); + limit_order_id_type third_id = create_sell_order( buyer_account, test_asset.amount(100), core_asset.amount(300) )->get_id(); BOOST_CHECK_EQUAL( get_balance( buyer_account, test_asset ), 9700 ); @@ -2572,9 +2572,9 @@ BOOST_AUTO_TEST_CASE( create_buy_exact_match_uia ) BOOST_CHECK_EQUAL( get_balance( buyer_account, test_asset ), 10000 ); - limit_order_id_type first_id = create_sell_order( buyer_account, test_asset.amount(100), core_asset.amount(100) )->id; - limit_order_id_type second_id = create_sell_order( buyer_account, test_asset.amount(100), core_asset.amount(200) )->id; - limit_order_id_type third_id = create_sell_order( buyer_account, test_asset.amount(100), core_asset.amount(300) )->id; + limit_order_id_type first_id = create_sell_order( buyer_account, test_asset.amount(100), core_asset.amount(100) )->get_id(); + limit_order_id_type second_id = create_sell_order( buyer_account, test_asset.amount(100), core_asset.amount(200) )->get_id(); + limit_order_id_type third_id = create_sell_order( buyer_account, test_asset.amount(100), core_asset.amount(300) )->get_id(); BOOST_CHECK_EQUAL( get_balance( buyer_account, test_asset ), 9700 ); @@ -2613,9 +2613,9 @@ BOOST_AUTO_TEST_CASE( create_buy_uia_multiple_match_new_reverse ) BOOST_CHECK_EQUAL( get_balance( buyer_account, test_asset ), 10000 ); - limit_order_id_type first_id = create_sell_order( buyer_account, test_asset.amount(100), core_asset.amount(100) )->id; - limit_order_id_type second_id = create_sell_order( buyer_account, test_asset.amount(100), core_asset.amount(200) )->id; - limit_order_id_type third_id = create_sell_order( buyer_account, test_asset.amount(100), core_asset.amount(300) )->id; + limit_order_id_type first_id = create_sell_order( buyer_account, test_asset.amount(100), core_asset.amount(100) )->get_id(); + limit_order_id_type second_id = create_sell_order( buyer_account, test_asset.amount(100), core_asset.amount(200) )->get_id(); + limit_order_id_type third_id = create_sell_order( buyer_account, test_asset.amount(100), core_asset.amount(300) )->get_id(); BOOST_CHECK_EQUAL( get_balance( buyer_account, test_asset ), 9700 ); @@ -2655,9 +2655,9 @@ BOOST_AUTO_TEST_CASE( create_buy_uia_multiple_match_new_reverse_fract ) BOOST_CHECK_EQUAL( get_balance( buyer_account, core_asset ), 0 ); BOOST_CHECK_EQUAL( get_balance( seller_account, core_asset ), 30 ); - limit_order_id_type first_id = create_sell_order( buyer_account, test_asset.amount(100), core_asset.amount(10) )->id; - limit_order_id_type second_id = create_sell_order( buyer_account, test_asset.amount(100), core_asset.amount(20) )->id; - limit_order_id_type third_id = create_sell_order( buyer_account, test_asset.amount(100), core_asset.amount(30) )->id; + limit_order_id_type first_id = create_sell_order( buyer_account, test_asset.amount(100), core_asset.amount(10) )->get_id(); + limit_order_id_type second_id = 
create_sell_order( buyer_account, test_asset.amount(100), core_asset.amount(20) )->get_id(); + limit_order_id_type third_id = create_sell_order( buyer_account, test_asset.amount(100), core_asset.amount(30) )->get_id(); BOOST_CHECK_EQUAL( get_balance( buyer_account, test_asset ), 9700 ); @@ -3033,7 +3033,7 @@ BOOST_AUTO_TEST_CASE( reserve_asset_test ) BOOST_TEST_MESSAGE( "Test reserve operation on market issued asset" ); transfer( committee_account, alice_id, casset.amount( init_balance*100 ) ); - update_feed_producers( basset, {sam.id} ); + update_feed_producers( basset, {sam.get_id()} ); price_feed current_feed; current_feed.settlement_price = basset.amount( 2 ) / casset.amount(100); current_feed.maintenance_collateral_ratio = 1750; // need to set this explicitly, testnet has a different default @@ -3095,7 +3095,7 @@ BOOST_AUTO_TEST_CASE( call_order_update_evaluator_test ) call_order_update_operation op; op.funding_account = alice_id; op.delta_collateral = asset( 1000000 * GRAPHENE_BLOCKCHAIN_PRECISION ); - op.delta_debt = asset( bitjmj.options.max_supply + 1, bitjmj.id ); + op.delta_debt = asset( bitjmj.options.max_supply + 1, bitjmj.get_id() ); transaction tx; tx.operations.push_back( op ); set_expiration( db, tx ); @@ -3128,7 +3128,7 @@ BOOST_AUTO_TEST_CASE( call_order_update_evaluator_test ) call_order_update_operation op; op.funding_account = alice_id; op.delta_collateral = asset( 1000000 * GRAPHENE_BLOCKCHAIN_PRECISION ); - op.delta_debt = asset( bitusd.options.max_supply + 1, bitusd.id ); + op.delta_debt = asset( bitusd.options.max_supply + 1, bitusd.get_id() ); transaction tx; tx.operations.push_back( op ); set_expiration( db, tx ); @@ -3140,12 +3140,12 @@ BOOST_AUTO_TEST_CASE( call_order_update_evaluator_test ) call_order_update_operation op; op.funding_account = alice_id; op.delta_collateral = asset( 100 * GRAPHENE_BLOCKCHAIN_PRECISION ); - op.delta_debt = asset( 2, bitusd.id ); + op.delta_debt = asset( 2, bitusd.get_id() ); transaction tx; tx.operations.push_back( op ); set_expiration( db, tx ); PUSH_TX( db, tx, database::skip_tapos_check | database::skip_transaction_signatures ); - transfer( alice_id(db), bob_id(db), asset( 2, bitusd.id ) ); + transfer( alice_id(db), bob_id(db), asset( 2, bitusd.get_id() ) ); } { @@ -3153,7 +3153,7 @@ BOOST_AUTO_TEST_CASE( call_order_update_evaluator_test ) call_order_update_operation op; op.funding_account = alice_id; op.delta_collateral = asset( 100000 * GRAPHENE_BLOCKCHAIN_PRECISION ); - op.delta_debt = asset( bitusd.options.max_supply - 1, bitusd.id ); + op.delta_debt = asset( bitusd.options.max_supply - 1, bitusd.get_id() ); transaction tx; tx.operations.push_back( op ); set_expiration( db, tx ); @@ -3165,7 +3165,7 @@ BOOST_AUTO_TEST_CASE( call_order_update_evaluator_test ) call_order_update_operation op; op.funding_account = alice_id; op.delta_collateral = asset( 100000 * GRAPHENE_BLOCKCHAIN_PRECISION ); - op.delta_debt = asset( bitusd.options.max_supply - 2, bitusd.id ); + op.delta_debt = asset( bitusd.options.max_supply - 2, bitusd.get_id() ); transaction tx; tx.operations.push_back( op ); set_expiration( db, tx ); @@ -3188,7 +3188,7 @@ BOOST_AUTO_TEST_CASE( cover_with_collateral_test ) BOOST_TEST_MESSAGE( "Setting price feed to $0.02 / 100" ); transfer(committee_account, alice_id, asset(10000000)); - update_feed_producers( bitusd, {sam.id} ); + update_feed_producers( bitusd, {sam.get_id()} ); price_feed current_feed; current_feed.settlement_price = bitusd.amount( 2 ) / core.amount(100); @@ -3229,7 +3229,7 @@ BOOST_AUTO_TEST_CASE( 
cover_with_collateral_test )
 BOOST_TEST_MESSAGE( "Bob offers to sell most of the BitUSD at the feed" );
 const limit_order_object* order = create_sell_order( bob_id, bitusd.amount(99), asset(4950) );
 BOOST_REQUIRE( order != nullptr );
- limit_order_id_type order1_id = order->id;
+ limit_order_id_type order1_id = order->get_id();
 BOOST_CHECK_EQUAL( order->for_sale.value, 99 );
 // wdump( (*call_order) );
@@ -3240,7 +3240,7 @@ BOOST_AUTO_TEST_CASE( cover_with_collateral_test )
 BOOST_TEST_MESSAGE( "Bob offers to sell the last of his BitUSD in another order" );
 order = create_sell_order( bob_id, bitusd.amount(1), asset(50) );
 BOOST_REQUIRE( order != nullptr );
- limit_order_id_type order2_id = order->id;
+ limit_order_id_type order2_id = order->get_id();
 BOOST_CHECK_EQUAL( order->for_sale.value, 1 );
 // wdump( (*call_order) );
@@ -3532,7 +3532,7 @@ BOOST_AUTO_TEST_CASE( vesting_balance_withdraw_test )
 {
 // Try withdrawing a single satoshi
 const vesting_balance_object& vbo = create_vbo(
- alice_account.id, alice_account.id, core.amount( 10000 ), 1000, 0);
+ alice_account.get_id(), alice_account.get_id(), core.amount( 10000 ), 1000, 0);
 FC_ASSERT( db.get_balance( alice_account, core ).amount == 990000 );
@@ -3559,7 +3559,7 @@ BOOST_AUTO_TEST_CASE( vesting_balance_withdraw_test )
 // Make sure we can withdraw the correct amount after 999 seconds
 {
 const vesting_balance_object& vbo = create_vbo(
- alice_account.id, alice_account.id, core.amount( 10000 ), 1000, 999);
+ alice_account.get_id(), alice_account.get_id(), core.amount( 10000 ), 1000, 999);
 FC_ASSERT( db.get_balance( alice_account, core ).amount == 990000 );
@@ -3576,7 +3576,7 @@ BOOST_AUTO_TEST_CASE( vesting_balance_withdraw_test )
 // Make sure we can withdraw the whole thing after 1000 seconds
 {
 const vesting_balance_object& vbo = create_vbo(
- alice_account.id, alice_account.id, core.amount( 10000 ), 1000, 1000);
+ alice_account.get_id(), alice_account.get_id(), core.amount( 10000 ), 1000, 1000);
 FC_ASSERT( db.get_balance( alice_account, core ).amount == 990000 );
@@ -3592,7 +3592,7 @@ BOOST_AUTO_TEST_CASE( vesting_balance_withdraw_test )
 // Make sure that we can't withdraw a single extra satoshi no matter how old it is
 {
 const vesting_balance_object& vbo = create_vbo(
- alice_account.id, alice_account.id, core.amount( 10000 ), 1000, 123456);
+ alice_account.get_id(), alice_account.get_id(), core.amount( 10000 ), 1000, 123456);
 FC_ASSERT( db.get_balance( alice_account, core ).amount == 990000 );
@@ -3611,7 +3611,7 @@ BOOST_AUTO_TEST_CASE( vesting_balance_withdraw_test )
 // 3000 after 1000 more seconds
 {
 const vesting_balance_object& vbo = create_vbo(
- alice_account.id, alice_account.id, core.amount( 10000 ), 1000, 0);
+ alice_account.get_id(), alice_account.get_id(), core.amount( 10000 ), 1000, 0);
 FC_ASSERT( db.get_balance( alice_account, core ).amount == 990000 );
@@ -3648,7 +3648,7 @@ BOOST_AUTO_TEST_CASE( vesting_balance_withdraw_test )
 //
 {
 const vesting_balance_object& vbo = create_vbo(
- alice_account.id, alice_account.id, core.amount( 10000 ), 1000, 0);
+ alice_account.get_id(), alice_account.get_id(), core.amount( 10000 ), 1000, 0);
 FC_ASSERT( db.get_balance( alice_account, core ).amount == 990000 );
diff --git a/tests/tests/operation_tests2.cpp b/tests/tests/operation_tests2.cpp
index f154d807d5..90318ae635 100644
--- a/tests/tests/operation_tests2.cpp
+++ b/tests/tests/operation_tests2.cpp
@@ -108,8 +108,8 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_create )
 { try {
 auto nathan_private_key = generate_private_key("nathan");
auto dan_private_key = generate_private_key("dan"); - account_id_type nathan_id = create_account("nathan", nathan_private_key.get_public_key()).id; - account_id_type dan_id = create_account("dan", dan_private_key.get_public_key()).id; + account_id_type nathan_id = create_account("nathan", nathan_private_key.get_public_key()).get_id(); + account_id_type dan_id = create_account("dan", dan_private_key.get_public_key()).get_id(); transfer(account_id_type(), nathan_id, asset(1000)); generate_block(); @@ -153,8 +153,8 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_test ) auto nathan_private_key = generate_private_key("nathan"); auto dan_private_key = generate_private_key("dan"); - account_id_type nathan_id = get_account("nathan").id; - account_id_type dan_id = get_account("dan").id; + account_id_type nathan_id = get_account("nathan").get_id(); + account_id_type dan_id = get_account("dan").get_id(); withdraw_permission_id_type permit; set_expiration( db, trx ); @@ -273,7 +273,7 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_test ) generate_blocks(permit_object.expiration); } // Ensure the permit object has been garbage collected - BOOST_CHECK(db.find_object(permit) == nullptr); + BOOST_CHECK(db.find(permit) == nullptr); { withdraw_permission_claim_operation op; @@ -295,8 +295,8 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_nominal_case ) auto nathan_private_key = generate_private_key("nathan"); auto dan_private_key = generate_private_key("dan"); - account_id_type nathan_id = get_account("nathan").id; - account_id_type dan_id = get_account("dan").id; + account_id_type nathan_id = get_account("nathan").get_id(); + account_id_type dan_id = get_account("dan").get_id(); withdraw_permission_id_type permit; // Wait until the permission period's start time @@ -320,7 +320,7 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_nominal_case ) PUSH_TX( db, trx ); // tx's involving withdraw_permissions can't delete it even // if no further withdrawals are possible - BOOST_CHECK(db.find_object(permit) != nullptr); + BOOST_CHECK(db.find(permit) != nullptr); BOOST_CHECK( permit_object.claimed_this_period == 5 ); BOOST_CHECK_EQUAL( permit_object.available_this_period(db.head_block_time()).amount.value, 0 ); BOOST_CHECK_EQUAL( current_period(permit_object, db.head_block_time()).available_this_period.amount.value, 0 ); @@ -328,7 +328,7 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_nominal_case ) generate_blocks( permit_object.period_start_time + permit_object.withdrawal_period_sec ); - if( db.find_object(permit) == nullptr ) + if( db.find(permit) == nullptr ) break; } @@ -360,7 +360,7 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_whitelist_asset_test ) ACTORS( (nathan)(dan)(izzy) ); - const asset_id_type uia_id = create_user_issued_asset( "ADVANCED", izzy_id(db), white_list ).id; + const asset_id_type uia_id = create_user_issued_asset( "ADVANCED", izzy_id(db), white_list ).get_id(); issue_uia( nathan_id, asset(1000, uia_id) ); @@ -459,8 +459,8 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_incremental_case ) auto nathan_private_key = generate_private_key("nathan"); auto dan_private_key = generate_private_key("dan"); - account_id_type nathan_id = get_account("nathan").id; - account_id_type dan_id = get_account("dan").id; + account_id_type nathan_id = get_account("nathan").get_id(); + account_id_type dan_id = get_account("dan").get_id(); withdraw_permission_id_type permit; // Wait until the permission period's start time @@ -481,7 +481,7 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_incremental_case ) { // Before claiming, check the 
period description const withdraw_permission_object& permit_object = permit(db); - BOOST_CHECK(db.find_object(permit) != nullptr); + BOOST_CHECK(db.find(permit) != nullptr); withdrawal_period_descriptor period_descriptor = current_period(permit_object, db.head_block_time()); BOOST_CHECK_EQUAL(period_descriptor.available_this_period.amount.value, 5); BOOST_CHECK_EQUAL(period_descriptor.period_start_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch() + (expected_period_duration_seconds * 0)); @@ -499,7 +499,7 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_incremental_case ) PUSH_TX( db, trx ); // After claiming, check the period description - BOOST_CHECK(db.find_object(permit) != nullptr); + BOOST_CHECK(db.find(permit) != nullptr); BOOST_CHECK( permit_object.claimed_this_period == 4 ); BOOST_CHECK_EQUAL( permit_object.claimed_this_period.value, 4 ); period_descriptor = current_period(permit_object, db.head_block_time()); @@ -518,7 +518,7 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_incremental_case ) { // Before claiming, check the period description const withdraw_permission_object& permit_object = permit(db); - BOOST_CHECK(db.find_object(permit) != nullptr); + BOOST_CHECK(db.find(permit) != nullptr); withdrawal_period_descriptor period_descriptor = current_period(permit_object, db.head_block_time()); BOOST_CHECK_EQUAL(period_descriptor.available_this_period.amount.value, 5); BOOST_CHECK_EQUAL(period_descriptor.period_start_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch() + (expected_period_duration_seconds * 1)); @@ -536,7 +536,7 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_incremental_case ) PUSH_TX( db, trx ); // After claiming, check the period description - BOOST_CHECK(db.find_object(permit) != nullptr); + BOOST_CHECK(db.find(permit) != nullptr); BOOST_CHECK( permit_object.claimed_this_period == 1 ); BOOST_CHECK_EQUAL( permit_object.claimed_this_period.value, 1 ); period_descriptor = current_period(permit_object, db.head_block_time()); @@ -555,7 +555,7 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_incremental_case ) { // Before claiming, check the period description const withdraw_permission_object& permit_object = permit(db); - BOOST_CHECK(db.find_object(permit) != nullptr); + BOOST_CHECK(db.find(permit) != nullptr); withdrawal_period_descriptor period_descriptor = current_period(permit_object, db.head_block_time()); BOOST_CHECK_EQUAL(period_descriptor.available_this_period.amount.value, 5); BOOST_CHECK_EQUAL(period_descriptor.period_start_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch() + (expected_period_duration_seconds * 2)); @@ -578,7 +578,7 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_incremental_case ) { // Before claiming, check the period description const withdraw_permission_object& permit_object = permit(db); - BOOST_CHECK(db.find_object(permit) != nullptr); + BOOST_CHECK(db.find(permit) != nullptr); withdrawal_period_descriptor period_descriptor = current_period(permit_object, db.head_block_time()); BOOST_CHECK_EQUAL(period_descriptor.available_this_period.amount.value, 5); BOOST_CHECK_EQUAL(period_descriptor.period_start_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch() + (expected_period_duration_seconds * 3)); @@ -596,7 +596,7 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_incremental_case ) PUSH_TX( db, trx ); // After claiming, check the period description - BOOST_CHECK(db.find_object(permit) != nullptr); + BOOST_CHECK(db.find(permit) != nullptr); BOOST_CHECK( 
permit_object.claimed_this_period == 5 ); BOOST_CHECK_EQUAL( permit_object.claimed_this_period.value, 5 ); period_descriptor = current_period(permit_object, db.head_block_time()); @@ -615,7 +615,7 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_incremental_case ) { // Before claiming, check the period description const withdraw_permission_object& permit_object = permit(db); - BOOST_CHECK(db.find_object(permit) != nullptr); + BOOST_CHECK(db.find(permit) != nullptr); withdrawal_period_descriptor period_descriptor = current_period(permit_object, db.head_block_time()); BOOST_CHECK_EQUAL(period_descriptor.available_this_period.amount.value, 5); BOOST_CHECK_EQUAL(period_descriptor.period_start_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch() + (expected_period_duration_seconds * 4)); @@ -633,7 +633,7 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_incremental_case ) PUSH_TX( db, trx ); // After claiming, check the period description - BOOST_CHECK(db.find_object(permit) != nullptr); + BOOST_CHECK(db.find(permit) != nullptr); BOOST_CHECK( permit_object.claimed_this_period == 3 ); BOOST_CHECK_EQUAL( permit_object.claimed_this_period.value, 3 ); period_descriptor = current_period(permit_object, db.head_block_time()); @@ -649,7 +649,7 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_incremental_case ) } // Withdrawal periods completed - BOOST_CHECK(db.find_object(permit) == nullptr); + BOOST_CHECK(db.find(permit) == nullptr); BOOST_CHECK_EQUAL(get_balance(nathan_id, asset_id_type()), 987); BOOST_CHECK_EQUAL(get_balance(dan_id, asset_id_type()), 13); @@ -660,8 +660,8 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_update ) INVOKE(withdraw_permission_create); auto nathan_private_key = generate_private_key("nathan"); - account_id_type nathan_id = get_account("nathan").id; - account_id_type dan_id = get_account("dan").id; + account_id_type nathan_id = get_account("nathan").get_id(); + account_id_type dan_id = get_account("dan").get_id(); withdraw_permission_id_type permit; set_expiration( db, trx ); @@ -714,7 +714,7 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_delete ) BOOST_AUTO_TEST_CASE( mia_feeds ) { try { ACTORS((nathan)(dan)(ben)(vikram)); - asset_id_type bit_usd_id = create_bitasset("USDBIT").id; + asset_id_type bit_usd_id = create_bitasset("USDBIT").get_id(); { asset_update_operation op; @@ -855,7 +855,7 @@ BOOST_AUTO_TEST_CASE( witness_create ) trx.clear(); // create witness - witness_id_type nathan_witness_id = create_witness(nathan_id, nathan_private_key, skip).id; + witness_id_type nathan_witness_id = create_witness(nathan_id, nathan_private_key, skip).get_id(); // nathan should be in the cache BOOST_CHECK_EQUAL( caching_witnesses.count(nathan_witness_id), 1u ); @@ -1420,7 +1420,7 @@ BOOST_AUTO_TEST_CASE( force_settle_test ) nathan_id, 100, disable_force_settle - ).id; + ).get_id(); asset_id_type core_id = asset_id_type(); @@ -1471,11 +1471,11 @@ BOOST_AUTO_TEST_CASE( force_settle_test ) BOOST_TEST_MESSAGE( "First short batch" ); - call_order_id_type call1_id = borrow( shorter1_id, asset(1000, bitusd_id), asset(2*1000, core_id) )->id; // 2.0000 - call_order_id_type call2_id = borrow( shorter2_id, asset(2000, bitusd_id), asset(2*1999, core_id) )->id; // 1.9990 - call_order_id_type call3_id = borrow( shorter3_id, asset(3000, bitusd_id), asset(2*2890, core_id) )->id; // 1.9267 - call_order_id_type call4_id = borrow( shorter4_id, asset(4000, bitusd_id), asset(2*3950, core_id) )->id; // 1.9750 - call_order_id_type call5_id = borrow( shorter5_id, asset(5000, bitusd_id), asset(2*4900, 
core_id) )->id; // 1.9600 + call_order_id_type call1_id = borrow( shorter1_id, asset(1000, bitusd_id), asset(2*1000, core_id) )->get_id(); // 2.0000 + call_order_id_type call2_id = borrow( shorter2_id, asset(2000, bitusd_id), asset(2*1999, core_id) )->get_id(); // 1.9990 + call_order_id_type call3_id = borrow( shorter3_id, asset(3000, bitusd_id), asset(2*2890, core_id) )->get_id(); // 1.9267 + call_order_id_type call4_id = borrow( shorter4_id, asset(4000, bitusd_id), asset(2*3950, core_id) )->get_id(); // 1.9750 + call_order_id_type call5_id = borrow( shorter5_id, asset(5000, bitusd_id), asset(2*4900, core_id) )->get_id(); // 1.9600 transfer( shorter1_id, nathan_id, asset(1000, bitusd_id) ); transfer( shorter2_id, nathan_id, asset(2000, bitusd_id) ); @@ -1512,8 +1512,8 @@ BOOST_AUTO_TEST_CASE( force_settle_test ) BOOST_TEST_MESSAGE( "Verify partial settlement of call" ); // Partially settle a call - force_settlement_id_type settle_id = *force_settle( nathan_id, asset( 50, bitusd_id ) ) - .get< extendable_operation_result >().value.new_objects->begin(); + force_settlement_id_type settle_id { *force_settle( nathan_id, asset( 50, bitusd_id ) ) + .get< extendable_operation_result >().value.new_objects->begin() }; // Call does not take effect immediately BOOST_CHECK_EQUAL( get_balance(nathan_id, bitusd_id), 14950); @@ -1618,7 +1618,7 @@ BOOST_AUTO_TEST_CASE( assert_op_test ) // create some objects auto nathan_private_key = generate_private_key("nathan"); public_key_type nathan_public_key = nathan_private_key.get_public_key(); - account_id_type nathan_id = create_account("nathan", nathan_public_key).id; + account_id_type nathan_id = create_account("nathan", nathan_public_key).get_id(); assert_operation op; @@ -1694,8 +1694,8 @@ BOOST_AUTO_TEST_CASE( balance_object_test ) // Not using fixture's get_balance() here because it uses fixture's db, not my override BOOST_CHECK_EQUAL(db.get_balance(op.deposit_to_account, asset_id_type()).amount.value, 1); - BOOST_CHECK(db.find_object(balance_id_type()) == nullptr); - BOOST_CHECK(db.find_object(balance_id_type(1)) != nullptr); + BOOST_CHECK(db.find(balance_id_type()) == nullptr); + BOOST_CHECK(db.find(balance_id_type(1)) != nullptr); auto slot = db.get_slot_at_time(starting_time); db.generate_block(starting_time, db.get_scheduled_witness(slot), init_account_priv_key, skip_flags); @@ -1762,7 +1762,7 @@ BOOST_AUTO_TEST_CASE( balance_object_test ) _sign( trx, n_key ); _sign( trx, v1_key ); PUSH_TX(db, trx); - BOOST_CHECK(db.find_object(op.balance_to_claim) == nullptr); + BOOST_CHECK(db.find(op.balance_to_claim) == nullptr); BOOST_CHECK_EQUAL(db.get_balance(op.deposit_to_account, asset_id_type()).amount.value, 601); op.balance_to_claim = vesting_balance_2.id; @@ -1786,7 +1786,7 @@ BOOST_AUTO_TEST_CASE( balance_object_test ) _sign( trx, n_key ); _sign( trx, v2_key ); PUSH_TX(db, trx); - BOOST_CHECK(db.find_object(op.balance_to_claim) == nullptr); + BOOST_CHECK(db.find(op.balance_to_claim) == nullptr); BOOST_CHECK_EQUAL(db.get_balance(op.deposit_to_account, asset_id_type()).amount.value, 901); } FC_LOG_AND_RETHROW() } @@ -1883,7 +1883,7 @@ BOOST_AUTO_TEST_CASE(zero_second_vbo) sign(create_tx, alice_private_key); processed_transaction ptx = PUSH_TX( db, create_tx ); - vesting_balance_id_type vbid = ptx.operation_results[0].get(); + vesting_balance_id_type vbid { ptx.operation_results[0].get() }; check_vesting_1b( vbid ); } @@ -1902,7 +1902,7 @@ BOOST_AUTO_TEST_CASE(zero_second_vbo) set_expiration( db, create_tx ); sign(create_tx, alice_private_key); 
processed_transaction ptx = PUSH_TX( db, create_tx ); - worker_id_type wid = ptx.operation_results[0].get(); + worker_id_type wid { ptx.operation_results[0].get() }; // vote it in account_update_operation vote_op; @@ -1943,7 +1943,7 @@ BOOST_AUTO_TEST_CASE( vbo_withdraw_different ) // transfer(account_id_type(), alice_id, asset(1000)); - asset_id_type stuff_id = create_user_issued_asset( "STUFF", izzy_id(db), 0 ).id; + asset_id_type stuff_id = create_user_issued_asset( "STUFF", izzy_id(db), 0 ).get_id(); issue_uia( alice_id, asset( 1000, stuff_id ) ); // deposit STUFF with linear vesting policy @@ -2022,7 +2022,7 @@ BOOST_AUTO_TEST_CASE( top_n_special ) // Alice, Bob, Chloe, Dan (ABCD) // - asset_id_type topn_id = create_user_issued_asset( "TOPN", izzy_id(db), 0 ).id; + asset_id_type topn_id = create_user_issued_asset( "TOPN", izzy_id(db), 0 ).get_id(); authority stan_owner_auth = stan_id(db).owner; authority stan_active_auth = stan_id(db).active; @@ -2174,8 +2174,8 @@ BOOST_AUTO_TEST_CASE( buyback ) // Philbin (registrar) // - asset_id_type nono_id = create_user_issued_asset( "NONO", izzy_id(db), 0 ).id; - asset_id_type buyme_id = create_user_issued_asset( "BUYME", izzy_id(db), 0 ).id; + asset_id_type nono_id = create_user_issued_asset( "NONO", izzy_id(db), 0 ).get_id(); + asset_id_type buyme_id = create_user_issued_asset( "BUYME", izzy_id(db), 0 ).get_id(); // Create a buyback account account_id_type rex_id; @@ -2230,7 +2230,7 @@ BOOST_AUTO_TEST_CASE( buyback ) issue_uia( alice_id, asset( 1000, nono_id ) ); // Alice wants to sell 100 BUYME for 1000 BTS, a middle price. - limit_order_id_type order_id_mid = create_sell_order( alice_id, asset( 100, buyme_id ), asset( 1000, asset_id_type() ) )->id; + limit_order_id_type order_id_mid = create_sell_order( alice_id, asset( 100, buyme_id ), asset( 1000, asset_id_type() ) )->get_id(); generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); generate_block(); @@ -2254,9 +2254,9 @@ BOOST_AUTO_TEST_CASE( buyback ) set_expiration( db, trx ); // #11 // Selling 10 BUYME for 50 BTS, a low price. - limit_order_id_type order_id_low = create_sell_order( alice_id, asset( 10, buyme_id ), asset( 50, asset_id_type() ) )->id; + limit_order_id_type order_id_low = create_sell_order( alice_id, asset( 10, buyme_id ), asset( 50, asset_id_type() ) )->get_id(); // Selling 10 BUYME for 150 BTS, a high price. 
- limit_order_id_type order_id_high = create_sell_order( alice_id, asset( 10, buyme_id ), asset( 150, asset_id_type() ) )->id;
+ limit_order_id_type order_id_high = create_sell_order( alice_id, asset( 10, buyme_id ), asset( 150, asset_id_type() ) )->get_id();
 fund( rex_id(db), asset( 250, asset_id_type() ) );
 generate_blocks(db.get_dynamic_global_properties().next_maintenance_time);
diff --git a/tests/tests/pob_tests.cpp b/tests/tests/pob_tests.cpp
index cf277bfae4..872e6cf342 100644
--- a/tests/tests/pob_tests.cpp
+++ b/tests/tests/pob_tests.cpp
@@ -36,8 +36,7 @@ using namespace graphene::chain::test;
 BOOST_FIXTURE_TEST_SUITE( pob_tests, database_fixture )
 BOOST_AUTO_TEST_CASE( hardfork_time_test )
-{
- try {
+{ try {
 // Proceeds to a recent hard fork
 generate_blocks( HARDFORK_CORE_1270_TIME );
@@ -61,11 +60,7 @@ BOOST_AUTO_TEST_CASE( hardfork_time_test )
 ticket_update_operation uop = make_ticket_update_op( tmp_ticket, lock_720_days, {} );
 BOOST_CHECK_THROW( propose( uop ), fc::exception );
- } catch (fc::exception& e) {
- edump((e.to_detail_string()));
- throw;
- }
-}
+} FC_CAPTURE_LOG_AND_RETHROW( (0) ) }
 BOOST_AUTO_TEST_CASE( validation_and_basic_logic_test )
 { try {
@@ -170,7 +165,7 @@ BOOST_AUTO_TEST_CASE( validation_and_basic_logic_test )
 // negative amount
 BOOST_CHECK_THROW( update_ticket( tick_1, liquid, asset(-1) ), fc::exception );
 // non-core asset
- BOOST_CHECK_THROW( update_ticket( tick_1, liquid, asset(1, usd.id) ), fc::exception );
+ BOOST_CHECK_THROW( update_ticket( tick_1, liquid, asset(1, usd.get_id()) ), fc::exception );
 // too big amount
 BOOST_CHECK_THROW( update_ticket( tick_1, liquid, asset(2) ), fc::exception );
 // target type unchanged
@@ -198,9 +193,9 @@ BOOST_AUTO_TEST_CASE( validation_and_basic_logic_test )
 BOOST_CHECK_THROW( PUSH_TX(db, trx, ~0), fc::exception );
 }
- ticket_id_type tick_1_id = tick_1.id;
- ticket_id_type tick_2_id = tick_2.id;
- ticket_id_type tick_4_id = tick_4.id;
+ ticket_id_type tick_1_id = tick_1.get_id();
+ ticket_id_type tick_2_id = tick_2.get_id();
+ ticket_id_type tick_4_id = tick_4.get_id();
 // Update ticket 1 to liquid
 generic_operation_result result = update_ticket( tick_1, liquid, asset(1) );
@@ -244,7 +239,7 @@ BOOST_AUTO_TEST_CASE( validation_and_basic_logic_test )
 BOOST_CHECK( *result.updated_objects.begin() == tick_2_id );
 BOOST_CHECK_EQUAL( result.removed_objects.size(), 0u );
- ticket_id_type new_ticket_id = *result.new_objects.begin();
+ ticket_id_type new_ticket_id { *result.new_objects.begin() };
 BOOST_CHECK( new_ticket_id > tick_4_id );
 BOOST_REQUIRE( db.find( new_ticket_id ) );
 BOOST_CHECK( new_ticket_id(db).account == sam_id );
@@ -269,11 +264,7 @@ BOOST_AUTO_TEST_CASE( validation_and_basic_logic_test )
 BOOST_CHECK( new_ticket_id(db).target_type == lock_180_days );
 BOOST_CHECK( new_ticket_id(db).amount == asset(3) );
- } catch (fc::exception& e) {
- edump((e.to_detail_string()));
- throw;
- }
-}
+} FC_CAPTURE_LOG_AND_RETHROW( (0) ) }
 BOOST_AUTO_TEST_CASE( one_lock_180_ticket )
 { try {
@@ -291,7 +282,7 @@ BOOST_AUTO_TEST_CASE( one_lock_180_ticket )
 // create a ticket
 const ticket_object& tick_1 = create_ticket( sam_id, lock_180_days, asset(100) );
- ticket_id_type tick_1_id = tick_1.id;
+ ticket_id_type tick_1_id = tick_1.get_id();
 BOOST_CHECK( tick_1_id(db).account == sam_id );
 BOOST_CHECK( tick_1_id(db).target_type == lock_180_days );
@@ -386,7 +377,7 @@ BOOST_AUTO_TEST_CASE( one_lock_360_ticket )
 // create a ticket
 const ticket_object& tick_1 = create_ticket( sam_id, lock_360_days, asset(100) );
- ticket_id_type
tick_1_id = tick_1.id; + ticket_id_type tick_1_id = tick_1.get_id(); BOOST_CHECK( tick_1_id(db).account == sam_id ); BOOST_CHECK( tick_1_id(db).target_type == lock_360_days ); @@ -517,7 +508,7 @@ BOOST_AUTO_TEST_CASE( one_lock_720_ticket ) // create a ticket const ticket_object& tick_1 = create_ticket( sam_id, lock_720_days, asset(100) ); - ticket_id_type tick_1_id = tick_1.id; + ticket_id_type tick_1_id = tick_1.get_id(); BOOST_CHECK( tick_1_id(db).account == sam_id ); BOOST_CHECK( tick_1_id(db).target_type == lock_720_days ); @@ -684,7 +675,7 @@ BOOST_AUTO_TEST_CASE( one_lock_720_ticket_if_blocks_missed ) // create a ticket const ticket_object& tick_1 = create_ticket( sam_id, lock_720_days, asset(100) ); - ticket_id_type tick_1_id = tick_1.id; + ticket_id_type tick_1_id = tick_1.get_id(); BOOST_CHECK( tick_1_id(db).account == sam_id ); BOOST_CHECK( tick_1_id(db).target_type == lock_720_days ); @@ -741,7 +732,7 @@ BOOST_AUTO_TEST_CASE( one_lock_forever_ticket ) // create a ticket const ticket_object& tick_1 = create_ticket( sam_id, lock_forever, asset(100) ); - ticket_id_type tick_1_id = tick_1.id; + ticket_id_type tick_1_id = tick_1.get_id(); BOOST_CHECK( tick_1_id(db).account == sam_id ); BOOST_CHECK( tick_1_id(db).target_type == lock_forever ); @@ -930,7 +921,7 @@ BOOST_AUTO_TEST_CASE( one_lock_forever_ticket_if_blocks_missed ) // create a ticket const ticket_object& tick_1 = create_ticket( sam_id, lock_forever, asset(100) ); - ticket_id_type tick_1_id = tick_1.id; + ticket_id_type tick_1_id = tick_1.get_id(); BOOST_CHECK( tick_1_id(db).account == sam_id ); BOOST_CHECK( tick_1_id(db).target_type == lock_forever ); @@ -993,7 +984,7 @@ BOOST_AUTO_TEST_CASE( one_lock_forever_ticket_if_too_many_blocks_missed ) // create a ticket const ticket_object& tick_1 = create_ticket( sam_id, lock_forever, asset(100) ); - ticket_id_type tick_1_id = tick_1.id; + ticket_id_type tick_1_id = tick_1.get_id(); BOOST_CHECK( tick_1_id(db).account == sam_id ); BOOST_CHECK( tick_1_id(db).target_type == lock_forever ); @@ -2202,7 +2193,7 @@ BOOST_AUTO_TEST_CASE( cancel_charging_from_liquid ) // create a ticket const ticket_object& tick_1 = create_ticket( sam_id, lock_360_days, asset(100) ); - ticket_id_type tick_1_id = tick_1.id; + ticket_id_type tick_1_id = tick_1.get_id(); BOOST_CHECK( tick_1_id(db).account == sam_id ); BOOST_CHECK( tick_1_id(db).target_type == lock_360_days ); @@ -2703,7 +2694,7 @@ BOOST_AUTO_TEST_CASE( update_from_withdrawing_to_charging_then_withdraw_again ) BOOST_CHECK( *result.updated_objects.begin() == tick_1_id ); // the new ticket is stable - ticket_id_type tick_2_id = *result.new_objects.begin(); + ticket_id_type tick_2_id { *result.new_objects.begin() }; BOOST_REQUIRE( db.find( tick_2_id ) ); BOOST_CHECK( tick_2_id(db).target_type == lock_180_days ); @@ -2847,7 +2838,7 @@ BOOST_AUTO_TEST_CASE( update_from_withdrawing_to_charging_then_withdraw_again ) BOOST_CHECK( *result.updated_objects.begin() == tick_1_id ); // the new created ticket is freed already - ticket_id_type tick_3_id = *result.new_objects.begin(); + ticket_id_type tick_3_id { *result.new_objects.begin() }; BOOST_CHECK( *result.removed_objects.begin() == tick_3_id ); BOOST_CHECK( !db.find( tick_3_id ) ); @@ -2961,10 +2952,10 @@ BOOST_AUTO_TEST_CASE( multiple_tickets ) ted_balance -= 100000; BOOST_CHECK_EQUAL( db.get_balance( ted_id, asset_id_type() ).amount.value, ted_balance ); - ticket_id_type tick_1_id = tick_1.id; - ticket_id_type tick_2_id = tick_2.id; - ticket_id_type tick_3_id = tick_3.id; - ticket_id_type tick_4_id 
= tick_4.id; + ticket_id_type tick_1_id = tick_1.get_id(); + ticket_id_type tick_2_id = tick_2.get_id(); + ticket_id_type tick_3_id = tick_3.get_id(); + ticket_id_type tick_4_id = tick_4.get_id(); // one day passed generate_blocks( db.head_block_time() + fc::days(1) ); @@ -2990,7 +2981,7 @@ BOOST_AUTO_TEST_CASE( multiple_tickets ) BOOST_REQUIRE_EQUAL( result.new_objects.size(), 1u ); - ticket_id_type tick_5_id = *result.new_objects.begin(); + ticket_id_type tick_5_id { *result.new_objects.begin() }; BOOST_REQUIRE( db.find( tick_5_id ) ); BOOST_CHECK( tick_5_id(db).target_type == lock_180_days ); // target type of the new ticket is set BOOST_CHECK( tick_5_id(db).current_type == liquid ); @@ -3085,7 +3076,7 @@ BOOST_AUTO_TEST_CASE( multiple_tickets ) BOOST_REQUIRE_EQUAL( result.new_objects.size(), 1u ); - ticket_id_type tick_6_id = *result.new_objects.begin(); + ticket_id_type tick_6_id { *result.new_objects.begin() }; BOOST_CHECK( tick_6_id(db).target_type == lock_180_days ); BOOST_CHECK( tick_6_id(db).current_type == lock_180_days ); BOOST_CHECK( tick_6_id(db).status == withdrawing ); // 7 days to finish @@ -3103,7 +3094,7 @@ BOOST_AUTO_TEST_CASE( multiple_tickets ) BOOST_REQUIRE_EQUAL( result.new_objects.size(), 1u ); - ticket_id_type tick_7_id = *result.new_objects.begin(); + ticket_id_type tick_7_id { *result.new_objects.begin() }; BOOST_CHECK( tick_7_id(db).target_type == liquid ); BOOST_CHECK( tick_7_id(db).current_type == liquid ); BOOST_CHECK( tick_7_id(db).status == withdrawing ); // 180 days to finish @@ -3127,7 +3118,7 @@ BOOST_AUTO_TEST_CASE( multiple_tickets ) BOOST_REQUIRE_EQUAL( result.new_objects.size(), 1u ); - ticket_id_type tick_51_id = *result.new_objects.begin(); + ticket_id_type tick_51_id { *result.new_objects.begin() }; BOOST_CHECK( tick_51_id(db).target_type == liquid ); BOOST_CHECK( tick_51_id(db).current_type == liquid ); BOOST_CHECK( tick_51_id(db).status == withdrawing ); // 180 days to finish @@ -3147,7 +3138,7 @@ BOOST_AUTO_TEST_CASE( multiple_tickets ) BOOST_REQUIRE_EQUAL( result.new_objects.size(), 1u ); - ticket_id_type tick_52_id = *result.new_objects.begin(); + ticket_id_type tick_52_id { *result.new_objects.begin() }; BOOST_CHECK( tick_52_id(db).target_type == lock_forever ); BOOST_CHECK( tick_52_id(db).current_type == lock_180_days ); BOOST_CHECK( tick_52_id(db).status == charging ); // 15 days to next step @@ -3214,7 +3205,7 @@ BOOST_AUTO_TEST_CASE( multiple_tickets ) BOOST_REQUIRE_EQUAL( result.new_objects.size(), 1u ); - ticket_id_type tick_8_id = *result.new_objects.begin(); + ticket_id_type tick_8_id { *result.new_objects.begin() }; BOOST_CHECK( tick_8_id(db).target_type == lock_180_days ); BOOST_CHECK( tick_8_id(db).current_type == liquid ); BOOST_CHECK( tick_8_id(db).status == charging ); // 15 days to finish @@ -3232,7 +3223,7 @@ BOOST_AUTO_TEST_CASE( multiple_tickets ) BOOST_REQUIRE_EQUAL( result.new_objects.size(), 1u ); - ticket_id_type tick_9_id = *result.new_objects.begin(); + ticket_id_type tick_9_id { *result.new_objects.begin() }; BOOST_CHECK( tick_9_id(db).target_type == liquid ); BOOST_CHECK( tick_9_id(db).current_type == lock_180_days ); BOOST_CHECK( tick_9_id(db).status == withdrawing ); // 4 days to next step @@ -3534,7 +3525,7 @@ BOOST_AUTO_TEST_CASE( hf2262_test ) // create a ticket const ticket_object& tick_1 = create_ticket( sam_id, lock_180_days, asset(100) ); - ticket_id_type tick_1_id = tick_1.id; + ticket_id_type tick_1_id = tick_1.get_id(); BOOST_CHECK( tick_1_id(db).account == sam_id ); BOOST_CHECK( 
tick_1_id(db).target_type == lock_180_days ); @@ -3592,7 +3583,7 @@ BOOST_AUTO_TEST_CASE( hf2262_test ) BOOST_REQUIRE_EQUAL( result.new_objects.size(), 1u ); - ticket_id_type tick_2_id = *result.new_objects.begin(); + ticket_id_type tick_2_id { *result.new_objects.begin() }; BOOST_CHECK( tick_2_id(db).target_type == liquid ); BOOST_CHECK( tick_2_id(db).current_type == liquid ); BOOST_CHECK( tick_2_id(db).status == withdrawing ); @@ -3629,7 +3620,7 @@ BOOST_AUTO_TEST_CASE( hf2262_test ) BOOST_REQUIRE_EQUAL( result.new_objects.size(), 1u ); - ticket_id_type tick_3_id = *result.new_objects.begin(); + ticket_id_type tick_3_id { *result.new_objects.begin() }; BOOST_CHECK( tick_3_id(db).target_type == liquid ); BOOST_CHECK( tick_3_id(db).current_type == liquid ); BOOST_CHECK( tick_3_id(db).status == withdrawing ); @@ -3650,7 +3641,7 @@ BOOST_AUTO_TEST_CASE( hf2262_test ) // create a new ticket const ticket_object& tick_4 = create_ticket( sam_id, lock_360_days, asset(200) ); - ticket_id_type tick_4_id = tick_4.id; + ticket_id_type tick_4_id = tick_4.get_id(); BOOST_CHECK( tick_4_id(db).account == sam_id ); BOOST_CHECK( tick_4_id(db).target_type == lock_360_days ); diff --git a/tests/tests/samet_fund_tests.cpp b/tests/tests/samet_fund_tests.cpp index 75739f067e..4b669817fd 100644 --- a/tests/tests/samet_fund_tests.cpp +++ b/tests/tests/samet_fund_tests.cpp @@ -55,7 +55,7 @@ BOOST_AUTO_TEST_CASE( samet_fund_hardfork_time_test ) // Before the hard fork, unable to create a samet fund or transact against a samet fund, // or do any of them with proposals - BOOST_CHECK_THROW( create_samet_fund( sam_id, core.id, 10000, 100 ), fc::exception ); + BOOST_CHECK_THROW( create_samet_fund( sam_id, core.get_id(), 10000, 100 ), fc::exception ); samet_fund_id_type tmp_sf_id; BOOST_CHECK_THROW( delete_samet_fund( sam_id, tmp_sf_id ), fc::exception ); @@ -64,7 +64,7 @@ BOOST_AUTO_TEST_CASE( samet_fund_hardfork_time_test ) BOOST_CHECK_THROW( repay_to_samet_fund( sam_id, tmp_sf_id, core.amount(100), core.amount(100) ), fc::exception ); - samet_fund_create_operation cop = make_samet_fund_create_op( sam_id, core.id, 10000, 100 ); + samet_fund_create_operation cop = make_samet_fund_create_op( sam_id, core.get_id(), 10000, 100 ); BOOST_CHECK_THROW( propose( cop ), fc::exception ); samet_fund_delete_operation dop = make_samet_fund_delete_op( sam_id, tmp_sf_id ); @@ -103,12 +103,12 @@ BOOST_AUTO_TEST_CASE( samet_fund_crud_and_proposal_test ) asset_id_type core_id; const asset_object& usd = create_user_issued_asset( "MYUSD" ); - asset_id_type usd_id = usd.id; + asset_id_type usd_id = usd.get_id(); issue_uia( sam, usd.amount(init_amount) ); issue_uia( ted, usd.amount(init_amount) ); const asset_object& eur = create_user_issued_asset( "MYEUR", sam, white_list ); - asset_id_type eur_id = eur.id; + asset_id_type eur_id = eur.get_id(); issue_uia( sam, eur.amount(init_amount) ); issue_uia( ted, eur.amount(init_amount) ); // Make a whitelist @@ -142,7 +142,7 @@ BOOST_AUTO_TEST_CASE( samet_fund_crud_and_proposal_test ) // Able to propose { - samet_fund_create_operation cop = make_samet_fund_create_op( sam_id, core.id, 10000, 100 ); + samet_fund_create_operation cop = make_samet_fund_create_op( sam_id, core.get_id(), 10000, 100 ); propose( cop ); samet_fund_id_type tmp_sf_id; @@ -180,8 +180,8 @@ BOOST_AUTO_TEST_CASE( samet_fund_crud_and_proposal_test ) check_balances(); // Able to create samet funds with valid data - const samet_fund_object& sfo1 = create_samet_fund( sam_id, core.id, 10000, 100u ); - samet_fund_id_type sf1_id = 
sfo1.id; + const samet_fund_object& sfo1 = create_samet_fund( sam_id, core.get_id(), 10000, 100u ); + samet_fund_id_type sf1_id = sfo1.get_id(); BOOST_CHECK( sfo1.owner_account == sam_id ); BOOST_CHECK( sfo1.asset_type == core.id ); BOOST_CHECK( sfo1.balance == 10000 ); @@ -191,8 +191,8 @@ BOOST_AUTO_TEST_CASE( samet_fund_crud_and_proposal_test ) expected_balance_sam_core -= 10000; check_balances(); - const samet_fund_object& sfo2 = create_samet_fund( ted_id, usd.id, 1, 10000000u ); - samet_fund_id_type sf2_id = sfo2.id; + const samet_fund_object& sfo2 = create_samet_fund( ted_id, usd.get_id(), 1, 10000000u ); + samet_fund_id_type sf2_id = sfo2.get_id(); BOOST_CHECK( sfo2.owner_account == ted_id ); BOOST_CHECK( sfo2.asset_type == usd.id ); BOOST_CHECK( sfo2.balance == 1 ); @@ -202,8 +202,8 @@ BOOST_AUTO_TEST_CASE( samet_fund_crud_and_proposal_test ) expected_balance_ted_usd -= 1; check_balances(); - const samet_fund_object& sfo3 = create_samet_fund( sam_id, eur.id, 10, 1u ); // Account is whitelisted - samet_fund_id_type sf3_id = sfo3.id; + const samet_fund_object& sfo3 = create_samet_fund( sam_id, eur.get_id(), 10, 1u ); // Account is whitelisted + samet_fund_id_type sf3_id = sfo3.get_id(); BOOST_CHECK( sfo3.owner_account == sam_id ); BOOST_CHECK( sfo3.asset_type == eur_id ); BOOST_CHECK( sfo3.balance == 10 ); @@ -215,14 +215,14 @@ BOOST_AUTO_TEST_CASE( samet_fund_crud_and_proposal_test ) // Unable to create a samet fund with invalid data // Non-positive balance - BOOST_CHECK_THROW( create_samet_fund( sam_id, core.id, -1, 100u ), fc::exception ); - BOOST_CHECK_THROW( create_samet_fund( ted_id, usd.id, 0, 10000000u ), fc::exception ); + BOOST_CHECK_THROW( create_samet_fund( sam_id, core.get_id(), -1, 100u ), fc::exception ); + BOOST_CHECK_THROW( create_samet_fund( ted_id, usd.get_id(), 0, 10000000u ), fc::exception ); // Insufficient account balance - BOOST_CHECK_THROW( create_samet_fund( por_id, usd.id, 1, 100u ), fc::exception ); + BOOST_CHECK_THROW( create_samet_fund( por_id, usd.get_id(), 1, 100u ), fc::exception ); // Nonexistent asset type BOOST_CHECK_THROW( create_samet_fund( sam_id, no_asset_id, 1, 100u ), fc::exception ); // Account is not whitelisted - BOOST_CHECK_THROW( create_samet_fund( ted_id, eur.id, 10, 1u ), fc::exception ); + BOOST_CHECK_THROW( create_samet_fund( ted_id, eur.get_id(), 10, 1u ), fc::exception ); check_balances(); @@ -297,7 +297,7 @@ BOOST_AUTO_TEST_CASE( samet_fund_crud_and_proposal_test ) // Unable to delete a fund that does not exist BOOST_CHECK_THROW( delete_samet_fund( sam_id, sf1_id ), fc::exception ); // Unable to delete a fund that is not owned by him - BOOST_CHECK_THROW( delete_samet_fund( sam_id, sfo2.id ), fc::exception ); + BOOST_CHECK_THROW( delete_samet_fund( sam_id, sfo2.get_id() ), fc::exception ); BOOST_REQUIRE( !db.find( sf1_id ) ); BOOST_REQUIRE( db.find( sf2_id ) ); @@ -361,7 +361,7 @@ BOOST_AUTO_TEST_CASE( samet_fund_crud_and_proposal_test ) check_balances(); // Sam is unable to recreate the fund - BOOST_CHECK_THROW( create_samet_fund( sam_id, eur.id, 10, 1u ), fc::exception ); + BOOST_CHECK_THROW( create_samet_fund( sam_id, eur.get_id(), 10, 1u ), fc::exception ); check_balances(); generate_block(); @@ -389,12 +389,12 @@ BOOST_AUTO_TEST_CASE( samet_fund_borrow_repay_test ) asset_id_type core_id; const asset_object& usd = create_user_issued_asset( "MYUSD" ); - asset_id_type usd_id = usd.id; + asset_id_type usd_id = usd.get_id(); issue_uia( sam, usd.amount(init_amount) ); issue_uia( ted, usd.amount(init_amount) ); const asset_object& eur 
= create_user_issued_asset( "MYEUR", sam, white_list ); - asset_id_type eur_id = eur.id; + asset_id_type eur_id = eur.get_id(); issue_uia( sam, eur.amount(init_amount) ); issue_uia( ted, eur.amount(init_amount) ); // Make a whitelist @@ -445,20 +445,20 @@ BOOST_AUTO_TEST_CASE( samet_fund_borrow_repay_test ) check_balances(); // create samet funds - const samet_fund_object& sfo1 = create_samet_fund( sam_id, core.id, 10000, 10000u ); // fee rate is 1% - samet_fund_id_type sf1_id = sfo1.id; + const samet_fund_object& sfo1 = create_samet_fund( sam_id, core.get_id(), 10000, 10000u ); // fee rate is 1% + samet_fund_id_type sf1_id = sfo1.get_id(); expected_balance_sam_core -= 10000; check_balances(); - const samet_fund_object& sfo2 = create_samet_fund( ted_id, usd.id, 1, 10000000u ); // fee rate is 1000% - samet_fund_id_type sf2_id = sfo2.id; + const samet_fund_object& sfo2 = create_samet_fund( ted_id, usd.get_id(), 1, 10000000u ); // fee rate is 1000% + samet_fund_id_type sf2_id = sfo2.get_id(); expected_balance_ted_usd -= 1; check_balances(); - const samet_fund_object& sfo3 = create_samet_fund( sam_id, eur.id, 10, 1u ); // Account is whitelisted - samet_fund_id_type sf3_id = sfo3.id; + const samet_fund_object& sfo3 = create_samet_fund( sam_id, eur.get_id(), 10, 1u ); // Account is whitelisted + samet_fund_id_type sf3_id = sfo3.get_id(); expected_balance_sam_eur -= 10; check_balances(); @@ -714,7 +714,7 @@ BOOST_AUTO_TEST_CASE( samet_fund_borrow_repay_test ) trx.operations.push_back( cop ); processed_transaction ptx = PUSH_TX(db, trx, ~0); const operation_result& op_result = ptx.operation_results.front(); - proposal_id_type pid = op_result.get(); + proposal_id_type pid { op_result.get() }; proposal_update_operation puo; puo.proposal = pid; @@ -754,7 +754,7 @@ BOOST_AUTO_TEST_CASE( samet_fund_borrow_repay_test ) trx.operations.push_back( cop ); processed_transaction ptx = PUSH_TX(db, trx, ~0); const operation_result& op_result = ptx.operation_results.front(); - proposal_id_type pid = op_result.get(); + proposal_id_type pid { op_result.get() }; proposals.push_back( pid ); }; @@ -1042,33 +1042,33 @@ BOOST_AUTO_TEST_CASE( samet_fund_apis_test ) asset_id_type core_id; const asset_object& usd = create_user_issued_asset( "MYUSD" ); - asset_id_type usd_id = usd.id; + asset_id_type usd_id = usd.get_id(); issue_uia( sam, usd.amount(init_amount) ); issue_uia( ted, usd.amount(init_amount) ); const asset_object& eur = create_user_issued_asset( "MYEUR", sam, white_list ); - asset_id_type eur_id = eur.id; + asset_id_type eur_id = eur.get_id(); issue_uia( sam, eur.amount(init_amount) ); issue_uia( ted, eur.amount(init_amount) ); // create samet funds const samet_fund_object& sfo1 = create_samet_fund( sam_id, core_id, 10000, 10000u ); // fee rate is 1% - samet_fund_id_type sf1_id = sfo1.id; + samet_fund_id_type sf1_id = sfo1.get_id(); const samet_fund_object& sfo2 = create_samet_fund( ted_id, usd_id, 1, 10000000u ); // fee rate is 1000% - samet_fund_id_type sf2_id = sfo2.id; + samet_fund_id_type sf2_id = sfo2.get_id(); const samet_fund_object& sfo3 = create_samet_fund( sam_id, eur_id, 10, 1u ); - samet_fund_id_type sf3_id = sfo3.id; + samet_fund_id_type sf3_id = sfo3.get_id(); const samet_fund_object& sfo4 = create_samet_fund( sam_id, eur_id, 10, 2u ); - samet_fund_id_type sf4_id = sfo4.id; + samet_fund_id_type sf4_id = sfo4.get_id(); const samet_fund_object& sfo5 = create_samet_fund( sam_id, usd_id, 100, 20u ); - samet_fund_id_type sf5_id = sfo5.id; + samet_fund_id_type sf5_id = sfo5.get_id(); const 
samet_fund_object& sfo6 = create_samet_fund( ted_id, usd_id, 1000, 200u ); - samet_fund_id_type sf6_id = sfo6.id; + samet_fund_id_type sf6_id = sfo6.get_id(); generate_block(); @@ -1179,7 +1179,7 @@ BOOST_AUTO_TEST_CASE( samet_fund_account_history_test ) // create samet funds const samet_fund_object& sfo1 = create_samet_fund( sam_id, core_id, 10000, 10000u ); // fee rate is 1% - samet_fund_id_type sf1_id = sfo1.id; + samet_fund_id_type sf1_id = sfo1.get_id(); generate_block(); diff --git a/tests/tests/settle_tests.cpp b/tests/tests/settle_tests.cpp index b6e0ee735f..5fef6aeede 100644 --- a/tests/tests/settle_tests.cpp +++ b/tests/tests/settle_tests.cpp @@ -50,9 +50,9 @@ BOOST_AUTO_TEST_CASE( settle_rounding_test ) const auto& bitusd = create_bitasset("USDBIT", paul_id); const auto& bitcny = create_bitasset("CNYBIT", paul_id); const auto& core = asset_id_type()(db); - asset_id_type bitusd_id = bitusd.id; - asset_id_type bitcny_id = bitcny.id; - asset_id_type core_id = core.id; + asset_id_type bitusd_id = bitusd.get_id(); + asset_id_type bitcny_id = bitcny.get_id(); + asset_id_type core_id = core.get_id(); // fund accounts transfer(committee_account, michael_id, asset( 100000000 ) ); @@ -62,7 +62,7 @@ BOOST_AUTO_TEST_CASE( settle_rounding_test ) transfer(committee_account, jim_id, asset(10000000)); // add a feed to asset - update_feed_producers( bitusd, {paul.id} ); + update_feed_producers( bitusd, {paul.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; current_feed.maximum_short_squeeze_ratio = 1100; @@ -71,11 +71,11 @@ BOOST_AUTO_TEST_CASE( settle_rounding_test ) // paul gets some bitusd const call_order_object& call_paul = *borrow( paul, bitusd.amount(1000), core.amount(100) ); - call_order_id_type call_paul_id = call_paul.id; + call_order_id_type call_paul_id = call_paul.get_id(); BOOST_REQUIRE_EQUAL( get_balance( paul, bitusd ), 1000 ); // and transfer some to rachel - transfer(paul.id, rachel.id, asset(200, bitusd.id)); + transfer(paul.get_id(), rachel.get_id(), asset(200, bitusd.get_id())); BOOST_CHECK_EQUAL(get_balance(rachel, core), 0); BOOST_CHECK_EQUAL(get_balance(rachel, bitusd), 200); @@ -84,12 +84,12 @@ BOOST_AUTO_TEST_CASE( settle_rounding_test ) // michael gets some bitusd const call_order_object& call_michael = *borrow(michael, bitusd.amount(6), core.amount(8)); - call_order_id_type call_michael_id = call_michael.id; + call_order_id_type call_michael_id = call_michael.get_id(); // add settle order and check rounding issue operation_result result = force_settle(rachel, bitusd.amount(4)); - force_settlement_id_type settle_id = *result.get().value.new_objects->begin(); + force_settlement_id_type settle_id { *result.get().value.new_objects->begin() }; BOOST_CHECK_EQUAL( settle_id(db).balance.amount.value, 4 ); BOOST_CHECK_EQUAL(get_balance(rachel, core), 0); @@ -139,7 +139,7 @@ BOOST_AUTO_TEST_CASE( settle_rounding_test ) set_expiration( db, trx ); operation_result result2 = force_settle(rachel_id(db), bitusd_id(db).amount(34)); - force_settlement_id_type settle_id2 = *result2.get().value.new_objects->begin(); + force_settlement_id_type settle_id2 { *result2.get().value.new_objects->begin() }; BOOST_CHECK_EQUAL( settle_id2(db).balance.amount.value, 34 ); BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 0); @@ -194,13 +194,13 @@ BOOST_AUTO_TEST_CASE( settle_rounding_test ) const operation_result result4 = force_settle(rachel_id(db), bitusd_id(db).amount(434)); const operation_result result5 = force_settle(rachel_id(db), 
bitusd_id(db).amount(5)); - force_settlement_id_type settle_id3 = *result3.get().value.new_objects->begin(); + force_settlement_id_type settle_id3 { *result3.get().value.new_objects->begin() }; BOOST_CHECK_EQUAL( settle_id3(db).balance.amount.value, 3 ); - force_settlement_id_type settle_id4 = *result4.get().value.new_objects->begin(); + force_settlement_id_type settle_id4 { *result4.get().value.new_objects->begin() }; BOOST_CHECK_EQUAL( settle_id4(db).balance.amount.value, 434 ); - force_settlement_id_type settle_id5 = *result5.get().value.new_objects->begin(); + force_settlement_id_type settle_id5 { *result5.get().value.new_objects->begin() }; BOOST_CHECK_EQUAL( settle_id5(db).balance.amount.value, 5 ); BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 1); @@ -356,7 +356,8 @@ BOOST_AUTO_TEST_CASE( settle_rounding_test ) generate_block(); // jim borrow some cny - call_order_id_type call_jim_id = borrow(jim_id(db), bitcny_id(db).amount(2000), core_id(db).amount(2000))->id; + call_order_id_type call_jim_id = borrow(jim_id(db), bitcny_id(db).amount(2000), core_id(db).amount(2000)) + ->get_id(); BOOST_CHECK_EQUAL( 2000, call_jim_id(db).debt.value ); BOOST_CHECK_EQUAL( 2000, call_jim_id(db).collateral.value ); @@ -386,13 +387,13 @@ BOOST_AUTO_TEST_CASE( settle_rounding_test ) const operation_result result7 = force_settle(ted_id(db), bitusd_id(db).amount(21)); const operation_result result8 = force_settle(ted_id(db), bitusd_id(db).amount(22)); - force_settlement_id_type settle_id6 = *result6.get().value.new_objects->begin(); + force_settlement_id_type settle_id6 { *result6.get().value.new_objects->begin() }; BOOST_CHECK_EQUAL( settle_id6(db).balance.amount.value, 20 ); - force_settlement_id_type settle_id7 = *result7.get().value.new_objects->begin(); + force_settlement_id_type settle_id7 { *result7.get().value.new_objects->begin() }; BOOST_CHECK_EQUAL( settle_id7(db).balance.amount.value, 21 ); - force_settlement_id_type settle_id8 = *result8.get().value.new_objects->begin(); + force_settlement_id_type settle_id8 { *result8.get().value.new_objects->begin() }; BOOST_CHECK_EQUAL( settle_id8(db).balance.amount.value, 22 ); BOOST_CHECK_EQUAL(get_balance(ted_id(db), core_id(db)), 0); @@ -403,16 +404,16 @@ BOOST_AUTO_TEST_CASE( settle_rounding_test ) const operation_result result102 = force_settle(joe_id(db), bitcny_id(db).amount(1000)); const operation_result result103 = force_settle(joe_id(db), bitcny_id(db).amount(300)); - force_settlement_id_type settle_id101 = *result101.get() - .value.new_objects->begin(); + force_settlement_id_type settle_id101 { *result101.get() + .value.new_objects->begin() }; BOOST_CHECK_EQUAL( settle_id101(db).balance.amount.value, 100 ); - force_settlement_id_type settle_id102 = *result102.get() - .value.new_objects->begin(); + force_settlement_id_type settle_id102 { *result102.get() + .value.new_objects->begin() }; BOOST_CHECK_EQUAL( settle_id102(db).balance.amount.value, 1000 ); - force_settlement_id_type settle_id103 = *result103.get() - .value.new_objects->begin(); + force_settlement_id_type settle_id103 { *result103.get() + .value.new_objects->begin() }; BOOST_CHECK_EQUAL( settle_id103(db).balance.amount.value, 300 ); BOOST_CHECK_EQUAL(get_balance(joe_id(db), core_id(db)), 0); @@ -480,7 +481,7 @@ BOOST_AUTO_TEST_CASE( settle_rounding_test ) // bob borrow some const call_order_object& call_bob = *borrow( bob_id(db), bitusd_id(db).amount(19), core_id(db).amount(2) ); - call_order_id_type call_bob_id = call_bob.id; + call_order_id_type call_bob_id = 
call_bob.get_id(); BOOST_CHECK_EQUAL(get_balance(bob_id(db), core_id(db)), 9999998); // 10000000 - 2 BOOST_CHECK_EQUAL(get_balance(bob_id(db), bitusd_id(db)), 19); // new @@ -664,9 +665,9 @@ BOOST_AUTO_TEST_CASE( settle_rounding_test_after_hf_184 ) const auto& bitusd = create_bitasset("USDBIT", paul_id); const auto& bitcny = create_bitasset("CNYBIT", paul_id); const auto& core = asset_id_type()(db); - asset_id_type bitusd_id = bitusd.id; - asset_id_type bitcny_id = bitcny.id; - asset_id_type core_id = core.id; + asset_id_type bitusd_id = bitusd.get_id(); + asset_id_type bitcny_id = bitcny.get_id(); + asset_id_type core_id = core.get_id(); // fund accounts transfer(committee_account, michael_id, asset( 100000000 ) ); @@ -676,7 +677,7 @@ BOOST_AUTO_TEST_CASE( settle_rounding_test_after_hf_184 ) transfer(committee_account, jim_id, asset(10000000)); // add a feed to asset - update_feed_producers( bitusd, {paul.id} ); + update_feed_producers( bitusd, {paul.get_id()} ); price_feed current_feed; current_feed.maintenance_collateral_ratio = 1750; current_feed.maximum_short_squeeze_ratio = 1100; @@ -685,11 +686,11 @@ BOOST_AUTO_TEST_CASE( settle_rounding_test_after_hf_184 ) // paul gets some bitusd const call_order_object& call_paul = *borrow( paul, bitusd.amount(1000), core.amount(100) ); - call_order_id_type call_paul_id = call_paul.id; + call_order_id_type call_paul_id = call_paul.get_id(); BOOST_REQUIRE_EQUAL( get_balance( paul, bitusd ), 1000 ); // and transfer some to rachel - transfer(paul.id, rachel.id, asset(200, bitusd.id)); + transfer(paul.get_id(), rachel.get_id(), asset(200, bitusd.get_id())); BOOST_CHECK_EQUAL(get_balance(rachel, core), 0); BOOST_CHECK_EQUAL(get_balance(rachel, bitusd), 200); @@ -698,12 +699,12 @@ BOOST_AUTO_TEST_CASE( settle_rounding_test_after_hf_184 ) // michael gets some bitusd const call_order_object& call_michael = *borrow(michael, bitusd.amount(6), core.amount(8)); - call_order_id_type call_michael_id = call_michael.id; + call_order_id_type call_michael_id = call_michael.get_id(); // add settle order and check rounding issue const operation_result result = force_settle(rachel, bitusd.amount(4)); - force_settlement_id_type settle_id = *result.get().value.new_objects->begin(); + force_settlement_id_type settle_id { *result.get().value.new_objects->begin() }; BOOST_CHECK_EQUAL( settle_id(db).balance.amount.value, 4 ); BOOST_CHECK_EQUAL(get_balance(rachel, core), 0); @@ -753,7 +754,7 @@ BOOST_AUTO_TEST_CASE( settle_rounding_test_after_hf_184 ) set_expiration( db, trx ); const operation_result result2 = force_settle(rachel_id(db), bitusd_id(db).amount(34)); - force_settlement_id_type settle_id2 = *result2.get().value.new_objects->begin(); + force_settlement_id_type settle_id2 { *result2.get().value.new_objects->begin() }; BOOST_CHECK_EQUAL( settle_id2(db).balance.amount.value, 34 ); BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 0); @@ -808,13 +809,13 @@ BOOST_AUTO_TEST_CASE( settle_rounding_test_after_hf_184 ) const operation_result result4 = force_settle(rachel_id(db), bitusd_id(db).amount(434)); const operation_result result5 = force_settle(rachel_id(db), bitusd_id(db).amount(5)); - force_settlement_id_type settle_id3 = *result3.get().value.new_objects->begin(); + force_settlement_id_type settle_id3 { *result3.get().value.new_objects->begin() }; BOOST_CHECK_EQUAL( settle_id3(db).balance.amount.value, 3 ); - force_settlement_id_type settle_id4 = *result4.get().value.new_objects->begin(); + force_settlement_id_type settle_id4 { 
*result4.get().value.new_objects->begin() }; BOOST_CHECK_EQUAL( settle_id4(db).balance.amount.value, 434 ); - force_settlement_id_type settle_id5 = *result5.get().value.new_objects->begin(); + force_settlement_id_type settle_id5 { *result5.get().value.new_objects->begin() }; BOOST_CHECK_EQUAL( settle_id5(db).balance.amount.value, 5 ); BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 1); @@ -972,7 +973,8 @@ BOOST_AUTO_TEST_CASE( settle_rounding_test_after_hf_184 ) BOOST_CHECK_EQUAL( bitusd_id(db).bitasset_data(db).force_settled_volume.value, 203 ); // 182 + 21 // jim borrow some cny - call_order_id_type call_jim_id = borrow(jim_id(db), bitcny_id(db).amount(2000), core_id(db).amount(2000))->id; + call_order_id_type call_jim_id = borrow(jim_id(db), bitcny_id(db).amount(2000), core_id(db).amount(2000)) + ->get_id(); BOOST_CHECK_EQUAL( 2000, call_jim_id(db).debt.value ); BOOST_CHECK_EQUAL( 2000, call_jim_id(db).collateral.value ); @@ -1002,13 +1004,13 @@ BOOST_AUTO_TEST_CASE( settle_rounding_test_after_hf_184 ) const operation_result result7 = force_settle(ted_id(db), bitusd_id(db).amount(21)); const operation_result result8 = force_settle(ted_id(db), bitusd_id(db).amount(22)); - force_settlement_id_type settle_id6 = *result6.get().value.new_objects->begin(); + force_settlement_id_type settle_id6 { *result6.get().value.new_objects->begin() }; BOOST_CHECK_EQUAL( settle_id6(db).balance.amount.value, 20 ); - force_settlement_id_type settle_id7 = *result7.get().value.new_objects->begin(); + force_settlement_id_type settle_id7 { *result7.get().value.new_objects->begin() }; BOOST_CHECK_EQUAL( settle_id7(db).balance.amount.value, 21 ); - force_settlement_id_type settle_id8 = *result8.get().value.new_objects->begin(); + force_settlement_id_type settle_id8 { *result8.get().value.new_objects->begin() }; BOOST_CHECK_EQUAL( settle_id8(db).balance.amount.value, 22 ); BOOST_CHECK_EQUAL(get_balance(ted_id(db), core_id(db)), 0); @@ -1019,16 +1021,16 @@ BOOST_AUTO_TEST_CASE( settle_rounding_test_after_hf_184 ) const operation_result result102 = force_settle(joe_id(db), bitcny_id(db).amount(1000)); const operation_result result103 = force_settle(joe_id(db), bitcny_id(db).amount(300)); - force_settlement_id_type settle_id101 = *result101.get() - .value.new_objects->begin(); + force_settlement_id_type settle_id101 { *result101.get() + .value.new_objects->begin() }; BOOST_CHECK_EQUAL( settle_id101(db).balance.amount.value, 100 ); - force_settlement_id_type settle_id102 = *result102.get() - .value.new_objects->begin(); + force_settlement_id_type settle_id102 { *result102.get() + .value.new_objects->begin() }; BOOST_CHECK_EQUAL( settle_id102(db).balance.amount.value, 1000 ); - force_settlement_id_type settle_id103 = *result103.get() - .value.new_objects->begin(); + force_settlement_id_type settle_id103 { *result103.get() + .value.new_objects->begin() }; BOOST_CHECK_EQUAL( settle_id103(db).balance.amount.value, 300 ); BOOST_CHECK_EQUAL(get_balance(joe_id(db), core_id(db)), 0); @@ -1098,7 +1100,7 @@ BOOST_AUTO_TEST_CASE( settle_rounding_test_after_hf_184 ) // bob borrow some const call_order_object& call_bob = *borrow( bob_id(db), bitusd_id(db).amount(19), core_id(db).amount(2) ); - call_order_id_type call_bob_id = call_bob.id; + call_order_id_type call_bob_id = call_bob.get_id(); BOOST_CHECK_EQUAL(get_balance(bob_id(db), core_id(db)), 9999998); // 10000000 - 2 BOOST_CHECK_EQUAL(get_balance(bob_id(db), bitusd_id(db)), 19); // new @@ -1278,8 +1280,8 @@ BOOST_AUTO_TEST_CASE( global_settle_rounding_test ) // 
create assets const auto& bitusd = create_bitasset("USDBIT", paul_id); const auto& core = asset_id_type()(db); - asset_id_type bitusd_id = bitusd.id; - asset_id_type core_id = core.id; + asset_id_type bitusd_id = bitusd.get_id(); + asset_id_type core_id = core.get_id(); // fund accounts transfer(committee_account, michael_id, asset( 100000000 ) ); @@ -1312,7 +1314,7 @@ BOOST_AUTO_TEST_CASE( global_settle_rounding_test ) // paul gets some bitusd const call_order_object& call_paul = *borrow( paul_id(db), bitusd_id(db).amount(1001), core_id(db).amount(101)); - call_order_id_type call_paul_id = call_paul.id; + call_order_id_type call_paul_id = call_paul.get_id(); BOOST_REQUIRE_EQUAL( get_balance( paul_id(db), bitusd_id(db) ), 1001 ); BOOST_REQUIRE_EQUAL( get_balance( paul_id(db), core_id(db) ), 10000000-101); @@ -1328,7 +1330,7 @@ BOOST_AUTO_TEST_CASE( global_settle_rounding_test ) // michael borrow some bitusd const call_order_object& call_michael = *borrow(michael_id(db), bitusd_id(db).amount(6), core_id(db).amount(8)); - call_order_id_type call_michael_id = call_michael.id; + call_order_id_type call_michael_id = call_michael.get_id(); BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 6); BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 100000000-8); @@ -1350,8 +1352,8 @@ BOOST_AUTO_TEST_CASE( global_settle_rounding_test ) BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 801); // all call orders are gone after global settle - BOOST_CHECK( !db.find_object(call_paul_id) ); - BOOST_CHECK( !db.find_object(call_michael_id) ); + BOOST_CHECK( !db.find(call_paul_id) ); + BOOST_CHECK( !db.find(call_michael_id) ); // add settle order and check rounding issue force_settle(rachel_id(db), bitusd_id(db).amount(4)); @@ -1408,8 +1410,8 @@ BOOST_AUTO_TEST_CASE( global_settle_rounding_test_after_hf_184 ) // create assets const auto& bitusd = create_bitasset("USDBIT", paul_id); const auto& core = asset_id_type()(db); - asset_id_type bitusd_id = bitusd.id; - asset_id_type core_id = core.id; + asset_id_type bitusd_id = bitusd.get_id(); + asset_id_type core_id = core.get_id(); // fund accounts transfer(committee_account, michael_id, asset( 100000000 ) ); @@ -1442,7 +1444,7 @@ BOOST_AUTO_TEST_CASE( global_settle_rounding_test_after_hf_184 ) // paul gets some bitusd const call_order_object& call_paul = *borrow( paul_id(db), bitusd_id(db).amount(1001), core_id(db).amount(101)); - call_order_id_type call_paul_id = call_paul.id; + call_order_id_type call_paul_id = call_paul.get_id(); BOOST_REQUIRE_EQUAL( get_balance( paul_id(db), bitusd_id(db) ), 1001 ); BOOST_REQUIRE_EQUAL( get_balance( paul_id(db), core_id(db) ), 10000000-101); @@ -1458,7 +1460,7 @@ BOOST_AUTO_TEST_CASE( global_settle_rounding_test_after_hf_184 ) // michael borrow some bitusd const call_order_object& call_michael = *borrow(michael_id(db), bitusd_id(db).amount(6), core_id(db).amount(8)); - call_order_id_type call_michael_id = call_michael.id; + call_order_id_type call_michael_id = call_michael.get_id(); BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 6); BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 100000000-8); @@ -1480,8 +1482,8 @@ BOOST_AUTO_TEST_CASE( global_settle_rounding_test_after_hf_184 ) BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 801); // all call orders are gone after global settle - BOOST_CHECK( !db.find_object(call_paul_id)); - BOOST_CHECK( !db.find_object(call_michael_id)); + BOOST_CHECK( !db.find(call_paul_id)); + BOOST_CHECK( !db.find(call_michael_id)); // 
settle order will not execute after HF due to too small GRAPHENE_REQUIRE_THROW( force_settle(rachel_id(db), bitusd_id(db).amount(4)), fc::exception ); @@ -1539,7 +1541,7 @@ BOOST_AUTO_TEST_CASE( create_bitassets ) constexpr auto referrer_reward_percent = 10 * GRAPHENE_1_PERCENT; const auto& biteur = create_bitasset( "EURBIT", paul_id, market_fee_percent, charge_market_fee, 2 ); - asset_id_type biteur_id = biteur.id; + asset_id_type biteur_id = biteur.get_id(); const auto& bitusd = create_bitasset( "USDBIT", paul_id, market_fee_percent, charge_market_fee, 2, biteur_id ); @@ -1579,9 +1581,9 @@ BOOST_AUTO_TEST_CASE( market_fee_of_settle_order_before_hardfork_1780 ) GET_ACTOR(rachelreferrer); const asset_object &biteur = get_asset( "EURBIT" ); - asset_id_type biteur_id = biteur.id; + asset_id_type biteur_id = biteur.get_id(); const asset_object &bitusd = get_asset( "USDBIT" ); - asset_id_type bitusd_id = bitusd.id; + asset_id_type bitusd_id = bitusd.get_id(); const auto& core = asset_id_type()(db); @@ -1708,9 +1710,9 @@ BOOST_AUTO_TEST_CASE( market_fee_of_settle_order_after_hardfork_1780 ) GET_ACTOR(rachelreferrer); const asset_object &biteur = get_asset( "EURBIT" ); - asset_id_type biteur_id = biteur.id; + asset_id_type biteur_id = biteur.get_id(); const asset_object &bitusd = get_asset( "USDBIT" ); - asset_id_type bitusd_id = bitusd.id; + asset_id_type bitusd_id = bitusd.get_id(); const auto& core = asset_id_type()(db); @@ -1804,9 +1806,9 @@ BOOST_AUTO_TEST_CASE( market_fee_of_instant_settle_order_after_hardfork_1780 ) GET_ACTOR(rachelreferrer); const asset_object &biteur = get_asset( "EURBIT" ); - asset_id_type biteur_id = biteur.id; + asset_id_type biteur_id = biteur.get_id(); const asset_object &bitusd = get_asset( "USDBIT" ); - asset_id_type bitusd_id = bitusd.id; + asset_id_type bitusd_id = bitusd.get_id(); const auto& core = asset_id_type()(db); @@ -2041,7 +2043,7 @@ BOOST_AUTO_TEST_CASE( settle_order_cancel_due_to_gs ) trx.operations.push_back( acop ); processed_transaction ptx = PUSH_TX(db, trx, ~0); const asset_object& mpa = db.get(ptx.operation_results[0].get()); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); // add a price feed publisher and publish a feed update_feed_producers( mpa_id, { feeder_id } ); @@ -2058,7 +2060,7 @@ BOOST_AUTO_TEST_CASE( settle_order_cancel_due_to_gs ) // undercollateralization price = 100000:2000 * 1250:1000 = 100000:1600 const call_order_object* call_ptr = borrow( borrower, asset(100000, mpa_id), asset(2000) ); BOOST_REQUIRE( call_ptr ); - call_order_id_type call_id = call_ptr->id; + call_order_id_type call_id = call_ptr->get_id(); // Transfer funds to seller transfer( borrower, seller, asset(100000,mpa_id) ); @@ -2071,7 +2073,7 @@ BOOST_AUTO_TEST_CASE( settle_order_cancel_due_to_gs ) // seller settles some auto result = force_settle( seller, asset(11100,mpa_id) ); - force_settlement_id_type settle_id = *result.get().value.new_objects->begin(); + force_settlement_id_type settle_id { *result.get().value.new_objects->begin() }; BOOST_REQUIRE( db.find(settle_id) ); BOOST_CHECK_EQUAL( settle_id(db).balance.amount.value, 11100 ); @@ -2167,7 +2169,7 @@ BOOST_AUTO_TEST_CASE( settle_order_cancel_due_to_no_feed ) trx.operations.push_back( acop ); processed_transaction ptx = PUSH_TX(db, trx, ~0); const asset_object& mpa = db.get(ptx.operation_results[0].get()); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); // add a price feed publisher and publish a feed update_feed_producers( mpa_id, { feeder_id } ); @@ -2184,7 
+2186,7 @@ BOOST_AUTO_TEST_CASE( settle_order_cancel_due_to_no_feed ) // undercollateralization price = 100000:2000 * 1250:1000 = 100000:1600 const call_order_object* call_ptr = borrow( borrower, asset(100000, mpa_id), asset(2000) ); BOOST_REQUIRE( call_ptr ); - call_order_id_type call_id = call_ptr->id; + call_order_id_type call_id = call_ptr->get_id(); // Transfer funds to seller transfer( borrower, seller, asset(100000,mpa_id) ); @@ -2197,7 +2199,7 @@ BOOST_AUTO_TEST_CASE( settle_order_cancel_due_to_no_feed ) // seller settles some auto result = force_settle( seller, asset(11100,mpa_id) ); - force_settlement_id_type settle_id = *result.get().value.new_objects->begin(); + force_settlement_id_type settle_id { *result.get().value.new_objects->begin() }; BOOST_REQUIRE( db.find(settle_id) ); BOOST_CHECK_EQUAL( settle_id(db).balance.amount.value, 11100 ); @@ -2284,7 +2286,7 @@ BOOST_AUTO_TEST_CASE( settle_order_cancel_due_to_zero_amount ) trx.operations.push_back( acop ); processed_transaction ptx = PUSH_TX(db, trx, ~0); const asset_object& mpa = db.get(ptx.operation_results[0].get()); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); // add a price feed publisher and publish a feed update_feed_producers( mpa_id, { feeder_id } ); @@ -2301,7 +2303,7 @@ BOOST_AUTO_TEST_CASE( settle_order_cancel_due_to_zero_amount ) // undercollateralization price = 100000:2000 * 1250:1000 = 100000:1600 const call_order_object* call_ptr = borrow( borrower, asset(100000, mpa_id), asset(2000) ); BOOST_REQUIRE( call_ptr ); - call_order_id_type call_id = call_ptr->id; + call_order_id_type call_id = call_ptr->get_id(); // Transfer funds to seller transfer( borrower, seller, asset(100000,mpa_id) ); @@ -2314,7 +2316,7 @@ BOOST_AUTO_TEST_CASE( settle_order_cancel_due_to_zero_amount ) // seller settles some auto result = force_settle( seller, asset(0,mpa_id) ); - force_settlement_id_type settle_id = *result.get().value.new_objects->begin(); + force_settlement_id_type settle_id { *result.get().value.new_objects->begin() }; BOOST_REQUIRE( db.find(settle_id) ); BOOST_CHECK_EQUAL( settle_id(db).balance.amount.value, 0 ); @@ -2395,7 +2397,7 @@ BOOST_AUTO_TEST_CASE( settle_order_cancel_due_to_100_percent_offset ) trx.operations.push_back( acop ); processed_transaction ptx = PUSH_TX(db, trx, ~0); const asset_object& mpa = db.get(ptx.operation_results[0].get()); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); // add a price feed publisher and publish a feed update_feed_producers( mpa_id, { feeder_id } ); @@ -2412,7 +2414,7 @@ BOOST_AUTO_TEST_CASE( settle_order_cancel_due_to_100_percent_offset ) // undercollateralization price = 100000:2000 * 1250:1000 = 100000:1600 const call_order_object* call_ptr = borrow( borrower, asset(100000, mpa_id), asset(2000) ); BOOST_REQUIRE( call_ptr ); - call_order_id_type call_id = call_ptr->id; + call_order_id_type call_id = call_ptr->get_id(); // Transfer funds to seller transfer( borrower, seller, asset(100000,mpa_id) ); @@ -2425,7 +2427,7 @@ BOOST_AUTO_TEST_CASE( settle_order_cancel_due_to_100_percent_offset ) // seller settles some auto result = force_settle( seller, asset(11100,mpa_id) ); - force_settlement_id_type settle_id = *result.get().value.new_objects->begin(); + force_settlement_id_type settle_id { *result.get().value.new_objects->begin() }; BOOST_REQUIRE( db.find(settle_id) ); BOOST_CHECK_EQUAL( settle_id(db).balance.amount.value, 11100 ); diff --git a/tests/tests/simple_maker_taker_fee_tests.cpp 
b/tests/tests/simple_maker_taker_fee_tests.cpp index 90cc65568c..4d287b1538 100644 --- a/tests/tests/simple_maker_taker_fee_tests.cpp +++ b/tests/tests/simple_maker_taker_fee_tests.cpp @@ -111,7 +111,7 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa // Initialize actors ACTORS((jill)(izzy)); - account_id_type issuer_id = jill.id; + account_id_type issuer_id = jill.get_id(); fc::ecc::private_key issuer_private_key = jill_private_key; // Initialize tokens @@ -273,12 +273,12 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa // Approve the proposal trx.clear(); - proposal_id_type pid = processed.operation_results[0].get(); + proposal_id_type pid { processed.operation_results[0].get() }; proposal_update_operation pup; pup.fee_paying_account = jill.id; pup.proposal = pid; - pup.active_approvals_to_add.insert(jill.id); + pup.active_approvals_to_add.insert(jill.get_id()); trx.operations.push_back(pup); set_expiration(db, trx); sign(trx, jill_private_key); @@ -329,12 +329,12 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa // Approve the proposal trx.clear(); - proposal_id_type pid = processed.operation_results[0].get(); + proposal_id_type pid { processed.operation_results[0].get() }; proposal_update_operation pup; pup.fee_paying_account = jill.id; pup.proposal = pid; - pup.active_approvals_to_add.insert(jill.id); + pup.active_approvals_to_add.insert(jill.get_id()); trx.operations.push_back(pup); set_expiration(db, trx); sign(trx, jill_private_key); @@ -372,7 +372,7 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa ACTORS((smartissuer)(feedproducer)); // Initialize tokens - create_bitasset("SMARTBIT", smartissuer.id); + create_bitasset("SMARTBIT", smartissuer.get_id()); // Obtain asset object after a block is generated to obtain the final object that is commited to the database generate_block(); const asset_object &bitsmart = get_asset("SMARTBIT"); @@ -492,11 +492,11 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa charlie2coin_market_fee_percent); const uint16_t bitsmart1coin_market_fee_percent = 7 * GRAPHENE_1_PERCENT; - create_bitasset("SMARTBIT1", smartissuer.id, bitsmart1coin_market_fee_percent); + create_bitasset("SMARTBIT1", smartissuer.get_id(), bitsmart1coin_market_fee_percent); const uint16_t bitsmart2coin_market_fee_percent = 8 * GRAPHENE_1_PERCENT; - create_bitasset("SMARTBIT2", smartissuer.id, bitsmart2coin_market_fee_percent); + create_bitasset("SMARTBIT2", smartissuer.get_id(), bitsmart2coin_market_fee_percent); // Obtain asset object after a block is generated to obtain the final object that is commited to the database generate_block(); @@ -765,7 +765,7 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa // Alice and Bob place orders which match, and are completely filled by each other // Alice is willing to sell 10 JILLCOIN for at least 300 IZZYCOIN - limit_order_create_operation alice_sell_op = create_sell_operation(alice.id, + limit_order_create_operation alice_sell_op = create_sell_operation(alice.get_id(), jillcoin.amount(10 * JILL_PRECISION), izzycoin.amount(300 * IZZY_PRECISION)); @@ -774,13 +774,14 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa asset alice_sell_fee = db.current_fee_schedule().set_fee(trx.operations.back()); sign(trx, alice_private_key); processed_transaction ptx = PUSH_TX(db, trx); // No exception should be thrown - 
limit_order_id_type alice_order_id = ptx.operation_results[0].get(); + limit_order_id_type alice_order_id { ptx.operation_results[0].get() }; - const limit_order_object* alice_order_before = db.find(alice_order_id); + const limit_order_object* alice_order_before = db.find(alice_order_id); BOOST_CHECK(alice_order_before != nullptr); // Bob is willing to sell 300 IZZYCOIN for at least 10 JILLCOIN - limit_order_create_operation bob_sell_op = create_sell_operation(bob.id, izzycoin.amount(300 * IZZY_PRECISION), + limit_order_create_operation bob_sell_op = create_sell_operation(bob.get_id(), + izzycoin.amount(300 * IZZY_PRECISION), jillcoin.amount( 10 * JILL_PRECISION)); @@ -789,12 +790,12 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa asset bob_sell_fee = db.current_fee_schedule().set_fee(trx.operations.back()); sign(trx, bob_private_key); ptx = PUSH_TX(db, trx); // No exception should be thrown - limit_order_id_type bob_order_id = ptx.operation_results[0].get(); + limit_order_id_type bob_order_id { ptx.operation_results[0].get() }; // Check that the orders were filled by ensuring that they are no longer on the order books - const limit_order_object* alice_order = db.find(alice_order_id); + const limit_order_object* alice_order = db.find(alice_order_id); BOOST_CHECK(alice_order == nullptr); - const limit_order_object* bob_order = db.find(bob_order_id); + const limit_order_object* bob_order = db.find(bob_order_id); BOOST_CHECK(bob_order == nullptr); @@ -930,7 +931,7 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa // Alice and Bob place orders which match, and are completely filled by each other // Alice is willing to sell 10 JILLCOIN for at least 300 IZZYCOIN - limit_order_create_operation alice_sell_op = create_sell_operation(alice.id, + limit_order_create_operation alice_sell_op = create_sell_operation(alice.get_id(), jillcoin.amount(10 * JILL_PRECISION), izzycoin.amount(300 * IZZY_PRECISION)); @@ -939,13 +940,14 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa asset alice_sell_fee = db.current_fee_schedule().set_fee(trx.operations.back()); sign(trx, alice_private_key); processed_transaction ptx = PUSH_TX(db, trx); // No exception should be thrown - limit_order_id_type alice_order_id = ptx.operation_results[0].get(); + limit_order_id_type alice_order_id { ptx.operation_results[0].get() }; - const limit_order_object* alice_order_before = db.find(alice_order_id); + const limit_order_object* alice_order_before = db.find(alice_order_id); BOOST_CHECK(alice_order_before != nullptr); // Bob is willing to sell 300 IZZYCOIN for at least 10 JILLCOIN - limit_order_create_operation bob_sell_op = create_sell_operation(bob.id, izzycoin.amount(300 * IZZY_PRECISION), + limit_order_create_operation bob_sell_op = create_sell_operation(bob.get_id(), + izzycoin.amount(300 * IZZY_PRECISION), jillcoin.amount( 10 * JILL_PRECISION)); @@ -954,12 +956,12 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa asset bob_sell_fee = db.current_fee_schedule().set_fee(trx.operations.back()); sign(trx, bob_private_key); ptx = PUSH_TX(db, trx); // No exception should be thrown - limit_order_id_type bob_order_id = ptx.operation_results[0].get(); + limit_order_id_type bob_order_id { ptx.operation_results[0].get() }; // Check that the orders were filled by ensuring that they are no longer on the order books - const limit_order_object* alice_order = db.find(alice_order_id); + const 
limit_order_object* alice_order = db.find(alice_order_id); BOOST_CHECK(alice_order == nullptr); - const limit_order_object* bob_order = db.find(bob_order_id); + const limit_order_object* bob_order = db.find(bob_order_id); BOOST_CHECK(bob_order == nullptr); @@ -1095,7 +1097,7 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa // Alice and Bob place orders which match, and are completely filled by each other // Alice is willing to sell 10 JILLCOIN for at least 300 IZZYCOIN - limit_order_create_operation alice_sell_op = create_sell_operation(alice.id, + limit_order_create_operation alice_sell_op = create_sell_operation(alice.get_id(), jillcoin.amount(10 * JILL_PRECISION), izzycoin.amount(300 * IZZY_PRECISION)); @@ -1104,13 +1106,14 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa asset alice_sell_fee = db.current_fee_schedule().set_fee(trx.operations.back()); sign(trx, alice_private_key); processed_transaction ptx = PUSH_TX(db, trx); // No exception should be thrown - limit_order_id_type alice_order_id = ptx.operation_results[0].get(); + limit_order_id_type alice_order_id { ptx.operation_results[0].get() }; - const limit_order_object* alice_order_before = db.find(alice_order_id); + const limit_order_object* alice_order_before = db.find(alice_order_id); BOOST_CHECK(alice_order_before != nullptr); // Bob is willing to sell 300 IZZYCOIN for at least 10 JILLCOIN - limit_order_create_operation bob_sell_op = create_sell_operation(bob.id, izzycoin.amount(300 * IZZY_PRECISION), + limit_order_create_operation bob_sell_op = create_sell_operation(bob.get_id(), + izzycoin.amount(300 * IZZY_PRECISION), jillcoin.amount( 10 * JILL_PRECISION)); @@ -1119,12 +1122,12 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa asset bob_sell_fee = db.current_fee_schedule().set_fee(trx.operations.back()); sign(trx, bob_private_key); ptx = PUSH_TX(db, trx); // No exception should be thrown - limit_order_id_type bob_order_id = ptx.operation_results[0].get(); + limit_order_id_type bob_order_id { ptx.operation_results[0].get() }; // Check that the orders were filled by ensuring that they are no longer on the order books - const limit_order_object* alice_order = db.find(alice_order_id); + const limit_order_object* alice_order = db.find(alice_order_id); BOOST_CHECK(alice_order == nullptr); - const limit_order_object* bob_order = db.find(bob_order_id); + const limit_order_object* bob_order = db.find(bob_order_id); BOOST_CHECK(bob_order == nullptr); @@ -1229,7 +1232,7 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa // Alice and Bob place orders which match, and are completely filled by each other // Alice is willing to sell 10 JILLCOIN for at least 300 IZZYCOIN - limit_order_create_operation alice_sell_op = create_sell_operation(alice.id, + limit_order_create_operation alice_sell_op = create_sell_operation(alice.get_id(), jillcoin.amount(10 * JILL_PRECISION), izzycoin.amount(300 * IZZY_PRECISION)); @@ -1238,13 +1241,14 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa asset alice_sell_fee = db.current_fee_schedule().set_fee(trx.operations.back()); sign(trx, alice_private_key); processed_transaction ptx = PUSH_TX(db, trx); // No exception should be thrown - limit_order_id_type alice_order_id = ptx.operation_results[0].get(); + limit_order_id_type alice_order_id { ptx.operation_results[0].get() }; - const limit_order_object* alice_order_before = 
db.find(alice_order_id); + const limit_order_object* alice_order_before = db.find(alice_order_id); BOOST_CHECK(alice_order_before != nullptr); // Bob is willing to sell 300 IZZYCOIN for at least 10 JILLCOIN - limit_order_create_operation bob_sell_op = create_sell_operation(bob.id, izzycoin.amount(300 * IZZY_PRECISION), + limit_order_create_operation bob_sell_op = create_sell_operation(bob.get_id(), + izzycoin.amount(300 * IZZY_PRECISION), jillcoin.amount( 10 * JILL_PRECISION)); @@ -1253,12 +1257,12 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa asset bob_sell_fee = db.current_fee_schedule().set_fee(trx.operations.back()); sign(trx, bob_private_key); ptx = PUSH_TX(db, trx); // No exception should be thrown - limit_order_id_type bob_order_id = ptx.operation_results[0].get(); + limit_order_id_type bob_order_id { ptx.operation_results[0].get() }; // Check that the orders were filled by ensuring that they are no longer on the order books - const limit_order_object* alice_order = db.find(alice_order_id); + const limit_order_object* alice_order = db.find(alice_order_id); BOOST_CHECK(alice_order == nullptr); - const limit_order_object* bob_order = db.find(bob_order_id); + const limit_order_object* bob_order = db.find(bob_order_id); BOOST_CHECK(bob_order == nullptr); @@ -1309,7 +1313,7 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa const uint16_t SMARTBIT_PRECISION = 10000; const uint16_t smartbit_market_fee_percent = 2 * GRAPHENE_1_PERCENT; - create_bitasset("SMARTBIT", smartissuer.id, smartbit_market_fee_percent, + create_bitasset("SMARTBIT", smartissuer.get_id(), smartbit_market_fee_percent, charge_market_fee, 4); // Obtain asset object after a block is generated to obtain the final object that is commited to the database @@ -1318,7 +1322,7 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa const auto &core = asset_id_type()(db); - update_feed_producers(smartbit, {feedproducer.id}); + update_feed_producers(smartbit, {feedproducer.get_id()}); price_feed current_feed; current_feed.settlement_price = smartbit.amount(100) / core.amount(100); @@ -1402,7 +1406,7 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa BOOST_REQUIRE_EQUAL(get_balance(alice, jillcoin), 10 * JILL_PRECISION); BOOST_TEST_MESSAGE("Issuing 300 SMARTBIT to bob"); - transfer(committee_account, bob.id, asset(10000000)); + transfer(committee_account, bob.get_id(), asset(10000000)); publish_feed(smartbit, feedproducer, current_feed); // Publish a recent feed borrow(bob, smartbit.amount(300 * SMARTBIT_PRECISION), asset(2 * 300 * SMARTBIT_PRECISION)); BOOST_TEST_MESSAGE("Checking bob's balance"); @@ -1410,7 +1414,7 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa // Alice and Bob place orders which match, and are completely filled by each other // Alice is willing to sell 10 JILLCOIN for at least 300 SMARTBIT - limit_order_create_operation alice_sell_op = create_sell_operation(alice.id, + limit_order_create_operation alice_sell_op = create_sell_operation(alice.get_id(), jillcoin.amount(10 * JILL_PRECISION), smartbit.amount(300 * SMARTBIT_PRECISION)); trx.clear(); @@ -1418,27 +1422,27 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa asset alice_sell_fee = db.current_fee_schedule().set_fee(trx.operations.back()); sign(trx, alice_private_key); processed_transaction ptx = PUSH_TX(db, trx); // No exception should be thrown - 
limit_order_id_type alice_order_id = ptx.operation_results[0].get(); + limit_order_id_type alice_order_id { ptx.operation_results[0].get() }; - const limit_order_object *alice_order_before = db.find(alice_order_id); + const limit_order_object *alice_order_before = db.find(alice_order_id); BOOST_CHECK(alice_order_before != nullptr); // Bob is willing to sell 300 SMARTBIT for at least 10 JILLCOIN limit_order_create_operation bob_sell_op - = create_sell_operation(bob.id, smartbit.amount(300 * SMARTBIT_PRECISION), + = create_sell_operation(bob.get_id(), smartbit.amount(300 * SMARTBIT_PRECISION), jillcoin.amount(10 * JILL_PRECISION)); trx.clear(); trx.operations.push_back(bob_sell_op); asset bob_sell_fee = db.current_fee_schedule().set_fee(trx.operations.back()); sign(trx, bob_private_key); ptx = PUSH_TX(db, trx); // No exception should be thrown - limit_order_id_type bob_order_id = ptx.operation_results[0].get(); + limit_order_id_type bob_order_id { ptx.operation_results[0].get() }; // Check that the orders were filled by ensuring that they are no longer on the order books - const limit_order_object *alice_order = db.find(alice_order_id); + const limit_order_object *alice_order = db.find(alice_order_id); BOOST_CHECK(alice_order == nullptr); - const limit_order_object *bob_order = db.find(bob_order_id); + const limit_order_object *bob_order = db.find(bob_order_id); BOOST_CHECK(bob_order == nullptr); @@ -1503,7 +1507,7 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa const uint16_t SMARTBIT_PRECISION = 10000; const uint16_t smartbit_market_fee_percent = 2 * GRAPHENE_1_PERCENT; - create_bitasset("SMARTBIT", smartissuer.id, smartbit_market_fee_percent, + create_bitasset("SMARTBIT", smartissuer.get_id(), smartbit_market_fee_percent, charge_market_fee, 4); // Obtain asset object after a block is generated to obtain the final object that is commited to the database generate_block(); @@ -1511,7 +1515,7 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa const auto &core = asset_id_type()(db); - update_feed_producers(smartbit, {feedproducer.id}); + update_feed_producers(smartbit, {feedproducer.get_id()}); price_feed current_feed; current_feed.settlement_price = smartbit.amount(100) / core.amount(100); @@ -1601,7 +1605,7 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa BOOST_REQUIRE_EQUAL(get_balance(alice, jillcoin), 10 * JILL_PRECISION); BOOST_TEST_MESSAGE("Issuing 600 SMARTBIT to bob"); - transfer(committee_account, bob.id, asset(2 * 1000 * SMARTBIT_PRECISION)); + transfer(committee_account, bob.get_id(), asset(2 * 1000 * SMARTBIT_PRECISION)); publish_feed(smartbit, feedproducer, current_feed); // Publish a recent feed borrow(bob, smartbit.amount(600 * SMARTBIT_PRECISION), asset(2 * 600 * SMARTBIT_PRECISION)); BOOST_TEST_MESSAGE("Checking bob's balance"); @@ -1609,7 +1613,7 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa // Alice and Bob place orders which match, and are completely filled by each other // Alice is willing to sell 10 JILLCOIN for at least 300 SMARTBIT - limit_order_create_operation order_1_op = create_sell_operation(alice.id, + limit_order_create_operation order_1_op = create_sell_operation(alice.get_id(), jillcoin.amount(10 * JILL_PRECISION), smartbit.amount(300 * SMARTBIT_PRECISION)); trx.clear(); @@ -1617,28 +1621,28 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa asset alice_sell_fee = 
db.current_fee_schedule().set_fee(trx.operations.back()); sign(trx, alice_private_key); processed_transaction ptx = PUSH_TX(db, trx); // No exception should be thrown - limit_order_id_type order_1_id = ptx.operation_results[0].get(); + limit_order_id_type order_1_id { ptx.operation_results[0].get() }; - const limit_order_object *order_1_before = db.find(order_1_id); + const limit_order_object *order_1_before = db.find(order_1_id); BOOST_CHECK(order_1_before != nullptr); // Bob is willing to sell 600 SMARTBIT for at least 20 JILLCOIN limit_order_create_operation order_2_op - = create_sell_operation(bob.id, smartbit.amount(600 * SMARTBIT_PRECISION), + = create_sell_operation(bob.get_id(), smartbit.amount(600 * SMARTBIT_PRECISION), jillcoin.amount(20 * JILL_PRECISION)); trx.clear(); trx.operations.push_back(order_2_op); asset order_2_sell_fee = db.current_fee_schedule().set_fee(trx.operations.back()); sign(trx, bob_private_key); ptx = PUSH_TX(db, trx); // No exception should be thrown - limit_order_id_type order_2_id = ptx.operation_results[0].get(); + limit_order_id_type order_2_id { ptx.operation_results[0].get() }; // Check that order 1 was completely filled by ensuring that they it is no longer on the order book - const limit_order_object *order_1 = db.find(order_1_id); + const limit_order_object *order_1 = db.find(order_1_id); BOOST_CHECK(order_1 == nullptr); // Check that order 2 was partially filled by ensuring that they it is still on the order book - const limit_order_object *order_2 = db.find(order_2_id); + const limit_order_object *order_2 = db.find(order_2_id); BOOST_CHECK(order_2 != nullptr); @@ -1678,7 +1682,7 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa BOOST_REQUIRE_EQUAL(get_balance(charlie, jillcoin), 5 * JILL_PRECISION); // Charlie is is willing to sell 5 JILLCOIN for at least 150 SMARTBIT - limit_order_create_operation order_3_op = create_sell_operation(charlie.id, + limit_order_create_operation order_3_op = create_sell_operation(charlie.get_id(), jillcoin.amount(5 * JILL_PRECISION), smartbit.amount(150 * SMARTBIT_PRECISION)); trx.clear(); @@ -1686,14 +1690,14 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa asset charlie_sell_fee = db.current_fee_schedule().set_fee(trx.operations.back()); sign(trx, charlie_private_key); ptx = PUSH_TX(db, trx); // No exception should be thrown - limit_order_id_type order_3_id = ptx.operation_results[0].get(); + limit_order_id_type order_3_id { ptx.operation_results[0].get() }; // Order 3 should be completely filled - const limit_order_object *order_3 = db.find(order_3_id); + const limit_order_object *order_3 = db.find(order_3_id); BOOST_CHECK(order_3 == nullptr); // Order 2 should be partially filled and still present on the order books - const limit_order_object *order_2_after = db.find(order_2_id); + const limit_order_object *order_2_after = db.find(order_2_id); BOOST_CHECK(order_2_after != nullptr); // Check the new balance of the taker @@ -1766,7 +1770,7 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa const uint16_t SMARTBIT_PRECISION = 10000; const uint16_t smartbit_market_fee_percent = 2 * GRAPHENE_1_PERCENT; - create_bitasset("SMARTBIT", smartissuer.id, smartbit_market_fee_percent, + create_bitasset("SMARTBIT", smartissuer.get_id(), smartbit_market_fee_percent, charge_market_fee, 4); uint16_t jill_maker_fee_percent = jill_market_fee_percent; @@ -1782,7 +1786,7 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, 
simple_maker_taker_databa const auto &core = asset_id_type()(db); - update_feed_producers(smartbit, {feedproducer.id}); + update_feed_producers(smartbit, {feedproducer.get_id()}); price_feed current_feed; current_feed.settlement_price = smartbit.amount(100) / core.amount(100); @@ -1803,7 +1807,7 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa BOOST_REQUIRE_EQUAL(get_balance(alice, jillcoin), 10 * JILL_PRECISION); BOOST_TEST_MESSAGE("Issuing 600 SMARTBIT to bob"); - transfer(committee_account, bob.id, asset(2 * 1000 * SMARTBIT_PRECISION)); + transfer(committee_account, bob.get_id(), asset(2 * 1000 * SMARTBIT_PRECISION)); publish_feed(smartbit, feedproducer, current_feed); // Publish a recent feed borrow(bob, smartbit.amount(600 * SMARTBIT_PRECISION), asset(2 * 600 * SMARTBIT_PRECISION)); BOOST_TEST_MESSAGE("Checking bob's balance"); @@ -1811,7 +1815,7 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa // Alice and Bob place orders which match, and are completely filled by each other // Alice is willing to sell 10 JILLCOIN for at least 300 SMARTBIT - limit_order_create_operation order_1_op = create_sell_operation(alice.id, + limit_order_create_operation order_1_op = create_sell_operation(alice.get_id(), jillcoin.amount(10 * JILL_PRECISION), smartbit.amount(300 * SMARTBIT_PRECISION)); trx.clear(); @@ -1819,28 +1823,28 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa asset alice_sell_fee = db.current_fee_schedule().set_fee(trx.operations.back()); sign(trx, alice_private_key); processed_transaction ptx = PUSH_TX(db, trx); // No exception should be thrown - limit_order_id_type order_1_id = ptx.operation_results[0].get(); + limit_order_id_type order_1_id { ptx.operation_results[0].get() }; - const limit_order_object *order_1_before = db.find(order_1_id); + const limit_order_object *order_1_before = db.find(order_1_id); BOOST_CHECK(order_1_before != nullptr); // Bob is willing to sell 600 SMARTBIT for at least 20 JILLCOIN limit_order_create_operation order_2_op - = create_sell_operation(bob.id, smartbit.amount(600 * SMARTBIT_PRECISION), + = create_sell_operation(bob.get_id(), smartbit.amount(600 * SMARTBIT_PRECISION), jillcoin.amount(20 * JILL_PRECISION)); trx.clear(); trx.operations.push_back(order_2_op); asset order_2_sell_fee = db.current_fee_schedule().set_fee(trx.operations.back()); sign(trx, bob_private_key); ptx = PUSH_TX(db, trx); // No exception should be thrown - limit_order_id_type order_2_id = ptx.operation_results[0].get(); + limit_order_id_type order_2_id { ptx.operation_results[0].get() }; // Check that order 1 was completely filled by ensuring that they it is no longer on the order book - const limit_order_object *order_1 = db.find(order_1_id); + const limit_order_object *order_1 = db.find(order_1_id); BOOST_CHECK(order_1 == nullptr); // Check that order 2 was partially filled by ensuring that they it is still on the order book - const limit_order_object *order_2 = db.find(order_2_id); + const limit_order_object *order_2 = db.find(order_2_id); BOOST_CHECK(order_2 != nullptr); @@ -1900,7 +1904,7 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa BOOST_REQUIRE_EQUAL(get_balance(charlie, jillcoin), 5 * JILL_PRECISION); // Charlie is is willing to sell 5 JILLCOIN for at least 150 SMARTBIT - limit_order_create_operation order_3_op = create_sell_operation(charlie.id, + limit_order_create_operation order_3_op = create_sell_operation(charlie.get_id(), 
jillcoin.amount(5 * JILL_PRECISION), smartbit.amount(150 * SMARTBIT_PRECISION)); trx.clear(); @@ -1908,14 +1912,14 @@ BOOST_FIXTURE_TEST_SUITE(simple_maker_taker_fee_tests, simple_maker_taker_databa asset charlie_sell_fee = db.current_fee_schedule().set_fee(trx.operations.back()); sign(trx, charlie_private_key); ptx = PUSH_TX(db, trx); // No exception should be thrown - limit_order_id_type order_3_id = ptx.operation_results[0].get(); + limit_order_id_type order_3_id { ptx.operation_results[0].get() }; // Order 3 should be completely filled - const limit_order_object *order_3 = db.find(order_3_id); + const limit_order_object *order_3 = db.find(order_3_id); BOOST_CHECK(order_3 == nullptr); // Order 2 should be partially filled and still present on the order books - const limit_order_object *order_2_after = db.find(order_2_id); + const limit_order_object *order_2_after = db.find(order_2_id); BOOST_CHECK(order_2_after != nullptr); // Check the new balance of the taker diff --git a/tests/tests/smartcoin_tests.cpp b/tests/tests/smartcoin_tests.cpp index 7d69caaa05..43fe290ec3 100644 --- a/tests/tests/smartcoin_tests.cpp +++ b/tests/tests/smartcoin_tests.cpp @@ -71,18 +71,18 @@ BOOST_AUTO_TEST_CASE(bsip36) upgrade_to_lifetime_member(witness11_id); // Create all the witnesses - const witness_id_type witness0_witness_id = create_witness(witness0_id, witness0_private_key).id; - const witness_id_type witness1_witness_id = create_witness(witness1_id, witness1_private_key).id; - const witness_id_type witness2_witness_id = create_witness(witness2_id, witness2_private_key).id; - const witness_id_type witness3_witness_id = create_witness(witness3_id, witness3_private_key).id; - const witness_id_type witness4_witness_id = create_witness(witness4_id, witness4_private_key).id; - const witness_id_type witness5_witness_id = create_witness(witness5_id, witness5_private_key).id; - const witness_id_type witness6_witness_id = create_witness(witness6_id, witness6_private_key).id; - const witness_id_type witness7_witness_id = create_witness(witness7_id, witness7_private_key).id; - const witness_id_type witness8_witness_id = create_witness(witness8_id, witness8_private_key).id; - const witness_id_type witness9_witness_id = create_witness(witness9_id, witness9_private_key).id; - const witness_id_type witness10_witness_id = create_witness(witness10_id, witness10_private_key).id; - const witness_id_type witness11_witness_id = create_witness(witness11_id, witness11_private_key).id; + const witness_id_type witness0_witness_id = create_witness(witness0_id, witness0_private_key).get_id(); + const witness_id_type witness1_witness_id = create_witness(witness1_id, witness1_private_key).get_id(); + const witness_id_type witness2_witness_id = create_witness(witness2_id, witness2_private_key).get_id(); + const witness_id_type witness3_witness_id = create_witness(witness3_id, witness3_private_key).get_id(); + const witness_id_type witness4_witness_id = create_witness(witness4_id, witness4_private_key).get_id(); + const witness_id_type witness5_witness_id = create_witness(witness5_id, witness5_private_key).get_id(); + const witness_id_type witness6_witness_id = create_witness(witness6_id, witness6_private_key).get_id(); + const witness_id_type witness7_witness_id = create_witness(witness7_id, witness7_private_key).get_id(); + const witness_id_type witness8_witness_id = create_witness(witness8_id, witness8_private_key).get_id(); + const witness_id_type witness9_witness_id = create_witness(witness9_id, witness9_private_key).get_id(); 
+ const witness_id_type witness10_witness_id = create_witness(witness10_id, witness10_private_key).get_id(); + const witness_id_type witness11_witness_id = create_witness(witness11_id, witness11_private_key).get_id(); // Create a vector with private key of all witnesses, will be used to activate 11 witnesses at a time const vector private_keys = { @@ -115,7 +115,7 @@ BOOST_AUTO_TEST_CASE(bsip36) }; // Create the asset - const asset_id_type bit_usd_id = create_bitasset("USDBIT").id; + const asset_id_type bit_usd_id = create_bitasset("USDBIT").get_id(); // Update the asset to be fed by system witnesses asset_update_operation op; @@ -390,7 +390,7 @@ BOOST_AUTO_TEST_CASE(bsip36_update_feed_producers) ACTORS( (sam)(alice)(paul)(bob) ); // Create the asset - const asset_id_type bit_usd_id = create_bitasset("USDBIT").id; + const asset_id_type bit_usd_id = create_bitasset("USDBIT").get_id(); // Update asset issuer const asset_object &asset_obj = bit_usd_id(db); @@ -482,14 +482,14 @@ BOOST_AUTO_TEST_CASE(bsip36_additional) INVOKE( bsip36 ); // get the stuff needed from invoked test - const asset_id_type bit_usd_id = get_asset("USDBIT").id; + const asset_id_type bit_usd_id = get_asset("USDBIT").get_id(); const asset_id_type core_id = asset_id_type(); - const account_id_type witness5_id= get_account("witness5").id; - const account_id_type witness6_id= get_account("witness6").id; - const account_id_type witness7_id= get_account("witness7").id; - const account_id_type witness8_id= get_account("witness8").id; - const account_id_type witness9_id= get_account("witness9").id; - const account_id_type witness10_id= get_account("witness10").id; + const account_id_type witness5_id= get_account("witness5").get_id(); + const account_id_type witness6_id= get_account("witness6").get_id(); + const account_id_type witness7_id= get_account("witness7").get_id(); + const account_id_type witness8_id= get_account("witness8").get_id(); + const account_id_type witness9_id= get_account("witness9").get_id(); + const account_id_type witness10_id= get_account("witness10").get_id(); set_expiration( db, trx ); diff --git a/tests/tests/swan_tests.cpp b/tests/tests/swan_tests.cpp index 7253f4134d..4711f46e0d 100644 --- a/tests/tests/swan_tests.cpp +++ b/tests/tests/swan_tests.cpp @@ -79,7 +79,7 @@ struct swan_fixture : database_fixture { bitusd_ptr = &db.get(ptx.operation_results[0].get()); } const auto& bitusd = *bitusd_ptr; - _swan = bitusd.id; + _swan = bitusd.get_id(); _back = asset_id_type(); update_feed_producers(swan(), {_feedproducer}); } @@ -99,7 +99,7 @@ struct swan_fixture : database_fixture { set_feed( 1, 2 ); // this sell order is designed to trigger a black swan - limit_order_id_type oid = create_sell_order( borrower2(), swan().amount(1), back().amount(3) )->id; + limit_order_id_type oid = create_sell_order( borrower2(), swan().amount(1), back().amount(3) )->get_id(); FC_ASSERT( get_balance(borrower(), swan()) == amount1 ); FC_ASSERT( get_balance(borrower2(), swan()) == amount2 - 1 ); @@ -228,16 +228,16 @@ BOOST_AUTO_TEST_CASE( black_swan_issue_346 ) { int64_t bal = get_balance( *actor, core ); if( bal < init_balance ) - transfer( committee_account, actor->id, asset(init_balance - bal) ); + transfer( committee_account, actor->get_id(), asset(init_balance - bal) ); else if( bal > init_balance ) - transfer( actor->id, committee_account, asset(bal - init_balance) ); + transfer( actor->get_id(), committee_account, asset(bal - init_balance) ); } }; auto setup_asset = [&]() -> const asset_object& { const asset_object& 
bitusd = create_bitasset("USDBIT"+fc::to_string(trial)+"X", feeder_id); - update_feed_producers( bitusd, {feeder.id} ); + update_feed_producers( bitusd, {feeder.get_id()} ); BOOST_CHECK( !bitusd.bitasset_data(db).has_settlement() ); trial++; return bitusd; @@ -306,17 +306,17 @@ BOOST_AUTO_TEST_CASE( black_swan_issue_346 ) borrow( borrower, bitusd.amount(100), asset(5000) ); // 2x collat transfer( borrower, seller, bitusd.amount(100) ); // this order is at $0.019, we should not be able to match against it - limit_order_id_type oid_019 = create_sell_order( seller, bitusd.amount(39), core.amount(2000) )->id; + limit_order_id_type oid_019 = create_sell_order( seller, bitusd.amount(39), core.amount(2000) )->get_id(); // this order is at $0.020, we should be able to match against it - limit_order_id_type oid_020 = create_sell_order( seller, bitusd.amount(40), core.amount(2000) )->id; + limit_order_id_type oid_020 = create_sell_order( seller, bitusd.amount(40), core.amount(2000) )->get_id(); set_price( bitusd, bitusd.amount(21) / core.amount(1000) ); // $0.021 // // We attempt to match against $0.019 order and black swan, // and this is intended behavior. See discussion in ticket. // BOOST_CHECK( bitusd.bitasset_data(db).has_settlement() ); - BOOST_CHECK( db.find_object( oid_019 ) != nullptr ); - BOOST_CHECK( db.find_object( oid_020 ) == nullptr ); + BOOST_CHECK( db.find( oid_019 ) != nullptr ); + BOOST_CHECK( db.find( oid_020 ) == nullptr ); } } catch( const fc::exception& e) { @@ -562,7 +562,7 @@ BOOST_AUTO_TEST_CASE( recollateralize ) update_feed_producers(bitcny, {_feedproducer}); price_feed feed; feed.settlement_price = bitcny.amount(1) / asset(1); - publish_feed( bitcny.id, _feedproducer, feed ); + publish_feed( bitcny.get_id(), _feedproducer, feed ); borrow( borrower2(), bitcny.amount(100), asset(1000) ); // can't bid wrong collateral type @@ -828,7 +828,7 @@ BOOST_AUTO_TEST_CASE( revive_empty_with_bid ) set_feed( 1, 2 ); // this sell order is designed to trigger a black swan - limit_order_id_type oid = create_sell_order( borrower2(), swan().amount(1), back().amount(3) )->id; + limit_order_id_type oid = create_sell_order( borrower2(), swan().amount(1), back().amount(3) )->get_id(); BOOST_CHECK( swan().bitasset_data(db).has_settlement() ); cancel_limit_order( oid(db) ); @@ -1016,15 +1016,15 @@ BOOST_AUTO_TEST_CASE( hf2281_asset_permissions_flags_test ) // create a PM with a zero market_fee_percent const asset_object& pm = create_prediction_market( "TESTPM", sam_id, 0, charge_market_fee ); - asset_id_type pm_id = pm.id; + asset_id_type pm_id = pm.get_id(); // create a MPA with a zero market_fee_percent const asset_object& mpa = create_bitasset( "TESTBIT", sam_id, 0, charge_market_fee ); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); // create a UIA with a zero market_fee_percent const asset_object& uia = create_user_issued_asset( "TESTUIA", sam_id(db), charge_market_fee ); - asset_id_type uia_id = uia.id; + asset_id_type uia_id = uia.get_id(); // Prepare for asset update asset_update_operation auop; @@ -1168,7 +1168,7 @@ BOOST_AUTO_TEST_CASE( hf2281_asset_owner_permission_test ) // create a MPA with a zero market_fee_percent const asset_object& mpa = create_bitasset( "TESTBIT", sam_id, 0, charge_market_fee ); - asset_id_type mpa_id = mpa.id; + asset_id_type mpa_id = mpa.get_id(); BOOST_CHECK( mpa_id(db).can_bid_collateral() ); @@ -1300,7 +1300,7 @@ BOOST_AUTO_TEST_CASE( disable_collateral_bidding_test ) // Disable collateral bidding asset_update_operation auop; 
auop.issuer = swan().issuer; - auop.asset_to_update = swan().id; + auop.asset_to_update = swan().get_id(); auop.new_options = swan().options; auop.new_options.flags |= disable_collateral_bidding; trx.operations.clear(); @@ -1428,7 +1428,7 @@ BOOST_AUTO_TEST_CASE( update_bitasset_after_gs ) // unable to update backing asset - asset_id_type uia_id = create_user_issued_asset( "MYUIA" ).id; + asset_id_type uia_id = create_user_issued_asset( "MYUIA" ).get_id(); aubop.new_options.short_backing_asset = uia_id; diff --git a/tests/tests/uia_tests.cpp b/tests/tests/uia_tests.cpp index fff0947c09..320c0bc255 100644 --- a/tests/tests/uia_tests.cpp +++ b/tests/tests/uia_tests.cpp @@ -44,7 +44,7 @@ BOOST_FIXTURE_TEST_SUITE( uia_tests, database_fixture ) BOOST_AUTO_TEST_CASE( create_advanced_uia ) { try { - asset_id_type test_asset_id = db.get_index().get_next_id(); + asset_id_type test_asset_id { db.get_index().get_next_id() }; asset_create_operation creator; creator.issuer = account_id_type(); creator.fee = asset(); @@ -143,7 +143,7 @@ BOOST_AUTO_TEST_CASE( override_transfer_whitelist_test ) { try { ACTORS( (dan)(eric)(sam) ); const asset_object& advanced = create_user_issued_asset( "ADVANCED", sam, white_list | override_authority ); - asset_id_type advanced_id = advanced.id; + asset_id_type advanced_id = advanced.get_id(); BOOST_TEST_MESSAGE( "Issuing 1000 ADVANCED to dan" ); issue_uia( dan, advanced.amount( 1000 ) ); BOOST_TEST_MESSAGE( "Checking dan's balance" ); @@ -223,11 +223,11 @@ BOOST_AUTO_TEST_CASE( override_transfer_whitelist_test ) BOOST_AUTO_TEST_CASE( issue_whitelist_uia ) { try { - account_id_type izzy_id = create_account("izzy").id; + account_id_type izzy_id = create_account("izzy").get_id(); const asset_id_type uia_id = create_user_issued_asset( - "ADVANCED", izzy_id(db), white_list ).id; - account_id_type nathan_id = create_account("nathan").id; - account_id_type vikram_id = create_account("vikram").id; + "ADVANCED", izzy_id(db), white_list ).get_id(); + account_id_type nathan_id = create_account("nathan").get_id(); + account_id_type vikram_id = create_account("vikram").get_id(); trx.clear(); asset_issue_operation op; @@ -306,10 +306,10 @@ BOOST_AUTO_TEST_CASE( transfer_whitelist_uia ) try { INVOKE(issue_whitelist_uia); const asset_object& advanced = get_asset("ADVANCED"); - const asset_id_type uia_id = advanced.id; + const asset_id_type uia_id = advanced.get_id(); const account_object& nathan = get_account("nathan"); const account_object& dan = create_account("dan"); - account_id_type izzy_id = get_account("izzy").id; + account_id_type izzy_id = get_account("izzy").get_id(); upgrade_to_lifetime_member(dan); trx.clear(); @@ -382,10 +382,11 @@ BOOST_AUTO_TEST_CASE( transfer_whitelist_uia ) op.asset_to_update = advanced.id; op.new_options = advanced.options; op.new_options.blacklist_authorities.clear(); - op.new_options.blacklist_authorities.insert(dan.id); + op.new_options.blacklist_authorities.insert(dan.get_id()); trx.operations.back() = op; PUSH_TX( db, trx, ~0 ); - BOOST_CHECK(advanced.options.blacklist_authorities.find(dan.id) != advanced.options.blacklist_authorities.end()); + BOOST_CHECK(advanced.options.blacklist_authorities.find(dan.get_id()) + != advanced.options.blacklist_authorities.end()); } BOOST_TEST_MESSAGE( "Attempting to transfer from dan back to nathan" ); @@ -669,11 +670,14 @@ BOOST_AUTO_TEST_CASE( asset_name_test ) do { if ( (c >= 48 && c <= 57) ) // numbers - BOOST_CHECK_MESSAGE( test_asset_char(this, alice_id(db), c, false, true, true), "Failed on good ASCII 
value " + std::to_string(c) ); + BOOST_CHECK_MESSAGE( test_asset_char(this, alice_id(db), c, false, true, true), + "Failed on good ASCII value " + std::to_string(c) ); else if ( c >= 65 && c <= 90) // letters - BOOST_CHECK_MESSAGE( test_asset_char(this, alice_id(db), c, true, true, true), "Failed on good ASCII value " + std::to_string(c) ); + BOOST_CHECK_MESSAGE( test_asset_char(this, alice_id(db), c, true, true, true), + "Failed on good ASCII value " + std::to_string(c) ); else // everything else - BOOST_CHECK_MESSAGE( test_asset_char(this, alice_id(db), c, false, false, false), "Failed on bad ASCII value " + std::to_string(c) ); + BOOST_CHECK_MESSAGE( test_asset_char(this, alice_id(db), c, false, false, false), + "Failed on bad ASCII value " + std::to_string(c) ); c++; } while (c != 0); } diff --git a/tests/tests/voting_tests.cpp b/tests/tests/voting_tests.cpp index 019c60fc20..22bde94bec 100644 --- a/tests/tests/voting_tests.cpp +++ b/tests/tests/voting_tests.cpp @@ -117,20 +117,20 @@ BOOST_AUTO_TEST_CASE(put_my_witnesses) upgrade_to_lifetime_member(witness13_id); // Create all the witnesses - const witness_id_type witness0_witness_id = create_witness(witness0_id, witness0_private_key).id; - const witness_id_type witness1_witness_id = create_witness(witness1_id, witness1_private_key).id; - const witness_id_type witness2_witness_id = create_witness(witness2_id, witness2_private_key).id; - const witness_id_type witness3_witness_id = create_witness(witness3_id, witness3_private_key).id; - const witness_id_type witness4_witness_id = create_witness(witness4_id, witness4_private_key).id; - const witness_id_type witness5_witness_id = create_witness(witness5_id, witness5_private_key).id; - const witness_id_type witness6_witness_id = create_witness(witness6_id, witness6_private_key).id; - const witness_id_type witness7_witness_id = create_witness(witness7_id, witness7_private_key).id; - const witness_id_type witness8_witness_id = create_witness(witness8_id, witness8_private_key).id; - const witness_id_type witness9_witness_id = create_witness(witness9_id, witness9_private_key).id; - const witness_id_type witness10_witness_id = create_witness(witness10_id, witness10_private_key).id; - const witness_id_type witness11_witness_id = create_witness(witness11_id, witness11_private_key).id; - const witness_id_type witness12_witness_id = create_witness(witness12_id, witness12_private_key).id; - const witness_id_type witness13_witness_id = create_witness(witness13_id, witness13_private_key).id; + const witness_id_type witness0_witness_id = create_witness(witness0_id, witness0_private_key).get_id(); + const witness_id_type witness1_witness_id = create_witness(witness1_id, witness1_private_key).get_id(); + const witness_id_type witness2_witness_id = create_witness(witness2_id, witness2_private_key).get_id(); + const witness_id_type witness3_witness_id = create_witness(witness3_id, witness3_private_key).get_id(); + const witness_id_type witness4_witness_id = create_witness(witness4_id, witness4_private_key).get_id(); + const witness_id_type witness5_witness_id = create_witness(witness5_id, witness5_private_key).get_id(); + const witness_id_type witness6_witness_id = create_witness(witness6_id, witness6_private_key).get_id(); + const witness_id_type witness7_witness_id = create_witness(witness7_id, witness7_private_key).get_id(); + const witness_id_type witness8_witness_id = create_witness(witness8_id, witness8_private_key).get_id(); + const witness_id_type witness9_witness_id = create_witness(witness9_id, 
witness9_private_key).get_id(); + const witness_id_type witness10_witness_id = create_witness(witness10_id, witness10_private_key).get_id(); + const witness_id_type witness11_witness_id = create_witness(witness11_id, witness11_private_key).get_id(); + const witness_id_type witness12_witness_id = create_witness(witness12_id, witness12_private_key).get_id(); + const witness_id_type witness13_witness_id = create_witness(witness13_id, witness13_private_key).get_id(); // Create a vector with private key of all witnesses, will be used to activate 9 witnesses at a time const vector private_keys = { @@ -231,7 +231,7 @@ BOOST_AUTO_TEST_CASE(track_votes_witnesses_enabled) INVOKE(put_my_witnesses); - const account_id_type witness1_id= get_account("witness1").id; + const account_id_type witness1_id= get_account("witness1").get_id(); auto witness1_object = db_api1.get_witness_by_account(witness1_id(db).name); BOOST_CHECK_EQUAL(witness1_object->total_votes, 111u); @@ -246,7 +246,7 @@ BOOST_AUTO_TEST_CASE(track_votes_witnesses_disabled) INVOKE(put_my_witnesses); - const account_id_type witness1_id= get_account("witness1").id; + const account_id_type witness1_id= get_account("witness1").get_id(); auto witness1_object = db_api1.get_witness_by_account(witness1_id(db).name); BOOST_CHECK_EQUAL(witness1_object->total_votes, 0u); @@ -289,20 +289,20 @@ BOOST_AUTO_TEST_CASE(put_my_committee_members) upgrade_to_lifetime_member(committee13_id); // Create all the committee - const committee_member_id_type committee0_committee_id = create_committee_member(committee0_id(db)).id; - const committee_member_id_type committee1_committee_id = create_committee_member(committee1_id(db)).id; - const committee_member_id_type committee2_committee_id = create_committee_member(committee2_id(db)).id; - const committee_member_id_type committee3_committee_id = create_committee_member(committee3_id(db)).id; - const committee_member_id_type committee4_committee_id = create_committee_member(committee4_id(db)).id; - const committee_member_id_type committee5_committee_id = create_committee_member(committee5_id(db)).id; - const committee_member_id_type committee6_committee_id = create_committee_member(committee6_id(db)).id; - const committee_member_id_type committee7_committee_id = create_committee_member(committee7_id(db)).id; - const committee_member_id_type committee8_committee_id = create_committee_member(committee8_id(db)).id; - const committee_member_id_type committee9_committee_id = create_committee_member(committee9_id(db)).id; - const committee_member_id_type committee10_committee_id = create_committee_member(committee10_id(db)).id; - const committee_member_id_type committee11_committee_id = create_committee_member(committee11_id(db)).id; - const committee_member_id_type committee12_committee_id = create_committee_member(committee12_id(db)).id; - const committee_member_id_type committee13_committee_id = create_committee_member(committee13_id(db)).id; + const committee_member_id_type committee0_committee_id = create_committee_member(committee0_id(db)).get_id(); + const committee_member_id_type committee1_committee_id = create_committee_member(committee1_id(db)).get_id(); + const committee_member_id_type committee2_committee_id = create_committee_member(committee2_id(db)).get_id(); + const committee_member_id_type committee3_committee_id = create_committee_member(committee3_id(db)).get_id(); + const committee_member_id_type committee4_committee_id = create_committee_member(committee4_id(db)).get_id(); + const 
committee_member_id_type committee5_committee_id = create_committee_member(committee5_id(db)).get_id(); + const committee_member_id_type committee6_committee_id = create_committee_member(committee6_id(db)).get_id(); + const committee_member_id_type committee7_committee_id = create_committee_member(committee7_id(db)).get_id(); + const committee_member_id_type committee8_committee_id = create_committee_member(committee8_id(db)).get_id(); + const committee_member_id_type committee9_committee_id = create_committee_member(committee9_id(db)).get_id(); + const committee_member_id_type committee10_committee_id = create_committee_member(committee10_id(db)).get_id(); + const committee_member_id_type committee11_committee_id = create_committee_member(committee11_id(db)).get_id(); + const committee_member_id_type committee12_committee_id = create_committee_member(committee12_id(db)).get_id(); + const committee_member_id_type committee13_committee_id = create_committee_member(committee13_id(db)).get_id(); // Create a vector with private key of all committee members, will be used to activate 9 members at a time const vector private_keys = { @@ -410,7 +410,7 @@ BOOST_AUTO_TEST_CASE(track_votes_committee_enabled) INVOKE(put_my_committee_members); - const account_id_type committee1_id= get_account("committee1").id; + const account_id_type committee1_id= get_account("committee1").get_id(); auto committee1_object = db_api1.get_committee_member_by_account(committee1_id(db).name); BOOST_CHECK_EQUAL(committee1_object->total_votes, 111u); @@ -425,7 +425,7 @@ BOOST_AUTO_TEST_CASE(track_votes_committee_disabled) INVOKE(put_my_committee_members); - const account_id_type committee1_id= get_account("committee1").id; + const account_id_type committee1_id= get_account("committee1").get_id(); auto committee1_object = db_api1.get_committee_member_by_account(committee1_id(db).name); BOOST_CHECK_EQUAL(committee1_object->total_votes, 0u); @@ -614,7 +614,7 @@ BOOST_AUTO_TEST_CASE( witness_votes_calculation ) { auto wit_object = db_api1.get_witness_by_account( wit_account_ids[i](db).name ); BOOST_REQUIRE( wit_object.valid() ); - wit_ids.push_back( wit_object->id ); + wit_ids.push_back( wit_object->get_id() ); } generate_blocks( HARDFORK_CORE_2103_TIME - 750 * 86400 ); @@ -878,7 +878,7 @@ BOOST_AUTO_TEST_CASE( committee_votes_calculation ) { auto com_object = db_api1.get_committee_member_by_account( com_account_ids[i](db).name ); BOOST_REQUIRE( com_object.valid() ); - com_ids.push_back( com_object->id ); + com_ids.push_back( com_object->get_id() ); } generate_blocks( HARDFORK_CORE_2103_TIME - 750 * 86400 ); From c761429a93e1d391713287448971651272d64da4 Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 18 Oct 2022 01:49:27 +0000 Subject: [PATCH 288/338] Bump ZLIB version --- .github/workflows/build-and-test.win.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-and-test.win.yml b/.github/workflows/build-and-test.win.yml index cc2fccc1a0..641cd7c497 100644 --- a/.github/workflows/build-and-test.win.yml +++ b/.github/workflows/build-and-test.win.yml @@ -8,7 +8,7 @@ env: BOOST_DOTTED_VERSION: 1.69.0 CURL_VERSION: 7.67.0 OPENSSL_VERSION: 1.1.1d - ZLIB_VERSION: 1.2.12 + ZLIB_VERSION: 1.2.13 jobs: prepare-mingw64-libs: name: Build required 3rd-party libraries From a7587b56041f35c7ba9c66db0a0452fe797a6f0e Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 18 Oct 2022 15:07:12 +0000 Subject: [PATCH 289/338] Refactor code and fix code smells --- libraries/chain/account_object.cpp | 18 ++++-- 
libraries/chain/db_balance.cpp | 11 ++-- .../chain/withdraw_permission_evaluator.hpp | 24 +++---- .../chain/withdraw_permission_evaluator.cpp | 16 ++--- libraries/db/include/graphene/db/index.hpp | 52 +++++++-------- libraries/db/include/graphene/db/object.hpp | 8 +-- .../market_history/market_history_plugin.hpp | 63 +++++++++---------- .../market_history/market_history_plugin.cpp | 19 +++--- .../include/graphene/protocol/object_id.hpp | 2 +- .../wallet/include/graphene/wallet/wallet.hpp | 8 +-- libraries/wallet/wallet.cpp | 8 +-- libraries/wallet/wallet_api_impl.cpp | 2 +- libraries/wallet/wallet_api_impl.hpp | 2 +- libraries/wallet/wallet_transfer.cpp | 4 +- 14 files changed, 122 insertions(+), 115 deletions(-) diff --git a/libraries/chain/account_object.cpp b/libraries/chain/account_object.cpp index 6f38c289fb..f031f6a2f1 100644 --- a/libraries/chain/account_object.cpp +++ b/libraries/chain/account_object.cpp @@ -191,7 +191,8 @@ void account_member_index::object_modified(const object& after) { set after_account_members = get_account_members(a); - vector removed; removed.reserve(before_account_members.size()); + vector removed; + removed.reserve(before_account_members.size()); std::set_difference(before_account_members.begin(), before_account_members.end(), after_account_members.begin(), after_account_members.end(), std::inserter(removed, removed.end())); @@ -199,7 +200,8 @@ void account_member_index::object_modified(const object& after) for( auto itr = removed.begin(); itr != removed.end(); ++itr ) account_to_account_memberships[*itr].erase(account_id); - vector added; added.reserve(after_account_members.size()); + vector added; + added.reserve(after_account_members.size()); std::set_difference(after_account_members.begin(), after_account_members.end(), before_account_members.begin(), before_account_members.end(), std::inserter(added, added.end())); @@ -212,7 +214,8 @@ void account_member_index::object_modified(const object& after) { set after_key_members = get_key_members(a); - vector removed; removed.reserve(before_key_members.size()); + vector removed; + removed.reserve(before_key_members.size()); std::set_difference(before_key_members.begin(), before_key_members.end(), after_key_members.begin(), after_key_members.end(), std::inserter(removed, removed.end())); @@ -220,7 +223,8 @@ void account_member_index::object_modified(const object& after) for( auto itr = removed.begin(); itr != removed.end(); ++itr ) account_to_key_memberships[*itr].erase(account_id); - vector added; added.reserve(after_key_members.size()); + vector added; + added.reserve(after_key_members.size()); std::set_difference(after_key_members.begin(), after_key_members.end(), before_key_members.begin(), before_key_members.end(), std::inserter(added, added.end())); @@ -232,7 +236,8 @@ void account_member_index::object_modified(const object& after) { set
<address> after_address_members = get_address_members(a); - vector<address>
removed; removed.reserve(before_address_members.size()); + vector<address>
removed; + removed.reserve(before_address_members.size()); std::set_difference(before_address_members.begin(), before_address_members.end(), after_address_members.begin(), after_address_members.end(), std::inserter(removed, removed.end())); @@ -240,7 +245,8 @@ void account_member_index::object_modified(const object& after) for( auto itr = removed.begin(); itr != removed.end(); ++itr ) account_to_address_memberships[*itr].erase(account_id); - vector<address>
added; added.reserve(after_address_members.size()); + vector<address>
added; + added.reserve(after_address_members.size()); std::set_difference(after_address_members.begin(), after_address_members.end(), before_address_members.begin(), before_address_members.end(), std::inserter(added, added.end())); diff --git a/libraries/chain/db_balance.cpp b/libraries/chain/db_balance.cpp index 990e17e990..75040dd716 100644 --- a/libraries/chain/db_balance.cpp +++ b/libraries/chain/db_balance.cpp @@ -60,7 +60,7 @@ void database::adjust_balance(account_id_type account, asset delta ) auto abo = index.get_account_balance( account, delta.asset_id ); if( !abo ) { - FC_ASSERT( delta.amount > 0, "Insufficient Balance: ${a}'s balance of ${b} is less than required ${r}", + FC_ASSERT( delta.amount > 0, "Insufficient Balance: ${a}'s balance of ${b} is less than required ${r}", ("a",account(*this).name) ("b",to_pretty_string(asset(0,delta.asset_id))) ("r",to_pretty_string(-delta))); @@ -209,9 +209,12 @@ void database::deposit_cashback(const account_object& acct, share_type amount, b account_id_type acct_id = acct.get_id(); - if( acct_id == GRAPHENE_COMMITTEE_ACCOUNT || acct_id == GRAPHENE_WITNESS_ACCOUNT || - acct_id == GRAPHENE_RELAXED_COMMITTEE_ACCOUNT || acct_id == GRAPHENE_NULL_ACCOUNT || - acct_id == GRAPHENE_TEMP_ACCOUNT ) + // Note: missing 'PROXY_TO_SELF' here + bool is_reserved_account = ( acct_id == GRAPHENE_COMMITTEE_ACCOUNT || acct_id == GRAPHENE_WITNESS_ACCOUNT || + acct_id == GRAPHENE_RELAXED_COMMITTEE_ACCOUNT ); + is_reserved_account = ( is_reserved_account || acct_id == GRAPHENE_NULL_ACCOUNT || + acct_id == GRAPHENE_TEMP_ACCOUNT ); + if( is_reserved_account ) { // The blockchain's accounts do not get cashback; it simply goes to the reserve pool. modify( get_core_dynamic_data(), [amount](asset_dynamic_data_object& d) { diff --git a/libraries/chain/include/graphene/chain/withdraw_permission_evaluator.hpp b/libraries/chain/include/graphene/chain/withdraw_permission_evaluator.hpp index 25bc4b1bc7..920e8468a1 100644 --- a/libraries/chain/include/graphene/chain/withdraw_permission_evaluator.hpp +++ b/libraries/chain/include/graphene/chain/withdraw_permission_evaluator.hpp @@ -30,37 +30,37 @@ namespace graphene { namespace chain { class withdraw_permission_create_evaluator : public evaluator { public: - typedef withdraw_permission_create_operation operation_type; + using operation_type = withdraw_permission_create_operation; - void_result do_evaluate( const operation_type& op ); - object_id_type do_apply( const operation_type& op ); + void_result do_evaluate( const operation_type& op ) const; + object_id_type do_apply( const operation_type& op ) const; }; class withdraw_permission_claim_evaluator : public evaluator { public: - typedef withdraw_permission_claim_operation operation_type; + using operation_type = withdraw_permission_claim_operation; - void_result do_evaluate( const operation_type& op ); - void_result do_apply( const operation_type& op ); + void_result do_evaluate( const operation_type& op ) const; + void_result do_apply( const operation_type& op ) const; }; class withdraw_permission_update_evaluator : public evaluator { public: - typedef withdraw_permission_update_operation operation_type; + using operation_type = withdraw_permission_update_operation; - void_result do_evaluate( const operation_type& op ); - void_result do_apply( const operation_type& op ); + void_result do_evaluate( const operation_type& op ) const; + void_result do_apply( const operation_type& op ) const; }; class withdraw_permission_delete_evaluator : public evaluator { public: - typedef 
withdraw_permission_delete_operation operation_type; + using operation_type = withdraw_permission_delete_operation; - void_result do_evaluate( const operation_type& op ); - void_result do_apply( const operation_type& op ); + void_result do_evaluate( const operation_type& op ) const; + void_result do_apply( const operation_type& op ) const; }; } } // graphene::chain diff --git a/libraries/chain/withdraw_permission_evaluator.cpp b/libraries/chain/withdraw_permission_evaluator.cpp index eaf8566e56..3daf6dc599 100644 --- a/libraries/chain/withdraw_permission_evaluator.cpp +++ b/libraries/chain/withdraw_permission_evaluator.cpp @@ -31,7 +31,7 @@ namespace graphene { namespace chain { -void_result withdraw_permission_create_evaluator::do_evaluate(const operation_type& op) +void_result withdraw_permission_create_evaluator::do_evaluate(const operation_type& op)const { try { database& d = db(); FC_ASSERT(d.find(op.withdraw_from_account)); @@ -44,7 +44,7 @@ void_result withdraw_permission_create_evaluator::do_evaluate(const operation_ty return void_result(); } FC_CAPTURE_AND_RETHROW( (op) ) } -object_id_type withdraw_permission_create_evaluator::do_apply(const operation_type& op) +object_id_type withdraw_permission_create_evaluator::do_apply(const operation_type& op)const { try { return db().create([&op](withdraw_permission_object& p) { p.withdraw_from_account = op.withdraw_from_account; @@ -57,7 +57,7 @@ object_id_type withdraw_permission_create_evaluator::do_apply(const operation_ty } FC_CAPTURE_AND_RETHROW( (op) ) } void_result withdraw_permission_claim_evaluator::do_evaluate( - const withdraw_permission_claim_evaluator::operation_type& op) + const withdraw_permission_claim_evaluator::operation_type& op)const { try { const database& d = db(); time_point_sec head_block_time = d.head_block_time(); @@ -93,7 +93,7 @@ void_result withdraw_permission_claim_evaluator::do_evaluate( } FC_CAPTURE_AND_RETHROW( (op) ) } void_result withdraw_permission_claim_evaluator::do_apply( - const withdraw_permission_claim_evaluator::operation_type& op) + const withdraw_permission_claim_evaluator::operation_type& op)const { try { database& d = db(); @@ -114,7 +114,7 @@ void_result withdraw_permission_claim_evaluator::do_apply( } FC_CAPTURE_AND_RETHROW( (op) ) } void_result withdraw_permission_update_evaluator::do_evaluate( - const withdraw_permission_update_evaluator::operation_type& op) + const withdraw_permission_update_evaluator::operation_type& op)const { try { database& d = db(); @@ -130,7 +130,7 @@ void_result withdraw_permission_update_evaluator::do_evaluate( } FC_CAPTURE_AND_RETHROW( (op) ) } void_result withdraw_permission_update_evaluator::do_apply( - const withdraw_permission_update_evaluator::operation_type& op) + const withdraw_permission_update_evaluator::operation_type& op)const { try { database& d = db(); @@ -145,7 +145,7 @@ void_result withdraw_permission_update_evaluator::do_apply( } FC_CAPTURE_AND_RETHROW( (op) ) } void_result withdraw_permission_delete_evaluator::do_evaluate( - const withdraw_permission_delete_evaluator::operation_type& op) + const withdraw_permission_delete_evaluator::operation_type& op)const { try { database& d = db(); @@ -157,7 +157,7 @@ void_result withdraw_permission_delete_evaluator::do_evaluate( } FC_CAPTURE_AND_RETHROW( (op) ) } void_result withdraw_permission_delete_evaluator::do_apply( - const withdraw_permission_delete_evaluator::operation_type& op) + const withdraw_permission_delete_evaluator::operation_type& op)const { try { 
db().remove(db().get(op.withdrawal_permission)); return void_result(); diff --git a/libraries/db/include/graphene/db/index.hpp b/libraries/db/include/graphene/db/index.hpp index 7e431a5605..6a86c16137 100644 --- a/libraries/db/include/graphene/db/index.hpp +++ b/libraries/db/include/graphene/db/index.hpp @@ -42,7 +42,7 @@ namespace graphene { namespace db { class index_observer { public: - virtual ~index_observer(){} + virtual ~index_observer() = default; /** called just after the object is added */ virtual void on_add( const object& obj ){} /** called just before obj is removed */ @@ -70,7 +70,7 @@ namespace graphene { namespace db { class index { public: - virtual ~index(){} + virtual ~index() = default; virtual uint8_t object_space_id()const = 0; virtual uint8_t object_type_id()const = 0; @@ -139,7 +139,7 @@ namespace graphene { namespace db { class secondary_index { public: - virtual ~secondary_index(){}; + virtual ~secondary_index() = default; virtual void object_inserted( const object& obj ){}; virtual void object_removed( const object& obj ){}; virtual void about_to_modify( const object& before ){}; @@ -154,6 +154,8 @@ namespace graphene { namespace db { public: base_primary_index( object_database& db ):_db(db){} + virtual ~base_primary_index() = default; + /** called just before obj is modified */ void save_undo( const object& obj ); @@ -218,9 +220,7 @@ namespace graphene { namespace db { FC_ASSERT( (1ULL << chunkbits) > MAX_HOLE, "Small chunkbits is inefficient." ); } - virtual ~direct_index(){} - - virtual void object_inserted( const object& obj ) + void object_inserted( const object& obj ) override { uint64_t instance = obj.id.instance(); if( instance == next ) @@ -254,7 +254,7 @@ namespace graphene { namespace db { content[instance >> chunkbits][instance & _mask] = static_cast( &obj ); } - virtual void object_removed( const object& obj ) + void object_removed( const object& obj ) override { FC_ASSERT( nullptr != dynamic_cast(&obj), "Wrong object type!" 
); uint64_t instance = obj.id.instance(); @@ -264,12 +264,12 @@ namespace graphene { namespace db { content[instance >> chunkbits][instance & _mask] = nullptr; } - virtual void about_to_modify( const object& before ) + void about_to_modify( const object& before ) override { ids_being_modified.emplace( before.id ); } - virtual void object_modified( const object& after ) + void object_modified( const object& after ) override { FC_ASSERT( ids_being_modified.top() == after.id, "Modification of ID is not supported!"); ids_being_modified.pop(); @@ -312,7 +312,7 @@ namespace graphene { namespace db { class primary_index : public DerivedIndex, public base_primary_index { public: - typedef typename DerivedIndex::object_type object_type; + using object_type = typename DerivedIndex::object_type; primary_index( object_database& db ) :base_primary_index(db),_next_id(object_type::space_id,object_type::type_id,0) @@ -321,18 +321,18 @@ namespace graphene { namespace db { _direct_by_id = add_secondary_index< direct_index< object_type, DirectBits > >(); } - virtual uint8_t object_space_id()const override + uint8_t object_space_id()const override { return object_type::space_id; } - virtual uint8_t object_type_id()const override + uint8_t object_type_id()const override { return object_type::type_id; } - virtual object_id_type get_next_id()const override { return _next_id; } - virtual void use_next_id()override { ++_next_id.number; } - virtual void set_next_id( object_id_type id )override { _next_id = id; } + object_id_type get_next_id()const override { return _next_id; } + void use_next_id()override { ++_next_id.number; } + void set_next_id( object_id_type id )override { _next_id = id; } /** @return the object with id or nullptr if not found */ - virtual const object* find( object_id_type id )const override + const object* find( object_id_type id )const override { if( DirectBits > 0 ) return _direct_by_id->find( id ); @@ -345,7 +345,7 @@ namespace graphene { namespace db { return fc::sha256::hash(desc); } - virtual void open( const fc::path& db )override + void open( const fc::path& db )override { if( !fc::exists( db ) ) return; fc::file_mapping fm( db.generic_string().c_str(), fc::read_only ); @@ -365,7 +365,7 @@ namespace graphene { namespace db { } } - virtual void save( const fc::path& db ) override + void save( const fc::path& db ) override { std::ofstream out( db.generic_string(), std::ofstream::binary | std::ofstream::out | std::ofstream::trunc ); @@ -380,7 +380,7 @@ namespace graphene { namespace db { }); } - virtual const object& load( const std::vector& data )override + const object& load( const std::vector& data )override { const auto& result = DerivedIndex::insert( fc::raw::unpack( data ) ); for( const auto& item : _sindex ) @@ -389,7 +389,7 @@ namespace graphene { namespace db { } - virtual const object& create(const std::function& constructor )override + const object& create(const std::function& constructor )override { const auto& result = DerivedIndex::create( constructor ); for( const auto& item : _sindex ) @@ -398,7 +398,7 @@ namespace graphene { namespace db { return result; } - virtual const object& insert( object&& obj ) override + const object& insert( object&& obj ) override { const auto& result = DerivedIndex::insert( std::move( obj ) ); for( const auto& item : _sindex ) @@ -407,7 +407,7 @@ namespace graphene { namespace db { return result; } - virtual void remove( const object& obj ) override + void remove( const object& obj ) override { for( const auto& item : _sindex ) 
item->object_removed( obj ); @@ -415,7 +415,7 @@ namespace graphene { namespace db { DerivedIndex::remove(obj); } - virtual void modify( const object& obj, const std::function& m )override + void modify( const object& obj, const std::function& m )override { save_undo( obj ); for( const auto& item : _sindex ) @@ -426,12 +426,12 @@ namespace graphene { namespace db { on_modify( obj ); } - virtual void add_observer( const std::shared_ptr& o ) override + void add_observer( const std::shared_ptr& o ) override { _observers.emplace_back( o ); } - virtual void object_from_variant( const fc::variant& var, object& obj, uint32_t max_depth )const override + void object_from_variant( const fc::variant& var, object& obj, uint32_t max_depth )const override { object_id_type id = obj.id; object_type* result = dynamic_cast( &obj ); @@ -440,7 +440,7 @@ namespace graphene { namespace db { obj.id = id; } - virtual void object_default( object& obj )const override + void object_default( object& obj )const override { object_id_type id = obj.id; object_type* result = dynamic_cast( &obj ); diff --git a/libraries/db/include/graphene/db/object.hpp b/libraries/db/include/graphene/db/object.hpp index 747c0e6b72..bc03841511 100644 --- a/libraries/db/include/graphene/db/object.hpp +++ b/libraries/db/include/graphene/db/object.hpp @@ -89,18 +89,18 @@ namespace graphene { namespace db { { public: using object::object; // constructors - virtual std::unique_ptr clone()const + std::unique_ptr clone()const override { return std::make_unique( *static_cast(this) ); } - virtual void move_from( object& obj ) + void move_from( object& obj ) override { static_cast(*this) = std::move( static_cast(obj) ); } - virtual fc::variant to_variant()const + fc::variant to_variant()const override { return fc::variant( static_cast(*this), MAX_NESTING ); } - virtual std::vector pack()const { return fc::raw::pack( static_cast(*this) ); } + std::vector pack()const override { return fc::raw::pack( static_cast(*this) ); } }; template diff --git a/libraries/plugins/market_history/include/graphene/market_history/market_history_plugin.hpp b/libraries/plugins/market_history/include/graphene/market_history/market_history_plugin.hpp index 2020caa5ef..6bdaccf1f3 100644 --- a/libraries/plugins/market_history/include/graphene/market_history/market_history_plugin.hpp +++ b/libraries/plugins/market_history/include/graphene/market_history/market_history_plugin.hpp @@ -55,9 +55,10 @@ enum market_history_object_type bucket_object_type = 1, market_ticker_object_type = 2, market_ticker_meta_object_type = 3, - liquidity_pool_history_object_type = 4, - liquidity_pool_ticker_meta_object_type = 5, - liquidity_pool_ticker_object_type = 6 + // LP = liquidity pool + lp_history_object_type = 4, + lp_ticker_meta_object_type = 5, + lp_ticker_object_type = 6 }; struct bucket_key @@ -120,17 +121,17 @@ struct order_history_object : public abstract_object, member< object, object_id_type, &object::id > >, ordered_unique< tag, member< bucket_object, bucket_key, &bucket_object::key > > > -> bucket_object_multi_index_type; +>; struct by_market_time; -typedef multi_index_container< +using order_history_multi_index_type = multi_index_container< order_history_object, indexed_by< ordered_unique< tag, member< object, object_id_type, &object::id > >, @@ -186,11 +187,11 @@ typedef multi_index_container< > > > -> order_history_multi_index_type; +>; struct by_market; struct by_volume; -typedef multi_index_container< +using market_ticker_obj_mlti_idx_type = multi_index_container< 
market_ticker_object, indexed_by< ordered_unique< tag, member< object, object_id_type, &object::id > >, @@ -205,16 +206,16 @@ typedef multi_index_container< > > > -> market_ticker_object_multi_index_type; +>; -typedef generic_index bucket_index; -typedef generic_index history_index; -typedef generic_index market_ticker_index; +using bucket_index = generic_index; +using history_index = generic_index; +using market_ticker_index = generic_index; /** Stores operation histories related to liquidity pools */ struct liquidity_pool_history_object : public abstract_object + MARKET_HISTORY_SPACE_ID, lp_history_object_type> { liquidity_pool_id_type pool; uint64_t sequence = 0; @@ -228,7 +229,7 @@ struct by_pool_time; struct by_pool_op_type_seq; struct by_pool_op_type_time; -typedef multi_index_container< +using lp_history_multi_index_type = multi_index_container< liquidity_pool_history_object, indexed_by< ordered_unique< tag, member< object, object_id_type, &object::id > >, @@ -281,25 +282,24 @@ typedef multi_index_container< > > > -> liquidity_pool_history_multi_index_type; +>; -typedef generic_index< liquidity_pool_history_object, - liquidity_pool_history_multi_index_type > liquidity_pool_history_index; +using liquidity_pool_history_index = generic_index< liquidity_pool_history_object, lp_history_multi_index_type >; /// Stores meta data for liquidity pool tickers -struct liquidity_pool_ticker_meta_object : public abstract_object +struct lp_ticker_meta_object : public abstract_object { object_id_type rolling_min_lp_his_id; bool skip_min_lp_his_id = false; }; -using liquidity_pool_ticker_id_type = object_id; +using liquidity_pool_ticker_id_type = object_id; /// Stores ticker data for liquidity pools struct liquidity_pool_ticker_object : public abstract_object + MARKET_HISTORY_SPACE_ID, lp_ticker_object_type> { uint32_t _24h_deposit_count = 0; fc::uint128_t _24h_deposit_amount_a = 0; @@ -341,15 +341,14 @@ struct liquidity_pool_ticker_object : public abstract_object, member< object, object_id_type, &object::id > > > -> liquidity_pool_ticker_multi_index_type; +>; -typedef generic_index< liquidity_pool_ticker_object, - liquidity_pool_ticker_multi_index_type > liquidity_pool_ticker_index; +using liquidity_pool_ticker_index = generic_index< liquidity_pool_ticker_object, lp_ticker_multi_index_type >; namespace detail @@ -358,9 +357,9 @@ namespace detail } /** - * The market history plugin can be configured to track any number of intervals via its configuration. Once per block it - * will scan the virtual operations and look for fill_order_operations and then adjust the appropriate bucket objects for - * each fill order. + * The market history plugin can be configured to track any number of intervals via its configuration. + * Once per block it will scan the virtual operations and look for fill_order_operations and then adjust + * the appropriate bucket objects for each fill order. 
*/ class market_history_plugin : public graphene::app::plugin { @@ -409,7 +408,7 @@ FC_REFLECT_DERIVED( graphene::market_history::market_ticker_meta_object, (graphe (rolling_min_order_his_id)(skip_min_order_his_id) ) FC_REFLECT_DERIVED( graphene::market_history::liquidity_pool_history_object, (graphene::db::object), (pool)(sequence)(time)(op_type)(op) ) -FC_REFLECT_DERIVED( graphene::market_history::liquidity_pool_ticker_meta_object, (graphene::db::object), +FC_REFLECT_DERIVED( graphene::market_history::lp_ticker_meta_object, (graphene::db::object), (rolling_min_lp_his_id)(skip_min_lp_his_id) ) FC_REFLECT_DERIVED( graphene::market_history::liquidity_pool_ticker_object, (graphene::db::object), (_24h_deposit_count) diff --git a/libraries/plugins/market_history/market_history_plugin.cpp b/libraries/plugins/market_history/market_history_plugin.cpp index 4aee6da8ff..382ed4fc62 100644 --- a/libraries/plugins/market_history/market_history_plugin.cpp +++ b/libraries/plugins/market_history/market_history_plugin.cpp @@ -53,7 +53,7 @@ class market_history_plugin_impl /// process all operations related to liquidity pools void update_liquidity_pool_histories( time_point_sec time, const operation_history_object& oho, - const liquidity_pool_ticker_meta_object*& lp_meta ); + const lp_ticker_meta_object*& lp_meta ); graphene::chain::database& database() { @@ -302,8 +302,8 @@ void market_history_plugin_impl::update_market_histories( const signed_block& b if( meta_idx.size() > 0 ) _meta = &( *meta_idx.begin() ); - const liquidity_pool_ticker_meta_object* _lp_meta = nullptr; - const auto& lp_meta_idx = db.get_index_type>(); + const lp_ticker_meta_object* _lp_meta = nullptr; + const auto& lp_meta_idx = db.get_index_type>(); if( lp_meta_idx.size() > 0 ) _lp_meta = &( *lp_meta_idx.begin() ); @@ -475,7 +475,7 @@ void market_history_plugin_impl::update_market_histories( const signed_block& b { if( history_itr->id != _lp_meta->rolling_min_lp_his_id ) // if rolled out some { - db.modify( *_lp_meta, [history_itr]( liquidity_pool_ticker_meta_object& mtm ) { + db.modify( *_lp_meta, [history_itr]( lp_ticker_meta_object& mtm ) { mtm.rolling_min_lp_his_id = history_itr->id; mtm.skip_min_lp_his_id = false; }); @@ -486,7 +486,7 @@ void market_history_plugin_impl::update_market_histories( const signed_block& b if( !_lp_meta->skip_min_lp_his_id || last_min_his_id != _lp_meta->rolling_min_lp_his_id ) // if rolled out some { - db.modify( *_lp_meta, [last_min_his_id]( liquidity_pool_ticker_meta_object& mtm ) { + db.modify( *_lp_meta, [last_min_his_id]( lp_ticker_meta_object& mtm ) { mtm.rolling_min_lp_his_id = last_min_his_id; mtm.skip_min_lp_his_id = true; }); @@ -530,7 +530,7 @@ struct get_liquidity_pool_id_visitor void market_history_plugin_impl::update_liquidity_pool_histories( time_point_sec time, const operation_history_object& oho, - const liquidity_pool_ticker_meta_object*& lp_meta ) + const lp_ticker_meta_object*& lp_meta ) { try { optional pool; @@ -574,10 +574,9 @@ void market_history_plugin_impl::update_liquidity_pool_histories( // save a reference to the ticker meta object if( lp_meta == nullptr ) { - const auto& lp_meta_idx = db.get_index_type>(); + const auto& lp_meta_idx = db.get_index_type>(); if( lp_meta_idx.size() == 0 ) - lp_meta = &db.create( [&new_his_obj]( - liquidity_pool_ticker_meta_object& lptm ) { + lp_meta = &db.create( [&new_his_obj]( lp_ticker_meta_object& lptm ) { lptm.rolling_min_lp_his_id = new_his_obj.id; lptm.skip_min_lp_his_id = false; }); @@ -775,7 +774,7 @@ void 
market_history_plugin::plugin_initialize(const boost::program_options::vari database().add_index< primary_index< simple_index< market_ticker_meta_object > > >(); database().add_index< primary_index< liquidity_pool_history_index > >(); - database().add_index< primary_index< simple_index< liquidity_pool_ticker_meta_object > > >(); + database().add_index< primary_index< simple_index< lp_ticker_meta_object > > >(); database().add_index< primary_index< liquidity_pool_ticker_index, 8 > >(); // 256 pools per chunk if( options.count( "bucket-size" ) > 0 ) diff --git a/libraries/protocol/include/graphene/protocol/object_id.hpp b/libraries/protocol/include/graphene/protocol/object_id.hpp index 417e400d39..ccd0305680 100644 --- a/libraries/protocol/include/graphene/protocol/object_id.hpp +++ b/libraries/protocol/include/graphene/protocol/object_id.hpp @@ -235,7 +235,7 @@ struct member_name, 0> { static constexpr const cha FC_ASSERT( space_id <= vo.one_byte_mask, "space overflow" ); auto type_id = fc::to_uint64( s.substr( first_dot+1, (second_dot-first_dot)-1 ) ); FC_ASSERT( type_id <= vo.one_byte_mask, "type overflow"); - vo.reset( space_id, type_id, instance ); + vo.reset( static_cast(space_id), static_cast(type_id), instance ); } FC_CAPTURE_AND_RETHROW( (var) ) } template void to_variant( const graphene::db::object_id& var, fc::variant& vo, uint32_t max_depth = 1 ) diff --git a/libraries/wallet/include/graphene/wallet/wallet.hpp b/libraries/wallet/include/graphene/wallet/wallet.hpp index 14a50e9d4e..1cd1b1976d 100644 --- a/libraries/wallet/include/graphene/wallet/wallet.hpp +++ b/libraries/wallet/include/graphene/wallet/wallet.hpp @@ -1062,7 +1062,7 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction canceling the order */ - signed_transaction cancel_order(const limit_order_id_type& order_id, bool broadcast = false); + signed_transaction cancel_order(const limit_order_id_type& order_id, bool broadcast = false) const; /** Creates a new user-issued or market-issued asset. * @@ -1480,7 +1480,7 @@ class wallet_api signed_transaction htlc_create( const string& source, const string& destination, const string& amount, const string& asset_symbol_or_id, const string& hash_algorithm, const string& preimage_hash, uint32_t preimage_size, - uint32_t claim_period_seconds, const string& memo, bool broadcast = false); + uint32_t claim_period_seconds, const string& memo, bool broadcast = false) const; /**** * Update a hashed time lock contract @@ -1491,7 +1491,7 @@ class wallet_api * @return the signed transaction */ signed_transaction htlc_redeem( const htlc_id_type& htlc_id, const string& issuer, const std::string& preimage, - bool broadcast = false ); + bool broadcast = false ) const; /***** * Increase the timelock on an existing HTLC @@ -1503,7 +1503,7 @@ class wallet_api * @return the signed transaction */ signed_transaction htlc_extend( const htlc_id_type& htlc_id, const string& issuer, uint32_t seconds_to_add, - bool broadcast = false); + bool broadcast = false) const; /** * Get information about a vesting balance object or vesting balance objects owned by an account. 
diff --git a/libraries/wallet/wallet.cpp b/libraries/wallet/wallet.cpp index 42d7cdca6d..0067bae1be 100644 --- a/libraries/wallet/wallet.cpp +++ b/libraries/wallet/wallet.cpp @@ -202,7 +202,7 @@ uint64_t wallet_api::get_asset_count()const signed_transaction wallet_api::htlc_create( const string& source, const string& destination, const string& amount, const string& asset_symbol, const string& hash_algorithm, const string& preimage_hash, uint32_t preimage_size, - uint32_t claim_period_seconds, const string& memo, bool broadcast ) + uint32_t claim_period_seconds, const string& memo, bool broadcast ) const { return my->htlc_create(source, destination, amount, asset_symbol, hash_algorithm, preimage_hash, preimage_size, claim_period_seconds, memo, broadcast); @@ -265,14 +265,14 @@ fc::optional wallet_api::get_htlc(const htlc_id_type& htlc_id) cons } signed_transaction wallet_api::htlc_redeem( const htlc_id_type& htlc_id, const string& issuer, - const string& preimage, bool broadcast) + const string& preimage, bool broadcast ) const { return my->htlc_redeem(htlc_id, issuer, std::vector(preimage.begin(), preimage.end()), broadcast); } signed_transaction wallet_api::htlc_extend( const htlc_id_type& htlc_id, const string& issuer, - uint32_t seconds_to_add, bool broadcast) + uint32_t seconds_to_add, bool broadcast ) const { return my->htlc_extend(htlc_id, issuer, seconds_to_add, broadcast); } @@ -1328,7 +1328,7 @@ signed_transaction wallet_api::borrow_asset_ext( string seller_name, string amou amount_of_collateral, extensions, broadcast); } -signed_transaction wallet_api::cancel_order(const limit_order_id_type& order_id, bool broadcast) +signed_transaction wallet_api::cancel_order(const limit_order_id_type& order_id, bool broadcast) const { FC_ASSERT(!is_locked()); return my->cancel_order(order_id, broadcast); diff --git a/libraries/wallet/wallet_api_impl.cpp b/libraries/wallet/wallet_api_impl.cpp index aadb2da8ce..9c7268be43 100644 --- a/libraries/wallet/wallet_api_impl.cpp +++ b/libraries/wallet/wallet_api_impl.cpp @@ -188,7 +188,7 @@ namespace graphene { namespace wallet { namespace detail { fc::async([this]{resync();}, "Resync after block"); } - void wallet_api_impl::set_operation_fees( signed_transaction& tx, const fee_schedule& s ) + void wallet_api_impl::set_operation_fees( signed_transaction& tx, const fee_schedule& s ) const { for( auto& op : tx.operations ) s.set_fee(op); diff --git a/libraries/wallet/wallet_api_impl.hpp b/libraries/wallet/wallet_api_impl.hpp index 7e53071869..709b9f1a90 100644 --- a/libraries/wallet/wallet_api_impl.hpp +++ b/libraries/wallet/wallet_api_impl.hpp @@ -137,7 +137,7 @@ class wallet_api_impl * @param tx the transaction * @param s the fee schedule */ - void set_operation_fees( signed_transaction& tx, const fee_schedule& s ); + void set_operation_fees( signed_transaction& tx, const fee_schedule& s ) const; /*** * @brief return basic info about the chain diff --git a/libraries/wallet/wallet_transfer.cpp b/libraries/wallet/wallet_transfer.cpp index c157b29433..a18890624c 100644 --- a/libraries/wallet/wallet_transfer.cpp +++ b/libraries/wallet/wallet_transfer.cpp @@ -148,8 +148,8 @@ namespace graphene { namespace wallet { namespace detail { } FC_CAPTURE_AND_RETHROW( (htlc_id)(issuer)(preimage)(broadcast) ) } - signed_transaction wallet_api_impl::htlc_extend ( const htlc_id_type& htlc_id, const string& issuer, - uint32_t seconds_to_add, bool broadcast) + signed_transaction wallet_api_impl::htlc_extend( const htlc_id_type& htlc_id, const string& issuer, + uint32_t 
seconds_to_add, bool broadcast ) { try { From ddfbb6d305607f9f60263620afd1a2ac60e5c81c Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 19 Oct 2022 20:37:05 +0000 Subject: [PATCH 290/338] Fix code smells --- libraries/chain/db_init.cpp | 3 ++- .../chain/include/graphene/chain/database.hpp | 8 ++++++-- libraries/db/include/graphene/db/index.hpp | 6 +++--- .../db/include/graphene/db/object_database.hpp | 16 +++++++++++----- .../db/include/graphene/db/undo_database.hpp | 7 +++++-- libraries/db/object_database.cpp | 4 +--- .../api_helper_indexes/api_helper_indexes.cpp | 5 ++--- 7 files changed, 30 insertions(+), 19 deletions(-) diff --git a/libraries/chain/db_init.cpp b/libraries/chain/db_init.cpp index 5e2a459589..527902aa52 100644 --- a/libraries/chain/db_init.cpp +++ b/libraries/chain/db_init.cpp @@ -77,7 +77,8 @@ namespace graphene { namespace chain { void database::initialize_evaluators() { - _operation_evaluators.resize(255); + constexpr size_t max_num_of_evaluators = 255; + _operation_evaluators.resize(max_num_of_evaluators); register_evaluator(); register_evaluator(); register_evaluator(); diff --git a/libraries/chain/include/graphene/chain/database.hpp b/libraries/chain/include/graphene/chain/database.hpp index b953f4fdba..d143b2e39b 100644 --- a/libraries/chain/include/graphene/chain/database.hpp +++ b/libraries/chain/include/graphene/chain/database.hpp @@ -234,8 +234,12 @@ namespace graphene { namespace chain { template void register_evaluator() { - _operation_evaluators[operation::tag::value] - = std::make_unique>(); + const auto op_type = operation::tag::value; + FC_ASSERT( op_type >= 0, "Negative operation type" ); + FC_ASSERT( op_type < _operation_evaluators.size(), + "The operation type (${a}) must be smaller than the size of _operation_evaluators (${b})", + ("a", op_type)("b", _operation_evaluators.size()) ); + _operation_evaluators[op_type] = std::make_unique>(); } ///@} diff --git a/libraries/db/include/graphene/db/index.hpp b/libraries/db/include/graphene/db/index.hpp index 6a86c16137..95437c6a55 100644 --- a/libraries/db/include/graphene/db/index.hpp +++ b/libraries/db/include/graphene/db/index.hpp @@ -152,7 +152,7 @@ namespace graphene { namespace db { class base_primary_index { public: - base_primary_index( object_database& db ):_db(db){} + explicit base_primary_index( object_database& db ):_db(db){} virtual ~base_primary_index() = default; @@ -314,7 +314,7 @@ namespace graphene { namespace db { public: using object_type = typename DerivedIndex::object_type; - primary_index( object_database& db ) + explicit primary_index( object_database& db ) :base_primary_index(db),_next_id(object_type::space_id,object_type::type_id,0) { if( DirectBits > 0 ) @@ -341,7 +341,7 @@ namespace graphene { namespace db { fc::sha256 get_object_version()const { - std::string desc = "1.0";//get_type_description(); + std::string desc = "1.0"; return fc::sha256::hash(desc); } diff --git a/libraries/db/include/graphene/db/object_database.hpp b/libraries/db/include/graphene/db/object_database.hpp index 2ab8bad8e4..ea25c18059 100644 --- a/libraries/db/include/graphene/db/object_database.hpp +++ b/libraries/db/include/graphene/db/object_database.hpp @@ -40,9 +40,15 @@ namespace graphene { namespace db { { public: object_database(); - ~object_database(); + virtual ~object_database() = default; - void reset_indexes() { _index.clear(); _index.resize(255); } + static constexpr uint8_t _index_size = 255; + + void reset_indexes() + { + _index.clear(); + _index.resize(_index_size); + } void open(const 
fc::path& data_dir ); @@ -137,9 +143,9 @@ namespace graphene { namespace db { template IndexType* add_index() { - typedef typename IndexType::object_type ObjectType; - if( _index[ObjectType::space_id].size() <= ObjectType::type_id ) - _index[ObjectType::space_id].resize( 255 ); + using ObjectType = typename IndexType::object_type; + if( _index[ObjectType::space_id].size() <= ObjectType::type_id ) + _index[ObjectType::space_id].resize( _index_size ); assert(!_index[ObjectType::space_id][ObjectType::type_id]); std::unique_ptr indexptr( std::make_unique(*this) ); _index[ObjectType::space_id][ObjectType::type_id] = std::move(indexptr); diff --git a/libraries/db/include/graphene/db/undo_database.hpp b/libraries/db/include/graphene/db/undo_database.hpp index 5ca4cd3838..ed9a5f3e2e 100644 --- a/libraries/db/include/graphene/db/undo_database.hpp +++ b/libraries/db/include/graphene/db/undo_database.hpp @@ -47,7 +47,7 @@ namespace graphene { namespace db { class undo_database { public: - undo_database( object_database& db ):_db(db){} + explicit undo_database( object_database& db ):_db(db){} class session { @@ -73,7 +73,10 @@ namespace graphene { namespace db { private: friend class undo_database; - session(undo_database& db, bool disable_on_exit = false): _db(db),_disable_on_exit(disable_on_exit) {} + + explicit session(undo_database& db, bool disable_on_exit = false) + : _db(db),_disable_on_exit(disable_on_exit) {} + undo_database& _db; bool _apply_undo = true; bool _disable_on_exit = false; diff --git a/libraries/db/object_database.cpp b/libraries/db/object_database.cpp index 47c664345a..25ffcbe54f 100644 --- a/libraries/db/object_database.cpp +++ b/libraries/db/object_database.cpp @@ -32,12 +32,10 @@ namespace graphene { namespace db { object_database::object_database() :_undo_db(*this) { - _index.resize(255); + _index.resize(_index_size); _undo_db.enable(); } -object_database::~object_database(){} - void object_database::close() { } diff --git a/libraries/plugins/api_helper_indexes/api_helper_indexes.cpp b/libraries/plugins/api_helper_indexes/api_helper_indexes.cpp index ca02447b02..f71796b6a8 100644 --- a/libraries/plugins/api_helper_indexes/api_helper_indexes.cpp +++ b/libraries/plugins/api_helper_indexes/api_helper_indexes.cpp @@ -226,12 +226,11 @@ void api_helper_indexes::refresh_next_ids() // Assuming that all indexes have been created when processing the first block, // for better performance, only do this twice, one on plugin startup, the other on the first block. 
- constexpr uint8_t max = 255; size_t count = 0; size_t failed_count = 0; - for( uint8_t space = 0; space < max; ++space ) + for( uint8_t space = 0; space < db._index_size; ++space ) { - for( uint8_t type = 0; type < max; ++type ) + for( uint8_t type = 0; type < db._index_size; ++type ) { try { From b1a89b973f62198ee0e783ad7e5cea8cf4843c2f Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 19 Oct 2022 22:05:23 +0000 Subject: [PATCH 291/338] Remove a redundant function and refactor some code Remove wallet_api_impl::account_id_to_string(account_id_type id) --- libraries/wallet/wallet_account.cpp | 10 +--------- libraries/wallet/wallet_api_impl.cpp | 4 ++-- libraries/wallet/wallet_api_impl.hpp | 2 -- libraries/wallet/wallet_voting.cpp | 4 ++-- 4 files changed, 5 insertions(+), 15 deletions(-) diff --git a/libraries/wallet/wallet_account.cpp b/libraries/wallet/wallet_account.cpp index cd17b08bec..4b8f123f20 100644 --- a/libraries/wallet/wallet_account.cpp +++ b/libraries/wallet/wallet_account.cpp @@ -35,14 +35,6 @@ namespace graphene { namespace wallet { namespace detail { - std::string wallet_api_impl::account_id_to_string(account_id_type id) const - { - std::string account_id = fc::to_string(id.space_id) - + "." + fc::to_string(id.type_id) - + "." + fc::to_string(id.instance.value); - return account_id; - } - signed_transaction wallet_api_impl::register_account(string name, public_key_type owner, public_key_type active, string registrar_account, string referrer_account, uint32_t referrer_percent, bool broadcast ) @@ -194,7 +186,7 @@ namespace graphene { namespace wallet { namespace detail { account_object wallet_api_impl::get_account(account_id_type id) const { - std::string account_id = account_id_to_string(id); + auto account_id = std::string(id); auto rec = _remote_db->get_accounts({account_id}, {}).front(); FC_ASSERT(rec); diff --git a/libraries/wallet/wallet_api_impl.cpp b/libraries/wallet/wallet_api_impl.cpp index 9c7268be43..6350ae3215 100644 --- a/libraries/wallet/wallet_api_impl.cpp +++ b/libraries/wallet/wallet_api_impl.cpp @@ -341,7 +341,7 @@ namespace graphene { namespace wallet { namespace detail { for( const fc::optional& optional_account : owner_account_objects ) if (optional_account) { - std::string account_id = account_id_to_string(optional_account->get_id()); + auto account_id = std::string(optional_account->id); fc::optional witness_obj = _remote_db->get_witness_by_account(account_id); if (witness_obj) claim_registered_witness(optional_account->name); @@ -386,7 +386,7 @@ namespace graphene { namespace wallet { namespace detail { { assert( it != _wallet.my_accounts.end() ); old_accounts.push_back( *it ); - std::string account_id = account_id_to_string(old_accounts.back().get_id()); + auto account_id = std::string(old_accounts.back().id); account_ids_to_send.push_back( account_id ); ++it; } diff --git a/libraries/wallet/wallet_api_impl.hpp b/libraries/wallet/wallet_api_impl.hpp index 709b9f1a90..e9e5810361 100644 --- a/libraries/wallet/wallet_api_impl.hpp +++ b/libraries/wallet/wallet_api_impl.hpp @@ -425,8 +425,6 @@ class wallet_api_impl static_variant_map _operation_which_map = create_static_variant_map< operation >(); private: - std::string account_id_to_string(account_id_type id) const; - static htlc_hash do_hash( const string& algorithm, const std::string& hash ); void enable_umask_protection(); diff --git a/libraries/wallet/wallet_voting.cpp b/libraries/wallet/wallet_voting.cpp index a357d02fe6..3d30af356f 100644 --- a/libraries/wallet/wallet_voting.cpp +++ 
b/libraries/wallet/wallet_voting.cpp @@ -140,7 +140,7 @@ namespace graphene { namespace wallet { namespace detail { // then maybe it's the owner account try { - std::string owner_account_id = account_id_to_string(get_account_id(owner_account)); + auto owner_account_id = std::string(get_account_id(owner_account)); fc::optional witness = _remote_db->get_witness_by_account(owner_account_id); if (witness) return *witness; @@ -208,7 +208,7 @@ namespace graphene { namespace wallet { namespace detail { witness_create_op.block_signing_key = witness_public_key; witness_create_op.url = url; - if (_remote_db->get_witness_by_account(account_id_to_string(witness_create_op.witness_account))) + if (_remote_db->get_witness_by_account(std::string(witness_create_op.witness_account))) FC_THROW("Account ${owner_account} is already a witness", ("owner_account", owner_account)); signed_transaction tx; From 534529b12374bb47dbda4184a83cb5881e33b1ab Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 19 Oct 2022 22:27:58 +0000 Subject: [PATCH 292/338] Add safeguard code to add_index() --- .../db/include/graphene/db/object_database.hpp | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/libraries/db/include/graphene/db/object_database.hpp b/libraries/db/include/graphene/db/object_database.hpp index ea25c18059..8596ba3859 100644 --- a/libraries/db/include/graphene/db/object_database.hpp +++ b/libraries/db/include/graphene/db/object_database.hpp @@ -144,12 +144,15 @@ namespace graphene { namespace db { IndexType* add_index() { using ObjectType = typename IndexType::object_type; - if( _index[ObjectType::space_id].size() <= ObjectType::type_id ) - _index[ObjectType::space_id].resize( _index_size ); - assert(!_index[ObjectType::space_id][ObjectType::type_id]); - std::unique_ptr indexptr( std::make_unique(*this) ); - _index[ObjectType::space_id][ObjectType::type_id] = std::move(indexptr); - return static_cast(_index[ObjectType::space_id][ObjectType::type_id].get()); + const auto space_id = ObjectType::space_id; + const auto type_id = ObjectType::type_id; + FC_ASSERT( space_id < _index.size(), "Space ID ${s} overflow", ("s",space_id) ); + if( _index[space_id].size() <= type_id ) + _index[space_id].resize( _index_size ); + FC_ASSERT( type_id < _index[space_id].size(), "Type ID ${t} overflow", ("t",type_id) ); + FC_ASSERT( !_index[space_id][type_id], "Index ${s}.${t} already exists", ("s",space_id)("t",type_id) ); + _index[space_id][type_id] = std::make_unique(*this); + return static_cast(_index[space_id][type_id].get()); } template From ec587e8bc447324eb60ce9a2c768fe20ca662426 Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 20 Oct 2022 14:37:13 +0000 Subject: [PATCH 293/338] Skip plugins in app_test --- tests/app/main.cpp | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/tests/app/main.cpp b/tests/app/main.cpp index 9da9b1449f..ade829b11d 100644 --- a/tests/app/main.cpp +++ b/tests/app/main.cpp @@ -22,18 +22,12 @@ * THE SOFTWARE. 
*/ #include -#include #include #include #include -#include -#include -#include -#include - #include #include #include @@ -240,10 +234,6 @@ BOOST_AUTO_TEST_CASE( three_node_network ) auto genesis_file = create_genesis_file(app_dir); graphene::app::application app1; - app1.register_plugin< graphene::account_history::account_history_plugin>(); - app1.register_plugin< graphene::market_history::market_history_plugin >(); - app1.register_plugin< graphene::witness_plugin::witness_plugin >(); - app1.register_plugin< graphene::grouped_orders::grouped_orders_plugin>(); auto sharable_cfg = std::make_shared(); auto& cfg = *sharable_cfg; fc::set_option( cfg, "p2p-endpoint", app1_p2p_endpoint_str ); @@ -265,10 +255,6 @@ BOOST_AUTO_TEST_CASE( three_node_network ) fc::temp_directory app2_dir( graphene::utilities::temp_directory_path() ); graphene::app::application app2; - app2.register_plugin(); - app2.register_plugin< graphene::market_history::market_history_plugin >(); - app2.register_plugin< graphene::witness_plugin::witness_plugin >(); - app2.register_plugin< graphene::grouped_orders::grouped_orders_plugin>(); auto sharable_cfg2 = std::make_shared(); auto& cfg2 = *sharable_cfg2; fc::set_option( cfg2, "genesis-json", genesis_file ); From 9c5aa1318611dc3a4dadbc7245e48004f52ec879 Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 20 Oct 2022 14:58:25 +0000 Subject: [PATCH 294/338] Update CMake library linking order --- libraries/app/CMakeLists.txt | 4 ++-- libraries/chain/CMakeLists.txt | 2 +- libraries/net/CMakeLists.txt | 2 +- libraries/plugins/account_history/CMakeLists.txt | 2 +- libraries/plugins/api_helper_indexes/CMakeLists.txt | 2 +- libraries/plugins/custom_operations/CMakeLists.txt | 2 +- libraries/plugins/debug_witness/CMakeLists.txt | 2 +- libraries/plugins/delayed_node/CMakeLists.txt | 2 +- libraries/plugins/elasticsearch/CMakeLists.txt | 2 +- libraries/plugins/es_objects/CMakeLists.txt | 2 +- libraries/plugins/grouped_orders/CMakeLists.txt | 2 +- libraries/plugins/market_history/CMakeLists.txt | 2 +- libraries/plugins/snapshot/CMakeLists.txt | 2 +- libraries/plugins/template_plugin/CMakeLists.txt | 2 +- libraries/plugins/witness/CMakeLists.txt | 2 +- libraries/wallet/CMakeLists.txt | 3 ++- tests/CMakeLists.txt | 10 +++++----- 17 files changed, 23 insertions(+), 22 deletions(-) diff --git a/libraries/app/CMakeLists.txt b/libraries/app/CMakeLists.txt index 43a2a55254..a4215ea500 100644 --- a/libraries/app/CMakeLists.txt +++ b/libraries/app/CMakeLists.txt @@ -16,8 +16,8 @@ add_library( graphene_app # need to link graphene_debug_witness because plugins aren't sufficiently isolated #246 target_link_libraries( graphene_app graphene_market_history graphene_account_history graphene_elasticsearch graphene_grouped_orders - graphene_api_helper_indexes graphene_custom_operations - graphene_chain fc graphene_db graphene_net graphene_utilities graphene_debug_witness ) + graphene_api_helper_indexes graphene_custom_operations graphene_debug_witness + graphene_chain graphene_net graphene_utilities fc ) target_include_directories( graphene_app PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_SOURCE_DIR}/../egenesis/include" ) diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index c13440f5d1..e15a02a032 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -77,7 +77,7 @@ add_library( graphene_chain ) add_dependencies( graphene_chain build_hardfork_hpp ) -target_link_libraries( graphene_chain fc graphene_db graphene_protocol ) 
+target_link_libraries( graphene_chain graphene_db graphene_protocol fc ) target_include_directories( graphene_chain PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_BINARY_DIR}/include" ) diff --git a/libraries/net/CMakeLists.txt b/libraries/net/CMakeLists.txt index b533e61a35..d8b55288f9 100644 --- a/libraries/net/CMakeLists.txt +++ b/libraries/net/CMakeLists.txt @@ -12,7 +12,7 @@ set(SOURCES node.cpp add_library( graphene_net ${SOURCES} ${HEADERS} ) target_link_libraries( graphene_net - PUBLIC fc graphene_db graphene_protocol ) + PUBLIC graphene_db graphene_protocol fc ) target_include_directories( graphene_net PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" PRIVATE "${CMAKE_SOURCE_DIR}/libraries/chain/include" diff --git a/libraries/plugins/account_history/CMakeLists.txt b/libraries/plugins/account_history/CMakeLists.txt index 4af81abb12..69509ed528 100644 --- a/libraries/plugins/account_history/CMakeLists.txt +++ b/libraries/plugins/account_history/CMakeLists.txt @@ -4,7 +4,7 @@ add_library( graphene_account_history account_history_plugin.cpp ) -target_link_libraries( graphene_account_history graphene_chain graphene_app ) +target_link_libraries( graphene_account_history graphene_app graphene_chain ) target_include_directories( graphene_account_history PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) diff --git a/libraries/plugins/api_helper_indexes/CMakeLists.txt b/libraries/plugins/api_helper_indexes/CMakeLists.txt index 26c1f16699..648e93a8fe 100644 --- a/libraries/plugins/api_helper_indexes/CMakeLists.txt +++ b/libraries/plugins/api_helper_indexes/CMakeLists.txt @@ -4,7 +4,7 @@ add_library( graphene_api_helper_indexes api_helper_indexes.cpp ) -target_link_libraries( graphene_api_helper_indexes graphene_chain graphene_app ) +target_link_libraries( graphene_api_helper_indexes graphene_app graphene_chain ) target_include_directories( graphene_api_helper_indexes PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) diff --git a/libraries/plugins/custom_operations/CMakeLists.txt b/libraries/plugins/custom_operations/CMakeLists.txt index 4202f740c8..246c2ae577 100644 --- a/libraries/plugins/custom_operations/CMakeLists.txt +++ b/libraries/plugins/custom_operations/CMakeLists.txt @@ -6,7 +6,7 @@ add_library( graphene_custom_operations custom_evaluators.cpp ) -target_link_libraries( graphene_custom_operations graphene_chain graphene_app ) +target_link_libraries( graphene_custom_operations graphene_app graphene_chain ) target_include_directories( graphene_custom_operations PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) diff --git a/libraries/plugins/debug_witness/CMakeLists.txt b/libraries/plugins/debug_witness/CMakeLists.txt index c512066981..a8f53bc74e 100644 --- a/libraries/plugins/debug_witness/CMakeLists.txt +++ b/libraries/plugins/debug_witness/CMakeLists.txt @@ -5,7 +5,7 @@ add_library( graphene_debug_witness debug_witness.cpp ) -target_link_libraries( graphene_debug_witness graphene_chain graphene_app ) +target_link_libraries( graphene_debug_witness graphene_app graphene_chain ) target_include_directories( graphene_debug_witness PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) diff --git a/libraries/plugins/delayed_node/CMakeLists.txt b/libraries/plugins/delayed_node/CMakeLists.txt index 63dd73e53c..2dfce0541f 100644 --- a/libraries/plugins/delayed_node/CMakeLists.txt +++ b/libraries/plugins/delayed_node/CMakeLists.txt @@ -4,7 +4,7 @@ add_library( graphene_delayed_node delayed_node_plugin.cpp ) -target_link_libraries( graphene_delayed_node graphene_chain graphene_app ) 
+target_link_libraries( graphene_delayed_node graphene_app graphene_chain ) target_include_directories( graphene_delayed_node PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) diff --git a/libraries/plugins/elasticsearch/CMakeLists.txt b/libraries/plugins/elasticsearch/CMakeLists.txt index 971de3a191..25d69d5807 100644 --- a/libraries/plugins/elasticsearch/CMakeLists.txt +++ b/libraries/plugins/elasticsearch/CMakeLists.txt @@ -7,7 +7,7 @@ add_library( graphene_elasticsearch if(MSVC) set_source_files_properties(elasticsearch_plugin.cpp PROPERTIES COMPILE_FLAGS "/bigobj" ) endif(MSVC) -target_link_libraries( graphene_elasticsearch graphene_chain graphene_app ) +target_link_libraries( graphene_elasticsearch graphene_app graphene_chain ) target_include_directories( graphene_elasticsearch PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) diff --git a/libraries/plugins/es_objects/CMakeLists.txt b/libraries/plugins/es_objects/CMakeLists.txt index 926e157f44..99980acb71 100644 --- a/libraries/plugins/es_objects/CMakeLists.txt +++ b/libraries/plugins/es_objects/CMakeLists.txt @@ -8,7 +8,7 @@ if(MSVC) set_source_files_properties(es_objects.cpp PROPERTIES COMPILE_FLAGS "/bigobj" ) endif(MSVC) -target_link_libraries( graphene_es_objects graphene_chain graphene_app ) +target_link_libraries( graphene_es_objects graphene_app graphene_chain ) target_include_directories( graphene_es_objects PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) diff --git a/libraries/plugins/grouped_orders/CMakeLists.txt b/libraries/plugins/grouped_orders/CMakeLists.txt index 4ec9f64d27..4a22b8d2bb 100644 --- a/libraries/plugins/grouped_orders/CMakeLists.txt +++ b/libraries/plugins/grouped_orders/CMakeLists.txt @@ -4,7 +4,7 @@ add_library( graphene_grouped_orders grouped_orders_plugin.cpp ) -target_link_libraries( graphene_grouped_orders graphene_chain graphene_app ) +target_link_libraries( graphene_grouped_orders graphene_app graphene_chain ) target_include_directories( graphene_grouped_orders PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) diff --git a/libraries/plugins/market_history/CMakeLists.txt b/libraries/plugins/market_history/CMakeLists.txt index 47410d7488..9acb4d76bc 100644 --- a/libraries/plugins/market_history/CMakeLists.txt +++ b/libraries/plugins/market_history/CMakeLists.txt @@ -4,7 +4,7 @@ add_library( graphene_market_history market_history_plugin.cpp ) -target_link_libraries( graphene_market_history graphene_chain graphene_app ) +target_link_libraries( graphene_market_history graphene_app graphene_chain ) target_include_directories( graphene_market_history PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) diff --git a/libraries/plugins/snapshot/CMakeLists.txt b/libraries/plugins/snapshot/CMakeLists.txt index 227c386047..6e2f250b4b 100644 --- a/libraries/plugins/snapshot/CMakeLists.txt +++ b/libraries/plugins/snapshot/CMakeLists.txt @@ -4,7 +4,7 @@ add_library( graphene_snapshot snapshot.cpp ) -target_link_libraries( graphene_snapshot graphene_chain graphene_app ) +target_link_libraries( graphene_snapshot graphene_app graphene_chain ) target_include_directories( graphene_snapshot PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) diff --git a/libraries/plugins/template_plugin/CMakeLists.txt b/libraries/plugins/template_plugin/CMakeLists.txt index a8a14584fc..d68c996139 100644 --- a/libraries/plugins/template_plugin/CMakeLists.txt +++ b/libraries/plugins/template_plugin/CMakeLists.txt @@ -4,7 +4,7 @@ add_library( graphene_template_plugin template_plugin.cpp ) -target_link_libraries( graphene_template_plugin graphene_chain 
graphene_app ) +target_link_libraries( graphene_template_plugin graphene_app graphene_chain ) target_include_directories( graphene_template_plugin PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) diff --git a/libraries/plugins/witness/CMakeLists.txt b/libraries/plugins/witness/CMakeLists.txt index 3d4a9d272d..b7801e084a 100644 --- a/libraries/plugins/witness/CMakeLists.txt +++ b/libraries/plugins/witness/CMakeLists.txt @@ -4,7 +4,7 @@ add_library( graphene_witness witness.cpp ) -target_link_libraries( graphene_witness graphene_chain graphene_app ) +target_link_libraries( graphene_witness graphene_app graphene_chain ) target_include_directories( graphene_witness PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) diff --git a/libraries/wallet/CMakeLists.txt b/libraries/wallet/CMakeLists.txt index c268c362a7..09a72fc118 100644 --- a/libraries/wallet/CMakeLists.txt +++ b/libraries/wallet/CMakeLists.txt @@ -46,7 +46,8 @@ set( SOURCES ) add_library( graphene_wallet ${SOURCES} ${CMAKE_CURRENT_BINARY_DIR}/api_documentation.cpp ${HEADERS} ) -target_link_libraries( graphene_wallet PRIVATE graphene_app graphene_net graphene_chain graphene_utilities fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) +target_link_libraries( graphene_wallet PRIVATE graphene_app graphene_chain graphene_utilities fc + ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) target_include_directories( graphene_db PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) if(MSVC) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 44069f2560..c3ca602fe1 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -5,7 +5,7 @@ add_library( database_fixture ${COMMON_SOURCES} ${COMMON_HEADERS} ) -target_link_libraries( database_fixture PUBLIC graphene_app graphene_es_objects graphene_egenesis_none ) +target_link_libraries( database_fixture PUBLIC graphene_es_objects graphene_app graphene_egenesis_none ) target_include_directories( database_fixture PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/common" ) @@ -17,8 +17,8 @@ endif() file(GLOB UNIT_TESTS "tests/*.cpp") add_executable( chain_test ${UNIT_TESTS} ) -target_link_libraries( chain_test graphene_app database_fixture - graphene_witness graphene_wallet ${PLATFORM_SPECIFIC_LIBS} ) +target_link_libraries( chain_test database_fixture + graphene_witness graphene_wallet graphene_app ${PLATFORM_SPECIFIC_LIBS} ) if(MSVC) set_source_files_properties( tests/serialization_tests.cpp PROPERTIES COMPILE_FLAGS "/bigobj" ) set_source_files_properties( tests/common/database_fixture.cpp PROPERTIES COMPILE_FLAGS "/bigobj" ) @@ -30,7 +30,7 @@ target_link_libraries( performance_test database_fixture ${PLATFORM_SPECIFIC_LIB file(GLOB APP_SOURCES "app/*.cpp") add_executable( app_test ${APP_SOURCES} ) -target_link_libraries( app_test graphene_app graphene_witness graphene_egenesis_none +target_link_libraries( app_test graphene_app graphene_egenesis_none ${PLATFORM_SPECIFIC_LIBS} ) file(GLOB CLI_SOURCES "cli/*.cpp") @@ -38,7 +38,7 @@ add_executable( cli_test ${CLI_SOURCES} ) if(WIN32) list(APPEND PLATFORM_SPECIFIC_LIBS ws2_32) endif() -target_link_libraries( cli_test graphene_app graphene_wallet graphene_witness graphene_egenesis_none +target_link_libraries( cli_test graphene_wallet graphene_app graphene_egenesis_none ${PLATFORM_SPECIFIC_LIBS} ) if(MSVC) set_source_files_properties( cli/main.cpp PROPERTIES COMPILE_FLAGS "/bigobj" ) From d1423a0785121ec454cca272000552fcba35fb3e Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 20 Oct 2022 22:31:50 +0000 Subject: [PATCH 295/338] Fix code smells --- 
.../chain/include/graphene/chain/database.hpp | 2 +- .../db/include/graphene/db/generic_index.hpp | 22 ++++++++++--------- .../api_helper_indexes/api_helper_indexes.cpp | 4 ++-- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/libraries/chain/include/graphene/chain/database.hpp b/libraries/chain/include/graphene/chain/database.hpp index d143b2e39b..2535feb247 100644 --- a/libraries/chain/include/graphene/chain/database.hpp +++ b/libraries/chain/include/graphene/chain/database.hpp @@ -72,7 +72,7 @@ namespace graphene { namespace chain { //////////////////// db_management.cpp //////////////////// public: database(); - virtual ~database(); + ~database() override; enum validation_steps { diff --git a/libraries/db/include/graphene/db/generic_index.hpp b/libraries/db/include/graphene/db/generic_index.hpp index 812a6587d1..556e2c0834 100644 --- a/libraries/db/include/graphene/db/generic_index.hpp +++ b/libraries/db/include/graphene/db/generic_index.hpp @@ -43,29 +43,31 @@ namespace graphene { namespace db { class generic_index : public index { public: - typedef MultiIndexType index_type; - typedef ObjectType object_type; + using index_type = MultiIndexType; + using object_type = ObjectType; - virtual const object& insert( object&& obj )override + const object& insert( object&& obj )override { assert( nullptr != dynamic_cast(&obj) ); auto insert_result = _indices.insert( std::move( static_cast(obj) ) ); - FC_ASSERT( insert_result.second, "Could not insert object, most likely a uniqueness constraint was violated" ); + FC_ASSERT( insert_result.second, + "Could not insert object, most likely a uniqueness constraint was violated" ); return *insert_result.first; } - virtual const object& create(const std::function& constructor )override + const object& create(const std::function& constructor )override { ObjectType item; item.id = get_next_id(); constructor( item ); auto insert_result = _indices.insert( std::move(item) ); - FC_ASSERT(insert_result.second, "Could not create object! Most likely a uniqueness constraint is violated."); + FC_ASSERT( insert_result.second, + "Could not create object! 
Most likely a uniqueness constraint is violated."); use_next_id(); return *insert_result.first; } - virtual void modify( const object& obj, const std::function& m )override + void modify( const object& obj, const std::function& m )override { assert(nullptr != dynamic_cast(&obj)); std::exception_ptr exc; @@ -88,12 +90,12 @@ namespace graphene { namespace db { FC_ASSERT(ok, "Could not modify object, most likely an index constraint was violated"); } - virtual void remove( const object& obj )override + void remove( const object& obj )override { _indices.erase( _indices.iterator_to( static_cast(obj) ) ); } - virtual const object* find( object_id_type id )const override + const object* find( object_id_type id )const override { static_assert(std::is_same::value, "First index of MultiIndexType MUST be object_id_type!"); @@ -102,7 +104,7 @@ namespace graphene { namespace db { return &*itr; } - virtual void inspect_all_objects(std::function inspector)const override + void inspect_all_objects(std::function inspector)const override { try { for( const auto& ptr : _indices ) diff --git a/libraries/plugins/api_helper_indexes/api_helper_indexes.cpp b/libraries/plugins/api_helper_indexes/api_helper_indexes.cpp index f71796b6a8..e6cd8100f6 100644 --- a/libraries/plugins/api_helper_indexes/api_helper_indexes.cpp +++ b/libraries/plugins/api_helper_indexes/api_helper_indexes.cpp @@ -228,9 +228,9 @@ void api_helper_indexes::refresh_next_ids() // for better performance, only do this twice, one on plugin startup, the other on the first block. size_t count = 0; size_t failed_count = 0; - for( uint8_t space = 0; space < db._index_size; ++space ) + for( uint8_t space = 0; space < chain::database::_index_size; ++space ) { - for( uint8_t type = 0; type < db._index_size; ++type ) + for( uint8_t type = 0; type < chain::database::_index_size; ++type ) { try { From b5de5255c8194495f72a8f188b3e3478450dbb1a Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 21 Oct 2022 18:48:38 +0000 Subject: [PATCH 296/338] Avoid overflow in from_variant(object_id) And fix some other code smells --- .../include/graphene/protocol/object_id.hpp | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/libraries/protocol/include/graphene/protocol/object_id.hpp b/libraries/protocol/include/graphene/protocol/object_id.hpp index ccd0305680..2fda3cf742 100644 --- a/libraries/protocol/include/graphene/protocol/object_id.hpp +++ b/libraries/protocol/include/graphene/protocol/object_id.hpp @@ -119,11 +119,11 @@ namespace graphene { namespace db { object_id() = default; explicit object_id( const fc::unsigned_int& i ):instance(i) { - FC_ASSERT( (i.value >> instance_bits) == 0, "instance overflow", ("instance",i) ); + validate(); } explicit object_id( uint64_t i ):instance(i) { - FC_ASSERT( (i >> instance_bits) == 0, "instance overflow", ("instance",i) ); + validate(); } explicit object_id( const object_id_type& id ):instance(id.instance()) { @@ -131,6 +131,11 @@ namespace graphene { namespace db { FC_ASSERT( id.is>(), "space or type mismatch" ); } + void validate()const + { + FC_ASSERT( (instance.value >> instance_bits) == 0, "instance overflow", ("instance",instance) ); + } + object_id& operator=( const object_id_type& o ) { *this = object_id(o); @@ -229,12 +234,11 @@ struct member_name, 0> { static constexpr const cha auto second_dot = s.find('.',first_dot+1); FC_ASSERT( second_dot != std::string::npos, "Missing the second dot" ); FC_ASSERT( second_dot != first_dot+1, "Missing the type part" ); - auto instance = 
fc::to_uint64(s.substr( second_dot+1 )); - FC_ASSERT( instance <= vo.max_instance, "instance overflow" ); auto space_id = fc::to_uint64( s.substr( 0, first_dot ) ); - FC_ASSERT( space_id <= vo.one_byte_mask, "space overflow" ); + FC_ASSERT( space_id <= graphene::db::object_id_type::one_byte_mask, "space overflow" ); auto type_id = fc::to_uint64( s.substr( first_dot+1, (second_dot-first_dot)-1 ) ); - FC_ASSERT( type_id <= vo.one_byte_mask, "type overflow"); + FC_ASSERT( type_id <= graphene::db::object_id_type::one_byte_mask, "type overflow"); + auto instance = fc::to_uint64(s.substr( second_dot+1 )); vo.reset( static_cast(space_id), static_cast(type_id), instance ); } FC_CAPTURE_AND_RETHROW( (var) ) } template @@ -256,7 +260,8 @@ struct member_name, 0> { static constexpr const cha fc::to_uint64( s.substr( first_dot+1, (second_dot-first_dot)-1 ) ) == TypeID, "Space.Type.0 (${SpaceID}.${TypeID}.0) doesn't match expected value ${var}", ("TypeID",TypeID)("SpaceID",SpaceID)("var",var) ); - vo.instance = fc::to_uint64(s.substr( second_dot+1 )); + graphene::db::object_id tmp { fc::to_uint64(s.substr( second_dot+1 )) }; + vo = tmp; } FC_CAPTURE_AND_RETHROW( (var) ) } } // namespace fc From 8f31fb06d2c2a40d6b69fd2870502375e893a581 Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 18 Oct 2022 18:34:06 +0000 Subject: [PATCH 297/338] Fix some Doxygen warnings and errors --- .../chain/include/graphene/chain/database.hpp | 21 +++--- .../include/graphene/protocol/types.hpp | 72 +++++++++++-------- libraries/wallet/operation_printer.cpp | 2 +- 3 files changed, 52 insertions(+), 43 deletions(-) diff --git a/libraries/chain/include/graphene/chain/database.hpp b/libraries/chain/include/graphene/chain/database.hpp index 2535feb247..2e0b201ffc 100644 --- a/libraries/chain/include/graphene/chain/database.hpp +++ b/libraries/chain/include/graphene/chain/database.hpp @@ -396,17 +396,8 @@ namespace graphene { namespace chain { bool mute_exceptions = false, bool skip_matching_settle_orders = false ); - /** - * Matches the two orders, the first parameter is taker, the second is maker. - * - * @return a bit field indicating which orders were filled (and thus removed) - * - * 0 - no orders were matched - * 1 - only taker was filled - * 2 - only maker was filled - * 3 - both were filled - */ - ///@{ + // Note: Ideally this should be private. + // Now it is public because we use it in a non-member function in db_market.cpp . enum class match_result_type { none_filled = 0, @@ -416,6 +407,12 @@ namespace graphene { namespace chain { }; private: + /** + * Matches the two orders, the first parameter is taker, the second is maker. + * + * @return which orders were filled (and thus removed) + */ + ///@{ match_result_type match( const limit_order_object& taker, const limit_order_object& maker, const price& trade_price ); match_result_type match_limit_normal_limit( const limit_order_object& taker, const limit_order_object& maker, @@ -589,7 +586,7 @@ namespace graphene { namespace chain { * as any implied/virtual operations that resulted, such as filling an order. The * applied operations is cleared after applying each block and calling the block * observers which may want to index these operations. - * @param The operation to push + * @param op The operation to push * @param is_virtual Whether the operation is a virtual operation * * @return the op_id which can be used to set the result after it has finished being applied. 
diff --git a/libraries/protocol/include/graphene/protocol/types.hpp b/libraries/protocol/include/graphene/protocol/types.hpp index 0b0b7e97ee..ac92d2ca78 100644 --- a/libraries/protocol/include/graphene/protocol/types.hpp +++ b/libraries/protocol/include/graphene/protocol/types.hpp @@ -149,11 +149,37 @@ using private_key_type = fc::ecc::private_key; using chain_id_type = fc::sha256; using ratio_type = boost::rational; +/// @note If one of the following bits is set in asset issuer permissions, +/// it means the asset issuer (or owner for bitassets) has the permission to update +/// the corresponding flag, parameters or perform certain actions. +/// * @ref charge_market_fee +/// * @ref white_list +/// * @ref override_authority +/// * @ref transfer_restricted +/// * @ref disable_force_settle +/// * @ref global_settle +/// * @ref disable_confidential +/// * @ref witness_fed_asset +/// * @ref committee_fed_asset +/// +/// @note If one of the following bits is set in asset issuer permissions, +/// it means the asset issuer (or owner for bitassets) does NOT have the permission to update +/// the corresponding flag, parameters or perform certain actions. +/// This is to be compatible with old client software. +/// * @ref lock_max_supply +/// * @ref disable_new_supply +/// * @ref disable_mcr_update +/// * @ref disable_icr_update +/// * @ref disable_mssr_update +/// * @ref disable_bsrm_update +/// * @ref disable_collateral_bidding +/// +/// @note For @ref disable_mcr_update, @ref disable_icr_update and @ref disable_mssr_update, +/// if one of them is set in issuer permission, and +/// * if the value of the parameter was set by the bitasset owner, it can not be updated, +/// * if no value was set by the owner, the value can still be updated by the feed producers. enum asset_issuer_permission_flags { - /// @note If one of these bits is set in asset issuer permissions, - /// it means the asset issuer (or owner for bitassets) has the permission to update - /// the corresponding flag, parameters or perform certain actions. - ///@{ + // permission-enabling bits begin charge_market_fee = 0x01, ///< market trades in this asset may be charged white_list = 0x02, ///< accounts must be whitelisted in order to hold or transact this asset override_authority = 0x04, ///< issuer may transfer asset back to himself @@ -163,33 +189,19 @@ enum asset_issuer_permission_flags { disable_confidential = 0x40, ///< disallow the asset to be used with confidential transactions witness_fed_asset = 0x80, ///< the bitasset is to be fed by witnesses committee_fed_asset = 0x100, ///< the bitasset is to be fed by the committee - ///@} - /// @note If one of these bits is set in asset issuer permissions, - /// it means the asset issuer (or owner for bitassets) does NOT have the permission to update - /// the corresponding flag, parameters or perform certain actions. - /// This is to be compatible with old client software. - ///@{ + // permission-enabling bits end + // permission-disabling bits begin lock_max_supply = 0x200, ///< the max supply of the asset can not be updated disable_new_supply = 0x400, ///< unable to create new supply for the asset - /// @note These parameters are for issuer permission only. - /// For each parameter, if it is set in issuer permission, - /// it means the bitasset owner can not update the corresponding parameter. 
- ///@{ - /// @note For each one of these parameters, if it is set in issuer permission, and - /// * if the value of the parameter was set by the bitasset owner, it can not be updated, - /// * if no value was set by the owner, the value can still be updated by the feed producers. - ///@{ disable_mcr_update = 0x800, ///< the bitasset owner can not update MCR, permission only disable_icr_update = 0x1000, ///< the bitasset owner can not update ICR, permission only disable_mssr_update = 0x2000, ///< the bitasset owner can not update MSSR, permission only - ///@} disable_bsrm_update = 0x4000, ///< the bitasset owner can not update BSRM, permission only - ///@} disable_collateral_bidding = 0x8000 ///< Can not bid collateral after a global settlement - ///@} + // permission-disabling bits end }; -// The bits that can be used in asset issuer permissions for non-UIA assets +/// The bits that can be used in asset issuer permissions for non-UIA assets const static uint16_t ASSET_ISSUER_PERMISSION_MASK = charge_market_fee | white_list @@ -207,7 +219,7 @@ const static uint16_t ASSET_ISSUER_PERMISSION_MASK = | disable_mssr_update | disable_bsrm_update | disable_collateral_bidding; -// The "enable" bits for non-UIA assets +/// The "enable" bits for non-UIA assets const static uint16_t ASSET_ISSUER_PERMISSION_ENABLE_BITS_MASK = charge_market_fee | white_list @@ -218,7 +230,7 @@ const static uint16_t ASSET_ISSUER_PERMISSION_ENABLE_BITS_MASK = | disable_confidential | witness_fed_asset | committee_fed_asset; -// The "disable" bits for non-UIA assets +/// The "disable" bits for non-UIA assets const static uint16_t ASSET_ISSUER_PERMISSION_DISABLE_BITS_MASK = lock_max_supply | disable_new_supply @@ -227,7 +239,7 @@ const static uint16_t ASSET_ISSUER_PERMISSION_DISABLE_BITS_MASK = | disable_mssr_update | disable_bsrm_update | disable_collateral_bidding; -// The bits that can be used in asset issuer permissions for UIA assets +/// The bits that can be used in asset issuer permissions for UIA assets const static uint16_t UIA_ASSET_ISSUER_PERMISSION_MASK = charge_market_fee | white_list @@ -236,26 +248,26 @@ const static uint16_t UIA_ASSET_ISSUER_PERMISSION_MASK = | disable_confidential | lock_max_supply | disable_new_supply; -// The bits that can be used in asset issuer permissions for UIA assets before hf48/75 +/// The bits that can be used in asset issuer permissions for UIA assets before hf48/75 const static uint16_t DEFAULT_UIA_ASSET_ISSUER_PERMISSION = charge_market_fee | white_list | override_authority | transfer_restricted | disable_confidential; -// The bits that can be used in asset issuer permissions for non-UIA assets but not for UIA assets +/// The bits that can be used in asset issuer permissions for non-UIA assets but not for UIA assets const static uint16_t NON_UIA_ONLY_ISSUER_PERMISSION_MASK = ASSET_ISSUER_PERMISSION_MASK ^ UIA_ASSET_ISSUER_PERMISSION_MASK; -// The bits that can be used in asset issuer permissions but can not be used in flags +/// The bits that can be used in asset issuer permissions but can not be used in flags const static uint16_t PERMISSION_ONLY_MASK = global_settle | disable_mcr_update | disable_icr_update | disable_mssr_update | disable_bsrm_update; -// The bits that can be used in flags for non-UIA assets +/// The bits that can be used in flags for non-UIA assets const static uint16_t VALID_FLAGS_MASK = ASSET_ISSUER_PERMISSION_MASK & (uint16_t)(~PERMISSION_ONLY_MASK); -// the bits that can be used in flags for UIA assets +/// the bits that can be used in flags for UIA 
assets const static uint16_t UIA_VALID_FLAGS_MASK = UIA_ASSET_ISSUER_PERMISSION_MASK; enum reserved_spaces { diff --git a/libraries/wallet/operation_printer.cpp b/libraries/wallet/operation_printer.cpp index 604b30e26b..4119820ff2 100644 --- a/libraries/wallet/operation_printer.cpp +++ b/libraries/wallet/operation_printer.cpp @@ -364,7 +364,7 @@ std::string operation_result_printer::operator()(const void_result& x) const return ""; } -std::string operation_result_printer::operator()(const object_id_type& oid) const +std::string operation_result_printer::operator()(const graphene::protocol::object_id_type& oid) const { return std::string(oid); } From abff625096bf6e583c40b54dbd9755ad6cdd72f3 Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 19 Oct 2022 19:58:30 +0000 Subject: [PATCH 298/338] Update and reformat some comments --- .../include/graphene/protocol/types.hpp | 89 ++++++++++++------- 1 file changed, 56 insertions(+), 33 deletions(-) diff --git a/libraries/protocol/include/graphene/protocol/types.hpp b/libraries/protocol/include/graphene/protocol/types.hpp index ac92d2ca78..3f3fc216dd 100644 --- a/libraries/protocol/include/graphene/protocol/types.hpp +++ b/libraries/protocol/include/graphene/protocol/types.hpp @@ -149,37 +149,48 @@ using private_key_type = fc::ecc::private_key; using chain_id_type = fc::sha256; using ratio_type = boost::rational; -/// @note If one of the following bits is set in asset issuer permissions, -/// it means the asset issuer (or owner for bitassets) has the permission to update -/// the corresponding flag, parameters or perform certain actions. -/// * @ref charge_market_fee -/// * @ref white_list -/// * @ref override_authority -/// * @ref transfer_restricted -/// * @ref disable_force_settle -/// * @ref global_settle -/// * @ref disable_confidential -/// * @ref witness_fed_asset -/// * @ref committee_fed_asset -/// -/// @note If one of the following bits is set in asset issuer permissions, -/// it means the asset issuer (or owner for bitassets) does NOT have the permission to update -/// the corresponding flag, parameters or perform certain actions. -/// This is to be compatible with old client software. -/// * @ref lock_max_supply -/// * @ref disable_new_supply -/// * @ref disable_mcr_update -/// * @ref disable_icr_update -/// * @ref disable_mssr_update -/// * @ref disable_bsrm_update -/// * @ref disable_collateral_bidding -/// -/// @note For @ref disable_mcr_update, @ref disable_icr_update and @ref disable_mssr_update, -/// if one of them is set in issuer permission, and -/// * if the value of the parameter was set by the bitasset owner, it can not be updated, -/// * if no value was set by the owner, the value can still be updated by the feed producers. +/** + * @note + * If one of the following bits is set in asset issuer permissions, + * it means the asset issuer (or owner for bitassets) has the permission to update + * the corresponding flag, parameters or perform certain actions. + * - @ref charge_market_fee + * - @ref white_list + * - @ref override_authority + * - @ref transfer_restricted + * - @ref disable_force_settle + * - @ref global_settle + * - @ref disable_confidential + * - @ref witness_fed_asset + * - @ref committee_fed_asset + * + * @note + * If one of the following bits is set in asset issuer permissions, + * it means the asset issuer (or owner for bitassets) does NOT have the permission to update + * the corresponding flag, parameters or perform certain actions. + * This is to be compatible with old client software. 
+ * - @ref lock_max_supply + * - @ref disable_new_supply + * - @ref disable_mcr_update + * - @ref disable_icr_update + * - @ref disable_mssr_update + * - @ref disable_bsrm_update + * - @ref disable_collateral_bidding + * + * @note + * For @ref disable_mcr_update, @ref disable_icr_update and @ref disable_mssr_update, + * if one of these is set in asset issuer permissions, and + * - if the bitasset owner has set a value for the corresponding parameter, the value can not be updated, + * - if the bitasset owner has not set a value for the corresponding parameter, the parameter can still be + * updated by the price feed producers. + * + */ enum asset_issuer_permission_flags { - // permission-enabling bits begin + // Permission-enabling bits begin + // If one of the following bits is set in asset issuer permissions, + // it means the asset issuer (or owner for bitassets) has the permission to update + // the corresponding flag, parameters or perform certain actions. + // Note: This comment is copied and reformatted above for better Doxygen documentation formatting. charge_market_fee = 0x01, ///< market trades in this asset may be charged white_list = 0x02, ///< accounts must be whitelisted in order to hold or transact this asset override_authority = 0x04, ///< issuer may transfer asset back to himself @@ -189,16 +200,28 @@ enum asset_issuer_permission_flags { disable_confidential = 0x40, ///< disallow the asset to be used with confidential transactions witness_fed_asset = 0x80, ///< the bitasset is to be fed by witnesses committee_fed_asset = 0x100, ///< the bitasset is to be fed by the committee - // permission-enabling bits end - // permission-disabling bits begin + // Permission-enabling bits end + + // Permission-disabling bits begin + // If one of the following bits is set in asset issuer permissions, + // it means the asset issuer (or owner for bitassets) does NOT have the permission to update + // the corresponding flag, parameters or perform certain actions. + // This is to be compatible with old client software. + // Note: This comment is copied and reformatted above for better Doxygen documentation formatting. lock_max_supply = 0x200, ///< the max supply of the asset can not be updated disable_new_supply = 0x400, ///< unable to create new supply for the asset + // For disable_mcr_update, disable_icr_update and disable_mssr_update, + // if one of these is set in asset issuer permissions, and + // - if the bitasset owner has set a value for the corresponding parameter, the value can not be updated, + // - if the bitasset owner has not set a value for the corresponding parameter, the parameter can still be + // updated by the price feed producers. + // Note: This comment is copied and reformatted above for better Doxygen documentation formatting. 
disable_mcr_update = 0x800, ///< the bitasset owner can not update MCR, permission only disable_icr_update = 0x1000, ///< the bitasset owner can not update ICR, permission only disable_mssr_update = 0x2000, ///< the bitasset owner can not update MSSR, permission only disable_bsrm_update = 0x4000, ///< the bitasset owner can not update BSRM, permission only disable_collateral_bidding = 0x8000 ///< Can not bid collateral after a global settlement - // permission-disabling bits end + // Permission-disabling bits end }; /// The bits that can be used in asset issuer permissions for non-UIA assets From 069e291ba38d0b4d13257473cf4976174639d4d3 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sat, 22 Oct 2022 20:16:30 +0000 Subject: [PATCH 299/338] Add a default value to fill_order_op::is_maker to fix a compiler warning --- libraries/protocol/include/graphene/protocol/market.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/protocol/include/graphene/protocol/market.hpp b/libraries/protocol/include/graphene/protocol/market.hpp index b91aaf478d..1d7a8ff091 100644 --- a/libraries/protocol/include/graphene/protocol/market.hpp +++ b/libraries/protocol/include/graphene/protocol/market.hpp @@ -154,7 +154,7 @@ namespace graphene { namespace protocol { asset receives; asset fee; // paid by receiving account price fill_price; - bool is_maker; + bool is_maker = true; pair get_market()const { From d1419738579c990e91f3ebc65df58c650530b4e0 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 23 Oct 2022 18:29:41 +0000 Subject: [PATCH 300/338] Fix compiler warnings --- libraries/chain/db_maint.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/libraries/chain/db_maint.cpp b/libraries/chain/db_maint.cpp index 0eb0c22183..91d4edc88d 100644 --- a/libraries/chain/db_maint.cpp +++ b/libraries/chain/db_maint.cpp @@ -527,8 +527,11 @@ void database::process_budget() - rec.from_accumulated_fees ) - rec.from_unused_witness_budget; - modify(core, [&rec,&witness_budget,&worker_budget,&leftover_worker_funds,&dpo] - ( asset_dynamic_data_object& _core ) + modify(core, [&rec +#ifndef NDEBUG + ,&witness_budget,&worker_budget,&leftover_worker_funds,&dpo +#endif + ] ( asset_dynamic_data_object& _core ) { _core.current_supply = (_core.current_supply + rec.supply_delta ); From 836db042004c6e102ee358ef9878120101f1a193 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 23 Oct 2022 18:42:32 +0000 Subject: [PATCH 301/338] Add override keyword to test_delegate::on_message --- tests/tests/p2p_node_tests.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/tests/p2p_node_tests.cpp b/tests/tests/p2p_node_tests.cpp index 0faa8844df..610858531c 100644 --- a/tests/tests/p2p_node_tests.cpp +++ b/tests/tests/p2p_node_tests.cpp @@ -60,7 +60,7 @@ class test_delegate : public graphene::net::peer_connection_delegate { } void on_message( graphene::net::peer_connection* originating_peer, - const graphene::net::message& received_message ) + const graphene::net::message& received_message ) override { ilog( "on_message was called with ${msg}", ("msg",received_message) ); try { From 761aae4bf8272667ba80c11a23659a275ee933b0 Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 23 Oct 2022 21:49:21 +0000 Subject: [PATCH 302/338] Update default version of pts_address to constexpr --- libraries/app/database_api.cpp | 4 ++-- libraries/chain/balance_evaluator.cpp | 11 +++++++---- .../include/graphene/protocol/pts_address.hpp | 11 ++++++++--- libraries/protocol/transaction.cpp | 8 ++++---- 
libraries/wallet/wallet_account.cpp | 4 ++-- 5 files changed, 23 insertions(+), 15 deletions(-) diff --git a/libraries/app/database_api.cpp b/libraries/app/database_api.cpp index ddd96aace9..3e1bcfa1ca 100644 --- a/libraries/app/database_api.cpp +++ b/libraries/app/database_api.cpp @@ -411,8 +411,8 @@ vector> database_api_impl::get_key_references( vector< for( auto& key : keys ) { - address a1( pts_address(key, false, 56) ); - address a2( pts_address(key, true, 56) ); + address a1( pts_address(key, false) ); // version = 56 (default) + address a2( pts_address(key, true) ); // version = 56 (default) address a3( pts_address(key, false, 0) ); address a4( pts_address(key, true, 0) ); address a5( key ); diff --git a/libraries/chain/balance_evaluator.cpp b/libraries/chain/balance_evaluator.cpp index 7c8b8b3ff1..2d0c4810bf 100644 --- a/libraries/chain/balance_evaluator.cpp +++ b/libraries/chain/balance_evaluator.cpp @@ -31,12 +31,15 @@ void_result balance_claim_evaluator::do_evaluate(const balance_claim_operation& database& d = db(); balance = &op.balance_to_claim(d); - GRAPHENE_ASSERT( + bool is_balance_owner_match = ( op.balance_owner_key == balance->owner || - pts_address(op.balance_owner_key, false, 56) == balance->owner || - pts_address(op.balance_owner_key, true, 56) == balance->owner || + pts_address(op.balance_owner_key, false) == balance->owner || // version = 56 (default) + pts_address(op.balance_owner_key, true) == balance->owner ); // version = 56 (default) + is_balance_owner_match = ( + is_balance_owner_match || pts_address(op.balance_owner_key, false, 0) == balance->owner || - pts_address(op.balance_owner_key, true, 0) == balance->owner, + pts_address(op.balance_owner_key, true, 0) == balance->owner ); + GRAPHENE_ASSERT( is_balance_owner_match, balance_claim_owner_mismatch, "Balance owner key was specified as '${op}' but balance's actual owner is '${bal}'", ("op", op.balance_owner_key) diff --git a/libraries/protocol/include/graphene/protocol/pts_address.hpp b/libraries/protocol/include/graphene/protocol/pts_address.hpp index 0a6e4cccea..6806832fb8 100644 --- a/libraries/protocol/include/graphene/protocol/pts_address.hpp +++ b/libraries/protocol/include/graphene/protocol/pts_address.hpp @@ -40,14 +40,19 @@ namespace graphene { namespace protocol { */ struct pts_address { + static constexpr uint8_t default_version = 56; + pts_address(); ///< constructs empty / null address - pts_address( const std::string& base58str ); ///< converts to binary, validates checksum - pts_address( const fc::ecc::public_key& pub, bool compressed = true, uint8_t version=56 ); ///< converts to binary + explicit pts_address( const std::string& base58str ); ///< converts to binary, validates checksum + /// Constructs from a public key + explicit pts_address( const fc::ecc::public_key& pub, + bool compressed = true, + uint8_t version = default_version ); uint8_t version()const { return addr.at(0); } bool is_valid()const; - operator std::string()const; ///< converts to base58 + checksum + explicit operator std::string()const; ///< converts to base58 + checksum std::array addr{}; ///< binary representation of address, 0-initialized }; diff --git a/libraries/protocol/transaction.cpp b/libraries/protocol/transaction.cpp index 82f5111e5a..2f1120650c 100644 --- a/libraries/protocol/transaction.cpp +++ b/libraries/protocol/transaction.cpp @@ -140,15 +140,15 @@ struct sign_state available_address_sigs = std::map(); provided_address_sigs = std::map(); for( auto& item : available_keys ) { - (*available_address_sigs)[ 
address(pts_address(item, false, 56) ) ] = item; - (*available_address_sigs)[ address(pts_address(item, true, 56) ) ] = item; + (*available_address_sigs)[ address(pts_address(item, false) ) ] = item; // version = 56 (default) + (*available_address_sigs)[ address(pts_address(item, true) ) ] = item; // version = 56 (default) (*available_address_sigs)[ address(pts_address(item, false, 0) ) ] = item; (*available_address_sigs)[ address(pts_address(item, true, 0) ) ] = item; (*available_address_sigs)[ address(item) ] = item; } for( auto& item : provided_signatures ) { - (*provided_address_sigs)[ address(pts_address(item.first, false, 56) ) ] = item.first; - (*provided_address_sigs)[ address(pts_address(item.first, true, 56) ) ] = item.first; + (*provided_address_sigs)[ address(pts_address(item.first, false) ) ] = item.first; //version 56 (default) + (*provided_address_sigs)[ address(pts_address(item.first, true) ) ] = item.first; // version 56 (default) (*provided_address_sigs)[ address(pts_address(item.first, false, 0) ) ] = item.first; (*provided_address_sigs)[ address(pts_address(item.first, true, 0) ) ] = item.first; (*provided_address_sigs)[ address(item.first) ] = item.first; diff --git a/libraries/wallet/wallet_account.cpp b/libraries/wallet/wallet_account.cpp index 4b8f123f20..68631120ab 100644 --- a/libraries/wallet/wallet_account.cpp +++ b/libraries/wallet/wallet_account.cpp @@ -346,9 +346,9 @@ namespace graphene { namespace wallet { namespace detail { addrs.push_back( address(pk) ); keys[addrs.back()] = *key; // see chain/balance_evaluator.cpp - addrs.push_back( address( pts_address( pk, false, 56 ) ) ); + addrs.push_back( address( pts_address( pk, false ) ) ); // version = 56 (default) keys[addrs.back()] = *key; - addrs.push_back( address( pts_address( pk, true, 56 ) ) ); + addrs.push_back( address( pts_address( pk, true ) ) ); // version = 56 (default) keys[addrs.back()] = *key; addrs.push_back( address( pts_address( pk, false, 0 ) ) ); keys[addrs.back()] = *key; From 40cc62c9adf0a84cc994c015b0ba0a0bb825176b Mon Sep 17 00:00:00 2001 From: abitmore Date: Sun, 23 Oct 2022 22:28:25 +0000 Subject: [PATCH 303/338] Update most functions of wallet_api class to const --- .../wallet/include/graphene/wallet/wallet.hpp | 235 +++++++++--------- libraries/wallet/wallet.cpp | 212 ++++++++-------- 2 files changed, 225 insertions(+), 222 deletions(-) diff --git a/libraries/wallet/include/graphene/wallet/wallet.hpp b/libraries/wallet/include/graphene/wallet/wallet.hpp index 1cd1b1976d..657d06d9b4 100644 --- a/libraries/wallet/include/graphene/wallet/wallet.hpp +++ b/libraries/wallet/include/graphene/wallet/wallet.hpp @@ -50,10 +50,15 @@ object* create_object( const variant& v ); class wallet_api { public: + // Variables + fc::signal lock_changed; + std::shared_ptr my; + + // Methods wallet_api( const wallet_data& initial_data, fc::api rapi ); virtual ~wallet_api(); - bool copy_wallet_file( string destination_filename ); + bool copy_wallet_file( string destination_filename )const; fc::ecc::private_key derive_private_key(const std::string& prefix_string, int sequence_number) const; /** Returns info about head block, chain_id, maintenance, participation, current active witnesses and * committee members. * @returns runtime info about the blockchain */ - variant info(); + variant info()const; /** Returns info such as client version, git version of graphene/fc, version of boost, openssl.
* @returns compile time info and client and dependencies versions */ @@ -70,7 +75,7 @@ class wallet_api * @param num height of the block to retrieve * @returns info about the block, or null if not found */ - optional get_block( uint32_t num ); + optional get_block( uint32_t num )const; /** Returns the number of accounts registered on the blockchain * @returns the number of registered accounts */ @@ -80,7 +85,7 @@ class wallet_api * we possess. * @returns a list of account objects */ - vector list_my_accounts(); + vector list_my_accounts()const; /** Lists all accounts registered in the blockchain. * This returns a list of all account names and their account ids, sorted by account name. * @@ -93,7 +98,7 @@ class wallet_api * @param limit the maximum number of accounts to return (max: 1000) * @returns a list of accounts mapping account names to account ids */ - map list_accounts(const string& lowerbound, uint32_t limit); + map list_accounts(const string& lowerbound, uint32_t limit)const; /** List the balances of an account. * Each account can have multiple balances, one for each type of asset owned by that * account. The returned list will only contain assets for which the account has a @@ -101,7 +106,7 @@ class wallet_api * @param account_name_or_id the name or id of the account whose balances you want * @returns a list of the given account's balances */ - vector list_account_balances(const string& account_name_or_id); + vector list_account_balances(const string& account_name_or_id)const; /** Lists all assets registered on the blockchain. * * To list all assets, pass the empty string \c "" for the lowerbound to start @@ -148,7 +153,7 @@ class wallet_api * of \c name_or_id cannot be tied to an account, that input will be ignored. * */ - full_account get_full_account( const string& name_or_id ); + full_account get_full_account( const string& name_or_id )const; /** * @brief Get OHLCV data of a trading pair in a time range @@ -176,7 +181,7 @@ class wallet_api * * @note * 1. if \c name_or_id cannot be tied to an account, empty result will be returned - * 2. \c ostart_id and \c ostart_price can be \c null, if so the api will return the "first page" of orders; + * 2. \c ostart_id and \c ostart_price can be \c null, if so the api will return the "first page" of orders. * if \c ostart_id is specified and valid, its price will be used to do page query preferentially, * otherwise the \c ostart_price will be used */ @@ -185,7 +190,7 @@ class wallet_api const string "e, uint32_t limit = 101, optional ostart_id = optional(), - optional ostart_price = optional()); + optional ostart_price = optional())const; /** * @brief Get limit orders in a given market @@ -243,7 +248,7 @@ class wallet_api */ account_history_operation_detail get_account_history_by_operations( const string& account_name_or_id, const flat_set& operation_types, - uint32_t start, uint32_t limit); + uint32_t start, uint32_t limit)const; /** Returns the block chain's rapidly-changing properties. * The returned object contains information that changes every block interval @@ -353,7 +358,7 @@ class wallet_api * Create a new transaction builder. 
* @return handle of the new transaction builder */ - transaction_handle_type begin_builder_transaction(); + transaction_handle_type begin_builder_transaction()const; /** * @ingroup Transaction Builder API * @@ -361,7 +366,7 @@ class wallet_api * @param transaction_handle handle of the transaction builder * @param op the operation in JSON format */ - void add_operation_to_builder_transaction(transaction_handle_type transaction_handle, const operation& op); + void add_operation_to_builder_transaction(transaction_handle_type transaction_handle, const operation& op)const; /** * @ingroup Transaction Builder API * @@ -372,7 +377,7 @@ class wallet_api */ void replace_operation_in_builder_transaction(transaction_handle_type handle, unsigned operation_index, - const operation& new_op); + const operation& new_op)const; /** * @ingroup Transaction Builder API * @@ -381,7 +386,7 @@ class wallet_api * @param fee_asset symbol or ID of an asset that to be used to pay fees * @return total fees */ - asset set_fees_on_builder_transaction(transaction_handle_type handle, string fee_asset = GRAPHENE_SYMBOL); + asset set_fees_on_builder_transaction(transaction_handle_type handle, string fee_asset = GRAPHENE_SYMBOL)const; /** * @ingroup Transaction Builder API * @@ -389,7 +394,7 @@ class wallet_api * @param handle handle of the transaction builder * @return a transaction */ - transaction preview_builder_transaction(transaction_handle_type handle); + transaction preview_builder_transaction(transaction_handle_type handle)const; /** * @ingroup Transaction Builder API * @@ -398,7 +403,8 @@ class wallet_api * @param broadcast whether to broadcast the signed transaction to the network * @return a signed transaction */ - signed_transaction sign_builder_transaction(transaction_handle_type transaction_handle, bool broadcast = true); + signed_transaction sign_builder_transaction(transaction_handle_type transaction_handle, + bool broadcast = true)const; /** * @ingroup Transaction Builder API @@ -410,14 +416,14 @@ class wallet_api * @return a signed transaction */ signed_transaction sign_builder_transaction2(transaction_handle_type transaction_handle, - const vector& signing_keys = vector(), - bool broadcast = true); + const vector& signing_keys = vector(), + bool broadcast = true)const; /** Broadcast signed transaction * @param tx signed transaction * @returns the transaction ID along with the signed transaction. */ - pair broadcast_transaction(signed_transaction tx); + pair broadcast_transaction(signed_transaction tx)const; /** * @ingroup Transaction Builder API @@ -440,7 +446,7 @@ class wallet_api time_point_sec expiration = time_point::now() + fc::minutes(1), uint32_t review_period_seconds = 0, bool broadcast = true - ); + )const; /** * @ingroup Transaction Builder API @@ -462,7 +468,7 @@ class wallet_api time_point_sec expiration = time_point::now() + fc::minutes(1), uint32_t review_period_seconds = 0, bool broadcast = true - ); + )const; /** * @ingroup Transaction Builder API @@ -470,7 +476,7 @@ class wallet_api * Destroy a transaction builder. * @param handle handle of the transaction builder */ - void remove_builder_transaction(transaction_handle_type handle); + void remove_builder_transaction(transaction_handle_type handle)const; /** Checks whether the wallet has just been created and has not yet had a password set. * @@ -491,7 +497,7 @@ class wallet_api /** Locks the wallet immediately. * @ingroup Wallet Management */ - void lock(); + void lock()const; /** Unlocks the wallet. 
* @@ -504,7 +510,7 @@ class wallet_api * @param password the password previously set with \c set_password() * @ingroup Wallet Management */ - void unlock(string password); + void unlock(string password)const; /** Sets a new password on the wallet. * @@ -517,7 +523,7 @@ class wallet_api * @param password a new password * @ingroup Wallet Management */ - void set_password(string password); + void set_password(string password)const; /** Dumps all private keys owned by the wallet. * @@ -525,7 +531,7 @@ class wallet_api * using \c import_key() * @returns a map containing the private keys, indexed by their public key */ - map dump_private_keys(); + map dump_private_keys()const; /** Returns a list of all commands supported by the wallet API. * @@ -555,13 +561,13 @@ class wallet_api * existing wallet file * @returns true if the specified wallet is loaded */ - bool load_wallet_file(string wallet_filename = ""); + bool load_wallet_file(string wallet_filename = "")const; /** Quit from the wallet. * * The current wallet will be closed and saved. */ - void quit(); + void quit()const; /** Saves the current wallet to the given filename. * @@ -573,7 +579,7 @@ class wallet_api * or overwrite. If \c wallet_filename is empty, * save to the current filename. */ - void save_wallet_file(string wallet_filename = ""); + void save_wallet_file(string wallet_filename = "")const; /** Sets the wallet filename used for future writes. * @@ -582,7 +588,7 @@ class wallet_api * * @param wallet_filename the new filename to use for future saves */ - void set_wallet_filename(string wallet_filename); + void set_wallet_filename(string wallet_filename)const; /** Suggests a safe brain key to use for creating your account. * \c create_account_with_brain_key() requires you to specify a 'brain key', @@ -636,7 +642,7 @@ class wallet_api * @param wif_key the private key in WIF format * @returns true if the key was imported */ - bool import_key(string account_name_or_id, string wif_key); + bool import_key(string account_name_or_id, string wif_key)const; /** Imports accounts from a BitShares 0.x wallet file. * Current wallet file must be unlocked to perform the import. @@ -645,7 +651,7 @@ class wallet_api * @param password the password to encrypt the BitShares 0.x wallet file * @returns a map containing the accounts found and whether imported */ - map import_accounts( string filename, string password ); + map import_accounts( string filename, string password )const; /** Imports from a BitShares 0.x wallet file, find keys that were bound to a given account name on the * BitShares 0.x chain, rebind them to an account name on the 2.0 chain. @@ -658,7 +664,8 @@ class wallet_api * can be same or different to \c src_account_name * @returns whether the import has succeeded */ - bool import_account_keys( string filename, string password, string src_account_name, string dest_account_name ); + bool import_account_keys( string filename, string password, + string src_account_name, string dest_account_name )const; /** * This call will construct transaction(s) that will claim all balances controled @@ -669,7 +676,7 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network */ vector< signed_transaction > import_balance( string account_name_or_id, const vector& wif_keys, - bool broadcast ); + bool broadcast )const; /** Transforms a brain key to reduce the chance of errors when re-entering the key from memory. 
* @@ -691,7 +698,7 @@ class wallet_api * @see create_account_with_brain_key() * * @param name the name of the account, must be unique on the blockchain. Shorter names - * are more expensive to register; the rules are still in flux, but in general + * are more expensive to register. The rules are still in flux, but in general * names of more than 8 characters with at least one digit will be cheap. * @param owner the owner key for the new account * @param active the active key for the new account @@ -701,7 +708,7 @@ class wallet_api * same as the registrar_account if there is no referrer. * @param referrer_percent the percentage (0 - 100) of the new user's transaction fees * not claimed by the blockchain that will be distributed to the - * referrer; the rest will be sent to the registrar. Will be + * referrer, the rest will be sent to the registrar. Will be * multiplied by GRAPHENE_1_PERCENT when constructing the transaction. * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction registering the account @@ -712,7 +719,7 @@ class wallet_api string registrar_account, string referrer_account, uint32_t referrer_percent, - bool broadcast = false); + bool broadcast = false)const; /** * Upgrades an account to prime status. @@ -722,7 +729,7 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction upgrading the account */ - signed_transaction upgrade_account(string account_name_or_id, bool broadcast); + signed_transaction upgrade_account(string account_name_or_id, bool broadcast)const; /** Creates a new account and registers it on the blockchain. * @@ -748,7 +755,7 @@ class wallet_api string account_name, string registrar_account, string referrer_account, - bool broadcast = false); + bool broadcast = false)const; /** Transfer an amount from one account to another. * @param from the name or id of the account sending the funds @@ -767,7 +774,7 @@ class wallet_api string amount, string asset_symbol_or_id, string memo, - bool broadcast = false); + bool broadcast = false)const; /** * This method works just like transfer, except it always broadcasts and @@ -786,7 +793,7 @@ class wallet_api const string& to, const string& amount, const string& asset_symbol_or_id, - const string& memo ) { + const string& memo ) const { auto trx = transfer( from, to, amount, asset_symbol_or_id, memo, true ); return std::make_pair(trx.id(),trx); } @@ -807,24 +814,24 @@ class wallet_api * @param memo text to sign * @return the signed memo data */ - memo_data sign_memo(string from, string to, string memo); + memo_data sign_memo(string from, string to, string memo)const; /** Read a memo. * * @param memo JSON-encoded memo. * @returns string with decrypted message. */ - string read_memo(const memo_data& memo); + string read_memo(const memo_data& memo)const; /** Sign a message using an account's memo key. The signature is generated as in - * in https://github.com/xeroc/python-graphenelib/blob/d9634d74273ebacc92555499eca7c444217ecba0/graphenecommon/message.py#L64 . + * https://github.com/xeroc/python-graphenelib/blob/d9634d74273ebacc92555499eca7c444217ecba0/graphenecommon/message.py#L64 . 
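As a usage sketch for register_account() and transfer() above: the account name alice, the key arguments and the amounts are placeholders, and the two key parameters (whose types were lost from the declaration above) are taken to be the owner and active public keys described by the @param docs:

   #include <graphene/wallet/wallet.hpp>
   using namespace graphene::wallet;
   using namespace graphene::chain;

   void onboard_alice( wallet_api& wapi,
                       const public_key_type& owner_key,
                       const public_key_type& active_key )
   {
      // referrer_percent 0: the referrer keeps none of the shared fees;
      // everything not claimed by the blockchain goes to the registrar.
      wapi.register_account( "alice", owner_key, active_key,
                             "nathan", "nathan", 0, true );
      wapi.transfer( "nathan", "alice", "10", GRAPHENE_SYMBOL, "welcome", true );
   }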
* * @param signer the name or id of signing account * @param message text to sign * @return the signed message in an abstract format */ - signed_message sign_message(string signer, string message); + signed_message sign_message(string signer, string message)const; /** Verify a message signed with sign_message using the given account's memo key. * @@ -836,21 +843,21 @@ class wallet_api * @return true if signature matches */ bool verify_message( const string& message, const string& account, int32_t block, const string& msg_time, - const fc::ecc::compact_signature& sig ); + const fc::ecc::compact_signature& sig )const; /** Verify a message signed with sign_message * * @param message the signed_message structure containing message, meta data and signature * @return true if signature matches */ - bool verify_signed_message( signed_message message ); + bool verify_signed_message( signed_message message )const; /** Verify a message signed with sign_message, in its encapsulated form. * * @param message the complete encapsulated message string including separators and line feeds * @return true if signature matches */ - bool verify_encapsulated_message( string message ); + bool verify_encapsulated_message( string message )const; /** These methods are used for stealth transfers */ ///@{ @@ -862,7 +869,7 @@ class wallet_api * @param label a user-defined string as label * @return true if the label was set, otherwise false */ - bool set_key_label( public_key_type key, string label ); + bool set_key_label( public_key_type key, string label )const; /** * Get label of a public key. @@ -877,7 +884,7 @@ class wallet_api * @param brain_key the brain key to be used to generate a new blind account * @return the public key of the new account */ - public_key_type create_blind_account( string label, string brain_key ); + public_key_type create_blind_account( string label, string brain_key )const; /** * Return the total balances of all blinded commitments that can be claimed by the @@ -886,7 +893,7 @@ class wallet_api * @return the total balances of all blinded commitments that can be claimed by the * given account key or label */ - vector get_blind_balances( string key_or_label ); + vector get_blind_balances( string key_or_label )const; /** * Get all blind accounts. * @return all blind accounts @@ -910,7 +917,7 @@ class wallet_api * @param key_or_account a public key in Base58 format or an account * @return all blind receipts to/form the account */ - vector blind_history( string key_or_account ); + vector blind_history( string key_or_account )const; /** * Given a confirmation receipt, this method will parse it for a blinded balance and confirm @@ -923,7 +930,7 @@ class wallet_api * @param opt_memo a self-defined label for this transfer to be saved in local wallet file * @return a blind receipt */ - blind_receipt receive_blind_transfer( string confirmation_receipt, string opt_from, string opt_memo ); + blind_receipt receive_blind_transfer( string confirmation_receipt, string opt_from, string opt_memo )const; /** * Transfers a public balance from \c from_account_name_or_id to one or more blinded balances using a @@ -937,7 +944,7 @@ class wallet_api blind_confirmation transfer_to_blind( string from_account_name_or_id, string asset_symbol_or_id, vector> to_amounts, - bool broadcast = false ); + bool broadcast = false )const; /** * Transfers funds from a set of blinded balances to a public account balance. 
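A round-trip sketch for the message-signing calls above, assuming an unlocked wallet that controls the memo key of an existing account nathan:

   #include <graphene/wallet/wallet.hpp>
   using namespace graphene::wallet;

   bool sign_and_check( wallet_api& wapi )
   {
      signed_message sm = wapi.sign_message( "nathan", "hello from the cli wallet" );
      return wapi.verify_signed_message( sm );   // true when the signature matches
   }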
@@ -953,7 +960,7 @@ class wallet_api string to_account_name_or_id, string amount, string asset_symbol_or_id, - bool broadcast = false ); + bool broadcast = false )const; /** * Transfer from one set of blinded balances to another. @@ -968,11 +975,11 @@ class wallet_api string to_key_or_label, string amount, string symbol_or_id, - bool broadcast = false ); + bool broadcast = false )const; /** Place a limit order attempting to sell one asset for another. * - * Buying and selling are the same operation on BitShares; if you want to buy BTS + * Buying and selling are the same operation on BitShares. If you want to buy BTS * with USD, you should sell USD for BTS. * * The blockchain will attempt to sell the \c symbol_or_id_to_sell for as @@ -1004,7 +1011,7 @@ class wallet_api * cancelled and the un-spent funds are returned to the seller's * account * @param fill_or_kill if true, the order will only be included in the blockchain - * if it is filled immediately; if false, an open order will be + * if it is filled immediately. if false, an open order will be * left on the books to fill any amount that cannot be filled * immediately. * @param broadcast true to broadcast the transaction on the network @@ -1017,7 +1024,7 @@ class wallet_api string symbol_or_id_to_receive, uint32_t timeout_sec = 0, bool fill_or_kill = false, - bool broadcast = false); + bool broadcast = false)const; /** Borrow an asset or update the debt/collateral ratio for the loan. * @@ -1034,7 +1041,7 @@ class wallet_api * @returns the signed transaction borrowing the asset */ signed_transaction borrow_asset(string borrower, string amount_to_borrow, string asset_symbol_or_id, - string amount_of_collateral, bool broadcast = false); + string amount_of_collateral, bool broadcast = false)const; /** Borrow an asset or update the debt/collateral ratio for the loan, with additional options. * @@ -1054,7 +1061,7 @@ class wallet_api signed_transaction borrow_asset_ext( string borrower, string amount_to_borrow, string asset_symbol_or_id, string amount_of_collateral, call_order_update_operation::extensions_type extensions, - bool broadcast = false ); + bool broadcast = false )const; /** Cancel an existing order * @@ -1091,7 +1098,7 @@ class wallet_api uint8_t precision, asset_options common, fc::optional bitasset_opts, - bool broadcast = false); + bool broadcast = false)const; /** Create the specified amount of the specified asset and credit into the specified account. * @@ -1105,7 +1112,7 @@ class wallet_api signed_transaction issue_asset(string to_account, string amount, string symbol_or_id, string memo, - bool broadcast = false); + bool broadcast = false)const; /** Update the core options on an asset. * There are a number of options which all assets in the network use. These options are @@ -1126,7 +1133,7 @@ class wallet_api signed_transaction update_asset(string symbol_or_id, optional new_issuer, asset_options new_options, - bool broadcast = false); + bool broadcast = false)const; /** Update the issuer of an asset * Since this call requires the owner authority of the current issuer to sign the transaction, @@ -1141,7 +1148,7 @@ class wallet_api */ signed_transaction update_asset_issuer(string symbol_or_id, string new_issuer, - bool broadcast = false); + bool broadcast = false)const; /** Update the options specific to a BitAsset. 
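To make the buy-as-sell framing above concrete, a worked sketch in which buying 100 BTS for at most 5 USD (a limit price of 0.05 USD per BTS) is expressed as selling 5 USD for at least 100 BTS; the account and asset names are placeholders:

   #include <graphene/wallet/wallet.hpp>
   using namespace graphene::wallet;

   void place_buy_order( wallet_api& wapi )
   {
      wapi.sell_asset( "alice",
                       "5",   "USD",    // amount and asset to give up
                       "100", "BTS",    // minimum amount and asset to receive
                       0,               // timeout_sec, left at its declared default
                       false,           // not fill-or-kill: the rest stays on the books
                       true );          // broadcast
   }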
* @@ -1158,7 +1165,7 @@ class wallet_api */ signed_transaction update_bitasset(string symbol_or_id, bitasset_options new_options, - bool broadcast = false); + bool broadcast = false)const; /** Update the set of feed-producing accounts for a BitAsset. * @@ -1172,7 +1179,7 @@ class wallet_api */ signed_transaction update_asset_feed_producers(string symbol_or_id, flat_set new_feed_producers, - bool broadcast = false); + bool broadcast = false)const; /** Publishes a price feed for the named asset. * @@ -1197,7 +1204,7 @@ class wallet_api signed_transaction publish_asset_feed(string publishing_account, string symbol_or_id, price_feed feed, - bool broadcast = false); + bool broadcast = false)const; /** Pay into the fee pool for the given asset. * @@ -1216,7 +1223,7 @@ class wallet_api signed_transaction fund_asset_fee_pool(string from, string symbol_or_id, string amount, - bool broadcast = false); + bool broadcast = false)const; /** Claim funds from the fee pool for the given asset. * @@ -1233,7 +1240,7 @@ class wallet_api */ signed_transaction claim_asset_fee_pool(string symbol_or_id, string amount, - bool broadcast = false); + bool broadcast = false)const; /** Burns an amount of given asset to its reserve pool. * @@ -1248,7 +1255,7 @@ class wallet_api signed_transaction reserve_asset(string from, string amount, string symbol_or_id, - bool broadcast = false); + bool broadcast = false)const; /** Forces a global settling of the given asset (black swan or prediction markets). * @@ -1269,7 +1276,7 @@ class wallet_api */ signed_transaction global_settle_asset(string symbol_or_id, price settle_price, - bool broadcast = false); + bool broadcast = false)const; /** Schedules a market-issued asset for automatic settlement. * @@ -1292,7 +1299,7 @@ class wallet_api signed_transaction settle_asset(string account_to_settle, string amount_to_settle, string symbol_or_id, - bool broadcast = false); + bool broadcast = false)const; /** Creates or updates a bid on an MPA after global settlement. * @@ -1311,7 +1318,7 @@ class wallet_api * @returns the signed transaction creating/updating the bid */ signed_transaction bid_collateral(string bidder, string debt_amount, string debt_symbol_or_id, - string additional_collateral, bool broadcast = false); + string additional_collateral, bool broadcast = false)const; /** Whitelist and blacklist accounts, primarily for transacting in whitelisted assets. * @@ -1336,7 +1343,7 @@ class wallet_api signed_transaction whitelist_account(string authorizing_account, string account_to_list, account_whitelist_operation::account_listing new_listing_status, - bool broadcast = false); + bool broadcast = false)const; /** Creates a committee_member object owned by the given account. * @@ -1350,7 +1357,7 @@ class wallet_api */ signed_transaction create_committee_member(string owner_account, string url, - bool broadcast = false); + bool broadcast = false)const; /** Lists all witnesses registered in the blockchain. * This returns a list of all account names that own witnesses, and the associated witness id, @@ -1365,7 +1372,7 @@ class wallet_api * @param limit the maximum number of witnesss to return (max: 1000) * @returns a list of witnesss mapping witness names to witness ids */ - map list_witnesses(const string& lowerbound, uint32_t limit); + map list_witnesses(const string& lowerbound, uint32_t limit)const; /** Lists all committee_members registered in the blockchain. 
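A small sketch for the fee-pool calls above: nathan is assumed to be the issuer of a hypothetical asset USD, and the amounts are assumed to be denominated in the core asset:

   #include <graphene/wallet/wallet.hpp>
   using namespace graphene::wallet;

   void manage_fee_pool( wallet_api& wapi )
   {
      wapi.fund_asset_fee_pool( "nathan", "USD", "100", true ); // deposit 100 core asset
      wapi.claim_asset_fee_pool( "USD", "20", true );           // later reclaim 20 of it
   }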
* This returns a list of all account names that own committee_members, and the associated committee_member id, @@ -1380,19 +1387,19 @@ class wallet_api * @param limit the maximum number of committee_members to return (max: 1000) * @returns a list of committee_members mapping committee_member names to committee_member ids */ - map list_committee_members(const string& lowerbound, uint32_t limit); + map list_committee_members(const string& lowerbound, uint32_t limit)const; /** Returns information about the given witness. * @param owner_account the name or id of the witness account owner, or the id of the witness * @returns the information about the witness stored in the block chain */ - witness_object get_witness(string owner_account); + witness_object get_witness(string owner_account)const; /** Returns information about the given committee_member. * @param owner_account the name or id of the committee_member account owner, or the id of the committee_member * @returns the information about the committee_member stored in the block chain */ - committee_member_object get_committee_member(string owner_account); + committee_member_object get_committee_member(string owner_account)const; /** Creates a witness object owned by the given account. * @@ -1406,7 +1413,7 @@ class wallet_api */ signed_transaction create_witness(string owner_account, string url, - bool broadcast = false); + bool broadcast = false)const; /** * Update a witness object owned by the given account. @@ -1421,7 +1428,7 @@ class wallet_api signed_transaction update_witness(string witness_name, string url, string block_signing_key, - bool broadcast = false); + bool broadcast = false)const; /** @@ -1446,7 +1453,7 @@ class wallet_api string url, variant worker_settings, bool broadcast = false - ); + )const; /** * Update your votes for workers @@ -1460,7 +1467,7 @@ class wallet_api string account, worker_vote_delta delta, bool broadcast = false - ); + )const; /** * Create a hashed time lock contract @@ -1511,7 +1518,7 @@ class wallet_api * @param account_name An account name, account ID, or vesting balance object ID. * @return a list of vesting balance objects with additional info */ - vector< vesting_balance_object_with_info > get_vesting_balances( string account_name ); + vector< vesting_balance_object_with_info > get_vesting_balances( string account_name )const; /** * Withdraw a vesting balance. @@ -1526,7 +1533,7 @@ class wallet_api string witness_name, string amount, string asset_symbol_or_id, - bool broadcast = false); + bool broadcast = false)const; /** Vote for a given committee_member. * @@ -1548,7 +1555,7 @@ class wallet_api signed_transaction vote_for_committee_member(string voting_account, string committee_member, bool approve, - bool broadcast = false); + bool broadcast = false)const; /** Vote for a given witness. * @@ -1570,7 +1577,7 @@ class wallet_api signed_transaction vote_for_witness(string voting_account, string witness, bool approve, - bool broadcast = false); + bool broadcast = false)const; /** Set the voting proxy for an account. * @@ -1592,7 +1599,7 @@ class wallet_api */ signed_transaction set_voting_proxy(string account_to_modify, optional voting_account, - bool broadcast = false); + bool broadcast = false)const; /** Set your vote for the number of witnesses and committee_members in the system. 
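A voting sketch for the calls above; the voting account alice and the witness and committee member owner names are placeholders:

   #include <graphene/wallet/wallet.hpp>
   using namespace graphene::wallet;

   void cast_votes( wallet_api& wapi )
   {
      wapi.vote_for_witness( "alice", "init0", true, true );            // add approval
      wapi.vote_for_committee_member( "alice", "init1", false, true );  // remove approval
   }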
* @@ -1618,7 +1625,7 @@ class wallet_api signed_transaction set_desired_witness_and_committee_member_count(string account_to_modify, uint16_t desired_number_of_witnesses, uint16_t desired_number_of_committee_members, - bool broadcast = false); + bool broadcast = false)const; /** Signs a transaction. * @@ -1628,7 +1635,7 @@ class wallet_api * @param broadcast true if you wish to broadcast the transaction * @return the signed version of the transaction */ - signed_transaction sign_transaction(signed_transaction tx, bool broadcast = false); + signed_transaction sign_transaction(signed_transaction tx, bool broadcast = false)const; /** Signs a transaction. * @@ -1642,7 +1649,7 @@ class wallet_api */ signed_transaction sign_transaction2(signed_transaction tx, const vector& signing_keys = vector(), - bool broadcast = true); + bool broadcast = true)const; /** Get transaction signers. @@ -1664,7 +1671,7 @@ class wallet_api /** Returns an uninitialized object representing a given blockchain operation. * - * This returns a default-initialized object of the given type; it can be used + * This returns a default-initialized object of the given type. It can be used * during early development of the wallet when we don't yet have custom commands for * creating all of the operations the blockchain supports. * @@ -1678,7 +1685,7 @@ class wallet_api * (e.g., "global_parameters_update_operation") * @return a default-constructed operation of the given type */ - operation get_prototype_operation(string operation_type); + operation get_prototype_operation(string operation_type)const; /** Creates a transaction to propose a parameter change. * @@ -1687,7 +1694,7 @@ class wallet_api * * @param proposing_account The account paying the fee to propose the tx * @param expiration_time Timestamp specifying when the proposal will either take effect or expire. - * @param changed_values The values to change; all other chain parameters are filled in with default values + * @param changed_values The values to change. All other chain parameters are filled in with default values * @param broadcast true if you wish to broadcast the transaction * @return the signed version of the transaction */ @@ -1695,7 +1702,7 @@ class wallet_api const string& proposing_account, fc::time_point_sec expiration_time, const variant_object& changed_values, - bool broadcast = false); + bool broadcast = false)const; /** Propose a fee change. * @@ -1710,7 +1717,7 @@ class wallet_api const string& proposing_account, fc::time_point_sec expiration_time, const variant_object& changed_values, - bool broadcast = false); + bool broadcast = false)const; /** Approve or disapprove a proposal. * @@ -1725,7 +1732,7 @@ class wallet_api const string& proposal_id, const approval_delta& delta, bool broadcast /* = false */ - ); + )const; /** * Returns the order book for the market base:quote. @@ -1734,7 +1741,7 @@ class wallet_api * @param limit depth of the order book to retrieve, for bids and asks each, capped at 50 * @return Order book of the market */ - order_book get_order_book( const string& base, const string& quote, unsigned limit = 50); + order_book get_order_book( const string& base, const string& quote, unsigned limit = 50)const; /** Signs a transaction. 
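A sketch for propose_parameter_change() above, mirroring the expiration pattern used by the builder-call defaults; the parameter name and value are illustrative assumptions, not taken from this patch:

   #include <graphene/wallet/wallet.hpp>
   #include <fc/variant_object.hpp>
   #include <fc/time.hpp>
   using namespace graphene::wallet;

   void propose_bigger_blocks( wallet_api& wapi )
   {
      fc::variant_object changed = fc::mutable_variant_object()
         ( "maximum_block_size", 2u * 1024u * 1024u );   // illustrative field name and value
      wapi.propose_parameter_change( "nathan",
                                     fc::time_point::now() + fc::days(1),
                                     changed,
                                     true );
   }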
* @@ -1748,19 +1755,19 @@ class wallet_api * @return the signed transaction */ signed_transaction add_transaction_signature( signed_transaction tx, - bool broadcast = false ); + bool broadcast = false )const; - void dbg_make_uia(string creator, string symbol); - void dbg_make_mia(string creator, string symbol); - void dbg_push_blocks( std::string src_filename, uint32_t count ); - void dbg_generate_blocks( std::string debug_wif_key, uint32_t count ); - void dbg_stream_json_objects( const std::string& filename ); - void dbg_update_object( fc::variant_object update ); + void dbg_make_uia(string creator, string symbol)const; + void dbg_make_mia(string creator, string symbol)const; + void dbg_push_blocks( std::string src_filename, uint32_t count )const; + void dbg_generate_blocks( std::string debug_wif_key, uint32_t count )const; + void dbg_stream_json_objects( const std::string& filename )const; + void dbg_update_object( fc::variant_object update )const; - void flood_network(string prefix, uint32_t number_of_transactions); + void flood_network(string prefix, uint32_t number_of_transactions)const; - void network_add_nodes( const vector& nodes ); - vector< variant > network_get_connected_peers(); + void network_add_nodes( const vector& nodes )const; + vector< variant > network_get_connected_peers()const; /** * Used to transfer from one set of blinded balances to another @@ -1770,14 +1777,12 @@ class wallet_api string amount, string symbol, bool broadcast = false, - bool to_temp = false ); + bool to_temp = false )const; std::map> get_result_formatters() const; - fc::signal lock_changed; - std::shared_ptr my; - void encrypt_keys(); + void encrypt_keys()const; /** * Manage account storage map(key->value) by using the custom operations plugin. @@ -1794,7 +1799,7 @@ class wallet_api * @return The signed transaction */ signed_transaction account_store_map(string account, string catalog, bool remove, - flat_map> key_values, bool broadcast); + flat_map> key_values, bool broadcast)const; /** * Get \c account_storage_object of an account by using the custom operations plugin. @@ -1806,7 +1811,7 @@ class wallet_api * * @return An \c account_storage_object or empty. 
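A sketch for the custom-operations storage calls above. The key/value map's template arguments were lost from the declaration, so the type below, a flat_map from string to optional string, is an assumption; the catalog name and keys are placeholders:

   #include <graphene/wallet/wallet.hpp>
   #include <boost/container/flat_map.hpp>
   using namespace graphene::wallet;

   void remember_settings( wallet_api& wapi )
   {
      // Assumed key/value type; see the note above.
      boost::container::flat_map< std::string, fc::optional<std::string> > kv;
      kv["language"] = std::string("en");
      kv["theme"]    = std::string("dark");
      wapi.account_store_map( "nathan", "settings", false /*remove*/, kv, true );
      auto stored = wapi.get_account_storage( "nathan", "settings" );   // read it back
      (void)stored;
   }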
*/ - vector get_account_storage(string account, string catalog); + vector get_account_storage(string account, string catalog)const; }; diff --git a/libraries/wallet/wallet.cpp b/libraries/wallet/wallet.cpp index 0067bae1be..f3fe362b05 100644 --- a/libraries/wallet/wallet.cpp +++ b/libraries/wallet/wallet.cpp @@ -155,16 +155,14 @@ wallet_api::wallet_api(const wallet_data& initial_data, fc::api rapi) { } -wallet_api::~wallet_api() -{ -} +wallet_api::~wallet_api() = default; -bool wallet_api::copy_wallet_file(string destination_filename) +bool wallet_api::copy_wallet_file(string destination_filename)const { return my->copy_wallet_file(destination_filename); } -optional wallet_api::get_block(uint32_t num) +optional wallet_api::get_block(uint32_t num)const { return my->_remote_db->get_block(num); } @@ -174,17 +172,17 @@ uint64_t wallet_api::get_account_count() const return my->_remote_db->get_account_count(); } -vector wallet_api::list_my_accounts() +vector wallet_api::list_my_accounts()const { return vector(my->_wallet.my_accounts.begin(), my->_wallet.my_accounts.end()); } -map wallet_api::list_accounts(const string& lowerbound, uint32_t limit) +map wallet_api::list_accounts(const string& lowerbound, uint32_t limit)const { return my->_remote_db->lookup_accounts(lowerbound, limit, {}); } -vector wallet_api::list_account_balances(const string& id) +vector wallet_api::list_account_balances(const string& id)const { return my->_remote_db->get_account_balances(id, flat_set()); } @@ -379,7 +377,7 @@ account_history_operation_detail wallet_api::get_account_history_by_operations( const string& name, const flat_set& operation_types, uint32_t start, - uint32_t limit) + uint32_t limit)const { account_history_operation_detail result; @@ -422,7 +420,7 @@ account_history_operation_detail wallet_api::get_account_history_by_operations( return result; } -full_account wallet_api::get_full_account( const string& name_or_id) +full_account wallet_api::get_full_account( const string& name_or_id)const { return my->_remote_db->get_full_accounts({name_or_id}, false)[name_or_id]; } @@ -443,7 +441,7 @@ vector wallet_api::get_account_limit_orders( const string "e, uint32_t limit, optional ostart_id, - optional ostart_price) + optional ostart_price)const { return my->_remote_db->get_account_limit_orders(name_or_id, base, quote, limit, ostart_id, ostart_price); } @@ -502,14 +500,14 @@ string wallet_api::get_wallet_filename() const return my->get_wallet_filename(); } -transaction_handle_type wallet_api::begin_builder_transaction() +transaction_handle_type wallet_api::begin_builder_transaction()const { return my->begin_builder_transaction(); } void wallet_api::add_operation_to_builder_transaction( transaction_handle_type transaction_handle, - const operation& op) + const operation& op)const { my->add_operation_to_builder_transaction(transaction_handle, op); } @@ -517,34 +515,35 @@ void wallet_api::add_operation_to_builder_transaction( void wallet_api::replace_operation_in_builder_transaction( transaction_handle_type handle, unsigned operation_index, - const operation& new_op) + const operation& new_op)const { my->replace_operation_in_builder_transaction(handle, operation_index, new_op); } -asset wallet_api::set_fees_on_builder_transaction(transaction_handle_type handle, string fee_asset) +asset wallet_api::set_fees_on_builder_transaction(transaction_handle_type handle, string fee_asset)const { return my->set_fees_on_builder_transaction(handle, fee_asset); } -transaction 
wallet_api::preview_builder_transaction(transaction_handle_type handle) +transaction wallet_api::preview_builder_transaction(transaction_handle_type handle)const { return my->preview_builder_transaction(handle); } -signed_transaction wallet_api::sign_builder_transaction(transaction_handle_type transaction_handle, bool broadcast) +signed_transaction wallet_api::sign_builder_transaction(transaction_handle_type transaction_handle, + bool broadcast)const { return my->sign_builder_transaction(transaction_handle, broadcast); } signed_transaction wallet_api::sign_builder_transaction2(transaction_handle_type transaction_handle, const vector& explicit_keys, - bool broadcast) + bool broadcast)const { return my->sign_builder_transaction2(transaction_handle, explicit_keys, broadcast); } -pair wallet_api::broadcast_transaction(signed_transaction tx) +pair wallet_api::broadcast_transaction(signed_transaction tx)const { return my->broadcast_transaction(tx); } @@ -553,7 +552,7 @@ signed_transaction wallet_api::propose_builder_transaction( transaction_handle_type handle, time_point_sec expiration, uint32_t review_period_seconds, - bool broadcast) + bool broadcast)const { return my->propose_builder_transaction(handle, expiration, review_period_seconds, broadcast); } @@ -563,12 +562,12 @@ signed_transaction wallet_api::propose_builder_transaction2( string account_name_or_id, time_point_sec expiration, uint32_t review_period_seconds, - bool broadcast) + bool broadcast)const { return my->propose_builder_transaction2(handle, account_name_or_id, expiration, review_period_seconds, broadcast); } -void wallet_api::remove_builder_transaction(transaction_handle_type handle) +void wallet_api::remove_builder_transaction(transaction_handle_type handle)const { return my->remove_builder_transaction(handle); } @@ -602,7 +601,7 @@ asset_id_type wallet_api::get_asset_id(const string& asset_symbol_or_id) const return my->get_asset_id(asset_symbol_or_id); } -bool wallet_api::import_key(string account_name_or_id, string wif_key) +bool wallet_api::import_key(string account_name_or_id, string wif_key)const { FC_ASSERT(!is_locked()); // backup wallet @@ -621,7 +620,7 @@ bool wallet_api::import_key(string account_name_or_id, string wif_key) return false; } -map wallet_api::import_accounts( string filename, string password ) +map wallet_api::import_accounts( string filename, string password )const { FC_ASSERT( !is_locked() ); FC_ASSERT( fc::exists( filename ) ); @@ -697,7 +696,7 @@ bool wallet_api::import_account_keys( string filename, string password, string src_account_name, - string dest_account_name ) + string dest_account_name )const { FC_ASSERT( !is_locked() ); FC_ASSERT( fc::exists( filename ) ); @@ -749,7 +748,7 @@ string wallet_api::normalize_brain_key(string s) const return detail::normalize_brain_key( s ); } -variant wallet_api::info() +variant wallet_api::info()const { return my->info(); } @@ -770,14 +769,14 @@ signed_transaction wallet_api::register_account(string name, string registrar_account, string referrer_account, uint32_t referrer_percent, - bool broadcast) + bool broadcast)const { return my->register_account( name, owner_pubkey, active_pubkey, registrar_account, referrer_account, referrer_percent, broadcast ); } signed_transaction wallet_api::create_account_with_brain_key(string brain_key, string account_name, string registrar_account, string referrer_account, - bool broadcast /* = false */) + bool broadcast /* = false */)const { return my->create_account_with_brain_key( brain_key, account_name, 
registrar_account, @@ -785,13 +784,13 @@ signed_transaction wallet_api::create_account_with_brain_key(string brain_key, s ); } signed_transaction wallet_api::issue_asset(string to_account, string amount, string symbol, - string memo, bool broadcast) + string memo, bool broadcast)const { return my->issue_asset(to_account, amount, symbol, memo, broadcast); } signed_transaction wallet_api::transfer(string from, string to, string amount, - string asset_symbol, string memo, bool broadcast /* = false */) + string asset_symbol, string memo, bool broadcast /* = false */)const { return my->transfer(from, to, amount, asset_symbol, memo, broadcast); } @@ -800,8 +799,7 @@ signed_transaction wallet_api::create_asset(string issuer, uint8_t precision, asset_options common, fc::optional bitasset_opts, - bool broadcast) - + bool broadcast)const { return my->create_asset(issuer, symbol, precision, common, bitasset_opts, broadcast); } @@ -809,28 +807,28 @@ signed_transaction wallet_api::create_asset(string issuer, signed_transaction wallet_api::update_asset(string symbol, optional new_issuer, asset_options new_options, - bool broadcast /* = false */) + bool broadcast /* = false */)const { return my->update_asset(symbol, new_issuer, new_options, broadcast); } signed_transaction wallet_api::update_asset_issuer(string symbol, string new_issuer, - bool broadcast /* = false */) + bool broadcast /* = false */)const { return my->update_asset_issuer(symbol, new_issuer, broadcast); } signed_transaction wallet_api::update_bitasset(string symbol, bitasset_options new_options, - bool broadcast /* = false */) + bool broadcast /* = false */)const { return my->update_bitasset(symbol, new_options, broadcast); } signed_transaction wallet_api::update_asset_feed_producers(string symbol, flat_set new_feed_producers, - bool broadcast /* = false */) + bool broadcast /* = false */)const { return my->update_asset_feed_producers(symbol, new_feed_producers, broadcast); } @@ -838,7 +836,7 @@ signed_transaction wallet_api::update_asset_feed_producers(string symbol, signed_transaction wallet_api::publish_asset_feed(string publishing_account, string symbol, price_feed feed, - bool broadcast /* = false */) + bool broadcast /* = false */)const { return my->publish_asset_feed(publishing_account, symbol, feed, broadcast); } @@ -846,14 +844,14 @@ signed_transaction wallet_api::publish_asset_feed(string publishing_account, signed_transaction wallet_api::fund_asset_fee_pool(string from, string symbol, string amount, - bool broadcast /* = false */) + bool broadcast /* = false */)const { return my->fund_asset_fee_pool(from, symbol, amount, broadcast); } signed_transaction wallet_api::claim_asset_fee_pool(string symbol, string amount, - bool broadcast /* = false */) + bool broadcast /* = false */)const { return my->claim_asset_fee_pool(symbol, amount, broadcast); } @@ -861,14 +859,14 @@ signed_transaction wallet_api::claim_asset_fee_pool(string symbol, signed_transaction wallet_api::reserve_asset(string from, string amount, string symbol, - bool broadcast /* = false */) + bool broadcast /* = false */)const { return my->reserve_asset(from, amount, symbol, broadcast); } signed_transaction wallet_api::global_settle_asset(string symbol, price settle_price, - bool broadcast /* = false */) + bool broadcast /* = false */)const { return my->global_settle_asset(symbol, settle_price, broadcast); } @@ -876,7 +874,7 @@ signed_transaction wallet_api::global_settle_asset(string symbol, signed_transaction wallet_api::settle_asset(string account_to_settle, string 
amount_to_settle, string symbol, - bool broadcast /* = false */) + bool broadcast /* = false */)const { return my->settle_asset(account_to_settle, amount_to_settle, symbol, broadcast); } @@ -884,7 +882,7 @@ signed_transaction wallet_api::settle_asset(string account_to_settle, signed_transaction wallet_api::bid_collateral(string bidder_name, string debt_amount, string debt_symbol, string additional_collateral, - bool broadcast ) + bool broadcast )const { return my->bid_collateral(bidder_name, debt_amount, debt_symbol, additional_collateral, broadcast); } @@ -892,40 +890,40 @@ signed_transaction wallet_api::bid_collateral(string bidder_name, signed_transaction wallet_api::whitelist_account(string authorizing_account, string account_to_list, account_whitelist_operation::account_listing new_listing_status, - bool broadcast /* = false */) + bool broadcast /* = false */)const { return my->whitelist_account(authorizing_account, account_to_list, new_listing_status, broadcast); } signed_transaction wallet_api::create_committee_member(string owner_account, string url, - bool broadcast /* = false */) + bool broadcast /* = false */)const { return my->create_committee_member(owner_account, url, broadcast); } -map wallet_api::list_witnesses(const string& lowerbound, uint32_t limit) +map wallet_api::list_witnesses(const string& lowerbound, uint32_t limit)const { return my->_remote_db->lookup_witness_accounts(lowerbound, limit); } -map wallet_api::list_committee_members(const string& lowerbound, uint32_t limit) +map wallet_api::list_committee_members(const string& lowerbound, uint32_t limit)const { return my->_remote_db->lookup_committee_member_accounts(lowerbound, limit); } -witness_object wallet_api::get_witness(string owner_account) +witness_object wallet_api::get_witness(string owner_account)const { return my->get_witness(owner_account); } -committee_member_object wallet_api::get_committee_member(string owner_account) +committee_member_object wallet_api::get_committee_member(string owner_account)const { return my->get_committee_member(owner_account); } signed_transaction wallet_api::create_witness(string owner_account, string url, - bool broadcast /* = false */) + bool broadcast /* = false */)const { return my->create_witness(owner_account, url, broadcast); } @@ -938,7 +936,7 @@ signed_transaction wallet_api::create_worker( string name, string url, variant worker_settings, - bool broadcast /* = false */) + bool broadcast /* = false */)const { return my->create_worker( owner_account, work_begin_date, work_end_date, daily_pay, name, url, worker_settings, broadcast ); @@ -947,7 +945,7 @@ signed_transaction wallet_api::create_worker( signed_transaction wallet_api::update_worker_votes( string owner_account, worker_vote_delta delta, - bool broadcast /* = false */) + bool broadcast /* = false */)const { return my->update_worker_votes( owner_account, delta, broadcast ); } @@ -956,12 +954,12 @@ signed_transaction wallet_api::update_witness( string witness_name, string url, string block_signing_key, - bool broadcast /* = false */) + bool broadcast /* = false */)const { return my->update_witness(witness_name, url, block_signing_key, broadcast); } -vector< vesting_balance_object_with_info > wallet_api::get_vesting_balances( string account_name ) +vector< vesting_balance_object_with_info > wallet_api::get_vesting_balances( string account_name )const { return my->get_vesting_balances( account_name ); } @@ -970,7 +968,7 @@ signed_transaction wallet_api::withdraw_vesting( string witness_name, string amount, string 
asset_symbol, - bool broadcast /* = false */) + bool broadcast /* = false */)const { return my->withdraw_vesting( witness_name, amount, asset_symbol, broadcast ); } @@ -978,7 +976,7 @@ signed_transaction wallet_api::withdraw_vesting( signed_transaction wallet_api::vote_for_committee_member(string voting_account, string witness, bool approve, - bool broadcast /* = false */) + bool broadcast /* = false */)const { return my->vote_for_committee_member(voting_account, witness, approve, broadcast); } @@ -986,14 +984,14 @@ signed_transaction wallet_api::vote_for_committee_member(string voting_account, signed_transaction wallet_api::vote_for_witness(string voting_account, string witness, bool approve, - bool broadcast /* = false */) + bool broadcast /* = false */)const { return my->vote_for_witness(voting_account, witness, approve, broadcast); } signed_transaction wallet_api::set_voting_proxy(string account_to_modify, optional voting_account, - bool broadcast /* = false */) + bool broadcast /* = false */)const { return my->set_voting_proxy(account_to_modify, voting_account, broadcast); } @@ -1001,24 +999,24 @@ signed_transaction wallet_api::set_voting_proxy(string account_to_modify, signed_transaction wallet_api::set_desired_witness_and_committee_member_count(string account_to_modify, uint16_t desired_number_of_witnesses, uint16_t desired_number_of_committee_members, - bool broadcast /* = false */) + bool broadcast /* = false */)const { return my->set_desired_witness_and_committee_member_count(account_to_modify, desired_number_of_witnesses, desired_number_of_committee_members, broadcast); } -void wallet_api::set_wallet_filename(string wallet_filename) +void wallet_api::set_wallet_filename(string wallet_filename)const { my->_wallet_filename = wallet_filename; } -signed_transaction wallet_api::sign_transaction(signed_transaction tx, bool broadcast /* = false */) +signed_transaction wallet_api::sign_transaction(signed_transaction tx, bool broadcast /* = false */)const { try { return my->sign_transaction( tx, broadcast); } FC_CAPTURE_AND_RETHROW( (tx) ) } signed_transaction wallet_api::sign_transaction2(signed_transaction tx, const vector& signing_keys, - bool broadcast /* = false */) + bool broadcast /* = false */)const { try { return my->sign_transaction2( tx, signing_keys, broadcast); } FC_CAPTURE_AND_RETHROW( (tx) ) } @@ -1033,54 +1031,54 @@ vector> wallet_api::get_key_references(const vectorget_key_references(keys); } FC_CAPTURE_AND_RETHROW( (keys) ) } -operation wallet_api::get_prototype_operation(string operation_name) +operation wallet_api::get_prototype_operation(string operation_name)const { return my->get_prototype_operation( operation_name ); } -void wallet_api::dbg_make_uia(string creator, string symbol) +void wallet_api::dbg_make_uia(string creator, string symbol)const { FC_ASSERT(!is_locked()); my->dbg_make_uia(creator, symbol); } -void wallet_api::dbg_make_mia(string creator, string symbol) +void wallet_api::dbg_make_mia(string creator, string symbol)const { FC_ASSERT(!is_locked()); my->dbg_make_mia(creator, symbol); } -void wallet_api::dbg_push_blocks( std::string src_filename, uint32_t count ) +void wallet_api::dbg_push_blocks( std::string src_filename, uint32_t count )const { my->dbg_push_blocks( src_filename, count ); } -void wallet_api::dbg_generate_blocks( std::string debug_wif_key, uint32_t count ) +void wallet_api::dbg_generate_blocks( std::string debug_wif_key, uint32_t count )const { my->dbg_generate_blocks( debug_wif_key, count ); } -void wallet_api::dbg_stream_json_objects( 
const std::string& filename ) +void wallet_api::dbg_stream_json_objects( const std::string& filename )const { my->dbg_stream_json_objects( filename ); } -void wallet_api::dbg_update_object( fc::variant_object update ) +void wallet_api::dbg_update_object( fc::variant_object update )const { my->dbg_update_object( update ); } -void wallet_api::network_add_nodes( const vector& nodes ) +void wallet_api::network_add_nodes( const vector& nodes )const { my->network_add_nodes( nodes ); } -vector< variant > wallet_api::network_get_connected_peers() +vector< variant > wallet_api::network_get_connected_peers()const { return my->network_get_connected_peers(); } -void wallet_api::flood_network(string prefix, uint32_t number_of_transactions) +void wallet_api::flood_network(string prefix, uint32_t number_of_transactions)const { FC_ASSERT(!is_locked()); my->flood_network(prefix, number_of_transactions); @@ -1091,7 +1089,7 @@ signed_transaction wallet_api::propose_parameter_change( fc::time_point_sec expiration_time, const variant_object& changed_values, bool broadcast /* = false */ - ) + )const { return my->propose_parameter_change( proposing_account, expiration_time, changed_values, broadcast ); } @@ -1101,7 +1099,7 @@ signed_transaction wallet_api::propose_fee_change( fc::time_point_sec expiration_time, const variant_object& changed_fees, bool broadcast /* = false */ - ) + )const { return my->propose_fee_change( proposing_account, expiration_time, changed_fees, broadcast ); } @@ -1111,7 +1109,7 @@ signed_transaction wallet_api::approve_proposal( const string& proposal_id, const approval_delta& delta, bool broadcast /* = false */ - ) + )const { return my->approve_proposal( fee_paying_account, proposal_id, delta, broadcast ); } @@ -1127,7 +1125,7 @@ dynamic_global_property_object wallet_api::get_dynamic_global_properties() const } signed_transaction wallet_api::add_transaction_signature( signed_transaction tx, - bool broadcast ) + bool broadcast )const { return my->add_transaction_signature( tx, broadcast ); } @@ -1213,17 +1211,17 @@ string wallet_api::gethelp(const string& method)const return ss.str(); } -bool wallet_api::load_wallet_file( string wallet_filename ) +bool wallet_api::load_wallet_file( string wallet_filename )const { return my->load_wallet_file( wallet_filename ); } -void wallet_api::quit() +void wallet_api::quit()const { my->quit(); } -void wallet_api::save_wallet_file( string wallet_filename ) +void wallet_api::save_wallet_file( string wallet_filename )const { my->save_wallet_file( wallet_filename ); } @@ -1243,12 +1241,12 @@ bool wallet_api::is_new()const return my->_wallet.cipher_keys.size() == 0; } -void wallet_api::encrypt_keys() +void wallet_api::encrypt_keys()const { my->encrypt_keys(); } -void wallet_api::lock() +void wallet_api::lock()const { try { FC_ASSERT( !is_locked() ); encrypt_keys(); @@ -1259,7 +1257,7 @@ void wallet_api::lock() my->self.lock_changed(true); } FC_CAPTURE_AND_RETHROW() } -void wallet_api::unlock(string password) +void wallet_api::unlock(string password)const { try { FC_ASSERT(password.size() > 0); auto pw = fc::sha512::hash(password.c_str(), password.size()); @@ -1271,7 +1269,7 @@ void wallet_api::unlock(string password) my->self.lock_changed(false); } FC_CAPTURE_AND_RETHROW() } -void wallet_api::set_password( string password ) +void wallet_api::set_password( string password )const { if( !is_new() ) FC_ASSERT( !is_locked(), "The wallet must be unlocked before the password can be set" ); @@ -1282,18 +1280,18 @@ void wallet_api::set_password( string password ) 
vector< signed_transaction > wallet_api::import_balance( string name_or_id, const vector& wif_keys, - bool broadcast ) + bool broadcast )const { return my->import_balance( name_or_id, wif_keys, broadcast ); } -map wallet_api::dump_private_keys() +map wallet_api::dump_private_keys()const { FC_ASSERT(!is_locked()); return my->_keys; } -signed_transaction wallet_api::upgrade_account( string name, bool broadcast ) +signed_transaction wallet_api::upgrade_account( string name, bool broadcast )const { return my->upgrade_account(name,broadcast); } @@ -1305,14 +1303,14 @@ signed_transaction wallet_api::sell_asset(string seller_account, string symbol_to_receive, uint32_t expiration, bool fill_or_kill, - bool broadcast) + bool broadcast)const { return my->sell_asset(seller_account, amount_to_sell, symbol_to_sell, min_to_receive, symbol_to_receive, expiration, fill_or_kill, broadcast); } signed_transaction wallet_api::borrow_asset(string seller_name, string amount_to_sell, - string asset_symbol, string amount_of_collateral, bool broadcast) + string asset_symbol, string amount_of_collateral, bool broadcast)const { FC_ASSERT(!is_locked()); return my->borrow_asset(seller_name, amount_to_sell, asset_symbol, amount_of_collateral, broadcast); @@ -1321,7 +1319,7 @@ signed_transaction wallet_api::borrow_asset(string seller_name, string amount_to signed_transaction wallet_api::borrow_asset_ext( string seller_name, string amount_to_sell, string asset_symbol, string amount_of_collateral, call_order_update_operation::extensions_type extensions, - bool broadcast) + bool broadcast)const { FC_ASSERT(!is_locked()); return my->borrow_asset_ext(seller_name, amount_to_sell, asset_symbol, @@ -1334,26 +1332,26 @@ signed_transaction wallet_api::cancel_order(const limit_order_id_type& order_id, return my->cancel_order(order_id, broadcast); } -memo_data wallet_api::sign_memo(string from, string to, string memo) +memo_data wallet_api::sign_memo(string from, string to, string memo)const { FC_ASSERT(!is_locked()); return my->sign_memo(from, to, memo); } -string wallet_api::read_memo(const memo_data& memo) +string wallet_api::read_memo(const memo_data& memo)const { FC_ASSERT(!is_locked()); return my->read_memo(memo); } -signed_message wallet_api::sign_message(string signer, string message) +signed_message wallet_api::sign_message(string signer, string message)const { FC_ASSERT(!is_locked()); return my->sign_message(signer, message); } bool wallet_api::verify_message( const string& message, const string& account, int32_t block, const string& msg_time, - const fc::ecc::compact_signature& sig ) + const fc::ecc::compact_signature& sig )const { return my->verify_message( message, account, block, msg_time, sig ); } @@ -1363,7 +1361,7 @@ bool wallet_api::verify_message( const string& message, const string& account, i * @param message the signed_message structure containing message, meta data and signature * @return true if signature matches */ -bool wallet_api::verify_signed_message( signed_message message ) +bool wallet_api::verify_signed_message( signed_message message )const { return my->verify_signed_message( message ); } @@ -1373,7 +1371,7 @@ bool wallet_api::verify_signed_message( signed_message message ) * @param message the complete encapsulated message string including separators and line feeds * @return true if signature matches */ -bool wallet_api::verify_encapsulated_message( string message ) +bool wallet_api::verify_encapsulated_message( string message )const { return my->verify_encapsulated_message( message ); } @@ 
-1402,7 +1400,7 @@ public_key_type wallet_api::get_public_key( string label )const return public_key_type(); } -bool wallet_api::set_key_label( public_key_type key, string label ) +bool wallet_api::set_key_label( public_key_type key, string label )const { auto result = my->_wallet.labeled_keys.insert( key_label{label,key} ); if( result.second ) return true; @@ -1435,7 +1433,7 @@ map wallet_api::get_my_blind_accounts()const return result; } -public_key_type wallet_api::create_blind_account( string label, string brain_key ) +public_key_type wallet_api::create_blind_account( string label, string brain_key )const { FC_ASSERT( !is_locked() ); @@ -1455,7 +1453,7 @@ public_key_type wallet_api::create_blind_account( string label, string brain_ return pub_key; } -vector wallet_api::get_blind_balances( string key_or_label ) +vector wallet_api::get_blind_balances( string key_or_label )const { vector result; map balances; @@ -1492,7 +1490,7 @@ blind_confirmation wallet_api::transfer_from_blind( string from_blind_account_ke string to_account_id_or_name, string amount_in, string symbol, - bool broadcast ) + bool broadcast )const { try { transfer_from_blind_operation from_blind; @@ -1553,7 +1551,7 @@ blind_confirmation wallet_api::blind_transfer( string from_key_or_label, string to_key_or_label, string amount_in, string symbol, - bool broadcast ) + bool broadcast )const { return blind_transfer_help( from_key_or_label, to_key_or_label, amount_in, symbol, broadcast, false ); } @@ -1562,7 +1560,7 @@ blind_confirmation wallet_api::blind_transfer_help( string from_key_or_label, string amount_in, string symbol, bool broadcast, - bool to_temp ) + bool to_temp )const { blind_confirmation confirm; try { @@ -1739,7 +1737,7 @@ blind_confirmation wallet_api::transfer_to_blind( string from_account_id_or_name string asset_symbol, /* map from key or label to amount */ vector> to_amounts, - bool broadcast ) + bool broadcast )const { try { FC_ASSERT( !is_locked() ); idump((to_amounts)); @@ -1822,7 +1820,7 @@ blind_confirmation wallet_api::transfer_to_blind( string from_account_id_or_name return confirm; } FC_CAPTURE_AND_RETHROW( (from_account_id_or_name)(asset_symbol)(to_amounts) ) } -blind_receipt wallet_api::receive_blind_transfer( string confirmation_receipt, string opt_from, string opt_memo ) +blind_receipt wallet_api::receive_blind_transfer( string confirmation_receipt, string opt_from, string opt_memo )const { FC_ASSERT( !is_locked() ); stealth_confirmation conf(confirmation_receipt); @@ -1899,7 +1897,7 @@ blind_receipt wallet_api::receive_blind_transfer( string confirmation_receipt, s return result; } -vector wallet_api::blind_history( string key_or_account ) +vector wallet_api::blind_history( string key_or_account )const { vector result; auto pub_key = get_public_key( key_or_account ); @@ -1917,19 +1915,19 @@ vector wallet_api::blind_history( string key_or_account ) return result; } -order_book wallet_api::get_order_book( const string& base, const string& quote, unsigned limit ) +order_book wallet_api::get_order_book( const string& base, const string& quote, unsigned limit )const { return( my->_remote_db->get_order_book( base, quote, limit ) ); } // custom operations signed_transaction wallet_api::account_store_map(string account, string catalog, bool remove, - flat_map> key_values, bool broadcast) + flat_map> key_values, bool broadcast)const { return my->account_store_map(account, catalog, remove, key_values, broadcast); } -vector wallet_api::get_account_storage(string account, string catalog) +vector 
wallet_api::get_account_storage(string account, string catalog)const { try { return my->_custom_operations->get_storage_info(account, catalog); } FC_CAPTURE_AND_RETHROW( (account)(catalog) ) } From 42c6afca019280fac35bbc1d03f22c96eef0ac32 Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 25 Oct 2022 16:48:33 +0000 Subject: [PATCH 304/338] Add comparator "std::less<>" to "map" --- libraries/app/database_api.cpp | 10 +++++----- libraries/app/database_api_impl.hxx | 4 ++-- .../app/include/graphene/app/database_api.hpp | 20 +++++++++---------- .../wallet/include/graphene/wallet/wallet.hpp | 2 +- libraries/wallet/wallet.cpp | 2 +- 5 files changed, 19 insertions(+), 19 deletions(-) diff --git a/libraries/app/database_api.cpp b/libraries/app/database_api.cpp index 3e1bcfa1ca..a0dddc52b0 100644 --- a/libraries/app/database_api.cpp +++ b/libraries/app/database_api.cpp @@ -774,16 +774,16 @@ vector> database_api_impl::lookup_account_names(const v return get_accounts( account_names, false ); } -map database_api::lookup_accounts( const string& lower_bound_name, +map> database_api::lookup_accounts( const string& lower_bound_name, uint32_t limit, - optional subscribe )const + const optional& subscribe )const { return my->lookup_accounts( lower_bound_name, limit, subscribe ); } -map database_api_impl::lookup_accounts( const string& lower_bound_name, +map> database_api_impl::lookup_accounts( const string& lower_bound_name, uint32_t limit, - optional subscribe )const + const optional& subscribe )const { FC_ASSERT( _app_options, "Internal error" ); const auto configured_limit = _app_options->api_limit_lookup_accounts; @@ -792,7 +792,7 @@ map database_api_impl::lookup_accounts( const string& lo ("configured_limit", configured_limit) ); const auto& accounts_by_name = _db.get_index_type().indices().get(); - map result; + map> result; if( limit == 0 ) // shortcut to save a database query return result; diff --git a/libraries/app/database_api_impl.hxx b/libraries/app/database_api_impl.hxx index 04f9f92a24..240793f53b 100644 --- a/libraries/app/database_api_impl.hxx +++ b/libraries/app/database_api_impl.hxx @@ -77,9 +77,9 @@ class database_api_impl : public std::enable_shared_from_this optional get_account_by_name( string name )const; vector get_account_references( const std::string account_id_or_name )const; vector> lookup_account_names(const vector& account_names)const; - map lookup_accounts( const string& lower_bound_name, + map> lookup_accounts( const string& lower_bound_name, uint32_t limit, - optional subscribe )const; + const optional& subscribe )const; uint64_t get_account_count()const; // Balances diff --git a/libraries/app/include/graphene/app/database_api.hpp b/libraries/app/include/graphene/app/database_api.hpp index 752fefc2b4..73e858878b 100644 --- a/libraries/app/include/graphene/app/database_api.hpp +++ b/libraries/app/include/graphene/app/database_api.hpp @@ -80,7 +80,7 @@ class database_api /** * @brief Get the objects corresponding to the provided IDs * @param ids IDs of the objects to retrieve - * @param subscribe @a true to subscribe to the queried objects; @a false to not subscribe; + * @param subscribe @a true to subscribe to the queried objects, @a false to not subscribe, * @a null to subscribe or not subscribe according to current auto-subscription setting * (see @ref set_auto_subscription) * @return The objects retrieved, in the order they are mentioned in ids @@ -277,7 +277,7 @@ class database_api /** * @brief Get a list of accounts by names or IDs * @param account_names_or_ids names or 
IDs of the accounts to retrieve - * @param subscribe @a true to subscribe to the queried account objects; @a false to not subscribe; + * @param subscribe @a true to subscribe to the queried account objects, @a false to not subscribe, * @a null to subscribe or not subscribe according to current auto-subscription setting * (see @ref set_auto_subscription) * @return The accounts corresponding to the provided names or IDs @@ -292,7 +292,7 @@ class database_api * @param names_or_ids Each item must be the name or ID of an account to retrieve, * the quantity should not be greater than the configured value of * @a api_limit_get_full_accounts - * @param subscribe @a true to subscribe to the queried full account objects; @a false to not subscribe; + * @param subscribe @a true to subscribe to the queried full account objects, @a false to not subscribe, * @a null to subscribe or not subscribe according to current auto-subscription setting * (see @ref set_auto_subscription) * @return Map of string from @p names_or_ids to the corresponding account @@ -345,7 +345,7 @@ class database_api * @param lower_bound_name Lower bound of the first name to return * @param limit Maximum number of results to return, must not exceed the configured value of * @a api_limit_lookup_accounts - * @param subscribe @a true to subscribe to the queried account objects; @a false to not subscribe; + * @param subscribe @a true to subscribe to the queried account objects, @a false to not subscribe, * @a null to subscribe or not subscribe according to current auto-subscription setting * (see @ref set_auto_subscription) * @return Map of account names to corresponding IDs @@ -353,9 +353,9 @@ class database_api * @note In addition to the common auto-subscription rules, * this API will subscribe to the returned account only if @p limit is 1. 
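The commit above adds std::less<>, the standard transparent comparator, to these map return types. As a general C++ illustration (independent of this repository), a transparent comparator lets lookups accept any type that compares with the key, so no temporary std::string has to be built:

   #include <map>
   #include <string>
   #include <string_view>

   int main()
   {
      std::map< std::string, int, std::less<> > by_name{ { "alice", 1 }, { "bob", 2 } };
      std::string_view wanted = "alice";                 // not a std::string
      return by_name.find( wanted ) != by_name.end() ? 0 : 1;
   }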
*/ - map lookup_accounts( const string& lower_bound_name, + map> lookup_accounts( const string& lower_bound_name, uint32_t limit, - optional subscribe = optional() )const; + const optional& subscribe = optional() )const; ////////////// // Balances // @@ -414,7 +414,7 @@ class database_api /** * @brief Get a list of assets by symbol names or IDs * @param asset_symbols_or_ids symbol names or IDs of the assets to retrieve - * @param subscribe @a true to subscribe to the queried asset objects; @a false to not subscribe; + * @param subscribe @a true to subscribe to the queried asset objects, @a false to not subscribe, * @a null to subscribe or not subscribe according to current auto-subscription setting * (see @ref set_auto_subscription) * @return The assets corresponding to the provided symbol names or IDs @@ -802,7 +802,7 @@ class database_api * @param ids IDs of the liquidity pools, * the quantity should not be greater than the configured value of * @a api_limit_get_liquidity_pools - * @param subscribe @a true to subscribe to the queried objects; @a false to not subscribe; + * @param subscribe @a true to subscribe to the queried objects, @a false to not subscribe, * @a null to subscribe or not subscribe according to current auto-subscription setting * (see @ref set_auto_subscription) * @param with_statistics Whether to return statistics @@ -821,7 +821,7 @@ class database_api * @param asset_symbols_or_ids symbol names or IDs of the share assets, * the quantity should not be greater than the configured value of * @a api_limit_get_liquidity_pools - * @param subscribe @a true to subscribe to the queried objects; @a false to not subscribe; + * @param subscribe @a true to subscribe to the queried objects, @a false to not subscribe, * @a null to subscribe or not subscribe according to current auto-subscription setting * (see @ref set_auto_subscription) * @param with_statistics Whether to return statistics @@ -1370,7 +1370,7 @@ class database_api /** * @brief Get HTLC object * @param id HTLC contract id - * @param subscribe @a true to subscribe to the queried HTLC objects; @a false to not subscribe; + * @param subscribe @a true to subscribe to the queried HTLC objects, @a false to not subscribe, * @a null to subscribe or not subscribe according to current auto-subscription setting * (see @ref set_auto_subscription) * @return HTLC object for the id diff --git a/libraries/wallet/include/graphene/wallet/wallet.hpp b/libraries/wallet/include/graphene/wallet/wallet.hpp index 657d06d9b4..c7756e9c52 100644 --- a/libraries/wallet/include/graphene/wallet/wallet.hpp +++ b/libraries/wallet/include/graphene/wallet/wallet.hpp @@ -98,7 +98,7 @@ class wallet_api * @param limit the maximum number of accounts to return (max: 1000) * @returns a list of accounts mapping account names to account ids */ - map list_accounts(const string& lowerbound, uint32_t limit)const; + map> list_accounts(const string& lowerbound, uint32_t limit)const; /** List the balances of an account. * Each account can have multiple balances, one for each type of asset owned by that * account. 
The returned list will only contain assets for which the account has a diff --git a/libraries/wallet/wallet.cpp b/libraries/wallet/wallet.cpp index f3fe362b05..940e04d328 100644 --- a/libraries/wallet/wallet.cpp +++ b/libraries/wallet/wallet.cpp @@ -177,7 +177,7 @@ vector wallet_api::list_my_accounts()const return vector(my->_wallet.my_accounts.begin(), my->_wallet.my_accounts.end()); } -map wallet_api::list_accounts(const string& lowerbound, uint32_t limit)const +map> wallet_api::list_accounts(const string& lowerbound, uint32_t limit)const { return my->_remote_db->lookup_accounts(lowerbound, limit, {}); } From 7772d56d91ebaf33ea24aa531547ee3599fc7f09 Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 26 Oct 2022 16:00:47 +0000 Subject: [PATCH 305/338] Bump FC for std::map serialization update --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index ad7b28bca8..41f31f8c60 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit ad7b28bca8eebe68a594ed06ce1993975101e408 +Subproject commit 41f31f8c60561ef8f885f326abc21210cdd7db40 From 0fd16c863e98ae238b3db5ea8e0e71a2c69fdb01 Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 26 Oct 2022 18:08:36 +0000 Subject: [PATCH 306/338] Simplify help info in cli_wallet --- libraries/wallet/generate_api_documentation.pl | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/libraries/wallet/generate_api_documentation.pl b/libraries/wallet/generate_api_documentation.pl index a3f333db82..a515582396 100755 --- a/libraries/wallet/generate_api_documentation.pl +++ b/libraries/wallet/generate_api_documentation.pl @@ -91,6 +91,10 @@ END sub cleanupDoxygenType { my($type) = @_; + $type =~ s/std:://g; + $type =~ s/, less<>//g; + $type =~ s/const //g; + $type =~ s/ &//g; $type =~ s/< //>/g; return $type; From d41793670b301e375987d7a3613f3ecdbfa3aea2 Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 26 Oct 2022 19:13:41 +0000 Subject: [PATCH 307/338] Replace typedef with using --- .../wallet/include/graphene/wallet/api_documentation.hpp | 4 ++-- libraries/wallet/include/graphene/wallet/wallet_structs.hpp | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/libraries/wallet/include/graphene/wallet/api_documentation.hpp b/libraries/wallet/include/graphene/wallet/api_documentation.hpp index ab303ccaa1..42dbba2d3e 100644 --- a/libraries/wallet/include/graphene/wallet/api_documentation.hpp +++ b/libraries/wallet/include/graphene/wallet/api_documentation.hpp @@ -42,10 +42,10 @@ namespace graphene { namespace wallet { class api_documentation { - typedef boost::multi_index::multi_index_container > > > method_description_set; + boost::multi_index::member > > >; method_description_set method_descriptions; public: api_documentation(); diff --git a/libraries/wallet/include/graphene/wallet/wallet_structs.hpp b/libraries/wallet/include/graphene/wallet/wallet_structs.hpp index 7d5b33dff9..00dba9e19a 100644 --- a/libraries/wallet/include/graphene/wallet/wallet_structs.hpp +++ b/libraries/wallet/include/graphene/wallet/wallet_structs.hpp @@ -34,7 +34,7 @@ using std::vector; namespace graphene { namespace wallet { -typedef uint16_t transaction_handle_type; +using transaction_handle_type = uint16_t; struct plain_keys { From 8fbc1cd5f51648108bda7eec78164573ccd1d3ff Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 26 Oct 2022 19:18:43 +0000 Subject: [PATCH 308/338] Simplify wallet_api::borrow_asset() implementation --- libraries/wallet/wallet.cpp | 2 +- libraries/wallet/wallet_api_impl.hpp 
| 3 --- libraries/wallet/wallet_transfer.cpp | 7 ------- 3 files changed, 1 insertion(+), 11 deletions(-) diff --git a/libraries/wallet/wallet.cpp b/libraries/wallet/wallet.cpp index 940e04d328..bbaeac3659 100644 --- a/libraries/wallet/wallet.cpp +++ b/libraries/wallet/wallet.cpp @@ -1313,7 +1313,7 @@ signed_transaction wallet_api::borrow_asset(string seller_name, string amount_to string asset_symbol, string amount_of_collateral, bool broadcast)const { FC_ASSERT(!is_locked()); - return my->borrow_asset(seller_name, amount_to_sell, asset_symbol, amount_of_collateral, broadcast); + return my->borrow_asset_ext(seller_name, amount_to_sell, asset_symbol, amount_of_collateral, {}, broadcast); } signed_transaction wallet_api::borrow_asset_ext( string seller_name, string amount_to_sell, diff --git a/libraries/wallet/wallet_api_impl.hpp b/libraries/wallet/wallet_api_impl.hpp index e9e5810361..3a498dfe46 100644 --- a/libraries/wallet/wallet_api_impl.hpp +++ b/libraries/wallet/wallet_api_impl.hpp @@ -355,9 +355,6 @@ class wallet_api_impl string min_to_receive, string symbol_to_receive, uint32_t timeout_sec = 0, bool fill_or_kill = false, bool broadcast = false); - signed_transaction borrow_asset(string seller_name, string amount_to_borrow, string asset_symbol, - string amount_of_collateral, bool broadcast = false); - signed_transaction borrow_asset_ext( string seller_name, string amount_to_borrow, string asset_symbol, string amount_of_collateral, call_order_update_operation::extensions_type extensions, bool broadcast = false); diff --git a/libraries/wallet/wallet_transfer.cpp b/libraries/wallet/wallet_transfer.cpp index a18890624c..3707b11707 100644 --- a/libraries/wallet/wallet_transfer.cpp +++ b/libraries/wallet/wallet_transfer.cpp @@ -205,13 +205,6 @@ namespace graphene { namespace wallet { namespace detail { return sign_transaction( tx, broadcast ); } - signed_transaction wallet_api_impl::borrow_asset(string seller_name, string amount_to_borrow, - string asset_symbol, string amount_of_collateral, bool broadcast ) - { - return borrow_asset_ext( seller_name, amount_to_borrow, asset_symbol, amount_of_collateral, - {}, broadcast ); - } - signed_transaction wallet_api_impl::borrow_asset_ext( string seller_name, string amount_to_borrow, string asset_symbol, string amount_of_collateral, call_order_update_operation::extensions_type extensions, bool broadcast ) From 2d2bf9cee662fa3d91a9be789e297e2519c7fe58 Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 26 Oct 2022 21:32:10 +0000 Subject: [PATCH 309/338] Fix code smells --- libraries/app/database_api.cpp | 36 +- libraries/app/database_api_impl.hxx | 9 +- .../app/include/graphene/app/database_api.hpp | 13 +- .../wallet/include/graphene/wallet/wallet.hpp | 548 +++++++++--------- .../graphene/wallet/wallet_structs.hpp | 10 +- libraries/wallet/wallet.cpp | 513 ++++++++-------- libraries/wallet/wallet_api_impl.hpp | 3 +- libraries/wallet/wallet_results.cpp | 25 +- tests/tests/api_limit_tests.cpp | 6 +- 9 files changed, 592 insertions(+), 571 deletions(-) diff --git a/libraries/app/database_api.cpp b/libraries/app/database_api.cpp index a0dddc52b0..dfb1bac079 100644 --- a/libraries/app/database_api.cpp +++ b/libraries/app/database_api.cpp @@ -513,19 +513,14 @@ vector> database_api_impl::get_accounts( const vector database_api::get_full_accounts( const vector& names_or_ids, - optional subscribe ) +std::map> database_api::get_full_accounts( const vector& names_or_ids, + const optional& subscribe ) { return my->get_full_accounts( names_or_ids, subscribe ); } 
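The map return types being touched in this commit gain a std::less<> comparator, which makes the comparison transparent. A self-contained illustration of what that enables, with made-up map contents: find() can take a string_view or C string without building a temporary std::string key.

#include <iostream>
#include <map>
#include <string>
#include <string_view>

int main()
{
   // std::less<> is a transparent comparator, so find() accepts any type
   // that is comparable with the key type.
   std::map<std::string, int, std::less<>> accounts{ { "alice", 1 }, { "bob", 2 } };

   std::string_view name = "alice";
   auto it = accounts.find( name );   // heterogeneous lookup, no temporary string
   if( it != accounts.end() )
      std::cout << it->first << " -> " << it->second << "\n";
   return 0;
}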
-vector database_api::get_top_voters(uint32_t limit)const -{ - return my->get_top_voters( limit ); -} - -std::map database_api_impl::get_full_accounts( const vector& names_or_ids, - optional subscribe ) +std::map> database_api_impl::get_full_accounts( + const vector& names_or_ids, const optional& subscribe ) { FC_ASSERT( _app_options, "Internal error" ); const auto configured_limit = _app_options->api_limit_get_full_accounts; @@ -535,7 +530,7 @@ std::map database_api_impl::get_full_accounts( const bool to_subscribe = get_whether_to_subscribe( subscribe ); - std::map results; + std::map> results; for (const std::string& account_name_or_id : names_or_ids) { @@ -703,6 +698,11 @@ std::map database_api_impl::get_full_accounts( const return results; } +vector database_api::get_top_voters(uint32_t limit)const +{ + return my->get_top_voters( limit ); +} + vector database_api_impl::get_top_voters(uint32_t limit)const { FC_ASSERT( _app_options, "Internal error" ); @@ -2203,14 +2203,14 @@ fc::optional database_api_impl::get_witness_by_account(const std return {}; } -map database_api::lookup_witness_accounts( const string& lower_bound_name, - uint32_t limit )const +map> database_api::lookup_witness_accounts( const string& lower_bound_name, + uint32_t limit )const { return my->lookup_witness_accounts( lower_bound_name, limit ); } -map database_api_impl::lookup_witness_accounts( const string& lower_bound_name, - uint32_t limit )const +map> database_api_impl::lookup_witness_accounts( const string& lower_bound_name, + uint32_t limit )const { FC_ASSERT( _app_options, "Internal error" ); const auto configured_limit = _app_options->api_limit_lookup_witness_accounts; @@ -2226,7 +2226,7 @@ map database_api_impl::lookup_witness_accounts( const s // records to return. This could be optimized, but we expect the // number of witnesses to be few and the frequency of calls to be rare // TODO optimize - std::map witnesses_by_account_name; + std::map> witnesses_by_account_name; for (const witness_object& witness : witnesses_by_id) if (auto account_iter = _db.find(witness.witness_account)) if (account_iter->name >= lower_bound_name) // we can ignore anything below lower_bound_name @@ -2294,13 +2294,13 @@ fc::optional database_api_impl::get_committee_member_by return {}; } -map database_api::lookup_committee_member_accounts( +map> database_api::lookup_committee_member_accounts( const string& lower_bound_name, uint32_t limit )const { return my->lookup_committee_member_accounts( lower_bound_name, limit ); } -map database_api_impl::lookup_committee_member_accounts( +map> database_api_impl::lookup_committee_member_accounts( const string& lower_bound_name, uint32_t limit )const { FC_ASSERT( _app_options, "Internal error" ); @@ -2317,7 +2317,7 @@ map database_api_impl::lookup_committee_member // records to return. 
This could be optimized, but we expect the // number of committee_members to be few and the frequency of calls to be rare // TODO optimize - std::map committee_members_by_account_name; + std::map> committee_members_by_account_name; for (const committee_member_object& committee_member : committee_members_by_id) if (auto account_iter = _db.find(committee_member.committee_member_account)) if (account_iter->name >= lower_bound_name) // we can ignore anything below lower_bound_name diff --git a/libraries/app/database_api_impl.hxx b/libraries/app/database_api_impl.hxx index 240793f53b..37c358a2cc 100644 --- a/libraries/app/database_api_impl.hxx +++ b/libraries/app/database_api_impl.hxx @@ -71,8 +71,8 @@ class database_api_impl : public std::enable_shared_from_this // Accounts vector> get_accounts( const vector& account_names_or_ids, optional subscribe )const; - std::map get_full_accounts( const vector& names_or_ids, - optional subscribe ); + map> get_full_accounts( const vector& names_or_ids, + const optional& subscribe ); vector get_top_voters(uint32_t limit)const; optional get_account_by_name( string name )const; vector get_account_references( const std::string account_id_or_name )const; @@ -160,7 +160,8 @@ class database_api_impl : public std::enable_shared_from_this // Witnesses vector> get_witnesses(const vector& witness_ids)const; fc::optional get_witness_by_account(const std::string& account_id_or_name)const; - map lookup_witness_accounts(const string& lower_bound_name, uint32_t limit)const; + map> lookup_witness_accounts( + const string& lower_bound_name, uint32_t limit )const; uint64_t get_witness_count()const; // Committee members @@ -168,7 +169,7 @@ class database_api_impl : public std::enable_shared_from_this const vector& committee_member_ids )const; fc::optional get_committee_member_by_account( const std::string& account_id_or_name )const; - map lookup_committee_member_accounts( + map> lookup_committee_member_accounts( const string& lower_bound_name, uint32_t limit )const; uint64_t get_committee_count()const; diff --git a/libraries/app/include/graphene/app/database_api.hpp b/libraries/app/include/graphene/app/database_api.hpp index 73e858878b..b101dc84f0 100644 --- a/libraries/app/include/graphene/app/database_api.hpp +++ b/libraries/app/include/graphene/app/database_api.hpp @@ -306,8 +306,9 @@ class database_api * @a api_limit_get_full_accounts_lists option. Exceeded objects need to be queried with other APIs. 
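A short usage sketch for the signature above, assuming a connected database_api instance; the account name and id literals are placeholders:

#include <graphene/app/database_api.hpp>
#include <iostream>

// Sketch: fetch full account records for two accounts without subscribing.
void show_full_accounts( graphene::app::database_api& db_api )
{
   auto result = db_api.get_full_accounts( { "alice", "1.2.0" }, false );
   for( const auto& entry : result )
      std::cout << "received full data for " << entry.first << "\n";   // keys echo the request
}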
* */ - std::map get_full_accounts( const vector& names_or_ids, - optional subscribe = optional() ); + map> get_full_accounts( + const vector& names_or_ids, + const optional& subscribe = optional() ); /** * @brief Returns vector of voting power sorted by reverse vp_active @@ -1141,7 +1142,8 @@ class database_api * @a api_limit_lookup_witness_accounts * @return Map of witness names to corresponding IDs */ - map lookup_witness_accounts(const string& lower_bound_name, uint32_t limit)const; + map> lookup_witness_accounts( const string& lower_bound_name, + uint32_t limit )const; /** * @brief Get the total number of witnesses registered with the blockchain @@ -1176,8 +1178,9 @@ class database_api * @a api_limit_lookup_committee_member_accounts * @return Map of committee_member names to corresponding IDs */ - map lookup_committee_member_accounts( const string& lower_bound_name, - uint32_t limit )const; + map> lookup_committee_member_accounts( + const string& lower_bound_name, + uint32_t limit )const; /** * @brief Get the total number of committee registered with the blockchain diff --git a/libraries/wallet/include/graphene/wallet/wallet.hpp b/libraries/wallet/include/graphene/wallet/wallet.hpp index c7756e9c52..eaf740e9a7 100644 --- a/libraries/wallet/include/graphene/wallet/wallet.hpp +++ b/libraries/wallet/include/graphene/wallet/wallet.hpp @@ -55,12 +55,12 @@ class wallet_api std::shared_ptr my; // Methods - wallet_api( const wallet_data& initial_data, fc::api rapi ); + wallet_api( const wallet_data& initial_data, const fc::api& rapi ); virtual ~wallet_api(); - bool copy_wallet_file( string destination_filename )const; + bool copy_wallet_file( const string& destination_filename )const; - fc::ecc::private_key derive_private_key(const std::string& prefix_string, int sequence_number) const; + fc::ecc::private_key derive_private_key( const string& prefix_string, uint32_t sequence_number ) const; /** Returns info about head block, chain_id, maintenance, participation, current active witnesses and * committee members. @@ -98,7 +98,7 @@ class wallet_api * @param limit the maximum number of accounts to return (max: 1000) * @returns a list of accounts mapping account names to account ids */ - map> list_accounts(const string& lowerbound, uint32_t limit)const; + map> list_accounts( const string& lowerbound, uint32_t limit )const; /** List the balances of an account. * Each account can have multiple balances, one for each type of asset owned by that * account. The returned list will only contain assets for which the account has a @@ -106,7 +106,7 @@ class wallet_api * @param account_name_or_id the name or id of the account whose balances you want * @returns a list of the given account's balances */ - vector list_account_balances(const string& account_name_or_id)const; + vector list_account_balances( const string& account_name_or_id )const; /** Lists all assets registered on the blockchain. * * To list all assets, pass the empty string \c "" for the lowerbound to start @@ -116,7 +116,7 @@ class wallet_api * @param limit the maximum number of assets to return (max: 100) * @returns the list of asset objects, ordered by symbol */ - vector list_assets(const string& lowerbound, uint32_t limit)const; + vector list_assets( const string& lowerbound, uint32_t limit )const; /** Returns assets count registered on the blockchain. 
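Both list_accounts above and list_assets here page by lower bound: to walk the whole list, reissue the call with the last key seen as the new lower bound. A sketch of that loop against a wallet_api instance (the page size of 100 is arbitrary, and because the lower bound is inclusive the previous last entry comes back and is skipped):

#include <graphene/wallet/wallet.hpp>
#include <iostream>
#include <string>

// Sketch: print every registered account name, 100 at a time.
void print_all_account_names( graphene::wallet::wallet_api& wallet )
{
   std::string lowerbound;   // "" starts at the beginning of the list
   std::string last_seen;
   while( true )
   {
      auto page = wallet.list_accounts( lowerbound, 100u );   // name -> account id
      bool progressed = false;
      for( const auto& entry : page )
      {
         if( entry.first == last_seen )
            continue;         // the lower bound itself is returned again; skip it
         std::cout << entry.first << "\n";
         last_seen = entry.first;
         progressed = true;
      }
      if( !progressed || page.size() < 100u )
         break;               // a short page means the end of the list
      lowerbound = last_seen;
   }
}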
* * @returns assets count @@ -131,7 +131,7 @@ class wallet_api * @param limit the number of entries to return (starting from the most recent) * @returns a list of \c operation_history_objects */ - vector get_account_history(const string& account_name_or_id, uint32_t limit)const; + vector get_account_history( const string& account_name_or_id, uint32_t limit )const; /** Returns the relative operations on the named account from start number. * @@ -164,8 +164,8 @@ class wallet_api * @param end the end of the time range * @return A list of OHLCV data, in "least recent first" order. */ - vector get_market_history( string symbol, string symbol2, uint32_t bucket, - fc::time_point_sec start, fc::time_point_sec end )const; + vector get_market_history( const string& symbol, const string& symbol2, uint32_t bucket, + const time_point_sec& start, const time_point_sec& end )const; /** * @brief Fetch all orders relevant to the specified account sorted descendingly by price @@ -186,11 +186,11 @@ class wallet_api * otherwise the \c ostart_price will be used */ vector get_account_limit_orders( const string& name_or_id, - const string &base, - const string "e, + const string& base, + const string& quote, uint32_t limit = 101, - optional ostart_id = optional(), - optional ostart_price = optional())const; + const optional& ostart_id = {}, + const optional& ostart_price = optional() )const; /** * @brief Get limit orders in a given market @@ -199,7 +199,7 @@ class wallet_api * @param limit Maximum number of orders to retrieve * @return The limit orders, ordered from least price to greatest */ - vector get_limit_orders(string a, string b, uint32_t limit)const; + vector get_limit_orders( const string& a, const string& b, uint32_t limit )const; /** * @brief Get call orders (aka margin positions) for a given asset @@ -207,7 +207,7 @@ class wallet_api * @param limit Maximum number of orders to retrieve * @return The call orders, ordered from earliest to be called to latest */ - vector get_call_orders(string asset_symbol_or_id, uint32_t limit)const; + vector get_call_orders( const string& asset_symbol_or_id, uint32_t limit )const; /** * @brief Get forced settlement orders in a given asset @@ -215,7 +215,7 @@ class wallet_api * @param limit Maximum number of orders to retrieve * @return The settle orders, ordered from earliest settlement date to latest */ - vector get_settle_orders(string a, uint32_t limit)const; + vector get_settle_orders( const string& a, uint32_t limit )const; /** Returns the collateral_bid object for the given MPA * @@ -224,8 +224,8 @@ class wallet_api * @param start the sequence number where to start looping back throw the history * @returns a list of \c collateral_bid_objects */ - vector get_collateral_bids(string asset_symbol_or_id, uint32_t limit = 100, - uint32_t start = 0)const; + vector get_collateral_bids( const string& asset_symbol_or_id, uint32_t limit = 100, + uint32_t start = 0 )const; /** Returns the block chain's slowly-changing settings. * This object contains all of the properties of the blockchain that are fixed @@ -248,7 +248,7 @@ class wallet_api */ account_history_operation_detail get_account_history_by_operations( const string& account_name_or_id, const flat_set& operation_types, - uint32_t start, uint32_t limit)const; + uint32_t start, uint32_t limit )const; /** Returns the block chain's rapidly-changing properties. 
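A small sketch of pulling recent history through the wallet call above. It assumes the returned operation_detail records carry a human-readable description field, which is how cli_wallet presents them; treat that member name as an assumption rather than a documented guarantee.

#include <graphene/wallet/wallet.hpp>
#include <iostream>

// Sketch: show the ten most recent operations touching a (placeholder) account.
void show_recent_history( graphene::wallet::wallet_api& wallet )
{
   auto history = wallet.get_account_history( "alice", 10 );
   for( const auto& detail : history )
      std::cout << detail.description << "\n";   // assumed member, see note above
}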
* The returned object contains information that changes every block interval @@ -263,13 +263,13 @@ class wallet_api * @param account_name_or_id the name or ID of the account to provide information about * @returns the public account data stored in the blockchain */ - account_object get_account(string account_name_or_id) const; + account_object get_account( const string& account_name_or_id ) const; /** Returns information about the given asset. * @param asset_symbol_or_id the symbol or id of the asset in question * @returns the information about the asset stored in the block chain */ - extended_asset_object get_asset(string asset_symbol_or_id) const; + extended_asset_object get_asset( const string& asset_symbol_or_id ) const; /** Returns the BitAsset-specific data for a given asset. * Market-issued assets's behavior are determined both by their "BitAsset Data" and @@ -277,26 +277,26 @@ class wallet_api * @param asset_symbol_or_id the symbol or id of the BitAsset in question * @returns the BitAsset-specific data for this asset */ - asset_bitasset_data_object get_bitasset_data(string asset_symbol_or_id)const; + asset_bitasset_data_object get_bitasset_data( const string& asset_symbol_or_id )const; /** * Returns information about the given HTLC object. * @param htlc_id the id of the HTLC object. * @returns the information about the HTLC object */ - fc::optional get_htlc(const htlc_id_type& htlc_id) const; + optional get_htlc( const htlc_id_type& htlc_id ) const; /** Lookup the id of a named account. * @param account_name_or_id the name or ID of the account to look up * @returns the id of the named account */ - account_id_type get_account_id(string account_name_or_id) const; + account_id_type get_account_id( const string& account_name_or_id ) const; /** Lookup the name of an account. * @param account_name_or_id the name or ID of the account to look up * @returns the name of the account */ - string get_account_name(const string& account_name_or_id) const + string get_account_name( const string& account_name_or_id ) const { return get_account( account_name_or_id ).name; } /** @@ -304,14 +304,14 @@ class wallet_api * @param asset_symbol_or_id the symbol or ID of an asset to look up * @returns the id of the given asset */ - asset_id_type get_asset_id(const string& asset_symbol_or_id) const; + asset_id_type get_asset_id( const string& asset_symbol_or_id ) const; /** * Lookup the symbol of an asset. * @param asset_symbol_or_id the symbol or ID of an asset to look up * @returns the symbol of the given asset */ - string get_asset_symbol(const string& asset_symbol_or_id) const + string get_asset_symbol( const string& asset_symbol_or_id ) const { return get_asset( asset_symbol_or_id ).symbol; } /** @@ -319,7 +319,7 @@ class wallet_api * @param asset_symbol_or_id the symbol or ID of an asset to look up * @returns the symbol of the given asset */ - string get_asset_name(const string& asset_symbol_or_id) const + string get_asset_name( const string& asset_symbol_or_id ) const { return get_asset_symbol( asset_symbol_or_id ); } /** @@ -333,7 +333,7 @@ class wallet_api * @param id the id of the object to return * @returns the requested object */ - variant get_object(object_id_type id) const; + variant get_object( const object_id_type& id ) const; /** Returns the current wallet filename. 
* @@ -350,7 +350,7 @@ class wallet_api * @param pubkey a public key in Base58 format * @return the WIF private key */ - string get_private_key( public_key_type pubkey )const; + string get_private_key( const public_key_type& pubkey )const; /** * @ingroup Transaction Builder API @@ -366,7 +366,8 @@ class wallet_api * @param transaction_handle handle of the transaction builder * @param op the operation in JSON format */ - void add_operation_to_builder_transaction(transaction_handle_type transaction_handle, const operation& op)const; + void add_operation_to_builder_transaction( transaction_handle_type transaction_handle, + const operation& op )const; /** * @ingroup Transaction Builder API * @@ -375,9 +376,9 @@ class wallet_api * @param operation_index the index of the old operation in the builder to be replaced * @param new_op the new operation in JSON format */ - void replace_operation_in_builder_transaction(transaction_handle_type handle, - unsigned operation_index, - const operation& new_op)const; + void replace_operation_in_builder_transaction( transaction_handle_type handle, + uint32_t operation_index, + const operation& new_op )const; /** * @ingroup Transaction Builder API * @@ -386,7 +387,8 @@ class wallet_api * @param fee_asset symbol or ID of an asset that to be used to pay fees * @return total fees */ - asset set_fees_on_builder_transaction(transaction_handle_type handle, string fee_asset = GRAPHENE_SYMBOL)const; + asset set_fees_on_builder_transaction( transaction_handle_type handle, + const string& fee_asset = GRAPHENE_SYMBOL )const; /** * @ingroup Transaction Builder API * @@ -394,7 +396,7 @@ class wallet_api * @param handle handle of the transaction builder * @return a transaction */ - transaction preview_builder_transaction(transaction_handle_type handle)const; + transaction preview_builder_transaction( transaction_handle_type handle )const; /** * @ingroup Transaction Builder API * @@ -403,8 +405,8 @@ class wallet_api * @param broadcast whether to broadcast the signed transaction to the network * @return a signed transaction */ - signed_transaction sign_builder_transaction(transaction_handle_type transaction_handle, - bool broadcast = true)const; + signed_transaction sign_builder_transaction( transaction_handle_type transaction_handle, + bool broadcast = true )const; /** * @ingroup Transaction Builder API @@ -415,15 +417,15 @@ class wallet_api * @param broadcast whether to broadcast the signed transaction to the network * @return a signed transaction */ - signed_transaction sign_builder_transaction2(transaction_handle_type transaction_handle, + signed_transaction sign_builder_transaction2( transaction_handle_type transaction_handle, const vector& signing_keys = vector(), - bool broadcast = true)const; + bool broadcast = true )const; /** Broadcast signed transaction * @param tx signed transaction * @returns the transaction ID along with the signed transaction. 
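Putting the builder calls above together, the usual flow is: open a builder, add one or more operations, let the wallet attach fees, then sign and broadcast. A sketch under the assumption of an unlocked wallet, the usual begin_builder_transaction() entry point, and placeholder account names; the amount is in satoshis of the core asset.

#include <graphene/wallet/wallet.hpp>

using namespace graphene::wallet;
using namespace graphene::chain;

// Sketch: build, fee, sign and broadcast a single-operation transaction.
signed_transaction build_and_send_transfer( wallet_api& wallet )
{
   transaction_handle_type handle = wallet.begin_builder_transaction();

   transfer_operation op;                          // placeholder operation
   op.from   = wallet.get_account_id( "alice" );
   op.to     = wallet.get_account_id( "bob" );
   op.amount = asset( 10000 );                     // 10000 satoshis of the core asset

   wallet.add_operation_to_builder_transaction( handle, op );
   wallet.set_fees_on_builder_transaction( handle, GRAPHENE_SYMBOL );
   return wallet.sign_builder_transaction( handle, true /* broadcast */ );
}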
*/ - pair broadcast_transaction(signed_transaction tx)const; + pair broadcast_transaction( const signed_transaction& tx )const; /** * @ingroup Transaction Builder API @@ -443,7 +445,7 @@ class wallet_api */ signed_transaction propose_builder_transaction( transaction_handle_type handle, - time_point_sec expiration = time_point::now() + fc::minutes(1), + const time_point_sec& expiration = time_point::now() + fc::minutes(1), uint32_t review_period_seconds = 0, bool broadcast = true )const; @@ -464,8 +466,8 @@ class wallet_api */ signed_transaction propose_builder_transaction2( transaction_handle_type handle, - string account_name_or_id, - time_point_sec expiration = time_point::now() + fc::minutes(1), + const string& account_name_or_id, + const time_point_sec& expiration = time_point::now() + fc::minutes(1), uint32_t review_period_seconds = 0, bool broadcast = true )const; @@ -510,7 +512,7 @@ class wallet_api * @param password the password previously set with \c set_password() * @ingroup Wallet Management */ - void unlock(string password)const; + void unlock( const string& password )const; /** Sets a new password on the wallet. * @@ -523,7 +525,7 @@ class wallet_api * @param password a new password * @ingroup Wallet Management */ - void set_password(string password)const; + void set_password( const string& password )const; /** Dumps all private keys owned by the wallet. * @@ -546,7 +548,7 @@ class wallet_api * @param method the name of the API command you want help with * @returns a multi-line string suitable for displaying on a terminal */ - string gethelp(const string& method)const; + string gethelp( const string& method )const; /** Loads a specified BitShares wallet. * @@ -561,7 +563,7 @@ class wallet_api * existing wallet file * @returns true if the specified wallet is loaded */ - bool load_wallet_file(string wallet_filename = "")const; + bool load_wallet_file( const string& wallet_filename = "" )const; /** Quit from the wallet. * @@ -579,7 +581,7 @@ class wallet_api * or overwrite. If \c wallet_filename is empty, * save to the current filename. */ - void save_wallet_file(string wallet_filename = "")const; + void save_wallet_file( const string& wallet_filename = "" )const; /** Sets the wallet filename used for future writes. * @@ -588,7 +590,7 @@ class wallet_api * * @param wallet_filename the new filename to use for future saves */ - void set_wallet_filename(string wallet_filename)const; + void set_wallet_filename( const string& wallet_filename )const; /** Suggests a safe brain key to use for creating your account. * \c create_account_with_brain_key() requires you to specify a 'brain key', @@ -611,7 +613,8 @@ class wallet_api * @param number_of_desired_keys Number of desired keys * @return A list of keys that are deterministically derived from the brainkey */ - vector derive_owner_keys_from_brain_key(string brain_key, int number_of_desired_keys = 1) const; + vector derive_owner_keys_from_brain_key( const string& brain_key, + uint32_t number_of_desired_keys = 1 ) const; /** * Determine whether a textual representation of a public key @@ -620,16 +623,14 @@ class wallet_api * @param public_key Public key * @return Whether a public key is known */ - bool is_public_key_registered(string public_key) const; + bool is_public_key_registered( const string& public_key ) const; /** Converts a signed_transaction in JSON form to its binary representation. * * @param tx the transaction to serialize - * @returns the binary form of the transaction. 
It will not be hex encoded, - * this returns a raw string that may have null characters embedded - * in it + * @returns the binary form of the transaction, hex encoded. */ - string serialize_transaction(signed_transaction tx) const; + string serialize_transaction( const signed_transaction& tx ) const; /** Imports the private key for an existing account. * @@ -642,7 +643,7 @@ class wallet_api * @param wif_key the private key in WIF format * @returns true if the key was imported */ - bool import_key(string account_name_or_id, string wif_key)const; + bool import_key( const string& account_name_or_id, const string& wif_key )const; /** Imports accounts from a BitShares 0.x wallet file. * Current wallet file must be unlocked to perform the import. @@ -651,7 +652,7 @@ class wallet_api * @param password the password to encrypt the BitShares 0.x wallet file * @returns a map containing the accounts found and whether imported */ - map import_accounts( string filename, string password )const; + map> import_accounts( const string& filename, const string& password )const; /** Imports from a BitShares 0.x wallet file, find keys that were bound to a given account name on the * BitShares 0.x chain, rebind them to an account name on the 2.0 chain. @@ -664,8 +665,8 @@ class wallet_api * can be same or different to \c src_account_name * @returns whether the import has succeeded */ - bool import_account_keys( string filename, string password, - string src_account_name, string dest_account_name )const; + bool import_account_keys( const string& filename, const string& password, + const string& src_account_name, const string& dest_account_name )const; /** * This call will construct transaction(s) that will claim all balances controled @@ -675,7 +676,7 @@ class wallet_api * @param wif_keys private WIF keys of balance objects to claim balances from * @param broadcast true to broadcast the transaction on the network */ - vector< signed_transaction > import_balance( string account_name_or_id, const vector& wif_keys, + vector< signed_transaction > import_balance( const string& account_name_or_id, const vector& wif_keys, bool broadcast )const; /** Transforms a brain key to reduce the chance of errors when re-entering the key from memory. @@ -686,7 +687,7 @@ class wallet_api * @param s the brain key as supplied by the user * @returns the brain key in its normalized form */ - string normalize_brain_key(string s) const; + string normalize_brain_key( const string& s ) const; /** Registers a third party's account on the blockckain. * @@ -713,13 +714,13 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction registering the account */ - signed_transaction register_account(string name, - public_key_type owner, - public_key_type active, - string registrar_account, - string referrer_account, - uint32_t referrer_percent, - bool broadcast = false)const; + signed_transaction register_account( const string& name, + const public_key_type& owner, + const public_key_type& active, + const string& registrar_account, + const string& referrer_account, + uint32_t referrer_percent, + bool broadcast = false )const; /** * Upgrades an account to prime status. 
@@ -729,7 +730,7 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction upgrading the account */ - signed_transaction upgrade_account(string account_name_or_id, bool broadcast)const; + signed_transaction upgrade_account( const string& account_name_or_id, bool broadcast )const; /** Creates a new account and registers it on the blockchain. * @@ -751,11 +752,11 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction registering the account */ - signed_transaction create_account_with_brain_key(string brain_key, - string account_name, - string registrar_account, - string referrer_account, - bool broadcast = false)const; + signed_transaction create_account_with_brain_key( const string& brain_key, + const string& account_name, + const string& registrar_account, + const string& referrer_account, + bool broadcast = false )const; /** Transfer an amount from one account to another. * @param from the name or id of the account sending the funds @@ -769,12 +770,12 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction transferring funds */ - signed_transaction transfer(string from, - string to, - string amount, - string asset_symbol_or_id, - string memo, - bool broadcast = false)const; + signed_transaction transfer( const string& from, + const string& to, + const string& amount, + const string& asset_symbol_or_id, + const string& memo, + bool broadcast = false )const; /** * This method works just like transfer, except it always broadcasts and @@ -789,11 +790,11 @@ class wallet_api * increase with transaction size * @returns the transaction ID (hash) along with the signed transaction transferring funds */ - pair transfer2(const string& from, - const string& to, - const string& amount, - const string& asset_symbol_or_id, - const string& memo ) const { + pair transfer2( const string& from, + const string& to, + const string& amount, + const string& asset_symbol_or_id, + const string& memo ) const { auto trx = transfer( from, to, amount, asset_symbol_or_id, memo, true ); return std::make_pair(trx.id(),trx); } @@ -814,14 +815,14 @@ class wallet_api * @param memo text to sign * @return the signed memo data */ - memo_data sign_memo(string from, string to, string memo)const; + memo_data sign_memo( const string& from, const string& to, const string& memo )const; /** Read a memo. * * @param memo JSON-encoded memo. * @returns string with decrypted message. */ - string read_memo(const memo_data& memo)const; + string read_memo( const memo_data& memo )const; /** Sign a message using an account's memo key. The signature is generated as in @@ -831,7 +832,7 @@ class wallet_api * @param message text to sign * @return the signed message in an abstract format */ - signed_message sign_message(string signer, string message)const; + signed_message sign_message( const string& signer, const string& message )const; /** Verify a message signed with sign_message using the given account's memo key. * @@ -850,14 +851,14 @@ class wallet_api * @param message the signed_message structure containing message, meta data and signature * @return true if signature matches */ - bool verify_signed_message( signed_message message )const; + bool verify_signed_message( const signed_message& message )const; /** Verify a message signed with sign_message, in its encapsulated form. 
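A usage sketch for the two calls above, with placeholder names, amount and asset symbol; it assumes an unlocked wallet holding the needed keys.

#include <graphene/wallet/wallet.hpp>

// Sketch: send 3.14 USD from alice to bob, with and without returning the txid.
void send_payments( graphene::wallet::wallet_api& wallet )
{
   // transfer() signs and (here) broadcasts, returning the signed transaction
   auto tx = wallet.transfer( "alice", "bob", "3.14", "USD", "thanks for lunch", true );
   (void)tx;

   // transfer2() always broadcasts and additionally returns the transaction id
   auto id_and_tx = wallet.transfer2( "alice", "bob", "3.14", "USD", "thanks again" );
   (void)id_and_tx;   // .first is the transaction id (hash), .second the signed transaction
}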
* * @param message the complete encapsulated message string including separators and line feeds * @return true if signature matches */ - bool verify_encapsulated_message( string message )const; + bool verify_encapsulated_message( const string& message )const; /** These methods are used for stealth transfers */ ///@{ @@ -869,14 +870,14 @@ class wallet_api * @param label a user-defined string as label * @return true if the label was set, otherwise false */ - bool set_key_label( public_key_type key, string label )const; + bool set_key_label( const public_key_type& key, const string& label )const; /** * Get label of a public key. * @param key a public key * @return the label if already set by \c set_key_label(), or an empty string if not set */ - string get_key_label( public_key_type key )const; + string get_key_label( const public_key_type& key )const; /** * Generates a new blind account for the given brain key and assigns it the given label. @@ -884,7 +885,7 @@ class wallet_api * @param brain_key the brain key to be used to generate a new blind account * @return the public key of the new account */ - public_key_type create_blind_account( string label, string brain_key )const; + public_key_type create_blind_account( const string& label, const string& brain_key )const; /** * Return the total balances of all blinded commitments that can be claimed by the @@ -893,23 +894,23 @@ class wallet_api * @return the total balances of all blinded commitments that can be claimed by the * given account key or label */ - vector get_blind_balances( string key_or_label )const; + vector get_blind_balances( const string& key_or_label )const; /** * Get all blind accounts. * @return all blind accounts */ - map get_blind_accounts()const; + map> get_blind_accounts()const; /** * Get all blind accounts for which this wallet has the private key. * @return all blind accounts for which this wallet has the private key */ - map get_my_blind_accounts()const; + map> get_my_blind_accounts()const; /** * Get the public key associated with a given label. 
* @param label a label * @return the public key associated with the given label */ - public_key_type get_public_key( string label )const; + public_key_type get_public_key( const string& label )const; ///@} /** @@ -917,7 +918,7 @@ class wallet_api * @param key_or_account a public key in Base58 format or an account * @return all blind receipts to/form the account */ - vector blind_history( string key_or_account )const; + vector blind_history( const string& key_or_account )const; /** * Given a confirmation receipt, this method will parse it for a blinded balance and confirm @@ -930,7 +931,9 @@ class wallet_api * @param opt_memo a self-defined label for this transfer to be saved in local wallet file * @return a blind receipt */ - blind_receipt receive_blind_transfer( string confirmation_receipt, string opt_from, string opt_memo )const; + blind_receipt receive_blind_transfer( const string& confirmation_receipt, + const string& opt_from, + const string& opt_memo )const; /** * Transfers a public balance from \c from_account_name_or_id to one or more blinded balances using a @@ -941,9 +944,9 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @return a blind confirmation */ - blind_confirmation transfer_to_blind( string from_account_name_or_id, - string asset_symbol_or_id, - vector> to_amounts, + blind_confirmation transfer_to_blind( const string& from_account_name_or_id, + const string& asset_symbol_or_id, + const vector>& to_amounts, bool broadcast = false )const; /** @@ -955,12 +958,11 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @return a blind confirmation */ - blind_confirmation transfer_from_blind( - string from_blind_account_key_or_label, - string to_account_name_or_id, - string amount, - string asset_symbol_or_id, - bool broadcast = false )const; + blind_confirmation transfer_from_blind( const string& from_blind_account_key_or_label, + const string& to_account_name_or_id, + const string& amount, + const string& asset_symbol_or_id, + bool broadcast = false )const; /** * Transfer from one set of blinded balances to another. @@ -971,10 +973,10 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @return a blind confirmation */ - blind_confirmation blind_transfer( string from_key_or_label, - string to_key_or_label, - string amount, - string symbol_or_id, + blind_confirmation blind_transfer( const string& from_key_or_label, + const string& to_key_or_label, + const string& amount, + const string& symbol_or_id, bool broadcast = false )const; /** Place a limit order attempting to sell one asset for another. @@ -1017,14 +1019,14 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction selling the funds */ - signed_transaction sell_asset(string seller_account, - string amount_to_sell, - string symbol_or_id_to_sell, - string min_to_receive, - string symbol_or_id_to_receive, - uint32_t timeout_sec = 0, - bool fill_or_kill = false, - bool broadcast = false)const; + signed_transaction sell_asset( const string& seller_account, + const string& amount_to_sell, + const string& symbol_or_id_to_sell, + const string& min_to_receive, + const string& symbol_or_id_to_receive, + uint32_t timeout_sec = 0, + bool fill_or_kill = false, + bool broadcast = false )const; /** Borrow an asset or update the debt/collateral ratio for the loan. 
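A sketch of placing the limit order described above; the account, the amounts and the USD symbol are placeholders, and the core asset symbol comes from GRAPHENE_SYMBOL.

#include <graphene/wallet/wallet.hpp>

// Sketch: offer 100 of the core asset for at least 5 USD, expiring after one hour.
void place_order( graphene::wallet::wallet_api& wallet )
{
   wallet.sell_asset( "alice",
                      "100", GRAPHENE_SYMBOL,   // amount and asset to sell
                      "5",   "USD",             // minimum to receive and its asset
                      3600,                     // timeout_sec: cancel after an hour
                      false,                    // fill_or_kill
                      true );                   // broadcast
}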
* @@ -1040,8 +1042,9 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction borrowing the asset */ - signed_transaction borrow_asset(string borrower, string amount_to_borrow, string asset_symbol_or_id, - string amount_of_collateral, bool broadcast = false)const; + signed_transaction borrow_asset( const string& borrower, const string& amount_to_borrow, + const string& asset_symbol_or_id, + const string& amount_of_collateral, bool broadcast = false )const; /** Borrow an asset or update the debt/collateral ratio for the loan, with additional options. * @@ -1058,9 +1061,10 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction borrowing the asset */ - signed_transaction borrow_asset_ext( string borrower, string amount_to_borrow, string asset_symbol_or_id, - string amount_of_collateral, - call_order_update_operation::extensions_type extensions, + signed_transaction borrow_asset_ext( const string& borrower, const string& amount_to_borrow, + const string& asset_symbol_or_id, + const string& amount_of_collateral, + const call_order_update_operation::extensions_type& extensions, bool broadcast = false )const; /** Cancel an existing order @@ -1069,7 +1073,7 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction canceling the order */ - signed_transaction cancel_order(const limit_order_id_type& order_id, bool broadcast = false) const; + signed_transaction cancel_order( const limit_order_id_type& order_id, bool broadcast = false ) const; /** Creates a new user-issued or market-issued asset. * @@ -1093,12 +1097,12 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction creating a new asset */ - signed_transaction create_asset(string issuer, - string symbol, - uint8_t precision, - asset_options common, - fc::optional bitasset_opts, - bool broadcast = false)const; + signed_transaction create_asset( const string& issuer, + const string& symbol, + uint8_t precision, + const asset_options& common, + const optional& bitasset_opts, + bool broadcast = false )const; /** Create the specified amount of the specified asset and credit into the specified account. * @@ -1109,10 +1113,10 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction issuing the new supply */ - signed_transaction issue_asset(string to_account, string amount, - string symbol_or_id, - string memo, - bool broadcast = false)const; + signed_transaction issue_asset( const string& to_account, const string& amount, + const string& symbol_or_id, + const string& memo, + bool broadcast = false )const; /** Update the core options on an asset. * There are a number of options which all assets in the network use. 
These options are @@ -1130,10 +1134,10 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction updating the asset */ - signed_transaction update_asset(string symbol_or_id, - optional new_issuer, - asset_options new_options, - bool broadcast = false)const; + signed_transaction update_asset( const string& symbol_or_id, + const optional& new_issuer, + const asset_options& new_options, + bool broadcast = false )const; /** Update the issuer of an asset * Since this call requires the owner authority of the current issuer to sign the transaction, @@ -1146,14 +1150,14 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction updating the asset */ - signed_transaction update_asset_issuer(string symbol_or_id, - string new_issuer, - bool broadcast = false)const; + signed_transaction update_asset_issuer( const string& symbol_or_id, + const string& new_issuer, + bool broadcast = false )const; /** Update the options specific to a BitAsset. * - * BitAssets have some options which are not relevant to other asset types. This operation is used to update those - * options an an existing BitAsset. + * BitAssets have some options which are not relevant to other asset types. + * This operation is used to update those options an an existing BitAsset. * * @see update_asset() * @@ -1163,13 +1167,14 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction updating the bitasset */ - signed_transaction update_bitasset(string symbol_or_id, - bitasset_options new_options, - bool broadcast = false)const; + signed_transaction update_bitasset( const string& symbol_or_id, + const bitasset_options& new_options, + bool broadcast = false )const; /** Update the set of feed-producing accounts for a BitAsset. * - * BitAssets have price feeds selected by taking the median values of recommendations from a set of feed producers. + * BitAssets have price feeds selected by taking the median values of recommendations from + * a set of feed producers. * This command is used to specify which accounts may produce feeds for a given BitAsset. * @param symbol_or_id the symbol or id of the asset to update * @param new_feed_producers a list of account names or ids which are authorized to produce feeds for the asset. @@ -1177,23 +1182,25 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction updating the bitasset's feed producers */ - signed_transaction update_asset_feed_producers(string symbol_or_id, - flat_set new_feed_producers, - bool broadcast = false)const; + signed_transaction update_asset_feed_producers( const string& symbol_or_id, + const flat_set& new_feed_producers, + bool broadcast = false )const; /** Publishes a price feed for the named asset. * * Price feed providers use this command to publish their price feeds for market-issued assets. A price feed is - * used to tune the market for a particular market-issued asset. For each value in the feed, the median across all - * committee_member feeds for that asset is calculated and the market for the asset is configured with the median of that - * value. - * - * The feed object in this command contains three prices: a call price limit, a short price limit, and a settlement price. 
- * The call limit price is structured as (collateral asset) / (debt asset) and the short limit price is structured - * as (asset for sale) / (collateral asset). Note that the asset IDs are opposite to eachother, so if we're - * publishing a feed for USD, the call limit price will be CORE/USD and the short limit price will be USD/CORE. The - * settlement price may be flipped either direction, as long as it is a ratio between the market-issued asset and - * its collateral. + * used to tune the market for a particular market-issued asset. For each value in the feed, the median across + * all committee_member feeds for that asset is calculated and the market for the asset is configured with the + * median of that value. + * + * The feed object in this command contains three prices: a call price limit, a short price limit, + * and a settlement price. + * The call limit price is structured as (collateral asset) / (debt asset) and the short limit price is + * structured as (asset for sale) / (collateral asset). + * Note that the asset IDs are opposite to eachother, so if we're + * publishing a feed for USD, the call limit price will be CORE/USD and the short limit price will be USD/CORE. + * The settlement price may be flipped either direction, as long as it is a ratio between the market-issued + * asset and its collateral. * * @param publishing_account the account publishing the price feed * @param symbol_or_id the symbol or id of the asset whose feed we're publishing @@ -1201,10 +1208,10 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction updating the price feed for the given asset */ - signed_transaction publish_asset_feed(string publishing_account, - string symbol_or_id, - price_feed feed, - bool broadcast = false)const; + signed_transaction publish_asset_feed( const string& publishing_account, + const string& symbol_or_id, + const price_feed& feed, + bool broadcast = false )const; /** Pay into the fee pool for the given asset. * @@ -1220,10 +1227,10 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction funding the fee pool */ - signed_transaction fund_asset_fee_pool(string from, - string symbol_or_id, - string amount, - bool broadcast = false)const; + signed_transaction fund_asset_fee_pool( const string& from, + const string& symbol_or_id, + const string& amount, + bool broadcast = false )const; /** Claim funds from the fee pool for the given asset. * @@ -1238,9 +1245,9 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction claiming from the fee pool */ - signed_transaction claim_asset_fee_pool(string symbol_or_id, - string amount, - bool broadcast = false)const; + signed_transaction claim_asset_fee_pool( const string& symbol_or_id, + const string& amount, + bool broadcast = false )const; /** Burns an amount of given asset to its reserve pool. * @@ -1252,10 +1259,10 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction burning the asset */ - signed_transaction reserve_asset(string from, - string amount, - string symbol_or_id, - bool broadcast = false)const; + signed_transaction reserve_asset( const string& from, + const string& amount, + const string& symbol_or_id, + bool broadcast = false )const; /** Forces a global settling of the given asset (black swan or prediction markets). 
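To make the price orientation above concrete, here is a sketch of publishing a feed for a hypothetical USD BitAsset against the core asset. The price_feed member names and the ratio units (thousandths, so 1750 means 175%) are stated from memory and should be checked against the headers; the account name, symbol and asset precisions are placeholders.

#include <graphene/wallet/wallet.hpp>

using namespace graphene::wallet;
using namespace graphene::chain;

// Sketch: publish a feed saying 1 USD settles for 20 of the core asset.
void publish_usd_feed( wallet_api& wallet )
{
   asset_id_type usd  = wallet.get_asset_id( "USD" );
   asset_id_type core = wallet.get_asset_id( GRAPHENE_SYMBOL );

   price_feed feed;
   feed.settlement_price.base  = asset( 1 * 10000,   usd );    // 1 USD, assuming 4 decimals
   feed.settlement_price.quote = asset( 20 * 100000, core );   // 20 core, assuming 5 decimals
   feed.core_exchange_rate     = feed.settlement_price;
   feed.maintenance_collateral_ratio = 1750;   // 175%, assumed to be in thousandths
   feed.maximum_short_squeeze_ratio  = 1100;   // 110%

   wallet.publish_asset_feed( "feed-producer", "USD", feed, true /* broadcast */ );
}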
* @@ -1274,9 +1281,9 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction settling the named asset */ - signed_transaction global_settle_asset(string symbol_or_id, - price settle_price, - bool broadcast = false)const; + signed_transaction global_settle_asset( const string& symbol_or_id, + const price& settle_price, + bool broadcast = false )const; /** Schedules a market-issued asset for automatic settlement. * @@ -1296,10 +1303,10 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction settling the named asset */ - signed_transaction settle_asset(string account_to_settle, - string amount_to_settle, - string symbol_or_id, - bool broadcast = false)const; + signed_transaction settle_asset( const string& account_to_settle, + const string& amount_to_settle, + const string& symbol_or_id, + bool broadcast = false )const; /** Creates or updates a bid on an MPA after global settlement. * @@ -1317,8 +1324,9 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction creating/updating the bid */ - signed_transaction bid_collateral(string bidder, string debt_amount, string debt_symbol_or_id, - string additional_collateral, bool broadcast = false)const; + signed_transaction bid_collateral( const string& bidder, const string& debt_amount, + const string& debt_symbol_or_id, + const string& additional_collateral, bool broadcast = false )const; /** Whitelist and blacklist accounts, primarily for transacting in whitelisted assets. * @@ -1340,10 +1348,10 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction changing the whitelisting status */ - signed_transaction whitelist_account(string authorizing_account, - string account_to_list, - account_whitelist_operation::account_listing new_listing_status, - bool broadcast = false)const; + signed_transaction whitelist_account( const string& authorizing_account, + const string& account_to_list, + account_whitelist_operation::account_listing new_listing_status, + bool broadcast = false )const; /** Creates a committee_member object owned by the given account. * @@ -1355,9 +1363,9 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction registering a committee_member */ - signed_transaction create_committee_member(string owner_account, - string url, - bool broadcast = false)const; + signed_transaction create_committee_member( const string& owner_account, + const string& url, + bool broadcast = false )const; /** Lists all witnesses registered in the blockchain. * This returns a list of all account names that own witnesses, and the associated witness id, @@ -1372,7 +1380,7 @@ class wallet_api * @param limit the maximum number of witnesss to return (max: 1000) * @returns a list of witnesss mapping witness names to witness ids */ - map list_witnesses(const string& lowerbound, uint32_t limit)const; + map> list_witnesses( const string& lowerbound, uint32_t limit )const; /** Lists all committee_members registered in the blockchain. 
* This returns a list of all account names that own committee_members, and the associated committee_member id, @@ -1387,19 +1395,20 @@ class wallet_api * @param limit the maximum number of committee_members to return (max: 1000) * @returns a list of committee_members mapping committee_member names to committee_member ids */ - map list_committee_members(const string& lowerbound, uint32_t limit)const; + map> list_committee_members( + const string& lowerbound, uint32_t limit )const; /** Returns information about the given witness. * @param owner_account the name or id of the witness account owner, or the id of the witness * @returns the information about the witness stored in the block chain */ - witness_object get_witness(string owner_account)const; + witness_object get_witness( const string& owner_account )const; /** Returns information about the given committee_member. * @param owner_account the name or id of the committee_member account owner, or the id of the committee_member * @returns the information about the committee_member stored in the block chain */ - committee_member_object get_committee_member(string owner_account)const; + committee_member_object get_committee_member( const string& owner_account )const; /** Creates a witness object owned by the given account. * @@ -1411,9 +1420,9 @@ class wallet_api * @param broadcast true to broadcast the transaction on the network * @returns the signed transaction registering a witness */ - signed_transaction create_witness(string owner_account, - string url, - bool broadcast = false)const; + signed_transaction create_witness( const string& owner_account, + const string& url, + bool broadcast = false )const; /** * Update a witness object owned by the given account. @@ -1425,10 +1434,10 @@ class wallet_api * @param broadcast true if you wish to broadcast the transaction. * @return the signed transaction */ - signed_transaction update_witness(string witness_name, - string url, - string block_signing_key, - bool broadcast = false)const; + signed_transaction update_witness( const string& witness_name, + const string& url, + const string& block_signing_key, + bool broadcast = false )const; /** @@ -1445,13 +1454,13 @@ class wallet_api * @return the signed transaction */ signed_transaction create_worker( - string owner_account, - time_point_sec work_begin_date, - time_point_sec work_end_date, - share_type daily_pay, - string name, - string url, - variant worker_settings, + const string& owner_account, + const time_point_sec& work_begin_date, + const time_point_sec& work_end_date, + const share_type& daily_pay, + const string& name, + const string& url, + const variant& worker_settings, bool broadcast = false )const; @@ -1464,8 +1473,8 @@ class wallet_api * @return the signed transaction */ signed_transaction update_worker_votes( - string account, - worker_vote_delta delta, + const string& account, + const worker_vote_delta& delta, bool broadcast = false )const; @@ -1476,7 +1485,7 @@ class wallet_api * @param destination The account that will receive the funds if the preimage is presented * @param amount the amount of the asset that is to be traded * @param asset_symbol_or_id The asset that is to be traded - * @param hash_algorithm the algorithm used to generate the hash from the preimage. Can be RIPEMD160, SHA1 or SHA256. + * @param hash_algorithm the algorithm used to generate the hash from the preimage. Can be RIPEMD160 or SHA256. 
* @param preimage_hash the hash of the preimage * @param preimage_size the size of the preimage in bytes * @param claim_period_seconds how long after creation until the lock expires @@ -1487,7 +1496,7 @@ class wallet_api signed_transaction htlc_create( const string& source, const string& destination, const string& amount, const string& asset_symbol_or_id, const string& hash_algorithm, const string& preimage_hash, uint32_t preimage_size, - uint32_t claim_period_seconds, const string& memo, bool broadcast = false) const; + uint32_t claim_period_seconds, const string& memo, bool broadcast = false ) const; /**** * Update a hashed time lock contract @@ -1497,7 +1506,7 @@ class wallet_api * @param preimage the preimage that should evaluate to the preimage_hash * @return the signed transaction */ - signed_transaction htlc_redeem( const htlc_id_type& htlc_id, const string& issuer, const std::string& preimage, + signed_transaction htlc_redeem( const htlc_id_type& htlc_id, const string& issuer, const string& preimage, bool broadcast = false ) const; /***** @@ -1510,7 +1519,7 @@ class wallet_api * @return the signed transaction */ signed_transaction htlc_extend( const htlc_id_type& htlc_id, const string& issuer, uint32_t seconds_to_add, - bool broadcast = false) const; + bool broadcast = false ) const; /** * Get information about a vesting balance object or vesting balance objects owned by an account. @@ -1518,7 +1527,7 @@ class wallet_api * @param account_name An account name, account ID, or vesting balance object ID. * @return a list of vesting balance objects with additional info */ - vector< vesting_balance_object_with_info > get_vesting_balances( string account_name )const; + vector< vesting_balance_object_with_info > get_vesting_balances( const string& account_name )const; /** * Withdraw a vesting balance. @@ -1530,10 +1539,10 @@ class wallet_api * @return the signed transaction */ signed_transaction withdraw_vesting( - string witness_name, - string amount, - string asset_symbol_or_id, - bool broadcast = false)const; + const string& witness_name, + const string& amount, + const string& asset_symbol_or_id, + bool broadcast = false )const; /** Vote for a given committee_member. * @@ -1552,10 +1561,10 @@ class wallet_api * @param broadcast true if you wish to broadcast the transaction * @return the signed transaction changing your vote for the given committee_member */ - signed_transaction vote_for_committee_member(string voting_account, - string committee_member, - bool approve, - bool broadcast = false)const; + signed_transaction vote_for_committee_member( const string& voting_account, + const string& committee_member, + bool approve, + bool broadcast = false )const; /** Vote for a given witness. * @@ -1574,10 +1583,10 @@ class wallet_api * @param broadcast true if you wish to broadcast the transaction * @return the signed transaction changing your vote for the given witness */ - signed_transaction vote_for_witness(string voting_account, - string witness, - bool approve, - bool broadcast = false)const; + signed_transaction vote_for_witness( const string& voting_account, + const string& witness, + bool approve, + bool broadcast = false )const; /** Set the voting proxy for an account. 
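To make the htlc_create / htlc_redeem / htlc_extend parameters above concrete, here is a toy model of the contract's life cycle. std::hash stands in for the real RIPEMD160/SHA256 digest; this is illustrative only, not graphene code.

#include <cstdint>
#include <functional>
#include <iostream>
#include <stdexcept>
#include <string>

struct toy_htlc
{
   std::string preimage_hash;   // committed at creation (preimage_hash parameter)
   uint32_t    preimage_size = 0;
   uint64_t    expires_at = 0;  // creation time + claim_period_seconds

   static std::string hash_fn( const std::string& s )   // placeholder digest
   {
      return std::to_string( std::hash<std::string>{}( s ) );
   }

   // htlc_redeem: succeeds only while unexpired and only with the matching preimage
   bool redeem( const std::string& preimage, uint64_t now ) const
   {
      if( now >= expires_at ) throw std::runtime_error( "claim period elapsed" );
      return preimage.size() == preimage_size && hash_fn( preimage ) == preimage_hash;
   }

   // htlc_extend: pushes the expiry further into the future
   void extend( uint32_t seconds_to_add ) { expires_at += seconds_to_add; }
};

int main()
{
   toy_htlc h{ toy_htlc::hash_fn( "secret" ), 6, 1000 };
   h.extend( 500 );
   std::cout << std::boolalpha << h.redeem( "secret", 900 ) << "\n";   // true
   return 0;
}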
* @@ -1597,9 +1606,9 @@ class wallet_api * @param broadcast true if you wish to broadcast the transaction * @return the signed transaction changing your vote proxy settings */ - signed_transaction set_voting_proxy(string account_to_modify, - optional voting_account, - bool broadcast = false)const; + signed_transaction set_voting_proxy( const string& account_to_modify, + const optional& voting_account, + bool broadcast = false )const; /** Set your vote for the number of witnesses and committee_members in the system. * @@ -1622,34 +1631,34 @@ class wallet_api * @param broadcast true if you wish to broadcast the transaction * @return the signed transaction changing your vote proxy settings */ - signed_transaction set_desired_witness_and_committee_member_count(string account_to_modify, + signed_transaction set_desired_witness_and_committee_member_count( const string& account_to_modify, uint16_t desired_number_of_witnesses, uint16_t desired_number_of_committee_members, - bool broadcast = false)const; + bool broadcast = false )const; /** Signs a transaction. * * Given a fully-formed transaction that is only lacking signatures, this signs * the transaction with the necessary keys and optionally broadcasts the transaction - * @param tx the unsigned transaction + * @param tx the transaction to be signed * @param broadcast true if you wish to broadcast the transaction * @return the signed version of the transaction */ - signed_transaction sign_transaction(signed_transaction tx, bool broadcast = false)const; + signed_transaction sign_transaction( const signed_transaction& tx, bool broadcast = false )const; /** Signs a transaction. * * Given a fully-formed transaction that is only lacking signatures, this signs * the transaction with the inferred necessary keys and the explicitly provided keys, * and optionally broadcasts the transaction - * @param tx the unsigned transaction + * @param tx the transaction to be signed * @param signing_keys Keys that must be used when signing the transaction * @param broadcast true if you wish to broadcast the transaction * @return the signed version of the transaction */ - signed_transaction sign_transaction2(signed_transaction tx, - const vector& signing_keys = vector(), - bool broadcast = true)const; + signed_transaction sign_transaction2( const signed_transaction& tx, + const vector& signing_keys = vector(), + bool broadcast = true )const; /** Get transaction signers. @@ -1659,7 +1668,7 @@ class wallet_api * @param tx the signed transaction * @return the set of public_keys */ - flat_set get_transaction_signers(const signed_transaction &tx) const; + flat_set get_transaction_signers( const signed_transaction& tx ) const; /** Get key references. * @@ -1667,7 +1676,7 @@ class wallet_api * @param keys public keys to search for related accounts * @return the set of related accounts */ - vector> get_key_references(const vector &keys) const; + vector> get_key_references( const vector& keys ) const; /** Returns an uninitialized object representing a given blockchain operation. * @@ -1685,7 +1694,7 @@ class wallet_api * (e.g., "global_parameters_update_operation") * @return a default-constructed operation of the given type */ - operation get_prototype_operation(string operation_type)const; + operation get_prototype_operation( const string& operation_type )const; /** Creates a transaction to propose a parameter change. 
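set_voting_proxy above takes an optional voting account, where an empty optional means the account votes directly. A self-contained analogue using std::optional as a stand-in for fc::optional:

#include <iostream>
#include <optional>
#include <string>

static void set_voting_proxy( const std::string& account_to_modify,
                              const std::optional<std::string>& voting_account )
{
   if( voting_account )
      std::cout << account_to_modify << " delegates its votes to " << *voting_account << "\n";
   else
      std::cout << account_to_modify << " votes directly (proxy cleared)\n";
}

int main()
{
   set_voting_proxy( "alice", std::string( "bob" ) );
   set_voting_proxy( "alice", std::nullopt );
   return 0;
}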
* @@ -1700,9 +1709,9 @@ class wallet_api */ signed_transaction propose_parameter_change( const string& proposing_account, - fc::time_point_sec expiration_time, + const time_point_sec& expiration_time, const variant_object& changed_values, - bool broadcast = false)const; + bool broadcast = false )const; /** Propose a fee change. * @@ -1715,9 +1724,9 @@ class wallet_api */ signed_transaction propose_fee_change( const string& proposing_account, - fc::time_point_sec expiration_time, + const time_point_sec& expiration_time, const variant_object& changed_values, - bool broadcast = false)const; + bool broadcast = false )const; /** Approve or disapprove a proposal. * @@ -1741,7 +1750,7 @@ class wallet_api * @param limit depth of the order book to retrieve, for bids and asks each, capped at 50 * @return Order book of the market */ - order_book get_order_book( const string& base, const string& quote, unsigned limit = 50)const; + order_book get_order_book( const string& base, const string& quote, uint32_t limit = 50 )const; /** Signs a transaction. * @@ -1749,22 +1758,22 @@ class wallet_api * the transaction with the owned keys and optionally broadcasts the * transaction. * - * @param tx the unsigned transaction + * @param tx the transaction to add signature to * @param broadcast true if you wish to broadcast the transaction * * @return the signed transaction */ - signed_transaction add_transaction_signature( signed_transaction tx, + signed_transaction add_transaction_signature( const signed_transaction& tx, bool broadcast = false )const; - void dbg_make_uia(string creator, string symbol)const; - void dbg_make_mia(string creator, string symbol)const; - void dbg_push_blocks( std::string src_filename, uint32_t count )const; - void dbg_generate_blocks( std::string debug_wif_key, uint32_t count )const; - void dbg_stream_json_objects( const std::string& filename )const; - void dbg_update_object( fc::variant_object update )const; + void dbg_make_uia( const string& creator, const string& symbol )const; + void dbg_make_mia( const string& creator, const string& symbol )const; + void dbg_push_blocks( const string& src_filename, uint32_t count )const; + void dbg_generate_blocks( const string& debug_wif_key, uint32_t count )const; + void dbg_stream_json_objects( const string& filename )const; + void dbg_update_object( const variant_object& update )const; - void flood_network(string prefix, uint32_t number_of_transactions)const; + void flood_network( const string& prefix, uint32_t number_of_transactions )const; void network_add_nodes( const vector& nodes )const; vector< variant > network_get_connected_peers()const; @@ -1772,15 +1781,16 @@ class wallet_api /** * Used to transfer from one set of blinded balances to another */ - blind_confirmation blind_transfer_help( string from_key_or_label, - string to_key_or_label, - string amount, - string symbol, - bool broadcast = false, - bool to_temp = false )const; + blind_confirmation blind_transfer_help( const string& from_key_or_label, + const string& to_key_or_label, + const string& amount, + const string& symbol, + bool broadcast = false, + bool to_temp = false )const; - std::map> get_result_formatters() const; + std::map< string, std::function< string( const variant&, const fc::variants& ) >, std::less<> > + get_result_formatters() const; void encrypt_keys()const; @@ -1798,8 +1808,8 @@ class wallet_api * * @return The signed transaction */ - signed_transaction account_store_map(string account, string catalog, bool remove, - flat_map> key_values, bool broadcast)const; 
+ signed_transaction account_store_map( const string& account, const string& catalog, bool remove, + const flat_map>& key_values, bool broadcast )const; /** * Get \c account_storage_object of an account by using the custom operations plugin. @@ -1811,7 +1821,7 @@ class wallet_api * * @return An \c account_storage_object or empty. */ - vector get_account_storage(string account, string catalog)const; + vector get_account_storage( const string& account, const string& catalog )const; }; diff --git a/libraries/wallet/include/graphene/wallet/wallet_structs.hpp b/libraries/wallet/include/graphene/wallet/wallet_structs.hpp index 00dba9e19a..b4a69306b2 100644 --- a/libraries/wallet/include/graphene/wallet/wallet_structs.hpp +++ b/libraries/wallet/include/graphene/wallet/wallet_structs.hpp @@ -228,8 +228,7 @@ struct worker_vote_delta struct signed_block_with_info : public signed_block { - signed_block_with_info( const signed_block& block ); - signed_block_with_info( const signed_block_with_info& block ) = default; + explicit signed_block_with_info( const signed_block& block ); block_id_type block_id; public_key_type signing_key; @@ -238,8 +237,7 @@ struct signed_block_with_info : public signed_block struct vesting_balance_object_with_info : public vesting_balance_object { - vesting_balance_object_with_info( const vesting_balance_object& vbo, fc::time_point_sec now ); - vesting_balance_object_with_info( const vesting_balance_object_with_info& vbo ) = default; + vesting_balance_object_with_info( const vesting_balance_object& vbo, const fc::time_point_sec& now ); /** * How much is allowed to be withdrawn. @@ -289,8 +287,8 @@ class utility { * @param number_of_desired_keys Number of desired keys * @return A list of keys that are deterministically derived from the brainkey */ - static vector derive_owner_keys_from_brain_key( string brain_key, - int number_of_desired_keys = 1 ); + static vector derive_owner_keys_from_brain_key( const string& brain_key, + uint32_t number_of_desired_keys = 1 ); /** Suggests a safe brain key to use for creating your account. 
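The converting constructor of signed_block_with_info above is now explicit. A standalone sketch (stand-in types, not graphene code) of what explicit prevents:

#include <string>

struct block            { std::string id; };

struct block_with_info  // analogue of signed_block_with_info
{
   explicit block_with_info( const block& b ) : base( b ) {}
   block base;
};

static void consume( const block_with_info& ) {}

int main()
{
   block b{ "abc" };
   consume( block_with_info( b ) );   // fine: the conversion is spelled out
   // consume( b );                   // no longer compiles: implicit conversion rejected
   return 0;
}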
* \c create_account_with_brain_key() requires you to specify a 'brain key', diff --git a/libraries/wallet/wallet.cpp b/libraries/wallet/wallet.cpp index bbaeac3659..848e26c711 100644 --- a/libraries/wallet/wallet.cpp +++ b/libraries/wallet/wallet.cpp @@ -90,20 +90,22 @@ namespace graphene { namespace wallet { fc::stringstream to_sign; to_sign << message << '\n'; to_sign << "account=" << meta.account << '\n'; - to_sign << "memokey=" << std::string( meta.memo_key ) << '\n'; + to_sign << "memokey=" << string( meta.memo_key ) << '\n'; to_sign << "block=" << meta.block << '\n'; to_sign << "timestamp=" << meta.time; return fc::sha256::hash( to_sign.str() ); } - vector utility::derive_owner_keys_from_brain_key(string brain_key, int number_of_desired_keys) + vector utility::derive_owner_keys_from_brain_key( const string& brain_key, + uint32_t number_of_desired_keys ) { // Safety-check - FC_ASSERT( number_of_desired_keys >= 1 ); + FC_ASSERT( number_of_desired_keys >= 1, "number_of_desired_keys should be at least 1" ); // Create as many derived owner keys as requested vector results; - for (int i = 0; i < number_of_desired_keys; ++i) { + for( uint32_t i = 0; i < number_of_desired_keys; ++i ) + { fc::ecc::private_key priv_key = graphene::wallet::detail::derive_private_key( brain_key, i ); brain_key_info result; @@ -130,7 +132,7 @@ namespace graphene { namespace wallet { entropy += entropy2; string brain_key = ""; - for (int i = 0; i < BRAIN_KEY_WORD_COUNT; i++) + for( uint32_t i = 0; i < BRAIN_KEY_WORD_COUNT; ++i ) { fc::bigint choice = entropy % graphene::words::word_list_size; entropy /= graphene::words::word_list_size; @@ -150,14 +152,14 @@ namespace graphene { namespace wallet { namespace graphene { namespace wallet { -wallet_api::wallet_api(const wallet_data& initial_data, fc::api rapi) +wallet_api::wallet_api( const wallet_data& initial_data, const fc::api& rapi ) : my( std::make_unique(*this, initial_data, rapi) ) { } wallet_api::~wallet_api() = default; -bool wallet_api::copy_wallet_file(string destination_filename)const +bool wallet_api::copy_wallet_file( const string& destination_filename )const { return my->copy_wallet_file(destination_filename); } @@ -177,17 +179,17 @@ vector wallet_api::list_my_accounts()const return vector(my->_wallet.my_accounts.begin(), my->_wallet.my_accounts.end()); } -map> wallet_api::list_accounts(const string& lowerbound, uint32_t limit)const +map> wallet_api::list_accounts( const string& lowerbound, uint32_t limit )const { return my->_remote_db->lookup_accounts(lowerbound, limit, {}); } -vector wallet_api::list_account_balances(const string& id)const +vector wallet_api::list_account_balances( const string& id )const { return my->_remote_db->get_account_balances(id, flat_set()); } -vector wallet_api::list_assets(const string& lowerbound, uint32_t limit)const +vector wallet_api::list_assets( const string& lowerbound, uint32_t limit )const { return my->_remote_db->list_assets( lowerbound, limit ); } @@ -237,7 +239,7 @@ fc::optional wallet_api::get_htlc(const htlc_id_type& htlc_id) cons result_type operator()(const fc::hash160& obj)const { return convert("HASH160", obj.str()); } private: - result_type convert(const std::string& type, const std::string& hash)const + result_type convert(const string& type, const string& hash)const { fc::mutable_variant_object ret_val; ret_val["hash_algo"] = type; @@ -275,7 +277,7 @@ signed_transaction wallet_api::htlc_extend( const htlc_id_type& htlc_id, const s return my->htlc_extend(htlc_id, issuer, seconds_to_add, broadcast); } 
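derive_owner_keys_from_brain_key above now uses an unsigned counter and a descriptive assertion message. Roughly, each key is derived from the brain key plus a sequence number; the sketch below mirrors that loop with std::hash standing in for the real sha512/sha256 hashing and ECC key regeneration (illustrative only, not the graphene derivation).

#include <cstdint>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

static uint64_t derive_key_stub( const std::string& brain_key, uint32_t sequence )
{
   // placeholder for the hash-then-regenerate-private-key step
   return std::hash<std::string>{}( brain_key + " " + std::to_string( sequence ) );
}

int main()
{
   const std::string brain_key = "WORD1 WORD2 WORD3";
   const uint32_t number_of_desired_keys = 3;
   if( number_of_desired_keys < 1 ) return 1;              // mirrors the FC_ASSERT above

   std::vector<uint64_t> keys;
   for( uint32_t i = 0; i < number_of_desired_keys; ++i )  // unsigned loop counter, as in the patch
      keys.push_back( derive_key_stub( brain_key, i ) );
   std::cout << "derived " << keys.size() << " keys\n";
   return 0;
}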
-vector wallet_api::get_account_history(const string& name, uint32_t limit)const +vector wallet_api::get_account_history( const string& name, uint32_t limit )const { vector result; @@ -337,7 +339,7 @@ vector wallet_api::get_relative_account_history( const string& name, uint32_t stop, uint32_t limit, - uint32_t start)const + uint32_t start )const { vector result; auto account_id = get_account(name).get_id(); @@ -377,7 +379,7 @@ account_history_operation_detail wallet_api::get_account_history_by_operations( const string& name, const flat_set& operation_types, uint32_t start, - uint32_t limit)const + uint32_t limit )const { account_history_operation_detail result; @@ -420,50 +422,50 @@ account_history_operation_detail wallet_api::get_account_history_by_operations( return result; } -full_account wallet_api::get_full_account( const string& name_or_id)const +full_account wallet_api::get_full_account( const string& name_or_id )const { return my->_remote_db->get_full_accounts({name_or_id}, false)[name_or_id]; } vector wallet_api::get_market_history( - string symbol1, - string symbol2, + const string& symbol1, + const string& symbol2, uint32_t bucket, - fc::time_point_sec start, - fc::time_point_sec end )const + const fc::time_point_sec& start, + const fc::time_point_sec& end )const { return my->_remote_hist->get_market_history( symbol1, symbol2, bucket, start, end ); } vector wallet_api::get_account_limit_orders( const string& name_or_id, - const string &base, - const string "e, + const string& base, + const string& quote, uint32_t limit, - optional ostart_id, - optional ostart_price)const + const optional& ostart_id, + const optional& ostart_price )const { return my->_remote_db->get_account_limit_orders(name_or_id, base, quote, limit, ostart_id, ostart_price); } -vector wallet_api::get_limit_orders(std::string a, std::string b, uint32_t limit)const +vector wallet_api::get_limit_orders( const string& a, const string& b, uint32_t limit )const { return my->_remote_db->get_limit_orders(a, b, limit); } -vector wallet_api::get_call_orders(std::string a, uint32_t limit)const +vector wallet_api::get_call_orders( const string& a, uint32_t limit )const { return my->_remote_db->get_call_orders(a, limit); } -vector wallet_api::get_settle_orders(std::string a, uint32_t limit)const +vector wallet_api::get_settle_orders( const string& a, uint32_t limit )const { return my->_remote_db->get_settle_orders(a, limit); } -vector wallet_api::get_collateral_bids(std::string asset, uint32_t limit, uint32_t start)const +vector wallet_api::get_collateral_bids( const string& a, uint32_t limit, uint32_t start )const { - return my->_remote_db->get_collateral_bids(asset, limit, start); + return my->_remote_db->get_collateral_bids(a, limit, start); } brain_key_info wallet_api::suggest_brain_key()const @@ -472,25 +474,25 @@ brain_key_info wallet_api::suggest_brain_key()const } vector wallet_api::derive_owner_keys_from_brain_key( - string brain_key, - int number_of_desired_keys) const + const string& brain_key, + uint32_t number_of_desired_keys ) const { return graphene::wallet::utility::derive_owner_keys_from_brain_key(brain_key, number_of_desired_keys); } -bool wallet_api::is_public_key_registered(string public_key) const +bool wallet_api::is_public_key_registered( const string& public_key ) const { bool is_known = my->_remote_db->is_public_key_registered(public_key); return is_known; } -string wallet_api::serialize_transaction( signed_transaction tx )const +string wallet_api::serialize_transaction( const signed_transaction& tx 
)const { return fc::to_hex(fc::raw::pack(tx)); } -variant wallet_api::get_object( object_id_type id ) const +variant wallet_api::get_object( const object_id_type& id ) const { return my->_remote_db->get_objects({id}, {}); } @@ -507,20 +509,20 @@ transaction_handle_type wallet_api::begin_builder_transaction()const void wallet_api::add_operation_to_builder_transaction( transaction_handle_type transaction_handle, - const operation& op)const + const operation& op )const { my->add_operation_to_builder_transaction(transaction_handle, op); } void wallet_api::replace_operation_in_builder_transaction( transaction_handle_type handle, - unsigned operation_index, - const operation& new_op)const + uint32_t operation_index, + const operation& new_op )const { my->replace_operation_in_builder_transaction(handle, operation_index, new_op); } -asset wallet_api::set_fees_on_builder_transaction(transaction_handle_type handle, string fee_asset)const +asset wallet_api::set_fees_on_builder_transaction( transaction_handle_type handle, const string& fee_asset )const { return my->set_fees_on_builder_transaction(handle, fee_asset); } @@ -543,26 +545,26 @@ signed_transaction wallet_api::sign_builder_transaction2(transaction_handle_type return my->sign_builder_transaction2(transaction_handle, explicit_keys, broadcast); } -pair wallet_api::broadcast_transaction(signed_transaction tx)const +pair wallet_api::broadcast_transaction( const signed_transaction& tx )const { return my->broadcast_transaction(tx); } signed_transaction wallet_api::propose_builder_transaction( transaction_handle_type handle, - time_point_sec expiration, + const time_point_sec& expiration, uint32_t review_period_seconds, - bool broadcast)const + bool broadcast )const { return my->propose_builder_transaction(handle, expiration, review_period_seconds, broadcast); } signed_transaction wallet_api::propose_builder_transaction2( transaction_handle_type handle, - string account_name_or_id, - time_point_sec expiration, + const string& account_name_or_id, + const time_point_sec& expiration, uint32_t review_period_seconds, - bool broadcast)const + bool broadcast )const { return my->propose_builder_transaction2(handle, account_name_or_id, expiration, review_period_seconds, broadcast); } @@ -572,36 +574,36 @@ void wallet_api::remove_builder_transaction(transaction_handle_type handle)const return my->remove_builder_transaction(handle); } -account_object wallet_api::get_account(string account_name_or_id) const +account_object wallet_api::get_account( const string& account_name_or_id ) const { return my->get_account(account_name_or_id); } -extended_asset_object wallet_api::get_asset(string asset_name_or_id) const +extended_asset_object wallet_api::get_asset( const string& asset_name_or_id ) const { auto found_asset = my->find_asset(asset_name_or_id); FC_ASSERT( found_asset, "Unable to find asset '${a}'", ("a",asset_name_or_id) ); return *found_asset; } -asset_bitasset_data_object wallet_api::get_bitasset_data(string asset_name_or_id) const +asset_bitasset_data_object wallet_api::get_bitasset_data( const string& asset_name_or_id ) const { auto asset = get_asset(asset_name_or_id); FC_ASSERT(asset.is_market_issued() && asset.bitasset_data_id); return my->get_object(*asset.bitasset_data_id); } -account_id_type wallet_api::get_account_id(string account_name_or_id) const +account_id_type wallet_api::get_account_id( const string& account_name_or_id ) const { return my->get_account_id(account_name_or_id); } -asset_id_type wallet_api::get_asset_id(const string& 
asset_symbol_or_id) const +asset_id_type wallet_api::get_asset_id( const string& asset_symbol_or_id ) const { return my->get_asset_id(asset_symbol_or_id); } -bool wallet_api::import_key(string account_name_or_id, string wif_key)const +bool wallet_api::import_key( const string& account_name_or_id, const string& wif_key )const { FC_ASSERT(!is_locked()); // backup wallet @@ -620,7 +622,7 @@ bool wallet_api::import_key(string account_name_or_id, string wif_key)const return false; } -map wallet_api::import_accounts( string filename, string password )const +map> wallet_api::import_accounts( const string& filename, const string& password )const { FC_ASSERT( !is_locked() ); FC_ASSERT( fc::exists( filename ) ); @@ -630,7 +632,7 @@ map wallet_api::import_accounts( string filename, string password const auto password_hash = fc::sha512::hash( password ); FC_ASSERT( fc::sha512::hash( password_hash ) == imported_keys.password_checksum ); - map result; + map> result; for( const auto& item : imported_keys.account_keys ) { const auto import_this_account = [ & ]() -> bool @@ -693,10 +695,10 @@ map wallet_api::import_accounts( string filename, string password } bool wallet_api::import_account_keys( - string filename, - string password, - string src_account_name, - string dest_account_name )const + const string& filename, + const string& password, + const string& src_account_name, + const string& dest_account_name )const { FC_ASSERT( !is_locked() ); FC_ASSERT( fc::exists( filename ) ); @@ -743,7 +745,7 @@ bool wallet_api::import_account_keys( return false; } -string wallet_api::normalize_brain_key(string s) const +string wallet_api::normalize_brain_key( const string& s ) const { return detail::normalize_brain_key( s ); } @@ -758,312 +760,315 @@ variant_object wallet_api::about() const return my->about(); } -fc::ecc::private_key wallet_api::derive_private_key(const std::string& prefix_string, int sequence_number) const +fc::ecc::private_key wallet_api::derive_private_key( const string& prefix_string, uint32_t sequence_number ) const { return detail::derive_private_key( prefix_string, sequence_number ); } -signed_transaction wallet_api::register_account(string name, - public_key_type owner_pubkey, - public_key_type active_pubkey, - string registrar_account, - string referrer_account, - uint32_t referrer_percent, - bool broadcast)const +signed_transaction wallet_api::register_account( const string& name, + const public_key_type& owner_pubkey, + const public_key_type& active_pubkey, + const string& registrar_account, + const string& referrer_account, + uint32_t referrer_percent, + bool broadcast )const { return my->register_account( name, owner_pubkey, active_pubkey, registrar_account, referrer_account, referrer_percent, broadcast ); } -signed_transaction wallet_api::create_account_with_brain_key(string brain_key, string account_name, - string registrar_account, string referrer_account, - bool broadcast /* = false */)const +signed_transaction wallet_api::create_account_with_brain_key( const string& brain_key, const string& account_name, + const string& registrar_account, const string& referrer_account, + bool broadcast /* = false */ )const { return my->create_account_with_brain_key( brain_key, account_name, registrar_account, referrer_account, broadcast ); } -signed_transaction wallet_api::issue_asset(string to_account, string amount, string symbol, - string memo, bool broadcast)const +signed_transaction wallet_api::issue_asset( const string& to_account, const string& amount, const string& symbol, + const string& 
memo, bool broadcast )const { return my->issue_asset(to_account, amount, symbol, memo, broadcast); } -signed_transaction wallet_api::transfer(string from, string to, string amount, - string asset_symbol, string memo, bool broadcast /* = false */)const +signed_transaction wallet_api::transfer( const string& from, const string& to, const string& amount, + const string& asset_symbol, const string& memo, + bool broadcast /* = false */ )const { return my->transfer(from, to, amount, asset_symbol, memo, broadcast); } -signed_transaction wallet_api::create_asset(string issuer, - string symbol, - uint8_t precision, - asset_options common, - fc::optional bitasset_opts, - bool broadcast)const +signed_transaction wallet_api::create_asset( const string& issuer, + const string& symbol, + uint8_t precision, + const asset_options& common, + const optional& bitasset_opts, + bool broadcast )const { return my->create_asset(issuer, symbol, precision, common, bitasset_opts, broadcast); } -signed_transaction wallet_api::update_asset(string symbol, - optional new_issuer, - asset_options new_options, - bool broadcast /* = false */)const +signed_transaction wallet_api::update_asset( const string& symbol, + const optional& new_issuer, + const asset_options& new_options, + bool broadcast /* = false */ )const { return my->update_asset(symbol, new_issuer, new_options, broadcast); } -signed_transaction wallet_api::update_asset_issuer(string symbol, - string new_issuer, - bool broadcast /* = false */)const +signed_transaction wallet_api::update_asset_issuer( const string& symbol, + const string& new_issuer, + bool broadcast /* = false */ )const { return my->update_asset_issuer(symbol, new_issuer, broadcast); } -signed_transaction wallet_api::update_bitasset(string symbol, - bitasset_options new_options, - bool broadcast /* = false */)const +signed_transaction wallet_api::update_bitasset( const string& symbol, + const bitasset_options& new_options, + bool broadcast /* = false */ )const { return my->update_bitasset(symbol, new_options, broadcast); } -signed_transaction wallet_api::update_asset_feed_producers(string symbol, - flat_set new_feed_producers, - bool broadcast /* = false */)const +signed_transaction wallet_api::update_asset_feed_producers( const string& symbol, + const flat_set& new_feed_producers, + bool broadcast /* = false */ )const { return my->update_asset_feed_producers(symbol, new_feed_producers, broadcast); } -signed_transaction wallet_api::publish_asset_feed(string publishing_account, - string symbol, - price_feed feed, - bool broadcast /* = false */)const +signed_transaction wallet_api::publish_asset_feed( const string& publishing_account, + const string& symbol, + const price_feed& feed, + bool broadcast /* = false */ )const { return my->publish_asset_feed(publishing_account, symbol, feed, broadcast); } -signed_transaction wallet_api::fund_asset_fee_pool(string from, - string symbol, - string amount, - bool broadcast /* = false */)const +signed_transaction wallet_api::fund_asset_fee_pool( const string& from, + const string& symbol, + const string& amount, + bool broadcast /* = false */ )const { return my->fund_asset_fee_pool(from, symbol, amount, broadcast); } -signed_transaction wallet_api::claim_asset_fee_pool(string symbol, - string amount, - bool broadcast /* = false */)const +signed_transaction wallet_api::claim_asset_fee_pool( const string& symbol, + const string& amount, + bool broadcast /* = false */ )const { return my->claim_asset_fee_pool(symbol, amount, broadcast); } -signed_transaction 
wallet_api::reserve_asset(string from, - string amount, - string symbol, - bool broadcast /* = false */)const +signed_transaction wallet_api::reserve_asset( const string& from, + const string& amount, + const string& symbol, + bool broadcast /* = false */ )const { return my->reserve_asset(from, amount, symbol, broadcast); } -signed_transaction wallet_api::global_settle_asset(string symbol, - price settle_price, - bool broadcast /* = false */)const +signed_transaction wallet_api::global_settle_asset( const string& symbol, + const price& settle_price, + bool broadcast /* = false */ )const { return my->global_settle_asset(symbol, settle_price, broadcast); } -signed_transaction wallet_api::settle_asset(string account_to_settle, - string amount_to_settle, - string symbol, - bool broadcast /* = false */)const +signed_transaction wallet_api::settle_asset( const string& account_to_settle, + const string& amount_to_settle, + const string& symbol, + bool broadcast /* = false */ )const { return my->settle_asset(account_to_settle, amount_to_settle, symbol, broadcast); } -signed_transaction wallet_api::bid_collateral(string bidder_name, - string debt_amount, string debt_symbol, - string additional_collateral, - bool broadcast )const +signed_transaction wallet_api::bid_collateral( const string& bidder_name, + const string& debt_amount, const string& debt_symbol, + const string& additional_collateral, + bool broadcast )const { return my->bid_collateral(bidder_name, debt_amount, debt_symbol, additional_collateral, broadcast); } -signed_transaction wallet_api::whitelist_account(string authorizing_account, - string account_to_list, - account_whitelist_operation::account_listing new_listing_status, - bool broadcast /* = false */)const +signed_transaction wallet_api::whitelist_account( const string& authorizing_account, + const string& account_to_list, + account_whitelist_operation::account_listing new_listing_status, + bool broadcast /* = false */ )const { return my->whitelist_account(authorizing_account, account_to_list, new_listing_status, broadcast); } -signed_transaction wallet_api::create_committee_member(string owner_account, string url, - bool broadcast /* = false */)const +signed_transaction wallet_api::create_committee_member( const string& owner_account, const string& url, + bool broadcast /* = false */ )const { return my->create_committee_member(owner_account, url, broadcast); } -map wallet_api::list_witnesses(const string& lowerbound, uint32_t limit)const +map> wallet_api::list_witnesses( const string& lowerbound, uint32_t limit )const { return my->_remote_db->lookup_witness_accounts(lowerbound, limit); } -map wallet_api::list_committee_members(const string& lowerbound, uint32_t limit)const +map> wallet_api::list_committee_members( + const string& lowerbound, uint32_t limit )const { return my->_remote_db->lookup_committee_member_accounts(lowerbound, limit); } -witness_object wallet_api::get_witness(string owner_account)const +witness_object wallet_api::get_witness( const string& owner_account )const { return my->get_witness(owner_account); } -committee_member_object wallet_api::get_committee_member(string owner_account)const +committee_member_object wallet_api::get_committee_member( const string& owner_account )const { return my->get_committee_member(owner_account); } -signed_transaction wallet_api::create_witness(string owner_account, - string url, - bool broadcast /* = false */)const +signed_transaction wallet_api::create_witness( const string& owner_account, + const string& url, + bool 
broadcast /* = false */ )const { return my->create_witness(owner_account, url, broadcast); } signed_transaction wallet_api::create_worker( - string owner_account, - time_point_sec work_begin_date, - time_point_sec work_end_date, - share_type daily_pay, - string name, - string url, - variant worker_settings, - bool broadcast /* = false */)const + const string& owner_account, + const time_point_sec& work_begin_date, + const time_point_sec& work_end_date, + const share_type& daily_pay, + const string& name, + const string& url, + const variant& worker_settings, + bool broadcast /* = false */ )const { return my->create_worker( owner_account, work_begin_date, work_end_date, daily_pay, name, url, worker_settings, broadcast ); } signed_transaction wallet_api::update_worker_votes( - string owner_account, - worker_vote_delta delta, - bool broadcast /* = false */)const + const string& owner_account, + const worker_vote_delta& delta, + bool broadcast /* = false */ )const { return my->update_worker_votes( owner_account, delta, broadcast ); } signed_transaction wallet_api::update_witness( - string witness_name, - string url, - string block_signing_key, - bool broadcast /* = false */)const + const string& witness_name, + const string& url, + const string& block_signing_key, + bool broadcast /* = false */ )const { return my->update_witness(witness_name, url, block_signing_key, broadcast); } -vector< vesting_balance_object_with_info > wallet_api::get_vesting_balances( string account_name )const +vector< vesting_balance_object_with_info > wallet_api::get_vesting_balances( const string& account_name )const { return my->get_vesting_balances( account_name ); } signed_transaction wallet_api::withdraw_vesting( - string witness_name, - string amount, - string asset_symbol, - bool broadcast /* = false */)const + const string& witness_name, + const string& amount, + const string& asset_symbol, + bool broadcast /* = false */ )const { return my->withdraw_vesting( witness_name, amount, asset_symbol, broadcast ); } -signed_transaction wallet_api::vote_for_committee_member(string voting_account, - string witness, - bool approve, - bool broadcast /* = false */)const +signed_transaction wallet_api::vote_for_committee_member( const string& voting_account, + const string& witness, + bool approve, + bool broadcast /* = false */ )const { return my->vote_for_committee_member(voting_account, witness, approve, broadcast); } -signed_transaction wallet_api::vote_for_witness(string voting_account, - string witness, - bool approve, - bool broadcast /* = false */)const +signed_transaction wallet_api::vote_for_witness( const string& voting_account, + const string& witness, + bool approve, + bool broadcast /* = false */ )const { return my->vote_for_witness(voting_account, witness, approve, broadcast); } -signed_transaction wallet_api::set_voting_proxy(string account_to_modify, - optional voting_account, - bool broadcast /* = false */)const +signed_transaction wallet_api::set_voting_proxy( const string& account_to_modify, + const optional& voting_account, + bool broadcast /* = false */ )const { return my->set_voting_proxy(account_to_modify, voting_account, broadcast); } -signed_transaction wallet_api::set_desired_witness_and_committee_member_count(string account_to_modify, +signed_transaction wallet_api::set_desired_witness_and_committee_member_count( const string& account_to_modify, uint16_t desired_number_of_witnesses, uint16_t desired_number_of_committee_members, - bool broadcast /* = false */)const + bool broadcast /* = false */ 
)const { return my->set_desired_witness_and_committee_member_count(account_to_modify, desired_number_of_witnesses, desired_number_of_committee_members, broadcast); } -void wallet_api::set_wallet_filename(string wallet_filename)const +void wallet_api::set_wallet_filename( const string& wallet_filename )const { my->_wallet_filename = wallet_filename; } -signed_transaction wallet_api::sign_transaction(signed_transaction tx, bool broadcast /* = false */)const +signed_transaction wallet_api::sign_transaction( const signed_transaction& tx, bool broadcast /* = false */ )const { try { return my->sign_transaction( tx, broadcast); } FC_CAPTURE_AND_RETHROW( (tx) ) } -signed_transaction wallet_api::sign_transaction2(signed_transaction tx, const vector& signing_keys, - bool broadcast /* = false */)const +signed_transaction wallet_api::sign_transaction2( const signed_transaction& tx, + const vector& signing_keys, + bool broadcast /* = false */ )const { try { return my->sign_transaction2( tx, signing_keys, broadcast); } FC_CAPTURE_AND_RETHROW( (tx) ) } -flat_set wallet_api::get_transaction_signers(const signed_transaction &tx) const +flat_set wallet_api::get_transaction_signers( const signed_transaction& tx ) const { try { return my->get_transaction_signers(tx); } FC_CAPTURE_AND_RETHROW( (tx) ) } -vector> wallet_api::get_key_references(const vector &keys) const +vector> wallet_api::get_key_references( const vector& keys ) const { try { return my->get_key_references(keys); } FC_CAPTURE_AND_RETHROW( (keys) ) } -operation wallet_api::get_prototype_operation(string operation_name)const +operation wallet_api::get_prototype_operation( const string& operation_name )const { return my->get_prototype_operation( operation_name ); } -void wallet_api::dbg_make_uia(string creator, string symbol)const +void wallet_api::dbg_make_uia( const string& creator, const string& symbol )const { FC_ASSERT(!is_locked()); my->dbg_make_uia(creator, symbol); } -void wallet_api::dbg_make_mia(string creator, string symbol)const +void wallet_api::dbg_make_mia( const string& creator, const string& symbol )const { FC_ASSERT(!is_locked()); my->dbg_make_mia(creator, symbol); } -void wallet_api::dbg_push_blocks( std::string src_filename, uint32_t count )const +void wallet_api::dbg_push_blocks( const string& src_filename, uint32_t count )const { my->dbg_push_blocks( src_filename, count ); } -void wallet_api::dbg_generate_blocks( std::string debug_wif_key, uint32_t count )const +void wallet_api::dbg_generate_blocks( const string& debug_wif_key, uint32_t count )const { my->dbg_generate_blocks( debug_wif_key, count ); } -void wallet_api::dbg_stream_json_objects( const std::string& filename )const +void wallet_api::dbg_stream_json_objects( const string& filename )const { my->dbg_stream_json_objects( filename ); } -void wallet_api::dbg_update_object( fc::variant_object update )const +void wallet_api::dbg_update_object( const fc::variant_object& update )const { my->dbg_update_object( update ); } @@ -1078,7 +1083,7 @@ vector< variant > wallet_api::network_get_connected_peers()const return my->network_get_connected_peers(); } -void wallet_api::flood_network(string prefix, uint32_t number_of_transactions)const +void wallet_api::flood_network( const string& prefix, uint32_t number_of_transactions )const { FC_ASSERT(!is_locked()); my->flood_network(prefix, number_of_transactions); @@ -1086,7 +1091,7 @@ void wallet_api::flood_network(string prefix, uint32_t number_of_transactions)co signed_transaction wallet_api::propose_parameter_change( const string& 
proposing_account, - fc::time_point_sec expiration_time, + const fc::time_point_sec& expiration_time, const variant_object& changed_values, bool broadcast /* = false */ )const @@ -1096,7 +1101,7 @@ signed_transaction wallet_api::propose_parameter_change( signed_transaction wallet_api::propose_fee_change( const string& proposing_account, - fc::time_point_sec expiration_time, + const fc::time_point_sec& expiration_time, const variant_object& changed_fees, bool broadcast /* = false */ )const @@ -1124,7 +1129,7 @@ dynamic_global_property_object wallet_api::get_dynamic_global_properties() const return my->get_dynamic_global_properties(); } -signed_transaction wallet_api::add_transaction_signature( signed_transaction tx, +signed_transaction wallet_api::add_transaction_signature( const signed_transaction& tx, bool broadcast )const { return my->add_transaction_signature( tx, broadcast ); @@ -1132,9 +1137,9 @@ signed_transaction wallet_api::add_transaction_signature( signed_transaction tx, string wallet_api::help()const { - std::vector method_names = my->method_documentation.get_method_names(); + std::vector method_names = my->method_documentation.get_method_names(); std::stringstream ss; - for (const std::string& method_name : method_names) + for (const string& method_name : method_names) { try { @@ -1148,13 +1153,13 @@ string wallet_api::help()const return ss.str(); } -string wallet_api::gethelp(const string& method)const +string wallet_api::gethelp( const string& method )const { fc::api tmp; std::stringstream ss; ss << "\n"; - std::string doxygenHelpString = my->method_documentation.get_detailed_description(method); + string doxygenHelpString = my->method_documentation.get_detailed_description(method); if (!doxygenHelpString.empty()) ss << doxygenHelpString << "\n"; @@ -1211,7 +1216,7 @@ string wallet_api::gethelp(const string& method)const return ss.str(); } -bool wallet_api::load_wallet_file( string wallet_filename )const +bool wallet_api::load_wallet_file( const string& wallet_filename )const { return my->load_wallet_file( wallet_filename ); } @@ -1221,13 +1226,13 @@ void wallet_api::quit()const my->quit(); } -void wallet_api::save_wallet_file( string wallet_filename )const +void wallet_api::save_wallet_file( const string& wallet_filename )const { my->save_wallet_file( wallet_filename ); } -std::map > -wallet_api::get_result_formatters() const +std::map< string, std::function< string( const fc::variant&, const fc::variants& ) >, std::less<> > + wallet_api::get_result_formatters() const { return my->get_result_formatters(); } @@ -1257,7 +1262,7 @@ void wallet_api::lock()const my->self.lock_changed(true); } FC_CAPTURE_AND_RETHROW() } -void wallet_api::unlock(string password)const +void wallet_api::unlock( const string& password )const { try { FC_ASSERT(password.size() > 0); auto pw = fc::sha512::hash(password.c_str(), password.size()); @@ -1269,7 +1274,7 @@ void wallet_api::unlock(string password)const my->self.lock_changed(false); } FC_CAPTURE_AND_RETHROW() } -void wallet_api::set_password( string password )const +void wallet_api::set_password( const string& password )const { if( !is_new() ) FC_ASSERT( !is_locked(), "The wallet must be unlocked before the password can be set" ); @@ -1278,7 +1283,7 @@ void wallet_api::set_password( string password )const } vector< signed_transaction > wallet_api::import_balance( - string name_or_id, + const string& name_or_id, const vector& wif_keys, bool broadcast )const { @@ -1291,60 +1296,61 @@ map wallet_api::dump_private_keys()const return my->_keys; } 
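unlock and set_password above, and the trading and memo calls that follow, all rely on the same FC_ASSERT(!is_locked()) guard before touching private keys. A stripped-down standalone analogue of that pattern (plain exception instead of FC_ASSERT, not graphene code):

#include <iostream>
#include <stdexcept>
#include <string>

class toy_wallet
{
public:
   void unlock() { locked_ = false; }
   void lock()   { locked_ = true; }

   std::string sign_memo( const std::string& from, const std::string& to, const std::string& memo ) const
   {
      require_unlocked();                     // analogue of FC_ASSERT(!is_locked())
      return "memo from " + from + " to " + to + ": " + memo;
   }

private:
   void require_unlocked() const
   {
      if( locked_ ) throw std::runtime_error( "The wallet must be unlocked" );
   }

   bool locked_ = true;
};

int main()
{
   toy_wallet w;
   try { w.sign_memo( "alice", "bob", "hi" ); }
   catch( const std::exception& e ) { std::cout << e.what() << "\n"; }   // locked: throws
   w.unlock();
   std::cout << w.sign_memo( "alice", "bob", "hi" ) << "\n";             // unlocked: succeeds
   return 0;
}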
-signed_transaction wallet_api::upgrade_account( string name, bool broadcast )const +signed_transaction wallet_api::upgrade_account( const string& name, bool broadcast )const { return my->upgrade_account(name,broadcast); } -signed_transaction wallet_api::sell_asset(string seller_account, - string amount_to_sell, - string symbol_to_sell, - string min_to_receive, - string symbol_to_receive, - uint32_t expiration, - bool fill_or_kill, - bool broadcast)const +signed_transaction wallet_api::sell_asset( const string& seller_account, + const string& amount_to_sell, + const string& symbol_to_sell, + const string& min_to_receive, + const string& symbol_to_receive, + uint32_t expiration, + bool fill_or_kill, + bool broadcast )const { return my->sell_asset(seller_account, amount_to_sell, symbol_to_sell, min_to_receive, symbol_to_receive, expiration, fill_or_kill, broadcast); } -signed_transaction wallet_api::borrow_asset(string seller_name, string amount_to_sell, - string asset_symbol, string amount_of_collateral, bool broadcast)const +signed_transaction wallet_api::borrow_asset( const string& seller_name, const string& amount_to_sell, + const string& asset_symbol, const string& amount_of_collateral, + bool broadcast )const { FC_ASSERT(!is_locked()); - return my->borrow_asset_ext(seller_name, amount_to_sell, asset_symbol, amount_of_collateral, {}, broadcast); + return my->borrow_asset_ext( seller_name, amount_to_sell, asset_symbol, amount_of_collateral, {}, broadcast ); } -signed_transaction wallet_api::borrow_asset_ext( string seller_name, string amount_to_sell, - string asset_symbol, string amount_of_collateral, - call_order_update_operation::extensions_type extensions, - bool broadcast)const +signed_transaction wallet_api::borrow_asset_ext( const string& seller_name, const string& amount_to_sell, + const string& asset_symbol, const string& amount_of_collateral, + const call_order_update_operation::extensions_type& extensions, + bool broadcast )const { FC_ASSERT(!is_locked()); return my->borrow_asset_ext(seller_name, amount_to_sell, asset_symbol, amount_of_collateral, extensions, broadcast); } -signed_transaction wallet_api::cancel_order(const limit_order_id_type& order_id, bool broadcast) const +signed_transaction wallet_api::cancel_order( const limit_order_id_type& order_id, bool broadcast ) const { FC_ASSERT(!is_locked()); return my->cancel_order(order_id, broadcast); } -memo_data wallet_api::sign_memo(string from, string to, string memo)const +memo_data wallet_api::sign_memo( const string& from, const string& to, const string& memo )const { FC_ASSERT(!is_locked()); return my->sign_memo(from, to, memo); } -string wallet_api::read_memo(const memo_data& memo)const +string wallet_api::read_memo( const memo_data& memo )const { FC_ASSERT(!is_locked()); return my->read_memo(memo); } -signed_message wallet_api::sign_message(string signer, string message)const +signed_message wallet_api::sign_message( const string& signer, const string& message )const { FC_ASSERT(!is_locked()); return my->sign_message(signer, message); @@ -1361,7 +1367,7 @@ bool wallet_api::verify_message( const string& message, const string& account, i * @param message the signed_message structure containing message, meta data and signature * @return true if signature matches */ -bool wallet_api::verify_signed_message( signed_message message )const +bool wallet_api::verify_signed_message( const signed_message& message )const { return my->verify_signed_message( message ); } @@ -1371,13 +1377,13 @@ bool wallet_api::verify_signed_message( 
signed_message message )const * @param message the complete encapsulated message string including separators and line feeds * @return true if signature matches */ -bool wallet_api::verify_encapsulated_message( string message )const +bool wallet_api::verify_encapsulated_message( const string& message )const { return my->verify_encapsulated_message( message ); } -string wallet_api::get_key_label( public_key_type key )const +string wallet_api::get_key_label( const public_key_type& key )const { auto key_itr = my->_wallet.labeled_keys.get().find(key); if( key_itr != my->_wallet.labeled_keys.get().end() ) @@ -1385,12 +1391,12 @@ string wallet_api::get_key_label( public_key_type key )const return string(); } -string wallet_api::get_private_key( public_key_type pubkey )const +string wallet_api::get_private_key( const public_key_type& pubkey )const { return key_to_wif( my->get_private_key( pubkey ) ); } -public_key_type wallet_api::get_public_key( string label )const +public_key_type wallet_api::get_public_key( const string& label )const { try { return fc::variant(label, 1).as( 1 ); } catch ( ... ){} @@ -1400,7 +1406,7 @@ public_key_type wallet_api::get_public_key( string label )const return public_key_type(); } -bool wallet_api::set_key_label( public_key_type key, string label )const +bool wallet_api::set_key_label( const public_key_type& key, const string& label )const { auto result = my->_wallet.labeled_keys.insert( key_label{label,key} ); if( result.second ) return true; @@ -1414,17 +1420,17 @@ bool wallet_api::set_key_label( public_key_type key, string label } return false; } -map wallet_api::get_blind_accounts()const +map> wallet_api::get_blind_accounts()const { - map result; + map> result; for( const auto& item : my->_wallet.labeled_keys ) result[item.label] = item.key; return result; } -map wallet_api::get_my_blind_accounts()const +map> wallet_api::get_my_blind_accounts()const { FC_ASSERT( !is_locked() ); - map result; + map> result; for( const auto& item : my->_wallet.labeled_keys ) { if( my->_keys.find(item.key) != my->_keys.end() ) @@ -1433,14 +1439,14 @@ map wallet_api::get_my_blind_accounts()const return result; } -public_key_type wallet_api::create_blind_account( string label, string brain_key )const +public_key_type wallet_api::create_blind_account( const string& label, const string& p_brain_key )const { FC_ASSERT( !is_locked() ); auto label_itr = my->_wallet.labeled_keys.get().find(label); if( label_itr != my->_wallet.labeled_keys.get().end() ) FC_ASSERT( !"Key with label already exists" ); - brain_key = fc::trim_and_normalize_spaces( brain_key ); + auto brain_key = fc::trim_and_normalize_spaces( p_brain_key ); auto secret = fc::sha256::hash( brain_key.c_str(), brain_key.size() ); auto priv_key = fc::ecc::private_key::regenerate( secret ); public_key_type pub_key = priv_key.get_public_key(); @@ -1453,7 +1459,7 @@ public_key_type wallet_api::create_blind_account( string label, string brain_ return pub_key; } -vector wallet_api::get_blind_balances( string key_or_label )const +vector wallet_api::get_blind_balances( const string& key_or_label )const { vector result; map balances; @@ -1486,10 +1492,10 @@ vector wallet_api::get_blind_balances( string key_or_label )const return result; } -blind_confirmation wallet_api::transfer_from_blind( string from_blind_account_key_or_label, - string to_account_id_or_name, - string amount_in, - string symbol, +blind_confirmation wallet_api::transfer_from_blind( const string& from_blind_account_key_or_label, + const string& to_account_id_or_name, + 
const string& amount_in, + const string& symbol, bool broadcast )const { try { transfer_from_blind_operation from_blind; @@ -1547,20 +1553,20 @@ blind_confirmation wallet_api::transfer_from_blind( string from_blind_account_ke return conf; } FC_CAPTURE_AND_RETHROW( (from_blind_account_key_or_label)(to_account_id_or_name)(amount_in)(symbol) ) } -blind_confirmation wallet_api::blind_transfer( string from_key_or_label, - string to_key_or_label, - string amount_in, - string symbol, +blind_confirmation wallet_api::blind_transfer( const string& from_key_or_label, + const string& to_key_or_label, + const string& amount_in, + const string& symbol, bool broadcast )const { return blind_transfer_help( from_key_or_label, to_key_or_label, amount_in, symbol, broadcast, false ); } -blind_confirmation wallet_api::blind_transfer_help( string from_key_or_label, - string to_key_or_label, - string amount_in, - string symbol, - bool broadcast, - bool to_temp )const +blind_confirmation wallet_api::blind_transfer_help( const string& from_key_or_label, + const string& to_key_or_label, + const string& amount_in, + const string& symbol, + bool broadcast, + bool to_temp )const { blind_confirmation confirm; try { @@ -1733,10 +1739,10 @@ blind_confirmation wallet_api::blind_transfer_help( string from_key_or_label, * Transfers a public balance from @from to one or more blinded balances using a * stealth transfer. */ -blind_confirmation wallet_api::transfer_to_blind( string from_account_id_or_name, - string asset_symbol, +blind_confirmation wallet_api::transfer_to_blind( const string& from_account_id_or_name, + const string& asset_symbol, /* map from key or label to amount */ - vector> to_amounts, + const vector>& to_amounts, bool broadcast )const { try { FC_ASSERT( !is_locked() ); @@ -1820,7 +1826,8 @@ blind_confirmation wallet_api::transfer_to_blind( string from_account_id_or_name return confirm; } FC_CAPTURE_AND_RETHROW( (from_account_id_or_name)(asset_symbol)(to_amounts) ) } -blind_receipt wallet_api::receive_blind_transfer( string confirmation_receipt, string opt_from, string opt_memo )const +blind_receipt wallet_api::receive_blind_transfer( const string& confirmation_receipt, + const string& opt_from, const string& opt_memo )const { FC_ASSERT( !is_locked() ); stealth_confirmation conf(confirmation_receipt); @@ -1897,7 +1904,7 @@ blind_receipt wallet_api::receive_blind_transfer( string confirmation_receipt, s return result; } -vector wallet_api::blind_history( string key_or_account )const +vector wallet_api::blind_history( const string& key_or_account )const { vector result; auto pub_key = get_public_key( key_or_account ); @@ -1915,28 +1922,28 @@ vector wallet_api::blind_history( string key_or_account )const return result; } -order_book wallet_api::get_order_book( const string& base, const string& quote, unsigned limit )const +order_book wallet_api::get_order_book( const string& base, const string& quote, uint32_t limit )const { return( my->_remote_db->get_order_book( base, quote, limit ) ); } // custom operations -signed_transaction wallet_api::account_store_map(string account, string catalog, bool remove, - flat_map> key_values, bool broadcast)const +signed_transaction wallet_api::account_store_map( const string& account, const string& catalog, bool remove, + const flat_map>& key_values, bool broadcast )const { return my->account_store_map(account, catalog, remove, key_values, broadcast); } -vector wallet_api::get_account_storage(string account, string catalog)const +vector wallet_api::get_account_storage( const 
string& account, const string& catalog )const { try { return my->_custom_operations->get_storage_info(account, catalog); } FC_CAPTURE_AND_RETHROW( (account)(catalog) ) } signed_block_with_info::signed_block_with_info( const signed_block& block ) - : signed_block( block ) + : signed_block( block ), + block_id { id() }, + signing_key { signee() } { - block_id = id(); - signing_key = signee(); transaction_ids.reserve( transactions.size() ); for( const processed_transaction& tx : transactions ) transaction_ids.push_back( tx.id() ); @@ -1944,11 +1951,11 @@ signed_block_with_info::signed_block_with_info( const signed_block& block ) vesting_balance_object_with_info::vesting_balance_object_with_info( const vesting_balance_object& vbo, - fc::time_point_sec now ) - : vesting_balance_object( vbo ) + const fc::time_point_sec& now ) + : vesting_balance_object( vbo ), + allowed_withdraw { get_allowed_withdraw( now ) }, + allowed_withdraw_time { now } { - allowed_withdraw = get_allowed_withdraw( now ); - allowed_withdraw_time = now; } } } // graphene::wallet diff --git a/libraries/wallet/wallet_api_impl.hpp b/libraries/wallet/wallet_api_impl.hpp index 3a498dfe46..02994a0b86 100644 --- a/libraries/wallet/wallet_api_impl.hpp +++ b/libraries/wallet/wallet_api_impl.hpp @@ -367,7 +367,8 @@ class wallet_api_impl signed_transaction issue_asset(string to_account, string amount, string symbol, string memo, bool broadcast = false); - std::map> get_result_formatters() const; + std::map< string, std::function< string( const fc::variant&, const fc::variants& ) >, std::less<> > + get_result_formatters() const; signed_transaction propose_parameter_change( const string& proposing_account, fc::time_point_sec expiration_time, const variant_object& changed_values, bool broadcast = false); diff --git a/libraries/wallet/wallet_results.cpp b/libraries/wallet/wallet_results.cpp index 8bdefff6b7..a47e28476f 100644 --- a/libraries/wallet/wallet_results.cpp +++ b/libraries/wallet/wallet_results.cpp @@ -27,21 +27,22 @@ namespace graphene { namespace wallet { namespace detail { -std::map> wallet_api_impl::get_result_formatters() const + std::map< string, std::function< string( const fc::variant&, const fc::variants& ) >, std::less<> > + wallet_api_impl::get_result_formatters() const { - std::map > m; + std::map< string, std::function< string( const fc::variant&, const fc::variants& ) >, std::less<> > m; - m["help"] = [](variant result, const fc::variants&) + m["help"] = [](const variant& result, const fc::variants&) { return result.get_string(); }; - m["gethelp"] = [](variant result, const fc::variants&) + m["gethelp"] = [](const variant& result, const fc::variants&) { return result.get_string(); }; - auto format_account_history = [this](variant result, const fc::variants&) + auto format_account_history = [this](const variant& result, const fc::variants&) { auto r = result.as>( GRAPHENE_MAX_NESTED_OBJECTS ); std::stringstream ss; @@ -64,7 +65,7 @@ std::map> wallet_a m["get_account_history"] = format_account_history; m["get_relative_account_history"] = format_account_history; - m["get_account_history_by_operations"] = [this](variant result, const fc::variants&) { + m["get_account_history_by_operations"] = [this](const variant& result, const fc::variants&) { auto r = result.as( GRAPHENE_MAX_NESTED_OBJECTS ); std::stringstream ss; ss << "total_count : " << r.total_count << " \n"; @@ -85,7 +86,7 @@ std::map> wallet_a return ss.str(); }; - auto format_balances = [this](variant result, const fc::variants&) + auto format_balances = 
[this](const variant& result, const fc::variants&) { auto r = result.as>( GRAPHENE_MAX_NESTED_OBJECTS ); vector asset_recs; @@ -103,7 +104,7 @@ std::map> wallet_a m["list_account_balances"] = format_balances; m["get_blind_balances"] = format_balances; - auto format_blind_transfers = [this](variant result, const fc::variants&) + auto format_blind_transfers = [this](const variant& result, const fc::variants&) { auto r = result.as( GRAPHENE_MAX_NESTED_OBJECTS ); std::stringstream ss; @@ -121,7 +122,7 @@ std::map> wallet_a m["transfer_to_blind"] = format_blind_transfers; m["blind_transfer"] = format_blind_transfers; - m["receive_blind_transfer"] = [this](variant result, const fc::variants&) + m["receive_blind_transfer"] = [this](const variant& result, const fc::variants&) { auto r = result.as( GRAPHENE_MAX_NESTED_OBJECTS ); std::stringstream ss; @@ -131,7 +132,7 @@ std::map> wallet_a return ss.str(); }; - m["blind_history"] = [this](variant result, const fc::variants&) + m["blind_history"] = [this](const variant& result, const fc::variants&) { auto records = result.as>( GRAPHENE_MAX_NESTED_OBJECTS ); std::stringstream ss; @@ -148,7 +149,7 @@ std::map> wallet_a return ss.str(); }; - m["get_order_book"] = [](variant result, const fc::variants&) + m["get_order_book"] = [](const variant& result, const fc::variants&) { auto orders = result.as( GRAPHENE_MAX_NESTED_OBJECTS ); auto bids = orders.bids; @@ -236,7 +237,7 @@ std::map> wallet_a return ss.str(); }; - m["sign_message"] = [](variant result, const fc::variants&) + m["sign_message"] = [](const variant& result, const fc::variants&) { auto r = result.as( GRAPHENE_MAX_NESTED_OBJECTS ); diff --git a/tests/tests/api_limit_tests.cpp b/tests/tests/api_limit_tests.cpp index 40baf71808..18167074b4 100644 --- a/tests/tests/api_limit_tests.cpp +++ b/tests/tests/api_limit_tests.cpp @@ -250,7 +250,7 @@ BOOST_AUTO_TEST_CASE( api_limit_lookup_accounts ) { graphene::app::database_api db_api( db, &( app.get_options() )); ACTOR(bob); GRAPHENE_CHECK_THROW(db_api.lookup_accounts("bob",220), fc::exception); - map result =db_api.lookup_accounts("bob",190); + auto result =db_api.lookup_accounts("bob",190); BOOST_REQUIRE_EQUAL( result.size(), 17u); } catch (fc::exception& e) { @@ -264,7 +264,7 @@ BOOST_AUTO_TEST_CASE( api_limit_lookup_witness_accounts ) { graphene::app::database_api db_api( db, &( app.get_options() )); ACTORS((bob)) ; GRAPHENE_CHECK_THROW(db_api.lookup_witness_accounts("bob",220), fc::exception); - map result =db_api.lookup_witness_accounts("bob",190); + auto result =db_api.lookup_witness_accounts("bob",190); BOOST_REQUIRE_EQUAL( result.size(), 10u); } catch (fc::exception& e) { @@ -549,7 +549,7 @@ BOOST_AUTO_TEST_CASE( api_limit_lookup_committee_member_accounts ) { graphene::app::database_api db_api( db, &( app.get_options() )); ACTORS((bob)); GRAPHENE_CHECK_THROW(db_api.lookup_committee_member_accounts("bob",220), fc::exception); - std::map result =db_api.lookup_committee_member_accounts("bob",190); + auto result =db_api.lookup_committee_member_accounts("bob",190); BOOST_REQUIRE_EQUAL( result.size(), 10u); } catch (fc::exception& e) { From 5f803130b66ee52ae0e4fdbe67f54496aa7e6106 Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 26 Oct 2022 22:02:54 +0000 Subject: [PATCH 310/338] Fix a long line --- libraries/wallet/include/graphene/wallet/wallet.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/wallet/include/graphene/wallet/wallet.hpp b/libraries/wallet/include/graphene/wallet/wallet.hpp index eaf740e9a7..bc5d965650 
100644 --- a/libraries/wallet/include/graphene/wallet/wallet.hpp +++ b/libraries/wallet/include/graphene/wallet/wallet.hpp @@ -826,7 +826,7 @@ class wallet_api /** Sign a message using an account's memo key. The signature is generated as in - * https://github.com/xeroc/python-graphenelib/blob/d9634d74273ebacc92555499eca7c444217ecba0/graphenecommon/message.py#L64 . + * https://github.com/xeroc/python-graphenelib/blob/d9634d74/graphenecommon/message.py#L64 . * * @param signer the name or id of signing account * @param message text to sign From 805e052f8c5dbdbe55332a6f5058d359893b141d Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 27 Oct 2022 10:50:25 +0000 Subject: [PATCH 311/338] Bump FC for typename of map> --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index 41f31f8c60..4d024a83b7 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 41f31f8c60561ef8f885f326abc21210cdd7db40 +Subproject commit 4d024a83b774da0e186c0c6c070e695f563da373 From c44e91c4ea94a587a279e6f2de17ecc7196e7add Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 27 Oct 2022 15:57:32 +0000 Subject: [PATCH 312/338] Avoid using reserved identifier "remove" --- libraries/wallet/include/graphene/wallet/wallet.hpp | 4 ++-- libraries/wallet/wallet.cpp | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/libraries/wallet/include/graphene/wallet/wallet.hpp b/libraries/wallet/include/graphene/wallet/wallet.hpp index bc5d965650..d444e656b9 100644 --- a/libraries/wallet/include/graphene/wallet/wallet.hpp +++ b/libraries/wallet/include/graphene/wallet/wallet.hpp @@ -1802,13 +1802,13 @@ class wallet_api * * @param account The account name or ID that we are adding additional information to. * @param catalog The name of the catalog the operation will insert data to. - * @param remove true if you want to remove stuff from a catalog. + * @param is_to_remove true if you want to remove stuff from a catalog. 
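The parameter rename above and the earlier switch of these wallet API signatures to const-reference arguments follow one theme: read-only string and map parameters are no longer copied on every call, the result-formatter maps gain a transparent std::less<> comparator (which allows lookups without building a temporary std::string key), and the boolean drops the name `remove`, which collides with the C library's remove() function and is presumably the "reserved identifier" the commit title refers to. The stand-alone sketch below illustrates the pattern only; demo_api, key_value_map and all values are invented for illustration and are not part of the BitShares code.

    #include <functional>
    #include <iostream>
    #include <map>
    #include <optional>
    #include <string>

    // A transparent comparator (std::less<>) enables heterogeneous lookup.
    using key_value_map = std::map<std::string, std::optional<std::string>, std::less<>>;

    struct demo_api
    {
        // Read-only parameters are taken by const reference, so neither the strings
        // nor the map are copied when the method is called.
        std::string account_store_map( const std::string& account, const std::string& catalog,
                                       bool is_to_remove, const key_value_map& key_values ) const
        {
            const std::string verb = is_to_remove ? "remove" : "store";
            return verb + " " + std::to_string( key_values.size() )
                   + " key(s) in " + account + "/" + catalog;
        }
    };

    int main()
    {
        demo_api api;
        const std::string account = "alice";
        const std::string catalog = "settings";
        key_value_map kv{ { "language", std::string( "en" ) } };

        // The transparent comparator lets find() take the string literal directly,
        // without first constructing a temporary std::string key.
        if( kv.find( "language" ) != kv.end() )
            std::cout << api.account_store_map( account, catalog, false, kv ) << "\n";
        return 0;
    }

The behaviour of the real methods is unchanged by the patch; the const references only remove per-call copies of arguments the methods never modify.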
* @param key_values The map to be inserted/removed to/from the catalog * @param broadcast true if you wish to broadcast the transaction * * @return The signed transaction */ - signed_transaction account_store_map( const string& account, const string& catalog, bool remove, + signed_transaction account_store_map( const string& account, const string& catalog, bool is_to_remove, const flat_map>& key_values, bool broadcast )const; /** diff --git a/libraries/wallet/wallet.cpp b/libraries/wallet/wallet.cpp index 848e26c711..616ee54b64 100644 --- a/libraries/wallet/wallet.cpp +++ b/libraries/wallet/wallet.cpp @@ -1928,10 +1928,10 @@ order_book wallet_api::get_order_book( const string& base, const string& quote, } // custom operations -signed_transaction wallet_api::account_store_map( const string& account, const string& catalog, bool remove, +signed_transaction wallet_api::account_store_map( const string& account, const string& catalog, bool is_to_remove, const flat_map>& key_values, bool broadcast )const { - return my->account_store_map(account, catalog, remove, key_values, broadcast); + return my->account_store_map(account, catalog, is_to_remove, key_values, broadcast); } vector wallet_api::get_account_storage( const string& account, const string& catalog )const From d8090cae4cf418b57ab94053edffc2e8e29b6b82 Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 27 Oct 2022 20:46:10 +0000 Subject: [PATCH 313/338] Rename a variable --- libraries/chain/balance_evaluator.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/libraries/chain/balance_evaluator.cpp b/libraries/chain/balance_evaluator.cpp index 2d0c4810bf..c91a2b892a 100644 --- a/libraries/chain/balance_evaluator.cpp +++ b/libraries/chain/balance_evaluator.cpp @@ -31,15 +31,15 @@ void_result balance_claim_evaluator::do_evaluate(const balance_claim_operation& database& d = db(); balance = &op.balance_to_claim(d); - bool is_balance_owner_match = ( + bool is_balance_owner = ( op.balance_owner_key == balance->owner || pts_address(op.balance_owner_key, false) == balance->owner || // version = 56 (default) pts_address(op.balance_owner_key, true) == balance->owner ); // version = 56 (default) - is_balance_owner_match = ( - is_balance_owner_match || + is_balance_owner = ( + is_balance_owner || pts_address(op.balance_owner_key, false, 0) == balance->owner || pts_address(op.balance_owner_key, true, 0) == balance->owner ); - GRAPHENE_ASSERT( is_balance_owner_match, + GRAPHENE_ASSERT( is_balance_owner, balance_claim_owner_mismatch, "Balance owner key was specified as '${op}' but balance's actual owner is '${bal}'", ("op", op.balance_owner_key) From 31932639e0d316c6b58f2078c533720eee1f94e6 Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 27 Oct 2022 21:18:00 +0000 Subject: [PATCH 314/338] Extend transaction_handle_type to 32 bits and handle overflow --- libraries/wallet/include/graphene/wallet/wallet_structs.hpp | 2 +- libraries/wallet/wallet_builder.cpp | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/libraries/wallet/include/graphene/wallet/wallet_structs.hpp b/libraries/wallet/include/graphene/wallet/wallet_structs.hpp index b4a69306b2..c4007127f7 100644 --- a/libraries/wallet/include/graphene/wallet/wallet_structs.hpp +++ b/libraries/wallet/include/graphene/wallet/wallet_structs.hpp @@ -34,7 +34,7 @@ using std::vector; namespace graphene { namespace wallet { -using transaction_handle_type = uint16_t; +using transaction_handle_type = uint32_t; struct plain_keys { diff --git 
a/libraries/wallet/wallet_builder.cpp b/libraries/wallet/wallet_builder.cpp index 864daf28f8..b7005fbbaf 100644 --- a/libraries/wallet/wallet_builder.cpp +++ b/libraries/wallet/wallet_builder.cpp @@ -27,9 +27,9 @@ namespace graphene { namespace wallet { namespace detail { transaction_handle_type wallet_api_impl::begin_builder_transaction() { - int trx_handle = _builder_transactions.empty()? 0 + transaction_handle_type trx_handle = _builder_transactions.empty() ? 0 : (--_builder_transactions.end())->first + 1; - _builder_transactions[trx_handle]; + _builder_transactions[trx_handle] = {}; // Reset if exists already return trx_handle; } From 830555d1412e2a19c744b243ad3a46b4d1cdc4d9 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 28 Oct 2022 13:43:43 +0000 Subject: [PATCH 315/338] Declare database_api::get_full_accounts() as const --- libraries/app/database_api.cpp | 2 +- libraries/app/include/graphene/app/database_api.hpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/app/database_api.cpp b/libraries/app/database_api.cpp index dfb1bac079..01b695a8a1 100644 --- a/libraries/app/database_api.cpp +++ b/libraries/app/database_api.cpp @@ -514,7 +514,7 @@ vector> database_api_impl::get_accounts( const vector> database_api::get_full_accounts( const vector& names_or_ids, - const optional& subscribe ) + const optional& subscribe )const { return my->get_full_accounts( names_or_ids, subscribe ); } diff --git a/libraries/app/include/graphene/app/database_api.hpp b/libraries/app/include/graphene/app/database_api.hpp index b101dc84f0..479b0a0549 100644 --- a/libraries/app/include/graphene/app/database_api.hpp +++ b/libraries/app/include/graphene/app/database_api.hpp @@ -308,7 +308,7 @@ class database_api */ map> get_full_accounts( const vector& names_or_ids, - const optional& subscribe = optional() ); + const optional& subscribe = optional() )const; /** * @brief Returns vector of voting power sorted by reverse vp_active From ad7d0b901de928ac10cd4b6f6389ade41fe5b667 Mon Sep 17 00:00:00 2001 From: abitmore Date: Thu, 27 Oct 2022 23:33:55 +0000 Subject: [PATCH 316/338] Remove .travis.yml and related files --- .travis.yml | 41 --------- programs/build_helpers/build_and_test | 15 ---- programs/build_helpers/build_for_cache | 15 ---- programs/build_helpers/buildstep | 55 ------------ programs/build_helpers/scan_with_sonar_step_1 | 19 ----- programs/build_helpers/scan_with_sonar_step_2 | 24 ------ programs/build_helpers/scan_with_sonar_step_3 | 26 ------ programs/build_helpers/set_sonar_branch | 85 ------------------- 8 files changed, 280 deletions(-) delete mode 100644 .travis.yml delete mode 100755 programs/build_helpers/build_and_test delete mode 100755 programs/build_helpers/build_for_cache delete mode 100755 programs/build_helpers/buildstep delete mode 100755 programs/build_helpers/scan_with_sonar_step_1 delete mode 100755 programs/build_helpers/scan_with_sonar_step_2 delete mode 100755 programs/build_helpers/scan_with_sonar_step_3 delete mode 100755 programs/build_helpers/set_sonar_branch diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 58e58a277d..0000000000 --- a/.travis.yml +++ /dev/null @@ -1,41 +0,0 @@ -language: c++ - -cache: - ccache: true - directories: - - sonar_cache - -git: - depth: 1 - -dist: xenial - -sudo: true - -install: - - sudo apt-get install --allow-unauthenticated libboost-thread-dev libboost-iostreams-dev libboost-date-time-dev libboost-system-dev libboost-filesystem-dev libboost-program-options-dev libboost-chrono-dev 
libboost-test-dev libboost-context-dev libboost-regex-dev libboost-coroutine-dev cmake parallel - -addons: - sonarcloud: - organization: "flwyiq7go36p6lipr64tbesy5jayad3q" - token: - secure: "Ik4xQhs9imtsFIC1SMAPmdLId9lVadY/4PEgo5tM4M5cQRvyt4xeuMMV+CRIT6tGEEqF71ea74qVJTxT7qinWZ3kmHliFjbqDxk1FbjCpK6NGQDyTdfWMVJFIlk7WefvtGAwFBkf6pSTs553bKNNM0HbBYQGKe08waLwv7R+lOmVjTTKIRF/cCVw+C5QQZdXFnUMTg+mRuUqGk4WvNNPmcBfkX0ekHPrXwAD5ATVS1q0iloA0nzHq8CPNmPE+IyXdPw0EBp+fl3cL9MgrlwRbELxrnCKFy+ObdjhDj7z3FDIxDe+03gVlgd+6Fame+9EJCeeeNLF4G4qNR1sLEvHRqVz12/NYnRU9hQL0c/jJtiUquOJA5+HqrhhB9XUZjS1xbHV3aIU5PR0bdDP6MKatvIVwRhwxwhaDXh7VSimis8eL+LvXT7EO+rGjco0c17RuzZpFCsKmXCej4Q8iDBMdOIWwe2WuWi8zb6MFvnLyK2EcM53hAn2yMwU+nprbpHwzU5oJTFZLD+J78zCSGk7uu7vsF+EEnheMwfqafP9MpMEXGXaXZiq7QKy3KvxQTg+1ozPIu+fgxvY0xdyrjJHOSJlrvXN7osjD4IDTs6D5cLAZ04WGIKsulZDr7ZN5n3gmA9h4cfhJsIEia0uQzLmWnfF6RksxWElK1i1+xmse7E=" - -env: - global: - - CCACHE_COMPRESS=exists_means_true - - CCACHE_MAXSIZE=1Gi - - CCACHE_SLOPPINESS=include_file_ctime,include_file_mtime,time_macros - -jobs: - include: - - stage: build for cache - script: ./programs/build_helpers/build_for_cache - - stage: build and test - script: ./programs/build_helpers/build_and_test - - stage: scan with sonar, step 1 - script: ./programs/build_helpers/scan_with_sonar_step_1 - - stage: scan with sonar, step 2 - script: ./programs/build_helpers/scan_with_sonar_step_2 - - stage: scan with sonar, step 3 - script: ./programs/build_helpers/scan_with_sonar_step_3 diff --git a/programs/build_helpers/build_and_test b/programs/build_helpers/build_and_test deleted file mode 100755 index 70e7056e3a..0000000000 --- a/programs/build_helpers/build_and_test +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -set -e - -programs/build_helpers/buildstep -s 3500 -ccache -s -programs/build_helpers/buildstep Prepare 1 "sed -i '/tests/d' libraries/fc/CMakeLists.txt" -programs/build_helpers/buildstep cmake 5 "cmake -DCMAKE_BUILD_TYPE=Debug -DCMAKE_C_FLAGS=--coverage -DCMAKE_CXX_FLAGS=--coverage -DBoost_USE_STATIC_LIBS=OFF -DCMAKE_CXX_OUTPUT_EXTENSION_REPLACE=ON ." -programs/build_helpers/buildstep make.some.targets 2400 "make -j 2 witness_node cli_wallet chain_test cli_test js_operation_serializer get_dev_key network_mapper" -programs/build_helpers/buildstep make.others 600 "make" -set -o pipefail -programs/build_helpers/buildstep run.chain_test 300 "libraries/fc/tests/run-parallel-tests.sh tests/chain_test -l message" -programs/build_helpers/buildstep run.cli_test 120 "libraries/fc/tests/run-parallel-tests.sh tests/cli_test -l message" -programs/build_helpers/buildstep end 0 -ccache -s - diff --git a/programs/build_helpers/build_for_cache b/programs/build_helpers/build_for_cache deleted file mode 100755 index 3586c746b6..0000000000 --- a/programs/build_helpers/build_for_cache +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -set -e - -programs/build_helpers/buildstep -s 3500 -ccache -s -programs/build_helpers/buildstep Prepare 1 "sed -i '/tests/d' libraries/fc/CMakeLists.txt" -programs/build_helpers/buildstep cmake 5 "cmake -DCMAKE_BUILD_TYPE=Debug -DCMAKE_C_FLAGS=--coverage -DCMAKE_CXX_FLAGS=--coverage -DBoost_USE_STATIC_LIBS=OFF -DCMAKE_CXX_OUTPUT_EXTENSION_REPLACE=ON ." 
-programs/build_helpers/buildstep make.fc 230 "make -j 2 fc" -programs/build_helpers/buildstep make.custom_auths 700 "make -j 1 graphene_protocol_custom_auths" -programs/build_helpers/buildstep make.protocol 250 "make -j 2 graphene_protocol" -programs/build_helpers/buildstep make.chain 450 "make -j 2 graphene_chain" -programs/build_helpers/buildstep make.node 600 "make -j 2 witness_node" -programs/build_helpers/buildstep make.cli 500 "make -j 2 cli_wallet" -programs/build_helpers/buildstep end 0 -ccache -s diff --git a/programs/build_helpers/buildstep b/programs/build_helpers/buildstep deleted file mode 100755 index 44d5458a10..0000000000 --- a/programs/build_helpers/buildstep +++ /dev/null @@ -1,55 +0,0 @@ -#!/bin/sh - -usage () { - echo Usage: - echo " ${0##*/} [-h | --help] Display this help message" - echo " ${0##*/} -s | --start Initialize timing" - echo " ${0##*/} " - echo "The last form executes build step consisting of shell " - echo "if imated time is still available, otherwise it fails fast." - echo " and must be specified in seconds." - exit $1 -} - -if [ "$#" = 0 -o "$1" = "--help" -o "$1" = "-h" ]; then - usage `test "$#" = 1; echo $?` -fi - -NOW="$(date +%s)" - -if [ "$1" = "--start" -o "$1" = "-s" ]; then - if [ "$#" != 2 ]; then - usage 1 - fi - echo "$2 $NOW" >_start_time - echo "Starting at $(date --date=@$NOW)" - exit 0 -fi - -NAME="$1" -EST="$2" -CMD="$3" - -if [ ! -r _start_time ]; then - echo "Need to initialize with '$0 -s ' first!" 1>&2 - exit 1 -fi - -read max begin prev_name prev_begin <_start_time - -if [ "$prev_name" != "" ]; then - echo "'$prev_name' took $(($NOW - $prev_begin))s" -fi - -if [ "$CMD" != "" ]; then - if [ $(($NOW - $begin + $EST)) -lt $max ]; then - echo "Running '$NAME' at $NOW..." - echo "sh -c '$CMD'" - echo "$max $begin $NAME $NOW" >_start_time - exec bash -c "$CMD" - fi - echo "$(($begin + $max - $NOW))s left - insufficient to run '$NAME', exiting!" 1>&2 - exit 1 -fi - -exit 0 diff --git a/programs/build_helpers/scan_with_sonar_step_1 b/programs/build_helpers/scan_with_sonar_step_1 deleted file mode 100755 index 8dbbcf9856..0000000000 --- a/programs/build_helpers/scan_with_sonar_step_1 +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -set -e - -programs/build_helpers/buildstep -s 3500 -ccache -s -programs/build_helpers/buildstep Prepare 1 "sed -i '/tests/d' libraries/fc/CMakeLists.txt" -programs/build_helpers/buildstep cmake 5 "cmake -DCMAKE_BUILD_TYPE=Debug -DCMAKE_C_FLAGS=--coverage -DCMAKE_CXX_FLAGS=--coverage -DBoost_USE_STATIC_LIBS=OFF -DCMAKE_CXX_OUTPUT_EXTENSION_REPLACE=ON ." -programs/build_helpers/buildstep make.programs 1800 "programs/build_helpers/make_with_sonar bw-output -j 2 witness_node cli_wallet js_operation_serializer get_dev_key network_mapper app_test chain_test cli_test" -set -o pipefail -programs/build_helpers/buildstep prepare.sonar 20 "find libraries/[acdenptuw]*/CMakeFiles/*.dir programs/[cdgjsw]*/CMakeFiles/*.dir -type d -print | while read d; do gcov -o \"\$d\" \"\${d/CMakeFiles*.dir//}\"/*.cpp; done >/dev/null; programs/build_helpers/set_sonar_branch sonar-project.properties" -du -hs sonar_cache -# The first pass, skip some files. 
This will remove the skipped files from the cache, but is an acceptable trade-off -programs/build_helpers/buildstep prepare.sonar.part1 1 "cp sonar-project.properties sonar-project.properties.bak; sed -i '/sonar\.exclusions=/d;s/#sonar\.exclusions.part1/sonar.exclusions/' sonar-project.properties" -programs/build_helpers/buildstep run.sonar.part1 1500 "which sonar-scanner && sonar-scanner" -programs/build_helpers/buildstep post.sonar.part1 1 "cp sonar-project.properties.bak sonar-project.properties" -du -hs sonar_cache -programs/build_helpers/buildstep end 0 -ccache -s - diff --git a/programs/build_helpers/scan_with_sonar_step_2 b/programs/build_helpers/scan_with_sonar_step_2 deleted file mode 100755 index 745b2bc12f..0000000000 --- a/programs/build_helpers/scan_with_sonar_step_2 +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash -set -e - -programs/build_helpers/buildstep -s 3500 -ccache -s -programs/build_helpers/buildstep Prepare 1 "sed -i '/tests/d' libraries/fc/CMakeLists.txt" -programs/build_helpers/buildstep cmake 5 "cmake -DCMAKE_BUILD_TYPE=Debug -DCMAKE_C_FLAGS=--coverage -DCMAKE_CXX_FLAGS=--coverage -DBoost_USE_STATIC_LIBS=OFF -DCMAKE_CXX_OUTPUT_EXTENSION_REPLACE=ON ." -programs/build_helpers/buildstep make.programs 1800 "programs/build_helpers/make_with_sonar bw-output -j 2 witness_node cli_wallet js_operation_serializer get_dev_key network_mapper app_test chain_test cli_test" -set -o pipefail -programs/build_helpers/buildstep prepare.sonar 20 "find libraries/[acdenptuw]*/CMakeFiles/*.dir programs/[cdgjsw]*/CMakeFiles/*.dir -type d -print | while read d; do gcov -o \"\$d\" \"\${d/CMakeFiles*.dir//}\"/*.cpp; done >/dev/null; programs/build_helpers/set_sonar_branch sonar-project.properties" -du -hs sonar_cache -# The first pass, skip some files. This will remove the skipped files from the cache, but is an acceptable trade-off -programs/build_helpers/buildstep prepare.sonar.part1 1 "cp sonar-project.properties sonar-project.properties.bak; sed -i '/sonar\.exclusions=/d;s/#sonar\.exclusions.part1/sonar.exclusions/' sonar-project.properties" -programs/build_helpers/buildstep run.sonar.part1 1500 "which sonar-scanner && sonar-scanner" -programs/build_helpers/buildstep post.sonar.part1 1 "cp sonar-project.properties.bak sonar-project.properties" -du -hs sonar_cache -# The second pass, scan more files -programs/build_helpers/buildstep prepare.sonar.part2 1 "cp sonar-project.properties sonar-project.properties.bak; sed -i '/sonar\.exclusions=/d;s/#sonar\.exclusions.part2/sonar.exclusions/' sonar-project.properties" -programs/build_helpers/buildstep run.sonar.part2 1200 "which sonar-scanner && sonar-scanner" -programs/build_helpers/buildstep post.sonar.part2 1 "cp sonar-project.properties.bak sonar-project.properties" -du -hs sonar_cache -programs/build_helpers/buildstep end 0 -ccache -s - diff --git a/programs/build_helpers/scan_with_sonar_step_3 b/programs/build_helpers/scan_with_sonar_step_3 deleted file mode 100755 index 6ab41f23d7..0000000000 --- a/programs/build_helpers/scan_with_sonar_step_3 +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -set -e - -programs/build_helpers/buildstep -s 3500 -ccache -s -programs/build_helpers/buildstep Prepare 1 "sed -i '/tests/d' libraries/fc/CMakeLists.txt" -programs/build_helpers/buildstep cmake 5 "cmake -DCMAKE_BUILD_TYPE=Debug -DCMAKE_C_FLAGS=--coverage -DCMAKE_CXX_FLAGS=--coverage -DBoost_USE_STATIC_LIBS=OFF -DCMAKE_CXX_OUTPUT_EXTENSION_REPLACE=ON ." 
-programs/build_helpers/buildstep make.programs 1800 "programs/build_helpers/make_with_sonar bw-output -j 2 witness_node cli_wallet js_operation_serializer get_dev_key network_mapper app_test chain_test cli_test" -set -o pipefail -programs/build_helpers/buildstep run.chain_test 300 "libraries/fc/tests/run-parallel-tests.sh tests/chain_test -l message" -programs/build_helpers/buildstep run.cli_test 120 "libraries/fc/tests/run-parallel-tests.sh tests/cli_test -l message" -programs/build_helpers/buildstep run.app_test 120 "tests/app_test -l message" -programs/build_helpers/buildstep prepare.sonar 20 "find libraries/[acdenptuw]*/CMakeFiles/*.dir programs/[cdgjsw]*/CMakeFiles/*.dir -type d -print | while read d; do gcov -o \"\$d\" \"\${d/CMakeFiles*.dir//}\"/*.cpp; done >/dev/null; programs/build_helpers/set_sonar_branch sonar-project.properties" -du -hs sonar_cache -# The first pass is skipped here. -# The second pass, scan some files. -programs/build_helpers/buildstep prepare.sonar.part2 1 "cp sonar-project.properties sonar-project.properties.bak; sed -i '/sonar\.exclusions=/d;s/#sonar\.exclusions.part2/sonar.exclusions/' sonar-project.properties" -programs/build_helpers/buildstep run.sonar.part2 1200 "which sonar-scanner && sonar-scanner" -programs/build_helpers/buildstep post.sonar.part2 1 "cp sonar-project.properties.bak sonar-project.properties" -du -hs sonar_cache -# The third pass, scan all files -programs/build_helpers/buildstep run.sonar.full 1200 "which sonar-scanner && sonar-scanner" -du -hs sonar_cache -programs/build_helpers/buildstep end 0 -ccache -s - diff --git a/programs/build_helpers/set_sonar_branch b/programs/build_helpers/set_sonar_branch deleted file mode 100755 index fe2f9a0982..0000000000 --- a/programs/build_helpers/set_sonar_branch +++ /dev/null @@ -1,85 +0,0 @@ -#!/bin/sh - -# Relevant variables set by travis: -# TRAVIS_BRANCH: -# * for push builds, or builds not triggered by a pull request, this is the -# name of the branch. -# * for builds triggered by a pull request this is the name of the branch -# targeted by the pull request. -# * for builds triggered by a tag, this is the same as the name of the tag -# (see TRAVIS_TAG). -# TRAVIS_PULL_REQUEST: The pull request number if the current job is a pull -# request, “false” if it’s not a pull request. -# TRAVIS_TAG: If the current build is for a git tag, this variable is set to -# the tag’s name. 
- -if [ "$#" != 1 ]; then - echo "Usage: $0 " 1>&2 - exit 1 -fi - -clear_branch () { - sed -i '/sonar\.branch/d' "$1" -} - -ORIGINAL_TARGET="$( grep 'sonar\.branch\.target' "$1" | sed 's=^.*[:=] *==' )" - -BRANCH= -TARGET="$ORIGINAL_TARGET" - -if [ -n "$TRAVIS_PULL_REQUEST" -a "$TRAVIS_PULL_REQUEST" != "false" ]; then - # PRs work per default, remove sonar.branch.* since they only work with sonar.pullrequest.* - echo "Detected PR '$TRAVIS_PULL_REQUEST'" - TARGET= -elif [ -n "$TRAVIS_TAG" ]; then - # Tag build is either master or testnet - echo "Detected tag '$TRAVIS_TAG'" - BRANCH="$TRAVIS_BRANCH" - case "$TRAVIS_TAG" in - *test*) TARGET=testnet; ;; - *) TARGET=master; ;; - esac -else - BRANCH="$TRAVIS_BRANCH" - case "$TRAVIS_BRANCH" in - master|develop|testnet|hardfork) - # Long-lived branches stand for themselves, no target - echo "Detected long-lived branch '$TRAVIS_BRANCH'" - TARGET= - ;; - *test*release*) - # Testnet release branch will be merged into testnet - echo "Detected testnet release branch '$TRAVIS_BRANCH'" - TARGET=testnet - ;; - *release*) - # Release branch will be merged into default (master) - echo "Detected release branch '$TRAVIS_BRANCH'" - TARGET=master - ;; - *) - # All other branches should have sonar.branch.target in their - # sonar.properties, leave it unchanged - echo "Detected normal branch '$TRAVIS_BRANCH'" - esac -fi - -echo "Branch '$BRANCH', target '$TARGET'" - -git fetch --no-tags --unshallow -git fetch --no-tags origin +refs/heads/$TRAVIS_BRANCH:refs/remotes/origin/$TRAVIS_BRANCH -if [ -n "$TARGET" ]; then - git fetch --no-tags origin +refs/heads/$TARGET:refs/remotes/origin/$TARGET -fi - -if [ "x$TARGET" != "x$ORIGINAL_TARGET" ]; then - clear_branch "$1" - if [ -n "$TARGET" ]; then - echo "sonar.branch.target=$TARGET" >>"$1" - fi -fi -if [ -n "$BRANCH" ]; then - echo "sonar.branch.name=$BRANCH" >>"$1" -fi - -exit 0 From 9299b52557612c87bb6ffd3a5d2c7aedcc74ada7 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 28 Oct 2022 14:12:01 +0000 Subject: [PATCH 317/338] Update dependency versions used in Github Actions - actions/checkout@v2 -> v3 - actions/cache@v2 -> v3 - docker/setup-buildx-action@v1 -> v2 - docker/build-push-action@v2 -> v3 - docker/login-action@v1 -> v2 - docker://elasticsearch:7.17.5 -> 7.17.7 - CURL_VERSION: 7.67.0 -> 7.86.0 - OPENSSL_VERSION: 1.1.1d -> 1.1.1q --- .github/workflows/build-and-test.mac.yml | 4 ++-- .github/workflows/build-and-test.ubuntu-debug.yml | 6 +++--- .github/workflows/build-and-test.ubuntu-release.yml | 6 +++--- .github/workflows/build-and-test.win.yml | 12 ++++++------ .github/workflows/build-docker.yml | 10 +++++----- .github/workflows/sonar-scan.yml | 6 +++--- 6 files changed, 22 insertions(+), 22 deletions(-) diff --git a/.github/workflows/build-and-test.mac.yml b/.github/workflows/build-and-test.mac.yml index 8817ab1e9f..edc02f659c 100644 --- a/.github/workflows/build-and-test.mac.yml +++ b/.github/workflows/build-and-test.mac.yml @@ -17,7 +17,7 @@ jobs: brew install ccache brew install parallel brew install bitshares/boost/boost@1.69 - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: submodules: recursive - name: Configure @@ -31,7 +31,7 @@ jobs: -D OPENSSL_ROOT_DIR=/usr/local/opt/openssl@1.1 \ .. 
- name: Load Cache - uses: actions/cache@v2 + uses: actions/cache@v3 with: path: ccache key: ccache-${{ matrix.os }}-${{ github.ref }}-${{ github.sha }} diff --git a/.github/workflows/build-and-test.ubuntu-debug.yml b/.github/workflows/build-and-test.ubuntu-debug.yml index 5b45178185..9c626766ca 100644 --- a/.github/workflows/build-and-test.ubuntu-debug.yml +++ b/.github/workflows/build-and-test.ubuntu-debug.yml @@ -12,7 +12,7 @@ jobs: runs-on: ${{ matrix.os }} services: elasticsearch: - image: docker://elasticsearch:7.17.5 + image: docker://elasticsearch:7.17.7 options: --env discovery.type=single-node --publish 9200:9200 --publish 9300:9300 steps: - name: Install dependencies @@ -43,7 +43,7 @@ jobs: df -h sudo du -hs /mnt/* sudo ls -alr /mnt/ - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: submodules: recursive - name: Configure @@ -77,7 +77,7 @@ jobs: .. popd - name: Load Cache - uses: actions/cache@v2 + uses: actions/cache@v3 with: path: ccache key: ccache-debug-${{ matrix.os }}-${{ github.ref }}-${{ github.sha }} diff --git a/.github/workflows/build-and-test.ubuntu-release.yml b/.github/workflows/build-and-test.ubuntu-release.yml index 42a9886de0..a2bd68ebe9 100644 --- a/.github/workflows/build-and-test.ubuntu-release.yml +++ b/.github/workflows/build-and-test.ubuntu-release.yml @@ -12,7 +12,7 @@ jobs: runs-on: ${{ matrix.os }} services: elasticsearch: - image: docker://elasticsearch:7.17.5 + image: docker://elasticsearch:7.17.7 options: --env discovery.type=single-node --publish 9200:9200 --publish 9300:9300 steps: - name: Install dependencies @@ -40,7 +40,7 @@ jobs: sudo apt-get auto-remove -y sudo apt-get clean -y df -h - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: submodules: recursive - name: Configure @@ -57,7 +57,7 @@ jobs: .. popd - name: Load Cache - uses: actions/cache@v2 + uses: actions/cache@v3 with: path: ccache key: ccache-release-${{ matrix.os }}-${{ github.ref }}-${{ github.sha }} diff --git a/.github/workflows/build-and-test.win.yml b/.github/workflows/build-and-test.win.yml index 641cd7c497..d566486e3b 100644 --- a/.github/workflows/build-and-test.win.yml +++ b/.github/workflows/build-and-test.win.yml @@ -6,8 +6,8 @@ env: # The following are for windows cross-build only: BOOST_VERSION: 1_69_0 BOOST_DOTTED_VERSION: 1.69.0 - CURL_VERSION: 7.67.0 - OPENSSL_VERSION: 1.1.1d + CURL_VERSION: 7.86.0 + OPENSSL_VERSION: 1.1.1q ZLIB_VERSION: 1.2.13 jobs: prepare-mingw64-libs: @@ -19,7 +19,7 @@ jobs: echo "OS_VERSION=`lsb_release -sr`" >> $GITHUB_ENV - name: Load Cache id: cache-libs - uses: actions/cache@v2 + uses: actions/cache@v3 with: path: libs key: mingw64-libs-${{ env.OS_VERSION }}-${{ env.BOOST_VERSION }}_${{ env.CURL_VERSION }}_${{ env.OPENSSL_VERSION }}_${{ env.ZLIB_VERSION }} @@ -113,13 +113,13 @@ jobs: sudo apt-get auto-remove -y sudo apt-get clean -y df -h - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: submodules: recursive - run: | echo "OS_VERSION=`lsb_release -sr`" >> $GITHUB_ENV - name: Load external libraries - uses: actions/cache@v2 + uses: actions/cache@v3 with: path: libs key: mingw64-libs-${{ env.OS_VERSION }}-${{ env.BOOST_VERSION }}_${{ env.CURL_VERSION }}_${{ env.OPENSSL_VERSION }}_${{ env.ZLIB_VERSION }} @@ -143,7 +143,7 @@ jobs: -D GRAPHENE_DISABLE_UNITY_BUILD=ON \ .. 
- name: Load Cache - uses: actions/cache@v2 + uses: actions/cache@v3 with: path: ccache key: ccache-mingw64-${{ env.OS_VERSION }}-${{ github.ref }}-${{ github.sha }} diff --git a/.github/workflows/build-docker.yml b/.github/workflows/build-docker.yml index 7af0459012..a2e30c67a0 100644 --- a/.github/workflows/build-docker.yml +++ b/.github/workflows/build-docker.yml @@ -23,23 +23,23 @@ jobs: if: env.DOCKER_PUSH_TAG != '' run: echo "${DOCKER_PUSH_TAG}" - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 + uses: docker/setup-buildx-action@v2 - name: Build only - uses: docker/build-push-action@v2 + uses: docker/build-push-action@v3 with: context: . load: true - name: Login to DockerHub if: env.DOCKER_PUSH_TAG != '' - uses: docker/login-action@v1 + uses: docker/login-action@v2 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Push to DockerHub if: env.DOCKER_PUSH_TAG != '' - uses: docker/build-push-action@v2 + uses: docker/build-push-action@v3 with: context: . push: true diff --git a/.github/workflows/sonar-scan.yml b/.github/workflows/sonar-scan.yml index 1d840963c3..3fb866c7be 100644 --- a/.github/workflows/sonar-scan.yml +++ b/.github/workflows/sonar-scan.yml @@ -12,7 +12,7 @@ jobs: runs-on: ${{ matrix.os }} services: elasticsearch: - image: docker://elasticsearch:7.17.5 + image: docker://elasticsearch:7.17.7 options: --env discovery.type=single-node --publish 9200:9200 --publish 9300:9300 steps: - name: Download and install latest SonarScanner CLI tool @@ -61,7 +61,7 @@ jobs: df -h sudo du -hs /mnt/* sudo ls -alr /mnt/ - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: fetch-depth: 0 submodules: recursive @@ -103,7 +103,7 @@ jobs: - run: | echo "OS_VERSION=`lsb_release -sr`" >> $GITHUB_ENV - name: Load Cache - uses: actions/cache@v2 + uses: actions/cache@v3 with: path: | ccache From fdfeead7ba57eb3e3050f58ef5faecdabf1fc3a5 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 28 Oct 2022 14:19:50 +0000 Subject: [PATCH 318/338] Run sonar-scan workflow with Ubuntu 20.04 instead of ubuntu-latest --- .github/workflows/sonar-scan.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sonar-scan.yml b/.github/workflows/sonar-scan.yml index 3fb866c7be..eca0b916ba 100644 --- a/.github/workflows/sonar-scan.yml +++ b/.github/workflows/sonar-scan.yml @@ -8,7 +8,7 @@ jobs: name: Scan with SonarScanner strategy: matrix: - os: [ ubuntu-latest ] + os: [ ubuntu-20.04 ] runs-on: ${{ matrix.os }} services: elasticsearch: From db763c458a40c7578fe8994e687683e9cdcc02d5 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 28 Oct 2022 14:25:06 +0000 Subject: [PATCH 319/338] Use GITHUB_REF_NAME variable provided by Github instead of rlespinasse/github-slug-action. And simplify master branch detection. 
--- .github/workflows/build-docker.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build-docker.yml b/.github/workflows/build-docker.yml index a2e30c67a0..3c9dad5d94 100644 --- a/.github/workflows/build-docker.yml +++ b/.github/workflows/build-docker.yml @@ -4,15 +4,13 @@ jobs: docker: runs-on: ubuntu-latest steps: - - name: Inject slug/short environment variables - uses: rlespinasse/github-slug-action@v3.x - name: Decide whether to push to DockerHub, and set tag if: | github.event_name == 'push' && ( startsWith( github.ref, 'refs/tags/' ) || - contains( fromJSON('["master","develop","testnet","hardfork"]'), env.GITHUB_REF_NAME ) ) + contains( fromJSON('["master","develop","testnet","hardfork"]'), github.ref_name ) ) run: | - if [[ "${GITHUB_REF_NAME}" == "master" ]] ; then + if [ "${GITHUB_REF_NAME}" == "master" ] ; then DOCKER_PUSH_TAG=latest else DOCKER_PUSH_TAG=${GITHUB_REF_NAME} From e3568f94a9d9d4b359bfde29f1357cf569abc486 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 28 Oct 2022 17:37:40 +0000 Subject: [PATCH 320/338] Update base image to focal-1.2.0, and upgrade libs ... at build time, and do not complain if /etc/bitshares exists already. --- Dockerfile | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index fcce4cbef0..52d4cc0405 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,9 +1,11 @@ -FROM phusion/baseimage:focal-1.0.0 +FROM phusion/baseimage:focal-1.2.0 MAINTAINER The bitshares decentralized organisation ENV LANG=en_US.UTF-8 RUN \ - apt-get update -y && \ + apt-get update && \ + apt-get upgrade -y -o Dpkg::Options::="--force-confold" && \ + apt-get update && \ apt-get install -y \ g++ \ autoconf \ @@ -53,7 +55,7 @@ RUN \ install -s programs/witness_node/witness_node programs/genesis_util/get_dev_key programs/cli_wallet/cli_wallet /usr/local/bin && \ # # Obtain version - mkdir /etc/bitshares && \ + mkdir -p /etc/bitshares && \ git rev-parse --short HEAD > /etc/bitshares/version && \ cd / && \ rm -rf /bitshares-core From e6c07443d287edc851802da2189cd98ed8eeb9e7 Mon Sep 17 00:00:00 2001 From: abitmore Date: Fri, 28 Oct 2022 20:52:11 +0000 Subject: [PATCH 321/338] Support building with curl v7.77 or higher in mac by linking with the Core Foundation framework and the System Configuration framework. 
--- CMakeLists.txt | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 75028de33c..6df34a6e01 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -59,6 +59,13 @@ macro(FIND_CURL) find_package(CURL REQUIRED) endif (NOT WIN32 AND NOT APPLE AND CURL_STATICLIB) + if( APPLE AND NOT "${CURL_VERSION_STRING}" VERSION_LESS "7.77.0" ) + list( APPEND CURL_LIBRARIES "-framework CoreFoundation" ) + list( APPEND CURL_LIBRARIES "-framework SystemConfiguration" ) + endif() + + message(STATUS "CURL libraries: ${CURL_LIBRARIES}") + if( WIN32 ) if ( MSVC ) list( APPEND CURL_LIBRARIES Wldap32 ) @@ -261,7 +268,7 @@ else( WIN32 ) # Apple AND Linux if( APPLE ) # Apple Specific Options Here - message( STATUS "Configuring BitShares on OS X" ) + message( STATUS "Configuring BitShares on macOS" ) set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} -stdlib=libc++ -Wall -fvisibility-inlines-hidden -fvisibility=hidden" ) else( APPLE ) if ( "${CMAKE_SYSTEM_NAME}" STREQUAL "OpenBSD" ) From 9309426ed2cc406d8ff6d37bc84c536b7e7b5431 Mon Sep 17 00:00:00 2001 From: Abit Date: Sun, 30 Oct 2022 21:58:15 +0100 Subject: [PATCH 322/338] Update Docker Hub repository path to a variable --- .github/workflows/build-docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-docker.yml b/.github/workflows/build-docker.yml index 3c9dad5d94..fd75ec386e 100644 --- a/.github/workflows/build-docker.yml +++ b/.github/workflows/build-docker.yml @@ -41,4 +41,4 @@ jobs: with: context: . push: true - tags: bitshares/bitshares-core:${{ env.DOCKER_PUSH_TAG }} + tags: ${{ secrets.DOCKERHUB_REPO_PATH }}:${{ env.DOCKER_PUSH_TAG }} From 6abef1822ec7579aa4867aaf3ac9e6242779fd6f Mon Sep 17 00:00:00 2001 From: Abit Date: Mon, 31 Oct 2022 01:03:11 +0100 Subject: [PATCH 323/338] Push the major.minor version tag to Docker Hub too --- .github/workflows/build-docker.yml | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-docker.yml b/.github/workflows/build-docker.yml index fd75ec386e..a1a5d134ef 100644 --- a/.github/workflows/build-docker.yml +++ b/.github/workflows/build-docker.yml @@ -17,6 +17,15 @@ jobs: fi echo "DOCKER_PUSH_TAG=${DOCKER_PUSH_TAG}" echo "DOCKER_PUSH_TAG=${DOCKER_PUSH_TAG}" >> $GITHUB_ENV + VERSION_MAJOR=`echo "${DOCKER_PUSH_TAG}" | cut -f1 -d'.'` + if [ "${VERSION_MAJOR}" != "${DOCKER_PUSH_TAG}" ]; then + VERSION_MINOR=`echo "${DOCKER_PUSH_TAG}" | cut -f2 -d'.'` + DOCKER_PUSH_TAG_SHORT=${VERSION_MAJOR}.${VERSION_MINOR} + if [ "${DOCKER_PUSH_TAG_SHORT}" != "${DOCKER_PUSH_TAG}" ]; then + echo "DOCKER_PUSH_TAG_SHORT=${DOCKER_PUSH_TAG_SHORT}" + echo "DOCKER_PUSH_TAG_SHORT=${DOCKER_PUSH_TAG_SHORT}" >> $GITHUB_ENV + fi + fi - name: Test tag if: env.DOCKER_PUSH_TAG != '' run: echo "${DOCKER_PUSH_TAG}" @@ -35,10 +44,19 @@ jobs: with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Push to DockerHub - if: env.DOCKER_PUSH_TAG != '' + - name: Push to DockerHub (for branches) + if: env.DOCKER_PUSH_TAG != '' && env.DOCKER_PUSH_TAG_SHORT == '' uses: docker/build-push-action@v3 with: context: . push: true tags: ${{ secrets.DOCKERHUB_REPO_PATH }}:${{ env.DOCKER_PUSH_TAG }} + - name: Push to DockerHub (for tags) + if: env.DOCKER_PUSH_TAG != '' && env.DOCKER_PUSH_TAG_SHORT != '' + uses: docker/build-push-action@v3 + with: + context: . 
+ push: true + tags: | + ${{ secrets.DOCKERHUB_REPO_PATH }}:${{ env.DOCKER_PUSH_TAG }} + ${{ secrets.DOCKERHUB_REPO_PATH }}:${{ env.DOCKER_PUSH_TAG_SHORT }} From 673035b92ad3e368ed9fa3dabdf3ae5823398192 Mon Sep 17 00:00:00 2001 From: abitmore Date: Mon, 31 Oct 2022 15:02:26 +0000 Subject: [PATCH 324/338] Update Docker user and group - Set UID to 10000 and GID to 10001 statically - Run with the bitshares user --- Dockerfile | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 52d4cc0405..c6f1bdf7f7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -52,7 +52,10 @@ RUN \ -DGRAPHENE_DISABLE_UNITY_BUILD=ON \ . && \ make witness_node cli_wallet get_dev_key && \ - install -s programs/witness_node/witness_node programs/genesis_util/get_dev_key programs/cli_wallet/cli_wallet /usr/local/bin && \ + install -s programs/witness_node/witness_node \ + programs/genesis_util/get_dev_key \ + programs/cli_wallet/cli_wallet \ + /usr/local/bin && \ # # Obtain version mkdir -p /etc/bitshares && \ @@ -62,7 +65,8 @@ RUN \ # Home directory $HOME WORKDIR / -RUN useradd -s /bin/bash -m -d /var/lib/bitshares bitshares +RUN groupadd -g 10001 bitshares +RUN useradd -u 10000 -g bitshares -s /bin/bash -m -d /var/lib/bitshares --no-log-init bitshares ENV HOME /var/lib/bitshares RUN chown bitshares:bitshares -R /var/lib/bitshares @@ -83,5 +87,7 @@ RUN chmod a+x /usr/local/bin/bitsharesentry.sh # Make Docker send SIGINT instead of SIGTERM to the daemon STOPSIGNAL SIGINT +USER bitshares:bitshares + # default execute entry CMD ["/usr/local/bin/bitsharesentry.sh"] From cd5a1d83fe85e47f7dd25f07272364e18e3774c8 Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 1 Nov 2022 01:04:44 +0000 Subject: [PATCH 325/338] Drop root privileges in bitsharesentry.sh instead This is a temporary solution compatible with older images. --- Dockerfile | 2 +- docker/bitsharesentry.sh | 8 ++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index c6f1bdf7f7..8e657a244b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -87,7 +87,7 @@ RUN chmod a+x /usr/local/bin/bitsharesentry.sh # Make Docker send SIGINT instead of SIGTERM to the daemon STOPSIGNAL SIGINT -USER bitshares:bitshares +#USER bitshares:bitshares # default execute entry CMD ["/usr/local/bin/bitsharesentry.sh"] diff --git a/docker/bitsharesentry.sh b/docker/bitsharesentry.sh index 58a2cd5150..262d1c29a3 100644 --- a/docker/bitsharesentry.sh +++ b/docker/bitsharesentry.sh @@ -84,10 +84,14 @@ fi ln -f -s /etc/bitshares/config.ini /var/lib/bitshares ln -f -s /etc/bitshares/logging.ini /var/lib/bitshares +chown -R bitshares:bitshares /var/lib/bitshares + # Plugins need to be provided in a space-separated list, which # makes it necessary to write it like this if [[ ! 
-z "$BITSHARESD_PLUGINS" ]]; then - exec "$BITSHARESD" --data-dir "${HOME}" ${ARGS} ${BITSHARESD_ARGS} --plugins "${BITSHARESD_PLUGINS}" + exec /usr/bin/setpriv --reuid=bitshares --regid=bitshares --clear-groups \ + "$BITSHARESD" --data-dir "${HOME}" ${ARGS} ${BITSHARESD_ARGS} --plugins "${BITSHARESD_PLUGINS}" else - exec "$BITSHARESD" --data-dir "${HOME}" ${ARGS} ${BITSHARESD_ARGS} + exec /usr/bin/setpriv --reuid=bitshares --regid=bitshares --clear-groups \ + "$BITSHARESD" --data-dir "${HOME}" ${ARGS} ${BITSHARESD_ARGS} fi From 233dbb77a1646194f631eaefdb20769170a2ef79 Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 1 Nov 2022 19:07:09 +0000 Subject: [PATCH 326/338] Use Docker Multistage Builds --- Dockerfile | 44 +++++++++++++++++++++++++++++++++----------- 1 file changed, 33 insertions(+), 11 deletions(-) diff --git a/Dockerfile b/Dockerfile index 8e657a244b..ac3f1c1c4d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,8 @@ -FROM phusion/baseimage:focal-1.2.0 -MAINTAINER The bitshares decentralized organisation - +# The image for building +FROM phusion/baseimage:focal-1.2.0 as build ENV LANG=en_US.UTF-8 + +# Install dependencies RUN \ apt-get update && \ apt-get upgrade -y -o Dpkg::Options::="--force-confold" && \ @@ -63,13 +64,39 @@ RUN \ cd / && \ rm -rf /bitshares-core -# Home directory $HOME +# The final image +FROM phusion/baseimage:focal-1.2.0 +LABEL maintainer="The bitshares decentralized organisation" +ENV LANG=en_US.UTF-8 + +# Install required libraries +RUN \ + apt-get update && \ + apt-get upgrade -y -o Dpkg::Options::="--force-confold" && \ + apt-get update && \ + apt-get install --no-install-recommends -y \ + libcurl4 \ + ca-certificates \ + && \ + mkdir -p /etc/bitshares && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +COPY --from=build /usr/local/bin/* /usr/local/bin/ +COPY --from=build /etc/bitshares/version /etc/bitshares/ + WORKDIR / RUN groupadd -g 10001 bitshares RUN useradd -u 10000 -g bitshares -s /bin/bash -m -d /var/lib/bitshares --no-log-init bitshares ENV HOME /var/lib/bitshares RUN chown bitshares:bitshares -R /var/lib/bitshares +# default exec/config files +ADD docker/default_config.ini /etc/bitshares/config.ini +ADD docker/default_logging.ini /etc/bitshares/logging.ini +ADD docker/bitsharesentry.sh /usr/local/bin/bitsharesentry.sh +RUN chmod a+x /usr/local/bin/bitsharesentry.sh + # Volume VOLUME ["/var/lib/bitshares", "/etc/bitshares"] @@ -78,16 +105,11 @@ EXPOSE 8090 # p2p service: EXPOSE 1776 -# default exec/config files -ADD docker/default_config.ini /etc/bitshares/config.ini -ADD docker/default_logging.ini /etc/bitshares/logging.ini -ADD docker/bitsharesentry.sh /usr/local/bin/bitsharesentry.sh -RUN chmod a+x /usr/local/bin/bitsharesentry.sh - # Make Docker send SIGINT instead of SIGTERM to the daemon STOPSIGNAL SIGINT +# Temporarily commented out due to permission issues cuased by older versions, to be restored in a future version #USER bitshares:bitshares # default execute entry -CMD ["/usr/local/bin/bitsharesentry.sh"] +ENTRYPOINT ["/usr/local/bin/bitsharesentry.sh"] From 501cec475762ca358458829c7321902f83d0db67 Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 1 Nov 2022 19:13:15 +0000 Subject: [PATCH 327/338] Do not install fish --- Dockerfile | 1 - 1 file changed, 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index ac3f1c1c4d..2420735774 100644 --- a/Dockerfile +++ b/Dockerfile @@ -30,7 +30,6 @@ RUN \ libtool \ doxygen \ ca-certificates \ - fish \ && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* /tmp/* 
/var/tmp/* From 65befca9ed58153cf305d030ceced5ddaf88506d Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 1 Nov 2022 23:41:53 +0000 Subject: [PATCH 328/338] Upgrade libraries in the entry script --- docker/bitsharesentry.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docker/bitsharesentry.sh b/docker/bitsharesentry.sh index 262d1c29a3..4f8bd95664 100644 --- a/docker/bitsharesentry.sh +++ b/docker/bitsharesentry.sh @@ -86,6 +86,9 @@ ln -f -s /etc/bitshares/logging.ini /var/lib/bitshares chown -R bitshares:bitshares /var/lib/bitshares +# Get the latest security updates +apt-get update && apt-get upgrade -y -o Dpkg::Options::="--force-confold" + # Plugins need to be provided in a space-separated list, which # makes it necessary to write it like this if [[ ! -z "$BITSHARESD_PLUGINS" ]]; then From cdc03c7040fff1ae28fac0f961f85348cb83bda2 Mon Sep 17 00:00:00 2001 From: Abit Date: Mon, 7 Nov 2022 01:01:40 +0100 Subject: [PATCH 329/338] Fix a typo in a comment --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 2420735774..b948626e93 100644 --- a/Dockerfile +++ b/Dockerfile @@ -107,7 +107,7 @@ EXPOSE 1776 # Make Docker send SIGINT instead of SIGTERM to the daemon STOPSIGNAL SIGINT -# Temporarily commented out due to permission issues cuased by older versions, to be restored in a future version +# Temporarily commented out due to permission issues caused by older versions, to be restored in a future version #USER bitshares:bitshares # default execute entry From 2715bcd3b1fb9dd7b6df78760c141c274757ab56 Mon Sep 17 00:00:00 2001 From: Abit Date: Mon, 7 Nov 2022 16:35:13 +0100 Subject: [PATCH 330/338] Fix a typo and a symbol in README --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 9d30fff277..54547b0586 100644 --- a/README.md +++ b/README.md @@ -106,8 +106,8 @@ The blockchain data will be stored under the directory `witness_node_data_dir`. **Stop Node Software:** -For stopping the node run cleanly; you will need to access the node run terminal then press on `Ctrl+C` then wait for the run to stop, please note that it may take usually few minutes to exit the run. -It's recommended to use linux command [screen](https://help.ubuntu.com/community/Screen) to inisiate the node run so you can go back to the node run screen to stop it. +For stopping the node run cleanly, you will need to access the node run terminal then press on `Ctrl+C` then wait for the run to stop, please note that it may take usually few minutes to exit the run. +It's recommended to use linux command [screen](https://help.ubuntu.com/community/Screen) to initiate the node run so you can go back to the node run screen to stop it. **IMPORTANT:** By default the node will start in reduced memory mode by using some of the commands detailed in [Memory reduction for nodes](https://github.com/bitshares/bitshares-core/wiki/Memory-reduction-for-nodes). 
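The next two patches update the README's wallet examples, including the switch to `--rpc-http-endpoint=127.0.0.1:8093` and the curl-based JSON-RPC calls. As a side note for readers driving the wallet from a program rather than from curl, the sketch below sends the README's `info` request over HTTP with libcurl. It is a minimal illustration only: the URL, port and JSON payload are copied from the README examples and assume a locally running cli_wallet with its HTTP endpoint enabled, and error handling is kept to a bare minimum.

    #include <curl/curl.h>
    #include <iostream>
    #include <string>

    // Append the HTTP response body to a std::string supplied via CURLOPT_WRITEDATA.
    static size_t collect( char* data, size_t size, size_t nmemb, void* userp )
    {
        static_cast<std::string*>( userp )->append( data, size * nmemb );
        return size * nmemb;
    }

    int main()
    {
        curl_global_init( CURL_GLOBAL_DEFAULT );
        CURL* curl = curl_easy_init();
        if( curl == nullptr )
            return 1;

        // The same JSON-RPC request as the README's curl example, sent to the
        // cli_wallet HTTP endpoint (assumed here to be 127.0.0.1:8093 as in the README).
        const std::string request = R"({"jsonrpc": "2.0", "method": "info", "params": [], "id": 1})";
        std::string response;

        curl_easy_setopt( curl, CURLOPT_URL, "http://127.0.0.1:8093/" );
        curl_easy_setopt( curl, CURLOPT_POSTFIELDS, request.c_str() );
        curl_easy_setopt( curl, CURLOPT_WRITEFUNCTION, collect );
        curl_easy_setopt( curl, CURLOPT_WRITEDATA, &response );

        const CURLcode rc = curl_easy_perform( curl );
        if( rc == CURLE_OK )
            std::cout << response << std::endl;
        else
            std::cerr << curl_easy_strerror( rc ) << std::endl;

        curl_easy_cleanup( curl );
        curl_global_cleanup();
        return rc == CURLE_OK ? 0 : 1;
    }

Link with `-lcurl`; any HTTP client would work equally well, since the endpoint speaks plain JSON-RPC over HTTP.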
From ed73b55ec81ba8114e2eabf468e2b50c9c804f0d Mon Sep 17 00:00:00 2001 From: Abit Date: Mon, 7 Nov 2022 16:52:36 +0100 Subject: [PATCH 331/338] Fix a link, update wording and parameters used --- README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 54547b0586..f2b22093cf 100644 --- a/README.md +++ b/README.md @@ -256,12 +256,12 @@ The `cli_wallet` program can also be configured to serve **all of its commands** Start `cli_wallet` with RPC connection enabled: - $ ./programs/cli_wallet/cli_wallet --rpc-endpoint=127.0.0.1:8091 + $ ./programs/cli_wallet/cli_wallet --rpc-http-endpoint=127.0.0.1:8093 Access the wallet API using an HTTP client: - $ curl --data '{"jsonrpc": "2.0", "method": "info", "params": [], "id": 1}' http://127.0.0.1:8091/ - $ curl --data '{"jsonrpc": "2.0", "method": "get_account", "params": ["1.2.0"], "id": 1}' http://127.0.0.1:8091/ + $ curl --data '{"jsonrpc": "2.0", "method": "info", "params": [], "id": 1}' http://127.0.0.1:8093/ + $ curl --data '{"jsonrpc": "2.0", "method": "get_account", "params": ["1.2.0"], "id": 1}' http://127.0.0.1:8093/ Note: The syntax to access wallet API is a bit different than accessing node API. @@ -343,8 +343,8 @@ FAQ - Is there a way to allow external program to drive `cli_wallet` via websocket, JSONRPC, or HTTP? Yes. External programs may connect to the command-line wallet and make its calls over a websockets API. To do this, run the wallet in - server mode, i.e. `cli_wallet -s "127.0.0.1:9999"` and then have the external program connect to it over the specified port - (in this example, port 9999). Please check the ["Using the API"](#using-the-api) section for more info. + server mode, i.e. `cli_wallet -H "127.0.0.1:9999"` and then have the external program connect to it over the specified port + (in this example, port 9999). Please check the ["Using Built-In APIs"](#using-built-in-apis) section for more info. - Is there a way to access methods which require login over HTTP? @@ -370,7 +370,7 @@ FAQ - The answer to the previous question was really confusing. Can you make it clearer? All account ID's are of the form `1.2.x`. If you were the 9735th account to be registered, - your account's ID will be `1.2.9735`. Account `0` is special (it's the "committee account," + your account's ID will be `1.2.9735`. Account `0` is special (it's the "committee account", which is controlled by the committee members and has a few abilities and restrictions other accounts do not). From 98565fc9e602a11053405777e1c22a773c6bd618 Mon Sep 17 00:00:00 2001 From: Abit Date: Mon, 7 Nov 2022 16:59:23 +0100 Subject: [PATCH 332/338] Update wording or text formatting --- README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index f2b22093cf..d9339890be 100644 --- a/README.md +++ b/README.md @@ -138,7 +138,7 @@ Stay on `bitshares-core/build` directory before you run the below `cli_wallet` c ./programs/cli_wallet/cli_wallet -**IMPORTANT:** The cli_wallet or API interfaces to the witness node wouldn't be fully functional unless the witness node is fully synchronized with the blockchain. The cli_wallet command `info` will show result `head_block_age` which will tell you how far you are from the live current block of the blockchain. +**IMPORTANT:** The `cli_wallet` or API interfaces to the node wouldn't be fully functional unless the node is fully synchronized with the blockchain. 
The `cli_wallet` command `info` will show result `head_block_age` which will tell you how far you are from the live current block of the blockchain. To check your current block: @@ -274,7 +274,7 @@ Accessing restrictable node API sets ------------------------------------ You can restrict node API sets to particular users by specifying an `api-access` file in `config.ini` -or by using the `--api-access /full/path/to/api-access.json` command line option on node startup. Here is an example `api-access` file which allows +or by using the `--api-access /full/path/to/api-access.json` command-line option on node startup. Here is an example `api-access` file which allows user `bytemaster` with password `supersecret` to access four different API sets, while allowing any other user to access the three public API sets necessary to use the node: @@ -354,13 +354,13 @@ FAQ - What is the meaning of `a.b.c` numbers? - The first number specifies the *space*. Space 1 is for protocol objects, 2 is for implementation objects. + The first number specifies the *space*. Space `1` is for protocol objects, `2` is for implementation objects. Protocol space objects can appear on the wire, for example in the binary form of transactions. Implementation space objects cannot appear on the wire and solely exist for implementation purposes, such as optimization or internal bookkeeping. The second number specifies the *type*. The type of the object determines what fields it has. For a - complete list of type ID's, see `GRAPHENE_DEFINE_IDS(protocol, protocol_ids ...)` in + complete list of type IDs, see `GRAPHENE_DEFINE_IDS(protocol, protocol_ids ...)` in [protocol/types.hpp](https://github.com/bitshares/bitshares-core/blob/master/libraries/protocol/include/graphene/protocol/types.hpp) and `GRAPHENE_DEFINE_IDS(chain, implementation_ids ...)` in [chain/types.hpp](https://github.com/bitshares/bitshares-core/blob/master/libraries/chain/include/graphene/chain/types.hpp). @@ -369,12 +369,12 @@ FAQ - The answer to the previous question was really confusing. Can you make it clearer? - All account ID's are of the form `1.2.x`. If you were the 9735th account to be registered, + All account IDs are of the form `1.2.x`. If you were the 9735th account to be registered, your account's ID will be `1.2.9735`. Account `0` is special (it's the "committee account", which is controlled by the committee members and has a few abilities and restrictions other accounts do not). - All asset ID's are of the form `1.3.x`. If you were the 29th asset to be registered, + All asset IDs are of the form `1.3.x`. If you were the 29th asset to be registered, your asset's ID will be `1.3.29`. Asset `0` is special (it's BTS, which is considered the "core asset"). The first and second number together identify the kind of thing you're talking about (`1.2` for accounts, From a7beefa4062548622c2c9f9c3703a0b6f3bbe7ff Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 8 Nov 2022 01:30:31 +0000 Subject: [PATCH 333/338] Update sonar.branch.target in release branch --- sonar-project.properties | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sonar-project.properties b/sonar-project.properties index 9fe1699f22..b66dedea45 100644 --- a/sonar-project.properties +++ b/sonar-project.properties @@ -26,4 +26,4 @@ sonar.cfamily.cache.path=sonar_cache # Decide which tree the current build belongs to in SonarCloud. # Managed by the `set_sonar_branch*` script(s) when building with CI. 
-sonar.branch.target=develop +sonar.branch.target=master From 534c5bae67f697b6e3a6639b2b0c9610abc0ffba Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 8 Nov 2022 01:31:13 +0000 Subject: [PATCH 334/338] Remove unused code --- sonar-project.properties | 3 --- 1 file changed, 3 deletions(-) diff --git a/sonar-project.properties b/sonar-project.properties index b66dedea45..9cd8574e9f 100644 --- a/sonar-project.properties +++ b/sonar-project.properties @@ -13,9 +13,6 @@ sonar.links.issue=https://github.com/bitshares/bitshares-core/issues sonar.links.scm=https://github.com/bitshares/bitshares-core/tree/master sonar.tests=tests -# Used by the `build_and_test` script for the first pass when building with Travis CI, to skip some files -#sonar.exclusions.part1=programs/build_helper/**/*,libraries/fc/**/*,libraries/egenesis/egenesis_full.cpp,libraries/chain/**/*.cpp,libraries/protocol/**/*.cpp -#sonar.exclusions.part2=programs/build_helper/**/*,libraries/fc/**/*,libraries/egenesis/egenesis_full.cpp,libraries/chain/**/*.cpp sonar.exclusions=programs/build_helper/**/*,libraries/fc/**/*,libraries/egenesis/egenesis_full.cpp sonar.sources=libraries,programs sonar.cfamily.build-wrapper-output=bw-output From ecdcd1b893efe8384731b73246f296868785f2b2 Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 8 Nov 2022 17:59:28 +0000 Subject: [PATCH 335/338] Fix code smells --- libraries/app/include/graphene/app/api.hpp | 2 +- .../app/include/graphene/app/api_objects.hpp | 3 +- libraries/chain/asset_evaluator.cpp | 2 +- libraries/chain/db_debug.cpp | 11 ++-- .../include/graphene/chain/account_object.hpp | 18 +++-- .../include/graphene/chain/asset_object.hpp | 2 +- .../graphene/chain/confidential_object.hpp | 3 +- .../chain/include/graphene/chain/database.hpp | 2 +- .../graphene/chain/global_property_object.hpp | 2 +- .../include/graphene/chain/htlc_object.hpp | 9 ++- .../graphene/chain/proposal_object.hpp | 5 +- .../chain/special_authority_object.hpp | 3 +- .../include/graphene/chain/ticket_object.hpp | 4 +- .../chain/withdraw_permission_object.hpp | 36 +++++----- libraries/net/include/graphene/net/config.hpp | 6 +- .../net/include/graphene/net/message.hpp | 10 +-- libraries/net/include/graphene/net/node.hpp | 9 +-- .../include/graphene/net/peer_connection.hpp | 66 ++++++++++++------- .../include/graphene/net/peer_database.hpp | 27 ++++---- .../include/graphene/protocol/asset_ops.hpp | 42 +++++++----- .../include/graphene/protocol/base.hpp | 8 +-- .../include/graphene/protocol/market.hpp | 23 ++++--- .../include/graphene/protocol/pts_address.hpp | 8 +-- .../include/graphene/protocol/vote.hpp | 3 +- .../graphene/wallet/wallet_structs.hpp | 2 +- 25 files changed, 176 insertions(+), 130 deletions(-) diff --git a/libraries/app/include/graphene/app/api.hpp b/libraries/app/include/graphene/app/api.hpp index ad92678ab2..42f1f576dd 100644 --- a/libraries/app/include/graphene/app/api.hpp +++ b/libraries/app/include/graphene/app/api.hpp @@ -864,7 +864,7 @@ FC_API(graphene::app::crypto_api, ) FC_API(graphene::app::asset_api, (get_asset_holders) - (get_asset_holders_count) + (get_asset_holders_count) (get_all_asset_holders) ) FC_API(graphene::app::orders_api, diff --git a/libraries/app/include/graphene/app/api_objects.hpp b/libraries/app/include/graphene/app/api_objects.hpp index 7dccae802c..758bed6a04 100644 --- a/libraries/app/include/graphene/app/api_objects.hpp +++ b/libraries/app/include/graphene/app/api_objects.hpp @@ -218,7 +218,8 @@ FC_REFLECT( graphene::app::order, (price)(quote)(base)(id)(owner_id)(owner_name) 
FC_REFLECT( graphene::app::order_book, (base)(quote)(bids)(asks) ) FC_REFLECT( graphene::app::market_ticker, (time)(base)(quote)(latest)(lowest_ask)(lowest_ask_base_size)(lowest_ask_quote_size) - (highest_bid)(highest_bid_base_size)(highest_bid_quote_size)(percent_change)(base_volume)(quote_volume)(mto_id) ) + (highest_bid)(highest_bid_base_size)(highest_bid_quote_size)(percent_change)(base_volume)(quote_volume) + (mto_id) ) FC_REFLECT( graphene::app::market_volume, (time)(base)(quote)(base_volume)(quote_volume) ) FC_REFLECT( graphene::app::market_trade, (sequence)(date)(price)(amount)(value)(type) (side1_account_id)(side2_account_id) ) diff --git a/libraries/chain/asset_evaluator.cpp b/libraries/chain/asset_evaluator.cpp index 17ab836be9..5e23ece291 100644 --- a/libraries/chain/asset_evaluator.cpp +++ b/libraries/chain/asset_evaluator.cpp @@ -1206,7 +1206,7 @@ static extendable_operation_result pay_settle_from_gs_fund( database& d, asset settled_amount = ( op.amount.amount == mia_dyn.current_supply ) ? asset( bitasset.settlement_fund, bitasset.options.short_backing_asset ) - : op.amount * bitasset.settlement_price; // round down, favors global settlement fund + : ( op.amount * bitasset.settlement_price ); // round down, favors global settlement fund if( op.amount.amount != mia_dyn.current_supply ) { // should be strictly < except for PM with zero outcome since in that case bitasset.settlement_fund is zero diff --git a/libraries/chain/db_debug.cpp b/libraries/chain/db_debug.cpp index 6f4a6872d1..9700cfba3d 100644 --- a/libraries/chain/db_debug.cpp +++ b/libraries/chain/db_debug.cpp @@ -116,12 +116,11 @@ void database::debug_dump() void debug_apply_update( database& db, const fc::variant_object& vo ) { - static const uint8_t - db_action_nil = 0, - db_action_create = 1, - db_action_write = 2, - db_action_update = 3, - db_action_delete = 4; + constexpr uint8_t db_action_nil = 0; + constexpr uint8_t db_action_create = 1; + constexpr uint8_t db_action_write = 2; + constexpr uint8_t db_action_update = 3; + constexpr uint8_t db_action_delete = 4; // "_action" : "create" object must not exist, unspecified fields take defaults // "_action" : "write" object may exist, is replaced entirely, unspecified fields take defaults diff --git a/libraries/chain/include/graphene/chain/account_object.hpp b/libraries/chain/include/graphene/chain/account_object.hpp index b25d6f48d7..a314a1ac70 100644 --- a/libraries/chain/include/graphene/chain/account_object.hpp +++ b/libraries/chain/include/graphene/chain/account_object.hpp @@ -99,9 +99,10 @@ namespace graphene { namespace chain { uint64_t vp_committee = 0; ///< the final voting power for the committees. uint64_t vp_witness = 0; ///< the final voting power for the witnesses. uint64_t vp_worker = 0; ///< the final voting power for the workers. - /// timestamp of the last count of votes. - /// if there is no statistics, the date is less than `_db.get_dynamic_global_properties().last_vote_tally_time`. - time_point_sec vote_tally_time; + /// Timestamp of the last count of votes. + /// If there is no statistics, + /// the date is less than `_db.get_dynamic_global_properties().last_vote_tally_time`. 
+ time_point_sec vote_tally_time; ///@} /// Whether this account owns some CORE asset and is voting @@ -160,7 +161,8 @@ namespace graphene { namespace chain { account_id_type owner; asset_id_type asset_type; share_type balance; - bool maintenance_flag = false; ///< Whether need to process this balance object in maintenance interval + /// Whether need to process this balance object in maintenance interval + bool maintenance_flag = false; asset get_balance()const { return asset(balance, asset_type); } void adjust_balance(const asset& delta); @@ -364,8 +366,10 @@ namespace graphene { namespace chain { virtual void about_to_modify( const object& before ) override; virtual void object_modified( const object& after ) override; - const map< asset_id_type, const account_balance_object* >& get_account_balances( const account_id_type& acct )const; - const account_balance_object* get_account_balance( const account_id_type& acct, const asset_id_type& asset )const; + const map< asset_id_type, const account_balance_object* >& get_account_balances( + const account_id_type& acct )const; + const account_balance_object* get_account_balance( const account_id_type& acct, + const asset_id_type& asset )const; private: static const uint8_t bits; @@ -428,7 +432,7 @@ namespace graphene { namespace chain { struct by_maintenance_seq; struct by_voting_power_active; - + /** * @ingroup object_index */ diff --git a/libraries/chain/include/graphene/chain/asset_object.hpp b/libraries/chain/include/graphene/chain/asset_object.hpp index f93a3e4993..66938a2390 100644 --- a/libraries/chain/include/graphene/chain/asset_object.hpp +++ b/libraries/chain/include/graphene/chain/asset_object.hpp @@ -176,7 +176,7 @@ namespace graphene { namespace chain { { return db.get(dynamic_asset_data_id); } /** - * The total amount of an asset that is reserved for future issuance. + * The total amount of an asset that is reserved for future issuance. */ template share_type reserved( const DB& db )const diff --git a/libraries/chain/include/graphene/chain/confidential_object.hpp b/libraries/chain/include/graphene/chain/confidential_object.hpp index bc9d849dfc..d742974ad3 100644 --- a/libraries/chain/include/graphene/chain/confidential_object.hpp +++ b/libraries/chain/include/graphene/chain/confidential_object.hpp @@ -56,7 +56,8 @@ typedef multi_index_container< blinded_balance_object, indexed_by< ordered_unique< tag, member< object, object_id_type, &object::id > >, - ordered_unique< tag, member > + ordered_unique< tag, + member > > > blinded_balance_object_multi_index_type; typedef generic_index blinded_balance_index; diff --git a/libraries/chain/include/graphene/chain/database.hpp b/libraries/chain/include/graphene/chain/database.hpp index 2e0b201ffc..511a025615 100644 --- a/libraries/chain/include/graphene/chain/database.hpp +++ b/libraries/chain/include/graphene/chain/database.hpp @@ -282,7 +282,7 @@ namespace graphene { namespace chain { * to newly created VBID and return it. * * Otherwise, credit amount to ovbid. - * + * * @return ID of newly created VBO, but only if VBO was created. 
*/ optional< vesting_balance_id_type > deposit_lazy_vesting( diff --git a/libraries/chain/include/graphene/chain/global_property_object.hpp b/libraries/chain/include/graphene/chain/global_property_object.hpp index 9d7079e624..22e5958eef 100644 --- a/libraries/chain/include/graphene/chain/global_property_object.hpp +++ b/libraries/chain/include/graphene/chain/global_property_object.hpp @@ -68,7 +68,7 @@ namespace graphene { namespace chain { time_point_sec time; witness_id_type current_witness; time_point_sec next_maintenance_time; - time_point_sec last_vote_tally_time; + time_point_sec last_vote_tally_time; time_point_sec last_budget_time; share_type witness_budget; share_type total_pob; diff --git a/libraries/chain/include/graphene/chain/htlc_object.hpp b/libraries/chain/include/graphene/chain/htlc_object.hpp index 47934f6418..ab86da29ac 100644 --- a/libraries/chain/include/graphene/chain/htlc_object.hpp +++ b/libraries/chain/include/graphene/chain/htlc_object.hpp @@ -33,7 +33,7 @@ namespace graphene { namespace chain { /** * @brief database object to store HTLCs - * + * * This object is stored in the database while an HTLC is active. The HTLC will * become inactive at expiration or when unlocked via the preimage. */ @@ -47,7 +47,7 @@ namespace graphene { namespace chain { asset_id_type asset_id; } transfer; struct condition_info { - struct hash_lock_info { + struct hash_lock_info { htlc_hash preimage_hash; uint16_t preimage_size; } hash_lock; @@ -91,13 +91,13 @@ namespace graphene { namespace chain { indexed_by< ordered_unique< tag< by_id >, member< object, object_id_type, &object::id > >, - ordered_unique< tag< by_expiration >, + ordered_unique< tag< by_expiration >, composite_key< htlc_object, htlc_object::timelock_extractor, member< object, object_id_type, &object::id > > >, ordered_unique< tag< by_from_id >, - composite_key< htlc_object, + composite_key< htlc_object, htlc_object::from_extractor, member< object, object_id_type, &object::id > > >, @@ -106,7 +106,6 @@ namespace graphene { namespace chain { htlc_object::to_extractor, member< object, object_id_type, &object::id > > > > - > htlc_object_index_type; typedef generic_index< htlc_object, htlc_object_index_type > htlc_index; diff --git a/libraries/chain/include/graphene/chain/proposal_object.hpp b/libraries/chain/include/graphene/chain/proposal_object.hpp index dde9cedf74..9f228e1910 100644 --- a/libraries/chain/include/graphene/chain/proposal_object.hpp +++ b/libraries/chain/include/graphene/chain/proposal_object.hpp @@ -33,7 +33,7 @@ namespace graphene { namespace chain { class database; /** - * @brief tracks the approval of a partially approved transaction + * @brief tracks the approval of a partially approved transaction * @ingroup object * @ingroup protocol */ @@ -56,7 +56,7 @@ class proposal_object : public abstract_object, member< object, object_id_type, &object::id > >, - //ordered_non_unique< tag< by_expiration >, member< proposal_object, time_point_sec, &proposal_object::expiration_time > > ordered_unique, composite_key, diff --git a/libraries/chain/include/graphene/chain/special_authority_object.hpp b/libraries/chain/include/graphene/chain/special_authority_object.hpp index 2b58f42528..eb5fbc0e71 100644 --- a/libraries/chain/include/graphene/chain/special_authority_object.hpp +++ b/libraries/chain/include/graphene/chain/special_authority_object.hpp @@ -52,7 +52,8 @@ typedef multi_index_container< special_authority_object, indexed_by< ordered_unique< tag, member< object, object_id_type, &object::id > >, - 
ordered_unique< tag, member< special_authority_object, account_id_type, &special_authority_object::account> > + ordered_unique< tag, + member< special_authority_object, account_id_type, &special_authority_object::account> > > > special_authority_multi_index_type; diff --git a/libraries/chain/include/graphene/chain/ticket_object.hpp b/libraries/chain/include/graphene/chain/ticket_object.hpp index d42858c417..94920dfc28 100644 --- a/libraries/chain/include/graphene/chain/ticket_object.hpp +++ b/libraries/chain/include/graphene/chain/ticket_object.hpp @@ -84,8 +84,8 @@ class ticket_object : public abstract_object(i) ]; } static uint8_t value_multiplier( ticket_type i, ticket_version version ) { - static constexpr uint32_t _value_multiplier_v1[] = { 1, 2, 4, 8, 8, 0 }; - static constexpr uint32_t _value_multiplier_v2[] = { 0, 2, 4, 8, 8, 0 }; + static constexpr uint8_t _value_multiplier_v1[] = { 1, 2, 4, 8, 8, 0 }; + static constexpr uint8_t _value_multiplier_v2[] = { 0, 2, 4, 8, 8, 0 }; return ( version == ticket_v1 ? _value_multiplier_v1[ static_cast(i) ] : _value_multiplier_v2[ static_cast(i) ] ); } diff --git a/libraries/chain/include/graphene/chain/withdraw_permission_object.hpp b/libraries/chain/include/graphene/chain/withdraw_permission_object.hpp index 3ba50a2932..b8b6a995e2 100644 --- a/libraries/chain/include/graphene/chain/withdraw_permission_object.hpp +++ b/libraries/chain/include/graphene/chain/withdraw_permission_object.hpp @@ -54,31 +54,37 @@ namespace graphene { namespace chain { asset withdrawal_limit; /// The duration of a withdrawal period in seconds uint32_t withdrawal_period_sec = 0; - /*** - * The beginning of the next withdrawal period - * WARNING: Due to caching, this value does not always represent the start of the next or current period (because it is only updated after a withdrawal operation such as claim). For the latest current period, use current_period(). - */ + /** + * The beginning of the next withdrawal period. + * WARNING: Due to caching, this value does not always represent the start of the next or current period, + * because it is only updated after a withdrawal operation such as claim. + * For the latest current period, use current_period(). + */ time_point_sec period_start_time; /// The time at which this withdraw permission expires time_point_sec expiration; - /*** - * Tracks the total amount - * WARNING: Due to caching, this value does not always represent the total amount claimed during the current period; it may represent what was claimed during the last claimed period (because it is only updated after a withdrawal operation such as claim). For the latest current period, use current_period(). - */ + /** + * Tracks the total amount + * WARNING: Due to caching, this value does not always represent the total amount claimed during the + * current period, it may represent what was claimed during the last claimed period, because it is only + * updated after a withdrawal operation such as claim. + * For the latest current period, use current_period(). + */ share_type claimed_this_period; - /*** - * Determine how much is still available to be claimed during the period that contains a time of interest. This object and function is mainly intended to be used with the "current" time as a parameter. The current time can be obtained from the time of the current head of the blockchain. - */ + /*** + * Determine how much is still available to be claimed during the period that contains a time of interest. 
+ * This object and function is mainly intended to be used with the "current" time as a parameter. + * The current time can be obtained from the time of the current head of the blockchain. + */ asset available_this_period( fc::time_point_sec current_time )const { if( current_time >= period_start_time + withdrawal_period_sec ) return withdrawal_limit; - return asset( - ( withdrawal_limit.amount > claimed_this_period ) - ? withdrawal_limit.amount - claimed_this_period - : 0, withdrawal_limit.asset_id ); + return asset( ( withdrawal_limit.amount > claimed_this_period ) ? + ( withdrawal_limit.amount - claimed_this_period ) : 0, + withdrawal_limit.asset_id ); } }; diff --git a/libraries/net/include/graphene/net/config.hpp b/libraries/net/include/graphene/net/config.hpp index b7d65078bb..b6899c1ab8 100644 --- a/libraries/net/include/graphene/net/config.hpp +++ b/libraries/net/include/graphene/net/config.hpp @@ -67,7 +67,7 @@ * our peers and save a copy in a cache were we will find it if * a peer requests it. We expire out old items out of the cache * after this number of blocks go by. - * + * * Recently lowered from 30 to match the default expiration time * the web wallet imposes on transactions. */ @@ -92,12 +92,12 @@ * is being flooded -- typically transactions will be fetched as soon * as we find out about them, so only one item will be requested * at a time. - * + * * No tests have been done to find the optimal value for this * parameter, so consider increasing or decreasing it if performance * during flooding is lacking. */ -#define GRAPHENE_NET_MAX_ITEMS_PER_PEER_DURING_NORMAL_OPERATION 1 +#define GRAPHENE_NET_MAX_ITEMS_PER_PEER_DURING_NORMAL_OPERATION 1 /** * Instead of fetching all item IDs from a peer, then fetching all blocks diff --git a/libraries/net/include/graphene/net/message.hpp b/libraries/net/include/graphene/net/message.hpp index 1fc49e6308..747dbdb84a 100644 --- a/libraries/net/include/graphene/net/message.hpp +++ b/libraries/net/include/graphene/net/message.hpp @@ -52,7 +52,7 @@ namespace graphene { namespace net { using message_hash_type = fc::ripemd160; /** - * Abstracts the process of packing/unpacking a message for a + * Abstracts the process of packing/unpacking a message for a * particular channel. */ struct message : public message_header @@ -71,7 +71,7 @@ namespace graphene { namespace net { * Assumes that T::type specifies the message type */ template - message( const T& m ) + message( const T& m ) { msg_type = T::type; data = fc::raw::pack(m); @@ -88,7 +88,7 @@ namespace graphene { namespace net { * opposite process from the constructor. */ template - T as()const + T as()const { try { FC_ASSERT( msg_type.value() == T::type ); @@ -105,8 +105,8 @@ namespace graphene { namespace net { fc::raw::unpack( ds, tmp ); } return tmp; - } FC_RETHROW_EXCEPTIONS( warn, - "error unpacking network message as a '${type}' ${x} !=? ${msg_type}", + } FC_RETHROW_EXCEPTIONS( warn, + "error unpacking network message as a '${type}' ${x} !=? ${msg_type}", ("type", fc::get_typename::name() ) ("x", T::type) ("msg_type", msg_type.value()) diff --git a/libraries/net/include/graphene/net/node.hpp b/libraries/net/include/graphene/net/node.hpp index d3b5dae7c0..715386be38 100644 --- a/libraries/net/include/graphene/net/node.hpp +++ b/libraries/net/include/graphene/net/node.hpp @@ -74,7 +74,7 @@ namespace graphene { namespace net { * @throws exception if error validating the item, otherwise the item is * safe to broadcast on. 
*/ - virtual bool handle_block( const graphene::net::block_message& blk_msg, bool sync_mode, + virtual bool handle_block( const graphene::net::block_message& blk_msg, bool sync_mode, std::vector& contained_transaction_msg_ids ) = 0; /** @@ -130,13 +130,14 @@ namespace graphene { namespace net { * &c. * the last item in the list will be the hash of the most recent block on our preferred chain */ - virtual std::vector get_blockchain_synopsis(const item_hash_t& reference_point, - uint32_t number_of_blocks_after_reference_point) = 0; + virtual std::vector get_blockchain_synopsis(const item_hash_t& reference_point, + uint32_t number_of_blocks_after_reference_point) = 0; /** * Call this after the call to handle_message succeeds. * - * @param item_type the type of the item we're synchronizing, will be the same as item passed to the sync_from() call + * @param item_type the type of the item we're synchronizing, will be the same as item passed to the + * sync_from() call * @param item_count the number of items known to the node that haven't been sent to handle_item() yet. * After `item_count` more calls to handle_item(), the node will be in sync */ diff --git a/libraries/net/include/graphene/net/peer_connection.hpp b/libraries/net/include/graphene/net/peer_connection.hpp index 8f87485880..139156b2e8 100644 --- a/libraries/net/include/graphene/net/peer_connection.hpp +++ b/libraries/net/include/graphene/net/peer_connection.hpp @@ -62,16 +62,18 @@ namespace graphene { namespace net enum class our_connection_state { disconnected, - just_connected, // if in this state, we have sent a hello_message - connection_accepted, // remote side has sent us a connection_accepted, we're operating normally with them - connection_rejected // remote side has sent us a connection_rejected, we may be exchanging address with them or may just be waiting for them to close + just_connected, ///< If in this state, we have sent a hello_message + connection_accepted, ///< Remote side has sent us a connection_accepted, we're operating normally with them + /// Remote side has sent us a connection_rejected, we may be exchanging address with them or may just + /// be waiting for them to close + connection_rejected }; enum class their_connection_state { disconnected, - just_connected, // we have not yet received a hello_message - connection_accepted, // we have sent them a connection_accepted - connection_rejected // we have sent them a connection_rejected + just_connected, ///< We have not yet received a hello_message + connection_accepted, ///< We have sent them a connection_accepted + connection_rejected ///< We have sent them a connection_rejected }; enum class connection_negotiation_status { @@ -205,17 +207,26 @@ namespace graphene { namespace net using item_to_time_map_type = std::unordered_map; - /// blockchain synchronization state data + /// Blockchain synchronization state data /// @{ - boost::container::deque ids_of_items_to_get; /// id of items in the blockchain that this peer has told us about - std::set ids_of_items_being_processed; /// list of all items this peer has offered use that we've already handed to the client but the client hasn't finished processing - uint32_t number_of_unfetched_item_ids = 0; /// number of items in the blockchain that follow ids_of_items_to_get but the peer hasn't yet told us their ids + /// ID of items in the blockchain that this peer has told us about + boost::container::deque ids_of_items_to_get; + /// List of all items this peer has offered use that we've already handed to the client 
but the client + /// hasn't finished processing + std::set ids_of_items_being_processed; + /// Number of items in the blockchain that follow ids_of_items_to_get but the peer hasn't yet told us their IDs + uint32_t number_of_unfetched_item_ids = 0; bool peer_needs_sync_items_from_us = false; bool we_need_sync_items_from_peer = false; - fc::optional, fc::time_point> > item_ids_requested_from_peer; /// we check this to detect a timed-out request and in busy() - fc::time_point last_sync_item_received_time; /// the time we received the last sync item or the time we sent the last batch of sync item requests to this peer - std::set sync_items_requested_from_peer; /// ids of blocks we've requested from this peer during sync. fetch from another peer if this peer disconnects - item_hash_t last_block_delegate_has_seen; /// the hash of the last block this peer has told us about that the peer knows + /// We check this to detect a timed-out request and in busy() + fc::optional, fc::time_point> > item_ids_requested_from_peer; + /// The time we received the last sync item or the time we sent the last batch of sync item requests + /// to this peer + fc::time_point last_sync_item_received_time; + /// IDs of blocks we've requested from this peer during sync. Fetch from another peer if this peer disconnects + std::set sync_items_requested_from_peer; + /// The hash of the last block this peer has told us about that the peer knows + item_hash_t last_block_delegate_has_seen; fc::time_point_sec last_block_time_delegate_has_seen; bool inhibit_fetching_sync_blocks = false; /// @} @@ -232,15 +243,24 @@ namespace graphene { namespace net {} }; struct timestamp_index{}; - typedef boost::multi_index_container, - std::hash >, - boost::multi_index::ordered_non_unique, - boost::multi_index::member > > > timestamped_items_set_type; + using timestamped_items_set_type = boost::multi_index_container< timestamped_item_id, + boost::multi_index::indexed_by< + boost::multi_index::hashed_unique< + boost::multi_index::member, + std::hash + >, + boost::multi_index::ordered_non_unique< + boost::multi_index::tag, + boost::multi_index::member + > + > + >; timestamped_items_set_type inventory_peer_advertised_to_us; timestamped_items_set_type inventory_advertised_to_peer; - item_to_time_map_type items_requested_from_peer; /// items we've requested from this peer during normal operation. fetch from another peer if this peer disconnects + /// Items we've requested from this peer during normal operation. 
+ /// Fetch from another peer if this peer disconnects + item_to_time_map_type items_requested_from_peer; /// @} // if they're flooding us with transactions, we set this to avoid fetching for a few seconds to let the @@ -259,13 +279,15 @@ namespace graphene { namespace net fc::thread* _thread = nullptr; unsigned _send_message_queue_tasks_running = 0; // temporary debugging #endif - bool _currently_handling_message = false; // true while we're in the middle of handling a message from the remote system + /// true while we're in the middle of handling a message from the remote system + bool _currently_handling_message = false; protected: peer_connection(peer_connection_delegate* delegate); private: void destroy(); public: - static peer_connection_ptr make_shared(peer_connection_delegate* delegate); // use this instead of the constructor + /// Use this instead of the constructor + static peer_connection_ptr make_shared(peer_connection_delegate* delegate); virtual ~peer_connection(); fc::tcp_socket& get_socket(); diff --git a/libraries/net/include/graphene/net/peer_database.hpp b/libraries/net/include/graphene/net/peer_database.hpp index 8ba79ac501..e770891230 100644 --- a/libraries/net/include/graphene/net/peer_database.hpp +++ b/libraries/net/include/graphene/net/peer_database.hpp @@ -50,23 +50,20 @@ namespace graphene { namespace net { fc::time_point_sec last_seen_time; fc::enum_type last_connection_disposition; fc::time_point_sec last_connection_attempt_time; - uint32_t number_of_successful_connection_attempts; - uint32_t number_of_failed_connection_attempts; + uint32_t number_of_successful_connection_attempts = 0; + uint32_t number_of_failed_connection_attempts = 0; fc::optional last_error; - potential_peer_record() : - number_of_successful_connection_attempts(0), - number_of_failed_connection_attempts(0){} + potential_peer_record() = default; - potential_peer_record(fc::ip::endpoint endpoint, - fc::time_point_sec last_seen_time = fc::time_point_sec(), - potential_peer_last_connection_disposition last_connection_disposition = never_attempted_to_connect) : - endpoint(endpoint), + explicit potential_peer_record( + const fc::ip::endpoint& endpoint, + const fc::time_point_sec& last_seen_time = fc::time_point_sec(), + potential_peer_last_connection_disposition last_connection_disposition = never_attempted_to_connect ) + : endpoint(endpoint), last_seen_time(last_seen_time), - last_connection_disposition(last_connection_disposition), - number_of_successful_connection_attempts(0), - number_of_failed_connection_attempts(0) - {} + last_connection_disposition(last_connection_disposition) + {} }; namespace detail @@ -74,7 +71,9 @@ namespace graphene { namespace net { class peer_database_impl; class peer_database_iterator_impl; - class peer_database_iterator : public boost::iterator_facade + class peer_database_iterator : public boost::iterator_facade< peer_database_iterator, + const potential_peer_record, + boost::forward_traversal_tag> { public: peer_database_iterator(); diff --git a/libraries/protocol/include/graphene/protocol/asset_ops.hpp b/libraries/protocol/include/graphene/protocol/asset_ops.hpp index f2515f4fed..43eec5564d 100644 --- a/libraries/protocol/include/graphene/protocol/asset_ops.hpp +++ b/libraries/protocol/include/graphene/protocol/asset_ops.hpp @@ -26,7 +26,7 @@ #include #include -namespace graphene { namespace protocol { +namespace graphene { namespace protocol { struct additional_asset_options { @@ -191,7 +191,7 @@ namespace graphene { namespace protocol { */ struct 
asset_create_operation : public base_operation { - struct fee_parameters_type { + struct fee_parameters_type { uint64_t symbol3 = 500000 * GRAPHENE_BLOCKCHAIN_PRECISION; uint64_t symbol4 = 300000 * GRAPHENE_BLOCKCHAIN_PRECISION; uint64_t long_symbol = 5000 * GRAPHENE_BLOCKCHAIN_PRECISION; @@ -208,9 +208,10 @@ namespace graphene { namespace protocol { /// Options common to all assets. /// - /// @note common_options.core_exchange_rate technically needs to store the asset ID of this new asset. Since this - /// ID is not known at the time this operation is created, create this price as though the new asset has instance - /// ID 1, and the chain will overwrite it with the new asset's ID. + /// @note + /// common_options.core_exchange_rate technically needs to store the asset ID of this new asset. Since this + /// ID is not known at the time this operation is created, create this price as though the new asset has + /// instance ID 1, and the chain will overwrite it with the new asset's ID. asset_options common_options; /// Options only available for BitAssets. MUST be non-null if and only if the asset is market-issued. optional bitasset_opts; @@ -252,10 +253,12 @@ namespace graphene { namespace protocol { * @brief Schedules a market-issued asset for automatic settlement * @ingroup operations * - * Holders of market-issued assests may request a forced settlement for some amount of their asset. This means that - * the specified sum will be locked by the chain and held for the settlement period, after which time the chain will + * Holders of market-issued assests may request a forced settlement for some amount of their asset. + * This means that the specified sum will be locked by the chain and held for the settlement period, + * after which time the chain will * choose a margin posision holder and buy the settled asset using the margin's collateral. The price of this sale - * will be based on the feed price for the market-issued asset being settled. The exact settlement price will be the + * will be based on the feed price for the market-issued asset being settled. + * The exact settlement price will be the * feed price at the time of settlement with an offset in favor of the margin position, where the offset is a * blockchain parameter set in the global_property_object. * @@ -263,9 +266,9 @@ namespace graphene { namespace protocol { */ struct asset_settle_operation : public base_operation { - struct fee_parameters_type { + struct fee_parameters_type { /** this fee should be high to encourage small settlement requests to - * be performed on the market rather than via forced settlement. + * be performed on the market rather than via forced settlement. * * Note that in the event of a black swan or prediction market close out * everyone will have to pay this fee. @@ -357,7 +360,7 @@ namespace graphene { namespace protocol { fc::optional skip_core_exchange_rate; }; - struct fee_parameters_type { + struct fee_parameters_type { uint64_t fee = 500 * GRAPHENE_BLOCKCHAIN_PRECISION; uint32_t price_per_kbyte = 10; }; @@ -445,10 +448,11 @@ namespace graphene { namespace protocol { * * Price feed providers use this operation to publish their price feeds for market-issued assets. A price feed is * used to tune the market for a particular market-issued asset. For each value in the feed, the median across all - * committee_member feeds for that asset is calculated and the market for the asset is configured with the median of that - * value. 
+ * committee_member feeds for that asset is calculated and the market for the asset is configured with the median + * of that value. * - * The feed in the operation contains three prices: a call price limit, a short price limit, and a settlement price. + * The feed in the operation contains three prices: a call price limit, a short price limit, + * and a settlement price. * The call limit price is structured as (collateral asset) / (debt asset) and the short limit price is structured * as (asset for sale) / (collateral asset). Note that the asset IDs are opposite to eachother, so if we're * publishing a feed for USD, the call limit price will be CORE/USD and the short limit price will be USD/CORE. The @@ -480,8 +484,8 @@ namespace graphene { namespace protocol { */ struct asset_issue_operation : public base_operation { - struct fee_parameters_type { - uint64_t fee = 20 * GRAPHENE_BLOCKCHAIN_PRECISION; + struct fee_parameters_type { + uint64_t fee = 20 * GRAPHENE_BLOCKCHAIN_PRECISION; uint32_t price_per_kbyte = GRAPHENE_BLOCKCHAIN_PRECISION; }; @@ -714,7 +718,8 @@ FC_REFLECT( graphene::protocol::asset_publish_feed_operation, (fee)(publisher)(asset_id)(feed)(extensions) ) FC_REFLECT( graphene::protocol::asset_settle_operation, (fee)(account)(amount)(extensions) ) FC_REFLECT( graphene::protocol::asset_settle_cancel_operation, (fee)(settlement)(account)(amount) ) -FC_REFLECT( graphene::protocol::asset_global_settle_operation, (fee)(issuer)(asset_to_settle)(settle_price)(extensions) ) +FC_REFLECT( graphene::protocol::asset_global_settle_operation, + (fee)(issuer)(asset_to_settle)(settle_price)(extensions) ) FC_REFLECT( graphene::protocol::asset_issue_operation, (fee)(issuer)(asset_to_issue)(issue_to_account)(memo)(extensions) ) FC_REFLECT( graphene::protocol::asset_reserve_operation, @@ -740,7 +745,8 @@ GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::protocol::asset_claim_fees_op GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::protocol::asset_update_operation::fee_parameters_type ) GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::protocol::asset_update_issuer_operation::fee_parameters_type ) GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::protocol::asset_update_bitasset_operation::fee_parameters_type ) -GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::protocol::asset_update_feed_producers_operation::fee_parameters_type ) +GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( + graphene::protocol::asset_update_feed_producers_operation::fee_parameters_type ) GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::protocol::asset_publish_feed_operation::fee_parameters_type ) GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::protocol::asset_issue_operation::fee_parameters_type ) GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::protocol::asset_reserve_operation::fee_parameters_type ) diff --git a/libraries/protocol/include/graphene/protocol/base.hpp b/libraries/protocol/include/graphene/protocol/base.hpp index 46b127afa4..03e47f6cb8 100644 --- a/libraries/protocol/include/graphene/protocol/base.hpp +++ b/libraries/protocol/include/graphene/protocol/base.hpp @@ -142,15 +142,15 @@ namespace graphene { namespace protocol { * For future expansion many structus include a single member of type * extensions_type that can be changed when updating a protocol. You can * always add new types to a static_variant without breaking backward - * compatibility. + * compatibility. 
*/ using future_extensions = static_variant; /** * A flat_set is used to make sure that only one extension of - * each type is added and that they are added in order. - * - * @note static_variant compares only the type tag and not the + * each type is added and that they are added in order. + * + * @note static_variant compares only the type tag and not the * content. */ using extensions_type = future_extensions::flat_set_type; diff --git a/libraries/protocol/include/graphene/protocol/market.hpp b/libraries/protocol/include/graphene/protocol/market.hpp index 1d7a8ff091..b5dfd413aa 100644 --- a/libraries/protocol/include/graphene/protocol/market.hpp +++ b/libraries/protocol/include/graphene/protocol/market.hpp @@ -25,7 +25,7 @@ #include #include -namespace graphene { namespace protocol { +namespace graphene { namespace protocol { /** * @class limit_order_create_operation @@ -115,7 +115,8 @@ namespace graphene { namespace protocol { */ struct options_type { - optional target_collateral_ratio; ///< maximum CR to maintain when selling collateral on margin call + /// Maximum CR to maintain when selling collateral on margin call + optional target_collateral_ratio; }; /** this is slightly more expensive than limit orders, this pricing impacts prediction markets */ @@ -226,12 +227,18 @@ FC_REFLECT( graphene::protocol::execute_bid_operation::fee_parameters_type, ) / FC_REFLECT( graphene::protocol::call_order_update_operation::options_type, (target_collateral_ratio) ) -FC_REFLECT( graphene::protocol::limit_order_create_operation,(fee)(seller)(amount_to_sell)(min_to_receive)(expiration)(fill_or_kill)(extensions)) -FC_REFLECT( graphene::protocol::limit_order_cancel_operation,(fee)(fee_paying_account)(order)(extensions) ) -FC_REFLECT( graphene::protocol::call_order_update_operation, (fee)(funding_account)(delta_collateral)(delta_debt)(extensions) ) -FC_REFLECT( graphene::protocol::fill_order_operation, (fee)(order_id)(account_id)(pays)(receives)(fill_price)(is_maker) ) -FC_REFLECT( graphene::protocol::bid_collateral_operation, (fee)(bidder)(additional_collateral)(debt_covered)(extensions) ) -FC_REFLECT( graphene::protocol::execute_bid_operation, (fee)(bidder)(debt)(collateral) ) +FC_REFLECT( graphene::protocol::limit_order_create_operation, + (fee)(seller)(amount_to_sell)(min_to_receive)(expiration)(fill_or_kill)(extensions)) +FC_REFLECT( graphene::protocol::limit_order_cancel_operation, + (fee)(fee_paying_account)(order)(extensions) ) +FC_REFLECT( graphene::protocol::call_order_update_operation, + (fee)(funding_account)(delta_collateral)(delta_debt)(extensions) ) +FC_REFLECT( graphene::protocol::fill_order_operation, + (fee)(order_id)(account_id)(pays)(receives)(fill_price)(is_maker) ) +FC_REFLECT( graphene::protocol::bid_collateral_operation, + (fee)(bidder)(additional_collateral)(debt_covered)(extensions) ) +FC_REFLECT( graphene::protocol::execute_bid_operation, + (fee)(bidder)(debt)(collateral) ) GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::protocol::call_order_update_operation::options_type ) GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::protocol::limit_order_create_operation::fee_parameters_type ) diff --git a/libraries/protocol/include/graphene/protocol/pts_address.hpp b/libraries/protocol/include/graphene/protocol/pts_address.hpp index 6806832fb8..2cb366feb0 100644 --- a/libraries/protocol/include/graphene/protocol/pts_address.hpp +++ b/libraries/protocol/include/graphene/protocol/pts_address.hpp @@ -66,10 +66,10 @@ namespace graphene { namespace protocol { namespace std { template<> 
- struct hash + struct hash { public: - size_t operator()(const graphene::protocol::pts_address &a) const + size_t operator()(const graphene::protocol::pts_address &a) const { size_t s; std::memcpy( (char*)&s, a.addr.data() + a.addr.size() - sizeof(s), sizeof(s) ); @@ -81,8 +81,8 @@ namespace std #include FC_REFLECT( graphene::protocol::pts_address, (addr) ) -namespace fc -{ +namespace fc +{ void to_variant( const graphene::protocol::pts_address& var, fc::variant& vo, uint32_t max_depth = 1 ); void from_variant( const fc::variant& var, graphene::protocol::pts_address& vo, uint32_t max_depth = 1 ); diff --git a/libraries/protocol/include/graphene/protocol/vote.hpp b/libraries/protocol/include/graphene/protocol/vote.hpp index cc52d08bd7..a7ec87e3b5 100644 --- a/libraries/protocol/include/graphene/protocol/vote.hpp +++ b/libraries/protocol/include/graphene/protocol/vote.hpp @@ -73,7 +73,8 @@ struct vote_id_type { try { auto colon = serial.find(':'); FC_ASSERT( colon != std::string::npos ); - *this = vote_id_type(vote_type(std::stoul(serial.substr(0, colon))), std::stoul(serial.substr(colon+1))); + *this = vote_id_type( vote_type( std::stoul(serial.substr(0, colon)) ), + uint32_t( std::stoul(serial.substr(colon+1)) ) ); } FC_CAPTURE_AND_RETHROW( (serial) ) } /// Set the type of this vote_id_type diff --git a/libraries/wallet/include/graphene/wallet/wallet_structs.hpp b/libraries/wallet/include/graphene/wallet/wallet_structs.hpp index c4007127f7..d831ef3a67 100644 --- a/libraries/wallet/include/graphene/wallet/wallet_structs.hpp +++ b/libraries/wallet/include/graphene/wallet/wallet_structs.hpp @@ -32,7 +32,7 @@ using namespace graphene::utilities; using std::string; using std::vector; -namespace graphene { namespace wallet { +namespace graphene { namespace wallet { using transaction_handle_type = uint32_t; From ea6badb97dbc9cb956ac680588f0b3fe5c6fc472 Mon Sep 17 00:00:00 2001 From: abitmore Date: Tue, 8 Nov 2022 20:14:52 +0000 Subject: [PATCH 336/338] Fix code smells --- .../graphene/chain/confidential_object.hpp | 6 +- .../include/graphene/chain/htlc_object.hpp | 65 ++++++++++--------- .../chain/special_authority_object.hpp | 6 +- 3 files changed, 41 insertions(+), 36 deletions(-) diff --git a/libraries/chain/include/graphene/chain/confidential_object.hpp b/libraries/chain/include/graphene/chain/confidential_object.hpp index d742974ad3..62a768ab41 100644 --- a/libraries/chain/include/graphene/chain/confidential_object.hpp +++ b/libraries/chain/include/graphene/chain/confidential_object.hpp @@ -52,15 +52,15 @@ struct by_commitment; /** * @ingroup object_index */ -typedef multi_index_container< +using blinded_balance_obj_multi_idx = multi_index_container< blinded_balance_object, indexed_by< ordered_unique< tag, member< object, object_id_type, &object::id > >, ordered_unique< tag, member > > -> blinded_balance_object_multi_index_type; -typedef generic_index blinded_balance_index; +>; +using blinded_balance_index = generic_index; } } // graphene::chain diff --git a/libraries/chain/include/graphene/chain/htlc_object.hpp b/libraries/chain/include/graphene/chain/htlc_object.hpp index ab86da29ac..b36d539306 100644 --- a/libraries/chain/include/graphene/chain/htlc_object.hpp +++ b/libraries/chain/include/graphene/chain/htlc_object.hpp @@ -39,30 +39,38 @@ namespace graphene { namespace chain { */ class htlc_object : public graphene::db::abstract_object { - public: - struct transfer_info { - account_id_type from; - account_id_type to; - share_type amount; - asset_id_type asset_id; - } transfer; - struct 
condition_info { - struct hash_lock_info { - htlc_hash preimage_hash; - uint16_t preimage_size; - } hash_lock; - struct time_lock_info { - fc::time_point_sec expiration; - } time_lock; - } conditions; + public: + struct transfer_info + { + account_id_type from; + account_id_type to; + share_type amount; + asset_id_type asset_id; + }; + struct condition_info + { + struct hash_lock_info + { + htlc_hash preimage_hash; + uint16_t preimage_size; + }; + struct time_lock_info + { + fc::time_point_sec expiration; + }; + hash_lock_info hash_lock; + time_lock_info time_lock; + }; - fc::optional memo; + transfer_info transfer; + condition_info conditions; + fc::optional memo; /**** * Index helper for timelock */ struct timelock_extractor { - typedef fc::time_point_sec result_type; + using result_type = fc::time_point_sec; const result_type& operator()(const htlc_object& o)const { return o.conditions.time_lock.expiration; } }; @@ -70,7 +78,7 @@ namespace graphene { namespace chain { * Index helper for from */ struct from_extractor { - typedef account_id_type result_type; + using result_type = account_id_type; const result_type& operator()(const htlc_object& o)const { return o.transfer.from; } }; @@ -78,7 +86,7 @@ namespace graphene { namespace chain { * Index helper for to */ struct to_extractor { - typedef account_id_type result_type; + using result_type = account_id_type; const result_type& operator()(const htlc_object& o)const { return o.transfer.to; } }; }; @@ -86,29 +94,26 @@ namespace graphene { namespace chain { struct by_from_id; struct by_expiration; struct by_to_id; - typedef multi_index_container< + using htlc_object_multi_index_type = multi_index_container< htlc_object, indexed_by< ordered_unique< tag< by_id >, member< object, object_id_type, &object::id > >, - ordered_unique< tag< by_expiration >, - composite_key< htlc_object, + composite_key< htlc_object, htlc_object::timelock_extractor, member< object, object_id_type, &object::id > > >, - ordered_unique< tag< by_from_id >, - composite_key< htlc_object, + composite_key< htlc_object, htlc_object::from_extractor, member< object, object_id_type, &object::id > > >, - ordered_unique< tag< by_to_id >, - composite_key< htlc_object, + composite_key< htlc_object, htlc_object::to_extractor, member< object, object_id_type, &object::id > > > - > - > htlc_object_index_type; + > + >; - typedef generic_index< htlc_object, htlc_object_index_type > htlc_index; + using htlc_index = generic_index< htlc_object, htlc_object_multi_index_type >; } } // namespace graphene::chain diff --git a/libraries/chain/include/graphene/chain/special_authority_object.hpp b/libraries/chain/include/graphene/chain/special_authority_object.hpp index eb5fbc0e71..197e686034 100644 --- a/libraries/chain/include/graphene/chain/special_authority_object.hpp +++ b/libraries/chain/include/graphene/chain/special_authority_object.hpp @@ -48,16 +48,16 @@ class special_authority_object : public graphene::db::abstract_object, member< object, object_id_type, &object::id > >, ordered_unique< tag, member< special_authority_object, account_id_type, &special_authority_object::account> > > -> special_authority_multi_index_type; +>; -typedef generic_index< special_authority_object, special_authority_multi_index_type > special_authority_index; +using special_authority_index = generic_index< special_authority_object, special_authority_multi_idx_typ >; } } // graphene::chain From f18977cae4eb356857428dc9669d65c43f7418f5 Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 9 Nov 2022 00:23:29 +0000 
Subject: [PATCH 337/338] Fix code smells --- .../graphene/chain/withdraw_permission_object.hpp | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/libraries/chain/include/graphene/chain/withdraw_permission_object.hpp b/libraries/chain/include/graphene/chain/withdraw_permission_object.hpp index b8b6a995e2..481328edb1 100644 --- a/libraries/chain/include/graphene/chain/withdraw_permission_object.hpp +++ b/libraries/chain/include/graphene/chain/withdraw_permission_object.hpp @@ -92,32 +92,33 @@ namespace graphene { namespace chain { struct by_authorized; struct by_expiration; - typedef multi_index_container< + using withdraw_permission_obj_mlt_idx = multi_index_container< withdraw_permission_object, indexed_by< ordered_unique< tag, member< object, object_id_type, &object::id > >, ordered_unique< tag, composite_key< withdraw_permission_object, - member, + member< withdraw_permission_object, account_id_type, + &withdraw_permission_object::withdraw_from_account >, member< object, object_id_type, &object::id > > >, ordered_unique< tag, composite_key< withdraw_permission_object, - member, + member< withdraw_permission_object, account_id_type, &withdraw_permission_object::authorized_account >, member< object, object_id_type, &object::id > > >, ordered_unique< tag, composite_key< withdraw_permission_object, - member, + member< withdraw_permission_object, time_point_sec, &withdraw_permission_object::expiration >, member< object, object_id_type, &object::id > > > > - > withdraw_permission_object_multi_index_type; + >; - typedef generic_index withdraw_permission_index; + using withdraw_permission_index = generic_index; } } // graphene::chain From 8d760bbbfc3d393c89209af6106855399e242b33 Mon Sep 17 00:00:00 2001 From: abitmore Date: Wed, 9 Nov 2022 00:30:56 +0000 Subject: [PATCH 338/338] Remove trailing whitespaces --- libraries/net/include/graphene/net/peer_database.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/net/include/graphene/net/peer_database.hpp b/libraries/net/include/graphene/net/peer_database.hpp index e770891230..c46bf44ba4 100644 --- a/libraries/net/include/graphene/net/peer_database.hpp +++ b/libraries/net/include/graphene/net/peer_database.hpp @@ -86,7 +86,7 @@ namespace graphene { namespace net { void increment(); bool equal(const peer_database_iterator& other) const; const potential_peer_record& dereference() const; - private: + private: std::unique_ptr my; }; }
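The closing commits in this series share one refactoring pattern: plain record types such as potential_peer_record drop hand-written constructor initializer lists in favor of default member initializers plus an explicit constructor, and the typedef'd boost::multi_index_container index types become `using` aliases. The sketch below condenses that pattern into one self-contained example; peer_record_sketch, by_endpoint and the endpoint/attempt fields are placeholder names for illustration only, not identifiers from the patches.

// Illustrative sketch of the refactoring pattern applied in the commits above.
// All names here are invented stand-ins, not the graphene/net types themselves.
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/tag.hpp>
#include <cstdint>
#include <string>

struct peer_record_sketch
{
   std::string endpoint;
   uint32_t    successful_attempts = 0; // default member initializers make a
   uint32_t    failed_attempts     = 0; // hand-written zeroing constructor unnecessary

   peer_record_sketch() = default;
   explicit peer_record_sketch( const std::string& ep ) : endpoint(ep) {}
};

struct by_endpoint;

// `using` alias in place of the older `typedef multi_index_container<...> some_index_type;` form
using peer_record_index_type = boost::multi_index_container<
   peer_record_sketch,
   boost::multi_index::indexed_by<
      boost::multi_index::ordered_unique<
         boost::multi_index::tag< by_endpoint >,
         boost::multi_index::member< peer_record_sketch, std::string, &peer_record_sketch::endpoint >
      >
   >
>;

int main()
{
   peer_record_index_type index;
   index.insert( peer_record_sketch( "203.0.113.1:1776" ) );
   // the unique ordered index can now be queried by endpoint
   return index.count( std::string("203.0.113.1:1776") ) == 1 ? 0 : 1;
}

With the members initialized in-class, the default constructor can simply be declared `= default`, which is the shape the "Fix code smells" commits give potential_peer_record and the related index typedefs in the chain and net headers.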
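The htlc_object changes in patch 336 also keep the small user-defined key extractors (timelock_extractor, from_extractor, to_extractor) that let the composite keys read fields nested inside transfer_info and condition_info, which a plain member<> key cannot reach. A reduced sketch of that extractor technique follows; record_sketch, conditions_sketch and deadline_extractor are invented names for illustration and do not appear in the patches.

// Illustrative sketch of a user-defined key extractor for Boost.MultiIndex,
// mirroring the timelock_extractor idea used above; names are placeholders.
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <ctime>

struct record_sketch
{
   struct conditions_sketch
   {
      std::time_t deadline = 0;   // nested field we want to index on
   };
   conditions_sketch conditions;
};

// Extractor: exposes result_type and operator() so an ordered (or composite) key
// can be built from a member buried inside a nested struct.
struct deadline_extractor
{
   using result_type = std::time_t;
   result_type operator()( const record_sketch& r ) const { return r.conditions.deadline; }
};

using record_index_type = boost::multi_index_container<
   record_sketch,
   boost::multi_index::indexed_by<
      boost::multi_index::ordered_non_unique< deadline_extractor >
   >
>;

int main()
{
   record_index_type idx;
   record_sketch r;
   r.conditions.deadline = 1700000000;
   idx.insert( r );
   // records are now iterable in deadline order
   return idx.begin()->conditions.deadline == 1700000000 ? 0 : 1;
}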