diff --git a/.gitignore b/.gitignore
index 7e477b3d84..7b1936ffd0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,6 +8,8 @@ Makefile
 compile_commands.json
 moc_*
 *.moc
+
+genesis.json
 
 hardfork.hpp
 libraries/utilities/git_revision.cpp
diff --git a/.gitmodules b/.gitmodules
index 24ef291b0c..d70526545f 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,8 +1,7 @@
 [submodule "docs"]
-	path = docs
-	url = https://github.com/cryptonomex/graphene.wiki.git
+	path = docs
+	url = https://github.com/bitshares/bitshares-core.wiki.git
 	ignore = dirty
 [submodule "libraries/fc"]
-	path = libraries/fc
-	url = https://github.com/cryptonomex/fc.git
-	ignore = dirty
+	path = libraries/fc
+	url = https://github.com/followmyvote/fc
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000000..76424128a0
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,45 @@
+language: c++
+
+cache: ccache
+
+git:
+  depth: 1
+
+dist: xenial
+
+sudo: true
+
+install:
+ - sudo apt-get install --allow-unauthenticated libboost-thread-dev libboost-iostreams-dev libboost-date-time-dev libboost-system-dev libboost-filesystem-dev libboost-program-options-dev libboost-chrono-dev libboost-test-dev libboost-context-dev libboost-regex-dev libboost-coroutine-dev cmake parallel
+
+addons:
+  sonarcloud:
+    organization: "flwyiq7go36p6lipr64tbesy5jayad3q"
+    token:
+      secure: "Ik4xQhs9imtsFIC1SMAPmdLId9lVadY/4PEgo5tM4M5cQRvyt4xeuMMV+CRIT6tGEEqF71ea74qVJTxT7qinWZ3kmHliFjbqDxk1FbjCpK6NGQDyTdfWMVJFIlk7WefvtGAwFBkf6pSTs553bKNNM0HbBYQGKe08waLwv7R+lOmVjTTKIRF/cCVw+C5QQZdXFnUMTg+mRuUqGk4WvNNPmcBfkX0ekHPrXwAD5ATVS1q0iloA0nzHq8CPNmPE+IyXdPw0EBp+fl3cL9MgrlwRbELxrnCKFy+ObdjhDj7z3FDIxDe+03gVlgd+6Fame+9EJCeeeNLF4G4qNR1sLEvHRqVz12/NYnRU9hQL0c/jJtiUquOJA5+HqrhhB9XUZjS1xbHV3aIU5PR0bdDP6MKatvIVwRhwxwhaDXh7VSimis8eL+LvXT7EO+rGjco0c17RuzZpFCsKmXCej4Q8iDBMdOIWwe2WuWi8zb6MFvnLyK2EcM53hAn2yMwU+nprbpHwzU5oJTFZLD+J78zCSGk7uu7vsF+EEnheMwfqafP9MpMEXGXaXZiq7QKy3KvxQTg+1ozPIu+fgxvY0xdyrjJHOSJlrvXN7osjD4IDTs6D5cLAZ04WGIKsulZDr7ZN5n3gmA9h4cfhJsIEia0uQzLmWnfF6RksxWElK1i1+xmse7E="
+
+env:
+  global:
+    - CCACHE_COMPRESS=exists_means_true
+    - CCACHE_MAXSIZE=1Gi
+    - CCACHE_SLOPPINESS=include_file_ctime,include_file_mtime,time_macros
+
+script:
+ - programs/build_helpers/buildstep -s 3500
+ - ccache -s
+ - programs/build_helpers/buildstep Prepare 1 "sed -i '/tests/d' libraries/fc/CMakeLists.txt"
+ - programs/build_helpers/buildstep cmake 5 "cmake -DCMAKE_BUILD_TYPE=Debug -DCMAKE_C_FLAGS=--coverage -DCMAKE_CXX_FLAGS=--coverage -DBoost_USE_STATIC_LIBS=OFF -DCMAKE_CXX_OUTPUT_EXTENSION_REPLACE=ON ."
+ - programs/build_helpers/buildstep make.cli_wallet 1600 "programs/build_helpers/make_with_sonar bw-output -j 2 cli_wallet" + - programs/build_helpers/buildstep make.witness_node 300 "programs/build_helpers/make_with_sonar bw-output -j 2 witness_node" + - programs/build_helpers/buildstep make.serializer 45 "programs/build_helpers/make_with_sonar bw-output -j 2 js_operation_serializer" + - programs/build_helpers/buildstep make.get_dev_key 10 "programs/build_helpers/make_with_sonar bw-output -j 2 get_dev_key" + - programs/build_helpers/buildstep make.chain_test 900 "programs/build_helpers/make_with_sonar bw-output -j 2 chain_test" + - programs/build_helpers/buildstep make.cli_test 200 "programs/build_helpers/make_with_sonar bw-output -j 2 cli_test" + - programs/build_helpers/buildstep make.perf_test 120 "programs/build_helpers/make_with_sonar bw-output -j 2 performance_test" + - set -o pipefail + - programs/build_helpers/buildstep run.chain_test 240 "libraries/fc/tests/run-parallel-tests.sh tests/chain_test" + - programs/build_helpers/buildstep run.cli_test 30 "libraries/fc/tests/run-parallel-tests.sh tests/cli_test" + - programs/build_helpers/buildstep prepare.sonar 20 'find libraries/[acdenptuw]*/CMakeFiles/*.dir programs/[cdgjsw]*/CMakeFiles/*.dir -type d | while read d; do gcov -o "$d" "${d/CMakeFiles*.dir//}"/*.cpp; done >/dev/null' + - programs/build_helpers/buildstep run.sonar 400 "which sonar-scanner && sonar-scanner || true" + - programs/build_helpers/buildstep end 0 + - ccache -s diff --git a/CMakeLists.txt b/CMakeLists.txt index 5e0b2bc758..3f30ab7250 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,11 +1,11 @@ -# Defines Graphene library target. -project( Graphene ) +# Defines BitShares library target. +project( BitShares ) cmake_minimum_required( VERSION 2.8.12 ) -set( BLOCKCHAIN_NAME "Graphene" ) +set( BLOCKCHAIN_NAME "BitShares" ) set( CLI_CLIENT_EXECUTABLE_NAME graphene_client ) -set( GUI_CLIENT_EXECUTABLE_NAME Graphene ) +set( GUI_CLIENT_EXECUTABLE_NAME BitShares ) set( CUSTOM_URL_SCHEME "gcs" ) set( INSTALLER_APP_ID "68ad7005-8eee-49c9-95ce-9eed97e5b347" ) @@ -23,6 +23,7 @@ endif() list( APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules" ) set(CMAKE_EXPORT_COMPILE_COMMANDS "ON") +set(GRAPHENE_EGENESIS_JSON "${CMAKE_CURRENT_SOURCE_DIR}/libraries/egenesis/genesis.json" ) #set (ENABLE_INSTALLER 1) #set (USE_PCH 1) @@ -31,23 +32,25 @@ if (USE_PCH) include (cotire) endif(USE_PCH) -list( APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/libraries/fc/CMakeModules" ) +option(USE_PROFILER "Build with GPROF support(Linux)." OFF) + +IF( NOT WIN32 ) + list( APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/libraries/fc/CMakeModules" ) +ENDIF( NOT WIN32 ) list( APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/libraries/fc/GitVersionGen" ) include( GetGitRevisionDescription ) get_git_head_revision( GIT_REFSPEC GIT_SHA2 ) SET(BOOST_COMPONENTS) LIST(APPEND BOOST_COMPONENTS thread + iostreams date_time - system filesystem + system program_options - signals - serialization chrono unit_test_framework - context - locale) + context) SET( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) IF( WIN32 ) @@ -67,7 +70,7 @@ ENDIF() if( WIN32 ) - message( STATUS "Configuring Graphene on WIN32") + message( STATUS "Configuring BitShares on WIN32") set( DB_VERSION 60 ) set( BDB_STATIC_LIBS 1 ) @@ -76,12 +79,15 @@ if( WIN32 ) set(CRYPTO_LIB) - #looks like this flag can have different default on some machines. 
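# --- A minimal sketch for context (assumption, not part of the patch) ---------
# The new GRAPHENE_EGENESIS_JSON setting above hard-codes the genesis.json that
# the new .gitignore entry keeps out of version control. If the genesis state
# should be selectable at configure time, the usual CMake pattern is a cache
# variable; the default path is taken from the patch, but the CACHE form below
# is only an illustration of that pattern, not something this patch does.
set( GRAPHENE_EGENESIS_JSON
     "${CMAKE_CURRENT_SOURCE_DIR}/libraries/egenesis/genesis.json"
     CACHE FILEPATH "Genesis state embedded into the built node and wallet" )
# Example override: cmake -DGRAPHENE_EGENESIS_JSON=/path/to/testnet-genesis.json .
# -------------------------------------------------------------------------------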
- SET(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /SAFESEH:NO") - SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /SAFESEH:NO") + if( MSVC ) + add_definitions(-DWIN32_LEAN_AND_MEAN) + #looks like this flag can have different default on some machines. + SET(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /SAFESEH:NO") + SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /SAFESEH:NO") - # Probably cmake has a bug and vcxproj generated for executable in Debug conf. has disabled debug info - set(CMAKE_EXE_LINKER_FLAGS_DEBUG "${CMAKE_EXE_LINKER_FLAGS_DEBUG} /DEBUG") + # Probably cmake has a bug and vcxproj generated for executable in Debug conf. has disabled debug info + set(CMAKE_EXE_LINKER_FLAGS_DEBUG "${CMAKE_EXE_LINKER_FLAGS_DEBUG} /DEBUG") + endif ( MSVC ) # On windows tcl should be installed to the directory pointed by setenv.bat script SET(TCL_INCLUDE_PATH $ENV{TCL_ROOT}/include) @@ -100,20 +106,17 @@ if( WIN32 ) else( WIN32 ) # Apple AND Linux - find_library(READLINE_LIBRARIES NAMES readline) - find_path(READLINE_INCLUDE_DIR readline/readline.h) - #if(NOT READLINE_INCLUDE_DIR OR NOT READLINE_LIBRARIES) - # MESSAGE(FATAL_ERROR "Could not find lib readline.") - #endif() - if( APPLE ) # Apple Specific Options Here - message( STATUS "Configuring Graphene on OS X" ) + message( STATUS "Configuring BitShares on OS X" ) set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} -std=c++11 -stdlib=libc++ -Wall" ) else( APPLE ) # Linux Specific Options Here - message( STATUS "Configuring Graphene on Linux" ) + message( STATUS "Configuring BitShares on Linux" ) set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} -std=c++11 -Wall" ) + if(USE_PROFILER) + set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pg" ) + endif( USE_PROFILER ) set( rt_library rt ) set( pthread_library pthread) if ( NOT DEFINED crypto_library ) @@ -128,6 +131,10 @@ else( WIN32 ) # Apple AND Linux if( "${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" ) set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-builtin-memcmp" ) + elseif( "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" ) + if( CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 4.0.0 OR CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.0.0 ) + set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-invalid-partial-specialization" ) + endif() endif() if( "${CMAKE_GENERATOR}" STREQUAL "Ninja" ) @@ -142,9 +149,7 @@ else( WIN32 ) # Apple AND Linux endif( WIN32 ) -find_package( BerkeleyDB ) - -set(ENABLE_COVERAGE_TESTING FALSE CACHE BOOL "Build Graphene for code coverage analysis") +set(ENABLE_COVERAGE_TESTING FALSE CACHE BOOL "Build BitShares for code coverage analysis") if(ENABLE_COVERAGE_TESTING) SET(CMAKE_CXX_FLAGS "--coverage ${CMAKE_CXX_FLAGS}") @@ -174,18 +179,18 @@ set(CPACK_PACKAGE_VERSION_MAJOR "${VERSION_MAJOR}") set(CPACK_PACKAGE_VERSION_MINOR "${VERSION_MINOR}") set(CPACK_PACKAGE_VERSION_PATCH "${VERSION_PATCH}") set(CPACK_PACKAGE_VERSION "${CPACK_PACKAGE_VERSION_MAJOR}.${CPACK_PACKAGE_VERSION_MINOR}.${CPACK_PACKAGE_VERSION_PATCH}") -set(CPACK_PACKAGE_DESCRIPTION "A client for the Graphene network") -set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "A client for the Graphene network") +set(CPACK_PACKAGE_DESCRIPTION "A client for the BitShares network") +set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "A client for the BitShares network") set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE.md") -set(CPACK_PACKAGE_INSTALL_DIRECTORY "Graphene ${CPACK_PACKAGE_VERSION}") +set(CPACK_PACKAGE_INSTALL_DIRECTORY "BitShares ${CPACK_PACKAGE_VERSION}") if(WIN32) SET(CPACK_GENERATOR "ZIP;NSIS") - set(CPACK_PACKAGE_NAME "Graphene") # 
override above + set(CPACK_PACKAGE_NAME "BitShares") # override above set(CPACK_NSIS_EXECUTABLES_DIRECTORY .) - set(CPACK_NSIS_PACKAGE_NAME "Graphene v${CPACK_PACKAGE_VERSION}") + set(CPACK_NSIS_PACKAGE_NAME "BitShares v${CPACK_PACKAGE_VERSION}") set(CPACK_NSIS_DISPLAY_NAME "${CPACK_NSIS_PACKAGE_NAME}") - set(CPACK_NSIS_DEFINES " !define MUI_STARTMENUPAGE_DEFAULTFOLDER \\\"Graphene\\\"") + set(CPACK_NSIS_DEFINES " !define MUI_STARTMENUPAGE_DEFAULTFOLDER \\\"BitShares\\\"") # it seems like windows zip files usually don't have a single directory inside them, unix tgz frequently do SET(CPACK_INCLUDE_TOPLEVEL_DIRECTORY 0) @@ -203,3 +208,7 @@ endif(LINUX) include(CPack) endif(ENABLE_INSTALLER) + +MESSAGE( STATUS "" ) +MESSAGE( STATUS "PROFILER: ${USE_PROFILER}" ) +MESSAGE( STATUS "" ) diff --git a/CMakeModules/FindBerkeleyDB.cmake b/CMakeModules/FindBerkeleyDB.cmake deleted file mode 100644 index 6dcb3e9bc0..0000000000 --- a/CMakeModules/FindBerkeleyDB.cmake +++ /dev/null @@ -1,98 +0,0 @@ -# Find the BerkeleyDB includes and library -# Customizable variables: -# BDB_ROOT_DIR -# This variable points to the BerkeleyDB root directory. On Windows the -# library location typically will have to be provided explicitly using the -# -D command-line option. Alternatively, the DBROOTDIR environment variable -# can be set. -# -# BDB_STATIC_LIBS -# Should be set to 1 if static version of libraries should be found. Defaults to 0 (shared libs). -# -# This module defines -# BDB_INCLUDE_DIR, where to find db.h, etc. -# BDB_LIBRARIES, the libraries needed to use BerkeleyDB. - -IF (NOT DEFINED BDB_ROOT_DIR) - SET (BDB_ROOT_DIR $ENV{DBROOTDIR}) -ENDIF() - -MESSAGE (STATUS "Using ${BDB_ROOT_DIR} as BerkeleyDB root") - -IF(NOT DEFINED BDB_STATIC_LIBS) - SET (BDB_STATIC_LIBS 0) -ENDIF() - -FIND_PATH(BDB_INCLUDE_DIR NAMES db.h db_cxx.h - HINTS "${BDB_ROOT_DIR}/include" - PATHS ${BDB_ROOT_DIR} - /usr/include/libdb5 - /usr/include/db5 - /usr/include/libdb4 - /usr/include/db4 - /usr/local/include/libdb5 - /usr/local/include/db5 - /usr/local/include/libdb4 - /usr/local/include/db4 - PATH_SUFFIXES include -) - -IF (WIN32) - IF(NOT DEFINED BDB_VERSION) - SET (DB_VERSION "60") - ENDIF () - - SET (BDB_LIB_BASENAME "libdb") - - IF (${BDB_STATIC_LIBS} EQUAL 1) - SET (BDB_LIBS_SUFFIX_RELEASE "s") - SET (BDB_LIBS_SUFFIX_DEBUG "sD") - ELSE() - SET (BDB_LIBS_SUFFIX_RELEASE "") - SET (BDB_LIBS_SUFFIX_DEBUG "D") - ENDIF() - -ELSE (WIN32) - IF(NOT DEFINED BDB_VERSION) - SET (DB_VERSION "-6.0") - ENDIF () - - # On unix library in all versions have the same names. 
- SET (BDB_LIBS_SUFFIX_RELEASE "") - SET (BDB_LIBS_SUFFIX_DEBUG "") - - SET (BDB_LIB_BASENAME "db_cxx") -ENDIF (WIN32) - -message (STATUS "Looking for: ${BDB_LIB_BASENAME}${DB_VERSION}${BDB_LIBS_SUFFIX_RELEASE}") -FIND_LIBRARY(BDB_LIBRARY_RELEASE "${BDB_LIB_BASENAME}${DB_VERSION}${BDB_LIBS_SUFFIX_RELEASE}" "${BDB_LIB_BASENAME}" - HINTS "${BDB_ROOT_DIR}/lib" PATHS ${BDB_ROOT_DIR} ${BDB_INCLUDE_DIR} "/usr/local/lib" PATH_SUFFIXES lib -) - -FIND_LIBRARY(BDB_LIBRARY_DEBUG "${BDB_LIB_BASENAME}${DB_VERSION}${BDB_LIBS_SUFFIX_DEBUG}" "${BDB_LIB_BASENAME}" - HINTS "${BDB_ROOT_DIR}/lib" PATHS ${BDB_ROOT_DIR} ${BDB_INCLUDE_DIR} "/usr/local/lib" PATH_SUFFIXES lib -) - -IF (BDB_LIBRARY_RELEASE AND BDB_LIBRARY_DEBUG ) - SET (_BDB_LIBRARY - debug ${BDB_LIBRARY_DEBUG} - optimized ${BDB_LIBRARY_RELEASE} - ) -ELSEIF(BDB_LIBRARY_RELEASE) - SET (_BDB_LIBRARY ${BDB_LIBRARY_RELEASE}) -ELSEIF(BDB_LIBRARY_DEBUG) - SET (_BDB_LIBRARY ${BDB_LIBRARY_DEBUG}) -ENDIF() - -MESSAGE (STATUS ${_BDB_LIBRARY}) - -IF(_BDB_LIBRARY) - LIST (APPEND BDB_LIBRARIES ${_BDB_LIBRARY}) -ENDIF() - -INCLUDE(FindPackageHandleStandardArgs) -FIND_PACKAGE_HANDLE_STANDARD_ARGS(BerkeleyDB - FOUND_VAR BerkeleyDB_FOUND - REQUIRED_VARS BDB_INCLUDE_DIR BDB_LIBRARIES - FAIL_MESSAGE "Could not find Berkeley DB >= 4.1" ) - diff --git a/CMakeModules/FindLineman.cmake b/CMakeModules/FindLineman.cmake deleted file mode 100644 index f6f480daf5..0000000000 --- a/CMakeModules/FindLineman.cmake +++ /dev/null @@ -1,8 +0,0 @@ -find_program(NPM_EXECUTABLE npm) -include(FindPackageHandleStandardArgs) -find_package_handle_standard_args("NPM" DEFAULT_MSG NPM_EXECUTABLE) - -find_program(LINEMAN_EXECUTABLE lineman) -include(FindPackageHandleStandardArgs) -find_package_handle_standard_args("Lineman" DEFAULT_MSG LINEMAN_EXECUTABLE) - diff --git a/CMakeModules/FindNodeJs.cmake b/CMakeModules/FindNodeJs.cmake deleted file mode 100644 index 3d69870101..0000000000 --- a/CMakeModules/FindNodeJs.cmake +++ /dev/null @@ -1,4 +0,0 @@ -find_program(NODEJS_EXECUTABLE node) - -include(FindPackageHandleStandardArgs) -find_package_handle_standard_args("NodeJs" DEFAULT_MSG NODEJS_EXECUTABLE) \ No newline at end of file diff --git a/CMakeModules/cotire.cmake b/CMakeModules/cotire.cmake index 0df9a4a225..ab611007dc 100644 --- a/CMakeModules/cotire.cmake +++ b/CMakeModules/cotire.cmake @@ -3,7 +3,7 @@ # See the cotire manual for usage hints. 
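# --- A minimal usage sketch (assumption, not part of the patch) ----------------
# The cotire.cmake changes below only touch cotire's internals; for context,
# this is how a target is typically cotired per the cotire manual referenced
# above. The target name and prefix header path are hypothetical examples, not
# taken from this repository's build files.
include( cotire )
add_library( example_lib src/example.cpp )
set_target_properties( example_lib PROPERTIES
    COTIRE_CXX_PREFIX_HEADER_INIT "include/example_pch.hpp"  # seed the generated prefix header
    COTIRE_ADD_UNITY_BUILD FALSE )                            # precompiled header only, no unity target
cotire( example_lib )
# -------------------------------------------------------------------------------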
# #============================================================================= -# Copyright 2012-2015 Sascha Kratky +# Copyright 2012-2016 Sascha Kratky # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation @@ -42,7 +42,20 @@ if (NOT CMAKE_SCRIPT_MODE_FILE) cmake_policy(POP) endif() +set (COTIRE_CMAKE_MODULE_FILE "${CMAKE_CURRENT_LIST_FILE}") +set (COTIRE_CMAKE_MODULE_VERSION "1.7.9") + # activate select policies +if (POLICY CMP0025) + # Compiler id for Apple Clang is now AppleClang + cmake_policy(SET CMP0025 NEW) +endif() + +if (POLICY CMP0026) + # disallow use of the LOCATION target property + cmake_policy(SET CMP0026 NEW) +endif() + if (POLICY CMP0038) # targets may not link directly to themselves cmake_policy(SET CMP0038 NEW) @@ -93,9 +106,6 @@ if (POLICY CMP0054) cmake_policy(SET CMP0054 NEW) endif() -set (COTIRE_CMAKE_MODULE_FILE "${CMAKE_CURRENT_LIST_FILE}") -set (COTIRE_CMAKE_MODULE_VERSION "1.7.2") - include(CMakeParseArguments) include(ProcessorCount) @@ -321,7 +331,7 @@ function (cotire_get_target_usage_requirements _target _targetRequirementsVar) list (FIND _targetRequirements ${_library} _index) if (_index LESS 0) list (APPEND _targetRequirements ${_library}) - # process transitive libraries + # BFS traversal of transitive libraries get_target_property(_libraries ${_library} INTERFACE_LINK_LIBRARIES) if (_libraries) list (APPEND _librariesToProcess ${_libraries}) @@ -379,6 +389,24 @@ function (cotire_filter_compile_flags _language _flagFilter _matchedOptionsVar _ set (${_unmatchedOptionsVar} ${_unmatchedOptions} PARENT_SCOPE) endfunction() +function (cotire_is_target_supported _target _isSupportedVar) + if (NOT TARGET "${_target}") + set (${_isSupportedVar} FALSE PARENT_SCOPE) + return() + endif() + get_target_property(_imported ${_target} IMPORTED) + if (_imported) + set (${_isSupportedVar} FALSE PARENT_SCOPE) + return() + endif() + get_target_property(_targetType ${_target} TYPE) + if (NOT _targetType MATCHES "EXECUTABLE|(STATIC|SHARED|MODULE|OBJECT)_LIBRARY") + set (${_isSupportedVar} FALSE PARENT_SCOPE) + return() + endif() + set (${_isSupportedVar} TRUE PARENT_SCOPE) +endfunction() + function (cotire_get_target_compile_flags _config _language _target _flagsVar) string (TOUPPER "${_config}" _upperConfig) # collect options from CMake language variables @@ -421,6 +449,26 @@ function (cotire_get_target_compile_flags _config _language _target _flagsVar) endif() endforeach() endif() + # handle language standard properties + if (CMAKE_${_language}_STANDARD_DEFAULT) + # used compiler supports language standard levels + if (_target) + get_target_property(_targetLanguageStandard ${_target} ${_language}_STANDARD) + if (_targetLanguageStandard) + set (_type "EXTENSION") + get_property(_isSet TARGET ${_target} PROPERTY ${_language}_EXTENSIONS SET) + if (_isSet) + get_target_property(_targetUseLanguageExtensions ${_target} ${_language}_EXTENSIONS) + if (NOT _targetUseLanguageExtensions) + set (_type "STANDARD") + endif() + endif() + if (CMAKE_${_language}${_targetLanguageStandard}_${_type}_COMPILE_OPTION) + list (APPEND _compileFlags "${CMAKE_${_language}${_targetLanguageStandard}_${_type}_COMPILE_OPTION}") + endif() + endif() + endif() + endif() # handle the POSITION_INDEPENDENT_CODE target property if (_target) get_target_property(_targetPIC ${_target} POSITION_INDEPENDENT_CODE) @@ -433,6 +481,17 @@ function (cotire_get_target_compile_flags _config _language _target _flagsVar) endif() endif() endif() + # 
handle visibility target properties + if (_target) + get_target_property(_targetVisibility ${_target} ${_language}_VISIBILITY_PRESET) + if (_targetVisibility AND CMAKE_${_language}_COMPILE_OPTIONS_VISIBILITY) + list (APPEND _compileFlags "${CMAKE_${_language}_COMPILE_OPTIONS_VISIBILITY}${_targetVisibility}") + endif() + get_target_property(_targetVisibilityInlines ${_target} VISIBILITY_INLINES_HIDDEN) + if (_targetVisibilityInlines AND CMAKE_${_language}_COMPILE_OPTIONS_VISIBILITY_INLINES_HIDDEN) + list (APPEND _compileFlags "${CMAKE_${_language}_COMPILE_OPTIONS_VISIBILITY_INLINES_HIDDEN}") + endif() + endif() # platform specific flags if (APPLE) get_target_property(_architectures ${_target} OSX_ARCHITECTURES_${_upperConfig}) @@ -473,12 +532,31 @@ function (cotire_get_target_include_directories _config _language _target _inclu list (APPEND _includeDirs "${CMAKE_CURRENT_BINARY_DIR}") list (APPEND _includeDirs "${CMAKE_CURRENT_SOURCE_DIR}") endif() - # parse additional include directories from target compile flags set (_targetFlags "") cotire_get_target_compile_flags("${_config}" "${_language}" "${_target}" _targetFlags) - cotire_filter_compile_flags("${_language}" "I" _dirs _ignore ${_targetFlags}) - if (_dirs) - list (APPEND _includeDirs ${_dirs}) + # parse additional include directories from target compile flags + if (CMAKE_INCLUDE_FLAG_${_language}) + string (STRIP "${CMAKE_INCLUDE_FLAG_${_language}}" _includeFlag) + string (REGEX REPLACE "^[-/]+" "" _includeFlag "${_includeFlag}") + if (_includeFlag) + set (_dirs "") + cotire_filter_compile_flags("${_language}" "${_includeFlag}" _dirs _ignore ${_targetFlags}) + if (_dirs) + list (APPEND _includeDirs ${_dirs}) + endif() + endif() + endif() + # parse additional system include directories from target compile flags + if (CMAKE_INCLUDE_SYSTEM_FLAG_${_language}) + string (STRIP "${CMAKE_INCLUDE_SYSTEM_FLAG_${_language}}" _includeFlag) + string (REGEX REPLACE "^[-/]+" "" _includeFlag "${_includeFlag}") + if (_includeFlag) + set (_dirs "") + cotire_filter_compile_flags("${_language}" "${_includeFlag}" _dirs _ignore ${_targetFlags}) + if (_dirs) + list (APPEND _systemIncludeDirs ${_dirs}) + endif() + endif() endif() # target include directories get_directory_property(_dirs DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" INCLUDE_DIRECTORIES) @@ -497,6 +575,21 @@ function (cotire_get_target_include_directories _config _language _target _inclu set (_linkedTargets "") cotire_get_target_usage_requirements(${_target} _linkedTargets) foreach (_linkedTarget ${_linkedTargets}) + get_target_property(_linkedTargetType ${_linkedTarget} TYPE) + if (CMAKE_INCLUDE_CURRENT_DIR_IN_INTERFACE AND NOT CMAKE_VERSION VERSION_LESS "3.4.0" AND + _linkedTargetType MATCHES "(STATIC|SHARED|MODULE|OBJECT)_LIBRARY") + # CMAKE_INCLUDE_CURRENT_DIR_IN_INTERFACE refers to CMAKE_CURRENT_BINARY_DIR and CMAKE_CURRENT_SOURCE_DIR + # at the time, when the target was created. These correspond to the target properties BINARY_DIR and SOURCE_DIR + # which are only available with CMake 3.4 or later. 
+ get_target_property(_targetDirs ${_linkedTarget} BINARY_DIR) + if (_targetDirs) + list (APPEND _dirs ${_targetDirs}) + endif() + get_target_property(_targetDirs ${_linkedTarget} SOURCE_DIR) + if (_targetDirs) + list (APPEND _dirs ${_targetDirs}) + endif() + endif() get_target_property(_targetDirs ${_linkedTarget} INTERFACE_INCLUDE_DIRECTORIES) if (_targetDirs) list (APPEND _dirs ${_targetDirs}) @@ -534,6 +627,25 @@ function (cotire_get_target_include_directories _config _language _target _inclu if (CMAKE_${_language}_IMPLICIT_INCLUDE_DIRECTORIES) list (REMOVE_ITEM _includeDirs ${CMAKE_${_language}_IMPLICIT_INCLUDE_DIRECTORIES}) endif() + if (WIN32) + # convert Windows paths in include directories to CMake paths + if (_includeDirs) + set (_paths "") + foreach (_dir ${_includeDirs}) + file (TO_CMAKE_PATH "${_dir}" _path) + list (APPEND _paths "${_path}") + endforeach() + set (_includeDirs ${_paths}) + endif() + if (_systemIncludeDirs) + set (_paths "") + foreach (_dir ${_systemIncludeDirs}) + file (TO_CMAKE_PATH "${_dir}" _path) + list (APPEND _paths "${_path}") + endforeach() + set (_systemIncludeDirs ${_paths}) + endif() + endif() if (COTIRE_DEBUG AND _includeDirs) message (STATUS "Target ${_target} include dirs: ${_includeDirs}") endif() @@ -617,8 +729,23 @@ function (cotire_get_target_compiler_flags _config _language _target _compilerFl # parse target compile flags omitting compile definitions and include directives set (_targetFlags "") cotire_get_target_compile_flags("${_config}" "${_language}" "${_target}" _targetFlags) + set (_flagFilter "D") + if (CMAKE_INCLUDE_FLAG_${_language}) + string (STRIP "${CMAKE_INCLUDE_FLAG_${_language}}" _includeFlag) + string (REGEX REPLACE "^[-/]+" "" _includeFlag "${_includeFlag}") + if (_includeFlag) + set (_flagFilter "${_flagFilter}|${_includeFlag}") + endif() + endif() + if (CMAKE_INCLUDE_SYSTEM_FLAG_${_language}) + string (STRIP "${CMAKE_INCLUDE_SYSTEM_FLAG_${_language}}" _includeFlag) + string (REGEX REPLACE "^[-/]+" "" _includeFlag "${_includeFlag}") + if (_includeFlag) + set (_flagFilter "${_flagFilter}|${_includeFlag}") + endif() + endif() set (_compilerFlags "") - cotire_filter_compile_flags("${_language}" "[ID]" _ignore _compilerFlags ${_targetFlags}) + cotire_filter_compile_flags("${_language}" "${_flagFilter}" _ignore _compilerFlags ${_targetFlags}) if (COTIRE_DEBUG AND _compilerFlags) message (STATUS "Target ${_target} compiler flags: ${_compilerFlags}") endif() @@ -739,7 +866,10 @@ macro (cotire_set_cmd_to_prologue _cmdVar) endif() endmacro() -function (cotire_init_compile_cmd _cmdVar _language _compilerExe _compilerArg1) +function (cotire_init_compile_cmd _cmdVar _language _compilerLauncher _compilerExe _compilerArg1) + if (NOT _compilerLauncher) + set (_compilerLauncher ${CMAKE_${_language}_COMPILER_LAUNCHER}) + endif() if (NOT _compilerExe) set (_compilerExe "${CMAKE_${_language}_COMPILER}") endif() @@ -747,7 +877,12 @@ function (cotire_init_compile_cmd _cmdVar _language _compilerExe _compilerArg1) set (_compilerArg1 ${CMAKE_${_language}_COMPILER_ARG1}) endif() string (STRIP "${_compilerArg1}" _compilerArg1) - set (${_cmdVar} "${_compilerExe}" ${_compilerArg1} PARENT_SCOPE) + if ("${CMAKE_GENERATOR}" MATCHES "Make|Ninja") + # compiler launcher is only supported for Makefile and Ninja + set (${_cmdVar} ${_compilerLauncher} "${_compilerExe}" ${_compilerArg1} PARENT_SCOPE) + else() + set (${_cmdVar} "${_compilerExe}" ${_compilerArg1} PARENT_SCOPE) + endif() endfunction() macro (cotire_add_definitions_to_cmd _cmdVar _language) @@ 
-760,39 +895,66 @@ macro (cotire_add_definitions_to_cmd _cmdVar _language) endforeach() endmacro() -macro (cotire_add_includes_to_cmd _cmdVar _language _includeSystemFlag _includesVar _systemIncludesVar) - foreach (_include ${${_includesVar}}) - if (WIN32 AND CMAKE_${_language}_COMPILER_ID MATCHES "MSVC|Intel") - file (TO_NATIVE_PATH "${_include}" _include) - list (APPEND ${_cmdVar} "/I${_include}") - else() - list (FIND ${_systemIncludesVar} ${_include} _index) - if(_index GREATER -1 AND NOT "${_includeSystemFlag}" STREQUAL "") - list (APPEND ${_cmdVar} "${_includeSystemFlag}${_include}") +function (cotire_add_includes_to_cmd _cmdVar _language _includesVar _systemIncludesVar) + set (_includeDirs ${${_includesVar}} ${${_systemIncludesVar}}) + if (_includeDirs) + list (REMOVE_DUPLICATES _includeDirs) + foreach (_include ${_includeDirs}) + if (WIN32 AND CMAKE_${_language}_COMPILER_ID MATCHES "MSVC|Intel") + file (TO_NATIVE_PATH "${_include}" _include) + list (APPEND ${_cmdVar} "${CMAKE_INCLUDE_FLAG_${_language}}${CMAKE_INCLUDE_FLAG_${_language}_SEP}${_include}") else() - list (APPEND ${_cmdVar} "-I${_include}") + set (_index -1) + if ("${CMAKE_INCLUDE_SYSTEM_FLAG_${_language}}" MATCHES ".+") + list (FIND ${_systemIncludesVar} "${_include}" _index) + endif() + if (_index GREATER -1) + list (APPEND ${_cmdVar} "${CMAKE_INCLUDE_SYSTEM_FLAG_${_language}}${_include}") + else() + list (APPEND ${_cmdVar} "${CMAKE_INCLUDE_FLAG_${_language}}${CMAKE_INCLUDE_FLAG_${_language}_SEP}${_include}") + endif() endif() - endif() - endforeach() -endmacro() + endforeach() + endif() + set (${_cmdVar} ${${_cmdVar}} PARENT_SCOPE) +endfunction() -macro (cotire_add_frameworks_to_cmd _cmdVar _language) +function (cotire_add_frameworks_to_cmd _cmdVar _language _includesVar _systemIncludesVar) if (APPLE) - set (_frameWorkDirs "") - foreach (_include ${ARGN}) + set (_frameworkDirs "") + foreach (_include ${${_includesVar}}) + if (IS_ABSOLUTE "${_include}" AND _include MATCHES "\\.framework$") + get_filename_component(_frameworkDir "${_include}" DIRECTORY) + list (APPEND _frameworkDirs "${_frameworkDir}") + endif() + endforeach() + set (_systemFrameworkDirs "") + foreach (_include ${${_systemIncludesVar}}) if (IS_ABSOLUTE "${_include}" AND _include MATCHES "\\.framework$") - get_filename_component(_frameWorkDir "${_include}" PATH) - list (APPEND _frameWorkDirs "${_frameWorkDir}") + get_filename_component(_frameworkDir "${_include}" DIRECTORY) + list (APPEND _systemFrameworkDirs "${_frameworkDir}") endif() endforeach() - if (_frameWorkDirs) - list (REMOVE_DUPLICATES _frameWorkDirs) - foreach (_frameWorkDir ${_frameWorkDirs}) - list (APPEND ${_cmdVar} "-F${_frameWorkDir}") + if (_systemFrameworkDirs) + list (APPEND _frameworkDirs ${_systemFrameworkDirs}) + endif() + if (_frameworkDirs) + list (REMOVE_DUPLICATES _frameworkDirs) + foreach (_frameworkDir ${_frameworkDirs}) + set (_index -1) + if ("${CMAKE_${_language}_SYSTEM_FRAMEWORK_SEARCH_FLAG}" MATCHES ".+") + list (FIND _systemFrameworkDirs "${_frameworkDir}" _index) + endif() + if (_index GREATER -1) + list (APPEND ${_cmdVar} "${CMAKE_${_language}_SYSTEM_FRAMEWORK_SEARCH_FLAG}${_frameworkDir}") + else() + list (APPEND ${_cmdVar} "${CMAKE_${_language}_FRAMEWORK_SEARCH_FLAG}${_frameworkDir}") + endif() endforeach() endif() endif() -endmacro() + set (${_cmdVar} ${${_cmdVar}} PARENT_SCOPE) +endfunction() macro (cotire_add_compile_flags_to_cmd _cmdVar) foreach (_flag ${ARGN}) @@ -1040,9 +1202,9 @@ endfunction() function (cotire_scan_includes _includesVar) set(_options "") - 
set(_oneValueArgs COMPILER_ID COMPILER_EXECUTABLE COMPILER_VERSION INCLUDE_SYSTEM_FLAG LANGUAGE UNPARSED_LINES) + set(_oneValueArgs COMPILER_ID COMPILER_EXECUTABLE COMPILER_ARG1 COMPILER_VERSION LANGUAGE UNPARSED_LINES SCAN_RESULT) set(_multiValueArgs COMPILE_DEFINITIONS COMPILE_FLAGS INCLUDE_DIRECTORIES SYSTEM_INCLUDE_DIRECTORIES - IGNORE_PATH INCLUDE_PATH IGNORE_EXTENSIONS INCLUDE_PRIORITY_PATH) + IGNORE_PATH INCLUDE_PATH IGNORE_EXTENSIONS INCLUDE_PRIORITY_PATH COMPILER_LAUNCHER) cmake_parse_arguments(_option "${_options}" "${_oneValueArgs}" "${_multiValueArgs}" ${ARGN}) set (_sourceFiles ${_option_UNPARSED_ARGUMENTS}) if (NOT _option_LANGUAGE) @@ -1054,12 +1216,11 @@ function (cotire_scan_includes _includesVar) if (NOT _option_COMPILER_VERSION) set (_option_COMPILER_VERSION "${CMAKE_${_option_LANGUAGE}_COMPILER_VERSION}") endif() - set (_cmd "${_option_COMPILER_EXECUTABLE}" ${_option_COMPILER_ARG1}) - cotire_init_compile_cmd(_cmd "${_option_LANGUAGE}" "${_option_COMPILER_EXECUTABLE}" "${_option_COMPILER_ARG1}") + cotire_init_compile_cmd(_cmd "${_option_LANGUAGE}" "${_option_COMPILER_LAUNCHER}" "${_option_COMPILER_EXECUTABLE}" "${_option_COMPILER_ARG1}") cotire_add_definitions_to_cmd(_cmd "${_option_LANGUAGE}" ${_option_COMPILE_DEFINITIONS}) cotire_add_compile_flags_to_cmd(_cmd ${_option_COMPILE_FLAGS}) - cotire_add_includes_to_cmd(_cmd "${_option_LANGUAGE}" "${_option_INCLUDE_SYSTEM_FLAG}" _option_INCLUDE_DIRECTORIES _option_SYSTEM_INCLUDE_DIRECTORIES) - cotire_add_frameworks_to_cmd(_cmd "${_option_LANGUAGE}" ${_option_INCLUDE_DIRECTORIES}) + cotire_add_includes_to_cmd(_cmd "${_option_LANGUAGE}" _option_INCLUDE_DIRECTORIES _option_SYSTEM_INCLUDE_DIRECTORIES) + cotire_add_frameworks_to_cmd(_cmd "${_option_LANGUAGE}" _option_INCLUDE_DIRECTORIES _option_SYSTEM_INCLUDE_DIRECTORIES) cotire_add_makedep_flags("${_option_LANGUAGE}" "${_option_COMPILER_ID}" "${_option_COMPILER_VERSION}" _cmd) # only consider existing source files for scanning set (_existingSourceFiles "") @@ -1114,6 +1275,9 @@ function (cotire_scan_includes _includesVar) if (_option_UNPARSED_LINES) set (${_option_UNPARSED_LINES} ${_unparsedLines} PARENT_SCOPE) endif() + if (_option_SCAN_RESULT) + set (${_option_SCAN_RESULT} ${_result} PARENT_SCOPE) + endif() endfunction() macro (cotire_append_undefs _contentsVar) @@ -1252,10 +1416,10 @@ endfunction() function (cotire_generate_prefix_header _prefixFile) set(_options "") - set(_oneValueArgs LANGUAGE COMPILER_EXECUTABLE COMPILER_ID COMPILER_VERSION INCLUDE_SYSTEM_FLAG) + set(_oneValueArgs LANGUAGE COMPILER_EXECUTABLE COMPILER_ARG1 COMPILER_ID COMPILER_VERSION) set(_multiValueArgs DEPENDS COMPILE_DEFINITIONS COMPILE_FLAGS INCLUDE_DIRECTORIES SYSTEM_INCLUDE_DIRECTORIES IGNORE_PATH INCLUDE_PATH - IGNORE_EXTENSIONS INCLUDE_PRIORITY_PATH) + IGNORE_EXTENSIONS INCLUDE_PRIORITY_PATH COMPILER_LAUNCHER) cmake_parse_arguments(_option "${_options}" "${_oneValueArgs}" "${_multiValueArgs}" ${ARGN}) if (NOT _option_COMPILER_ID) set (_option_COMPILER_ID "${CMAKE_${_option_LANGUAGE}_ID}") @@ -1288,24 +1452,26 @@ function (cotire_generate_prefix_header _prefixFile) set (_sourceFiles ${_option_UNPARSED_ARGUMENTS}) cotire_scan_includes(_selectedHeaders ${_sourceFiles} LANGUAGE "${_option_LANGUAGE}" + COMPILER_LAUNCHER "${_option_COMPILER_LAUNCHER}" COMPILER_EXECUTABLE "${_option_COMPILER_EXECUTABLE}" + COMPILER_ARG1 "${_option_COMPILER_ARG1}" COMPILER_ID "${_option_COMPILER_ID}" COMPILER_VERSION "${_option_COMPILER_VERSION}" COMPILE_DEFINITIONS ${_option_COMPILE_DEFINITIONS} COMPILE_FLAGS 
${_option_COMPILE_FLAGS} INCLUDE_DIRECTORIES ${_option_INCLUDE_DIRECTORIES} - INCLUDE_SYSTEM_FLAG ${_option_INCLUDE_SYSTEM_FLAG} SYSTEM_INCLUDE_DIRECTORIES ${_option_SYSTEM_INCLUDE_DIRECTORIES} IGNORE_PATH ${_option_IGNORE_PATH} INCLUDE_PATH ${_option_INCLUDE_PATH} IGNORE_EXTENSIONS ${_option_IGNORE_EXTENSIONS} INCLUDE_PRIORITY_PATH ${_option_INCLUDE_PRIORITY_PATH} - UNPARSED_LINES _unparsedLines) + UNPARSED_LINES _unparsedLines + SCAN_RESULT _scanResult) cotire_generate_unity_source("${_prefixFile}" PROLOGUE ${_prologue} EPILOGUE ${_epilogue} LANGUAGE "${_option_LANGUAGE}" ${_selectedHeaders}) set (_unparsedLinesFile "${_prefixFile}.log") if (_unparsedLines) - if (COTIRE_VERBOSE OR NOT _selectedHeaders) + if (COTIRE_VERBOSE OR _scanResult OR NOT _selectedHeaders) list (LENGTH _unparsedLines _skippedLineCount) message (STATUS "${_skippedLineCount} line(s) skipped, see ${_unparsedLinesFile}") endif() @@ -1487,7 +1653,7 @@ function (cotire_add_pch_compilation_flags _language _compilerID _compilerVersio # -Kc++ process all source or unrecognized file types as C++ source files # -fsyntax-only check only for correct syntax # -Wpch-messages enable diagnostics related to pre-compiled headers (requires Intel XE 2013 Update 2) - get_filename_component(_pchDir "${_pchFile}" PATH) + get_filename_component(_pchDir "${_pchFile}" DIRECTORY) get_filename_component(_pchName "${_pchFile}" NAME) set (_xLanguage_C "c-header") set (_xLanguage_CXX "c++-header") @@ -1613,7 +1779,7 @@ function (cotire_add_prefix_pch_inclusion_flags _language _compilerID _compilerV # -include process include file as the first line of the primary source file # -Wpch-messages enable diagnostics related to pre-compiled headers (requires Intel XE 2013 Update 2) if (_pchFile) - get_filename_component(_pchDir "${_pchFile}" PATH) + get_filename_component(_pchDir "${_pchFile}" DIRECTORY) get_filename_component(_pchName "${_pchFile}" NAME) if (_flags) # append to list @@ -1647,8 +1813,8 @@ endfunction() function (cotire_precompile_prefix_header _prefixFile _pchFile _hostFile) set(_options "") - set(_oneValueArgs COMPILER_EXECUTABLE COMPILER_ID COMPILER_VERSION INCLUDE_SYSTEM_FLAG LANGUAGE) - set(_multiValueArgs COMPILE_DEFINITIONS COMPILE_FLAGS INCLUDE_DIRECTORIES SYSTEM_INCLUDE_DIRECTORIES SYS) + set(_oneValueArgs COMPILER_EXECUTABLE COMPILER_ARG1 COMPILER_ID COMPILER_VERSION LANGUAGE) + set(_multiValueArgs COMPILE_DEFINITIONS COMPILE_FLAGS INCLUDE_DIRECTORIES SYSTEM_INCLUDE_DIRECTORIES SYS COMPILER_LAUNCHER) cmake_parse_arguments(_option "${_options}" "${_oneValueArgs}" "${_multiValueArgs}" ${ARGN}) if (NOT _option_LANGUAGE) set (_option_LANGUAGE "CXX") @@ -1659,11 +1825,11 @@ function (cotire_precompile_prefix_header _prefixFile _pchFile _hostFile) if (NOT _option_COMPILER_VERSION) set (_option_COMPILER_VERSION "${CMAKE_${_option_LANGUAGE}_COMPILER_VERSION}") endif() - cotire_init_compile_cmd(_cmd "${_option_LANGUAGE}" "${_option_COMPILER_EXECUTABLE}" "${_option_COMPILER_ARG1}") + cotire_init_compile_cmd(_cmd "${_option_LANGUAGE}" "${_option_COMPILER_LAUNCHER}" "${_option_COMPILER_EXECUTABLE}" "${_option_COMPILER_ARG1}") cotire_add_definitions_to_cmd(_cmd "${_option_LANGUAGE}" ${_option_COMPILE_DEFINITIONS}) cotire_add_compile_flags_to_cmd(_cmd ${_option_COMPILE_FLAGS}) - cotire_add_includes_to_cmd(_cmd "${_option_LANGUAGE}" "${_option_INCLUDE_SYSTEM_FLAG}" _option_INCLUDE_DIRECTORIES _option_SYSTEM_INCLUDE_DIRECTORIES) - cotire_add_frameworks_to_cmd(_cmd "${_option_LANGUAGE}" ${_option_INCLUDE_DIRECTORIES}) + 
cotire_add_includes_to_cmd(_cmd "${_option_LANGUAGE}" _option_INCLUDE_DIRECTORIES _option_SYSTEM_INCLUDE_DIRECTORIES) + cotire_add_frameworks_to_cmd(_cmd "${_option_LANGUAGE}" _option_INCLUDE_DIRECTORIES _option_SYSTEM_INCLUDE_DIRECTORIES) cotire_add_pch_compilation_flags( "${_option_LANGUAGE}" "${_option_COMPILER_ID}" "${_option_COMPILER_VERSION}" "${_prefixFile}" "${_pchFile}" "${_hostFile}" _cmd) @@ -1710,11 +1876,33 @@ function (cotire_check_precompiled_header_support _language _target _msgVar) else() set (${_msgVar} "${_unsupportedCompiler}." PARENT_SCOPE) endif() - if (CMAKE_${_language}_COMPILER MATCHES "ccache") - if (NOT "$ENV{CCACHE_SLOPPINESS}" MATCHES "time_macros|pch_defines") - set (${_msgVar} - "ccache requires the environment variable CCACHE_SLOPPINESS to be set to \"pch_defines,time_macros\"." - PARENT_SCOPE) + get_target_property(_launcher ${_target} ${_language}_COMPILER_LAUNCHER) + if (CMAKE_${_language}_COMPILER MATCHES "ccache" OR _launcher MATCHES "ccache") + if (DEFINED ENV{CCACHE_SLOPPINESS}) + if (NOT "$ENV{CCACHE_SLOPPINESS}" MATCHES "pch_defines" OR NOT "$ENV{CCACHE_SLOPPINESS}" MATCHES "time_macros") + set (${_msgVar} + "ccache requires the environment variable CCACHE_SLOPPINESS to be set to \"pch_defines,time_macros\"." + PARENT_SCOPE) + endif() + else() + if (_launcher MATCHES "ccache") + get_filename_component(_ccacheExe "${_launcher}" REALPATH) + else() + get_filename_component(_ccacheExe "${CMAKE_${_language}_COMPILER}" REALPATH) + endif() + execute_process( + COMMAND "${_ccacheExe}" "--print-config" + WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}" + RESULT_VARIABLE _result + OUTPUT_VARIABLE _ccacheConfig OUTPUT_STRIP_TRAILING_WHITESPACE + ERROR_QUIET) + if (_result OR NOT + _ccacheConfig MATCHES "sloppiness.*=.*time_macros" OR NOT + _ccacheConfig MATCHES "sloppiness.*=.*pch_defines") + set (${_msgVar} + "ccache requires configuration setting \"sloppiness\" to be set to \"pch_defines,time_macros\"." 
+ PARENT_SCOPE) + endif() endif() endif() if (APPLE) @@ -1940,14 +2128,6 @@ function (cotire_get_prefix_header_dependencies _language _target _dependencySou # depend on target source files marked with custom COTIRE_DEPENDENCY property get_target_property(_targetSourceFiles ${_target} SOURCES) cotire_get_objects_with_property_on(_dependencySources COTIRE_DEPENDENCY SOURCE ${_targetSourceFiles}) - if (CMAKE_${_language}_COMPILER_ID MATCHES "GNU|Clang") - # GCC and clang raise a fatal error if a file is not found during preprocessing - # thus we depend on target's generated source files for prefix header generation - cotire_get_objects_with_property_on(_generatedSources GENERATED SOURCE ${_targetSourceFiles}) - if (_generatedSources) - list (APPEND _dependencySources ${_generatedSources}) - endif() - endif() if (COTIRE_DEBUG AND _dependencySources) message (STATUS "${_language} ${_target} prefix header dependencies: ${_dependencySources}") endif() @@ -1970,7 +2150,6 @@ function (cotire_generate_target_script _language _configurations _target _targe get_target_property(COTIRE_TARGET_INCLUDE_PRIORITY_PATH ${_target} COTIRE_PREFIX_HEADER_INCLUDE_PRIORITY_PATH) cotire_get_source_files_undefs(COTIRE_UNITY_SOURCE_PRE_UNDEFS COTIRE_TARGET_SOURCES_PRE_UNDEFS ${_targetSources}) cotire_get_source_files_undefs(COTIRE_UNITY_SOURCE_POST_UNDEFS COTIRE_TARGET_SOURCES_POST_UNDEFS ${_targetSources}) - string (STRIP "${CMAKE_INCLUDE_SYSTEM_FLAG_${_language}}" COTIRE_INCLUDE_SYSTEM_FLAG) set (COTIRE_TARGET_CONFIGURATION_TYPES "${_configurations}") foreach (_config ${_configurations}) string (TOUPPER "${_config}" _upperConfig) @@ -1983,6 +2162,7 @@ function (cotire_generate_target_script _language _configurations _target _targe cotire_get_source_files_compile_definitions( "${_config}" "${_language}" COTIRE_TARGET_SOURCES_COMPILE_DEFINITIONS_${_upperConfig} ${_targetSources}) endforeach() + get_target_property(COTIRE_TARGET_${_language}_COMPILER_LAUNCHER ${_target} ${_language}_COMPILER_LAUNCHER) # set up COTIRE_TARGET_SOURCES set (COTIRE_TARGET_SOURCES "") foreach (_sourceFile ${_targetSources}) @@ -1998,14 +2178,23 @@ function (cotire_generate_target_script _language _configurations _target _targe # copy variable definitions to cotire target script get_cmake_property(_vars VARIABLES) string (REGEX MATCHALL "COTIRE_[A-Za-z0-9_]+" _matchVars "${_vars}") - # remove COTIRE_VERBOSE which is passed as a CMake define on command line + # omit COTIRE_*_INIT variables + string (REGEX MATCHALL "COTIRE_[A-Za-z0-9_]+_INIT" _initVars "${_matchVars}") + if (_initVars) + list (REMOVE_ITEM _matchVars ${_initVars}) + endif() + # omit COTIRE_VERBOSE which is passed as a CMake define on command line list (REMOVE_ITEM _matchVars COTIRE_VERBOSE) set (_contents "") set (_contentsHasGeneratorExpressions FALSE) foreach (_var IN LISTS _matchVars ITEMS XCODE MSVC CMAKE_GENERATOR CMAKE_BUILD_TYPE CMAKE_CONFIGURATION_TYPES CMAKE_${_language}_COMPILER_ID CMAKE_${_language}_COMPILER_VERSION - CMAKE_${_language}_COMPILER CMAKE_${_language}_COMPILER_ARG1 + CMAKE_${_language}_COMPILER_LAUNCHER CMAKE_${_language}_COMPILER CMAKE_${_language}_COMPILER_ARG1 + CMAKE_INCLUDE_FLAG_${_language} CMAKE_INCLUDE_FLAG_${_language}_SEP + CMAKE_INCLUDE_SYSTEM_FLAG_${_language} + CMAKE_${_language}_FRAMEWORK_SEARCH_FLAG + CMAKE_${_language}_SYSTEM_FRAMEWORK_SEARCH_FLAG CMAKE_${_language}_SOURCE_FILE_EXTENSIONS) if (DEFINED ${_var}) string (REPLACE "\"" "\\\"" _value "${${_var}}") @@ -2055,18 +2244,26 @@ function (cotire_setup_pch_file_compilation _language 
_target _targetScript _pre if (_targetScript) cotire_set_cmd_to_prologue(_cmds) list (APPEND _cmds -P "${COTIRE_CMAKE_MODULE_FILE}" "precompile" "${_targetScript}" "${_prefixFile}" "${_pchFile}" "${_hostFile}") - file (RELATIVE_PATH _pchFileRelPath "${CMAKE_BINARY_DIR}" "${_pchFile}") + if (MSVC_IDE) + file (TO_NATIVE_PATH "${_pchFile}" _pchFileLogPath) + else() + file (RELATIVE_PATH _pchFileLogPath "${CMAKE_BINARY_DIR}" "${_pchFile}") + endif() + # make precompiled header compilation depend on the actual compiler executable used to force + # re-compilation when the compiler executable is updated. This prevents "created by a different GCC executable" + # warnings when the precompiled header is included. + get_filename_component(_realCompilerExe "${CMAKE_${_language}_COMPILER}" ABSOLUTE) if (COTIRE_DEBUG) - message (STATUS "add_custom_command: OUTPUT ${_pchFile} ${_cmds} DEPENDS ${_prefixFile} IMPLICIT_DEPENDS ${_language} ${_prefixFile}") + message (STATUS "add_custom_command: OUTPUT ${_pchFile} ${_cmds} DEPENDS ${_prefixFile} ${_realCompilerExe} IMPLICIT_DEPENDS ${_language} ${_prefixFile}") endif() set_property (SOURCE "${_pchFile}" PROPERTY GENERATED TRUE) add_custom_command( OUTPUT "${_pchFile}" COMMAND ${_cmds} - DEPENDS "${_prefixFile}" + DEPENDS "${_prefixFile}" "${_realCompilerExe}" IMPLICIT_DEPENDS ${_language} "${_prefixFile}" WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" - COMMENT "Building ${_language} precompiled header ${_pchFileRelPath}" + COMMENT "Building ${_language} precompiled header ${_pchFileLogPath}" VERBATIM) endif() endif() @@ -2151,19 +2348,23 @@ function (cotire_setup_combine_command _language _targetScript _joinedFile _cmds message (STATUS "add_custom_command: OUTPUT ${_joinedFile} COMMAND ${_prefixCmd} DEPENDS ${_files}") endif() set_property (SOURCE "${_joinedFile}" PROPERTY GENERATED TRUE) - file (RELATIVE_PATH _joinedFileRelPath "${CMAKE_BINARY_DIR}" "${_joinedFile}") + if (MSVC_IDE) + file (TO_NATIVE_PATH "${_joinedFile}" _joinedFileLogPath) + else() + file (RELATIVE_PATH _joinedFileLogPath "${CMAKE_BINARY_DIR}" "${_joinedFile}") + endif() get_filename_component(_joinedFileBaseName "${_joinedFile}" NAME_WE) get_filename_component(_joinedFileExt "${_joinedFile}" EXT) if (_language AND _joinedFileBaseName MATCHES "${COTIRE_UNITY_SOURCE_FILENAME_SUFFIX}$") - set (_comment "Generating ${_language} unity source ${_joinedFileRelPath}") + set (_comment "Generating ${_language} unity source ${_joinedFileLogPath}") elseif (_language AND _joinedFileBaseName MATCHES "${COTIRE_PREFIX_HEADER_FILENAME_SUFFIX}$") if (_joinedFileExt MATCHES "^\\.c") - set (_comment "Generating ${_language} prefix source ${_joinedFileRelPath}") + set (_comment "Generating ${_language} prefix source ${_joinedFileLogPath}") else() - set (_comment "Generating ${_language} prefix header ${_joinedFileRelPath}") + set (_comment "Generating ${_language} prefix header ${_joinedFileLogPath}") endif() else() - set (_comment "Generating ${_joinedFileRelPath}") + set (_comment "Generating ${_joinedFileLogPath}") endif() add_custom_command( OUTPUT "${_joinedFile}" @@ -2264,7 +2465,11 @@ function (cotire_setup_unity_generation_commands _language _target _targetScript # CMake 3.1.0 supports generator expressions in arguments to DEPENDS set (_unityCmdDepends "${_targetConfigScript}") endif() - file (RELATIVE_PATH _unityFileRelPath "${CMAKE_BINARY_DIR}" "${_unityFile}") + if (MSVC_IDE) + file (TO_NATIVE_PATH "${_unityFile}" _unityFileLogPath) + else() + file (RELATIVE_PATH _unityFileLogPath 
"${CMAKE_BINARY_DIR}" "${_unityFile}") + endif() if (COTIRE_DEBUG) message (STATUS "add_custom_command: OUTPUT ${_unityFile} COMMAND ${_unityCmd} DEPENDS ${_unityCmdDepends}") endif() @@ -2272,41 +2477,59 @@ function (cotire_setup_unity_generation_commands _language _target _targetScript OUTPUT "${_unityFile}" COMMAND ${_unityCmd} DEPENDS ${_unityCmdDepends} - COMMENT "Generating ${_language} unity source ${_unityFileRelPath}" + COMMENT "Generating ${_language} unity source ${_unityFileLogPath}" WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" VERBATIM) list (APPEND ${_cmdsVar} COMMAND ${_unityCmd}) endforeach() - list (LENGTH _unityFiles _numberOfUnityFiles) - if (_numberOfUnityFiles GREATER 1) - # create a joint unity file from all unity file segments - cotire_make_single_unity_source_file_path(${_language} ${_target} _unityFile) - cotire_setup_combine_command(${_language} "${_targetConfigScript}" "${_unityFile}" ${_cmdsVar} ${_unityFiles}) - endif() set (${_cmdsVar} ${${_cmdsVar}} PARENT_SCOPE) endfunction() -function (cotire_setup_prefix_generation_command _language _target _targetScript _prefixFile _unityFile _cmdsVar) +function (cotire_setup_prefix_generation_command _language _target _targetScript _prefixFile _unityFiles _cmdsVar) set (_sourceFiles ${ARGN}) set (_dependencySources "") cotire_get_prefix_header_dependencies(${_language} ${_target} _dependencySources ${_sourceFiles}) cotire_set_cmd_to_prologue(_prefixCmd) - list (APPEND _prefixCmd -P "${COTIRE_CMAKE_MODULE_FILE}" "prefix" "${_targetScript}" "${_prefixFile}" "${_unityFile}") + list (APPEND _prefixCmd -P "${COTIRE_CMAKE_MODULE_FILE}" "prefix" "${_targetScript}" "${_prefixFile}" ${_unityFiles}) set_property (SOURCE "${_prefixFile}" PROPERTY GENERATED TRUE) + # make prefix header generation depend on the actual compiler executable used to force + # re-generation when the compiler executable is updated. This prevents "file not found" + # errors for compiler version specific system header files. 
+ get_filename_component(_realCompilerExe "${CMAKE_${_language}_COMPILER}" ABSOLUTE) if (COTIRE_DEBUG) - message (STATUS "add_custom_command: OUTPUT ${_prefixFile} COMMAND ${_prefixCmd} DEPENDS ${_unityFile} ${_dependencySources}") + message (STATUS "add_custom_command: OUTPUT ${_prefixFile} COMMAND ${_prefixCmd} DEPENDS ${_unityFile} ${_dependencySources} ${_realCompilerExe}") + endif() + if (MSVC_IDE) + file (TO_NATIVE_PATH "${_prefixFile}" _prefixFileLogPath) + else() + file (RELATIVE_PATH _prefixFileLogPath "${CMAKE_BINARY_DIR}" "${_prefixFile}") endif() - file (RELATIVE_PATH _prefixFileRelPath "${CMAKE_BINARY_DIR}" "${_prefixFile}") get_filename_component(_prefixFileExt "${_prefixFile}" EXT) if (_prefixFileExt MATCHES "^\\.c") - set (_comment "Generating ${_language} prefix source ${_prefixFileRelPath}") + set (_comment "Generating ${_language} prefix source ${_prefixFileLogPath}") else() - set (_comment "Generating ${_language} prefix header ${_prefixFileRelPath}") + set (_comment "Generating ${_language} prefix header ${_prefixFileLogPath}") + endif() + # prevent pre-processing errors upon generating the prefix header when a target's generated include file does not yet exist + # we do not add a file-level dependency for the target's generated files though, because we only want to depend on their existence + # thus we make the prefix header generation depend on a custom helper target which triggers the generation of the files + set (_preTargetName "${_target}${COTIRE_PCH_TARGET_SUFFIX}_pre") + if (TARGET ${_preTargetName}) + # custom helper target has already been generated while processing a different language + list (APPEND _dependencySources ${_preTargetName}) + else() + get_target_property(_targetSourceFiles ${_target} SOURCES) + cotire_get_objects_with_property_on(_generatedSources GENERATED SOURCE ${_targetSourceFiles}) + if (_generatedSources) + add_custom_target("${_preTargetName}" DEPENDS ${_generatedSources}) + cotire_init_target("${_preTargetName}") + list (APPEND _dependencySources ${_preTargetName}) + endif() endif() add_custom_command( OUTPUT "${_prefixFile}" "${_prefixFile}.log" COMMAND ${_prefixCmd} - DEPENDS "${_unityFile}" ${_dependencySources} + DEPENDS ${_unityFiles} ${_dependencySources} "${_realCompilerExe}" COMMENT "${_comment}" WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" VERBATIM) @@ -2322,17 +2545,9 @@ function (cotire_setup_prefix_generation_from_unity_command _language _target _t else() set (_prefixSourceFile "${_prefixFile}") endif() - list (LENGTH _unityFiles _numberOfUnityFiles) - if (_numberOfUnityFiles GREATER 1) - cotire_make_single_unity_source_file_path(${_language} ${_target} _unityFile) - cotire_setup_prefix_generation_command( - ${_language} ${_target} "${_targetScript}" - "${_prefixSourceFile}" "${_unityFile}" ${_cmdsVar} ${_sourceFiles}) - else() - cotire_setup_prefix_generation_command( - ${_language} ${_target} "${_targetScript}" - "${_prefixSourceFile}" "${_unityFiles}" ${_cmdsVar} ${_sourceFiles}) - endif() + cotire_setup_prefix_generation_command( + ${_language} ${_target} "${_targetScript}" + "${_prefixSourceFile}" "${_unityFiles}" ${_cmdsVar} ${_sourceFiles}) if (CMAKE_${_language}_COMPILER_ID MATCHES "GNU|Clang") # set up generation of a prefix source file which includes the prefix header cotire_setup_combine_command(${_language} "${_targetScript}" "${_prefixFile}" _cmds ${_prefixSourceFile}) @@ -2475,10 +2690,12 @@ function (cotire_choose_target_languages _target _targetLanguagesVar _wholeTarge set (${_targetLanguagesVar} "" 
PARENT_SCOPE) return() endif() - if (_targetUsePCH AND "${_language}" MATCHES "^C|CXX$") - cotire_check_precompiled_header_support("${_language}" "${_target}" _disableMsg) - if (_disableMsg) - set (_targetUsePCH FALSE) + if (_targetUsePCH AND "${_language}" MATCHES "^C|CXX$" AND DEFINED CMAKE_${_language}_COMPILER_ID) + if (CMAKE_${_language}_COMPILER_ID) + cotire_check_precompiled_header_support("${_language}" "${_target}" _disableMsg) + if (_disableMsg) + set (_targetUsePCH FALSE) + endif() endif() endif() set (_sourceFiles "") @@ -2566,10 +2783,6 @@ function (cotire_compute_unity_max_number_of_includes _target _maxIncludesVar) endif() list (LENGTH _sourceFiles _numberOfSources) math (EXPR _maxIncludes "(${_numberOfSources} + ${_numberOfThreads} - 1) / ${_numberOfThreads}") - # a unity source segment must not contain less than COTIRE_MINIMUM_NUMBER_OF_TARGET_SOURCES files - if (_maxIncludes LESS ${COTIRE_MINIMUM_NUMBER_OF_TARGET_SOURCES}) - set (_maxIncludes ${COTIRE_MINIMUM_NUMBER_OF_TARGET_SOURCES}) - endif() elseif (NOT _maxIncludes MATCHES "[0-9]+") set (_maxIncludes 0) endif() @@ -2597,14 +2810,22 @@ function (cotire_process_target_language _language _configurations _target _whol endif() cotire_generate_target_script( ${_language} "${_configurations}" ${_target} _targetScript _targetConfigScript ${_unitySourceFiles}) + # set up unity files for parallel compilation cotire_compute_unity_max_number_of_includes(${_target} _maxIncludes ${_unitySourceFiles}) cotire_make_unity_source_file_paths(${_language} ${_target} ${_maxIncludes} _unityFiles ${_unitySourceFiles}) - if (NOT _unityFiles) + list (LENGTH _unityFiles _numberOfUnityFiles) + if (_numberOfUnityFiles EQUAL 0) return() + elseif (_numberOfUnityFiles GREATER 1) + cotire_setup_unity_generation_commands( + ${_language} ${_target} "${_targetScript}" "${_targetConfigScript}" "${_unityFiles}" _cmds ${_unitySourceFiles}) endif() + # set up single unity file for prefix header generation + cotire_make_single_unity_source_file_path(${_language} ${_target} _unityFile) cotire_setup_unity_generation_commands( - ${_language} ${_target} "${_targetScript}" "${_targetConfigScript}" "${_unityFiles}" _cmds ${_unitySourceFiles}) + ${_language} ${_target} "${_targetScript}" "${_targetConfigScript}" "${_unityFile}" _cmds ${_unitySourceFiles}) cotire_make_prefix_file_path(${_language} ${_target} _prefixFile) + # set up prefix header if (_prefixFile) # check for user provided prefix header files get_property(_prefixHeaderFiles TARGET ${_target} PROPERTY COTIRE_${_language}_PREFIX_HEADER_INIT) @@ -2613,7 +2834,7 @@ function (cotire_process_target_language _language _configurations _target _whol ${_language} ${_target} "${_targetConfigScript}" "${_prefixFile}" _cmds ${_prefixHeaderFiles}) else() cotire_setup_prefix_generation_from_unity_command( - ${_language} ${_target} "${_targetConfigScript}" "${_prefixFile}" "${_unityFiles}" _cmds ${_unitySourceFiles}) + ${_language} ${_target} "${_targetConfigScript}" "${_prefixFile}" "${_unityFile}" _cmds ${_unitySourceFiles}) endif() # check if selected language has enough sources at all list (LENGTH _sourceFiles _numberOfSources) @@ -2689,6 +2910,16 @@ function (cotire_setup_pch_target _languages _configurations _target) endif() endfunction() +function (cotire_filter_object_libraries _target _objectLibrariesVar) + set (_objectLibraries "") + foreach (_source ${ARGN}) + if (_source MATCHES "^\\$$") + list (APPEND _objectLibraries "${_source}") + endif() + endforeach() + set (${_objectLibrariesVar} 
${_objectLibraries} PARENT_SCOPE) +endfunction() + function (cotire_collect_unity_target_sources _target _languages _unityTargetSourcesVar) get_target_property(_targetSourceFiles ${_target} SOURCES) set (_unityTargetSources ${_targetSourceFiles}) @@ -2707,9 +2938,32 @@ function (cotire_collect_unity_target_sources _target _languages _unityTargetSou list (APPEND _unityTargetSources ${_unityFiles}) endif() endforeach() + get_target_property(_linkLibrariesStrategy ${_target} COTIRE_UNITY_LINK_LIBRARIES_INIT) + if ("${_linkLibrariesStrategy}" MATCHES "^COPY_UNITY$") + cotire_filter_object_libraries(${_target} _objectLibraries ${_targetSourceFiles}) + if (_objectLibraries) + cotire_map_libraries("${_linkLibrariesStrategy}" _unityObjectLibraries ${_objectLibraries}) + list (REMOVE_ITEM _unityTargetSources ${_objectLibraries}) + list (APPEND _unityTargetSources ${_unityObjectLibraries}) + endif() + endif() set (${_unityTargetSourcesVar} ${_unityTargetSources} PARENT_SCOPE) endfunction() +function (cotire_setup_unity_target_pch_usage _languages _target) + foreach (_language ${_languages}) + get_property(_unityFiles TARGET ${_target} PROPERTY COTIRE_${_language}_UNITY_SOURCE) + if (_unityFiles) + get_property(_userPrefixFile TARGET ${_target} PROPERTY COTIRE_${_language}_PREFIX_HEADER_INIT) + get_property(_prefixFile TARGET ${_target} PROPERTY COTIRE_${_language}_PREFIX_HEADER) + if (_userPrefixFile AND _prefixFile) + # user provided prefix header must be included unconditionally by unity sources + cotire_setup_prefix_file_inclusion(${_language} ${_target} "${_prefixFile}" ${_unityFiles}) + endif() + endif() + endforeach() +endfunction() + function (cotire_setup_unity_build_target _languages _configurations _target) get_target_property(_unityTargetName ${_target} COTIRE_UNITY_TARGET_NAME) if (NOT _unityTargetName) @@ -2751,9 +3005,16 @@ function (cotire_setup_unity_build_target _languages _configurations _target) else() add_library(${_unityTargetName} ${_unityTargetSubType} EXCLUDE_FROM_ALL ${_unityTargetSources}) endif() - if (_targetAutoMoc OR _targetAutoUic OR _targetAutoRcc) - # depend on the original target's implicity generated _automoc target - add_dependencies(${_unityTargetName} ${_target}_automoc) + if ("${CMAKE_GENERATOR}" MATCHES "Visual Studio") + # depend on original target's automoc target, if it exists + if (TARGET ${_target}_automoc) + add_dependencies(${_unityTargetName} ${_target}_automoc) + endif() + else() + if (_targetAutoMoc OR _targetAutoUic OR _targetAutoRcc) + # depend on the original target's implicity generated _automoc target + add_dependencies(${_unityTargetName} ${_target}_automoc) + endif() endif() # copy output location properties set (_outputDirProperties @@ -2806,7 +3067,10 @@ function (cotire_setup_unity_build_target _languages _configurations _target) INCLUDE_DIRECTORIES INTERPROCEDURAL_OPTIMIZATION INTERPROCEDURAL_OPTIMIZATION_ POSITION_INDEPENDENT_CODE - C_VISIBILITY_PRESET CXX_VISIBILITY_PRESET VISIBILITY_INLINES_HIDDEN) + C_COMPILER_LAUNCHER CXX_COMPILER_LAUNCHER + C_INCLUDE_WHAT_YOU_USE CXX_INCLUDE_WHAT_YOU_USE + C_VISIBILITY_PRESET CXX_VISIBILITY_PRESET VISIBILITY_INLINES_HIDDEN + C_CLANG_TIDY CXX_CLANG_TIDY) # copy compile features cotire_copy_set_properites("${_configurations}" TARGET ${_target} ${_unityTargetName} C_EXTENSIONS C_STANDARD C_STANDARD_REQUIRED @@ -2829,28 +3093,41 @@ function (cotire_setup_unity_build_target _languages _configurations _target) LINK_INTERFACE_MULTIPLICITY LINK_INTERFACE_MULTIPLICITY_ LINK_SEARCH_START_STATIC 
LINK_SEARCH_END_STATIC STATIC_LIBRARY_FLAGS STATIC_LIBRARY_FLAGS_ - NO_SONAME SOVERSION VERSION) + NO_SONAME SOVERSION VERSION + LINK_WHAT_YOU_USE) # copy cmake stuff cotire_copy_set_properites("${_configurations}" TARGET ${_target} ${_unityTargetName} IMPLICIT_DEPENDS_INCLUDE_TRANSFORM RULE_LAUNCH_COMPILE RULE_LAUNCH_CUSTOM RULE_LAUNCH_LINK) # copy Apple platform specific stuff cotire_copy_set_properites("${_configurations}" TARGET ${_target} ${_unityTargetName} - BUNDLE BUNDLE_EXTENSION FRAMEWORK INSTALL_NAME_DIR MACOSX_BUNDLE MACOSX_BUNDLE_INFO_PLIST - MACOSX_FRAMEWORK_INFO_PLIST MACOSX_RPATH OSX_ARCHITECTURES - OSX_ARCHITECTURES_ PRIVATE_HEADER PUBLIC_HEADER RESOURCE) + BUNDLE BUNDLE_EXTENSION FRAMEWORK FRAMEWORK_VERSION INSTALL_NAME_DIR + MACOSX_BUNDLE MACOSX_BUNDLE_INFO_PLIST MACOSX_FRAMEWORK_INFO_PLIST MACOSX_RPATH + OSX_ARCHITECTURES OSX_ARCHITECTURES_ PRIVATE_HEADER PUBLIC_HEADER RESOURCE XCTEST + IOS_INSTALL_COMBINED) # copy Windows platform specific stuff cotire_copy_set_properites("${_configurations}" TARGET ${_target} ${_unityTargetName} GNUtoMS COMPILE_PDB_NAME COMPILE_PDB_NAME_ COMPILE_PDB_OUTPUT_DIRECTORY COMPILE_PDB_OUTPUT_DIRECTORY_ PDB_NAME PDB_NAME_ PDB_OUTPUT_DIRECTORY PDB_OUTPUT_DIRECTORY_ - VS_DOTNET_REFERENCES VS_GLOBAL_KEYWORD VS_GLOBAL_PROJECT_TYPES VS_GLOBAL_ROOTNAMESPACE - VS_KEYWORD VS_SCC_AUXPATH VS_SCC_LOCALPATH VS_SCC_PROJECTNAME VS_SCC_PROVIDER - VS_WINRT_EXTENSIONS VS_WINRT_REFERENCES VS_WINRT_COMPONENT - VS_DOTNET_TARGET_FRAMEWORK_VERSION WIN32_EXECUTABLE) + VS_DESKTOP_EXTENSIONS_VERSION VS_DOTNET_REFERENCES VS_DOTNET_TARGET_FRAMEWORK_VERSION + VS_GLOBAL_KEYWORD VS_GLOBAL_PROJECT_TYPES VS_GLOBAL_ROOTNAMESPACE + VS_IOT_EXTENSIONS_VERSION VS_IOT_STARTUP_TASK + VS_KEYWORD VS_MOBILE_EXTENSIONS_VERSION + VS_SCC_AUXPATH VS_SCC_LOCALPATH VS_SCC_PROJECTNAME VS_SCC_PROVIDER + VS_WINDOWS_TARGET_PLATFORM_MIN_VERSION + VS_WINRT_COMPONENT VS_WINRT_EXTENSIONS VS_WINRT_REFERENCES + WIN32_EXECUTABLE WINDOWS_EXPORT_ALL_SYMBOLS + DEPLOYMENT_REMOTE_DIRECTORY VS_CONFIGURATION_TYPE + VS_SDK_REFERENCES) # copy Android platform specific stuff cotire_copy_set_properites("${_configurations}" TARGET ${_target} ${_unityTargetName} - ANDROID_API ANDROID_API_MIN ANDROID_GUI) + ANDROID_API ANDROID_API_MIN ANDROID_GUI + ANDROID_ANT_ADDITIONAL_OPTIONS ANDROID_ARCH ANDROID_ASSETS_DIRECTORIES + ANDROID_JAR_DEPENDENCIES ANDROID_JAR_DIRECTORIES ANDROID_JAVA_SOURCE_DIR + ANDROID_NATIVE_LIB_DEPENDENCIES ANDROID_NATIVE_LIB_DIRECTORIES + ANDROID_PROCESS_MAX ANDROID_PROGUARD ANDROID_PROGUARD_CONFIG_PATH + ANDROID_SECURE_PROPS_PATH ANDROID_SKIP_ANT_STEP ANDROID_STL_TYPE) # use output name from original target get_target_property(_targetOutputName ${_unityTargetName} OUTPUT_NAME) if (NOT _targetOutputName) @@ -2880,10 +3157,16 @@ function (cotire_target _target) if (NOT _option_CONFIGURATIONS) cotire_get_configuration_types(_option_CONFIGURATIONS) endif() - # trivial checks - get_target_property(_imported ${_target} IMPORTED) - if (_imported) - message (WARNING "cotire: imported target ${_target} cannot be cotired.") + # check if cotire can be applied to target at all + cotire_is_target_supported(${_target} _isSupported) + if (NOT _isSupported) + get_target_property(_imported ${_target} IMPORTED) + get_target_property(_targetType ${_target} TYPE) + if (_imported) + message (WARNING "cotire: imported ${_targetType} target ${_target} cannot be cotired.") + else() + message (STATUS "cotire: ${_targetType} target ${_target} cannot be cotired.") + endif() return() endif() # resolve alias @@ -2935,6 
+3218,9 @@ function (cotire_target _target) if (_targetUsePCH) cotire_setup_target_pch_usage("${_targetLanguages}" ${_target} ${_wholeTarget} ${_cmds}) cotire_setup_pch_target("${_targetLanguages}" "${_option_CONFIGURATIONS}" ${_target}) + if (_targetAddSCU) + cotire_setup_unity_target_pch_usage("${_targetLanguages}" ${_target}) + endif() endif() get_target_property(_targetAddCleanTarget ${_target} COTIRE_ADD_CLEAN) if (_targetAddCleanTarget) @@ -2945,11 +3231,35 @@ endfunction(cotire_target) function (cotire_map_libraries _strategy _mappedLibrariesVar) set (_mappedLibraries "") foreach (_library ${ARGN}) - if (TARGET "${_library}" AND "${_strategy}" MATCHES "COPY_UNITY") - # use target's corresponding unity target, if available - get_target_property(_libraryUnityTargetName ${_library} COTIRE_UNITY_TARGET_NAME) - if (TARGET "${_libraryUnityTargetName}") - list (APPEND _mappedLibraries "${_libraryUnityTargetName}") + if (_library MATCHES "^\\$<LINK_ONLY:(.+)>$") + set (_libraryName "${CMAKE_MATCH_1}") + set (_linkOnly TRUE) + set (_objectLibrary FALSE) + elseif (_library MATCHES "^\\$<TARGET_OBJECTS:(.+)>$") + set (_libraryName "${CMAKE_MATCH_1}") + set (_linkOnly FALSE) + set (_objectLibrary TRUE) + else() + set (_libraryName "${_library}") + set (_linkOnly FALSE) + set (_objectLibrary FALSE) + endif() + if ("${_strategy}" MATCHES "COPY_UNITY") + cotire_is_target_supported(${_libraryName} _isSupported) + if (_isSupported) + # use target's corresponding unity target, if available + get_target_property(_libraryUnityTargetName ${_libraryName} COTIRE_UNITY_TARGET_NAME) + if (TARGET "${_libraryUnityTargetName}") + if (_linkOnly) + list (APPEND _mappedLibraries "$<LINK_ONLY:${_libraryUnityTargetName}>") + elseif (_objectLibrary) + list (APPEND _mappedLibraries "$<TARGET_OBJECTS:${_libraryUnityTargetName}>") + else() + list (APPEND _mappedLibraries "${_libraryUnityTargetName}") + endif() + else() + list (APPEND _mappedLibraries "${_library}") + endif() else() list (APPEND _mappedLibraries "${_library}") endif() @@ -2962,6 +3272,10 @@ function (cotire_map_libraries _strategy _mappedLibrariesVar) endfunction() function (cotire_target_link_libraries _target) + cotire_is_target_supported(${_target} _isSupported) + if (NOT _isSupported) + return() + endif() get_target_property(_unityTargetName ${_target} COTIRE_UNITY_TARGET_NAME) if (TARGET "${_unityTargetName}") get_target_property(_linkLibrariesStrategy ${_target} COTIRE_UNITY_LINK_LIBRARIES_INIT) @@ -2969,21 +3283,21 @@ function (cotire_target_link_libraries _target) message (STATUS "unity target ${_unityTargetName} link strategy: ${_linkLibrariesStrategy}") endif() if ("${_linkLibrariesStrategy}" MATCHES "^(COPY|COPY_UNITY)$") - set (_unityLinkLibraries "") get_target_property(_linkLibraries ${_target} LINK_LIBRARIES) if (_linkLibraries) - list (APPEND _unityLinkLibraries ${_linkLibraries}) + cotire_map_libraries("${_linkLibrariesStrategy}" _unityLinkLibraries ${_linkLibraries}) + set_target_properties(${_unityTargetName} PROPERTIES LINK_LIBRARIES "${_unityLinkLibraries}") + if (COTIRE_DEBUG) + message (STATUS "unity target ${_unityTargetName} link libraries: ${_unityLinkLibraries}") + endif() endif() get_target_property(_interfaceLinkLibraries ${_target} INTERFACE_LINK_LIBRARIES) if (_interfaceLinkLibraries) - list (APPEND _unityLinkLibraries ${_interfaceLinkLibraries}) - endif() - cotire_map_libraries("${_linkLibrariesStrategy}" _unityLinkLibraries ${_unityLinkLibraries}) - if (COTIRE_DEBUG) - message (STATUS "unity target ${_unityTargetName} libraries: ${_unityLinkLibraries}") - endif() - if (_unityLinkLibraries) - target_link_libraries(${_unityTargetName} 
${_unityLinkLibraries}) + cotire_map_libraries("${_linkLibrariesStrategy}" _unityLinkInterfaceLibraries ${_interfaceLinkLibraries}) + set_target_properties(${_unityTargetName} PROPERTIES INTERFACE_LINK_LIBRARIES "${_unityLinkInterfaceLibraries}") + if (COTIRE_DEBUG) + message (STATUS "unity target ${_unityTargetName} interface link libraries: ${_unityLinkInterfaceLibraries}") + endif() endif() endif() endif() @@ -2998,7 +3312,7 @@ function (cotire_cleanup _binaryDir _cotireIntermediateDirName _targetName) # filter files in intermediate directory set (_filesToRemove "") foreach (_file ${_cotireFiles}) - get_filename_component(_dir "${_file}" PATH) + get_filename_component(_dir "${_file}" DIRECTORY) get_filename_component(_dirName "${_dir}" NAME) if ("${_dirName}" STREQUAL "${_cotireIntermediateDirName}") list (APPEND _filesToRemove "${_file}") @@ -3122,7 +3436,6 @@ if (CMAKE_SCRIPT_MODE_FILE) message (STATUS "COTIRE_BUILD_TYPE=${COTIRE_BUILD_TYPE} not cotired (${COTIRE_TARGET_CONFIGURATION_TYPES})") endif() set (_sources "") - set (_sourceLocations "") set (_sourcesDefinitions "") endif() set (_targetPreUndefs ${COTIRE_TARGET_PRE_UNDEFS}) @@ -3171,6 +3484,7 @@ if (CMAKE_SCRIPT_MODE_FILE) cotire_generate_prefix_header( "${COTIRE_ARGV3}" ${_files} + COMPILER_LAUNCHER "${COTIRE_TARGET_${COTIRE_TARGET_LANGUAGE}_COMPILER_LAUNCHER}" COMPILER_EXECUTABLE "${CMAKE_${COTIRE_TARGET_LANGUAGE}_COMPILER}" COMPILER_ARG1 ${CMAKE_${COTIRE_TARGET_LANGUAGE}_COMPILER_ARG1} COMPILER_ID "${CMAKE_${COTIRE_TARGET_LANGUAGE}_COMPILER_ID}" @@ -3180,7 +3494,6 @@ if (CMAKE_SCRIPT_MODE_FILE) INCLUDE_PATH ${COTIRE_TARGET_INCLUDE_PATH} IGNORE_EXTENSIONS "${CMAKE_${COTIRE_TARGET_LANGUAGE}_SOURCE_FILE_EXTENSIONS};${COTIRE_ADDITIONAL_PREFIX_HEADER_IGNORE_EXTENSIONS}" INCLUDE_PRIORITY_PATH ${COTIRE_TARGET_INCLUDE_PRIORITY_PATH} - INCLUDE_SYSTEM_FLAG "${COTIRE_INCLUDE_SYSTEM_FLAG}" INCLUDE_DIRECTORIES ${_includeDirs} SYSTEM_INCLUDE_DIRECTORIES ${_systemIncludeDirs} COMPILE_DEFINITIONS ${_compileDefinitions} @@ -3198,12 +3511,12 @@ if (CMAKE_SCRIPT_MODE_FILE) cotire_precompile_prefix_header( "${COTIRE_ARGV3}" "${COTIRE_ARGV4}" "${COTIRE_ARGV5}" + COMPILER_LAUNCHER "${COTIRE_TARGET_${COTIRE_TARGET_LANGUAGE}_COMPILER_LAUNCHER}" COMPILER_EXECUTABLE "${CMAKE_${COTIRE_TARGET_LANGUAGE}_COMPILER}" COMPILER_ARG1 ${CMAKE_${COTIRE_TARGET_LANGUAGE}_COMPILER_ARG1} COMPILER_ID "${CMAKE_${COTIRE_TARGET_LANGUAGE}_COMPILER_ID}" COMPILER_VERSION "${CMAKE_${COTIRE_TARGET_LANGUAGE}_COMPILER_VERSION}" LANGUAGE "${COTIRE_TARGET_LANGUAGE}" - INCLUDE_SYSTEM_FLAG "${COTIRE_INCLUDE_SYSTEM_FLAG}" INCLUDE_DIRECTORIES ${_includeDirs} SYSTEM_INCLUDE_DIRECTORIES ${_systemIncludeDirs} COMPILE_DEFINITIONS ${_compileDefinitions} @@ -3324,9 +3637,9 @@ else() endif() if (MSVC) # MSVC default PCH memory scaling factor of 100 percent (75 MB) is too small for template heavy C++ code - # use a bigger default factor of 140 percent (105 MB) + # use a bigger default factor of 170 percent (128 MB) if (NOT DEFINED COTIRE_PCH_MEMORY_SCALING_FACTOR) - set (COTIRE_PCH_MEMORY_SCALING_FACTOR "140") + set (COTIRE_PCH_MEMORY_SCALING_FACTOR "170") endif() endif() if (NOT COTIRE_UNITY_BUILD_TARGET_SUFFIX) diff --git a/CONTRIBUTORS.txt b/CONTRIBUTORS.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000..25fe0bb418 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,84 @@ +FROM phusion/baseimage:0.10.1 +MAINTAINER The bitshares decentralized organisation + +ENV LANG=en_US.UTF-8 +RUN \ + apt-get update -y && \ + 
apt-get install -y \ + g++ \ + autoconf \ + cmake \ + git \ + libbz2-dev \ + libcurl4-openssl-dev \ + libssl-dev \ + libncurses-dev \ + libboost-thread-dev \ + libboost-iostreams-dev \ + libboost-date-time-dev \ + libboost-system-dev \ + libboost-filesystem-dev \ + libboost-program-options-dev \ + libboost-chrono-dev \ + libboost-test-dev \ + libboost-context-dev \ + libboost-regex-dev \ + libboost-coroutine-dev \ + libtool \ + doxygen \ + ca-certificates \ + fish \ + && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +ADD . /bitshares-core +WORKDIR /bitshares-core + +# Compile +RUN \ + ( git submodule sync --recursive || \ + find `pwd` -type f -name .git | \ + while read f; do \ + rel="$(echo "${f#$PWD/}" | sed 's=[^/]*/=../=g')"; \ + sed -i "s=: .*/.git/=: $rel/=" "$f"; \ + done && \ + git submodule sync --recursive ) && \ + git submodule update --init --recursive && \ + cmake \ + -DCMAKE_BUILD_TYPE=Release \ + -DGRAPHENE_DISABLE_UNITY_BUILD=ON \ + . && \ + make witness_node cli_wallet get_dev_key && \ + install -s programs/witness_node/witness_node programs/genesis_util/get_dev_key programs/cli_wallet/cli_wallet /usr/local/bin && \ + # + # Obtain version + mkdir /etc/bitshares && \ + git rev-parse --short HEAD > /etc/bitshares/version && \ + cd / && \ + rm -rf /bitshares-core + +# Home directory $HOME +WORKDIR / +RUN useradd -s /bin/bash -m -d /var/lib/bitshares bitshares +ENV HOME /var/lib/bitshares +RUN chown bitshares:bitshares -R /var/lib/bitshares + +# Volume +VOLUME ["/var/lib/bitshares", "/etc/bitshares"] + +# rpc service: +EXPOSE 8090 +# p2p service: +EXPOSE 1776 + +# default exec/config files +ADD docker/default_config.ini /etc/bitshares/config.ini +ADD docker/bitsharesentry.sh /usr/local/bin/bitsharesentry.sh +RUN chmod a+x /usr/local/bin/bitsharesentry.sh + +# Make Docker send SIGINT instead of SIGTERM to the daemon +STOPSIGNAL SIGINT + +# default execute entry +CMD ["/usr/local/bin/bitsharesentry.sh"] diff --git a/Doxyfile b/Doxyfile index 75931ef9a4..c3c45a913c 100644 --- a/Doxyfile +++ b/Doxyfile @@ -32,19 +32,19 @@ DOXYFILE_ENCODING = UTF-8 # title of most generated pages and in a few other places. # The default value is: My Project. -PROJECT_NAME = "Graphene" +PROJECT_NAME = "Bitshares-Core" # The PROJECT_NUMBER tag can be used to enter a project or revision number. This # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = +PROJECT_NUMBER = "2.0.180823" # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a # quick idea about the purpose of the project. Keep the description short. -PROJECT_BRIEF = +PROJECT_BRIEF = "BitShares blockchain implementation and command-line interface software" # With the PROJECT_LOGO tag one can specify a logo or an icon that is included # in the documentation. The maximum height of the logo should not exceed 55 @@ -758,7 +758,7 @@ WARN_LOGFILE = # spaces. # Note: If this tag is empty the current directory is searched. -INPUT = doc/main.dox libraries/chain libraries/chain/db libraries/app libraries/wallet +INPUT = README.md doc/main.dox libraries/chain libraries/chain/db libraries/app libraries/wallet # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses @@ -894,7 +894,7 @@ FILTER_SOURCE_PATTERNS = # (index.html). 
This can be useful if you have a project on for instance GitHub # and want to reuse the introduction page also for the doxygen output. -USE_MDFILE_AS_MAINPAGE = +USE_MDFILE_AS_MAINPAGE = "README.md" #--------------------------------------------------------------------------- # Configuration options related to source browsing diff --git a/HEADER b/HEADER deleted file mode 100644 index 0a70ec1dc2..0000000000 --- a/HEADER +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright (c) 2015, Cryptonomex, Inc. - * All rights reserved. - * - * This source code is provided for evaluation in private test networks only, until September 8, 2015. After this date, this license expires and - * the code may not be used, modified or distributed for any purpose. Redistribution and use in source and binary forms, with or without modification, - * are permitted until September 8, 2015, provided that the following conditions are met: - * - * 1. The code and/or derivative works are used only for private test networks consisting of no more than 10 P2P nodes. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, - * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ diff --git a/LICENSE.md b/LICENSE.txt similarity index 89% rename from LICENSE.md rename to LICENSE.txt index 0415b22c9b..b65abcc737 100644 --- a/LICENSE.md +++ b/LICENSE.txt @@ -1,4 +1,5 @@ -Copyright (c) 2015 Cryptonomex, Inc., and contributors. +Copyright (c) 2015-2016 Cryptonomex Inc. +Copyright (c) 2015-2017 contributors The MIT License @@ -19,4 +20,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/README-docker.md b/README-docker.md new file mode 100644 index 0000000000..c11fc89309 --- /dev/null +++ b/README-docker.md @@ -0,0 +1,125 @@ +# Docker Container + +This repository comes with built-in Dockerfile to support docker +containers. This README serves as documentation. + +## Dockerfile Specifications + +The `Dockerfile` performs the following steps: + +1. Obtain base image (phusion/baseimage:0.10.1) +2. Install required dependencies using `apt-get` +3. Add bitshares-core source code into container +4. Update git submodules +5. Perform `cmake` with build type `Release` +6. Run `make` and `make_install` (this will install binaries into `/usr/local/bin` +7. Purge source code off the container +8. Add a local bitshares user and set `$HOME` to `/var/lib/bitshares` +9. Make `/var/lib/bitshares` and `/etc/bitshares` a docker *volume* +10. Expose ports `8090` and `1776` +11. Add default config from `docker/default_config.ini` and entry point script +12. 
Run entry point script by default + +The entry point simplifies the use of parameters for the `witness_node` +(which is run by default when spinning up the container). + +### Supported Environmental Variables + +* `$BITSHARESD_SEED_NODES` +* `$BITSHARESD_RPC_ENDPOINT` +* `$BITSHARESD_PLUGINS` +* `$BITSHARESD_REPLAY` +* `$BITSHARESD_RESYNC` +* `$BITSHARESD_P2P_ENDPOINT` +* `$BITSHARESD_WITNESS_ID` +* `$BITSHARESD_PRIVATE_KEY` +* `$BITSHARESD_TRACK_ACCOUNTS` +* `$BITSHARESD_PARTIAL_OPERATIONS` +* `$BITSHARESD_MAX_OPS_PER_ACCOUNT` +* `$BITSHARESD_ES_NODE_URL` +* `$BITSHARESD_TRUSTED_NODE` + +### Default config + +The default configuration is: + + p2p-endpoint = 0.0.0.0:9090 + rpc-endpoint = 0.0.0.0:8090 + bucket-size = [60,300,900,1800,3600,14400,86400] + history-per-size = 1000 + max-ops-per-account = 1000 + partial-operations = true + +# Docker Compose + +With docker compose, multiple nodes can be managed with a single +`docker-compose.yaml` file: + + version: '3' + services: + main: + # Image to run + image: bitshares/bitshares-core:latest + # + volumes: + - ./docker/conf/:/etc/bitshares/ + # Optional parameters + environment: + - BITSHARESD_ARGS=--help + + + version: '3' + services: + fullnode: + # Image to run + image: bitshares/bitshares-core:latest + environment: + # Optional parameters + environment: + - BITSHARESD_ARGS=--help + ports: + - "0.0.0.0:8090:8090" + volumes: + - "bitshares-fullnode:/var/lib/bitshares" + + +# Docker Hub + +This container is properly registered with docker hub under the name: + +* [bitshares/bitshares-core](https://hub.docker.com/r/bitshares/bitshares-core/) + +Going forward, every release tag as well as all pushes to `develop` and +`testnet` will be built into ready-to-run containers, there. + +# Docker Compose + +One can use docker compose to setup a trusted full node together with a +delayed node like this: + +``` +version: '3' +services: + + fullnode: + image: bitshares/bitshares-core:latest + ports: + - "0.0.0.0:8090:8090" + volumes: + - "bitshares-fullnode:/var/lib/bitshares" + + delayed_node: + image: bitshares/bitshares-core:latest + environment: + - 'BITSHARESD_PLUGINS=delayed_node witness' + - 'BITSHARESD_TRUSTED_NODE=ws://fullnode:8090' + ports: + - "0.0.0.0:8091:8090" + volumes: + - "bitshares-delayed_node:/var/lib/bitshares" + links: + - fullnode + +volumes: + bitshares-fullnode: +``` diff --git a/README.md b/README.md index 726d66b306..a5c719aaa0 100644 --- a/README.md +++ b/README.md @@ -1,95 +1,116 @@ -Intro for new developers ------------------------- +BitShares Core +============== -This is a quick introduction to get new developers up to speed on Graphene. 
+[Build Status](https://travis-ci.org/bitshares/bitshares-core/branches): -Starting Graphene ------------------ +`master` | `develop` | `hardfork` | `testnet` | `bitshares-fc` + --- | --- | --- | --- | --- + [![](https://travis-ci.org/bitshares/bitshares-core.svg?branch=master)](https://travis-ci.org/bitshares/bitshares-core) | [![](https://travis-ci.org/bitshares/bitshares-core.svg?branch=develop)](https://travis-ci.org/bitshares/bitshares-core) | [![](https://travis-ci.org/bitshares/bitshares-core.svg?branch=hardfork)](https://travis-ci.org/bitshares/bitshares-core) | [![](https://travis-ci.org/bitshares/bitshares-core.svg?branch=testnet)](https://travis-ci.org/bitshares/bitshares-core) | [![](https://travis-ci.org/bitshares/bitshares-fc.svg?branch=master)](https://travis-ci.org/bitshares/bitshares-fc) -For Ubuntu 14.04 LTS users, see this link first: - https://github.com/cryptonomex/graphene/wiki/build-ubuntu -and then proceed with: +* [Getting Started](#getting-started) +* [Support](#support) +* [Using the API](#using-the-api) +* [Accessing restricted API's](#accessing-restricted-apis) +* [FAQ](#faq) +* [License](#license) - git clone https://github.com/cryptonomex/graphene.git - cd graphene +BitShares Core is the BitShares blockchain implementation and command-line interface. +The web wallet is [BitShares UI](https://github.com/bitshares/bitshares-ui). + +Visit [BitShares.org](https://bitshares.org/) to learn about BitShares and join the community at [BitSharesTalk.org](https://bitsharestalk.org/). + +Information for developers can be found in the [Bitshares Developer Portal](https://dev.bitshares.works/). Users interested in how bitshares works can go to the [BitShares Documentation](https://how.bitshares.works/) site. + +For security issues and bug bounty program please visit [Hack the DEX](https://hackthedex.io). + +Getting Started +--------------- +Build instructions and additional documentation are available in the +[wiki](https://github.com/bitshares/bitshares-core/wiki). + +We recommend building on Ubuntu 16.04 LTS (64-bit) + +**Build Dependencies**: + + sudo apt-get update + sudo apt-get install autoconf cmake make automake libtool git libboost-all-dev libssl-dev g++ libcurl4-openssl-dev + +**Build Script:** + + git clone https://github.com/bitshares/bitshares-core.git + cd bitshares-core + git checkout master # may substitute "master" with current release tag git submodule update --init --recursive - cmake -DCMAKE_BUILD_TYPE=Debug . + cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo . make + +**Upgrade Script** (prepend to the Build Script above if you built a prior release): + + git remote set-url origin https://github.com/bitshares/bitshares-core.git + git checkout master + git remote set-head origin --auto + git pull + git submodule update --init --recursive # this command may fail + git submodule sync --recursive + git submodule update --init --recursive + +**NOTE:** BitShares requires a [Boost](http://www.boost.org/) version in the range [1.57 - 1.65.1]. Versions earlier than +1.57 or newer than 1.65.1 are NOT supported. If your system's Boost version is newer, then you will need to manually build +an older version of Boost and specify it to CMake using `DBOOST_ROOT`. + +**NOTE:** BitShares requires a 64-bit operating system to build, and will not build on a 32-bit OS. 
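For example, if the system Boost is newer than 1.65.1, one way to satisfy the Boost note above is to build a supported Boost release locally and point CMake at it through the standard `BOOST_ROOT` hint; the path below is only a placeholder, not something this patch ships:

```
# Placeholder path -- adjust to wherever a Boost release in the 1.57 - 1.65.1 range was installed
BOOST_ROOT=$HOME/opt/boost_1_65_1
cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo -DBOOST_ROOT="$BOOST_ROOT" .
make
```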
+ +**NOTE:** BitShares now supports Ubuntu 18.04 LTS + +**NOTE:** BitShares now supports OpenSSL 1.1.0 + +**After Building**, the `witness_node` can be launched with: + ./programs/witness_node/witness_node -This will launch the witness node. If you would like to launch the command-line wallet, you must first specify a port -for communication with the witness node. To do this, add text to `witness_node_data_dir/config.ini` as follows, then -restart the node: +The node will automatically create a data directory including a config file. It may take several hours to fully synchronize +the blockchain. After syncing, you can exit the node using Ctrl+C and set up the command-line wallet by editing +`witness_node_data_dir/config.ini` as follows: rpc-endpoint = 127.0.0.1:8090 -Then, in a separate terminal window, start the command-line wallet `cli_wallet`: +**IMPORTANT:** By default the witness node will start in reduced memory mode by using some of the commands detailed in [Memory reduction for nodes](https://github.com/bitshares/bitshares-core/wiki/Memory-reduction-for-nodes). +In order to run a full node with all the account history, you need to remove `partial-operations` and `max-ops-per-account` from your config file. Please note that currently (2018-10-17) a full node will need more than 160GB of RAM to operate and the required memory is growing fast. Consider the following table as minimal requirements before running a node: + +| Default | Full | Minimal | ElasticSearch +| --- | --- | --- | --- +| 100G HDD, 16G RAM | 640G SSD, 64G RAM * | 80G HDD, 4G RAM | 500G SSD, 32G RAM + +\* For this setup, allocate at least 500GB of SSD as swap. + +After starting the witness node again, in a separate terminal you can run: ./programs/cli_wallet/cli_wallet -To set your iniital password to 'password' use: +Set your initial password: - >>> set_password password - >>> unlock password + >>> set_password <PASSWORD> + >>> unlock <PASSWORD> To import your initial balance: - >>> import_balance nathan [5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3] true + >>> import_balance <ACCOUNT NAME> [<WIF_KEY>] true If you send private keys over this connection, `rpc-endpoint` should be bound to localhost for security. -A list of CLI wallet commands is available -[here](https://github.com/cryptonomex/graphene/blob/master/libraries/wallet/include/graphene/wallet/wallet.hpp). +Use `help` to see all available wallet commands. Source definition and listing of all commands is available +[here](https://github.com/bitshares/bitshares-core/blob/master/libraries/wallet/include/graphene/wallet/wallet.hpp). -Code coverage testing ---------------------- +Support +------- +Technical support is available in the [BitSharesTalk technical support subforum](https://bitsharestalk.org/index.php?board=45.0). -Check how much code is covered by unit tests, using gcov/lcov (see http://ltp.sourceforge.net/coverage/lcov.php ). +BitShares Core bugs can be reported directly to the [issue tracker](https://github.com/bitshares/bitshares-core/issues). - cmake -D ENABLE_COVERAGE_TESTING=true -D CMAKE_BUILD_TYPE=Debug . - make - lcov --capture --initial --directory . --output-file base.info --no-external - libraries/fc/bloom_test - libraries/fc/task_cancel_test - libraries/fc/api - libraries/fc/blind - libraries/fc/ecc_test test - libraries/fc/real128_test - libraries/fc/lzma_test README.md - libraries/fc/ntp_test - tests/intense_test - tests/app_test - tests/chain_bench - tests/chain_test - tests/performance_test - lcov --capture --directory . 
--output-file test.info --no-external - lcov --add-tracefile base.info --add-tracefile test.info --output-file total.info - lcov -o interesting.info -r total.info libraries/fc/vendor/\* libraries/fc/tests/\* tests/\* - mkdir -p lcov - genhtml interesting.info --output-directory lcov --prefix `pwd` - -Now open `lcov/index.html` in a browser. - -Unit testing ------------- - -We use the Boost unit test framework for unit testing. Most unit -tests reside in the `chain_test` build target. - -Witness node ------------- - -The role of the witness node is to broadcast transactions, download blocks, and optionally sign them. - -``` -./witness_node --rpc-endpoint 127.0.0.1:8090 --enable-stale-production -w '"1.6.0"' '"1.6.1"' '"1.6.2"' '"1.6.3"' '"1.6.4"' '"1.6.5"' '"1.6.6"' '"1.6.7"' '"1.6.8"' '"1.6.9"' '"1.6.10"' '"1.6.11"' '"1.6.12"' '"1.6.13"' '"1.6.14"' '"1.6.15"' '"1.6.16"' '"1.6.17"' '"1.6.18"' '"1.6.19"' '"1.6.20"' '"1.6.21"' '"1.6.22"' '"1.6.23"' '"1.6.24"' '"1.6.25"' '"1.6.26"' '"1.6.27"' '"1.6.28"' '"1.6.29"' '"1.6.30"' '"1.6.31"' '"1.6.32"' '"1.6.33"' '"1.6.34"' '"1.6.35"' '"1.6.36"' '"1.6.37"' '"1.6.38"' '"1.6.39"' '"1.6.40"' '"1.6.41"' '"1.6.42"' '"1.6.43"' '"1.6.44"' '"1.6.45"' '"1.6.46"' '"1.6.47"' '"1.6.48"' '"1.6.49"' '"1.6.50"' '"1.6.51"' '"1.6.52"' '"1.6.53"' '"1.6.54"' '"1.6.55"' '"1.6.56"' '"1.6.57"' '"1.6.58"' '"1.6.59"' '"1.6.60"' '"1.6.61"' '"1.6.62"' '"1.6.63"' '"1.6.64"' '"1.6.65"' '"1.6.66"' '"1.6.67"' '"1.6.68"' '"1.6.69"' '"1.6.70"' '"1.6.71"' '"1.6.72"' '"1.6.73"' '"1.6.74"' '"1.6.75"' '"1.6.76"' '"1.6.77"' '"1.6.78"' '"1.6.79"' '"1.6.80"' '"1.6.81"' '"1.6.82"' '"1.6.83"' '"1.6.84"' '"1.6.85"' '"1.6.86"' '"1.6.87"' '"1.6.88"' '"1.6.89"' '"1.6.90"' '"1.6.91"' '"1.6.92"' '"1.6.93"' '"1.6.94"' '"1.6.95"' '"1.6.96"' '"1.6.97"' '"1.6.98"' '"1.6.99"' '"1.6.100"' -``` - -Running specific tests ----------------------- - -- `tests/chain_tests -t block_tests/name_of_test` +BitShares UI bugs should be reported to the [UI issue tracker](https://github.com/bitshares/bitshares-ui/issues) + +Up to date online Doxygen documentation can be found at [Doxygen](https://bitshares.org/doxygen/hierarchy.html) Using the API ------------- @@ -118,7 +139,7 @@ API 0 is accessible using regular JSON-RPC: Accessing restricted API's -------------------------- -You can restrict API's to particular users by specifying an `apiaccess` file in `config.ini`. Here is an example `apiaccess` file which allows +You can restrict API's to particular users by specifying an `api-access` file in `config.ini` or by using the `--api-access /full/path/to/api-access.json` startup node command. Here is an example `api-access` file which allows user `bytemaster` with password `supersecret` to access four different API's, while allowing any other user to access the three public API's necessary to use the wallet: @@ -162,13 +183,8 @@ If you want information which is not available from an API, it might be availabl from the [database](https://bitshares.github.io/doxygen/classgraphene_1_1chain_1_1database.html); it is fairly simple to write API methods to expose database methods. -Running private testnet ------------------------ - -See the [documentation](https://github.com/cryptonomex/graphene/wiki/private-testnet) if you want to run a private testnet. - -Questions ---------- +FAQ +--- - Is there a way to generate help with parameter names and method descriptions? @@ -207,7 +223,7 @@ Questions The second number specifies the *type*. The type of the object determines what fields it has. 
For a complete list of type ID's, see `enum object_type` and `enum impl_object_type` in - [types.hpp](https://github.com/cryptonomex/graphene/blob/master/libraries/chain/include/graphene/chain/protocol/types.hpp). + [types.hpp](https://github.com/bitshares/bitshares-2/blob/bitshares/libraries/chain/include/graphene/chain/protocol/types.hpp). The third number specifies the *instance*. The instance of the object is different for each individual object. @@ -237,3 +253,8 @@ Questions less fine if your `witness_node` allows the general public to control which p2p nodes it's connecting to. Therefore the API to add p2p connections needs to be set up with proper access controls. + +License +------- +BitShares Core is under the MIT license. See [LICENSE](https://github.com/bitshares/bitshares-core/blob/master/LICENSE.txt) +for more information. diff --git a/docker/bitsharesentry.sh b/docker/bitsharesentry.sh new file mode 100644 index 0000000000..9de94f1cc4 --- /dev/null +++ b/docker/bitsharesentry.sh @@ -0,0 +1,87 @@ +#!/bin/bash +BITSHARESD="/usr/local/bin/witness_node" + +# For blockchain download +VERSION=`cat /etc/bitshares/version` + +## Supported Environmental Variables +# +# * $BITSHARESD_SEED_NODES +# * $BITSHARESD_RPC_ENDPOINT +# * $BITSHARESD_PLUGINS +# * $BITSHARESD_REPLAY +# * $BITSHARESD_RESYNC +# * $BITSHARESD_P2P_ENDPOINT +# * $BITSHARESD_WITNESS_ID +# * $BITSHARESD_PRIVATE_KEY +# * $BITSHARESD_TRACK_ACCOUNTS +# * $BITSHARESD_PARTIAL_OPERATIONS +# * $BITSHARESD_MAX_OPS_PER_ACCOUNT +# * $BITSHARESD_ES_NODE_URL +# * $BITSHARESD_TRUSTED_NODE +# + +ARGS="" +# Translate environmental variables +if [[ ! -z "$BITSHARESD_SEED_NODES" ]]; then + for NODE in $BITSHARESD_SEED_NODES ; do + ARGS+=" --seed-node=$NODE" + done +fi +if [[ ! -z "$BITSHARESD_RPC_ENDPOINT" ]]; then + ARGS+=" --rpc-endpoint=${BITSHARESD_RPC_ENDPOINT}" +fi + +if [[ ! -z "$BITSHARESD_REPLAY" ]]; then + ARGS+=" --replay-blockchain" +fi + +if [[ ! -z "$BITSHARESD_RESYNC" ]]; then + ARGS+=" --resync-blockchain" +fi + +if [[ ! -z "$BITSHARESD_P2P_ENDPOINT" ]]; then + ARGS+=" --p2p-endpoint=${BITSHARESD_P2P_ENDPOINT}" +fi + +if [[ ! -z "$BITSHARESD_WITNESS_ID" ]]; then + ARGS+=" --witness-id=$BITSHARESD_WITNESS_ID" +fi + +if [[ ! -z "$BITSHARESD_PRIVATE_KEY" ]]; then + ARGS+=" --private-key=$BITSHARESD_PRIVATE_KEY" +fi + +if [[ ! -z "$BITSHARESD_TRACK_ACCOUNTS" ]]; then + for ACCOUNT in $BITSHARESD_TRACK_ACCOUNTS ; do + ARGS+=" --track-account=$ACCOUNT" + done +fi + +if [[ ! -z "$BITSHARESD_PARTIAL_OPERATIONS" ]]; then + ARGS+=" --partial-operations=${BITSHARESD_PARTIAL_OPERATIONS}" +fi + +if [[ ! -z "$BITSHARESD_MAX_OPS_PER_ACCOUNT" ]]; then + ARGS+=" --max-ops-per-account=${BITSHARESD_MAX_OPS_PER_ACCOUNT}" +fi + +if [[ ! -z "$BITSHARESD_ES_NODE_URL" ]]; then + ARGS+=" --elasticsearch-node-url=${BITSHARESD_ES_NODE_URL}" +fi + +if [[ ! -z "$BITSHARESD_TRUSTED_NODE" ]]; then + ARGS+=" --trusted-node=${BITSHARESD_TRUSTED_NODE}" +fi + +## Link the bitshares config file into home +## This link has been created in Dockerfile, already +ln -f -s /etc/bitshares/config.ini /var/lib/bitshares + +# Plugins need to be provided in a space-separated list, which +# makes it necessary to write it like this +if [[ ! 
-z "$BITSHARESD_PLUGINS" ]]; then + exec $BITSHARESD --data-dir ${HOME} ${ARGS} ${BITSHARESD_ARGS} --plugins "${BITSHARESD_PLUGINS}" +else + exec $BITSHARESD --data-dir ${HOME} ${ARGS} ${BITSHARESD_ARGS} +fi diff --git a/docker/default_config.ini b/docker/default_config.ini new file mode 100644 index 0000000000..daef0e5a8c --- /dev/null +++ b/docker/default_config.ini @@ -0,0 +1,62 @@ +# Endpoint for P2P node to listen on +p2p-endpoint = 0.0.0.0:1776 + +# P2P nodes to connect to on startup (may specify multiple times) +# seed-node = + +# JSON array of P2P nodes to connect to on startup +# seed-nodes = + +# Pairs of [BLOCK_NUM,BLOCK_ID] that should be enforced as checkpoints. +# checkpoint = + +# Endpoint for websocket RPC to listen on +rpc-endpoint = 0.0.0.0:8090 + +# Endpoint for TLS websocket RPC to listen on +# rpc-tls-endpoint = + +# The TLS certificate file for this server +# server-pem = + +# Password for this certificate +# server-pem-password = + +# File to read Genesis State from +# genesis-json = + +# Block signing key to use for init witnesses, overrides genesis file +# dbg-init-key = + +# JSON file specifying API permissions +# api-access = + +# Enable block production, even if the chain is stale. +enable-stale-production = false + +# Percent of witnesses (0-99) that must be participating in order to produce blocks +required-participation = false + +# ID of witness controlled by this node (e.g. "1.6.5", quotes are required, may specify multiple times) +# witness-id = + +# Tuple of [PublicKey, WIF private key] (may specify multiple times) +# private-key = ["BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV","5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"] + +# Account ID to track history for (may specify multiple times) +# track-account = + +# Track market history by grouping orders into buckets of equal size measured in seconds specified as a JSON array of numbers +# bucket-size = [15,60,300,3600,86400] +bucket-size = [60,300,900,1800,3600,14400,86400] +# for 1 min, 5 mins, 30 mins, 1h, 4 hs and 1 day. i think this should be the default. +# https://github.com/bitshares/bitshares-core/issues/465 + +# How far back in time to track history for each bucket size, measured in the number of buckets (default: 1000) +history-per-size = 1000 + +# Max amount of operations to store in the database, per account (drastically reduces RAM requirements) +max-ops-per-account = 1000 + +# Remove old operation history # objects from RAM +partial-operations = true diff --git a/docs b/docs index cdc8ea8133..00bd507e22 160000 --- a/docs +++ b/docs @@ -1 +1 @@ -Subproject commit cdc8ea8133a999afef8051700a4ce8edb0988ec4 +Subproject commit 00bd507e227e8b145bb75a1f13f22d9126281cbb diff --git a/libraries/CMakeLists.txt b/libraries/CMakeLists.txt index be71012dcc..35b768460f 100644 --- a/libraries/CMakeLists.txt +++ b/libraries/CMakeLists.txt @@ -1,11 +1,8 @@ add_subdirectory( fc ) add_subdirectory( db ) -add_subdirectory( deterministic_openssl_rand ) add_subdirectory( chain ) add_subdirectory( egenesis ) add_subdirectory( net ) -#add_subdirectory( p2p ) -add_subdirectory( time ) add_subdirectory( utilities ) add_subdirectory( app ) add_subdirectory( plugins ) diff --git a/libraries/README.md b/libraries/README.md new file mode 100644 index 0000000000..256d15029b --- /dev/null +++ b/libraries/README.md @@ -0,0 +1,20 @@ +# BitShares Libraries + +The libraries are the core of the project and defines everything where applications can build on top. 
+ +A **graphene**-based blockchain application uses the `app` library to define what the application will do and which services it will offer. The blockchain itself is defined by the `chain` library, which includes all the objects, types, operations and protocols that make up the current consensus blockchain. The low-level in-memory database of BitShares is implemented in the `db` library. `fc` is a helper module used broadly throughout the library code, `egenesis` helps with the genesis file, and `plugins` are loaded optionally by the application. Wallet software such as `cli_wallet` builds on the `wallet` library. + +The code in these libraries is the most important part of the **bitshares-core** project and is maintained by the BitShares Core Team and contributors. +# Available Libraries + +Folder | Name | Description | Status +---|---|---|--- +[app](app) | Application | Bundles component libraries (chain, network, plugins) into a useful application. Also provides API access. | Active +[chain](chain) | Blockchain | Defines all objects, operations and types. This includes the consensus protocol and defines the whole blockchain behaviour. | Active +[db](db) | Database | Defines the internal database graphene uses. | Active +[egenesis](egenesis) | Genesis | Hardcodes the `genesis.json` file into the `witness_node` executable. | Active +[fc](fc) | Fast-compiling C++ library | https://github.com/bitshares/bitshares-fc | Active +[net](net) | Network | The graphene p2p layer. | Active +[plugins](plugins) | Plugins | Collection of singleton-designed modules used for extending bitshares-core. | Active +[utilities](utilities) | Utilities | Common utility calls used in applications or other libraries. | Active +[wallet](wallet) | Wallet | Wallet definition for the `cli_wallet` software. 
| Active diff --git a/libraries/app/CMakeLists.txt b/libraries/app/CMakeLists.txt index 2dd91f630a..bf4f5c2b00 100644 --- a/libraries/app/CMakeLists.txt +++ b/libraries/app/CMakeLists.txt @@ -4,15 +4,16 @@ file(GLOB EGENESIS_HEADERS "../egenesis/include/graphene/app/*.hpp") add_library( graphene_app api.cpp application.cpp + util.cpp database_api.cpp - impacted.cpp plugin.cpp + config_util.cpp ${HEADERS} ${EGENESIS_HEADERS} ) # need to link graphene_debug_witness because plugins aren't sufficiently isolated #246 -target_link_libraries( graphene_app graphene_market_history graphene_account_history graphene_chain fc graphene_db graphene_net graphene_time graphene_utilities graphene_debug_witness ) +target_link_libraries( graphene_app graphene_market_history graphene_account_history graphene_grouped_orders graphene_chain fc graphene_db graphene_net graphene_utilities graphene_debug_witness ) target_include_directories( graphene_app PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_SOURCE_DIR}/../egenesis/include" ) @@ -28,3 +29,4 @@ INSTALL( TARGETS LIBRARY DESTINATION lib ARCHIVE DESTINATION lib ) +INSTALL( FILES ${HEADERS} DESTINATION "include/graphene/app" ) diff --git a/libraries/app/api.cpp b/libraries/app/api.cpp index 5c69fcefdd..c2631b1293 100644 --- a/libraries/app/api.cpp +++ b/libraries/app/api.cpp @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include @@ -38,7 +37,7 @@ #include #include -#include +#include namespace graphene { namespace app { @@ -77,7 +76,11 @@ namespace graphene { namespace app { { if( api_name == "database_api" ) { - _database_api = std::make_shared< database_api >( std::ref( *_app.chain_database() ) ); + _database_api = std::make_shared< database_api >( std::ref( *_app.chain_database() ), &( _app.get_options() ) ); + } + else if( api_name == "block_api" ) + { + _block_api = std::make_shared< block_api >( std::ref( *_app.chain_database() ) ); } else if( api_name == "network_broadcast_api" ) { @@ -95,6 +98,14 @@ namespace graphene { namespace app { { _crypto_api = std::make_shared< crypto_api >(); } + else if( api_name == "asset_api" ) + { + _asset_api = std::make_shared< asset_api >( _app ); + } + else if( api_name == "orders_api" ) + { + _orders_api = std::make_shared< orders_api >( std::ref( _app ) ); + } else if( api_name == "debug_api" ) { // can only enable this API if the plugin was loaded @@ -104,6 +115,20 @@ namespace graphene { namespace app { return; } + // block_api + block_api::block_api(graphene::chain::database& db) : _db(db) { } + block_api::~block_api() { } + + vector> block_api::get_blocks(uint32_t block_num_from, uint32_t block_num_to)const + { + FC_ASSERT( block_num_to >= block_num_from ); + vector> res; + for(uint32_t block_num=block_num_from; block_num<=block_num_to; block_num++) { + res.push_back(_db.fetch_block_by_number(block_num)); + } + return res; + } + network_broadcast_api::network_broadcast_api(application& a):_app(a) { _applied_block_connection = _app.chain_database()->applied_block.connect([this](const signed_block& b){ on_applied_block(b); }); @@ -124,31 +149,48 @@ namespace graphene { namespace app { { auto block_num = b.block_num(); auto& callback = _callbacks.find(id)->second; - fc::async( [capture_this,this,id,block_num,trx_num,trx,callback](){ callback( fc::variant(transaction_confirmation{ id, block_num, trx_num, trx}) ); } ); + auto v = fc::variant( transaction_confirmation{ id, block_num, trx_num, trx }, GRAPHENE_MAX_NESTED_OBJECTS ); + fc::async( [capture_this,v,callback]() { + 
callback(v); + } ); } } } } - void network_broadcast_api::broadcast_transaction(const signed_transaction& trx) + void network_broadcast_api::broadcast_transaction(const precomputable_transaction& trx) { - trx.validate(); + _app.chain_database()->precompute_parallel( trx ).wait(); _app.chain_database()->push_transaction(trx); - _app.p2p_node()->broadcast_transaction(trx); + if( _app.p2p_node() != nullptr ) + _app.p2p_node()->broadcast_transaction(trx); + } + + fc::variant network_broadcast_api::broadcast_transaction_synchronous(const precomputable_transaction& trx) + { + fc::promise::ptr prom( new fc::promise() ); + broadcast_transaction_with_callback( [prom]( const fc::variant& v ){ + prom->set_value(v); + }, trx ); + + return fc::future(prom).wait(); } void network_broadcast_api::broadcast_block( const signed_block& b ) { + _app.chain_database()->precompute_parallel( b ).wait(); _app.chain_database()->push_block(b); - _app.p2p_node()->broadcast( net::block_message( b )); + if( _app.p2p_node() != nullptr ) + _app.p2p_node()->broadcast( net::block_message( b )); } - void network_broadcast_api::broadcast_transaction_with_callback(confirmation_callback cb, const signed_transaction& trx) + void network_broadcast_api::broadcast_transaction_with_callback(confirmation_callback cb, const precomputable_transaction& trx) { - trx.validate(); + _app.chain_database()->precompute_parallel( trx ).wait(); _callbacks[trx.id()] = cb; _app.chain_database()->push_transaction(trx); - _app.p2p_node()->broadcast_transaction(trx); + if( _app.p2p_node() != nullptr ) + _app.p2p_node()->broadcast_transaction(trx); } network_node_api::network_node_api( application& a ) : _app( a ) @@ -193,6 +235,12 @@ namespace graphene { namespace app { return *_network_broadcast_api; } + fc::api login_api::block()const + { + FC_ASSERT(_block_api); + return *_block_api; + } + fc::api login_api::network_node()const { FC_ASSERT(_network_node_api); @@ -217,160 +265,30 @@ namespace graphene { namespace app { return *_crypto_api; } + fc::api login_api::asset() const + { + FC_ASSERT(_asset_api); + return *_asset_api; + } + + fc::api login_api::orders() const + { + FC_ASSERT(_orders_api); + return *_orders_api; + } + fc::api login_api::debug() const { FC_ASSERT(_debug_api); return *_debug_api; } - vector get_relevant_accounts( const object* obj ) - { - vector result; - if( obj->id.space() == protocol_ids ) - { - switch( (object_type)obj->id.type() ) - { - case null_object_type: - case base_object_type: - case OBJECT_TYPE_COUNT: - return result; - case account_object_type:{ - result.push_back( obj->id ); - break; - } case asset_object_type:{ - const auto& aobj = dynamic_cast(obj); - assert( aobj != nullptr ); - result.push_back( aobj->issuer ); - break; - } case force_settlement_object_type:{ - const auto& aobj = dynamic_cast(obj); - assert( aobj != nullptr ); - result.push_back( aobj->owner ); - break; - } case committee_member_object_type:{ - const auto& aobj = dynamic_cast(obj); - assert( aobj != nullptr ); - result.push_back( aobj->committee_member_account ); - break; - } case witness_object_type:{ - const auto& aobj = dynamic_cast(obj); - assert( aobj != nullptr ); - result.push_back( aobj->witness_account ); - break; - } case limit_order_object_type:{ - const auto& aobj = dynamic_cast(obj); - assert( aobj != nullptr ); - result.push_back( aobj->seller ); - break; - } case call_order_object_type:{ - const auto& aobj = dynamic_cast(obj); - assert( aobj != nullptr ); - result.push_back( aobj->borrower ); - break; - } case 
custom_object_type:{ - break; - } case proposal_object_type:{ - const auto& aobj = dynamic_cast(obj); - assert( aobj != nullptr ); - flat_set impacted; - transaction_get_impacted_accounts( aobj->proposed_transaction, impacted ); - result.reserve( impacted.size() ); - for( auto& item : impacted ) result.emplace_back(item); - break; - } case operation_history_object_type:{ - const auto& aobj = dynamic_cast(obj); - assert( aobj != nullptr ); - flat_set impacted; - operation_get_impacted_accounts( aobj->op, impacted ); - result.reserve( impacted.size() ); - for( auto& item : impacted ) result.emplace_back(item); - break; - } case withdraw_permission_object_type:{ - const auto& aobj = dynamic_cast(obj); - assert( aobj != nullptr ); - result.push_back( aobj->withdraw_from_account ); - result.push_back( aobj->authorized_account ); - break; - } case vesting_balance_object_type:{ - const auto& aobj = dynamic_cast(obj); - assert( aobj != nullptr ); - result.push_back( aobj->owner ); - break; - } case worker_object_type:{ - const auto& aobj = dynamic_cast(obj); - assert( aobj != nullptr ); - result.push_back( aobj->worker_account ); - break; - } case balance_object_type:{ - /** these are free from any accounts */ - break; - } - } - } - else if( obj->id.space() == implementation_ids ) - { - switch( (impl_object_type)obj->id.type() ) - { - case impl_global_property_object_type: - break; - case impl_dynamic_global_property_object_type: - break; - case impl_reserved0_object_type: - break; - case impl_asset_dynamic_data_type: - break; - case impl_asset_bitasset_data_type: - break; - case impl_account_balance_object_type:{ - const auto& aobj = dynamic_cast(obj); - assert( aobj != nullptr ); - result.push_back( aobj->owner ); - break; - } case impl_account_statistics_object_type:{ - const auto& aobj = dynamic_cast(obj); - assert( aobj != nullptr ); - result.push_back( aobj->owner ); - break; - } case impl_transaction_object_type:{ - const auto& aobj = dynamic_cast(obj); - assert( aobj != nullptr ); - flat_set impacted; - transaction_get_impacted_accounts( aobj->trx, impacted ); - result.reserve( impacted.size() ); - for( auto& item : impacted ) result.emplace_back(item); - break; - } case impl_blinded_balance_object_type:{ - const auto& aobj = dynamic_cast(obj); - assert( aobj != nullptr ); - result.reserve( aobj->owner.account_auths.size() ); - for( const auto& a : aobj->owner.account_auths ) - result.push_back( a.first ); - break; - } case impl_block_summary_object_type: - break; - case impl_account_transaction_history_object_type: - break; - case impl_chain_property_object_type: - break; - case impl_witness_schedule_object_type: - break; - case impl_budget_record_object_type: - break; - case impl_special_authority_object_type: - break; - case impl_buyback_object_type: - break; - case impl_fba_accumulator_object_type: - break; - } - } - return result; - } // end get_relevant_accounts( obj ) - - vector history_api::get_fill_order_history( asset_id_type a, asset_id_type b, uint32_t limit )const + vector history_api::get_fill_order_history( std::string asset_a, std::string asset_b, uint32_t limit )const { FC_ASSERT(_app.chain_database()); const auto& db = *_app.chain_database(); + asset_id_type a = database_api.get_asset_id_from_string( asset_a ); + asset_id_type b = database_api.get_asset_id_from_string( asset_b ); if( a > b ) std::swap(a,b); const auto& history_idx = db.get_index_type().indices().get(); history_key hkey; @@ -392,58 +310,118 @@ namespace graphene { namespace app { return result; } - vector 
history_api::get_account_history( account_id_type account, - operation_history_id_type stop, - unsigned limit, + vector history_api::get_account_history( const std::string account_id_or_name, + operation_history_id_type stop, + unsigned limit, operation_history_id_type start ) const { FC_ASSERT( _app.chain_database() ); - const auto& db = *_app.chain_database(); - FC_ASSERT( limit <= 100 ); + const auto& db = *_app.chain_database(); + uint64_t api_limit_get_account_history=_app.get_options().api_limit_get_account_history; + FC_ASSERT( limit <= api_limit_get_account_history ); vector result; + account_id_type account; + try { + account = database_api.get_account_id_from_string(account_id_or_name); + const account_transaction_history_object& node = account(db).statistics(db).most_recent_op(db); + if(start == operation_history_id_type() || start.instance.value > node.operation_id.instance.value) + start = node.operation_id; + } catch(...) { return result; } + + const auto& hist_idx = db.get_index_type(); + const auto& by_op_idx = hist_idx.indices().get(); + auto index_start = by_op_idx.begin(); + auto itr = by_op_idx.lower_bound(boost::make_tuple(account, start)); + + while(itr != index_start && itr->account == account && itr->operation_id.instance.value > stop.instance.value && result.size() < limit) + { + if(itr->operation_id.instance.value <= start.instance.value) + result.push_back(itr->operation_id(db)); + --itr; + } + if(stop.instance.value == 0 && result.size() < limit && itr->account == account) { + result.push_back(itr->operation_id(db)); + } + + return result; + } + + vector history_api::get_account_history_operations( const std::string account_id_or_name, + int operation_type, + operation_history_id_type start, + operation_history_id_type stop, + unsigned limit) const + { + FC_ASSERT( _app.chain_database() ); + const auto& db = *_app.chain_database(); + uint64_t api_limit_get_account_history_operations=_app.get_options().api_limit_get_account_history_operations; + FC_ASSERT(limit <= api_limit_get_account_history_operations); + vector result; + account_id_type account; + try { + account = database_api.get_account_id_from_string(account_id_or_name); + } catch(...) 
{ return result; } const auto& stats = account(db).statistics(db); if( stats.most_recent_op == account_transaction_history_id_type() ) return result; const account_transaction_history_object* node = &stats.most_recent_op(db); if( start == operation_history_id_type() ) start = node->operation_id; - + while(node && node->operation_id.instance.value > stop.instance.value && result.size() < limit) { - if( node->operation_id.instance.value <= start.instance.value ) - result.push_back( node->operation_id(db) ); + if( node->operation_id.instance.value <= start.instance.value ) { + + if(node->operation_id(db).op.which() == operation_type) + result.push_back( node->operation_id(db) ); + } if( node->next == account_transaction_history_id_type() ) node = nullptr; else node = &node->next(db); } - + if( stop.instance.value == 0 && result.size() < limit ) { + auto head = db.find(account_transaction_history_id_type()); + if (head != nullptr && head->account == account && head->operation_id(db).op.which() == operation_type) + result.push_back(head->operation_id(db)); + } return result; } - - vector history_api::get_relative_account_history( account_id_type account, - uint32_t stop, - unsigned limit, - uint32_t start) const + + + vector history_api::get_relative_account_history( const std::string account_id_or_name, + uint64_t stop, + unsigned limit, + uint64_t start) const { FC_ASSERT( _app.chain_database() ); const auto& db = *_app.chain_database(); - FC_ASSERT(limit <= 100); + uint64_t api_limit_get_relative_account_history=_app.get_options().api_limit_get_relative_account_history; + FC_ASSERT(limit <= api_limit_get_relative_account_history); vector result; + account_id_type account; + try { + account = database_api.get_account_id_from_string(account_id_or_name); + } catch(...) 
{ return result; } + const auto& stats = account(db).statistics(db); if( start == 0 ) - start = account(db).statistics(db).total_ops; - else start = min( account(db).statistics(db).total_ops, start ); - const auto& hist_idx = db.get_index_type(); - const auto& by_seq_idx = hist_idx.indices().get(); - - auto itr = by_seq_idx.upper_bound( boost::make_tuple( account, start ) ); - auto itr_stop = by_seq_idx.lower_bound( boost::make_tuple( account, stop ) ); - --itr; - - while ( itr != itr_stop && result.size() < limit ) + start = stats.total_ops; + else + start = min( stats.total_ops, start ); + + if( start >= stop && start > stats.removed_ops && limit > 0 ) { - result.push_back( itr->operation_id(db) ); - --itr; + const auto& hist_idx = db.get_index_type(); + const auto& by_seq_idx = hist_idx.indices().get(); + + auto itr = by_seq_idx.upper_bound( boost::make_tuple( account, start ) ); + auto itr_stop = by_seq_idx.lower_bound( boost::make_tuple( account, stop ) ); + + do + { + --itr; + result.push_back( itr->operation_id(db) ); + } + while ( itr != itr_stop && result.size() < limit ); } - return result; } @@ -454,11 +432,29 @@ namespace graphene { namespace app { return hist->tracked_buckets(); } - vector history_api::get_market_history( asset_id_type a, asset_id_type b, + history_operation_detail history_api::get_account_history_by_operations(const std::string account_id_or_name, vector operation_types, uint32_t start, unsigned limit) + { + uint64_t api_limit_get_account_history_by_operations=_app.get_options().api_limit_get_account_history_by_operations; + FC_ASSERT(limit <= api_limit_get_account_history_by_operations); + history_operation_detail result; + vector objs = get_relative_account_history(account_id_or_name, start, limit, limit + start - 1); + std::for_each(objs.begin(), objs.end(), [&](const operation_history_object &o) { + if (operation_types.empty() || find(operation_types.begin(), operation_types.end(), o.op.which()) != operation_types.end()) { + result.operation_history_objs.push_back(o); + } + }); + + result.total_count = objs.size(); + return result; + } + + vector history_api::get_market_history( std::string asset_a, std::string asset_b, uint32_t bucket_seconds, fc::time_point_sec start, fc::time_point_sec end )const { try { FC_ASSERT(_app.chain_database()); const auto& db = *_app.chain_database(); + asset_id_type a = database_api.get_asset_id_from_string( asset_a ); + asset_id_type b = database_api.get_asset_id_from_string( asset_b ); vector result; result.reserve(200); @@ -478,49 +474,35 @@ namespace graphene { namespace app { ++itr; } return result; - } FC_CAPTURE_AND_RETHROW( (a)(b)(bucket_seconds)(start)(end) ) } - + } FC_CAPTURE_AND_RETHROW( (asset_a)(asset_b)(bucket_seconds)(start)(end) ) } + crypto_api::crypto_api(){}; - - blind_signature crypto_api::blind_sign( const extended_private_key_type& key, const blinded_hash& hash, int i ) - { - return fc::ecc::extended_private_key( key ).blind_sign( hash, i ); - } - - signature_type crypto_api::unblind_signature( const extended_private_key_type& key, - const extended_public_key_type& bob, - const blind_signature& sig, - const fc::sha256& hash, - int i ) - { - return fc::ecc::extended_private_key( key ).unblind_signature( extended_public_key( bob ), sig, hash, i ); - } - + commitment_type crypto_api::blind( const blind_factor_type& blind, uint64_t value ) { return fc::ecc::blind( blind, value ); } - + blind_factor_type crypto_api::blind_sum( const std::vector& blinds_in, uint32_t non_neg ) { return fc::ecc::blind_sum( 
blinds_in, non_neg ); } - + bool crypto_api::verify_sum( const std::vector& commits_in, const std::vector& neg_commits_in, int64_t excess ) { return fc::ecc::verify_sum( commits_in, neg_commits_in, excess ); } - + verify_range_result crypto_api::verify_range( const commitment_type& commit, const std::vector& proof ) { verify_range_result result; result.success = fc::ecc::verify_range( result.min_val, result.max_val, commit, proof ); return result; } - - std::vector crypto_api::range_proof_sign( uint64_t min_value, - const commitment_type& commit, - const blind_factor_type& commit_blind, + + std::vector crypto_api::range_proof_sign( uint64_t min_value, + const commitment_type& commit, + const blind_factor_type& commit_blind, const blind_factor_type& nonce, int8_t base10_exp, uint8_t min_bits, @@ -528,26 +510,143 @@ namespace graphene { namespace app { { return fc::ecc::range_proof_sign( min_value, commit, commit_blind, nonce, base10_exp, min_bits, actual_value ); } - + verify_range_proof_rewind_result crypto_api::verify_range_proof_rewind( const blind_factor_type& nonce, - const commitment_type& commit, + const commitment_type& commit, const std::vector& proof ) { verify_range_proof_rewind_result result; - result.success = fc::ecc::verify_range_proof_rewind( result.blind_out, - result.value_out, - result.message_out, - nonce, - result.min_val, - result.max_val, - const_cast< commitment_type& >( commit ), + result.success = fc::ecc::verify_range_proof_rewind( result.blind_out, + result.value_out, + result.message_out, + nonce, + result.min_val, + result.max_val, + const_cast< commitment_type& >( commit ), proof ); return result; } - + range_proof_info crypto_api::range_get_info( const std::vector& proof ) { return fc::ecc::range_get_info( proof ); } + // asset_api + asset_api::asset_api(graphene::app::application& app) : + _app(app), + _db( *app.chain_database()), + database_api( std::ref(*app.chain_database()), &(app.get_options()) + ) { } + asset_api::~asset_api() { } + + vector asset_api::get_asset_holders( std::string asset, uint32_t start, uint32_t limit ) const { + uint64_t api_limit_get_asset_holders=_app.get_options().api_limit_get_asset_holders; + FC_ASSERT(limit <= api_limit_get_asset_holders); + asset_id_type asset_id = database_api.get_asset_id_from_string( asset ); + const auto& bal_idx = _db.get_index_type< account_balance_index >().indices().get< by_asset_balance >(); + auto range = bal_idx.equal_range( boost::make_tuple( asset_id ) ); + + vector result; + + uint32_t index = 0; + for( const account_balance_object& bal : boost::make_iterator_range( range.first, range.second ) ) + { + if( result.size() >= limit ) + break; + + if( bal.balance.value == 0 ) + continue; + + if( index++ < start ) + continue; + + const auto account = _db.find(bal.owner); + + account_asset_balance aab; + aab.name = account->name; + aab.account_id = account->id; + aab.amount = bal.balance.value; + + result.push_back(aab); + } + + return result; + } + // get number of asset holders. + int asset_api::get_asset_holders_count( std::string asset ) const { + const auto& bal_idx = _db.get_index_type< account_balance_index >().indices().get< by_asset_balance >(); + asset_id_type asset_id = database_api.get_asset_id_from_string( asset ); + auto range = bal_idx.equal_range( boost::make_tuple( asset_id ) ); + + int count = boost::distance(range) - 1; + + return count; + } + // function to get vector of system assets with holders count. 
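// --- Editor's sketch (not part of the patch): the skip/limit pagination used by
// get_asset_holders above, with plain standard-library types standing in for the
// by_asset_balance index. Zero balances are not counted as holders, the first `start`
// qualifying rows are skipped, and the result is capped at `limit`.
#include <cstdint>
#include <string>
#include <vector>

struct balance_row { std::string account; int64_t balance; };

std::vector<balance_row> page_holders(const std::vector<balance_row>& rows,  // ordered by balance
                                      uint32_t start, uint32_t limit)
{
    std::vector<balance_row> out;
    uint32_t index = 0;
    for (const auto& row : rows)
    {
        if (out.size() >= limit) break;      // hard cap, checked against api_limit_get_asset_holders
        if (row.balance == 0) continue;      // zero balances are not "holders"
        if (index++ < start) continue;       // skip the first `start` qualifying rows
        out.push_back(row);
    }
    return out;
}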
+ vector asset_api::get_all_asset_holders() const { + vector result; + vector total_assets; + for( const asset_object& asset_obj : _db.get_index_type().indices() ) + { + const auto& dasset_obj = asset_obj.dynamic_asset_data_id(_db); + + asset_id_type asset_id; + asset_id = dasset_obj.id; + + const auto& bal_idx = _db.get_index_type< account_balance_index >().indices().get< by_asset_balance >(); + auto range = bal_idx.equal_range( boost::make_tuple( asset_id ) ); + + int count = boost::distance(range) - 1; + + asset_holders ah; + ah.asset_id = asset_id; + ah.count = count; + + result.push_back(ah); + } + + return result; + } + + // orders_api + flat_set orders_api::get_tracked_groups()const + { + auto plugin = _app.get_plugin( "grouped_orders" ); + FC_ASSERT( plugin ); + return plugin->tracked_groups(); + } + + vector< limit_order_group > orders_api::get_grouped_limit_orders( std::string base_asset, + std::string quote_asset, + uint16_t group, + optional start, + uint32_t limit )const + { + uint64_t api_limit_get_grouped_limit_orders=_app.get_options().api_limit_get_grouped_limit_orders; + FC_ASSERT( limit <= api_limit_get_grouped_limit_orders ); + auto plugin = _app.get_plugin( "grouped_orders" ); + FC_ASSERT( plugin ); + const auto& limit_groups = plugin->limit_order_groups(); + vector< limit_order_group > result; + + asset_id_type base_asset_id = database_api.get_asset_id_from_string( base_asset ); + asset_id_type quote_asset_id = database_api.get_asset_id_from_string( quote_asset ); + + price max_price = price::max( base_asset_id, quote_asset_id ); + price min_price = price::min( base_asset_id, quote_asset_id ); + if( start.valid() && !start->is_null() ) + max_price = std::max( std::min( max_price, *start ), min_price ); + + auto itr = limit_groups.lower_bound( limit_order_group_key( group, max_price ) ); + // use an end itrator to try to avoid expensive price comparison + auto end = limit_groups.upper_bound( limit_order_group_key( group, min_price ) ); + while( itr != end && result.size() < limit ) + { + result.emplace_back( *itr ); + ++itr; + } + return result; + } + } } // graphene::app diff --git a/libraries/app/application.cpp b/libraries/app/application.cpp index 6446448844..cc3941b50a 100644 --- a/libraries/app/application.cpp +++ b/libraries/app/application.cpp @@ -26,30 +26,30 @@ #include #include +#include +#include #include #include -#include #include #include #include -#include - #include #include -#include - +#include #include #include #include #include +#include #include #include #include +#include #include @@ -77,11 +77,11 @@ namespace bpo = boost::program_options; namespace detail { - genesis_state_type create_example_genesis() { + graphene::chain::genesis_state_type create_example_genesis() { auto nathan_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("nathan"))); dlog("Allocating all stake to ${key}", ("key", utilities::key_to_wif(nathan_key))); - genesis_state_type initial_state; - initial_state.initial_parameters.current_fees = fee_schedule::get_default();//->set_all_fees(GRAPHENE_BLOCKCHAIN_PRECISION); + graphene::chain::genesis_state_type initial_state; + initial_state.initial_parameters.current_fees = std::make_shared(fee_schedule::get_default()); initial_state.initial_active_witnesses = GRAPHENE_DEFAULT_MIN_WITNESS_COUNT; initial_state.initial_timestamp = time_point_sec(time_point::now().sec_since_epoch() / initial_state.initial_parameters.block_interval * @@ -106,777 +106,860 @@ namespace detail { return initial_state; } - class application_impl : 
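// --- Editor's sketch (not part of the patch): the bounded-range scan behind
// get_grouped_limit_orders above. Orders are assumed keyed by (group, price) with prices
// descending inside a group, so the scan runs from the clamped start price down to the
// minimum price, stopping at `limit`. std::map and double prices stand in for the
// grouped_orders plugin's index and the graphene price type.
#include <algorithm>
#include <cstdint>
#include <map>
#include <string>
#include <vector>

struct group_key {
    uint16_t group;
    double   price;                       // higher prices sort first within a group
    bool operator<(const group_key& o) const {
        if (group != o.group) return group < o.group;
        return price > o.price;           // descending by price
    }
};

std::vector<std::string> grouped_orders(const std::map<group_key, std::string>& idx,
                                        uint16_t group, double min_price, double max_price,
                                        double start, uint32_t limit)
{
    // clamp the caller-supplied start price into [min_price, max_price]
    double from = std::max(std::min(max_price, start), min_price);

    auto itr = idx.lower_bound(group_key{group, from});
    auto end = idx.upper_bound(group_key{group, min_price});   // end iterator avoids repeated price comparisons

    std::vector<std::string> out;
    while (itr != end && out.size() < limit) {
        out.push_back(itr->second);
        ++itr;
    }
    return out;
}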
public net::node_delegate - { - public: - fc::optional _lock_file; - bool _is_block_producer = false; - bool _force_validate = false; - void reset_p2p_node(const fc::path& data_dir) - { try { - _p2p_network = std::make_shared("Graphene Reference Implementation"); +} - _p2p_network->load_configuration(data_dir / "p2p"); - _p2p_network->set_node_delegate(this); +}} - if( _options->count("seed-node") ) - { - auto seeds = _options->at("seed-node").as>(); - for( const string& endpoint_string : seeds ) - { - std::vector endpoints = resolve_string_to_ip_endpoints(endpoint_string); - for (const fc::ip::endpoint& endpoint : endpoints) - { - ilog("Adding seed node ${endpoint}", ("endpoint", endpoint)); - _p2p_network->add_node(endpoint); - _p2p_network->connect_to_endpoint(endpoint); - } - } - } +#include "application_impl.hxx" - if( _options->count("p2p-endpoint") ) - _p2p_network->listen_on_endpoint(fc::ip::endpoint::from_string(_options->at("p2p-endpoint").as()), true); - else - _p2p_network->listen_on_port(0, false); - _p2p_network->listen_to_p2p_network(); - ilog("Configured p2p node to listen on ${ip}", ("ip", _p2p_network->get_actual_listening_endpoint())); +namespace graphene { namespace app { namespace detail { - _p2p_network->connect_to_p2p_network(); - _p2p_network->sync_from(net::item_id(net::core_message_type_enum::block_message_type, - _chain_db->head_block_id()), - std::vector()); - } FC_CAPTURE_AND_RETHROW() } +void application_impl::reset_p2p_node(const fc::path& data_dir) +{ try { + _p2p_network = std::make_shared("BitShares Reference Implementation"); - std::vector resolve_string_to_ip_endpoints(const std::string& endpoint_string) + _p2p_network->load_configuration(data_dir / "p2p"); + _p2p_network->set_node_delegate(this); + + if( _options->count("seed-node") ) + { + auto seeds = _options->at("seed-node").as>(); + for( const string& endpoint_string : seeds ) { - try - { - string::size_type colon_pos = endpoint_string.find(':'); - if (colon_pos == std::string::npos) - FC_THROW("Missing required port number in endpoint string \"${endpoint_string}\"", - ("endpoint_string", endpoint_string)); - std::string port_string = endpoint_string.substr(colon_pos + 1); - try + try { + std::vector endpoints = resolve_string_to_ip_endpoints(endpoint_string); + for (const fc::ip::endpoint& endpoint : endpoints) { - uint16_t port = boost::lexical_cast(port_string); + ilog("Adding seed node ${endpoint}", ("endpoint", endpoint)); + _p2p_network->add_node(endpoint); + _p2p_network->connect_to_endpoint(endpoint); + } + } catch( const fc::exception& e ) { + wlog( "caught exception ${e} while adding seed node ${endpoint}", + ("e", e.to_detail_string())("endpoint", endpoint_string) ); + } + } + } - std::string hostname = endpoint_string.substr(0, colon_pos); - std::vector endpoints = fc::resolve(hostname, port); - if (endpoints.empty()) - FC_THROW_EXCEPTION(fc::unknown_host_exception, "The host name can not be resolved: ${hostname}", ("hostname", hostname)); - return endpoints; + if( _options->count("seed-nodes") ) + { + auto seeds_str = _options->at("seed-nodes").as(); + auto seeds = fc::json::from_string(seeds_str).as>(2); + for( const string& endpoint_string : seeds ) + { + try { + std::vector endpoints = resolve_string_to_ip_endpoints(endpoint_string); + for (const fc::ip::endpoint& endpoint : endpoints) + { + ilog("Adding seed node ${endpoint}", ("endpoint", endpoint)); + _p2p_network->add_node(endpoint); } - catch (const boost::bad_lexical_cast&) + } catch( const fc::exception& e ) { + wlog( "caught 
exception ${e} while adding seed node ${endpoint}", + ("e", e.to_detail_string())("endpoint", endpoint_string) ); + } + } + } + else + { + // https://bitsharestalk.org/index.php/topic,23715.0.html + vector seeds = { + "seed01.liondani.com:1776", // liondani (GERMANY) + "104.236.144.84:1777", // puppies (USA) + "128.199.143.47:2015", // Harvey (Singapore) + "209.105.239.13:1776", // sahkan (USA) + "45.35.12.22:1776", // sahkan (USA) + "51.15.61.160:1776", // lafona (France) + "bts-seed1.abit-more.com:62015", // abit (China) + "node.blckchnd.com:4243", // blckchnd (Germany) + "seed.bitsharesdex.com:50696", // iHashFury (Europe) + "seed.bitsharesnodes.com:1776", // wackou (Netherlands) + "seed.blocktrades.us:1776", // BlockTrades (USA) + "seed.cubeconnex.com:1777", // cube (USA) + "seed.roelandp.nl:1776", // roelandp (Canada) + "seed04.bts-nodes.net:1776", // Thom (Australia) + "seed05.bts-nodes.net:1776", // Thom (USA) + "seed06.bts-nodes.net:1776", // Thom (USA) + "seed07.bts-nodes.net:1776", // Thom (Singapore) + "seed.bts.bangzi.info:55501", // Bangzi (Germany) + "seeds.bitshares.eu:1776" // pc (http://seeds.quisquis.de/bitshares.html) + }; + for( const string& endpoint_string : seeds ) + { + try { + std::vector endpoints = resolve_string_to_ip_endpoints(endpoint_string); + for (const fc::ip::endpoint& endpoint : endpoints) { - FC_THROW("Bad port: ${port}", ("port", port_string)); + ilog("Adding seed node ${endpoint}", ("endpoint", endpoint)); + _p2p_network->add_node(endpoint); } + } catch( const fc::exception& e ) { + wlog( "caught exception ${e} while adding seed node ${endpoint}", + ("e", e.to_detail_string())("endpoint", endpoint_string) ); } - FC_CAPTURE_AND_RETHROW((endpoint_string)) } + } - void reset_websocket_server() - { try { - if( !_options->count("rpc-endpoint") ) - return; - - bool enable_deflate_compression = _options->count("enable-permessage-deflate") != 0; - - _websocket_server = std::make_shared(enable_deflate_compression); - - _websocket_server->on_connection([&]( const fc::http::websocket_connection_ptr& c ){ - auto wsc = std::make_shared(*c); - auto login = std::make_shared( std::ref(*_self) ); - auto db_api = std::make_shared( std::ref(*_self->chain_database()) ); - wsc->register_api(fc::api(db_api)); - wsc->register_api(fc::api(login)); - c->set_session_data( wsc ); - }); - ilog("Configured websocket rpc to listen on ${ip}", ("ip",_options->at("rpc-endpoint").as())); - _websocket_server->listen( fc::ip::endpoint::from_string(_options->at("rpc-endpoint").as()) ); - _websocket_server->start_accept(); - } FC_CAPTURE_AND_RETHROW() } - - - void reset_websocket_tls_server() - { try { - if( !_options->count("rpc-tls-endpoint") ) - return; - if( !_options->count("server-pem") ) - { - wlog( "Please specify a server-pem to use rpc-tls-endpoint" ); - return; - } + if( _options->count("p2p-endpoint") ) + _p2p_network->listen_on_endpoint(fc::ip::endpoint::from_string(_options->at("p2p-endpoint").as()), true); + else + _p2p_network->listen_on_port(0, false); + _p2p_network->listen_to_p2p_network(); + ilog("Configured p2p node to listen on ${ip}", ("ip", _p2p_network->get_actual_listening_endpoint())); - string password = _options->count("server-pem-password") ? 
_options->at("server-pem-password").as() : ""; - bool enable_deflate_compression = _options->count("enable-permessage-deflate") != 0; - _websocket_tls_server = std::make_shared( _options->at("server-pem").as(), password, enable_deflate_compression ); - - _websocket_tls_server->on_connection([&]( const fc::http::websocket_connection_ptr& c ){ - auto wsc = std::make_shared(*c); - auto login = std::make_shared( std::ref(*_self) ); - auto db_api = std::make_shared( std::ref(*_self->chain_database()) ); - wsc->register_api(fc::api(db_api)); - wsc->register_api(fc::api(login)); - c->set_session_data( wsc ); - }); - ilog("Configured websocket TLS rpc to listen on ${ip}", ("ip",_options->at("rpc-tls-endpoint").as())); - _websocket_tls_server->listen( fc::ip::endpoint::from_string(_options->at("rpc-tls-endpoint").as()) ); - _websocket_tls_server->start_accept(); - } FC_CAPTURE_AND_RETHROW() } - - application_impl(application* self) - : _self(self), - _chain_db(std::make_shared()) - { - } + _p2p_network->connect_to_p2p_network(); + _p2p_network->sync_from(net::item_id(net::core_message_type_enum::block_message_type, + _chain_db->head_block_id()), + std::vector()); +} FC_CAPTURE_AND_RETHROW() } - ~application_impl() +std::vector application_impl::resolve_string_to_ip_endpoints(const std::string& endpoint_string) +{ + try + { + string::size_type colon_pos = endpoint_string.find(':'); + if (colon_pos == std::string::npos) + FC_THROW("Missing required port number in endpoint string \"${endpoint_string}\"", + ("endpoint_string", endpoint_string)); + std::string port_string = endpoint_string.substr(colon_pos + 1); + try { - fc::remove_all(_data_dir / "blockchain/dblock"); + uint16_t port = boost::lexical_cast(port_string); + + std::string hostname = endpoint_string.substr(0, colon_pos); + std::vector endpoints = fc::resolve(hostname, port); + if (endpoints.empty()) + FC_THROW_EXCEPTION( fc::unknown_host_exception, + "The host name can not be resolved: ${hostname}", + ("hostname", hostname) ); + return endpoints; } - - void set_dbg_init_key( genesis_state_type& genesis, const std::string& init_key ) + catch (const boost::bad_lexical_cast&) { - flat_set< std::string > initial_witness_names; - public_key_type init_pubkey( init_key ); - for( uint64_t i=0; icount("genesis-json") ) - { - std::string genesis_str; - fc::read_file_contents( _options->at("genesis-json").as(), genesis_str ); - genesis_state_type genesis = fc::json::from_string( genesis_str ).as(); - bool modified_genesis = false; - if( _options->count("genesis-timestamp") ) - { - genesis.initial_timestamp = fc::time_point_sec( graphene::time::now() ) + genesis.initial_parameters.block_interval + _options->at("genesis-timestamp").as(); - genesis.initial_timestamp -= genesis.initial_timestamp.sec_since_epoch() % genesis.initial_parameters.block_interval; - modified_genesis = true; - std::cerr << "Used genesis timestamp: " << genesis.initial_timestamp.to_iso_string() << " (PLEASE RECORD THIS)\n"; - } - if( _options->count("dbg-init-key") ) - { - std::string init_key = _options->at( "dbg-init-key" ).as(); - FC_ASSERT( genesis.initial_witness_candidates.size() >= genesis.initial_active_witnesses ); - set_dbg_init_key( genesis, init_key ); - modified_genesis = true; - std::cerr << "Set init witness key to " << init_key << "\n"; - } - if( modified_genesis ) - { - std::cerr << "WARNING: GENESIS WAS MODIFIED, YOUR CHAIN ID MAY BE DIFFERENT\n"; - genesis_str += "BOGUS"; - genesis.initial_chain_id = fc::sha256::hash( genesis_str ); - } - else - 
genesis.initial_chain_id = fc::sha256::hash( genesis_str ); - return genesis; - } - else - { - std::string egenesis_json; - graphene::egenesis::compute_egenesis_json( egenesis_json ); - FC_ASSERT( egenesis_json != "" ); - FC_ASSERT( graphene::egenesis::get_egenesis_json_hash() == fc::sha256::hash( egenesis_json ) ); - auto genesis = fc::json::from_string( egenesis_json ).as(); - genesis.initial_chain_id = fc::sha256::hash( egenesis_json ); - return genesis; - } - }; +void application_impl::new_connection( const fc::http::websocket_connection_ptr& c ) +{ + auto wsc = std::make_shared(*c, GRAPHENE_NET_MAX_NESTED_OBJECTS); + auto login = std::make_shared( std::ref(*_self) ); + login->enable_api("database_api"); - if( _options->count("resync-blockchain") ) - _chain_db->wipe(_data_dir / "blockchain", true); + wsc->register_api(login->database()); + wsc->register_api(fc::api(login)); + c->set_session_data( wsc ); - flat_map loaded_checkpoints; - if( _options->count("checkpoint") ) - { - auto cps = _options->at("checkpoint").as>(); - loaded_checkpoints.reserve( cps.size() ); - for( auto cp : cps ) - { - auto item = fc::json::from_string(cp).as >(); - loaded_checkpoints[item.first] = item.second; - } - } - _chain_db->add_checkpoints( loaded_checkpoints ); + std::string username = "*"; + std::string password = "*"; - if( _options->count("replay-blockchain") ) - { - ilog("Replaying blockchain on user request."); - _chain_db->reindex(_data_dir/"blockchain", initial_state()); - } else if( clean ) { + // Try to extract login information from "Authorization" header if present + std::string auth = c->get_request_header("Authorization"); + if( boost::starts_with(auth, "Basic ") ) { - auto is_new = [&]() -> bool - { - // directory doesn't exist - if( !fc::exists( _data_dir ) ) - return true; - // if directory exists but is empty, return true; else false. - return ( fc::directory_iterator( _data_dir ) == fc::directory_iterator() ); - }; - - auto is_outdated = [&]() -> bool - { - if( !fc::exists( _data_dir / "db_version" ) ) - return true; - std::string version_str; - fc::read_file_contents( _data_dir / "db_version", version_str ); - return (version_str != GRAPHENE_CURRENT_DB_VERSION); - }; + FC_ASSERT( auth.size() > 6 ); + auto user_pass = fc::base64_decode(auth.substr(6)); - bool need_reindex = (!is_new() && is_outdated()); - std::string reindex_reason = "version upgrade"; + std::vector parts; + boost::split( parts, user_pass, boost::is_any_of(":") ); - if( !need_reindex ) - { - try - { - _chain_db->open(_data_dir / "blockchain", initial_state); - } - catch( const fc::exception& e ) - { - ilog( "caught exception ${e} in open()", ("e", e.to_detail_string()) ); - need_reindex = true; - reindex_reason = "exception in open()"; - } - } + FC_ASSERT(parts.size() == 2); - if( need_reindex ) - { - ilog("Replaying blockchain due to ${reason}", ("reason", reindex_reason) ); - - fc::remove_all( _data_dir / "db_version" ); - _chain_db->reindex(_data_dir / "blockchain", initial_state()); - - // doing this down here helps ensure that DB will be wiped - // if any of the above steps were interrupted on a previous run - if( !fc::exists( _data_dir / "db_version" ) ) - { - std::ofstream db_version( - (_data_dir / "db_version").generic_string().c_str(), - std::ios::out | std::ios::binary | std::ios::trunc ); - std::string version_string = GRAPHENE_CURRENT_DB_VERSION; - db_version.write( version_string.c_str(), version_string.size() ); - db_version.close(); - } - } - } else { - wlog("Detected unclean shutdown. 
Replaying blockchain..."); - _chain_db->reindex(_data_dir / "blockchain", initial_state()); - } + username = parts[0]; + password = parts[1]; + } - if (!_options->count("genesis-json") && - _chain_db->get_chain_id() != graphene::egenesis::get_egenesis_chain_id()) { - elog("Detected old database. Nuking and starting over."); - _chain_db->wipe(_data_dir / "blockchain", true); - _chain_db.reset(); - _chain_db = std::make_shared(); - _chain_db->add_checkpoints(loaded_checkpoints); - _chain_db->open(_data_dir / "blockchain", initial_state); - } + login->login(username, password); +} - if( _options->count("force-validate") ) - { - ilog( "All transaction signatures will be validated" ); - _force_validate = true; - } +void application_impl::reset_websocket_server() +{ try { + if( !_options->count("rpc-endpoint") ) + return; - graphene::time::now(); + _websocket_server = std::make_shared(); + _websocket_server->on_connection( std::bind(&application_impl::new_connection, this, std::placeholders::_1) ); - if( _options->count("api-access") ) - _apiaccess = fc::json::from_file( _options->at("api-access").as() ) - .as(); - else - { - // TODO: Remove this generous default access policy - // when the UI logs in properly - _apiaccess = api_access(); - api_access_info wild_access; - wild_access.password_hash_b64 = "*"; - wild_access.password_salt_b64 = "*"; - wild_access.allowed_apis.push_back( "database_api" ); - wild_access.allowed_apis.push_back( "network_broadcast_api" ); - wild_access.allowed_apis.push_back( "history_api" ); - wild_access.allowed_apis.push_back( "crypto_api" ); - _apiaccess.permission_map["*"] = wild_access; - } + ilog("Configured websocket rpc to listen on ${ip}", ("ip",_options->at("rpc-endpoint").as())); + _websocket_server->listen( fc::ip::endpoint::from_string(_options->at("rpc-endpoint").as()) ); + _websocket_server->start_accept(); +} FC_CAPTURE_AND_RETHROW() } - reset_p2p_node(_data_dir); - reset_websocket_server(); - reset_websocket_tls_server(); - } FC_LOG_AND_RETHROW() } +void application_impl::reset_websocket_tls_server() +{ try { + if( !_options->count("rpc-tls-endpoint") ) + return; + if( !_options->count("server-pem") ) + { + wlog( "Please specify a server-pem to use rpc-tls-endpoint" ); + return; + } - optional< api_access_info > get_api_access_info(const string& username)const - { - optional< api_access_info > result; - auto it = _apiaccess.permission_map.find(username); - if( it == _apiaccess.permission_map.end() ) - { - it = _apiaccess.permission_map.find("*"); - if( it == _apiaccess.permission_map.end() ) - return result; - } - return it->second; - } + string password = _options->count("server-pem-password") ? _options->at("server-pem-password").as() : ""; + _websocket_tls_server = std::make_shared( _options->at("server-pem").as(), password ); + _websocket_tls_server->on_connection( std::bind(&application_impl::new_connection, this, std::placeholders::_1) ); - void set_api_access_info(const string& username, api_access_info&& permissions) - { - _apiaccess.permission_map.insert(std::make_pair(username, std::move(permissions))); - } + ilog("Configured websocket TLS rpc to listen on ${ip}", ("ip",_options->at("rpc-tls-endpoint").as())); + _websocket_tls_server->listen( fc::ip::endpoint::from_string(_options->at("rpc-tls-endpoint").as()) ); + _websocket_tls_server->start_accept(); +} FC_CAPTURE_AND_RETHROW() } - /** - * If delegate has the item, the network has no need to fetch it. 
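// --- Editor's sketch (not part of the patch): the credential extraction done in
// new_connection above. An "Authorization: Basic <base64(user:pass)>" header, when
// present, overrides the wildcard "*"/"*" login. The tiny decoder below is a stand-in
// for fc::base64_decode, just enough to make the sketch self-contained.
#include <string>
#include <utility>

std::string base64_decode(const std::string& in)
{
    static const std::string tbl =
        "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
    std::string out;
    int val = 0, bits = 0;
    for (char c : in) {
        auto pos = tbl.find(c);
        if (pos == std::string::npos) continue;     // skips '=' padding and whitespace
        val = (val << 6) | static_cast<int>(pos);
        bits += 6;
        if (bits >= 8) { out.push_back(char((val >> (bits - 8)) & 0xFF)); bits -= 8; }
    }
    return out;
}

// Default to the wildcard account unless a "Basic " Authorization header is present;
// the decoded payload is expected to look like "user:password".
std::pair<std::string, std::string> extract_credentials(const std::string& auth_header)
{
    std::string username = "*", password = "*";
    const std::string prefix = "Basic ";
    if (auth_header.compare(0, prefix.size(), prefix) == 0 && auth_header.size() > prefix.size())
    {
        std::string user_pass = base64_decode(auth_header.substr(prefix.size()));
        auto colon = user_pass.find(':');
        if (colon != std::string::npos) {
            username = user_pass.substr(0, colon);
            password = user_pass.substr(colon + 1);
        }
    }
    return {username, password};
}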
- */ - virtual bool has_item(const net::item_id& id) override - { - try - { - if( id.item_type == graphene::net::block_message_type ) - return _chain_db->is_known_block(id.item_hash); - else - return _chain_db->is_known_transaction(id.item_hash); - } - FC_CAPTURE_AND_RETHROW( (id) ) - } +void application_impl::set_dbg_init_key( graphene::chain::genesis_state_type& genesis, const std::string& init_key ) +{ + flat_set< std::string > initial_witness_names; + public_key_type init_pubkey( init_key ); + for( uint64_t i=0; i& contained_transaction_message_ids) override - { try { - - auto latency = graphene::time::now() - blk_msg.block.timestamp; - if (!sync_mode || blk_msg.block.block_num() % 10000 == 0) - { - const auto& witness = blk_msg.block.witness(*_chain_db); - const auto& witness_account = witness.witness_account(*_chain_db); - auto last_irr = _chain_db->get_dynamic_global_properties().last_irreversible_block_num; - ilog("Got block: #${n} time: ${t} latency: ${l} ms from: ${w} irreversible: ${i} (-${d})", - ("t",blk_msg.block.timestamp) - ("n", blk_msg.block.block_num()) - ("l", (latency.count()/1000)) - ("w",witness_account.name) - ("i",last_irr)("d",blk_msg.block.block_num()-last_irr) ); - } - try { - // TODO: in the case where this block is valid but on a fork that's too old for us to switch to, - // you can help the network code out by throwing a block_older_than_undo_history exception. - // when the net code sees that, it will stop trying to push blocks from that chain, but - // leave that peer connected so that they can get sync blocks from us - bool result = _chain_db->push_block(blk_msg.block, (_is_block_producer | _force_validate) ? database::skip_nothing : database::skip_transaction_signatures); - - // the block was accepted, so we now know all of the transactions contained in the block - if (!sync_mode) - { - // if we're not in sync mode, there's a chance we will be seeing some transactions - // included in blocks before we see the free-floating transaction itself. If that - // happens, there's no reason to fetch the transactions, so construct a list of the - // transaction message ids we no longer need. 
- // during sync, it is unlikely that we'll see any old - for (const processed_transaction& transaction : blk_msg.block.transactions) - { - graphene::net::trx_message transaction_message(transaction); - contained_transaction_message_ids.push_back(graphene::net::message(transaction_message).id()); - } - } - return result; - } catch ( const graphene::chain::unlinkable_block_exception& e ) { - // translate to a graphene::net exception - elog("Error when pushing block:\n${e}", ("e", e.to_detail_string())); - FC_THROW_EXCEPTION(graphene::net::unlinkable_block_exception, "Error when pushing block:\n${e}", ("e", e.to_detail_string())); - } catch( const fc::exception& e ) { - elog("Error when pushing block:\n${e}", ("e", e.to_detail_string())); - throw; - } +void application_impl::set_api_limit() { + if (_options->count("api-limit-get-account-history-operations")) { + _app_options.api_limit_get_account_history_operations = _options->at("api-limit-get-account-history-operations").as(); + } + if(_options->count("api-limit-get-account-history")){ + _app_options.api_limit_get_account_history = _options->at("api-limit-get-account-history").as(); + } + if(_options->count("api-limit-get-grouped-limit-orders")){ + _app_options.api_limit_get_grouped_limit_orders = _options->at("api-limit-get-grouped-limit-orders").as(); + } + if(_options->count("api-limit-get-relative-account-history")){ + _app_options.api_limit_get_relative_account_history = _options->at("api-limit-get-relative-account-history").as(); + } + if(_options->count("api-limit-get-account-history-by-operations")){ + _app_options.api_limit_get_account_history_by_operations = _options->at("api-limit-get-account-history-by-operations").as(); + } + if(_options->count("api-limit-get-asset-holders")){ + _app_options.api_limit_get_asset_holders = _options->at("api-limit-get-asset-holders").as(); + } + if(_options->count("api-limit-get-key-references")){ + _app_options.api_limit_get_key_references = _options->at("api-limit-get-key-references").as(); + } +} - if( !_is_finished_syncing && !sync_mode ) +void application_impl::startup() +{ try { + fc::create_directories(_data_dir / "blockchain"); + + auto initial_state = [this] { + ilog("Initializing database..."); + if( _options->count("genesis-json") ) + { + std::string genesis_str; + fc::read_file_contents( _options->at("genesis-json").as(), genesis_str ); + graphene::chain::genesis_state_type genesis = fc::json::from_string( genesis_str ).as( 20 ); + bool modified_genesis = false; + if( _options->count("genesis-timestamp") ) { - _is_finished_syncing = true; - _self->syncing_finished(); + genesis.initial_timestamp = fc::time_point_sec( fc::time_point::now() ) + + genesis.initial_parameters.block_interval + + _options->at("genesis-timestamp").as(); + genesis.initial_timestamp -= ( genesis.initial_timestamp.sec_since_epoch() + % genesis.initial_parameters.block_interval ); + modified_genesis = true; + + ilog( + "Used genesis timestamp: ${timestamp} (PLEASE RECORD THIS)", + ("timestamp", genesis.initial_timestamp.to_iso_string()) + ); } - } FC_CAPTURE_AND_RETHROW( (blk_msg)(sync_mode) ) } - - virtual void handle_transaction(const graphene::net::trx_message& transaction_message) override - { try { - static fc::time_point last_call; - static int trx_count = 0; - ++trx_count; - auto now = fc::time_point::now(); - if( now - last_call > fc::seconds(1) ) { - ilog("Got ${c} transactions from network", ("c",trx_count) ); - last_call = now; - trx_count = 0; + if( _options->count("dbg-init-key") ) + { + std::string 
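// --- Editor's sketch (not part of the patch): the command-line pattern behind
// set_api_limit() above -- each "api-limit-*" option overrides a compiled-in default only
// when the operator actually supplied it. The option names and struct are an illustrative
// subset; boost::program_options is the library the node already uses for its options.
#include <cstdint>
#include <iostream>
#include <boost/program_options.hpp>

namespace bpo = boost::program_options;

struct app_options_sketch {                        // illustrative subset of the app options
    uint64_t api_limit_get_account_history = 100;
    uint64_t api_limit_get_asset_holders   = 100;
};

int main(int argc, char** argv)
{
    bpo::options_description cfg("API limits");
    cfg.add_options()
        ("api-limit-get-account-history", bpo::value<uint64_t>(), "maximum records per call")
        ("api-limit-get-asset-holders",   bpo::value<uint64_t>(), "maximum holders per call");

    bpo::variables_map options;
    bpo::store(bpo::parse_command_line(argc, argv, cfg), options);
    bpo::notify(options);

    // Same pattern as set_api_limit(): only override the default when the option is set.
    app_options_sketch app;
    if (options.count("api-limit-get-account-history"))
        app.api_limit_get_account_history = options["api-limit-get-account-history"].as<uint64_t>();
    if (options.count("api-limit-get-asset-holders"))
        app.api_limit_get_asset_holders = options["api-limit-get-asset-holders"].as<uint64_t>();

    std::cout << app.api_limit_get_account_history << " "
              << app.api_limit_get_asset_holders << "\n";
    return 0;
}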
init_key = _options->at( "dbg-init-key" ).as(); + FC_ASSERT( genesis.initial_witness_candidates.size() >= genesis.initial_active_witnesses ); + set_dbg_init_key( genesis, init_key ); + modified_genesis = true; + ilog("Set init witness key to ${init_key}", ("init_key", init_key)); } + if( modified_genesis ) + { + wlog("WARNING: GENESIS WAS MODIFIED, YOUR CHAIN ID MAY BE DIFFERENT"); + genesis_str += "BOGUS"; + genesis.initial_chain_id = fc::sha256::hash( genesis_str ); + } + else + genesis.initial_chain_id = fc::sha256::hash( genesis_str ); + return genesis; + } + else + { + std::string egenesis_json; + graphene::egenesis::compute_egenesis_json( egenesis_json ); + FC_ASSERT( egenesis_json != "" ); + FC_ASSERT( graphene::egenesis::get_egenesis_json_hash() == fc::sha256::hash( egenesis_json ) ); + auto genesis = fc::json::from_string( egenesis_json ).as( 20 ); + genesis.initial_chain_id = fc::sha256::hash( egenesis_json ); + return genesis; + } + }; - _chain_db->push_transaction( transaction_message.trx ); - } FC_CAPTURE_AND_RETHROW( (transaction_message) ) } + if( _options->count("resync-blockchain") ) + _chain_db->wipe(_data_dir / "blockchain", true); - virtual void handle_message(const message& message_to_process) override + flat_map loaded_checkpoints; + if( _options->count("checkpoint") ) + { + auto cps = _options->at("checkpoint").as>(); + loaded_checkpoints.reserve( cps.size() ); + for( auto cp : cps ) { - // not a transaction, not a block - FC_THROW( "Invalid Message Type" ); + auto item = fc::json::from_string(cp).as >( 2 ); + loaded_checkpoints[item.first] = item.second; } + } + _chain_db->add_checkpoints( loaded_checkpoints ); - bool is_included_block(const block_id_type& block_id) + if( _options->count("enable-standby-votes-tracking") ) + { + _chain_db->enable_standby_votes_tracking( _options->at("enable-standby-votes-tracking").as() ); + } + + if( _options->count("replay-blockchain") || _options->count("revalidate-blockchain") ) + _chain_db->wipe( _data_dir / "blockchain", false ); + + try + { + // these flags are used in open() only, i. e. 
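// --- Editor's sketch (not part of the patch): the arithmetic behind the genesis-timestamp
// adjustment above -- push the timestamp forward by one block interval plus the requested
// offset, then round down to a slot boundary so the first block lands on an even interval.
// Plain integer seconds stand in for fc::time_point_sec.
#include <cstdint>
#include <iostream>

uint32_t align_genesis_timestamp(uint32_t now_sec, uint32_t block_interval, uint32_t offset_sec)
{
    uint32_t ts = now_sec + block_interval + offset_sec;
    ts -= ts % block_interval;          // round down to a multiple of block_interval
    return ts;
}

int main()
{
    // e.g. now = 1000003 s, 5-second blocks, no extra offset -> 1000005
    std::cout << align_genesis_timestamp(1000003, 5, 0) << "\n";
    return 0;
}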
during replay + uint32_t skip; + if( _options->count("revalidate-blockchain") ) // see also handle_block() { - uint32_t block_num = block_header::num_from_id(block_id); - block_id_type block_id_in_preferred_chain = _chain_db->get_block_id_for_num(block_num); - return block_id == block_id_in_preferred_chain; + if( !loaded_checkpoints.empty() ) + wlog( "Warning - revalidate will not validate before last checkpoint" ); + if( _options->count("force-validate") ) + skip = graphene::chain::database::skip_nothing; + else + skip = graphene::chain::database::skip_transaction_signatures; } + else // no revalidate, skip most checks + skip = graphene::chain::database::skip_witness_signature | + graphene::chain::database::skip_block_size_check | + graphene::chain::database::skip_merkle_check | + graphene::chain::database::skip_transaction_signatures | + graphene::chain::database::skip_transaction_dupe_check | + graphene::chain::database::skip_tapos_check | + graphene::chain::database::skip_witness_schedule_check; + + graphene::chain::detail::with_skip_flags( *_chain_db, skip, [this,&initial_state] () { + _chain_db->open( _data_dir / "blockchain", initial_state, GRAPHENE_CURRENT_DB_VERSION ); + }); + } + catch( const fc::exception& e ) + { + elog( "Caught exception ${e} in open(), you might want to force a replay", ("e", e.to_detail_string()) ); + throw; + } - /** - * Assuming all data elements are ordered in some way, this method should - * return up to limit ids that occur *after* the last ID in synopsis that - * we recognize. - * - * On return, remaining_item_count will be set to the number of items - * in our blockchain after the last item returned in the result, - * or 0 if the result contains the last item in the blockchain - */ - virtual std::vector get_block_ids(const std::vector& blockchain_synopsis, - uint32_t& remaining_item_count, - uint32_t limit) override - { try { - vector result; - remaining_item_count = 0; - if( _chain_db->head_block_num() == 0 ) - return result; - - result.reserve(limit); - block_id_type last_known_block_id; - - if (blockchain_synopsis.empty() || - (blockchain_synopsis.size() == 1 && blockchain_synopsis[0] == block_id_type())) - { - // peer has sent us an empty synopsis meaning they have no blocks. - // A bug in old versions would cause them to send a synopsis containing block 000000000 - // when they had an empty blockchain, so pretend they sent the right thing here. 
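// --- Editor's sketch (not part of the patch): the skip-flag selection above expressed as
// plain bit flags. "revalidate-blockchain" keeps (or, with force-validate, fully enforces)
// signature checks, while an ordinary replay trusts the locally stored blocks and ORs
// together most of the skip flags. The flag names mirror the database flags loosely; the
// actual bit values are illustrative.
#include <cstdint>
#include <iostream>

enum skip_flags : uint32_t {
    skip_nothing                = 0,
    skip_witness_signature      = 1u << 0,
    skip_transaction_signatures = 1u << 1,
    skip_transaction_dupe_check = 1u << 2,
    skip_merkle_check           = 1u << 3,
    skip_block_size_check       = 1u << 4,
    skip_tapos_check            = 1u << 5,
    skip_witness_schedule_check = 1u << 6,
};

uint32_t choose_skip(bool revalidate, bool force_validate)
{
    if (revalidate)
        return force_validate ? skip_nothing : skip_transaction_signatures;

    // plain replay: skip most checks
    return skip_witness_signature | skip_block_size_check | skip_merkle_check |
           skip_transaction_signatures | skip_transaction_dupe_check |
           skip_tapos_check | skip_witness_schedule_check;
}

int main()
{
    uint32_t skip = choose_skip(/*revalidate=*/false, /*force_validate=*/false);
    if (skip & skip_merkle_check)
        std::cout << "merkle check will be skipped during replay\n";
    return 0;
}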
+ if( _options->count("force-validate") ) + { + ilog( "All transaction signatures will be validated" ); + _force_validate = true; + } - // do nothing, leave last_known_block_id set to zero - } - else - { - bool found_a_block_in_synopsis = false; - for (const item_hash_t& block_id_in_synopsis : boost::adaptors::reverse(blockchain_synopsis)) - if (block_id_in_synopsis == block_id_type() || - (_chain_db->is_known_block(block_id_in_synopsis) && is_included_block(block_id_in_synopsis))) - { - last_known_block_id = block_id_in_synopsis; - found_a_block_in_synopsis = true; - break; - } - if (!found_a_block_in_synopsis) - FC_THROW_EXCEPTION(graphene::net::peer_is_on_an_unreachable_fork, "Unable to provide a list of blocks starting at any of the blocks in peer's synopsis"); - } - for( uint32_t num = block_header::num_from_id(last_known_block_id); - num <= _chain_db->head_block_num() && result.size() < limit; - ++num ) - if( num > 0 ) - result.push_back(_chain_db->get_block_id_for_num(num)); + if ( _options->count("enable-subscribe-to-all") ) + _app_options.enable_subscribe_to_all = _options->at( "enable-subscribe-to-all" ).as(); + + set_api_limit(); + + if( _active_plugins.find( "market_history" ) != _active_plugins.end() ) + _app_options.has_market_history_plugin = true; + + if( _options->count("api-access") ) { + + fc::path api_access_file = _options->at("api-access").as(); + + FC_ASSERT( fc::exists(api_access_file), + "Failed to load file from ${path}", ("path", api_access_file) ); + + _apiaccess = fc::json::from_file( api_access_file ).as( 20 ); + ilog( "Using api access file from ${path}", + ("path", api_access_file) ); + } + else + { + // TODO: Remove this generous default access policy + // when the UI logs in properly + _apiaccess = api_access(); + api_access_info wild_access; + wild_access.password_hash_b64 = "*"; + wild_access.password_salt_b64 = "*"; + wild_access.allowed_apis.push_back( "database_api" ); + wild_access.allowed_apis.push_back( "network_broadcast_api" ); + wild_access.allowed_apis.push_back( "history_api" ); + wild_access.allowed_apis.push_back( "orders_api" ); + _apiaccess.permission_map["*"] = wild_access; + } - if( !result.empty() && block_header::num_from_id(result.back()) < _chain_db->head_block_num() ) - remaining_item_count = _chain_db->head_block_num() - block_header::num_from_id(result.back()); + reset_p2p_node(_data_dir); + reset_websocket_server(); + reset_websocket_tls_server(); +} FC_LOG_AND_RETHROW() } +optional< api_access_info > application_impl::get_api_access_info(const string& username)const +{ + optional< api_access_info > result; + auto it = _apiaccess.permission_map.find(username); + if( it == _apiaccess.permission_map.end() ) + { + it = _apiaccess.permission_map.find("*"); + if( it == _apiaccess.permission_map.end() ) return result; - } FC_CAPTURE_AND_RETHROW( (blockchain_synopsis)(remaining_item_count)(limit) ) } - - /** - * Given the hash of the requested data, fetch the body. 
- */ - virtual message get_item(const item_id& id) override - { try { - // ilog("Request for item ${id}", ("id", id)); - if( id.item_type == graphene::net::block_message_type ) - { - auto opt_block = _chain_db->fetch_block_by_id(id.item_hash); - if( !opt_block ) - elog("Couldn't find block ${id} -- corresponding ID in our chain is ${id2}", - ("id", id.item_hash)("id2", _chain_db->get_block_id_for_num(block_header::num_from_id(id.item_hash)))); - FC_ASSERT( opt_block.valid() ); - // ilog("Serving up block #${num}", ("num", opt_block->block_num())); - return block_message(std::move(*opt_block)); - } - return trx_message( _chain_db->get_recent_transaction( id.item_hash ) ); - } FC_CAPTURE_AND_RETHROW( (id) ) } + } + return it->second; +} - virtual chain_id_type get_chain_id()const override - { - return _chain_db->get_chain_id(); - } +void application_impl::set_api_access_info(const string& username, api_access_info&& permissions) +{ + _apiaccess.permission_map.insert(std::make_pair(username, std::move(permissions))); +} - /** - * Returns a synopsis of the blockchain used for syncing. This consists of a list of - * block hashes at intervals exponentially increasing towards the genesis block. - * When syncing to a peer, the peer uses this data to determine if we're on the same - * fork as they are, and if not, what blocks they need to send us to get us on their - * fork. - * - * In the over-simplified case, this is a straighforward synopsis of our current - * preferred blockchain; when we first connect up to a peer, this is what we will be sending. - * It looks like this: - * If the blockchain is empty, it will return the empty list. - * If the blockchain has one block, it will return a list containing just that block. - * If it contains more than one block: - * the first element in the list will be the hash of the highest numbered block that - * we cannot undo - * the second element will be the hash of an item at the half way point in the undoable - * segment of the blockchain - * the third will be ~3/4 of the way through the undoable segment of the block chain - * the fourth will be at ~7/8... - * &c. - * the last item in the list will be the hash of the most recent block on our preferred chain - * so if the blockchain had 26 blocks labeled a - z, the synopsis would be: - * a n u x z - * the idea being that by sending a small (<30) number of block ids, we can summarize a huge - * blockchain. The block ids are more dense near the end of the chain where because we are - * more likely to be almost in sync when we first connect, and forks are likely to be short. - * If the peer we're syncing with in our example is on a fork that started at block 'v', - * then they will reply to our synopsis with a list of all blocks starting from block 'u', - * the last block they know that we had in common. - * - * In the real code, there are several complications. - * - * First, as an optimization, we don't usually send a synopsis of the entire blockchain, we - * send a synopsis of only the segment of the blockchain that we have undo data for. If their - * fork doesn't build off of something in our undo history, we would be unable to switch, so there's - * no reason to fetch the blocks. - * - * Second, when a peer replies to our initial synopsis and gives us a list of the blocks they think - * we are missing, they only send a chunk of a few thousand blocks at once. 
After we get those - * block ids, we need to request more blocks by sending another synopsis (we can't just say "send me - * the next 2000 ids" because they may have switched forks themselves and they don't track what - * they've sent us). For faster performance, we want to get a fairly long list of block ids first, - * then start downloading the blocks. - * The peer doesn't handle these follow-up block id requests any different from the initial request; - * it treats the synopsis we send as our blockchain and bases its response entirely off that. So to - * get the response we want (the next chunk of block ids following the last one they sent us, or, - * failing that, the shortest fork off of the last list of block ids they sent), we need to construct - * a synopsis as if our blockchain was made up of: - * 1. the blocks in our block chain up to the fork point (if there is a fork) or the head block (if no fork) - * 2. the blocks we've already pushed from their fork (if there's a fork) - * 3. the block ids they've previously sent us - * Segment 3 is handled in the p2p code, it just tells us the number of blocks it has (in - * number_of_blocks_after_reference_point) so we can leave space in the synopsis for them. - * We're responsible for constructing the synopsis of Segments 1 and 2 from our active blockchain and - * fork database. The reference_point parameter is the last block from that peer that has been - * successfully pushed to the blockchain, so that tells us whether the peer is on a fork or on - * the main chain. - */ - virtual std::vector get_blockchain_synopsis(const item_hash_t& reference_point, - uint32_t number_of_blocks_after_reference_point) override - { try { - std::vector synopsis; - synopsis.reserve(30); - uint32_t high_block_num; - uint32_t non_fork_high_block_num; - uint32_t low_block_num = _chain_db->last_non_undoable_block_num(); - std::vector fork_history; - - if (reference_point != item_hash_t()) - { - // the node is asking for a summary of the block chain up to a specified - // block, which may or may not be on a fork - // for now, assume it's not on a fork - if (is_included_block(reference_point)) - { - // reference_point is a block we know about and is on the main chain - uint32_t reference_point_block_num = block_header::num_from_id(reference_point); - assert(reference_point_block_num > 0); - high_block_num = reference_point_block_num; - non_fork_high_block_num = high_block_num; - - if (reference_point_block_num < low_block_num) - { - // we're on the same fork (at least as far as reference_point) but we've passed - // reference point and could no longer undo that far if we diverged after that - // block. This should probably only happen due to a race condition where - // the network thread calls this function, and then immediately pushes a bunch of blocks, - // then the main thread finally processes this function. - // with the current framework, there's not much we can do to tell the network - // thread what our current head block is, so we'll just pretend that - // our head is actually the reference point. 
- // this *may* enable us to fetch blocks that we're unable to push, but that should - // be a rare case (and correctly handled) - low_block_num = reference_point_block_num; - } - } - else - { - // block is a block we know about, but it is on a fork - try - { - fork_history = _chain_db->get_block_ids_on_fork(reference_point); - // returns a vector where the last element is the common ancestor with the preferred chain, - // and the first element is the reference point you passed in - assert(fork_history.size() >= 2); - - if( fork_history.front() != reference_point ) - { - edump( (fork_history)(reference_point) ); - assert(fork_history.front() == reference_point); - } - block_id_type last_non_fork_block = fork_history.back(); - fork_history.pop_back(); // remove the common ancestor - boost::reverse(fork_history); - - if (last_non_fork_block == block_id_type()) // if the fork goes all the way back to genesis (does graphene's fork db allow this?) - non_fork_high_block_num = 0; - else - non_fork_high_block_num = block_header::num_from_id(last_non_fork_block); - - high_block_num = non_fork_high_block_num + fork_history.size(); - assert(high_block_num == block_header::num_from_id(fork_history.back())); - } - catch (const fc::exception& e) - { - // unable to get fork history for some reason. maybe not linked? - // we can't return a synopsis of its chain - elog("Unable to construct a blockchain synopsis for reference hash ${hash}: ${exception}", ("hash", reference_point)("exception", e)); - throw; - } - if (non_fork_high_block_num < low_block_num) - { - wlog("Unable to generate a usable synopsis because the peer we're generating it for forked too long ago " - "(our chains diverge after block #${non_fork_high_block_num} but only undoable to block #${low_block_num})", - ("low_block_num", low_block_num) - ("non_fork_high_block_num", non_fork_high_block_num)); - FC_THROW_EXCEPTION(graphene::net::block_older_than_undo_history, "Peer is are on a fork I'm unable to switch to"); - } - } - } - else - { - // no reference point specified, summarize the whole block chain - high_block_num = _chain_db->head_block_num(); - non_fork_high_block_num = high_block_num; - if (high_block_num == 0) - return synopsis; // we have no blocks - } +/** + * If delegate has the item, the network has no need to fetch it. + */ +bool application_impl::has_item(const net::item_id& id) +{ + try + { + if( id.item_type == graphene::net::block_message_type ) + return _chain_db->is_known_block(id.item_hash); + else + return _chain_db->is_known_transaction(id.item_hash); + } + FC_CAPTURE_AND_RETHROW( (id) ) +} - // at this point: - // low_block_num is the block before the first block we can undo, - // non_fork_high_block_num is the block before the fork (if the peer is on a fork, or otherwise it is the same as high_block_num) - // high_block_num is the block number of the reference block, or the end of the chain if no reference provided +/** + * @brief allows the application to validate an item prior to broadcasting to peers. + * + * @param sync_mode true if the message was fetched through the sync process, false during normal operation + * @returns true if this message caused the blockchain to switch forks, false if it did not + * + * @throws exception if error validating the item, otherwise the item is safe to broadcast on. 
+ */ +bool application_impl::handle_block(const graphene::net::block_message& blk_msg, bool sync_mode, + std::vector& contained_transaction_message_ids) +{ try { - // true_high_block_num is the ending block number after the network code appends any item ids it - // knows about that we don't - uint32_t true_high_block_num = high_block_num + number_of_blocks_after_reference_point; - do - { - // for each block in the synopsis, figure out where to pull the block id from. - // if it's <= non_fork_high_block_num, we grab it from the main blockchain; - // if it's not, we pull it from the fork history - if (low_block_num <= non_fork_high_block_num) - synopsis.push_back(_chain_db->get_block_id_for_num(low_block_num)); - else - synopsis.push_back(fork_history[low_block_num - non_fork_high_block_num - 1]); - low_block_num += (true_high_block_num - low_block_num + 2) / 2; - } - while (low_block_num <= high_block_num); - - idump((synopsis)); - return synopsis; - } FC_CAPTURE_AND_RETHROW() } - - /** - * Call this after the call to handle_message succeeds. - * - * @param item_type the type of the item we're synchronizing, will be the same as item passed to the sync_from() call - * @param item_count the number of items known to the node that haven't been sent to handle_item() yet. - * After `item_count` more calls to handle_item(), the node will be in sync - */ - virtual void sync_status(uint32_t item_type, uint32_t item_count) override - { - // any status reports to GUI go here - } + auto latency = fc::time_point::now() - blk_msg.block.timestamp; + if (!sync_mode || blk_msg.block.block_num() % 10000 == 0) + { + const auto& witness = blk_msg.block.witness(*_chain_db); + const auto& witness_account = witness.witness_account(*_chain_db); + auto last_irr = _chain_db->get_dynamic_global_properties().last_irreversible_block_num; + ilog("Got block: #${n} ${bid} time: ${t} transaction(s): ${x} latency: ${l} ms from: ${w} irreversible: ${i} (-${d})", + ("t",blk_msg.block.timestamp) + ("n", blk_msg.block.block_num()) + ("bid", blk_msg.block.id()) + ("x", blk_msg.block.transactions.size()) + ("l", (latency.count()/1000)) + ("w",witness_account.name) + ("i",last_irr)("d",blk_msg.block.block_num()-last_irr) ); + } + FC_ASSERT( (latency.count()/1000) > -5000, "Rejecting block with timestamp in the future" ); - /** - * Call any time the number of connected peers changes. - */ - virtual void connection_count_changed(uint32_t c) override + try { + const uint32_t skip = (_is_block_producer | _force_validate) ? + database::skip_nothing : database::skip_transaction_signatures; + bool result = valve.do_serial( [this,&blk_msg,skip] () { + _chain_db->precompute_parallel( blk_msg.block, skip ).wait(); + }, [this,&blk_msg,skip] () { + // TODO: in the case where this block is valid but on a fork that's too old for us to switch to, + // you can help the network code out by throwing a block_older_than_undo_history exception. + // when the net code sees that, it will stop trying to push blocks from that chain, but + // leave that peer connected so that they can get sync blocks from us + return _chain_db->push_block( blk_msg.block, skip ); + }); + + // the block was accepted, so we now know all of the transactions contained in the block + if (!sync_mode) { - // any status reports to GUI go here + // if we're not in sync mode, there's a chance we will be seeing some transactions + // included in blocks before we see the free-floating transaction itself. 
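// --- Editor's sketch (not part of the patch): a conceptual stand-in for the
// valve.do_serial(...) call in handle_block above. Signature precomputation for the next
// block may overlap with applying the previous one, but push_block/apply stays strictly
// serial and in block order. std::async is used here purely for illustration; the real
// code runs on fc's fibers and its own serial valve, not std::future.
#include <future>
#include <iostream>
#include <vector>

struct block_stub { int num; };

void precompute(const block_stub&)   { /* e.g. recover transaction signatures */ }
void apply(const block_stub& b)      { std::cout << "applied block " << b.num << "\n"; }

void process_blocks(const std::vector<block_stub>& blocks)
{
    std::future<void> prev_apply;                 // most recent apply() still in flight
    for (const auto& b : blocks)
    {
        auto pre = std::async(std::launch::async, [b] { precompute(b); });
        if (prev_apply.valid())
            prev_apply.get();                     // keep apply() calls in block order
        pre.get();                                // precompute must finish before apply
        prev_apply = std::async(std::launch::async, [b] { apply(b); });
    }
    if (prev_apply.valid())
        prev_apply.get();
}

int main()
{
    process_blocks({{1}, {2}, {3}});
    return 0;
}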
If that + // happens, there's no reason to fetch the transactions, so construct a list of the + // transaction message ids we no longer need. + // during sync, it is unlikely that we'll see any old + contained_transaction_message_ids.reserve( contained_transaction_message_ids.size() + + blk_msg.block.transactions.size() ); + for (const processed_transaction& transaction : blk_msg.block.transactions) + { + graphene::net::trx_message transaction_message(transaction); + contained_transaction_message_ids.emplace_back(graphene::net::message(transaction_message).id()); + } } - virtual uint32_t get_block_number(const item_hash_t& block_id) override - { try { - return block_header::num_from_id(block_id); - } FC_CAPTURE_AND_RETHROW( (block_id) ) } - - /** - * Returns the time a block was produced (if block_id = 0, returns genesis time). - * If we don't know about the block, returns time_point_sec::min() - */ - virtual fc::time_point_sec get_block_time(const item_hash_t& block_id) override - { try { - auto opt_block = _chain_db->fetch_block_by_id( block_id ); - if( opt_block.valid() ) return opt_block->timestamp; - return fc::time_point_sec::min(); - } FC_CAPTURE_AND_RETHROW( (block_id) ) } - - /** returns graphene::time::now() */ - virtual fc::time_point_sec get_blockchain_now() override - { - return graphene::time::now(); - } + return result; + } catch ( const graphene::chain::unlinkable_block_exception& e ) { + // translate to a graphene::net exception + elog("Error when pushing block:\n${e}", ("e", e.to_detail_string())); + FC_THROW_EXCEPTION( graphene::net::unlinkable_block_exception, + "Error when pushing block:\n${e}", + ("e", e.to_detail_string()) ); + } catch( const fc::exception& e ) { + elog("Error when pushing block:\n${e}", ("e", e.to_detail_string())); + throw; + } - virtual item_hash_t get_head_block_id() const override - { - return _chain_db->head_block_id(); - } + if( !_is_finished_syncing && !sync_mode ) + { + _is_finished_syncing = true; + _self->syncing_finished(); + } +} FC_CAPTURE_AND_RETHROW( (blk_msg)(sync_mode) ) return false; } + +void application_impl::handle_transaction(const graphene::net::trx_message& transaction_message) +{ try { + static fc::time_point last_call; + static int trx_count = 0; + ++trx_count; + auto now = fc::time_point::now(); + if( now - last_call > fc::seconds(1) ) { + ilog("Got ${c} transactions from network", ("c",trx_count) ); + last_call = now; + trx_count = 0; + } - virtual uint32_t estimate_last_known_fork_from_git_revision_timestamp(uint32_t unix_timestamp) const override - { - return 0; // there are no forks in graphene - } + _chain_db->precompute_parallel( transaction_message.trx ).wait(); + _chain_db->push_transaction( transaction_message.trx ); +} FC_CAPTURE_AND_RETHROW( (transaction_message) ) } + +void application_impl::handle_message(const message& message_to_process) +{ + // not a transaction, not a block + FC_THROW( "Invalid Message Type" ); +} + +bool application_impl::is_included_block(const block_id_type& block_id) +{ + uint32_t block_num = block_header::num_from_id(block_id); + block_id_type block_id_in_preferred_chain = _chain_db->get_block_id_for_num(block_num); + return block_id == block_id_in_preferred_chain; +} + +/** + * Assuming all data elements are ordered in some way, this method should + * return up to limit ids that occur *after* the last ID in synopsis that + * we recognize. 
+ * + * On return, remaining_item_count will be set to the number of items + * in our blockchain after the last item returned in the result, + * or 0 if the result contains the last item in the blockchain + */ +std::vector application_impl::get_block_ids(const std::vector& blockchain_synopsis, + uint32_t& remaining_item_count, + uint32_t limit) +{ try { + vector result; + remaining_item_count = 0; + if( _chain_db->head_block_num() == 0 ) + return result; + + result.reserve(limit); + block_id_type last_known_block_id; + + if (blockchain_synopsis.empty() || + (blockchain_synopsis.size() == 1 && blockchain_synopsis[0] == block_id_type())) + { + // peer has sent us an empty synopsis meaning they have no blocks. + // A bug in old versions would cause them to send a synopsis containing block 000000000 + // when they had an empty blockchain, so pretend they sent the right thing here. + + // do nothing, leave last_known_block_id set to zero + } + else + { + bool found_a_block_in_synopsis = false; + for (const item_hash_t& block_id_in_synopsis : boost::adaptors::reverse(blockchain_synopsis)) + if (block_id_in_synopsis == block_id_type() || + (_chain_db->is_known_block(block_id_in_synopsis) && is_included_block(block_id_in_synopsis))) + { + last_known_block_id = block_id_in_synopsis; + found_a_block_in_synopsis = true; + break; + } + if (!found_a_block_in_synopsis) + FC_THROW_EXCEPTION( graphene::net::peer_is_on_an_unreachable_fork, + "Unable to provide a list of blocks starting at any of the blocks in peer's synopsis" ); + } + for( uint32_t num = block_header::num_from_id(last_known_block_id); + num <= _chain_db->head_block_num() && result.size() < limit; + ++num ) + if( num > 0 ) + result.push_back(_chain_db->get_block_id_for_num(num)); + + if( !result.empty() && block_header::num_from_id(result.back()) < _chain_db->head_block_num() ) + remaining_item_count = _chain_db->head_block_num() - block_header::num_from_id(result.back()); - virtual void error_encountered(const std::string& message, const fc::oexception& error) override + return result; +} FC_CAPTURE_AND_RETHROW( (blockchain_synopsis)(remaining_item_count)(limit) ) } + +/** + * Given the hash of the requested data, fetch the body. + */ +message application_impl::get_item(const item_id& id) +{ try { + // ilog("Request for item ${id}", ("id", id)); + if( id.item_type == graphene::net::block_message_type ) + { + auto opt_block = _chain_db->fetch_block_by_id(id.item_hash); + if( !opt_block ) + elog("Couldn't find block ${id} -- corresponding ID in our chain is ${id2}", + ("id", id.item_hash)("id2", _chain_db->get_block_id_for_num(block_header::num_from_id(id.item_hash)))); + FC_ASSERT( opt_block.valid() ); + // ilog("Serving up block #${num}", ("num", opt_block->block_num())); + return block_message(std::move(*opt_block)); + } + return trx_message( _chain_db->get_recent_transaction( id.item_hash ) ); +} FC_CAPTURE_AND_RETHROW( (id) ) } + +chain_id_type application_impl::get_chain_id() const +{ + return _chain_db->get_chain_id(); +} + +/** + * Returns a synopsis of the blockchain used for syncing. This consists of a list of + * block hashes at intervals exponentially increasing towards the genesis block. + * When syncing to a peer, the peer uses this data to determine if we're on the same + * fork as they are, and if not, what blocks they need to send us to get us on their + * fork. 
+ * + * In the over-simplified case, this is a straightforward synopsis of our current + * preferred blockchain; when we first connect up to a peer, this is what we will be sending. + * It looks like this: + * If the blockchain is empty, it will return the empty list. + * If the blockchain has one block, it will return a list containing just that block. + * If it contains more than one block: + * the first element in the list will be the hash of the highest numbered block that + * we cannot undo + * the second element will be the hash of an item at the half way point in the undoable + * segment of the blockchain + * the third will be ~3/4 of the way through the undoable segment of the block chain + * the fourth will be at ~7/8... + * &c. + * the last item in the list will be the hash of the most recent block on our preferred chain + * so if the blockchain had 26 blocks labeled a - z, the synopsis would be: + * a n u x z + * the idea being that by sending a small (<30) number of block ids, we can summarize a huge + * blockchain. The block ids are more dense near the end of the chain because we are + * more likely to be almost in sync when we first connect, and forks are likely to be short. + * If the peer we're syncing with in our example is on a fork that started at block 'v', + * then they will reply to our synopsis with a list of all blocks starting from block 'u', + * the last block they know that we had in common. + * + * In the real code, there are several complications. + * + * First, as an optimization, we don't usually send a synopsis of the entire blockchain, we + * send a synopsis of only the segment of the blockchain that we have undo data for. If their + * fork doesn't build off of something in our undo history, we would be unable to switch, so there's + * no reason to fetch the blocks. + * + * Second, when a peer replies to our initial synopsis and gives us a list of the blocks they think + * we are missing, they only send a chunk of a few thousand blocks at once. After we get those + * block ids, we need to request more blocks by sending another synopsis (we can't just say "send me + * the next 2000 ids" because they may have switched forks themselves and they don't track what + * they've sent us). For faster performance, we want to get a fairly long list of block ids first, + * then start downloading the blocks. + * The peer doesn't handle these follow-up block id requests any differently from the initial request; + * it treats the synopsis we send as our blockchain and bases its response entirely off that. So to + * get the response we want (the next chunk of block ids following the last one they sent us, or, + * failing that, the shortest fork off of the last list of block ids they sent), we need to construct + * a synopsis as if our blockchain was made up of: + * 1. the blocks in our block chain up to the fork point (if there is a fork) or the head block (if no fork) + * 2. the blocks we've already pushed from their fork (if there's a fork) + * 3. the block ids they've previously sent us + * Segment 3 is handled in the p2p code; it just tells us the number of blocks it has (in + * number_of_blocks_after_reference_point) so we can leave space in the synopsis for them. + * We're responsible for constructing the synopsis of Segments 1 and 2 from our active blockchain and + * fork database.
The reference_point parameter is the last block from that peer that has been + * successfully pushed to the blockchain, so that tells us whether the peer is on a fork or on + * the main chain. + */ +std::vector application_impl::get_blockchain_synopsis(const item_hash_t& reference_point, + uint32_t number_of_blocks_after_reference_point) +{ try { + std::vector synopsis; + synopsis.reserve(30); + uint32_t high_block_num; + uint32_t non_fork_high_block_num; + uint32_t low_block_num = _chain_db->last_non_undoable_block_num(); + std::vector fork_history; + + if (reference_point != item_hash_t()) + { + // the node is asking for a summary of the block chain up to a specified + // block, which may or may not be on a fork + // for now, assume it's not on a fork + if (is_included_block(reference_point)) { - // notify GUI or something cool + // reference_point is a block we know about and is on the main chain + uint32_t reference_point_block_num = block_header::num_from_id(reference_point); + assert(reference_point_block_num > 0); + high_block_num = reference_point_block_num; + non_fork_high_block_num = high_block_num; + + if (reference_point_block_num < low_block_num) + { + // we're on the same fork (at least as far as reference_point) but we've passed + // reference point and could no longer undo that far if we diverged after that + // block. This should probably only happen due to a race condition where + // the network thread calls this function, and then immediately pushes a bunch of blocks, + // then the main thread finally processes this function. + // with the current framework, there's not much we can do to tell the network + // thread what our current head block is, so we'll just pretend that + // our head is actually the reference point. + // this *may* enable us to fetch blocks that we're unable to push, but that should + // be a rare case (and correctly handled) + low_block_num = reference_point_block_num; + } } - - uint8_t get_current_block_interval_in_seconds() const override + else { - return _chain_db->get_global_properties().parameters.block_interval; + // block is a block we know about, but it is on a fork + try + { + fork_history = _chain_db->get_block_ids_on_fork(reference_point); + // returns a vector where the last element is the common ancestor with the preferred chain, + // and the first element is the reference point you passed in + assert(fork_history.size() >= 2); + + if( fork_history.front() != reference_point ) + { + edump( (fork_history)(reference_point) ); + assert(fork_history.front() == reference_point); + } + block_id_type last_non_fork_block = fork_history.back(); + fork_history.pop_back(); // remove the common ancestor + boost::reverse(fork_history); + + if (last_non_fork_block == block_id_type()) // if the fork goes all the way back to genesis (does graphene's fork db allow this?) + non_fork_high_block_num = 0; + else + non_fork_high_block_num = block_header::num_from_id(last_non_fork_block); + + high_block_num = non_fork_high_block_num + fork_history.size(); + assert(high_block_num == block_header::num_from_id(fork_history.back())); + } + catch (const fc::exception& e) + { + // unable to get fork history for some reason. maybe not linked? 
+ // we can't return a synopsis of its chain + elog( "Unable to construct a blockchain synopsis for reference hash ${hash}: ${exception}", + ("hash", reference_point)("exception", e) ); + throw; + } + if (non_fork_high_block_num < low_block_num) + { + wlog("Unable to generate a usable synopsis because the peer we're generating it for forked too long ago " + "(our chains diverge after block #${non_fork_high_block_num} but only undoable to block #${low_block_num})", + ("low_block_num", low_block_num) + ("non_fork_high_block_num", non_fork_high_block_num)); + FC_THROW_EXCEPTION(graphene::net::block_older_than_undo_history, "Peer is on a fork I'm unable to switch to"); + } } + } + else + { + // no reference point specified, summarize the whole block chain + high_block_num = _chain_db->head_block_num(); + non_fork_high_block_num = high_block_num; + if (high_block_num == 0) + return synopsis; // we have no blocks + } + + if( low_block_num == 0) + low_block_num = 1; + + // at this point: + // low_block_num is the block before the first block we can undo, + // non_fork_high_block_num is the block before the fork (if the peer is on a fork; otherwise it is the same as high_block_num) + // high_block_num is the block number of the reference block, or the end of the chain if no reference provided + + // true_high_block_num is the ending block number after the network code appends any item ids it + // knows about that we don't + uint32_t true_high_block_num = high_block_num + number_of_blocks_after_reference_point; + do + { + // for each block in the synopsis, figure out where to pull the block id from. + // if it's <= non_fork_high_block_num, we grab it from the main blockchain; + // if it's not, we pull it from the fork history + if (low_block_num <= non_fork_high_block_num) + synopsis.push_back(_chain_db->get_block_id_for_num(low_block_num)); + else + synopsis.push_back(fork_history[low_block_num - non_fork_high_block_num - 1]); + low_block_num += (true_high_block_num - low_block_num + 2) / 2; + } + while (low_block_num <= high_block_num); + + //idump((synopsis)); + return synopsis; +} FC_CAPTURE_AND_RETHROW() } + +/** + * Call this after the call to handle_message succeeds. + * + * @param item_type the type of the item we're synchronizing, will be the same as item passed to the sync_from() call + * @param item_count the number of items known to the node that haven't been sent to handle_item() yet. + * After `item_count` more calls to handle_item(), the node will be in sync + */ +void application_impl::sync_status(uint32_t item_type, uint32_t item_count) +{ + // any status reports to GUI go here +} - application* _self; +/** + * Call any time the number of connected peers changes. + */ +void application_impl::connection_count_changed(uint32_t c) +{ + // any status reports to GUI go here +} - fc::path _data_dir; - const bpo::variables_map* _options = nullptr; - api_access _apiaccess; +uint32_t application_impl::get_block_number(const item_hash_t& block_id) +{ try { + return block_header::num_from_id(block_id); +} FC_CAPTURE_AND_RETHROW( (block_id) ) } - std::shared_ptr _chain_db; - std::shared_ptr _p2p_network; - std::shared_ptr _websocket_server; - std::shared_ptr _websocket_tls_server; +/** + * Returns the time a block was produced (if block_id = 0, returns genesis time).
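Because the spacing rule above is easy to misread, here is a small self-contained sketch (plain C++ with no graphene types; the 26-block chain, the variable names, and the assumption that no block is irreversible yet are all illustrative) that replays the same halving step as the do/while loop in get_blockchain_synopsis() and reproduces the "a n u x z" example from its doc comment:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main()
    {
        const uint32_t head_block_num = 26;   // blocks 1..26 stand in for 'a'..'z'
        uint32_t low = 1;                     // first undoable block (assumed: nothing is irreversible yet)
        const uint32_t high = head_block_num; // no reference point, so summarize the whole chain
        const uint32_t true_high = high;      // number_of_blocks_after_reference_point == 0

        std::vector<uint32_t> synopsis;
        do
        {
            synopsis.push_back(low);
            low += (true_high - low + 2) / 2; // same halving step as the loop above
        }
        while (low <= high);

        for (uint32_t n : synopsis)
            std::cout << char('a' + n - 1) << ' ';
        std::cout << '\n';                    // prints: a n u x z
    }

Run, it prints "a n u x z": the gaps shrink toward the head of the chain (13, 7, 3, 2), which is exactly the "denser near the end" property the doc comment relies on.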
+ * If we don't know about the block, returns time_point_sec::min() + */ +fc::time_point_sec application_impl::get_block_time(const item_hash_t& block_id) +{ try { + auto opt_block = _chain_db->fetch_block_by_id( block_id ); + if( opt_block.valid() ) return opt_block->timestamp; + return fc::time_point_sec::min(); +} FC_CAPTURE_AND_RETHROW( (block_id) ) } + +item_hash_t application_impl::get_head_block_id() const +{ + return _chain_db->head_block_id(); +} - std::map> _plugins; +uint32_t application_impl::estimate_last_known_fork_from_git_revision_timestamp(uint32_t unix_timestamp) const +{ + return 0; // there are no forks in graphene +} - bool _is_finished_syncing = false; - }; +void application_impl::error_encountered(const std::string& message, const fc::oexception& error) +{ + // notify GUI or something cool +} +uint8_t application_impl::get_current_block_interval_in_seconds() const +{ + return _chain_db->get_global_properties().parameters.block_interval; } + + +} } } // namespace graphene namespace app namespace detail + +namespace graphene { namespace app { + application::application() : my(new detail::application_impl(this)) {} @@ -899,28 +982,50 @@ void application::set_program_options(boost::program_options::options_descriptio { configuration_file_options.add_options() ("p2p-endpoint", bpo::value(), "Endpoint for P2P node to listen on") - ("seed-node,s", bpo::value>()->composing(), "P2P nodes to connect to on startup (may specify multiple times)") - ("checkpoint,c", bpo::value>()->composing(), "Pairs of [BLOCK_NUM,BLOCK_ID] that should be enforced as checkpoints.") - ("rpc-endpoint", bpo::value()->implicit_value("127.0.0.1:8090"), "Endpoint for websocket RPC to listen on") - ("rpc-tls-endpoint", bpo::value()->implicit_value("127.0.0.1:8089"), "Endpoint for TLS websocket RPC to listen on") - ("enable-permessage-deflate", "Enable support for per-message deflate compression in the websocket servers " - "(--rpc-endpoint and --rpc-tls-endpoint), disabled by default") + ("seed-node,s", bpo::value>()->composing(), + "P2P nodes to connect to on startup (may specify multiple times)") + ("seed-nodes", bpo::value()->composing(), + "JSON array of P2P nodes to connect to on startup") + ("checkpoint,c", bpo::value>()->composing(), + "Pairs of [BLOCK_NUM,BLOCK_ID] that should be enforced as checkpoints.") + ("rpc-endpoint", bpo::value()->implicit_value("127.0.0.1:8090"), + "Endpoint for websocket RPC to listen on") + ("rpc-tls-endpoint", bpo::value()->implicit_value("127.0.0.1:8089"), + "Endpoint for TLS websocket RPC to listen on") ("server-pem,p", bpo::value()->implicit_value("server.pem"), "The TLS certificate file for this server") ("server-pem-password,P", bpo::value()->implicit_value(""), "Password for this certificate") ("genesis-json", bpo::value(), "File to read Genesis State from") ("dbg-init-key", bpo::value(), "Block signing key to use for init witnesses, overrides genesis file") ("api-access", bpo::value(), "JSON file specifying API permissions") + ("io-threads", bpo::value()->implicit_value(0), "Number of IO threads; defaults to 0 for auto-configuration") + ("enable-subscribe-to-all", bpo::value()->implicit_value(true), + "Whether to allow API clients to subscribe to universal object creation and removal events") + ("enable-standby-votes-tracking", bpo::value()->implicit_value(true), + "Whether to enable tracking of votes of standby witnesses and committee members.
" + "Set it to true to provide accurate data to API clients, set to false for slightly better performance.") + ("api-limit-get-account-history-operations",boost::program_options::value()->default_value(100), + "For history_api::get_account_history_operations to set its default limit value as 100") + ("api-limit-get-account-history",boost::program_options::value()->default_value(100), + "For history_api::get_account_history to set its default limit value as 100") + ("api-limit-get-grouped-limit-orders",boost::program_options::value()->default_value(101), + "For orders_api::get_grouped_limit_orders to set its default limit value as 101") + ("api-limit-get-relative-account-history",boost::program_options::value()->default_value(100), + "For history_api::get_relative_account_history to set its default limit value as 100") + ("api-limit-get-account-history-by-operations",boost::program_options::value()->default_value(100), + "For history_api::get_account_history_by_operations to set its default limit value as 100") + ("api-limit-get-asset-holders",boost::program_options::value()->default_value(100), + "For asset_api::get_asset_holders to set its default limit value as 100") + ("api-limit-get-key-references",boost::program_options::value()->default_value(100), + "For database_api_impl::get_key_references to set its default limit value as 100") ; command_line_options.add(configuration_file_options); command_line_options.add_options() - ("create-genesis-json", bpo::value(), - "Path to create a Genesis State at. If a well-formed JSON file exists at the path, it will be parsed and any " - "missing fields in a Genesis State will be added, and any unknown fields will be removed. If no file or an " - "invalid file is found, it will be replaced with an example Genesis State.") - ("replay-blockchain", "Rebuild object graph by replaying all blocks") + ("replay-blockchain", "Rebuild object graph by replaying all blocks without validation") + ("revalidate-blockchain", "Rebuild object graph by replaying all blocks with full validation") ("resync-blockchain", "Delete all blocks and re-sync with network from scratch") - ("force-validate", "Force validation of all transactions") - ("genesis-timestamp", bpo::value(), "Replace timestamp from genesis.json with current time plus this many seconds (experts only!)") + ("force-validate", "Force validation of all transactions during normal operation") + ("genesis-timestamp", bpo::value(), + "Replace timestamp from genesis.json with current time plus this many seconds (experts only!)") ; command_line_options.add(_cli_options); configuration_file_options.add(_cfg_options); @@ -931,36 +1036,17 @@ void application::initialize(const fc::path& data_dir, const boost::program_opti my->_data_dir = data_dir; my->_options = &options; - if( options.count("create-genesis-json") ) + if ( options.count("io-threads") ) { - fc::path genesis_out = options.at("create-genesis-json").as(); - genesis_state_type genesis_state = detail::create_example_genesis(); - if( fc::exists(genesis_out) ) - { - try { - genesis_state = fc::json::from_file(genesis_out).as(); - } catch(const fc::exception& e) { - std::cerr << "Unable to parse existing genesis file:\n" << e.to_string() - << "\nWould you like to replace it? 
[y/N] "; - char response = std::cin.get(); - if( toupper(response) != 'Y' ) - return; - } - - std::cerr << "Updating genesis state in file " << genesis_out.generic_string() << "\n"; - } else { - std::cerr << "Creating example genesis state in file " << genesis_out.generic_string() << "\n"; - } - fc::json::save_to_file(genesis_state, genesis_out); - - std::exit(EXIT_SUCCESS); + const uint16_t num_threads = options["io-threads"].as(); + fc::asio::default_io_service_scope::set_num_threads(num_threads); } } void application::startup() { try { - my->startup(); + my->startup(); } catch ( const fc::exception& e ) { elog( "${e}", ("e",e.to_detail_string()) ); throw; @@ -970,9 +1056,21 @@ void application::startup() } } +void application::set_api_limit() +{ + try { + my->set_api_limit(); + } catch ( const fc::exception& e ) { + elog( "${e}", ("e",e.to_detail_string()) ); + throw; + } catch ( ... ) { + elog( "unexpected exception" ); + throw; + } +} std::shared_ptr application::get_plugin(const string& name) const { - return my->_plugins[name]; + return my->_active_plugins[name]; } net::node_ptr application::p2p_node() @@ -1005,14 +1103,21 @@ bool application::is_finished_syncing() const return my->_is_finished_syncing; } -void graphene::app::application::add_plugin(const string& name, std::shared_ptr p) +void graphene::app::application::enable_plugin(const string& name) +{ + FC_ASSERT(my->_available_plugins[name], "Unknown plugin '" + name + "'"); + my->_active_plugins[name] = my->_available_plugins[name]; + my->_active_plugins[name]->plugin_set_app(this); +} + +void graphene::app::application::add_available_plugin(std::shared_ptr p) { - my->_plugins[name] = p; + my->_available_plugins[p->plugin_name()] = p; } void application::shutdown_plugins() { - for( auto& entry : my->_plugins ) + for( auto& entry : my->_active_plugins ) entry.second->plugin_shutdown(); return; } @@ -1021,22 +1126,33 @@ void application::shutdown() if( my->_p2p_network ) my->_p2p_network->close(); if( my->_chain_db ) + { my->_chain_db->close(); + my->_chain_db = nullptr; + } } void application::initialize_plugins( const boost::program_options::variables_map& options ) { - for( auto& entry : my->_plugins ) + for( auto& entry : my->_active_plugins ) entry.second->plugin_initialize( options ); return; } void application::startup_plugins() { - for( auto& entry : my->_plugins ) + for( auto& entry : my->_active_plugins ) + { entry.second->plugin_startup(); + ilog( "Plugin ${name} started", ( "name", entry.second->plugin_name() ) ); + } return; } +const application_options& application::get_options() +{ + return my->_app_options; +} + // namespace detail } } diff --git a/libraries/app/application_impl.hxx b/libraries/app/application_impl.hxx new file mode 100644 index 0000000000..2c33a46654 --- /dev/null +++ b/libraries/app/application_impl.hxx @@ -0,0 +1,204 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include + +namespace graphene { namespace app { namespace detail { + + +class application_impl : public net::node_delegate + { + public: + fc::optional _lock_file; + bool _is_block_producer = false; + bool _force_validate = false; + application_options _app_options; + + void reset_p2p_node(const fc::path& data_dir); + + std::vector resolve_string_to_ip_endpoints(const std::string& endpoint_string); + + void new_connection( const fc::http::websocket_connection_ptr& c ); + + void reset_websocket_server(); + + void reset_websocket_tls_server(); + + explicit application_impl(application* self) + : 
_self(self), + _chain_db(std::make_shared()) + { + } + + virtual ~application_impl() + { + } + + void set_dbg_init_key( graphene::chain::genesis_state_type& genesis, const std::string& init_key ); + void set_api_limit(); + + void startup(); + + fc::optional< api_access_info > get_api_access_info(const string& username)const; + + void set_api_access_info(const string& username, api_access_info&& permissions); + + /** + * If delegate has the item, the network has no need to fetch it. + */ + virtual bool has_item(const net::item_id& id) override; + + /** + * @brief allows the application to validate an item prior to broadcasting to peers. + * + * @param sync_mode true if the message was fetched through the sync process, false during normal operation + * @returns true if this message caused the blockchain to switch forks, false if it did not + * + * @throws exception if error validating the item, otherwise the item is safe to broadcast on. + */ + virtual bool handle_block(const graphene::net::block_message& blk_msg, bool sync_mode, + std::vector& contained_transaction_message_ids) override; + + virtual void handle_transaction(const graphene::net::trx_message& transaction_message) override; + + void handle_message(const graphene::net::message& message_to_process) override; + + bool is_included_block(const graphene::chain::block_id_type& block_id); + + /** + * Assuming all data elements are ordered in some way, this method should + * return up to limit ids that occur *after* the last ID in synopsis that + * we recognize. + * + * On return, remaining_item_count will be set to the number of items + * in our blockchain after the last item returned in the result, + * or 0 if the result contains the last item in the blockchain + */ + virtual std::vector get_block_ids(const std::vector& blockchain_synopsis, + uint32_t& remaining_item_count, + uint32_t limit) override; + + /** + * Given the hash of the requested data, fetch the body. + */ + virtual graphene::net::message get_item(const graphene::net::item_id& id) override; + + virtual graphene::chain::chain_id_type get_chain_id()const override; + + /** + * Returns a synopsis of the blockchain used for syncing. This consists of a list of + * block hashes at intervals exponentially increasing towards the genesis block. + * When syncing to a peer, the peer uses this data to determine if we're on the same + * fork as they are, and if not, what blocks they need to send us to get us on their + * fork. + * + * In the over-simplified case, this is a straightforward synopsis of our current + * preferred blockchain; when we first connect up to a peer, this is what we will be sending. + * It looks like this: + * If the blockchain is empty, it will return the empty list. + * If the blockchain has one block, it will return a list containing just that block. + * If it contains more than one block: + * the first element in the list will be the hash of the highest numbered block that + * we cannot undo + * the second element will be the hash of an item at the half way point in the undoable + * segment of the blockchain + * the third will be ~3/4 of the way through the undoable segment of the block chain + * the fourth will be at ~7/8... + * &c. + * the last item in the list will be the hash of the most recent block on our preferred chain + * so if the blockchain had 26 blocks labeled a - z, the synopsis would be: + * a n u x z + * the idea being that by sending a small (<30) number of block ids, we can summarize a huge + * blockchain.
The block ids are more dense near the end of the chain because we are + * more likely to be almost in sync when we first connect, and forks are likely to be short. + * If the peer we're syncing with in our example is on a fork that started at block 'v', + * then they will reply to our synopsis with a list of all blocks starting from block 'u', + * the last block they know that we had in common. + * + * In the real code, there are several complications. + * + * First, as an optimization, we don't usually send a synopsis of the entire blockchain, we + * send a synopsis of only the segment of the blockchain that we have undo data for. If their + * fork doesn't build off of something in our undo history, we would be unable to switch, so there's + * no reason to fetch the blocks. + * + * Second, when a peer replies to our initial synopsis and gives us a list of the blocks they think + * we are missing, they only send a chunk of a few thousand blocks at once. After we get those + * block ids, we need to request more blocks by sending another synopsis (we can't just say "send me + * the next 2000 ids" because they may have switched forks themselves and they don't track what + * they've sent us). For faster performance, we want to get a fairly long list of block ids first, + * then start downloading the blocks. + * The peer doesn't handle these follow-up block id requests any differently from the initial request; + * it treats the synopsis we send as our blockchain and bases its response entirely off that. So to + * get the response we want (the next chunk of block ids following the last one they sent us, or, + * failing that, the shortest fork off of the last list of block ids they sent), we need to construct + * a synopsis as if our blockchain was made up of: + * 1. the blocks in our block chain up to the fork point (if there is a fork) or the head block (if no fork) + * 2. the blocks we've already pushed from their fork (if there's a fork) + * 3. the block ids they've previously sent us + * Segment 3 is handled in the p2p code; it just tells us the number of blocks it has (in + * number_of_blocks_after_reference_point) so we can leave space in the synopsis for them. + * We're responsible for constructing the synopsis of Segments 1 and 2 from our active blockchain and + * fork database. The reference_point parameter is the last block from that peer that has been + * successfully pushed to the blockchain, so that tells us whether the peer is on a fork or on + * the main chain. + */ + virtual std::vector get_blockchain_synopsis(const graphene::net::item_hash_t& reference_point, + uint32_t number_of_blocks_after_reference_point) override; + + /** + * Call this after the call to handle_message succeeds. + * + * @param item_type the type of the item we're synchronizing, will be the same as item passed to the sync_from() call + * @param item_count the number of items known to the node that haven't been sent to handle_item() yet. + * After `item_count` more calls to handle_item(), the node will be in sync + */ + virtual void sync_status(uint32_t item_type, uint32_t item_count) override; + + /** + * Call any time the number of connected peers changes. + */ + virtual void connection_count_changed(uint32_t c) override; + + virtual uint32_t get_block_number(const graphene::net::item_hash_t& block_id) override; + + /** + * Returns the time a block was produced (if block_id = 0, returns genesis time).
+ * If we don't know about the block, returns time_point_sec::min() + */ + virtual fc::time_point_sec get_block_time(const graphene::net::item_hash_t& block_id) override; + + virtual graphene::net::item_hash_t get_head_block_id() const override; + + virtual uint32_t estimate_last_known_fork_from_git_revision_timestamp(uint32_t unix_timestamp) const override; + + virtual void error_encountered(const std::string& message, const fc::oexception& error) override; + + uint8_t get_current_block_interval_in_seconds() const override; + + application* _self; + + fc::path _data_dir; + const boost::program_options::variables_map* _options = nullptr; + api_access _apiaccess; + + std::shared_ptr _chain_db; + std::shared_ptr _p2p_network; + std::shared_ptr _websocket_server; + std::shared_ptr _websocket_tls_server; + + std::map> _active_plugins; + std::map> _available_plugins; + + bool _is_finished_syncing = false; + private: + fc::serial_valve valve; + }; + +}}} // namespace graphene namespace app namespace detail diff --git a/libraries/app/config_util.cpp b/libraries/app/config_util.cpp new file mode 100644 index 0000000000..f06291b788 --- /dev/null +++ b/libraries/app/config_util.cpp @@ -0,0 +1,354 @@ +/* + * Copyright (c) 2018 Lubos Ilcik, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +namespace bpo = boost::program_options; + +class deduplicator +{ +public: + deduplicator() : modifier(nullptr) {} + + deduplicator(const boost::shared_ptr (*mod_fn)(const boost::shared_ptr&)) + : modifier(mod_fn) {} + + const boost::shared_ptr next(const boost::shared_ptr& o) + { + const std::string name = o->long_name(); + if( seen.find( name ) != seen.end() ) + return nullptr; + seen.insert(name); + return modifier ? modifier(o) : o; + } + +private: + boost::container::flat_set seen; + const boost::shared_ptr (*modifier)(const boost::shared_ptr&); +}; + +// Currently, you can only specify the filenames and logging levels, which +// are all most users would want to change. 
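The deduplicator above keeps only the first occurrence of each option's long name and lets an optional modifier hook rewrite the entries it keeps (create_new_config_file() further down uses that hook to adjust a couple of defaults). A rough standalone analogue using only the standard library (the class and names below are invented for illustration, and it returns an empty string rather than a null shared_ptr for duplicates):

    #include <functional>
    #include <iostream>
    #include <set>
    #include <string>
    #include <utility>

    // Keep only the first occurrence of each option name; an optional modifier
    // hook may rewrite the entries that are kept, mirroring the deduplicator above.
    class name_deduplicator
    {
    public:
        explicit name_deduplicator(std::function<std::string(const std::string&)> mod = {})
            : modifier(std::move(mod)) {}

        // returns an empty string when the name was already seen
        std::string next(const std::string& name)
        {
            if (!seen.insert(name).second)
                return {};
            return modifier ? modifier(name) : name;
        }

    private:
        std::set<std::string> seen;
        std::function<std::string(const std::string&)> modifier;
    };

    int main()
    {
        name_deduplicator dedup;
        for (const char* opt : {"p2p-endpoint", "rpc-endpoint", "p2p-endpoint"})
        {
            const std::string kept = dedup.next(opt);
            if (!kept.empty())
                std::cout << kept << '\n';   // each option name is printed once
        }
    }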
At a later time, options can +// be added to control rotation intervals, compression, and other seldom- +// used features +static void write_default_logging_config_to_stream(std::ostream& out) +{ + out << "# declare an appender named \"stderr\" that writes messages to the console\n" + "[log.console_appender.stderr]\n" + "stream=std_error\n\n" + "# declare an appender named \"default\" that writes messages to default.log\n" + "[log.file_appender.default]\n" + "# filename can be absolute or relative to this config file\n" + "filename=logs/default/default.log\n" + "# Rotate log every ? minutes; if unset, defaults to 60\n" + "rotation_interval=60\n" + "# how long logs will be kept (in days); if unset, defaults to 1\n" + "rotation_limit=7\n\n" + "# declare an appender named \"p2p\" that writes messages to p2p.log\n" + "[log.file_appender.p2p]\n" + "# filename can be absolute or relative to this config file\n" + "filename=logs/p2p/p2p.log\n" + "# Rotate log every ? minutes; if unset, defaults to 60\n" + "rotation_interval=60\n" + "# how long logs will be kept (in days); if unset, defaults to 1\n" + "rotation_limit=7\n\n" + "# declare an appender named \"rpc\" that writes messages to rpc.log\n" + "[log.file_appender.rpc]\n" + "# filename can be absolute or relative to this config file\n" + "filename=logs/rpc/rpc.log\n" + "# Rotate log every ? minutes; if unset, defaults to 60\n" + "rotation_interval=60\n" + "# how long logs will be kept (in days); if unset, defaults to 1\n" + "rotation_limit=7\n\n" + "# route any messages logged to the default logger to the \"stderr\" appender and\n" + "# \"default\" appender we declared above, if they are info level or higher\n" + "[logger.default]\n" + "level=info\n" + "appenders=stderr,default\n\n" + "# route messages sent to the \"p2p\" logger to the \"p2p\" appender declared above\n" + "[logger.p2p]\n" + "level=warn\n" + "appenders=p2p\n\n" + "# route messages sent to the \"rpc\" logger to the \"rpc\" appender declared above\n" + "[logger.rpc]\n" + "level=error\n" + "appenders=rpc\n\n"; +} + +// logging config is too complicated to be parsed by boost::program_options, +// so we do it by hand +static fc::optional load_logging_config_from_ini_file(const fc::path& config_ini_filename) +{ + try + { + fc::logging_config logging_config; + bool found_logging_config = false; + + boost::property_tree::ptree config_ini_tree; + boost::property_tree::ini_parser::read_ini(config_ini_filename.preferred_string().c_str(), config_ini_tree); + for (const auto& section : config_ini_tree) + { + const std::string& section_name = section.first; + const boost::property_tree::ptree& section_tree = section.second; + + const std::string console_appender_section_prefix = "log.console_appender."; + const std::string file_appender_section_prefix = "log.file_appender."; + const std::string logger_section_prefix = "logger."; + + if (boost::starts_with(section_name, console_appender_section_prefix)) + { + std::string console_appender_name = section_name.substr(console_appender_section_prefix.length()); + std::string stream_name = section_tree.get("stream"); + + // construct a default console appender config here + // stdout/stderr will be taken from ini file, everything else hard-coded here + fc::console_appender::config console_appender_config; + console_appender_config.level_colors.emplace_back( + fc::console_appender::level_color(fc::log_level::debug, + fc::console_appender::color::green)); + console_appender_config.level_colors.emplace_back(
fc::console_appender::level_color(fc::log_level::warn, + fc::console_appender::color::brown)); + console_appender_config.level_colors.emplace_back( + fc::console_appender::level_color(fc::log_level::error, + fc::console_appender::color::cyan)); + console_appender_config.stream = fc::variant(stream_name).as(GRAPHENE_MAX_NESTED_OBJECTS); + logging_config.appenders.push_back(fc::appender_config(console_appender_name, "console", fc::variant(console_appender_config, GRAPHENE_MAX_NESTED_OBJECTS))); + found_logging_config = true; + } + else if (boost::starts_with(section_name, file_appender_section_prefix)) + { + std::string file_appender_name = section_name.substr(file_appender_section_prefix.length()); + fc::path file_name = section_tree.get("filename"); + if (file_name.is_relative()) + file_name = fc::absolute(config_ini_filename).parent_path() / file_name; + + int interval = section_tree.get_optional("rotation_interval").get_value_or(60); + int limit = section_tree.get_optional("rotation_limit").get_value_or(1); + + // construct a default file appender config here + // filename will be taken from ini file, everything else hard-coded here + fc::file_appender::config file_appender_config; + file_appender_config.filename = file_name; + file_appender_config.flush = true; + file_appender_config.rotate = true; + file_appender_config.rotation_interval = fc::minutes(interval); + file_appender_config.rotation_limit = fc::days(limit); + logging_config.appenders.push_back(fc::appender_config(file_appender_name, "file", fc::variant(file_appender_config, GRAPHENE_MAX_NESTED_OBJECTS))); + found_logging_config = true; + } + else if (boost::starts_with(section_name, logger_section_prefix)) + { + std::string logger_name = section_name.substr(logger_section_prefix.length()); + std::string level_string = section_tree.get("level"); + std::string appenders_string = section_tree.get("appenders"); + fc::logger_config logger_config(logger_name); + logger_config.level = fc::variant(level_string).as(5); + boost::split(logger_config.appenders, appenders_string, + boost::is_any_of(" ,"), + boost::token_compress_on); + logging_config.loggers.push_back(logger_config); + found_logging_config = true; + } + } + if (found_logging_config) + return logging_config; + else + return fc::optional(); + } + FC_RETHROW_EXCEPTIONS(warn, "") +} + +static const boost::shared_ptr new_option_description( const std::string& name, const bpo::value_semantic* value, const std::string& description ) +{ + bpo::options_description helper(""); + helper.add_options()( name.c_str(), value, description.c_str() ); + return helper.options()[0]; +} + + +static void load_config_file(const fc::path& config_ini_path, const bpo::options_description& cfg_options, + bpo::variables_map& options ) +{ + deduplicator dedup; + bpo::options_description unique_options("Graphene Witness Node"); + for( const boost::shared_ptr opt : cfg_options.options() ) + { + const boost::shared_ptr od = dedup.next(opt); + if( !od ) continue; + unique_options.add( od ); + } + + // get the basic options + bpo::store(bpo::parse_config_file(config_ini_path.preferred_string().c_str(), + unique_options, true), options); +} + +static bool load_logging_config_file(const fc::path& config_ini_path) +{ + // try to get logging options from the config file. 
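For illustration only, here is a hypothetical pair of fragments a node operator might write, with arbitrary example values rather than recommended settings: a logging.ini that load_logging_config_from_ini_file() above would accept (section names must carry the log.console_appender., log.file_appender. and logger. prefixes it searches for), and a config.ini fragment read by load_config_file() using option names registered in application.cpp earlier in this patch.

    # logging.ini: parsed by load_logging_config_from_ini_file()
    [log.console_appender.stderr]
    stream=std_error

    [log.file_appender.p2p]
    filename=logs/p2p/p2p.log
    rotation_interval=60
    rotation_limit=7

    [logger.p2p]
    level=info
    appenders=stderr,p2p

    # config.ini fragment: parsed by load_config_file()
    rpc-endpoint = 127.0.0.1:8090
    io-threads = 4
    enable-standby-votes-tracking = false
    api-limit-get-account-history = 100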
+ try + { + fc::optional logging_config = load_logging_config_from_ini_file(config_ini_path); + if (logging_config) + { + fc::configure_logging(*logging_config); + return true; + } + } + catch (const fc::exception& ex) + { + wlog("Error parsing logging config from logging config file ${config}, using default config", ("config", config_ini_path.preferred_string())); + } + return false; +} + +static void create_new_config_file(const fc::path& config_ini_path, const fc::path& data_dir, + const bpo::options_description& cfg_options ) +{ + ilog("Writing new config file at ${path}", ("path", config_ini_path)); + if( !fc::exists(data_dir) ) + fc::create_directories(data_dir); + + auto modify_option_defaults = [](const boost::shared_ptr& o) -> const boost::shared_ptr { + const std::string& name = o->long_name(); + if( name == "partial-operations" ) + return new_option_description(name, bpo::value()->default_value(true), o->description() ); + if( name == "max-ops-per-account" ) + return new_option_description(name, bpo::value()->default_value(100), o->description() ); + return o; + }; + deduplicator dedup(modify_option_defaults); + std::ofstream out_cfg(config_ini_path.preferred_string()); + std::string plugin_header_surrounding( 78, '=' ); + for( const boost::shared_ptr opt : cfg_options.options() ) + { + const boost::shared_ptr od = dedup.next(opt); + if( !od ) continue; + + if( od->long_name().find("plugin-cfg-header-") == 0 ) // it's a plugin header + { + out_cfg << "\n"; + out_cfg << "# " << plugin_header_surrounding << "\n"; + out_cfg << "# " << od->description() << "\n"; + out_cfg << "# " << plugin_header_surrounding << "\n"; + out_cfg << "\n"; + continue; + } + + if( !od->description().empty() ) + out_cfg << "# " << od->description() << "\n"; + boost::any store; + if( !od->semantic()->apply_default(store) ) + out_cfg << "# " << od->long_name() << " = \n"; + else { + auto example = od->format_parameter(); + if( example.empty() ) + // This is a boolean switch + out_cfg << od->long_name() << " = " << "false\n"; + else { + // The string is formatted "arg (=)" + example.erase(0, 6); + example.erase(example.length()-1); + out_cfg << od->long_name() << " = " << example << "\n"; + } + } + out_cfg << "\n"; + } + + out_cfg << "\n" + << "# " << plugin_header_surrounding << "\n" + << "# logging options\n" + << "# " << plugin_header_surrounding << "\n" + << "#\n" + << "# Logging configuration is loaded from logging.ini by default.\n" + << "# If logging.ini exists, logging configuration added in this file will be ignored.\n"; + out_cfg.close(); +} + +static void create_logging_config_file(const fc::path& config_ini_path, const fc::path& data_dir) +{ + ilog("Writing new config file at ${path}", ("path", config_ini_path)); + if (!exists(data_dir)) + { + create_directories(data_dir); + } + + std::ofstream out_cfg(config_ini_path.preferred_string()); + write_default_logging_config_to_stream(out_cfg); + out_cfg.close(); +} + +namespace graphene { namespace app { + + void load_configuration_options(const fc::path& data_dir, const bpo::options_description& cfg_options, bpo::variables_map& options) + { + const auto config_ini_path = data_dir / "config.ini"; + const auto logging_ini_path = data_dir / "logging.ini"; + + if(!exists(config_ini_path) && fc::exists(logging_ini_path)) + { + // this is an uncommon case + create_new_config_file(config_ini_path, data_dir, cfg_options); + } + else if(!exists(config_ini_path)) + { + // create default config.ini and logging.ini + create_new_config_file(config_ini_path, 
data_dir, cfg_options); + create_logging_config_file(logging_ini_path, data_dir); + } + + // load witness node configuration + load_config_file(config_ini_path, cfg_options, options); + + // load logging configuration + if (fc::exists(logging_ini_path)) + { + load_logging_config_file(logging_ini_path); + } + else + { + // this is the legacy config.ini case + load_logging_config_file(config_ini_path); + } + } + +} } // graphene::app diff --git a/libraries/app/database_api.cpp b/libraries/app/database_api.cpp index 734d68b2d2..7ced33d48b 100644 --- a/libraries/app/database_api.cpp +++ b/libraries/app/database_api.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. + * Copyright (c) 2017 Cryptonomex, Inc., and contributors. * * The MIT License * @@ -23,12 +23,13 @@ */ #include +#include #include #include -#include #include +#include #include #include @@ -41,28 +42,29 @@ #define GET_REQUIRED_FEES_MAX_RECURSION 4 -namespace graphene { namespace app { - -class database_api_impl; +typedef std::map< std::pair, std::vector > market_queue_type; +namespace graphene { namespace app { class database_api_impl : public std::enable_shared_from_this { public: - database_api_impl( graphene::chain::database& db ); + explicit database_api_impl( graphene::chain::database& db, const application_options* app_options ); ~database_api_impl(); + // Objects fc::variants get_objects(const vector& ids)const; // Subscriptions - void set_subscribe_callback( std::function cb, bool clear_filter ); + void set_subscribe_callback( std::function cb, bool notify_remove_create ); void set_pending_transaction_callback( std::function cb ); void set_block_applied_callback( std::function cb ); - void cancel_all_subscriptions(); + void cancel_all_subscriptions(bool reset_callback, bool reset_market_subscriptions); // Blocks and transactions optional get_block_header(uint32_t block_num)const; + map> get_block_header_batch(const vector block_nums)const; optional get_block(uint32_t block_num)const; processed_transaction get_transaction( uint32_t block_num, uint32_t trx_in_block )const; @@ -75,71 +77,99 @@ class database_api_impl : public std::enable_shared_from_this // Keys vector> get_key_references( vector key )const; + bool is_public_key_registered(string public_key) const; // Accounts - vector> get_accounts(const vector& account_ids)const; + account_id_type get_account_id_from_string(const std::string& name_or_id)const; + vector> get_accounts(const vector& account_names_or_ids)const; std::map get_full_accounts( const vector& names_or_ids, bool subscribe ); optional get_account_by_name( string name )const; - vector get_account_references( account_id_type account_id )const; + vector get_account_references( const std::string account_id_or_name )const; vector> lookup_account_names(const vector& account_names)const; map lookup_accounts(const string& lower_bound_name, uint32_t limit)const; uint64_t get_account_count()const; // Balances - vector get_account_balances(account_id_type id, const flat_set& assets)const; + vector get_account_balances(const std::string& account_name_or_id, const flat_set& assets)const; vector get_named_account_balances(const std::string& name, const flat_set& assets)const; vector get_balance_objects( const vector
& addrs )const; vector get_vested_balances( const vector& objs )const; - vector get_vesting_balances( account_id_type account_id )const; + vector get_vesting_balances( const std::string account_id_or_name )const; // Assets - vector> get_assets(const vector& asset_ids)const; + asset_id_type get_asset_id_from_string(const std::string& symbol_or_id)const; + vector> get_assets(const vector& asset_symbols_or_ids)const; vector list_assets(const string& lower_bound_symbol, uint32_t limit)const; vector> lookup_asset_symbols(const vector& symbols_or_ids)const; + uint64_t get_asset_count()const; // Markets / feeds - vector get_limit_orders(asset_id_type a, asset_id_type b, uint32_t limit)const; - vector get_call_orders(asset_id_type a, uint32_t limit)const; - vector get_settle_orders(asset_id_type a, uint32_t limit)const; - vector get_margin_positions( const account_id_type& id )const; - void subscribe_to_market(std::function callback, asset_id_type a, asset_id_type b); - void unsubscribe_from_market(asset_id_type a, asset_id_type b); - market_ticker get_ticker( const string& base, const string& quote )const; + vector get_limit_orders(const std::string& a, const std::string& b, uint32_t limit)const; + vector get_account_limit_orders( const string& account_name_or_id, + const string &base, + const string "e, uint32_t limit, + optional ostart_id, + optional ostart_price ); + vector get_call_orders(const std::string& a, uint32_t limit)const; + vector get_settle_orders(const std::string& a, uint32_t limit)const; + vector get_margin_positions( const std::string account_id_or_name )const; + vector get_collateral_bids(const std::string& asset, uint32_t limit, uint32_t start)const; + + void subscribe_to_market(std::function callback, const std::string& a, const std::string& b); + void unsubscribe_from_market(const std::string& a, const std::string& b); + + market_ticker get_ticker( const string& base, const string& quote, bool skip_order_book = false )const; market_volume get_24_volume( const string& base, const string& quote )const; order_book get_order_book( const string& base, const string& quote, unsigned limit = 50 )const; + vector get_top_markets( uint32_t limit )const; vector get_trade_history( const string& base, const string& quote, fc::time_point_sec start, fc::time_point_sec stop, unsigned limit = 100 )const; + vector get_trade_history_by_sequence( const string& base, const string& quote, int64_t start, fc::time_point_sec stop, unsigned limit = 100 )const; // Witnesses vector> get_witnesses(const vector& witness_ids)const; - fc::optional get_witness_by_account(account_id_type account)const; + fc::optional get_witness_by_account(const std::string account_id_or_name)const; map lookup_witness_accounts(const string& lower_bound_name, uint32_t limit)const; uint64_t get_witness_count()const; // Committee members vector> get_committee_members(const vector& committee_member_ids)const; - fc::optional get_committee_member_by_account(account_id_type account)const; + fc::optional get_committee_member_by_account(const std::string account_id_or_name)const; map lookup_committee_member_accounts(const string& lower_bound_name, uint32_t limit)const; + uint64_t get_committee_count()const; + + // Workers + vector get_all_workers()const; + vector> get_workers_by_account(const std::string account_id_or_name)const; + uint64_t get_worker_count()const; // Votes vector lookup_vote_ids( const vector& votes )const; // Authority / validation std::string get_transaction_hex(const signed_transaction& trx)const; + std::string 
get_transaction_hex_without_sig(const signed_transaction& trx)const; + set get_required_signatures( const signed_transaction& trx, const flat_set& available_keys )const; set get_potential_signatures( const signed_transaction& trx )const; set
get_potential_address_signatures( const signed_transaction& trx )const; bool verify_authority( const signed_transaction& trx )const; - bool verify_account_authority( const string& name_or_id, const flat_set& signers )const; + bool verify_account_authority( const string& account_name_or_id, const flat_set& signers )const; processed_transaction validate_transaction( const signed_transaction& trx )const; - vector< fc::variant > get_required_fees( const vector& ops, asset_id_type id )const; + vector< fc::variant > get_required_fees( const vector& ops, const std::string& asset_id_or_symbol )const; // Proposed transactions - vector get_proposed_transactions( account_id_type id )const; + vector get_proposed_transactions( const std::string account_id_or_name )const; // Blinded balances vector get_blinded_balances( const flat_set& commitments )const; + // Withdrawals + vector get_withdraw_permissions_by_giver(const std::string account_id_or_name, withdraw_permission_id_type start, uint32_t limit)const; + vector get_withdraw_permissions_by_recipient(const std::string account_id_or_name, withdraw_permission_id_type start, uint32_t limit)const; + //private: + static string price_to_string( const price& _price, const asset_object& _base, const asset_object& _quote ); + template void subscribe_to_item( const T& i )const { @@ -148,10 +178,7 @@ class database_api_impl : public std::enable_shared_from_this return; if( !is_subscribed_to_item(i) ) - { - idump((i)); - _subscribe_filter.insert( vec.data(), vec.size() );//(vecconst char*)&i, sizeof(i) ); - } + _subscribe_filter.insert( vec.data(), vec.size() ); } template @@ -159,28 +186,155 @@ class database_api_impl : public std::enable_shared_from_this { if( !_subscribe_callback ) return false; - return true; + return _subscribe_filter.contains( i ); } + bool is_impacted_account( const flat_set& accounts) + { + if( !_subscribed_accounts.size() || !accounts.size() ) + return false; + + return std::any_of(accounts.begin(), accounts.end(), [this](const account_id_type& account) { + return _subscribed_accounts.find(account) != _subscribed_accounts.end(); + }); + } + + const std::pair get_order_market( const force_settlement_object& order ) + { + // TODO cache the result to avoid repeatedly fetching from db + asset_id_type backing_id = order.balance.asset_id( _db ).bitasset_data( _db ).options.short_backing_asset; + auto tmp = std::make_pair( order.balance.asset_id, backing_id ); + if( tmp.first > tmp.second ) std::swap( tmp.first, tmp.second ); + return tmp; + } + + const account_object* get_account_from_string( const std::string& name_or_id ) const + { + // TODO cache the result to avoid repeatedly fetching from db + FC_ASSERT( name_or_id.size() > 0); + const account_object* account = nullptr; + if (std::isdigit(name_or_id[0])) + account = _db.find(fc::variant(name_or_id, 1).as(1)); + else + { + const auto& idx = _db.get_index_type().indices().get(); + auto itr = idx.find(name_or_id); + if (itr != idx.end()) + account = &*itr; + } + FC_ASSERT( account, "no such account" ); + return account; + } + + const asset_object* get_asset_from_string( const std::string& symbol_or_id ) const + { + // TODO cache the result to avoid repeatedly fetching from db + FC_ASSERT( symbol_or_id.size() > 0); + const asset_object* asset = nullptr; + if (std::isdigit(symbol_or_id[0])) + asset = _db.find(fc::variant(symbol_or_id, 1).as(1)); + else + { + const auto& idx = _db.get_index_type().indices().get(); + auto itr = idx.find(symbol_or_id); + if (itr != idx.end()) + asset = &*itr; + }
FC_ASSERT( asset, "no such asset" ); + return asset; + } + vector> get_assets(const vector& asset_ids)const + { + vector> result; result.reserve(asset_ids.size()); + std::transform(asset_ids.begin(), asset_ids.end(), std::back_inserter(result), + [this](asset_id_type id) -> optional { + if(auto o = _db.find(id)) + { + subscribe_to_item( id ); + return *o; + } + return {}; + }); + return result; + } + vector get_limit_orders(const asset_id_type a, const asset_id_type b, const uint32_t limit)const + { + FC_ASSERT( limit <= 300 ); + + const auto& limit_order_idx = _db.get_index_type(); + const auto& limit_price_idx = limit_order_idx.indices().get(); + + vector result; + result.reserve(limit*2); + + uint32_t count = 0; + auto limit_itr = limit_price_idx.lower_bound(price::max(a,b)); + auto limit_end = limit_price_idx.upper_bound(price::min(a,b)); + while(limit_itr != limit_end && count < limit) + { + result.push_back(*limit_itr); + ++limit_itr; + ++count; + } + count = 0; + limit_itr = limit_price_idx.lower_bound(price::max(b,a)); + limit_end = limit_price_idx.upper_bound(price::min(b,a)); + while(limit_itr != limit_end && count < limit) + { + result.push_back(*limit_itr); + ++limit_itr; + ++count; + } + + return result; + } + + template + const std::pair get_order_market( const T& order ) + { + return order.get_market(); + } + + template + void enqueue_if_subscribed_to_market(const object* obj, market_queue_type& queue, bool full_object=true) + { + const T* order = dynamic_cast(obj); + FC_ASSERT( order != nullptr); + + const auto& market = get_order_market( *order ); + + auto sub = _market_subscriptions.find( market ); + if( sub != _market_subscriptions.end() ) { + queue[market].emplace_back( full_object ? obj->to_variant() : fc::variant(obj->id, 1) ); + } + } + void broadcast_updates( const vector& updates ); + void broadcast_market_updates( const market_queue_type& queue); + void handle_object_changed(bool force_notify, bool full_object, const vector& ids, const flat_set& impacted_accounts, std::function find_object); /** called every time a block is applied to report the objects that were changed */ - void on_objects_changed(const vector& ids); - void on_objects_removed(const vector& objs); + void on_objects_new(const vector& ids, const flat_set& impacted_accounts); + void on_objects_changed(const vector& ids, const flat_set& impacted_accounts); + void on_objects_removed(const vector& ids, const vector& objs, const flat_set& impacted_accounts); void on_applied_block(); - mutable fc::bloom_filter _subscribe_filter; + bool _notify_remove_create = false; + mutable fc::bloom_filter _subscribe_filter; + std::set _subscribed_accounts; std::function _subscribe_callback; std::function _pending_trx_callback; std::function _block_applied_callback; + boost::signals2::scoped_connection _new_connection; boost::signals2::scoped_connection _change_connection; boost::signals2::scoped_connection _removed_connection; boost::signals2::scoped_connection _applied_block_connection; boost::signals2::scoped_connection _pending_trx_connection; map< pair, std::function > _market_subscriptions; graphene::chain::database& _db; + const application_options* _app_options = nullptr; }; ////////////////////////////////////////////////////////////////////// @@ -189,24 +343,28 @@ class database_api_impl : public std::enable_shared_from_this // // ////////////////////////////////////////////////////////////////////// -database_api::database_api( graphene::chain::database& db ) - : my( new database_api_impl( db ) ) {} 
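get_account_from_string() and get_asset_from_string() above share one convention: if the first character of the argument is a digit, the string is treated as an object id (for example 1.2.0), otherwise as an account name or asset symbol. A tiny standalone sketch of that dispatch rule; the enum and function below are hypothetical helpers for illustration, not graphene API:

    #include <cctype>
    #include <iostream>
    #include <stdexcept>
    #include <string>

    enum class lookup_kind { by_id, by_name };

    // Decide whether a lookup string names an object id or a name/symbol,
    // using the same leading-digit test as the helpers above.
    lookup_kind classify(const std::string& name_or_id)
    {
        if (name_or_id.empty())
            throw std::invalid_argument("empty lookup string");
        return std::isdigit(static_cast<unsigned char>(name_or_id[0]))
                   ? lookup_kind::by_id
                   : lookup_kind::by_name;
    }

    int main()
    {
        std::cout << (classify("1.2.0") == lookup_kind::by_id ? "id" : "name") << '\n';    // id
        std::cout << (classify("nathan") == lookup_kind::by_name ? "name" : "id") << '\n'; // name
    }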
+database_api::database_api( graphene::chain::database& db, const application_options* app_options ) + : my( new database_api_impl( db, app_options ) ) {} database_api::~database_api() {} -database_api_impl::database_api_impl( graphene::chain::database& db ):_db(db) +database_api_impl::database_api_impl( graphene::chain::database& db, const application_options* app_options ) +:_db(db), _app_options(app_options) { wlog("creating database api ${x}", ("x",int64_t(this)) ); - _change_connection = _db.changed_objects.connect([this](const vector& ids) { - on_objects_changed(ids); + _new_connection = _db.new_objects.connect([this](const vector& ids, const flat_set& impacted_accounts) { + on_objects_new(ids, impacted_accounts); }); - _removed_connection = _db.removed_objects.connect([this](const vector& objs) { - on_objects_removed(objs); + _change_connection = _db.changed_objects.connect([this](const vector& ids, const flat_set& impacted_accounts) { + on_objects_changed(ids, impacted_accounts); + }); + _removed_connection = _db.removed_objects.connect([this](const vector& ids, const vector& objs, const flat_set& impacted_accounts) { + on_objects_removed(ids, objs, impacted_accounts); }); _applied_block_connection = _db.applied_block.connect([this](const signed_block&){ on_applied_block(); }); _pending_trx_connection = _db.on_pending_transaction.connect([this](const signed_transaction& trx ){ - if( _pending_trx_callback ) _pending_trx_callback( fc::variant(trx) ); + if( _pending_trx_callback ) _pending_trx_callback( fc::variant(trx, GRAPHENE_MAX_NESTED_OBJECTS) ); }); } @@ -215,6 +373,71 @@ database_api_impl::~database_api_impl() elog("freeing database api ${x}", ("x",int64_t(this)) ); } +////////////////////////////////////////////////////////////////////// +// // +// Market ticker constructor // +// // +////////////////////////////////////////////////////////////////////// +market_ticker::market_ticker(const market_ticker_object& mto, + const fc::time_point_sec& now, + const asset_object& asset_base, + const asset_object& asset_quote, + const order_book& orders) +{ + time = now; + base = asset_base.symbol; + quote = asset_quote.symbol; + percent_change = "0"; + lowest_ask = "0"; + highest_bid = "0"; + + fc::uint128 bv; + fc::uint128 qv; + price latest_price = asset( mto.latest_base, mto.base ) / asset( mto.latest_quote, mto.quote ); + if( mto.base != asset_base.id ) + latest_price = ~latest_price; + latest = database_api_impl::price_to_string( latest_price, asset_base, asset_quote ); + if( mto.last_day_base != 0 && mto.last_day_quote != 0 // has trade data before 24 hours + && ( mto.last_day_base != mto.latest_base || mto.last_day_quote != mto.latest_quote ) ) // price changed + { + price last_day_price = asset( mto.last_day_base, mto.base ) / asset( mto.last_day_quote, mto.quote ); + if( mto.base != asset_base.id ) + last_day_price = ~last_day_price; + percent_change = price_diff_percent_string( last_day_price, latest_price ); + } + if( asset_base.id == mto.base ) + { + bv = mto.base_volume; + qv = mto.quote_volume; + } + else + { + bv = mto.quote_volume; + qv = mto.base_volume; + } + base_volume = uint128_amount_to_string( bv, asset_base.precision ); + quote_volume = uint128_amount_to_string( qv, asset_quote.precision ); + + if(!orders.asks.empty()) + lowest_ask = orders.asks[0].price; + if(!orders.bids.empty()) + highest_bid = orders.bids[0].price; +} +market_ticker::market_ticker(const fc::time_point_sec& now, + const asset_object& asset_base, + const asset_object& asset_quote) +{ + time = 
now; + base = asset_base.symbol; + quote = asset_quote.symbol; + latest = "0"; + lowest_ask = "0"; + highest_bid = "0"; + percent_change = "0"; + base_volume = "0"; + quote_volume = "0"; +} + ////////////////////////////////////////////////////////////////////// // // // Objects // @@ -237,10 +460,6 @@ fc::variants database_api_impl::get_objects(const vector& ids)co this->subscribe_to_item( id ); } } - else - { - elog( "getObjects without subscribe callback??" ); - } fc::variants result; result.reserve(ids.size()); @@ -261,24 +480,23 @@ fc::variants database_api_impl::get_objects(const vector& ids)co // // ////////////////////////////////////////////////////////////////////// -void database_api::set_subscribe_callback( std::function cb, bool clear_filter ) +void database_api::set_subscribe_callback( std::function cb, bool notify_remove_create ) { - my->set_subscribe_callback( cb, clear_filter ); + my->set_subscribe_callback( cb, notify_remove_create ); } -void database_api_impl::set_subscribe_callback( std::function cb, bool clear_filter ) +void database_api_impl::set_subscribe_callback( std::function cb, bool notify_remove_create ) { - edump((clear_filter)); - _subscribe_callback = cb; - if( clear_filter || !cb ) + if( notify_remove_create ) { - static fc::bloom_parameters param; - param.projected_element_count = 10000; - param.false_positive_probability = 1.0/10000; - param.maximum_size = 1024*8*8*2; - param.compute_optimal_parameters(); - _subscribe_filter = fc::bloom_filter(param); + FC_ASSERT( _app_options && _app_options->enable_subscribe_to_all, + "Subscribing to universal object creation and removal is disallowed in this server." ); } + + cancel_all_subscriptions(false, false); + + _subscribe_callback = cb; + _notify_remove_create = notify_remove_create; } void database_api::set_pending_transaction_callback( std::function cb ) @@ -303,13 +521,21 @@ void database_api_impl::set_block_applied_callback( std::functioncancel_all_subscriptions(); + my->cancel_all_subscriptions(true, true); } -void database_api_impl::cancel_all_subscriptions() +void database_api_impl::cancel_all_subscriptions( bool reset_callback, bool reset_market_subscriptions ) { - set_subscribe_callback( std::function(), true); - _market_subscriptions.clear(); + if ( reset_callback ) + _subscribe_callback = std::function(); + + if ( reset_market_subscriptions ) + _market_subscriptions.clear(); + + _notify_remove_create = false; + _subscribed_accounts.clear(); + static fc::bloom_parameters param(10000, 1.0/100, 1024*8*8*2); + _subscribe_filter = fc::bloom_filter(param); } ////////////////////////////////////////////////////////////////////// @@ -330,6 +556,20 @@ optional database_api_impl::get_block_header(uint32_t block_num) c return *result; return {}; } +map> database_api::get_block_header_batch(const vector block_nums)const +{ + return my->get_block_header_batch( block_nums ); +} + +map> database_api_impl::get_block_header_batch(const vector block_nums) const +{ + map> results; + for (const uint32_t block_num : block_nums) + { + results[block_num] = get_block_header(block_num); + } + return results; +} optional database_api::get_block(uint32_t block_num)const { @@ -435,7 +675,12 @@ vector> database_api::get_key_references( vector> database_api_impl::get_key_references( vector keys )const { - wdump( (keys) ); + uint64_t api_limit_get_key_references=_app_options->api_limit_get_key_references; + FC_ASSERT(keys.size() <= api_limit_get_key_references); + const auto& idx = _db.get_index_type(); + const auto& aidx = 
dynamic_cast(idx); + const auto& refs = aidx.get_secondary_index(); + vector< vector > final_result; final_result.reserve(keys.size()); @@ -455,10 +700,6 @@ vector> database_api_impl::get_key_references( vector(); - const auto& aidx = dynamic_cast&>(idx); - const auto& refs = aidx.get_secondary_index(); - auto itr = refs.account_to_key_memberships.find(key); vector result; for( auto& a : {a1,a2,a3,a4,a5} ) @@ -466,45 +707,79 @@ vector> database_api_impl::get_key_references( vectorsecond.size() ); + result.reserve( result.size() + itr->second.size() ); for( auto item : itr->second ) { - wdump((a)(item)(item(_db).name)); result.push_back(item); } } } + auto itr = refs.account_to_key_memberships.find(key); if( itr != refs.account_to_key_memberships.end() ) { - result.reserve( itr->second.size() ); + result.reserve( result.size() + itr->second.size() ); for( auto item : itr->second ) result.push_back(item); } final_result.emplace_back( std::move(result) ); } - for( auto i : final_result ) - subscribe_to_item(i); - return final_result; } +bool database_api::is_public_key_registered(string public_key) const +{ + return my->is_public_key_registered(public_key); +} + +bool database_api_impl::is_public_key_registered(string public_key) const +{ + // Short-circuit + if (public_key.empty()) { + return false; + } + + // Search among all keys using an existing map of *current* account keys + public_key_type key; + try { + key = public_key_type(public_key); + } catch ( ... ) { + // An invalid public key was detected + return false; + } + const auto& idx = _db.get_index_type(); + const auto& aidx = dynamic_cast(idx); + const auto& refs = aidx.get_secondary_index(); + auto itr = refs.account_to_key_memberships.find(key); + bool is_known = itr != refs.account_to_key_memberships.end(); + + return is_known; +} + ////////////////////////////////////////////////////////////////////// // // // Accounts // // // ////////////////////////////////////////////////////////////////////// -vector> database_api::get_accounts(const vector& account_ids)const +account_id_type database_api::get_account_id_from_string(const std::string& name_or_id)const +{ + return my->get_account_from_string( name_or_id )->id; +} + +vector> database_api::get_accounts(const vector& account_names_or_ids)const { - return my->get_accounts( account_ids ); + return my->get_accounts( account_names_or_ids ); } -vector> database_api_impl::get_accounts(const vector& account_ids)const +vector> database_api_impl::get_accounts(const vector& account_names_or_ids)const { - vector> result; result.reserve(account_ids.size()); - std::transform(account_ids.begin(), account_ids.end(), std::back_inserter(result), - [this](account_id_type id) -> optional { + vector> result; result.reserve(account_names_or_ids.size()); + std::transform(account_names_or_ids.begin(), account_names_or_ids.end(), std::back_inserter(result), + [this](std::string id_or_name) -> optional { + + const account_object* account = get_account_from_string(id_or_name); + account_id_type id = account->id; if(auto o = _db.find(id)) { subscribe_to_item( id ); @@ -515,6 +790,92 @@ vector> database_api_impl::get_accounts(const vector database_api::get_account_limit_orders( const string& account_name_or_id, const string &base, + const string "e, uint32_t limit, optional ostart_id, optional ostart_price) +{ + return my->get_account_limit_orders( account_name_or_id, base, quote, limit, ostart_id, ostart_price ); +} + +vector database_api_impl::get_account_limit_orders( const string& account_name_or_id, 
const string &base, + const string "e, uint32_t limit, optional ostart_id, optional ostart_price) +{ + FC_ASSERT( limit <= 101 ); + + vector results; + uint32_t count = 0; + + const account_object* account = get_account_from_string(account_name_or_id); + if (account == nullptr) + return results; + + auto assets = lookup_asset_symbols( {base, quote} ); + FC_ASSERT( assets[0], "Invalid base asset symbol: ${s}", ("s",base) ); + FC_ASSERT( assets[1], "Invalid quote asset symbol: ${s}", ("s",quote) ); + + auto base_id = assets[0]->id; + auto quote_id = assets[1]->id; + + if (ostart_price.valid()) { + FC_ASSERT(ostart_price->base.asset_id == base_id, "Base asset inconsistent with start price"); + FC_ASSERT(ostart_price->quote.asset_id == quote_id, "Quote asset inconsistent with start price"); + } + + const auto& index_by_account = _db.get_index_type().indices().get(); + limit_order_multi_index_type::index::type::const_iterator lower_itr; + limit_order_multi_index_type::index::type::const_iterator upper_itr; + + // if both order_id and price are invalid, query the first page + if ( !ostart_id.valid() && !ostart_price.valid() ) + { + lower_itr = index_by_account.lower_bound(std::make_tuple(account->id, price::max(base_id, quote_id))); + } + else if ( ostart_id.valid() ) + { + // in case of the order been deleted during page querying + const limit_order_object *p_loo = _db.find(*ostart_id); + + if ( !p_loo ) + { + if ( ostart_price.valid() ) + { + lower_itr = index_by_account.lower_bound(std::make_tuple(account->id, *ostart_price, *ostart_id)); + } + else + { + // start order id been deleted, yet not provided price either + FC_THROW("Order id invalid (maybe just been canceled?), and start price not provided"); + } + } + else + { + const limit_order_object &loo = *p_loo; + + // in case of the order not belongs to specified account or market + FC_ASSERT(loo.sell_price.base.asset_id == base_id, "Order base asset inconsistent"); + FC_ASSERT(loo.sell_price.quote.asset_id == quote_id, "Order quote asset inconsistent with order"); + FC_ASSERT(loo.seller == account->get_id(), "Order not owned by specified account"); + + lower_itr = index_by_account.lower_bound(std::make_tuple(account->id, loo.sell_price, *ostart_id)); + } + } + else + { + // if reach here start_price must be valid + lower_itr = index_by_account.lower_bound(std::make_tuple(account->id, *ostart_price)); + } + + upper_itr = index_by_account.upper_bound(std::make_tuple(account->id, price::min(base_id, quote_id))); + + // Add the account's orders + for ( ; lower_itr != upper_itr && count < limit; ++lower_itr, ++count) + { + const limit_order_object &order = *lower_itr; + results.emplace_back(order); + } + + return results; +} + std::map database_api::get_full_accounts( const vector& names_or_ids, bool subscribe ) { return my->get_full_accounts( names_or_ids, subscribe ); @@ -522,31 +883,26 @@ std::map database_api::get_full_accounts( const vector database_api_impl::get_full_accounts( const vector& names_or_ids, bool subscribe) { - idump((names_or_ids)); + const auto& proposal_idx = _db.get_index_type(); + const auto& pidx = dynamic_cast(proposal_idx); + const auto& proposals_by_account = pidx.get_secondary_index(); + std::map results; for (const std::string& account_name_or_id : names_or_ids) { - const account_object* account = nullptr; - if (std::isdigit(account_name_or_id[0])) - account = _db.find(fc::variant(account_name_or_id).as()); - else - { - const auto& idx = _db.get_index_type().indices().get(); - auto itr = 
idx.find(account_name_or_id); - if (itr != idx.end()) - account = &*itr; - } + const account_object* account = get_account_from_string(account_name_or_id); if (account == nullptr) continue; if( subscribe ) { - ilog( "subscribe to ${id}", ("id",account->name) ); - subscribe_to_item( account->id ); + if(_subscribed_accounts.size() < 100) { + _subscribed_accounts.insert( account->get_id() ); + subscribe_to_item( account->id ); + } } - // fc::mutable_variant_object full_account; full_account acnt; acnt.account = *account; acnt.statistics = account->statistics(_db); @@ -555,20 +911,11 @@ std::map database_api_impl::get_full_accounts( const acnt.lifetime_referrer_name = account->lifetime_referrer(_db).name; acnt.votes = lookup_vote_ids( vector(account->options.votes.begin(),account->options.votes.end()) ); - // Add the account itself, its statistics object, cashback balance, and referral account names - /* - full_account("account", *account)("statistics", account->statistics(_db)) - ("registrar_name", account->registrar(_db).name)("referrer_name", account->referrer(_db).name) - ("lifetime_referrer_name", account->lifetime_referrer(_db).name); - */ if (account->cashback_vb) { acnt.cashback_balance = account->cashback_balance(_db); } // Add the account's proposals - const auto& proposal_idx = _db.get_index_type(); - const auto& pidx = dynamic_cast&>(proposal_idx); - const auto& proposals_by_account = pidx.get_secondary_index(); auto required_approvals_itr = proposals_by_account._account_to_proposals.find( account->id ); if( required_approvals_itr != proposals_by_account._account_to_proposals.end() ) { @@ -579,12 +926,9 @@ std::map database_api_impl::get_full_accounts( const // Add the account's balances - auto balance_range = _db.get_index_type().indices().get().equal_range(boost::make_tuple(account->id)); - //vector balances; - std::for_each(balance_range.first, balance_range.second, - [&acnt](const account_balance_object& balance) { - acnt.balances.emplace_back(balance); - }); + const auto& balances = _db.get_index_type< primary_index< account_balance_index > >().get_secondary_index< balances_by_account_index >().get_account_balances( account->id ); + for( const auto balance : balances ) + acnt.balances.emplace_back( *balance.second ); // Add the account's vesting balances auto vesting_range = _db.get_index_type().indices().get().equal_range(account->id); @@ -604,6 +948,27 @@ std::map database_api_impl::get_full_accounts( const [&acnt] (const call_order_object& call) { acnt.call_orders.emplace_back(call); }); + auto settle_range = _db.get_index_type().indices().get().equal_range(account->id); + std::for_each(settle_range.first, settle_range.second, + [&acnt] (const force_settlement_object& settle) { + acnt.settle_orders.emplace_back(settle); + }); + + // get assets issued by user + auto asset_range = _db.get_index_type().indices().get().equal_range(account->id); + std::for_each(asset_range.first, asset_range.second, + [&acnt] (const asset_object& asset) { + acnt.assets.emplace_back(asset.id); + }); + + // get withdraws permissions + auto withdraw_range = _db.get_index_type().indices().get().equal_range(account->id); + std::for_each(withdraw_range.first, withdraw_range.second, + [&acnt] (const withdraw_permission_object& withdraw) { + acnt.withdraws.emplace_back(withdraw); + }); + + results[account_name_or_id] = acnt; } return results; @@ -623,16 +988,17 @@ optional database_api_impl::get_account_by_name( string name )co return optional(); } -vector database_api::get_account_references( 
account_id_type account_id )const +vector database_api::get_account_references( const std::string account_id_or_name )const { - return my->get_account_references( account_id ); + return my->get_account_references( account_id_or_name ); } -vector database_api_impl::get_account_references( account_id_type account_id )const +vector database_api_impl::get_account_references( const std::string account_id_or_name )const { const auto& idx = _db.get_index_type(); - const auto& aidx = dynamic_cast&>(idx); + const auto& aidx = dynamic_cast(idx); const auto& refs = aidx.get_secondary_index(); + const account_id_type account_id = get_account_from_string(account_id_or_name)->id; auto itr = refs.account_to_account_memberships.find(account_id); vector result; @@ -701,21 +1067,23 @@ uint64_t database_api_impl::get_account_count()const // // ////////////////////////////////////////////////////////////////////// -vector database_api::get_account_balances(account_id_type id, const flat_set& assets)const +vector database_api::get_account_balances(const std::string& account_name_or_id, const flat_set& assets)const { - return my->get_account_balances( id, assets ); + return my->get_account_balances( account_name_or_id, assets ); } -vector database_api_impl::get_account_balances(account_id_type acnt, const flat_set& assets)const +vector database_api_impl::get_account_balances(const std::string& account_name_or_id, const flat_set& assets)const { + const account_object* account = get_account_from_string(account_name_or_id); + account_id_type acnt = account->id; vector result; if (assets.empty()) { // if the caller passes in an empty list of assets, return balances for all assets the account owns - const account_balance_index& balance_index = _db.get_index_type(); - auto range = balance_index.indices().get().equal_range(boost::make_tuple(acnt)); - for (const account_balance_object& balance : boost::make_iterator_range(range.first, range.second)) - result.push_back(asset(balance.get_balance())); + const auto& balance_index = _db.get_index_type< primary_index< account_balance_index > >(); + const auto& balances = balance_index.get_secondary_index< balances_by_account_index >().get_account_balances( acnt ); + for( const auto balance : balances ) + result.push_back( balance.second->get_balance() ); } else { @@ -730,15 +1098,7 @@ vector database_api_impl::get_account_balances(account_id_type acnt, cons vector database_api::get_named_account_balances(const std::string& name, const flat_set& assets)const { - return my->get_named_account_balances( name, assets ); -} - -vector database_api_impl::get_named_account_balances(const std::string& name, const flat_set& assets) const -{ - const auto& accounts_by_name = _db.get_index_type().indices().get(); - auto itr = accounts_by_name.find(name); - FC_ASSERT( itr != accounts_by_name.end() ); - return get_account_balances(itr->get_id(), assets); + return my->get_account_balances( name, assets ); } vector database_api::get_balance_objects( const vector
& addrs )const @@ -788,15 +1148,16 @@ vector database_api_impl::get_vested_balances( const vector database_api::get_vesting_balances( account_id_type account_id )const +vector database_api::get_vesting_balances( const std::string account_id_or_name )const { - return my->get_vesting_balances( account_id ); + return my->get_vesting_balances( account_id_or_name ); } -vector database_api_impl::get_vesting_balances( account_id_type account_id )const +vector database_api_impl::get_vesting_balances( const std::string account_id_or_name )const { try { + const account_id_type account_id = get_account_from_string(account_id_or_name)->id; vector result; auto vesting_range = _db.get_index_type().indices().get().equal_range(account_id); std::for_each(vesting_range.first, vesting_range.second, @@ -805,7 +1166,7 @@ vector database_api_impl::get_vesting_balances( account_ }); return result; } - FC_CAPTURE_AND_RETHROW( (account_id) ); + FC_CAPTURE_AND_RETHROW( (account_id_or_name) ); } ////////////////////////////////////////////////////////////////////// @@ -814,16 +1175,23 @@ vector database_api_impl::get_vesting_balances( account_ // // ////////////////////////////////////////////////////////////////////// -vector> database_api::get_assets(const vector& asset_ids)const +asset_id_type database_api::get_asset_id_from_string(const std::string& symbol_or_id)const +{ + return my->get_asset_from_string( symbol_or_id )->id; +} + +vector> database_api::get_assets(const vector& asset_symbols_or_ids)const { - return my->get_assets( asset_ids ); + return my->get_assets( asset_symbols_or_ids ); } -vector> database_api_impl::get_assets(const vector& asset_ids)const +vector> database_api_impl::get_assets(const vector& asset_symbols_or_ids)const { - vector> result; result.reserve(asset_ids.size()); - std::transform(asset_ids.begin(), asset_ids.end(), std::back_inserter(result), - [this](asset_id_type id) -> optional { + vector> result; result.reserve(asset_symbols_or_ids.size()); + std::transform(asset_symbols_or_ids.begin(), asset_symbols_or_ids.end(), std::back_inserter(result), + [this](std::string id_or_name) -> optional { + const asset_object* asset = get_asset_from_string(id_or_name); + asset_id_type id = asset->id; if(auto o = _db.find(id)) { subscribe_to_item( id ); @@ -841,7 +1209,7 @@ vector database_api::list_assets(const string& lower_bound_symbol, vector database_api_impl::list_assets(const string& lower_bound_symbol, uint32_t limit)const { - FC_ASSERT( limit <= 100 ); + FC_ASSERT( limit <= 101 ); const auto& assets_by_symbol = _db.get_index_type().indices().get(); vector result; result.reserve(limit); @@ -857,6 +1225,16 @@ vector database_api_impl::list_assets(const string& lower_bound_sy return result; } +uint64_t database_api::get_asset_count()const +{ + return my->get_asset_count(); +} + +uint64_t database_api_impl::get_asset_count()const +{ + return _db.get_index_type().indices().size(); +} + vector> database_api::lookup_asset_symbols(const vector& symbols_or_ids)const { return my->lookup_asset_symbols( symbols_or_ids ); @@ -871,7 +1249,7 @@ vector> database_api_impl::lookup_asset_symbols(const vec [this, &assets_by_symbol](const string& symbol_or_id) -> optional { if( !symbol_or_id.empty() && std::isdigit(symbol_or_id[0]) ) { - auto ptr = _db.find(variant(symbol_or_id).as()); + auto ptr = _db.find(variant(symbol_or_id, 1).as(1)); return ptr == nullptr? 
optional() : *ptr; } auto itr = assets_by_symbol.find(symbol_or_id); @@ -886,7 +1264,7 @@ vector> database_api_impl::lookup_asset_symbols(const vec // // ////////////////////////////////////////////////////////////////////// -vector database_api::get_limit_orders(asset_id_type a, asset_id_type b, uint32_t limit)const +vector database_api::get_limit_orders(std::string a, std::string b, uint32_t limit)const { return my->get_limit_orders( a, b, limit ); } @@ -894,74 +1272,77 @@ vector database_api::get_limit_orders(asset_id_type a, asset /** * @return the limit orders for both sides of the book for the two assets specified up to limit number on each side. */ -vector database_api_impl::get_limit_orders(asset_id_type a, asset_id_type b, uint32_t limit)const +vector database_api_impl::get_limit_orders(const std::string& a, const std::string& b, uint32_t limit)const { - const auto& limit_order_idx = _db.get_index_type(); - const auto& limit_price_idx = limit_order_idx.indices().get(); - - vector result; + FC_ASSERT( limit <= 300 ); - uint32_t count = 0; - auto limit_itr = limit_price_idx.lower_bound(price::max(a,b)); - auto limit_end = limit_price_idx.upper_bound(price::min(a,b)); - while(limit_itr != limit_end && count < limit) - { - result.push_back(*limit_itr); - ++limit_itr; - ++count; - } - count = 0; - limit_itr = limit_price_idx.lower_bound(price::max(b,a)); - limit_end = limit_price_idx.upper_bound(price::min(b,a)); - while(limit_itr != limit_end && count < limit) - { - result.push_back(*limit_itr); - ++limit_itr; - ++count; - } + const asset_id_type asset_a_id = get_asset_from_string(a)->id; + const asset_id_type asset_b_id = get_asset_from_string(b)->id; - return result; + return get_limit_orders(asset_a_id, asset_b_id, limit); } -vector database_api::get_call_orders(asset_id_type a, uint32_t limit)const +vector database_api::get_call_orders(const std::string& a, uint32_t limit)const { return my->get_call_orders( a, limit ); } -vector database_api_impl::get_call_orders(asset_id_type a, uint32_t limit)const +vector database_api_impl::get_call_orders(const std::string& a, uint32_t limit)const { + FC_ASSERT( limit <= 300 ); + + const asset_id_type asset_a_id = get_asset_from_string(a)->id; const auto& call_index = _db.get_index_type().indices().get(); - const asset_object& mia = _db.get(a); + const asset_object& mia = _db.get(asset_a_id); price index_price = price::min(mia.bitasset_data(_db).options.short_backing_asset, mia.get_id()); - - return vector(call_index.lower_bound(index_price.min()), - call_index.lower_bound(index_price.max())); + + vector< call_order_object> result; + auto itr_min = call_index.lower_bound(index_price.min()); + auto itr_max = call_index.lower_bound(index_price.max()); + while( itr_min != itr_max && result.size() < limit ) + { + result.emplace_back(*itr_min); + ++itr_min; + } + return result; } -vector database_api::get_settle_orders(asset_id_type a, uint32_t limit)const +vector database_api::get_settle_orders(const std::string& a, uint32_t limit)const { return my->get_settle_orders( a, limit ); } -vector database_api_impl::get_settle_orders(asset_id_type a, uint32_t limit)const +vector database_api_impl::get_settle_orders(const std::string& a, uint32_t limit)const { + FC_ASSERT( limit <= 300 ); + + const asset_id_type asset_a_id = get_asset_from_string(a)->id; const auto& settle_index = _db.get_index_type().indices().get(); - const asset_object& mia = _db.get(a); - return vector(settle_index.lower_bound(mia.get_id()), - settle_index.upper_bound(mia.get_id())); + 
const asset_object& mia = _db.get(asset_a_id); + + vector result; + auto itr_min = settle_index.lower_bound(mia.get_id()); + auto itr_max = settle_index.upper_bound(mia.get_id()); + while( itr_min != itr_max && result.size() < limit ) + { + result.emplace_back(*itr_min); + ++itr_min; + } + return result; } -vector database_api::get_margin_positions( const account_id_type& id )const +vector database_api::get_margin_positions( const std::string account_id_or_name )const { - return my->get_margin_positions( id ); + return my->get_margin_positions( account_id_or_name ); } -vector database_api_impl::get_margin_positions( const account_id_type& id )const +vector database_api_impl::get_margin_positions( const std::string account_id_or_name )const { try { const auto& idx = _db.get_index_type(); const auto& aidx = idx.indices().get(); + const account_id_type id = get_account_from_string(account_id_or_name)->id; auto start = aidx.lower_bound( boost::make_tuple( id, asset_id_type(0) ) ); auto end = aidx.lower_bound( boost::make_tuple( id+1, asset_id_type(0) ) ); vector result; @@ -971,147 +1352,128 @@ vector database_api_impl::get_margin_positions( const account ++start; } return result; - } FC_CAPTURE_AND_RETHROW( (id) ) + } FC_CAPTURE_AND_RETHROW( (account_id_or_name) ) +} + +vector database_api::get_collateral_bids(const std::string& asset, uint32_t limit, uint32_t start)const +{ + return my->get_collateral_bids( asset, limit, start ); } -void database_api::subscribe_to_market(std::function callback, asset_id_type a, asset_id_type b) +vector database_api_impl::get_collateral_bids(const std::string& asset, uint32_t limit, uint32_t skip)const +{ try { + FC_ASSERT( limit <= 100 ); + const asset_id_type asset_id = get_asset_from_string(asset)->id; + const asset_object& swan = asset_id(_db); + FC_ASSERT( swan.is_market_issued() ); + const asset_bitasset_data_object& bad = swan.bitasset_data(_db); + const asset_object& back = bad.options.short_backing_asset(_db); + const auto& idx = _db.get_index_type(); + const auto& aidx = idx.indices().get(); + auto start = aidx.lower_bound( boost::make_tuple( asset_id, price::max(back.id, asset_id), collateral_bid_id_type() ) ); + auto end = aidx.lower_bound( boost::make_tuple( asset_id, price::min(back.id, asset_id), collateral_bid_id_type(GRAPHENE_DB_MAX_INSTANCE_ID) ) ); + vector result; + while( skip-- > 0 && start != end ) { ++start; } + while( start != end && limit-- > 0) + { + result.push_back(*start); + ++start; + } + return result; +} FC_CAPTURE_AND_RETHROW( (asset)(limit)(skip) ) } + +void database_api::subscribe_to_market(std::function callback, const std::string& a, const std::string& b) { my->subscribe_to_market( callback, a, b ); } -void database_api_impl::subscribe_to_market(std::function callback, asset_id_type a, asset_id_type b) +void database_api_impl::subscribe_to_market(std::function callback, const std::string& a, const std::string& b) { - if(a > b) std::swap(a,b); - FC_ASSERT(a != b); - _market_subscriptions[ std::make_pair(a,b) ] = callback; + auto asset_a_id = get_asset_from_string(a)->id; + auto asset_b_id = get_asset_from_string(b)->id; + + if(asset_a_id > asset_b_id) std::swap(asset_a_id,asset_b_id); + FC_ASSERT(asset_a_id != asset_b_id); + _market_subscriptions[ std::make_pair(asset_a_id,asset_b_id) ] = callback; } -void database_api::unsubscribe_from_market(asset_id_type a, asset_id_type b) +void database_api::unsubscribe_from_market(const std::string& a, const std::string& b) { my->unsubscribe_from_market( a, b ); } -void 
database_api_impl::unsubscribe_from_market(asset_id_type a, asset_id_type b) +void database_api_impl::unsubscribe_from_market(const std::string& a, const std::string& b) { - if(a > b) std::swap(a,b); - FC_ASSERT(a != b); - _market_subscriptions.erase(std::make_pair(a,b)); + auto asset_a_id = get_asset_from_string(a)->id; + auto asset_b_id = get_asset_from_string(b)->id; + + if(a > b) std::swap(asset_a_id,asset_b_id); + FC_ASSERT(asset_a_id != asset_b_id); + _market_subscriptions.erase(std::make_pair(asset_a_id,asset_b_id)); } +string database_api_impl::price_to_string( const price& _price, const asset_object& _base, const asset_object& _quote ) +{ try { + if( _price.base.asset_id == _base.id && _price.quote.asset_id == _quote.id ) + return graphene::app::price_to_string( _price, _base.precision, _quote.precision ); + else if( _price.base.asset_id == _quote.id && _price.quote.asset_id == _base.id ) + return graphene::app::price_to_string( ~_price, _base.precision, _quote.precision ); + else + FC_ASSERT( !"bad parameters" ); +} FC_CAPTURE_AND_RETHROW( (_price)(_base)(_quote) ) } + market_ticker database_api::get_ticker( const string& base, const string& quote )const { - return my->get_ticker( base, quote ); + return my->get_ticker( base, quote ); } -market_ticker database_api_impl::get_ticker( const string& base, const string& quote )const +market_ticker database_api_impl::get_ticker( const string& base, const string& quote, bool skip_order_book )const { - auto assets = lookup_asset_symbols( {base, quote} ); + FC_ASSERT( _app_options && _app_options->has_market_history_plugin, "Market history plugin is not enabled." ); + + const auto assets = lookup_asset_symbols( {base, quote} ); + FC_ASSERT( assets[0], "Invalid base asset symbol: ${s}", ("s",base) ); FC_ASSERT( assets[1], "Invalid quote asset symbol: ${s}", ("s",quote) ); auto base_id = assets[0]->id; auto quote_id = assets[1]->id; - - market_ticker result; - - result.base = base; - result.quote = quote; - result.base_volume = 0; - result.quote_volume = 0; - result.percent_change = 0; - result.lowest_ask = 0; - result.highest_bid = 0; - - auto price_to_real = [&]( const share_type a, int p ) { return double( a.value ) / pow( 10, p ); }; - - try { - if( base_id > quote_id ) std::swap(base_id, quote_id); - - uint32_t day = 86400; - auto now = fc::time_point_sec( fc::time_point::now() ); - auto orders = get_order_book( base, quote, 1 ); - auto trades = get_trade_history( base, quote, now, fc::time_point_sec( now.sec_since_epoch() - day ), 100 ); - - result.latest = trades[0].price; - - for ( market_trade t: trades ) - { - result.base_volume += t.value; - result.quote_volume += t.amount; - } - - while (trades.size() == 100) - { - trades = get_trade_history( base, quote, trades[99].date, fc::time_point_sec( now.sec_since_epoch() - day ), 100 ); - - for ( market_trade t: trades ) - { - result.base_volume += t.value; - result.quote_volume += t.amount; - } - } - - trades = get_trade_history( base, quote, trades.back().date, fc::time_point_sec(), 1 ); - result.percent_change = trades.size() > 0 ? 
( ( result.latest / trades.back().price ) - 1 ) * 100 : 0; - - //if (assets[0]->id == base_id) + if( base_id > quote_id ) std::swap( base_id, quote_id ); + const auto& ticker_idx = _db.get_index_type().indices().get(); + auto itr = ticker_idx.find( std::make_tuple( base_id, quote_id ) ); + const fc::time_point_sec now = _db.head_block_time(); + if( itr != ticker_idx.end() ) + { + order_book orders; + if (!skip_order_book) { - result.lowest_ask = orders.asks[0].price; - result.highest_bid = orders.bids[0].price; + orders = get_order_book(assets[0]->symbol, assets[1]->symbol, 1); } - - return result; - } FC_CAPTURE_AND_RETHROW( (base)(quote) ) + return market_ticker(*itr, now, *assets[0], *assets[1], orders); + } + // if no ticker is found for this market we return an empty ticker + market_ticker empty_result(now, *assets[0], *assets[1]); + return empty_result; } market_volume database_api::get_24_volume( const string& base, const string& quote )const { - return my->get_24_volume( base, quote ); + return my->get_24_volume( base, quote ); } market_volume database_api_impl::get_24_volume( const string& base, const string& quote )const { - auto assets = lookup_asset_symbols( {base, quote} ); - FC_ASSERT( assets[0], "Invalid base asset symbol: ${s}", ("s",base) ); - FC_ASSERT( assets[1], "Invalid quote asset symbol: ${s}", ("s",quote) ); - - auto base_id = assets[0]->id; - auto quote_id = assets[1]->id; + const auto& ticker = get_ticker( base, quote, true ); market_volume result; - result.base = base; - result.quote = quote; - result.base_volume = 0; - result.quote_volume = 0; - - try { - if( base_id > quote_id ) std::swap(base_id, quote_id); + result.time = ticker.time; + result.base = ticker.base; + result.quote = ticker.quote; + result.base_volume = ticker.base_volume; + result.quote_volume = ticker.quote_volume; - uint32_t bucket_size = 86400; - auto now = fc::time_point_sec( fc::time_point::now() ); - - auto trades = get_trade_history( base, quote, now, fc::time_point_sec( now.sec_since_epoch() - bucket_size ), 100 ); - - for ( market_trade t: trades ) - { - result.base_volume += t.value; - result.quote_volume += t.amount; - } - - while (trades.size() == 100) - { - trades = get_trade_history( base, quote, trades[99].date, fc::time_point_sec( now.sec_since_epoch() - bucket_size ), 100 ); - - for ( market_trade t: trades ) - { - result.base_volume += t.value; - result.quote_volume += t.amount; - } - } - - return result; - } FC_CAPTURE_AND_RETHROW( (base)(quote) ) -} + return result; +} order_book database_api::get_order_book( const string& base, const string& quote, unsigned limit )const { @@ -1135,32 +1497,22 @@ order_book database_api_impl::get_order_book( const string& base, const string& auto quote_id = assets[1]->id; auto orders = get_limit_orders( base_id, quote_id, limit ); - - auto asset_to_real = [&]( const asset& a, int p ) { return double(a.amount.value)/pow( 10, p ); }; - auto price_to_real = [&]( const price& p ) - { - if( p.base.asset_id == base_id ) - return asset_to_real( p.base, assets[0]->precision ) / asset_to_real( p.quote, assets[1]->precision ); - else - return asset_to_real( p.quote, assets[0]->precision ) / asset_to_real( p.base, assets[1]->precision ); - }; - for( const auto& o : orders ) { if( o.sell_price.base.asset_id == base_id ) { order ord; - ord.price = price_to_real( o.sell_price ); - ord.quote = asset_to_real( share_type( ( uint128_t( o.for_sale.value ) * o.sell_price.quote.amount.value ) / o.sell_price.base.amount.value ), assets[1]->precision ); - ord.base 
= asset_to_real( o.for_sale, assets[0]->precision ); + ord.price = price_to_string( o.sell_price, *assets[0], *assets[1] ); + ord.quote = assets[1]->amount_to_string( share_type( ( uint128_t( o.for_sale.value ) * o.sell_price.quote.amount.value ) / o.sell_price.base.amount.value ) ); + ord.base = assets[0]->amount_to_string( o.for_sale ); result.bids.push_back( ord ); } else { order ord; - ord.price = price_to_real( o.sell_price ); - ord.quote = asset_to_real( o.for_sale, assets[1]->precision ); - ord.base = asset_to_real( share_type( ( uint64_t( o.for_sale.value ) * o.sell_price.quote.amount.value ) / o.sell_price.base.amount.value ), assets[0]->precision ); + ord.price = price_to_string( o.sell_price, *assets[0], *assets[1] ); + ord.quote = assets[1]->amount_to_string( o.for_sale ); + ord.base = assets[0]->amount_to_string( share_type( ( uint128_t( o.for_sale.value ) * o.sell_price.quote.amount.value ) / o.sell_price.base.amount.value ) ); result.asks.push_back( ord ); } } @@ -1168,6 +1520,36 @@ order_book database_api_impl::get_order_book( const string& base, const string& return result; } +vector database_api::get_top_markets(uint32_t limit)const +{ + return my->get_top_markets(limit); +} + +vector database_api_impl::get_top_markets(uint32_t limit)const +{ + FC_ASSERT( _app_options && _app_options->has_market_history_plugin, "Market history plugin is not enabled." ); + + FC_ASSERT( limit <= 100 ); + + const auto& volume_idx = _db.get_index_type().indices().get(); + auto itr = volume_idx.rbegin(); + vector result; + result.reserve(limit); + const fc::time_point_sec now = _db.head_block_time(); + + while( itr != volume_idx.rend() && result.size() < limit) + { + const asset_object base = itr->base(_db); + const asset_object quote = itr->quote(_db); + order_book orders; + orders = get_order_book(base.symbol, quote.symbol, 1); + + result.emplace_back(market_ticker(*itr, now, base, quote, orders)); + ++itr; + } + return result; +} + vector database_api::get_trade_history( const string& base, const string& quote, fc::time_point_sec start, @@ -1183,6 +1565,8 @@ vector database_api_impl::get_trade_history( const string& base, fc::time_point_sec stop, unsigned limit )const { + FC_ASSERT( _app_options && _app_options->has_market_history_plugin, "Market history plugin is not enabled." 
); + FC_ASSERT( limit <= 100 ); auto assets = lookup_asset_symbols( {base, quote} ); @@ -1192,17 +1576,105 @@ vector database_api_impl::get_trade_history( const string& base, auto base_id = assets[0]->id; auto quote_id = assets[1]->id; + if( base_id > quote_id ) std::swap( base_id, quote_id ); + + if ( start.sec_since_epoch() == 0 ) + start = fc::time_point_sec( fc::time_point::now() ); + + uint32_t count = 0; + const auto& history_idx = _db.get_index_type().indices().get(); + auto itr = history_idx.lower_bound( std::make_tuple( base_id, quote_id, start ) ); + vector result; + + while( itr != history_idx.end() && count < limit && !( itr->key.base != base_id || itr->key.quote != quote_id || itr->time < stop ) ) + { + { + market_trade trade; + + if( assets[0]->id == itr->op.receives.asset_id ) + { + trade.amount = assets[1]->amount_to_string( itr->op.pays ); + trade.value = assets[0]->amount_to_string( itr->op.receives ); + } + else + { + trade.amount = assets[1]->amount_to_string( itr->op.receives ); + trade.value = assets[0]->amount_to_string( itr->op.pays ); + } + + trade.date = itr->time; + trade.price = price_to_string( itr->op.fill_price, *assets[0], *assets[1] ); + + if( itr->op.is_maker ) + { + trade.sequence = -itr->key.sequence; + trade.side1_account_id = itr->op.account_id; + } + else + trade.side2_account_id = itr->op.account_id; + + auto next_itr = std::next(itr); + // Trades are usually tracked in each direction, exception: for global settlement only one side is recorded + if( next_itr != history_idx.end() && next_itr->key.base == base_id && next_itr->key.quote == quote_id + && next_itr->time == itr->time && next_itr->op.is_maker != itr->op.is_maker ) + { // next_itr now could be the other direction // FIXME not 100% sure + if( next_itr->op.is_maker ) + { + trade.sequence = -next_itr->key.sequence; + trade.side1_account_id = next_itr->op.account_id; + } + else + trade.side2_account_id = next_itr->op.account_id; + // skip the other direction + itr = next_itr; + } + + result.push_back( trade ); + ++count; + } + + ++itr; + } + + return result; +} + +vector database_api::get_trade_history_by_sequence( + const string& base, + const string& quote, + int64_t start, + fc::time_point_sec stop, + unsigned limit )const +{ + return my->get_trade_history_by_sequence( base, quote, start, stop, limit ); +} + +vector database_api_impl::get_trade_history_by_sequence( + const string& base, + const string& quote, + int64_t start, + fc::time_point_sec stop, + unsigned limit )const +{ + FC_ASSERT( _app_options && _app_options->has_market_history_plugin, "Market history plugin is not enabled." 
); + + FC_ASSERT( limit <= 100 ); + FC_ASSERT( start >= 0 ); + int64_t start_seq = -start; + + auto assets = lookup_asset_symbols( {base, quote} ); + FC_ASSERT( assets[0], "Invalid base asset symbol: ${s}", ("s",base) ); + FC_ASSERT( assets[1], "Invalid quote asset symbol: ${s}", ("s",quote) ); + + auto base_id = assets[0]->id; + auto quote_id = assets[1]->id; + if( base_id > quote_id ) std::swap( base_id, quote_id ); const auto& history_idx = _db.get_index_type().indices().get(); history_key hkey; hkey.base = base_id; hkey.quote = quote_id; - hkey.sequence = std::numeric_limits::min(); - - auto price_to_real = [&]( const share_type a, int p ) { return double( a.value ) / pow( 10, p ); }; - - if ( start.sec_since_epoch() == 0 ) - start = fc::time_point_sec( fc::time_point::now() ); + hkey.sequence = start_seq; uint32_t count = 0; auto itr = history_idx.lower_bound( hkey ); @@ -1210,30 +1682,62 @@ vector database_api_impl::get_trade_history( const string& base, while( itr != history_idx.end() && count < limit && !( itr->key.base != base_id || itr->key.quote != quote_id || itr->time < stop ) ) { - if( itr->time < start ) + if( itr->key.sequence == start_seq ) // found the key, should skip this and the other direction if found + { + auto next_itr = std::next(itr); + if( next_itr != history_idx.end() && next_itr->key.base == base_id && next_itr->key.quote == quote_id + && next_itr->time == itr->time && next_itr->op.is_maker != itr->op.is_maker ) + { // next_itr now could be the other direction // FIXME not 100% sure + // skip the other direction + itr = next_itr; + } + } + else { market_trade trade; if( assets[0]->id == itr->op.receives.asset_id ) { - trade.amount = price_to_real( itr->op.pays.amount, assets[1]->precision ); - trade.value = price_to_real( itr->op.receives.amount, assets[0]->precision ); + trade.amount = assets[1]->amount_to_string( itr->op.pays ); + trade.value = assets[0]->amount_to_string( itr->op.receives ); } else { - trade.amount = price_to_real( itr->op.receives.amount, assets[1]->precision ); - trade.value = price_to_real( itr->op.pays.amount, assets[0]->precision ); + trade.amount = assets[1]->amount_to_string( itr->op.receives ); + trade.value = assets[0]->amount_to_string( itr->op.pays ); } trade.date = itr->time; - trade.price = trade.value / trade.amount; + trade.price = price_to_string( itr->op.fill_price, *assets[0], *assets[1] ); + + if( itr->op.is_maker ) + { + trade.sequence = -itr->key.sequence; + trade.side1_account_id = itr->op.account_id; + } + else + trade.side2_account_id = itr->op.account_id; + + auto next_itr = std::next(itr); + // Trades are usually tracked in each direction, exception: for global settlement only one side is recorded + if( next_itr != history_idx.end() && next_itr->key.base == base_id && next_itr->key.quote == quote_id + && next_itr->time == itr->time && next_itr->op.is_maker != itr->op.is_maker ) + { // next_itr now could be the other direction // FIXME not 100% sure + if( next_itr->op.is_maker ) + { + trade.sequence = -next_itr->key.sequence; + trade.side1_account_id = next_itr->op.account_id; + } + else + trade.side2_account_id = next_itr->op.account_id; + // skip the other direction + itr = next_itr; + } result.push_back( trade ); ++count; } - // Trades are tracked in each direction. 
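// Paging sketch (comment-only aside): the negated-sequence convention above suggests that the
// sequence of the last returned trade can be passed back as `start` to fetch the next page;
// that record, and its paired opposite-direction fill, are then skipped. "BASE"/"QUOTE" are
// placeholder symbols and the flow is an assumption about intended client usage.
//
//   auto page = api.get_trade_history( "BASE", "QUOTE", fc::time_point_sec(), fc::time_point_sec(), 100 );
//   while( page.size() == 100 )
//      page = api.get_trade_history_by_sequence( "BASE", "QUOTE", page.back().sequence,
//                                                fc::time_point_sec(), 100 );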
- ++itr; ++itr; } @@ -1251,22 +1755,6 @@ vector> database_api::get_witnesses(const vectorget_witnesses( witness_ids ); } -vector database_api::get_workers_by_account(account_id_type account)const -{ - const auto& idx = my->_db.get_index_type().indices().get(); - auto itr = idx.find(account); - vector result; - - if( itr != idx.end() && itr->worker_account == account ) - { - result.emplace_back( *itr ); - ++itr; - } - - return result; -} - - vector> database_api_impl::get_witnesses(const vector& witness_ids)const { vector> result; result.reserve(witness_ids.size()); @@ -1279,14 +1767,15 @@ vector> database_api_impl::get_witnesses(const vector database_api::get_witness_by_account(account_id_type account)const +fc::optional database_api::get_witness_by_account(const std::string account_id_or_name)const { - return my->get_witness_by_account( account ); + return my->get_witness_by_account( account_id_or_name ); } -fc::optional database_api_impl::get_witness_by_account(account_id_type account) const +fc::optional database_api_impl::get_witness_by_account(const std::string account_id_or_name) const { const auto& idx = _db.get_index_type().indices().get(); + const account_id_type account = get_account_from_string(account_id_or_name)->id; auto itr = idx.find(account); if( itr != idx.end() ) return *itr; @@ -1354,14 +1843,15 @@ vector> database_api_impl::get_committee_membe return result; } -fc::optional database_api::get_committee_member_by_account(account_id_type account)const +fc::optional database_api::get_committee_member_by_account(const std::string account_id_or_name)const { - return my->get_committee_member_by_account( account ); + return my->get_committee_member_by_account( account_id_or_name ); } -fc::optional database_api_impl::get_committee_member_by_account(account_id_type account) const +fc::optional database_api_impl::get_committee_member_by_account(const std::string account_id_or_name) const { const auto& idx = _db.get_index_type().indices().get(); + const account_id_type account = get_account_from_string(account_id_or_name)->id; auto itr = idx.find(account); if( itr != idx.end() ) return *itr; @@ -1396,6 +1886,70 @@ map database_api_impl::lookup_committee_member return committee_members_by_account_name; } +uint64_t database_api::get_committee_count()const +{ + return my->get_committee_count(); +} + +uint64_t database_api_impl::get_committee_count()const +{ + return _db.get_index_type().indices().size(); +} + + +////////////////////////////////////////////////////////////////////// +// // +// Workers // +// // +////////////////////////////////////////////////////////////////////// + +vector database_api::get_all_workers()const +{ + return my->get_all_workers(); +} + +vector database_api_impl::get_all_workers()const +{ + vector result; + const auto& workers_idx = _db.get_index_type().indices().get(); + for( const auto& w : workers_idx ) + { + result.push_back( w ); + } + return result; +} + +vector> database_api::get_workers_by_account(const std::string account_id_or_name)const +{ + return my->get_workers_by_account( account_id_or_name ); +} + +vector> database_api_impl::get_workers_by_account(const std::string account_id_or_name)const +{ + vector> result; + const auto& workers_idx = _db.get_index_type().indices().get(); + + const account_id_type account = get_account_from_string(account_id_or_name)->id; + for( const auto& w : workers_idx ) + { + if( w.worker_account == account ) + result.push_back( w ); + } + return result; +} + +uint64_t database_api::get_worker_count()const +{ + 
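// Usage sketch (comment-only aside): the worker queries added in this section accept either an
// account name or an object id string; the identifiers below are placeholders, not real accounts.
//
//   auto all    = api.get_all_workers();                      // every worker object in the index
//   auto byName = api.get_workers_by_account( "some-account" );
//   auto byId   = api.get_workers_by_account( "1.2.0" );      // same lookup via an id string
//   uint64_t n  = api.get_worker_count();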
return my->get_worker_count(); +} + +uint64_t database_api_impl::get_worker_count()const +{ + return _db.get_index_type().indices().size(); +} + + + ////////////////////////////////////////////////////////////////////// // // // Votes // @@ -1426,7 +1980,7 @@ vector database_api_impl::lookup_vote_ids( const vector& { auto itr = committee_idx.find( id ); if( itr != committee_idx.end() ) - result.emplace_back( variant( *itr ) ); + result.emplace_back( variant( *itr, 2 ) ); // Depth of committee_member_object is 1, add 1 here to be safe else result.emplace_back( variant() ); break; @@ -1435,7 +1989,7 @@ vector database_api_impl::lookup_vote_ids( const vector& { auto itr = witness_idx.find( id ); if( itr != witness_idx.end() ) - result.emplace_back( variant( *itr ) ); + result.emplace_back( variant( *itr, 2 ) ); // Depth of witness_object is 1, add 1 here to be safe else result.emplace_back( variant() ); break; @@ -1444,12 +1998,16 @@ vector database_api_impl::lookup_vote_ids( const vector& { auto itr = for_worker_idx.find( id ); if( itr != for_worker_idx.end() ) { - result.emplace_back( variant( *itr ) ); + result.emplace_back( variant( *itr, 4 ) ); // Depth of worker_object is 3, add 1 here to be safe. + // If we want to extract the balance object inside, + // need to increase this value } else { auto itr = against_worker_idx.find( id ); if( itr != against_worker_idx.end() ) { - result.emplace_back( variant( *itr ) ); + result.emplace_back( variant( *itr, 4 ) ); // Depth of worker_object is 3, add 1 here to be safe. + // If we want to extract the balance object inside, + // need to increase this value } else { result.emplace_back( variant() ); @@ -1458,6 +2016,8 @@ vector database_api_impl::lookup_vote_ids( const vector& break; } case vote_id_type::VOTE_TYPE_COUNT: break; // supress unused enum value warnings + default: + FC_CAPTURE_AND_THROW( fc::out_of_range_exception, (id) ); } } return result; @@ -1479,6 +2039,18 @@ std::string database_api_impl::get_transaction_hex(const signed_transaction& trx return fc::to_hex(fc::raw::pack(trx)); } +std::string database_api::get_transaction_hex_without_sig( + const signed_transaction &trx) const +{ + return my->get_transaction_hex_without_sig(trx); +} + +std::string database_api_impl::get_transaction_hex_without_sig( + const signed_transaction &trx) const +{ + return fc::to_hex(fc::raw::pack(static_cast(trx))); +} + set database_api::get_required_signatures( const signed_transaction& trx, const flat_set& available_keys )const { return my->get_required_signatures( trx, available_keys ); @@ -1486,13 +2058,11 @@ set database_api::get_required_signatures( const signed_transac set database_api_impl::get_required_signatures( const signed_transaction& trx, const flat_set& available_keys )const { - wdump((trx)(available_keys)); auto result = trx.get_required_signatures( _db.get_chain_id(), available_keys, [&]( account_id_type id ){ return &id(_db).active; }, [&]( account_id_type id ){ return &id(_db).owner; }, _db.get_global_properties().parameters.max_authority_depth ); - wdump((result)); return result; } @@ -1507,7 +2077,6 @@ set
database_api::get_potential_address_signatures( const signed_transa set database_api_impl::get_potential_signatures( const signed_transaction& trx )const { - wdump((trx)); set result; trx.get_required_signatures( _db.get_chain_id(), @@ -1529,7 +2098,15 @@ set database_api_impl::get_potential_signatures( const signed_t _db.get_global_properties().parameters.max_authority_depth ); - wdump((result)); + // Insert keys in required "other" authories + flat_set required_active; + flat_set required_owner; + vector other; + trx.get_required_authorities( required_active, required_owner, other ); + for( const auto& auth : other ) + for( const auto& key : auth.get_keys() ) + result.insert( key ); + return result; } @@ -1566,40 +2143,38 @@ bool database_api::verify_authority( const signed_transaction& trx )const bool database_api_impl::verify_authority( const signed_transaction& trx )const { trx.verify_authority( _db.get_chain_id(), - [&]( account_id_type id ){ return &id(_db).active; }, - [&]( account_id_type id ){ return &id(_db).owner; }, + [this]( account_id_type id ){ return &id(_db).active; }, + [this]( account_id_type id ){ return &id(_db).owner; }, _db.get_global_properties().parameters.max_authority_depth ); return true; } -bool database_api::verify_account_authority( const string& name_or_id, const flat_set& signers )const +bool database_api::verify_account_authority( const string& account_name_or_id, const flat_set& signers )const { - return my->verify_account_authority( name_or_id, signers ); + return my->verify_account_authority( account_name_or_id, signers ); } -bool database_api_impl::verify_account_authority( const string& name_or_id, const flat_set& keys )const +bool database_api_impl::verify_account_authority( const string& account_name_or_id, + const flat_set& keys )const { - FC_ASSERT( name_or_id.size() > 0); - const account_object* account = nullptr; - if (std::isdigit(name_or_id[0])) - account = _db.find(fc::variant(name_or_id).as()); - else + // create a dummy transfer + transfer_operation op; + op.from = get_account_from_string(account_name_or_id)->id; + std::vector ops; + ops.emplace_back(op); + + try + { + graphene::chain::verify_authority(ops, keys, + [this]( account_id_type id ){ return &id(_db).active; }, + [this]( account_id_type id ){ return &id(_db).owner; } ); + } + catch (fc::exception& ex) { - const auto& idx = _db.get_index_type().indices().get(); - auto itr = idx.find(name_or_id); - if (itr != idx.end()) - account = &*itr; + return false; } - FC_ASSERT( account, "no such account" ); - - /// reuse trx.verify_authority by creating a dummy transfer - signed_transaction trx; - transfer_operation op; - op.from = account->id; - trx.operations.emplace_back(op); - - return verify_authority( trx ); + return true; } processed_transaction database_api::validate_transaction( const signed_transaction& trx )const @@ -1612,9 +2187,9 @@ processed_transaction database_api_impl::validate_transaction( const signed_tran return _db.validate_transaction(trx); } -vector< fc::variant > database_api::get_required_fees( const vector& ops, asset_id_type id )const +vector< fc::variant > database_api::get_required_fees( const vector& ops, const std::string& asset_id_or_symbol )const { - return my->get_required_fees( ops, id ); + return my->get_required_fees( ops, asset_id_or_symbol ); } /** @@ -1643,7 +2218,7 @@ struct get_required_fees_helper { asset fee = current_fee_schedule.set_fee( op, core_exchange_rate ); fc::variant result; - fc::to_variant( fee, result ); + fc::to_variant( fee, result, 
GRAPHENE_NET_MAX_NESTED_OBJECTS ); return result; } } @@ -1663,7 +2238,7 @@ struct get_required_fees_helper // two mutually recursive functions instead of a visitor result.first = current_fee_schedule.set_fee( proposal_create_op, core_exchange_rate ); fc::variant vresult; - fc::to_variant( result, vresult ); + fc::to_variant( result, vresult, GRAPHENE_NET_MAX_NESTED_OBJECTS ); return vresult; } @@ -1673,7 +2248,7 @@ struct get_required_fees_helper uint32_t current_recursion = 0; }; -vector< fc::variant > database_api_impl::get_required_fees( const vector& ops, asset_id_type id )const +vector< fc::variant > database_api_impl::get_required_fees( const vector& ops, const std::string& asset_id_or_symbol )const { vector< operation > _ops = ops; // @@ -1683,7 +2258,7 @@ vector< fc::variant > database_api_impl::get_required_fees( const vector result; result.reserve(ops.size()); - const asset_object& a = id(_db); + const asset_object& a = *get_asset_from_string(asset_id_or_symbol); get_required_fees_helper helper( _db.current_fee_schedule(), a.options.core_exchange_rate, @@ -1701,16 +2276,17 @@ vector< fc::variant > database_api_impl::get_required_fees( const vector database_api::get_proposed_transactions( account_id_type id )const +vector database_api::get_proposed_transactions( const std::string account_id_or_name )const { - return my->get_proposed_transactions( id ); + return my->get_proposed_transactions( account_id_or_name ); } /** TODO: add secondary index that will accelerate this process */ -vector database_api_impl::get_proposed_transactions( account_id_type id )const +vector database_api_impl::get_proposed_transactions( const std::string account_id_or_name )const { const auto& idx = _db.get_index_type(); vector result; + const account_id_type id = get_account_from_string(account_id_or_name)->id; idx.inspect_all_objects( [&](const object& obj){ const proposal_object& p = static_cast(obj); @@ -1749,6 +2325,56 @@ vector database_api_impl::get_blinded_balances( const fl return result; } +////////////////////////////////////////////////////////////////////// +// // +// Withdrawals // +// // +////////////////////////////////////////////////////////////////////// + +vector database_api::get_withdraw_permissions_by_giver(const std::string account_id_or_name, withdraw_permission_id_type start, uint32_t limit)const +{ + return my->get_withdraw_permissions_by_giver( account_id_or_name, start, limit ); +} + +vector database_api_impl::get_withdraw_permissions_by_giver(const std::string account_id_or_name, withdraw_permission_id_type start, uint32_t limit)const +{ + FC_ASSERT( limit <= 101 ); + vector result; + + const auto& withdraw_idx = _db.get_index_type().indices().get(); + auto withdraw_index_end = withdraw_idx.end(); + const account_id_type account = get_account_from_string(account_id_or_name)->id; + auto withdraw_itr = withdraw_idx.lower_bound(boost::make_tuple(account, start)); + while(withdraw_itr != withdraw_index_end && withdraw_itr->withdraw_from_account == account && result.size() < limit) + { + result.push_back(*withdraw_itr); + ++withdraw_itr; + } + return result; +} + +vector database_api::get_withdraw_permissions_by_recipient(const std::string account_id_or_name, withdraw_permission_id_type start, uint32_t limit)const +{ + return my->get_withdraw_permissions_by_recipient( account_id_or_name, start, limit ); +} + +vector database_api_impl::get_withdraw_permissions_by_recipient(const std::string account_id_or_name, withdraw_permission_id_type start, uint32_t limit)const +{ + FC_ASSERT( 
limit <= 101 ); + vector result; + + const auto& withdraw_idx = _db.get_index_type().indices().get(); + auto withdraw_index_end = withdraw_idx.end(); + const account_id_type account = get_account_from_string(account_id_or_name)->id; + auto withdraw_itr = withdraw_idx.lower_bound(boost::make_tuple(account, start)); + while(withdraw_itr != withdraw_index_end && withdraw_itr->authorized_account == account && result.size() < limit) + { + result.push_back(*withdraw_itr); + ++withdraw_itr; + } + return result; +} + ////////////////////////////////////////////////////////////////////// // // // Private methods // @@ -1757,108 +2383,113 @@ vector database_api_impl::get_blinded_balances( const fl void database_api_impl::broadcast_updates( const vector& updates ) { - if( updates.size() ) { + if( updates.size() && _subscribe_callback ) { auto capture_this = shared_from_this(); fc::async([capture_this,updates](){ - capture_this->_subscribe_callback( fc::variant(updates) ); + if(capture_this->_subscribe_callback) + capture_this->_subscribe_callback( fc::variant(updates) ); }); } } -void database_api_impl::on_objects_removed( const vector& objs ) +void database_api_impl::broadcast_market_updates( const market_queue_type& queue) { - /// we need to ensure the database_api is not deleted for the life of the async operation - if( _subscribe_callback ) + if( queue.size() ) { - vector updates; - updates.reserve(objs.size()); - - for( auto obj : objs ) - updates.emplace_back( obj->id ); - broadcast_updates( updates ); + auto capture_this = shared_from_this(); + fc::async([capture_this, this, queue](){ + for( const auto& item : queue ) + { + auto sub = _market_subscriptions.find(item.first); + if( sub != _market_subscriptions.end() ) + sub->second( fc::variant(item.second ) ); + } + }); } +} - if( _market_subscriptions.size() ) - { - map< pair, vector > broadcast_queue; - for( const auto& obj : objs ) - { - const limit_order_object* order = dynamic_cast(obj); - if( order ) - { - auto sub = _market_subscriptions.find( order->get_market() ); - if( sub != _market_subscriptions.end() ) - broadcast_queue[order->get_market()].emplace_back( order->id ); - } - } - if( broadcast_queue.size() ) - { - auto capture_this = shared_from_this(); - fc::async([capture_this,this,broadcast_queue](){ - for( const auto& item : broadcast_queue ) - { - auto sub = _market_subscriptions.find(item.first); - if( sub != _market_subscriptions.end() ) - sub->second( fc::variant(item.second ) ); - } - }); +void database_api_impl::on_objects_removed( const vector& ids, const vector& objs, const flat_set& impacted_accounts) +{ + handle_object_changed(_notify_remove_create, false, ids, impacted_accounts, + [objs](object_id_type id) -> const object* { + auto it = std::find_if( + objs.begin(), objs.end(), + [id](const object* o) {return o != nullptr && o->id == id;}); + + if (it != objs.end()) + return *it; + + return nullptr; } - } + ); } -void database_api_impl::on_objects_changed(const vector& ids) +void database_api_impl::on_objects_new(const vector& ids, const flat_set& impacted_accounts) { - vector updates; - map< pair, vector > market_broadcast_queue; + handle_object_changed(_notify_remove_create, true, ids, impacted_accounts, + std::bind(&object_database::find_object, &_db, std::placeholders::_1) + ); +} - for(auto id : ids) +void database_api_impl::on_objects_changed(const vector& ids, const flat_set& impacted_accounts) +{ + handle_object_changed(false, true, ids, impacted_accounts, + std::bind(&object_database::find_object, &_db, 
std::placeholders::_1) + ); +} + +void database_api_impl::handle_object_changed(bool force_notify, bool full_object, const vector& ids, const flat_set& impacted_accounts, std::function find_object) +{ + if( _subscribe_callback ) { - const object* obj = nullptr; - if( _subscribe_callback ) - { - obj = _db.find_object( id ); - if( obj ) - { - updates.emplace_back( obj->to_variant() ); - } - else - { - updates.emplace_back(id); // send just the id to indicate removal - } - } + vector updates; - if( _market_subscriptions.size() ) + for(auto id : ids) { - if( !_subscribe_callback ) - obj = _db.find_object( id ); - if( obj ) + if( force_notify || is_subscribed_to_item(id) || is_impacted_account(impacted_accounts) ) { - const limit_order_object* order = dynamic_cast(obj); - if( order ) + if( full_object ) + { + auto obj = find_object(id); + if( obj ) + { + updates.emplace_back( obj->to_variant() ); + } + } + else { - auto sub = _market_subscriptions.find( order->get_market() ); - if( sub != _market_subscriptions.end() ) - market_broadcast_queue[order->get_market()].emplace_back( order->id ); + updates.emplace_back( fc::variant( id, 1 ) ); } } } - } - auto capture_this = shared_from_this(); + if( updates.size() ) + broadcast_updates(updates); + } - /// pushing the future back / popping the prior future if it is complete. - /// if a connection hangs then this could get backed up and result in - /// a failure to exit cleanly. - fc::async([capture_this,this,updates,market_broadcast_queue](){ - if( _subscribe_callback ) _subscribe_callback( updates ); + if( _market_subscriptions.size() ) + { + market_queue_type broadcast_queue; - for( const auto& item : market_broadcast_queue ) + for(auto id : ids) { - auto sub = _market_subscriptions.find(item.first); - if( sub != _market_subscriptions.end() ) - sub->second( fc::variant(item.second ) ); + if( id.is() ) + { + enqueue_if_subscribed_to_market( find_object(id), broadcast_queue, full_object ); + } + else if( id.is() ) + { + enqueue_if_subscribed_to_market( find_object(id), broadcast_queue, full_object ); + } + else if( id.is() ) + { + enqueue_if_subscribed_to_market( find_object(id), broadcast_queue, full_object ); + } } - }); + + if( broadcast_queue.size() ) + broadcast_market_updates(broadcast_queue); + } } /** note: this method cannot yield because it is called in the middle of @@ -1871,7 +2502,7 @@ void database_api_impl::on_applied_block() auto capture_this = shared_from_this(); block_id_type block_id = _db.head_block_id(); fc::async([this,capture_this,block_id](){ - _block_applied_callback(fc::variant(block_id)); + _block_applied_callback(fc::variant(block_id, 1)); }); } @@ -1886,7 +2517,7 @@ void database_api_impl::on_applied_block() continue; const operation_history_object& op = *o_op; - std::pair market; + optional< std::pair > market; switch(op.op.which()) { /* This is sent via the object_changed callback @@ -1902,8 +2533,9 @@ void database_api_impl::on_applied_block() */ default: break; } - if(_market_subscriptions.count(market)) - subscribed_markets_ops[market].push_back(std::make_pair(op.op, op.result)); + if( market.valid() && _market_subscriptions.count(*market) ) + // FIXME this may cause fill_order_operation be pushed before order creation + subscribed_markets_ops[*market].emplace_back(std::make_pair(op.op, op.result)); } /// we need to ensure the database_api is not deleted for the life of the async operation auto capture_this = shared_from_this(); @@ -1912,7 +2544,7 @@ void database_api_impl::on_applied_block() { auto itr = 
_market_subscriptions.find(item.first); if(itr != _market_subscriptions.end()) - itr->second(fc::variant(item.second)); + itr->second(fc::variant(item.second, GRAPHENE_NET_MAX_NESTED_OBJECTS)); } }); } diff --git a/libraries/app/impacted.cpp b/libraries/app/impacted.cpp deleted file mode 100644 index 8578742318..0000000000 --- a/libraries/app/impacted.cpp +++ /dev/null @@ -1,220 +0,0 @@ -/* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -#include -#include - -namespace graphene { namespace app { - -using namespace fc; -using namespace graphene::chain; - -// TODO: Review all of these, especially no-ops -struct get_impacted_account_visitor -{ - flat_set& _impacted; - get_impacted_account_visitor( flat_set& impact ):_impacted(impact) {} - typedef void result_type; - - void operator()( const transfer_operation& op ) - { - _impacted.insert( op.to ); - } - - void operator()( const asset_claim_fees_operation& op ){} - void operator()( const limit_order_create_operation& op ) {} - void operator()( const limit_order_cancel_operation& op ) - { - _impacted.insert( op.fee_paying_account ); - } - void operator()( const call_order_update_operation& op ) {} - void operator()( const fill_order_operation& op ) - { - _impacted.insert( op.account_id ); - } - - void operator()( const account_create_operation& op ) - { - _impacted.insert( op.registrar ); - _impacted.insert( op.referrer ); - add_authority_accounts( _impacted, op.owner ); - add_authority_accounts( _impacted, op.active ); - } - - void operator()( const account_update_operation& op ) - { - _impacted.insert( op.account ); - if( op.owner ) - add_authority_accounts( _impacted, *(op.owner) ); - if( op.active ) - add_authority_accounts( _impacted, *(op.active) ); - } - - void operator()( const account_whitelist_operation& op ) - { - _impacted.insert( op.account_to_list ); - } - - void operator()( const account_upgrade_operation& op ) {} - void operator()( const account_transfer_operation& op ) - { - _impacted.insert( op.new_owner ); - } - - void operator()( const asset_create_operation& op ) {} - void operator()( const asset_update_operation& op ) - { - if( op.new_issuer ) - _impacted.insert( *(op.new_issuer) ); - } - - void operator()( const asset_update_bitasset_operation& op ) {} - void operator()( const asset_update_feed_producers_operation& op ) {} - - void operator()( const asset_issue_operation& op ) - { - _impacted.insert( 
op.issue_to_account ); - } - - void operator()( const asset_reserve_operation& op ) {} - void operator()( const asset_fund_fee_pool_operation& op ) {} - void operator()( const asset_settle_operation& op ) {} - void operator()( const asset_global_settle_operation& op ) {} - void operator()( const asset_publish_feed_operation& op ) {} - void operator()( const witness_create_operation& op ) - { - _impacted.insert( op.witness_account ); - } - void operator()( const witness_update_operation& op ) - { - _impacted.insert( op.witness_account ); - } - - void operator()( const proposal_create_operation& op ) - { - vector other; - for( const auto& proposed_op : op.proposed_ops ) - operation_get_required_authorities( proposed_op.op, _impacted, _impacted, other ); - for( auto& o : other ) - add_authority_accounts( _impacted, o ); - } - - void operator()( const proposal_update_operation& op ) {} - void operator()( const proposal_delete_operation& op ) {} - - void operator()( const withdraw_permission_create_operation& op ) - { - _impacted.insert( op.authorized_account ); - } - - void operator()( const withdraw_permission_update_operation& op ) - { - _impacted.insert( op.authorized_account ); - } - - void operator()( const withdraw_permission_claim_operation& op ) - { - _impacted.insert( op.withdraw_from_account ); - } - - void operator()( const withdraw_permission_delete_operation& op ) - { - _impacted.insert( op.authorized_account ); - } - - void operator()( const committee_member_create_operation& op ) - { - _impacted.insert( op.committee_member_account ); - } - void operator()( const committee_member_update_operation& op ) - { - _impacted.insert( op.committee_member_account ); - } - void operator()( const committee_member_update_global_parameters_operation& op ) {} - - void operator()( const vesting_balance_create_operation& op ) - { - _impacted.insert( op.owner ); - } - - void operator()( const vesting_balance_withdraw_operation& op ) {} - void operator()( const worker_create_operation& op ) {} - void operator()( const custom_operation& op ) {} - void operator()( const assert_operation& op ) {} - void operator()( const balance_claim_operation& op ) {} - - void operator()( const override_transfer_operation& op ) - { - _impacted.insert( op.to ); - _impacted.insert( op.from ); - _impacted.insert( op.issuer ); - } - - void operator()( const transfer_to_blind_operation& op ) - { - _impacted.insert( op.from ); - for( const auto& out : op.outputs ) - add_authority_accounts( _impacted, out.owner ); - } - - void operator()( const blind_transfer_operation& op ) - { - for( const auto& in : op.inputs ) - add_authority_accounts( _impacted, in.owner ); - for( const auto& out : op.outputs ) - add_authority_accounts( _impacted, out.owner ); - } - - void operator()( const transfer_from_blind_operation& op ) - { - _impacted.insert( op.to ); - for( const auto& in : op.inputs ) - add_authority_accounts( _impacted, in.owner ); - } - - void operator()( const asset_settle_cancel_operation& op ) - { - _impacted.insert( op.account ); - } - - void operator()( const fba_distribute_operation& op ) - { - _impacted.insert( op.account_id ); - } - -}; - -void operation_get_impacted_accounts( const operation& op, flat_set& result ) -{ - get_impacted_account_visitor vtor = get_impacted_account_visitor( result ); - op.visit( vtor ); -} - -void transaction_get_impacted_accounts( const transaction& tx, flat_set& result ) -{ - for( const auto& op : tx.operations ) - operation_get_impacted_accounts( op, result ); -} - -} } diff --git 
a/libraries/app/include/graphene/app/api.hpp b/libraries/app/include/graphene/app/api.hpp index eef2b6d985..484cde78c5 100644 --- a/libraries/app/include/graphene/app/api.hpp +++ b/libraries/app/include/graphene/app/api.hpp @@ -30,6 +30,8 @@ #include +#include + #include #include @@ -49,6 +51,7 @@ namespace graphene { namespace app { using namespace graphene::chain; using namespace graphene::market_history; + using namespace graphene::grouped_orders; using namespace fc::ecc; using namespace std; @@ -71,6 +74,40 @@ namespace graphene { namespace app { string message_out; }; + struct account_asset_balance + { + string name; + account_id_type account_id; + share_type amount; + }; + struct asset_holders + { + asset_id_type asset_id; + int count; + }; + + struct history_operation_detail { + uint32_t total_count = 0; + vector operation_history_objs; + }; + + /** + * @brief summary data of a group of limit orders + */ + struct limit_order_group + { + limit_order_group( const std::pair& p ) + : min_price( p.first.min_price ), + max_price( p.second.max_price ), + total_for_sale( p.second.total_for_sale ) + {} + limit_order_group() {} + + price min_price; ///< possible lowest price in the group + price max_price; ///< possible highest price in the group + share_type total_for_sale; ///< total amount of asset for sale, asset id is min_price.base.asset_id + }; + /** * @brief The history_api class implements the RPC API for account history * @@ -79,25 +116,63 @@ namespace graphene { namespace app { class history_api { public: - history_api(application& app):_app(app){} + history_api(application& app) + :_app(app), database_api( std::ref(*app.chain_database()), &(app.get_options())) {} /** * @brief Get operations relevant to the specificed account - * @param account The account whose history should be queried + * @param account_id_or_name The account ID or name whose history should be queried * @param stop ID of the earliest operation to retrieve * @param limit Maximum number of operations to retrieve (must not exceed 100) * @param start ID of the most recent operation to retrieve * @return A list of operations performed by account, ordered from most recent to oldest. */ - vector get_account_history(account_id_type account, - operation_history_id_type stop = operation_history_id_type(), - unsigned limit = 100, - operation_history_id_type start = operation_history_id_type())const; + vector get_account_history( + const std::string account_id_or_name, + operation_history_id_type stop = operation_history_id_type(), + unsigned limit = 100, + operation_history_id_type start = operation_history_id_type() + )const; + + /** + * @brief Get operations relevant to the specified account filtering by operation type + * @param account_id_or_name The account ID or name whose history should be queried + * @param operation_types The IDs of the operation we want to get operations in the account + * ( 0 = transfer , 1 = limit order create, ...) 
+ * @param start the sequence number where to start looping back through the history + * @param limit the max number of entries to return (from start number) + * @return history_operation_detail + */ + history_operation_detail get_account_history_by_operations( + const std::string account_id_or_name, + vector operation_types, + uint32_t start, + unsigned limit + ); + + /** + * @brief Get only the requested operations relevant to the specified account + * @param account_id_or_name The account ID or name whose history should be queried + * @param operation_type The type of the operation to return + * ( 0 = transfer , 1 = limit order create, ...) + * @param stop ID of the earliest operation to retrieve + * @param limit Maximum number of operations to retrieve (must not exceed 100) + * @param start ID of the most recent operation to retrieve + * @return A list of operations performed by account, ordered from most recent to oldest. + */ + vector get_account_history_operations( + const std::string account_id_or_name, + int operation_type, + operation_history_id_type start = operation_history_id_type(), + operation_history_id_type stop = operation_history_id_type(), + unsigned limit = 100 + )const; + /** * @brief Get operations relevant to the specified account referenced * by an event numbering specific to the account. The current number of operations * for the account can be found in the account statistics (or use 0 for start). - * @param account The account whose history should be queried + * @param account_id_or_name The account ID or name whose history should be queried * @param stop Sequence number of earliest operation. 0 is default and will * query 'limit' number of operations. * @param limit Maximum number of operations to retrieve (must not exceed 100) @@ -105,19 +180,67 @@ namespace graphene { namespace app { * 0 is default, which will start querying from the most recent operation. * @return A list of operations performed by account, ordered from most recent to oldest. */ - vector get_relative_account_history( account_id_type account, - uint32_t stop = 0, + vector get_relative_account_history( const std::string account_id_or_name, + uint64_t stop = 0, unsigned limit = 100, - uint32_t start = 0) const; + uint64_t start = 0) const; + + /** + * @brief Get details of order executions that occurred most recently in a trading pair + * @param a Asset symbol or ID in a trading pair + * @param b The other asset symbol or ID in the trading pair + * @param limit Maximum records to return + * @return a list of order_history objects, in "most recent first" order + */ + vector get_fill_order_history( std::string a, std::string b, uint32_t limit )const; - vector get_fill_order_history( asset_id_type a, asset_id_type b, uint32_t limit )const; - vector get_market_history( asset_id_type a, asset_id_type b, uint32_t bucket_seconds, + /** + * @brief Get OHLCV data of a trading pair in a time range + * @param a Asset symbol or ID in a trading pair + * @param b The other asset symbol or ID in the trading pair + * @param bucket_seconds Length of each time bucket in seconds. + * Note: it needs to be within the result of the get_market_history_buckets() API, otherwise no data will be returned + * @param start The start of a time range, E.G. "2018-01-01T00:00:00" + * @param end The end of the time range + * @return A list of OHLCV data, in "least recent first" order. + * If there are more than 200 records in the specified time range, the first 200 records will be returned.
+ */ + vector get_market_history( std::string a, std::string b, uint32_t bucket_seconds, fc::time_point_sec start, fc::time_point_sec end )const; + + /** + * @brief Get OHLCV time bucket lengths supported (configured) by this API server + * @return A list of time bucket lengths in seconds. E.G. if the result contains a number "300", + * it means this API server supports OHLCV data aggregated in 5-minute buckets. + */ flat_set get_market_history_buckets()const; private: application& _app; + graphene::app::database_api database_api; }; + /** + * @brief Block api + */ + class block_api + { + public: + block_api(graphene::chain::database& db); + ~block_api(); + + /** + * @brief Get signed blocks + * @param block_num_from The lowest block number + * @param block_num_to The highest block number + * @return A list of signed blocks from block_num_from till block_num_to + */ + vector> get_blocks(uint32_t block_num_from, uint32_t block_num_to)const; + + private: + graphene::chain::database& _db; + }; + + /** * @brief The network_broadcast_api class allows broadcasting of transactions. */ @@ -143,14 +266,24 @@ namespace graphene { namespace app { * The transaction will be checked for validity in the local database prior to broadcasting. If it fails to * apply locally, an error will be thrown and the transaction will not be broadcast. */ - void broadcast_transaction(const signed_transaction& trx); + void broadcast_transaction(const precomputable_transaction& trx); + + /** this version of broadcast transaction registers a callback method that will be called when the transaction is + * included into a block. The callback method includes the transaction id, block number, and transaction number in the + * block. + */ + void broadcast_transaction_with_callback( confirmation_callback cb, const precomputable_transaction& trx); /** this version of broadcast transaction registers a callback method that will be called when the transaction is * included into a block. The callback method includes the transaction id, block number, and transaction number in the * block. */ - void broadcast_transaction_with_callback( confirmation_callback cb, const signed_transaction& trx); + fc::variant broadcast_transaction_synchronous(const precomputable_transaction& trx); + /** + * @brief Broadcast a signed block to the network + * @param block The signed block to broadcast + */ void broadcast_block( const signed_block& block ); /** @@ -219,22 +352,55 @@ namespace graphene { namespace app { public: crypto_api(); - fc::ecc::blind_signature blind_sign( const extended_private_key_type& key, const fc::ecc::blinded_hash& hash, int i ); - - signature_type unblind_signature( const extended_private_key_type& key, - const extended_public_key_type& bob, - const fc::ecc::blind_signature& sig, - const fc::sha256& hash, - int i ); - + /** + * @brief Generates a pedersen commitment: *commit = blind * G + value * G2. + * The commitment is 33 bytes, the blinding factor is 32 bytes. 
+ * For more information about Pedersen commitments, see https://en.wikipedia.org/wiki/Commitment_scheme + * @param blind Sha-256 blind factor type + * @param value Positive 64-bit integer value + * @return A 33-byte pedersen commitment: *commit = blind * G + value * G2 + */ fc::ecc::commitment_type blind( const fc::ecc::blind_factor_type& blind, uint64_t value ); + /** + * @brief Get sha-256 blind factor type + * @param blinds_in List of sha-256 blind factor types + * @param non_neg 32-bit integer value + * @return A blind factor type + */ fc::ecc::blind_factor_type blind_sum( const std::vector& blinds_in, uint32_t non_neg ); - bool verify_sum( const std::vector& commits_in, const std::vector& neg_commits_in, int64_t excess ); + /** + * @brief Verifies that commits + neg_commits + excess == 0 + * @param commits_in List of 33-byte pedersen commitments + * @param neg_commits_in List of 33-byte pedersen commitments + * @param excess Sum over the two lists of 33-byte pedersen commitments, adding the first set and subtracting the second + * @return Boolean - true if commits + neg_commits + excess == 0, otherwise false + */ + bool verify_sum( + const std::vector& commits_in, const std::vector& neg_commits_in, int64_t excess + ); + /** + * @brief Verifies range proof for 33-byte pedersen commitment + * @param commit 33-byte pedersen commitment + * @param proof List of characters + * @return A structure with success, min and max values + */ verify_range_result verify_range( const fc::ecc::commitment_type& commit, const std::vector& proof ); + /** + * @brief Proves with respect to min_value the range for pedersen + * commitment which has the provided blinding factor and value + * @param min_value Positive 64-bit integer value + * @param commit 33-byte pedersen commitment + * @param commit_blind Sha-256 blind factor type for the correct digits + * @param nonce Sha-256 blind factor type for our non-forged signatures + * @param exp Exponents base 10 in range [-1 ; 18] inclusively + * @param min_bits 8-bit positive integer, must be in range [0 ; 64] inclusively + * @param actual_value 64-bit positive integer, must be greater than or equal to min_value + * @return The proof as a list of characters + */ std::vector range_proof_sign( uint64_t min_value, const commitment_type& commit, const blind_factor_type& commit_blind, @@ -243,15 +409,102 @@ namespace graphene { namespace app { uint8_t min_bits, uint64_t actual_value ); - + /** + * @brief Verifies range proof rewind for 33-byte pedersen commitment + * @param nonce Sha-256 blind factor type + * @param commit 33-byte pedersen commitment + * @param proof List of characters + * @return A structure with success, min, max, value_out, blind_out and message_out values + */ verify_range_proof_rewind_result verify_range_proof_rewind( const blind_factor_type& nonce, const fc::ecc::commitment_type& commit, const std::vector& proof ); - + /** + * @brief Gets "range proof" info. The cli_wallet includes functionality for sending blind transfers + * in which the values of the input and output amounts are “blinded.” + * In the case where a transaction produces two or more outputs (e.g. an amount to the intended + * recipient plus “change” back to the sender), + * a "range proof" must be supplied to prove that none of the outputs commit to a negative value.
+ * @param proof List of proof's characters + * @return A range proof info structure with exponent, mantissa, min and max values + */ range_proof_info range_get_info( const std::vector& proof ); }; + /** + * @brief The asset_api class provides queries about asset holders. + */ + class asset_api + { + public: + asset_api(graphene::app::application& app); + ~asset_api(); + + /** + * @brief Get asset holders for a specific asset + * @param asset The specific asset id or symbol + * @param start The start index + * @param limit Maximum number of records to retrieve, must not exceed 100 + * @return A list of asset holders for the specified asset + */ + vector get_asset_holders( std::string asset, uint32_t start, uint32_t limit )const; + + /** + * @brief Get asset holders count for a specific asset + * @param asset The specific asset id or symbol + * @return Holders count for the specified asset + */ + int get_asset_holders_count( std::string asset )const; + + /** + * @brief Get all asset holders + * @return A list of all asset holders + */ + vector get_all_asset_holders() const; + + private: + graphene::app::application& _app; + graphene::chain::database& _db; + graphene::app::database_api database_api; + }; + + /** + * @brief The orders_api class exposes access to data processed by the grouped orders plugin. + */ + class orders_api + { + public: + orders_api(application& app):_app(app), database_api( std::ref(*app.chain_database()), &(app.get_options()) ){} + //virtual ~orders_api() {} + + /** + * @brief Get tracked groups configured by the server. + * @return A list of numbers which indicate the configured groups; of those, 1 means 0.01% diff on price. + */ + flat_set get_tracked_groups()const; + + /** + * @brief Get grouped limit orders in a given market. + * + * @param base_asset ID or symbol of asset being sold + * @param quote_asset ID or symbol of asset being purchased + * @param group Maximum price diff within each order group, must be one of the configured values + * @param start Optional price to indicate the first order group to retrieve + * @param limit Maximum number of order groups to retrieve (must not exceed 101) + * @return The grouped limit orders, ordered from best offered price to worst + */ + vector< limit_order_group > get_grouped_limit_orders( std::string base_asset, + std::string quote_asset, + uint16_t group, + optional start, + uint32_t limit )const; + + private: + application& _app; + graphene::app::database_api database_api; + }; + /** * @brief The login_api class implements the bottom layer of the RPC API * @@ -273,6 +526,8 @@ namespace graphene { namespace app { * has successfully authenticated. */ bool login(const string& user, const string& password); + /// @brief Retrieve the network block API + fc::api block()const; /// @brief Retrieve the network broadcast API fc::api network_broadcast()const; /// @brief Retrieve the database API @@ -283,19 +538,26 @@ namespace graphene { namespace app { fc::api network_node()const; /// @brief Retrieve the cryptography API fc::api crypto()const; + /// @brief Retrieve the asset API + fc::api asset()const; + /// @brief Retrieve the orders API + fc::api orders()const; /// @brief Retrieve the debug API (if available) fc::api debug()const; - private: /// @brief Called to enable an API, not reflected.
void enable_api( const string& api_name ); + private: application& _app; + optional< fc::api > _block_api; optional< fc::api > _database_api; optional< fc::api > _network_broadcast_api; optional< fc::api > _network_node_api; optional< fc::api > _history_api; optional< fc::api > _crypto_api; + optional< fc::api > _asset_api; + optional< fc::api > _orders_api; optional< fc::api > _debug_api; }; @@ -307,19 +569,32 @@ FC_REFLECT( graphene::app::verify_range_result, (success)(min_val)(max_val) ) FC_REFLECT( graphene::app::verify_range_proof_rewind_result, (success)(min_val)(max_val)(value_out)(blind_out)(message_out) ) +FC_REFLECT( graphene::app::history_operation_detail, + (total_count)(operation_history_objs) ) +FC_REFLECT( graphene::app::limit_order_group, + (min_price)(max_price)(total_for_sale) ) //FC_REFLECT_TYPENAME( fc::ecc::compact_signature ); //FC_REFLECT_TYPENAME( fc::ecc::commitment_type ); +FC_REFLECT( graphene::app::account_asset_balance, (name)(account_id)(amount) ); +FC_REFLECT( graphene::app::asset_holders, (asset_id)(count) ); + FC_API(graphene::app::history_api, (get_account_history) + (get_account_history_by_operations) + (get_account_history_operations) (get_relative_account_history) (get_fill_order_history) (get_market_history) (get_market_history_buckets) ) +FC_API(graphene::app::block_api, + (get_blocks) + ) FC_API(graphene::app::network_broadcast_api, (broadcast_transaction) (broadcast_transaction_with_callback) + (broadcast_transaction_synchronous) (broadcast_block) ) FC_API(graphene::app::network_node_api, @@ -331,8 +606,6 @@ FC_API(graphene::app::network_node_api, (set_advanced_node_parameters) ) FC_API(graphene::app::crypto_api, - (blind_sign) - (unblind_signature) (blind) (blind_sum) (verify_sum) @@ -341,12 +614,24 @@ FC_API(graphene::app::crypto_api, (verify_range_proof_rewind) (range_get_info) ) +FC_API(graphene::app::asset_api, + (get_asset_holders) + (get_asset_holders_count) + (get_all_asset_holders) + ) +FC_API(graphene::app::orders_api, + (get_tracked_groups) + (get_grouped_limit_orders) + ) FC_API(graphene::app::login_api, (login) + (block) (network_broadcast) (database) (history) (network_node) (crypto) + (asset) + (orders) (debug) ) diff --git a/libraries/app/include/graphene/app/application.hpp b/libraries/app/include/graphene/app/application.hpp index 26ae78efd6..6f1a0d6e90 100644 --- a/libraries/app/include/graphene/app/application.hpp +++ b/libraries/app/include/graphene/app/application.hpp @@ -35,35 +35,60 @@ namespace graphene { namespace app { class abstract_plugin; + class application_options + { + public: + bool enable_subscribe_to_all = false; + bool has_market_history_plugin = false; + uint64_t api_limit_get_account_history_operations = 100; + uint64_t api_limit_get_account_history = 100; + uint64_t api_limit_get_grouped_limit_orders = 101; + uint64_t api_limit_get_relative_account_history = 100; + uint64_t api_limit_get_account_history_by_operations = 100; + uint64_t api_limit_get_asset_holders = 100; + uint64_t api_limit_get_key_references = 100; + }; + class application { public: application(); ~application(); - void set_program_options( boost::program_options::options_description& command_line_options, - boost::program_options::options_description& configuration_file_options )const; - void initialize(const fc::path& data_dir, const boost::program_options::variables_map&options); - void initialize_plugins( const boost::program_options::variables_map& options ); + void set_program_options(boost::program_options::options_description& 
command_line_options, + boost::program_options::options_description& configuration_file_options)const; + void initialize(const fc::path& data_dir, const boost::program_options::variables_map& options); + void initialize_plugins(const boost::program_options::variables_map& options); void startup(); void shutdown(); void startup_plugins(); void shutdown_plugins(); template - std::shared_ptr register_plugin() - { + std::shared_ptr register_plugin(bool auto_load = false) { auto plug = std::make_shared(); plug->plugin_set_app(this); - boost::program_options::options_description plugin_cli_options("Options for plugin " + plug->plugin_name()), plugin_cfg_options; + string cli_plugin_desc = plug->plugin_name() + " plugin. " + plug->plugin_description() + "\nOptions"; + boost::program_options::options_description plugin_cli_options( cli_plugin_desc ), plugin_cfg_options; plug->plugin_set_program_options(plugin_cli_options, plugin_cfg_options); + if( !plugin_cli_options.options().empty() ) _cli_options.add(plugin_cli_options); + if( !plugin_cfg_options.options().empty() ) + { + std::string header_name = "plugin-cfg-header-" + plug->plugin_name(); + std::string header_desc = plug->plugin_name() + " plugin options"; + _cfg_options.add_options()(header_name.c_str(), header_desc.c_str()); _cfg_options.add(plugin_cfg_options); + } + + add_available_plugin( plug ); + + if (auto_load) + enable_plugin(plug->plugin_name()); - add_plugin( plug->plugin_name(), plug ); return plug; } std::shared_ptr get_plugin( const string& name )const; @@ -73,13 +98,13 @@ namespace graphene { namespace app { { std::shared_ptr abs_plugin = get_plugin( name ); std::shared_ptr result = std::dynamic_pointer_cast( abs_plugin ); - FC_ASSERT( result != std::shared_ptr() ); + FC_ASSERT( result != std::shared_ptr(), "Unable to load plugin '${p}'", ("p",name) ); return result; } net::node_ptr p2p_node(); std::shared_ptr chain_database()const; - + void set_api_limit(); void set_block_production(bool producing_blocks); fc::optional< api_access_info > get_api_access_info( const string& username )const; void set_api_access_info(const string& username, api_access_info&& permissions); @@ -88,8 +113,12 @@ namespace graphene { namespace app { /// Emitted when syncing finishes (is_finished_syncing will return true) boost::signals2::signal syncing_finished; + const application_options& get_options(); + + void enable_plugin( const string& name ); + private: - void add_plugin( const string& name, std::shared_ptr p ); + void add_available_plugin( std::shared_ptr p ); std::shared_ptr my; boost::program_options::options_description _cli_options; diff --git a/libraries/deterministic_openssl_rand/include/graphene/utilities/deterministic_openssl_rand.hpp b/libraries/app/include/graphene/app/config_util.hpp similarity index 74% rename from libraries/deterministic_openssl_rand/include/graphene/utilities/deterministic_openssl_rand.hpp rename to libraries/app/include/graphene/app/config_util.hpp index 693723cb4e..d7358f228c 100644 --- a/libraries/deterministic_openssl_rand/include/graphene/utilities/deterministic_openssl_rand.hpp +++ b/libraries/app/include/graphene/app/config_util.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. + * Copyright (c) 2018 Lubos Ilcik, and contributors. * * The MIT License * @@ -22,10 +22,13 @@ * THE SOFTWARE. 
*/ #pragma once -#include -namespace graphene { namespace utilities { +#include +#include -void set_random_seed_for_testing(const fc::sha512& new_seed); +namespace graphene { namespace app { -} } // end namespace graphene::utilities + void load_configuration_options(const fc::path &data_dir, const boost::program_options::options_description &cfg_options, + boost::program_options::variables_map &options); + +} } // graphene::app \ No newline at end of file diff --git a/libraries/app/include/graphene/app/database_api.hpp b/libraries/app/include/graphene/app/database_api.hpp index 95ce16d68d..fe97083956 100644 --- a/libraries/app/include/graphene/app/database_api.hpp +++ b/libraries/app/include/graphene/app/database_api.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. + * Copyright (c) 2017 Cryptonomex, Inc., and contributors. * * The MIT License * @@ -66,9 +66,9 @@ class database_api_impl; struct order { - double price; - double quote; - double base; + string price; + string quote; + string base; }; struct order_book @@ -81,30 +81,45 @@ struct order_book struct market_ticker { + time_point_sec time; string base; string quote; - double latest; - double lowest_ask; - double highest_bid; - double percent_change; - double base_volume; - double quote_volume; + string latest; + string lowest_ask; + string highest_bid; + string percent_change; + string base_volume; + string quote_volume; + + market_ticker() {} + market_ticker(const market_ticker_object& mto, + const fc::time_point_sec& now, + const asset_object& asset_base, + const asset_object& asset_quote, + const order_book& orders); + market_ticker(const fc::time_point_sec& now, + const asset_object& asset_base, + const asset_object& asset_quote); }; struct market_volume { + time_point_sec time; string base; string quote; - double base_volume; - double quote_volume; + string base_volume; + string quote_volume; }; struct market_trade { + int64_t sequence = 0; fc::time_point_sec date; - double price; - double amount; - double value; + string price; + string amount; + string value; + account_id_type side1_account_id = GRAPHENE_NULL_ACCOUNT; + account_id_type side2_account_id = GRAPHENE_NULL_ACCOUNT; }; /** @@ -117,7 +132,7 @@ struct market_trade class database_api { public: - database_api(graphene::chain::database& db); + database_api(graphene::chain::database& db, const application_options* app_options = nullptr ); ~database_api(); ///////////// @@ -137,8 +152,29 @@ class database_api // Subscriptions // /////////////////// - void set_subscribe_callback( std::function cb, bool clear_filter ); - void set_pending_transaction_callback( std::function cb ); + /** + * @brief Register a callback handle which then can be used to subscribe to object database changes + * @param cb The callback handle to register + * @param notify_remove_create Whether to subscribe to universal object creation and removal events. + * If this is set to true, the API server will notify all newly created objects and the IDs of all + * newly removed objects to the client, no matter whether the client subscribed to the objects. + * By default, API servers don't allow subscribing to universal events, which can be changed + * on server startup.
+ */ + void set_subscribe_callback( std::function cb, bool notify_remove_create ); + /** + * @brief Register a callback handle which will get notified when a transaction is pushed to database + * @param cb The callback handle to register + * + * Note: a transaction can be pushed to database and be popped from database several times while + * processing, before and after included in a block. Everytime when a push is done, the client will + * be notified. + */ + void set_pending_transaction_callback( std::function cb ); + /** + * @brief Register a callback handle which will get notified when a block is pushed to database + * @param cb The callback handle to register + */ void set_block_applied_callback( std::function cb ); /** * @brief Stop receiving any notifications @@ -158,6 +194,14 @@ class database_api */ optional get_block_header(uint32_t block_num)const; + /** + * @brief Retrieve multiple block header by block numbers + * @param block_num vector containing heights of the block whose header should be returned + * @return array of headers of the referenced blocks, or null if no matching block was found + */ + map> get_block_header_batch(const vector block_nums)const; + + /** * @brief Retrieve a full, signed block * @param block_num Height of the block to be returned @@ -212,18 +256,64 @@ class database_api vector> get_key_references( vector key )const; + /** + * Determine whether a textual representation of a public key + * (in Base-58 format) is *currently* linked + * to any *registered* (i.e. non-stealth) account on the blockchain + * @param public_key Public key + * @return Whether a public key is known + */ + bool is_public_key_registered(string public_key) const; + ////////////// // Accounts // ////////////// + /** + * @brief Get account object from a name or ID + * @param account_name_or_id ID or name of the accounts + * @return Account ID + * + */ + account_id_type get_account_id_from_string(const std::string& name_or_id) const; + /** * @brief Get a list of accounts by ID - * @param account_ids IDs of the accounts to retrieve + * @param account_names_or_ids IDs or names of the accounts to retrieve * @return The accounts corresponding to the provided IDs * * This function has semantics identical to @ref get_objects */ - vector> get_accounts(const vector& account_ids)const; + vector> get_accounts(const vector& account_names_or_ids)const; + + /** + * @brief Fetch all orders relevant to the specified account and specified market, result orders + * are sorted descendingly by price + * + * @param account_name_or_id The name or ID of an account to retrieve + * @param base Base asset + * @param quote Quote asset + * @param limit The limitation of items each query can fetch, not greater than 101 + * @param start_id Start order id, fetch orders which price lower than this order, or price equal to this order + * but order ID greater than this order + * @param start_price Fetch orders with price lower than or equal to this price + * + * @return List of orders from @ref account_name_or_id to the corresponding account + * + * @note + * 1. if @ref account_name_or_id cannot be tied to an account, empty result will be returned + * 2. 
@ref start_id and @ref start_price can be empty, if so the api will return the "first page" of orders; + * if start_id is specified, its price will be used to do page query preferentially, otherwise the start_price + * will be used; start_id and start_price may be used cooperatively in case of the order specified by start_id + * was just canceled accidentally, in such case, the result orders' price may lower or equal to start_price, + * but orders' id greater than start_id + */ + vector get_account_limit_orders( const string& account_name_or_id, + const string &base, + const string "e, + uint32_t limit = 101, + optional ostart_id = optional(), + optional ostart_price = optional()); /** * @brief Fetch all objects relevant to the specified accounts and subscribe to updates @@ -243,7 +333,7 @@ class database_api /** * @return all accounts that referr to the key or account id in their owner or active authorities. */ - vector get_account_references( account_id_type account_id )const; + vector get_account_references( const std::string account_id_or_name )const; /** * @brief Get a list of accounts by name @@ -268,11 +358,11 @@ class database_api /** * @brief Get an account's balances in various assets - * @param id ID of the account to get balances for + * @param account_name_or_id ID or name of the account to get balances for * @param assets IDs of the assets to get balances of; if empty, get all assets account has a balance in * @return Balances of the account */ - vector get_account_balances(account_id_type id, const flat_set& assets)const; + vector get_account_balances(const std::string& account_name_or_id, const flat_set& assets)const; /// Semantically equivalent to @ref get_account_balances, but takes a name instead of an ID. vector get_named_account_balances(const std::string& name, const flat_set& assets)const; @@ -282,7 +372,7 @@ class database_api vector get_vested_balances( const vector& objs )const; - vector get_vesting_balances( account_id_type account_id )const; + vector get_vesting_balances( const std::string account_id_or_name )const; /** * @brief Get the total number of accounts registered with the blockchain @@ -293,19 +383,26 @@ class database_api // Assets // //////////// + /** + * @brief Get asset id from a symbol or ID + * @param symbol_or_id ID or symbol of the asset + * @return asset id + */ + asset_id_type get_asset_id_from_string(const std::string& symbol_or_id) const; + /** * @brief Get a list of assets by ID - * @param asset_ids IDs of the assets to retrieve + * @param asset_symbols_or_ids Symbol names or IDs of the assets to retrieve * @return The assets corresponding to the provided IDs * * This function has semantics identical to @ref get_objects */ - vector> get_assets(const vector& asset_ids)const; + vector> get_assets(const vector& asset_symbols_or_ids)const; /** * @brief Get assets alphabetically by symbol name * @param lower_bound_symbol Lower bound of symbol names to retrieve - * @param limit Maximum number of assets to fetch (must not exceed 100) + * @param limit Maximum number of assets to fetch (must not exceed 101) * @return The assets found */ vector list_assets(const string& lower_bound_symbol, uint32_t limit)const; @@ -319,58 +416,73 @@ class database_api */ vector> lookup_asset_symbols(const vector& symbols_or_ids)const; + /** + * @brief Get assets count + * @return The assets count + */ + uint64_t get_asset_count()const; + ///////////////////// // Markets / feeds // ///////////////////// /** * @brief Get limit orders in a given market - * @param a ID 
of asset being sold - * @param b ID of asset being purchased + * @param a Symbol or ID of asset being sold + * @param b Symbol or ID of asset being purchased * @param limit Maximum number of orders to retrieve * @return The limit orders, ordered from least price to greatest */ - vector get_limit_orders(asset_id_type a, asset_id_type b, uint32_t limit)const; + vector get_limit_orders(std::string a, std::string b, uint32_t limit)const; /** * @brief Get call orders in a given asset - * @param a ID of asset being called + * @param a Symbol or ID of asset being called * @param limit Maximum number of orders to retrieve * @return The call orders, ordered from earliest to be called to latest */ - vector get_call_orders(asset_id_type a, uint32_t limit)const; + vector get_call_orders(const std::string& a, uint32_t limit)const; /** * @brief Get forced settlement orders in a given asset - * @param a ID of asset being settled + * @param a Symbol or ID of asset being settled * @param limit Maximum number of orders to retrieve * @return The settle orders, ordered from earliest settlement date to latest */ - vector get_settle_orders(asset_id_type a, uint32_t limit)const; + vector get_settle_orders(const std::string& a, uint32_t limit)const; /** - * @return all open margin positions for a given account id. + * @brief Get collateral_bid_objects for a given asset + * @param a Symbol or ID of asset + * @param limit Maximum number of objects to retrieve + * @param start skip that many results + * @return The settle orders, ordered from earliest settlement date to latest */ - vector get_margin_positions( const account_id_type& id )const; + vector get_collateral_bids(const std::string& a, uint32_t limit, uint32_t start)const; + + /** + * @return all open margin positions for a given account id or name. + */ + vector get_margin_positions( const std::string account_id_or_name )const; /** * @brief Request notification when the active orders in the market between two assets changes * @param callback Callback method which is called when the market changes - * @param a First asset ID - * @param b Second asset ID + * @param a First asset Symbol or ID + * @param b Second asset Symbol or ID * * Callback will be passed a variant containing a vector>. The vector will * contain, in order, the operations which changed the market, and their results. */ void subscribe_to_market(std::function callback, - asset_id_type a, asset_id_type b); + const std::string& a, const std::string& b); /** * @brief Unsubscribe from updates to a given market - * @param a First asset ID - * @param b Second asset ID + * @param a First asset Symbol ID + * @param b Second asset Symbol ID */ - void unsubscribe_from_market( asset_id_type a, asset_id_type b ); + void unsubscribe_from_market( const std::string& a, const std::string& b ); /** * @brief Returns the ticker for the market assetA:assetB @@ -398,16 +510,42 @@ class database_api order_book get_order_book( const string& base, const string& quote, unsigned limit = 50 )const; /** - * @brief Returns recent trades for the market assetA:assetB - * Note: Currentlt, timezone offsets are not supported. The time must be UTC. 
- * @param a String name of the first asset - * @param b String name of the second asset - * @param stop Stop time as a UNIX timestamp - * @param limit Number of trasactions to retrieve, capped at 100 - * @param start Start time as a UNIX timestamp + * @brief Returns vector of tickers sorted by reverse base_volume + * Note: this API is experimental and subject to change in next releases + * @param limit Max number of results + * @return Ticker vector sorted in descending order + */ + vector get_top_markets(uint32_t limit)const; + + /** + * @brief Returns recent trades for the market base:quote, ordered by time, most recent first. + * Note: Currently, timezone offsets are not supported. The time must be UTC. The range is [stop, start). + * In case there are more than 100 trades that occurred in the same second, this API only returns + * the first 100 records; use another API `get_trade_history_by_sequence` to query for the rest. + * @param base symbol or ID of the base asset + * @param quote symbol or ID of the quote asset + * @param start Start time as a UNIX timestamp, the latest trade to retrieve + * @param stop Stop time as a UNIX timestamp, the earliest trade to retrieve + * @param limit Number of transactions to retrieve, capped at 100. + * @return Recent transactions in the market */ - vector get_trade_history( const string& base, const string& quote, fc::time_point_sec start, fc::time_point_sec stop, unsigned limit = 100 )const; + vector get_trade_history( const string& base, const string& quote, + fc::time_point_sec start, fc::time_point_sec stop, + unsigned limit = 100 )const; + + /** + * @brief Returns trades for the market base:quote, ordered by time, most recent first. + * Note: Currently, timezone offsets are not supported. The time must be UTC. The range is [stop, start).
+ * @param base symbol or ID of the base asset + * @param quote symbol or ID of the quote asset + * @param start Start sequence as an integer, the latest trade to retrieve + * @param stop Stop time as a UNIX timestamp, the earliest trade to retrieve + * @param limit Number of transactions to retrieve, capped at 100 + * @return Transactions in the market + */ + vector get_trade_history_by_sequence( const string& base, const string& quote, + int64_t start, fc::time_point_sec stop, + unsigned limit = 100 )const; @@ -426,10 +564,10 @@ class database_api /** * @brief Get the witness owned by a given account - * @param account The ID of the account whose witness should be retrieved + * @param account_id_or_name The ID or name of the account whose witness should be retrieved * @return The witness object, or null if the account does not have a witness */ - fc::optional get_witness_by_account(account_id_type account)const; + fc::optional get_witness_by_account(const std::string account_id_or_name)const; /** * @brief Get names and IDs for registered witnesses @@ -459,10 +597,10 @@ class database_api /** * @brief Get the committee_member owned by a given account - * @param account The ID of the account whose committee_member should be retrieved + * @param account The ID or name of the account whose committee_member should be retrieved * @return The committee_member object, or null if the account does not have a committee_member */ - fc::optional get_committee_member_by_account(account_id_type account)const; + fc::optional get_committee_member_by_account(const std::string account_id_or_name)const; /** * @brief Get names and IDs for registered committee_members @@ -472,13 +610,35 @@ class database_api */ map lookup_committee_member_accounts(const string& lower_bound_name, uint32_t limit)const; + /** + * @brief Get the total number of committee members registered with the blockchain + */ + uint64_t get_committee_count()const; + - /// WORKERS + /////////////////////// + // Worker proposals // + /////////////////////// + + /** + * @brief Get all workers + * @return All the workers + * + */ + vector get_all_workers()const; /** - * Return the worker objects associated with this account. + * @brief Get the workers owned by a given account + * @param account_id_or_name The ID or name of the account whose worker should be retrieved + * @return The worker object, or null if the account does not have a worker */ - vector get_workers_by_account(account_id_type account)const; + vector> get_workers_by_account(const std::string account_id_or_name)const; + + /** + * @brief Get the total number of workers registered with the blockchain + */ + uint64_t get_worker_count()const; + /////////// @@ -502,6 +662,10 @@ class database_api /// @brief Get a hexdump of the serialized binary form of a transaction std::string get_transaction_hex(const signed_transaction& trx)const; + /// @brief Get a hexdump of the serialized binary form of a + /// signatures-stripped transaction + std::string get_transaction_hex_without_sig( const signed_transaction &trx ) const; + /** * This API will take a partially signed transaction and a set of public keys that the owner has the ability to sign for * and return the minimal subset of public keys that should add signatures to the transaction.
@@ -522,9 +686,12 @@ class database_api bool verify_authority( const signed_transaction& trx )const; /** - * @return true if the signers have enough authority to authorize an account + * @brief Verify that the public keys have enough authority to approve an operation for this account + * @param account_name_or_id the account to check + * @param signers the public keys + * @return true if the passed in keys have enough authority to approve an operation for this account */ - bool verify_account_authority( const string& name_or_id, const flat_set& signers )const; + bool verify_account_authority( const string& account_name_or_id, const flat_set& signers )const; /** * Validates a transaction against the current state without broadcasting it on the network. @@ -532,10 +699,9 @@ class database_api processed_transaction validate_transaction( const signed_transaction& trx )const; /** - * For each operation calculate the required fee in the specified asset type. If the asset type does - * not have a valid core_exchange_rate + * For each operation calculate the required fee in the specified asset type. */ - vector< fc::variant > get_required_fees( const vector& ops, asset_id_type id )const; + vector< fc::variant > get_required_fees( const vector& ops, const std::string& asset_id_or_symbol )const; /////////////////////////// // Proposed transactions // @@ -544,7 +710,7 @@ class database_api /** * @return the set of proposed transactions relevant to the specified account id. */ - vector get_proposed_transactions( account_id_type id )const; + vector get_proposed_transactions( const std::string account_id_or_name )const; ////////////////////// // Blinded balances // @@ -555,6 +721,28 @@ class database_api */ vector get_blinded_balances( const flat_set& commitments )const; + ///////////////// + // Withdrawals // + ///////////////// + + /** + * @brief Get non expired withdraw permission objects for a giver(ex:recurring customer) + * @param account Account ID or name to get objects from + * @param start Withdraw permission objects(1.12.X) before this ID will be skipped in results. Pagination purposes. + * @param limit Maximum number of objects to retrieve + * @return Withdraw permission objects for the account + */ + vector get_withdraw_permissions_by_giver(const std::string account_id_or_name, withdraw_permission_id_type start, uint32_t limit)const; + + /** + * @brief Get non expired withdraw permission objects for a recipient(ex:service provider) + * @param account Account ID or name to get objects from + * @param start Withdraw permission objects(1.12.X) before this ID will be skipped in results. Pagination purposes. 
+ * @param limit Maximum number of objects to retrieve + * @return Withdraw permission objects for the account + */ + vector get_withdraw_permissions_by_recipient(const std::string account_id_or_name, withdraw_permission_id_type start, uint32_t limit)const; + private: std::shared_ptr< database_api_impl > my; }; @@ -563,9 +751,10 @@ class database_api FC_REFLECT( graphene::app::order, (price)(quote)(base) ); FC_REFLECT( graphene::app::order_book, (base)(quote)(bids)(asks) ); -FC_REFLECT( graphene::app::market_ticker, (base)(quote)(latest)(lowest_ask)(highest_bid)(percent_change)(base_volume)(quote_volume) ); -FC_REFLECT( graphene::app::market_volume, (base)(quote)(base_volume)(quote_volume) ); -FC_REFLECT( graphene::app::market_trade, (date)(price)(amount)(value) ); +FC_REFLECT( graphene::app::market_ticker, + (time)(base)(quote)(latest)(lowest_ask)(highest_bid)(percent_change)(base_volume)(quote_volume) ); +FC_REFLECT( graphene::app::market_volume, (time)(base)(quote)(base_volume)(quote_volume) ); +FC_REFLECT( graphene::app::market_trade, (sequence)(date)(price)(amount)(value)(side1_account_id)(side2_account_id) ); FC_API(graphene::app::database_api, // Objects @@ -579,6 +768,7 @@ FC_API(graphene::app::database_api, // Blocks and transactions (get_block_header) + (get_block_header_batch) (get_block) (get_transaction) (get_recent_transaction_by_id) @@ -592,8 +782,10 @@ FC_API(graphene::app::database_api, // Keys (get_key_references) + (is_public_key_registered) // Accounts + (get_account_id_from_string) (get_accounts) (get_full_accounts) (get_account_by_name) @@ -613,18 +805,24 @@ FC_API(graphene::app::database_api, (get_assets) (list_assets) (lookup_asset_symbols) + (get_asset_count) + (get_asset_id_from_string) // Markets / feeds (get_order_book) (get_limit_orders) + (get_account_limit_orders) (get_call_orders) (get_settle_orders) (get_margin_positions) + (get_collateral_bids) (subscribe_to_market) (unsubscribe_from_market) (get_ticker) (get_24_volume) + (get_top_markets) (get_trade_history) + (get_trade_history_by_sequence) // Witnesses (get_witnesses) @@ -636,14 +834,19 @@ FC_API(graphene::app::database_api, (get_committee_members) (get_committee_member_by_account) (lookup_committee_member_accounts) + (get_committee_count) // workers + (get_all_workers) (get_workers_by_account) + (get_worker_count) + // Votes (lookup_vote_ids) // Authority / validation (get_transaction_hex) + (get_transaction_hex_without_sig) (get_required_signatures) (get_potential_signatures) (get_potential_address_signatures) @@ -657,4 +860,9 @@ FC_API(graphene::app::database_api, // Blinded balances (get_blinded_balances) + + // Withdrawals + (get_withdraw_permissions_by_giver) + (get_withdraw_permissions_by_recipient) + ) diff --git a/libraries/app/include/graphene/app/full_account.hpp b/libraries/app/include/graphene/app/full_account.hpp index 7472463086..dea5eb7e6a 100644 --- a/libraries/app/include/graphene/app/full_account.hpp +++ b/libraries/app/include/graphene/app/full_account.hpp @@ -26,6 +26,7 @@ #include #include #include +#include namespace graphene { namespace app { using namespace graphene::chain; @@ -43,12 +44,15 @@ namespace graphene { namespace app { vector vesting_balances; vector limit_orders; vector call_orders; + vector settle_orders; vector proposals; + vector assets; + vector withdraws; }; } } -FC_REFLECT( graphene::app::full_account, +FC_REFLECT( graphene::app::full_account, (account) (statistics) (registrar_name) @@ -60,5 +64,8 @@ FC_REFLECT( graphene::app::full_account, (vesting_balances) 
(limit_orders) (call_orders) - (proposals) + (settle_orders) + (proposals) + (assets) + (withdraws) ) diff --git a/libraries/app/include/graphene/app/plugin.hpp b/libraries/app/include/graphene/app/plugin.hpp index 872207442d..45336f677c 100644 --- a/libraries/app/include/graphene/app/plugin.hpp +++ b/libraries/app/include/graphene/app/plugin.hpp @@ -35,6 +35,7 @@ class abstract_plugin public: virtual ~abstract_plugin(){} virtual std::string plugin_name()const = 0; + virtual std::string plugin_description()const = 0; /** * @brief Perform early startup routines and register plugin indexes, callbacks, etc. @@ -100,6 +101,7 @@ class plugin : public abstract_plugin virtual ~plugin() override; virtual std::string plugin_name()const override; + virtual std::string plugin_description()const override; virtual void plugin_initialize( const boost::program_options::variables_map& options ) override; virtual void plugin_startup() override; virtual void plugin_shutdown() override; @@ -121,16 +123,24 @@ class plugin : public abstract_plugin /// @group Some useful tools for boost::program_options arguments using vectors of JSON strings /// @{ template -T dejsonify(const string& s) +T dejsonify(const string& s, uint32_t max_depth) { - return fc::json::from_string(s).as(); + return fc::json::from_string(s).as(max_depth); +} + +namespace impl { + template + T dejsonify( const string& s ) + { + return graphene::app::dejsonify( s, GRAPHENE_MAX_NESTED_OBJECTS ); + } } #define DEFAULT_VALUE_VECTOR(value) default_value({fc::json::to_string(value)}, fc::json::to_string(value)) #define LOAD_VALUE_SET(options, name, container, type) \ if( options.count(name) ) { \ const std::vector& ops = options[name].as>(); \ - std::transform(ops.begin(), ops.end(), std::inserter(container, container.end()), &graphene::app::dejsonify); \ + std::transform(ops.begin(), ops.end(), std::inserter(container, container.end()), &graphene::app::impl::dejsonify); \ } /// @} diff --git a/libraries/chain/include/graphene/chain/protocol/protocol.hpp b/libraries/app/include/graphene/app/util.hpp similarity index 62% rename from libraries/chain/include/graphene/chain/protocol/protocol.hpp rename to libraries/app/include/graphene/app/util.hpp index faf6bdc20b..520ce6c502 100644 --- a/libraries/chain/include/graphene/chain/protocol/protocol.hpp +++ b/libraries/app/include/graphene/app/util.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. + * Copyright (c) 2018 Abit More, and contributors. * * The MIT License * @@ -22,5 +22,22 @@ * THE SOFTWARE. 
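The reworked LOAD_VALUE_SET macro above expands to a std::transform that parses each JSON string handed back by boost::program_options into the element type of the target container, now with a bounded nesting depth. A simplified stand-in for the same transform-into-inserter pattern (plain integer parsing replaces fc::json, and parse_one is a hypothetical name, not part of this patch):

    #include <algorithm>
    #include <iterator>
    #include <set>
    #include <string>
    #include <vector>

    // Stand-in for graphene::app::impl::dejsonify<T>: parse one string into T.
    template<typename T>
    T parse_one( const std::string& s );

    template<>
    int parse_one<int>( const std::string& s ) { return std::stoi( s ); }

    int main()
    {
        // What program_options would hand back for a repeated command-line option.
        std::vector<std::string> ops = { "1", "2", "3" };

        std::set<int> container;
        std::transform( ops.begin(), ops.end(),
                        std::inserter( container, container.end() ),
                        &parse_one<int> );

        return container.size() == 3 ? 0 : 1;
    }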
*/ #pragma once -#include -#include + +#include + +#include + +#include + +namespace graphene { namespace app { + using namespace graphene::chain; + + typedef boost::multiprecision::uint256_t u256; + + u256 to256( const fc::uint128& t ); + fc::uint128 to_capped128( const u256& t ); + string uint128_amount_to_string( const fc::uint128& amount, const uint8_t precision ); + string price_to_string( const price& _price, const uint8_t base_precision, const uint8_t quote_precision); + string price_diff_percent_string( const price& old_price, const price& new_price ); + +} } diff --git a/libraries/app/plugin.cpp b/libraries/app/plugin.cpp index 8568d37115..cae488a666 100644 --- a/libraries/app/plugin.cpp +++ b/libraries/app/plugin.cpp @@ -43,6 +43,11 @@ std::string plugin::plugin_name()const return ""; } +std::string plugin::plugin_description()const +{ + return ""; +} + void plugin::plugin_initialize( const boost::program_options::variables_map& options ) { return; diff --git a/libraries/app/util.cpp b/libraries/app/util.cpp new file mode 100644 index 0000000000..a7c80b9ebe --- /dev/null +++ b/libraries/app/util.cpp @@ -0,0 +1,161 @@ +/* + * Copyright (c) 2018 Abit More, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include + + +namespace graphene { namespace app { + +u256 to256( const fc::uint128& t ) +{ + u256 v(t.hi); + v <<= 64; + v += t.lo; + return v; +} + +fc::uint128 to_capped128( const u256& t ) +{ + static u256 max128 = to256( fc::uint128::max_value() ); + if( t >= max128 ) + return fc::uint128::max_value(); + fc::uint128 result; + u256 hi(t); + hi >>= 64; + result.hi = static_cast< uint64_t >( hi ); + u256 lo(t); + hi <<= 64; + lo -= hi; + result.lo = static_cast< uint64_t >( lo ); + return result; +} + +string uint128_amount_to_string( const fc::uint128& amount, const uint8_t precision ) +{ try { + string s = string( amount ); + if( precision == 0 || amount == fc::uint128() ) + return s; + + std::stringstream ss; + uint8_t pos = s.find_last_not_of( '0' ); // should be >= 0 + uint8_t len = s.size(); + if( len > precision ) + { + uint8_t left_len = len - precision; + ss << s.substr( 0, left_len ); + if( pos >= left_len ) + ss << '.' 
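to256 widens a 128-bit value held as two 64-bit halves into boost's uint256_t, and to_capped128 narrows back while saturating at the 128-bit maximum instead of overflowing. A standalone sketch of the same shift arithmetic, with a plain hi/lo struct standing in for fc::uint128:

    #include <boost/multiprecision/cpp_int.hpp>
    #include <cassert>
    #include <cstdint>

    using u256 = boost::multiprecision::uint256_t;

    struct u128 { uint64_t hi; uint64_t lo; };   // stand-in for fc::uint128

    u256 to256( const u128& t )
    {
        u256 v = t.hi;
        v <<= 64;
        v += t.lo;
        return v;
    }

    u128 to_capped128( const u256& t )
    {
        static const u256 max128 = to256( u128{ ~0ULL, ~0ULL } );
        if( t >= max128 )
            return u128{ ~0ULL, ~0ULL };         // saturate instead of overflowing
        u256 hi = t >> 64;
        u256 lo = t - ( hi << 64 );
        return u128{ static_cast<uint64_t>( hi ), static_cast<uint64_t>( lo ) };
    }

    int main()
    {
        u128 x{ 1, 2 };                          // value 2^64 + 2
        u128 y = to_capped128( to256( x ) );
        assert( y.hi == x.hi && y.lo == x.lo );  // round-trips exactly below the cap
    }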
<< s.substr( left_len, pos - left_len + 1 ); + } + else + { + ss << "0."; + for( uint8_t i = precision - len; i > 0; --i ) + ss << '0'; + ss << s.substr( 0, pos + 1 ); + } + return ss.str(); +} FC_CAPTURE_AND_RETHROW( (amount)(precision) ) } + +string price_to_string( const price& _price, const uint8_t base_precision, const uint8_t quote_precision ) +{ try { + if( _price.base.amount == 0 ) + return "0"; + FC_ASSERT( _price.base.amount >= 0 ); + FC_ASSERT( _price.quote.amount >= 0 ); + FC_ASSERT( base_precision <= 19 ); + FC_ASSERT( quote_precision <= 19 ); + price new_price = _price; + if( new_price.quote.amount == 0 ) + { + new_price.base.amount = std::numeric_limits::max(); + new_price.quote.amount = 1; + } + + // times (10**19) so won't overflow but have good accuracy + fc::uint128 price128 = fc::uint128( new_price.base.amount.value ) * uint64_t(10000000000000000000ULL) + / new_price.quote.amount.value; + + return uint128_amount_to_string( price128, 19 + base_precision - quote_precision ); +} FC_CAPTURE_AND_RETHROW( (_price)(base_precision)(quote_precision) ) } + +string price_diff_percent_string( const price& old_price, const price& new_price ) +{ try { + FC_ASSERT( old_price.base.asset_id == new_price.base.asset_id ); + FC_ASSERT( old_price.quote.asset_id == new_price.quote.asset_id ); + FC_ASSERT( old_price.base.amount >= 0 ); + FC_ASSERT( old_price.quote.amount >= 0 ); + FC_ASSERT( new_price.base.amount >= 0 ); + FC_ASSERT( new_price.quote.amount >= 0 ); + price old_price1 = old_price; + if( old_price.base.amount == 0 ) + { + old_price1.base.amount = 1; + old_price1.quote.amount = std::numeric_limits::max(); + } + else if( old_price.quote.amount == 0 ) + { + old_price1.base.amount = std::numeric_limits::max(); + old_price1.quote.amount = 1; + } + price new_price1 = new_price; + if( new_price.base.amount == 0 ) + { + new_price1.base.amount = 1; + new_price1.quote.amount = std::numeric_limits::max(); + } + else if( new_price.quote.amount == 0 ) + { + new_price1.base.amount = std::numeric_limits::max(); + new_price1.quote.amount = 1; + } + + // change = new/old - 1 = (new_base/new_quote)/(old_base/old_quote) - 1 + // = (new_base * old_quote) / (new_quote * old_base) - 1 + // = (new_base * old_quote - new_quote * old_base) / (new_quote * old_base) + fc::uint128 new128 = fc::uint128( new_price1.base.amount.value ) * old_price1.quote.amount.value; + fc::uint128 old128 = fc::uint128( old_price1.base.amount.value ) * new_price1.quote.amount.value; + bool non_negative = (new128 >= old128); + fc::uint128 diff128; + if( non_negative ) + diff128 = new128 - old128; + else + diff128 = old128 - new128; + static fc::uint128 max = fc::uint128::max_value() / 10000; + if( diff128 <= max ) + diff128 = diff128 * 10000 / old128; + else + { + u256 diff256 = to256( diff128 ); + diff256 *= 10000; + diff256 /= to256( old128 ); + diff128 = to_capped128( diff256 ); + } + string diff_str = uint128_amount_to_string( diff128, 2 ); // at most 2 decimal digits + if( non_negative || diff_str == "0" ) + return diff_str; + else + return "-" + diff_str; +} FC_CAPTURE_AND_RETHROW( (old_price)(new_price) ) } + +} } // graphene::app diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index 9cc4285dd8..4e25cd6b39 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -6,6 +6,7 @@ set_source_files_properties( "${CMAKE_CURRENT_BINARY_DIR}/include/graphene/chain add_dependencies( build_hardfork_hpp cat-parts ) file(GLOB HEADERS "include/graphene/chain/*.hpp") +file(GLOB 
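uint128_amount_to_string above formats a raw integer amount as a decimal string by placing the point `precision` digits from the right and dropping trailing zeros. A compact stand-alone version of the same formatting rule, using uint64_t in place of fc::uint128 so the expected outputs are easy to verify:

    #include <cassert>
    #include <string>

    // Format `amount` with `precision` digits after the point and trim
    // trailing zeros ("123.450" -> "123.45"), mirroring the behaviour of
    // uint128_amount_to_string for small values.
    std::string amount_to_string( uint64_t amount, unsigned precision )
    {
        std::string s = std::to_string( amount );
        if( precision == 0 || amount == 0 )
            return s;
        if( s.size() <= precision )
            s.insert( 0, precision - s.size() + 1, '0' );   // pad so a leading "0" remains
        s.insert( s.size() - precision, "." );
        while( s.back() == '0' ) s.pop_back();              // drop trailing zeros
        if( s.back() == '.' ) s.pop_back();                 // "5.000" -> "5"
        return s;
    }

    int main()
    {
        assert( amount_to_string( 123456, 3 ) == "123.456" );
        assert( amount_to_string( 123450, 3 ) == "123.45"  );
        assert( amount_to_string( 5,      3 ) == "0.005"   );
        assert( amount_to_string( 5000,   3 ) == "5"       );
    }

price_to_string then feeds this formatter with the base/quote ratio pre-scaled by 10^19 so the division keeps enough significant digits.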
PROTOCOL_HEADERS "include/graphene/chain/protocol/*.hpp") if( GRAPHENE_DISABLE_UNITY_BUILD ) set( GRAPHENE_DB_FILES @@ -19,6 +20,7 @@ if( GRAPHENE_DISABLE_UNITY_BUILD ) db_market.cpp db_update.cpp db_witness_schedule.cpp + db_notify.cpp ) message( STATUS "Graphene database unity build disabled" ) else( GRAPHENE_DISABLE_UNITY_BUILD ) @@ -41,6 +43,7 @@ add_library( graphene_chain protocol/assert.cpp protocol/account.cpp protocol/transfer.cpp + protocol/chain_parameters.cpp protocol/committee_member.cpp protocol/witness.cpp protocol/market.cpp @@ -82,6 +85,7 @@ add_library( graphene_chain account_object.cpp asset_object.cpp fba_object.cpp + market_object.cpp proposal_object.cpp vesting_balance_object.cpp @@ -90,6 +94,7 @@ add_library( graphene_chain is_authorized_asset.cpp ${HEADERS} + ${PROTOCOL_HEADERS} "${CMAKE_CURRENT_BINARY_DIR}/include/graphene/chain/hardfork.hpp" ) @@ -109,3 +114,5 @@ INSTALL( TARGETS LIBRARY DESTINATION lib ARCHIVE DESTINATION lib ) +INSTALL( FILES ${HEADERS} DESTINATION "include/graphene/chain" ) +INSTALL( FILES ${PROTOCOL_HEADERS} DESTINATION "include/graphene/chain/protocol" ) diff --git a/libraries/chain/account_evaluator.cpp b/libraries/chain/account_evaluator.cpp index b9b277160e..98e0766652 100644 --- a/libraries/chain/account_evaluator.cpp +++ b/libraries/chain/account_evaluator.cpp @@ -22,17 +22,17 @@ * THE SOFTWARE. */ -#include - #include #include #include #include +#include #include #include #include #include #include +#include #include #include @@ -69,11 +69,13 @@ void verify_account_votes( const database& db, const account_options& options ) FC_ASSERT( options.num_committee <= chain_params.maximum_committee_count, "Voted for more committee members than currently allowed (${c})", ("c", chain_params.maximum_committee_count) ); + FC_ASSERT( db.find_object(options.voting_account), "Invalid proxy account specified." ); + uint32_t max_vote_id = gpo.next_available_vote_id; bool has_worker_votes = false; for( auto id : options.votes ) { - FC_ASSERT( id < max_vote_id ); + FC_ASSERT( id < max_vote_id, "Can not vote for ${id} which does not exist.", ("id",id) ); has_worker_votes |= (id.type() == vote_id_type::worker); } @@ -84,14 +86,37 @@ void verify_account_votes( const database& db, const account_options& options ) { if( id.type() == vote_id_type::worker ) { - FC_ASSERT( against_worker_idx.find( id ) == against_worker_idx.end() ); + FC_ASSERT( against_worker_idx.find( id ) == against_worker_idx.end(), + "Can no longer vote against a worker." 
); + } + } + } + if ( db.head_block_time() >= HARDFORK_CORE_143_TIME ) { + const auto& approve_worker_idx = db.get_index_type().indices().get(); + const auto& committee_idx = db.get_index_type().indices().get(); + const auto& witness_idx = db.get_index_type().indices().get(); + for ( auto id : options.votes ) { + switch ( id.type() ) { + case vote_id_type::committee: + FC_ASSERT( committee_idx.find(id) != committee_idx.end(), + "Can not vote for ${id} which does not exist.", ("id",id) ); + break; + case vote_id_type::witness: + FC_ASSERT( witness_idx.find(id) != witness_idx.end(), + "Can not vote for ${id} which does not exist.", ("id",id) ); + break; + case vote_id_type::worker: + FC_ASSERT( approve_worker_idx.find( id ) != approve_worker_idx.end(), + "Can not vote for ${id} which does not exist.", ("id",id) ); + break; + default: + FC_THROW( "Invalid Vote Type: ${id}", ("id", id) ); + break; } } } - } - void_result account_create_evaluator::do_evaluate( const account_create_operation& op ) { try { database& d = db(); @@ -108,7 +133,6 @@ void_result account_create_evaluator::do_evaluate( const account_create_operatio FC_ASSERT( !op.extensions.value.buyback_options.valid() ); } - FC_ASSERT( d.find_object(op.options.voting_account), "Invalid proxy account specified." ); FC_ASSERT( fee_paying_account->is_lifetime_member(), "Only Lifetime members may register an account." ); FC_ASSERT( op.referrer(d).is_member(d.head_block_time()), "The referrer must be either a lifetime or annual subscriber." ); @@ -132,7 +156,8 @@ void_result account_create_evaluator::do_evaluate( const account_create_operatio if( op.name.size() ) { auto current_account_itr = acnt_indx.indices().get().find( op.name ); - FC_ASSERT( current_account_itr == acnt_indx.indices().get().end() ); + FC_ASSERT( current_account_itr == acnt_indx.indices().get().end(), + "Account '${a}' already exists.", ("a",op.name) ); } return void_result(); @@ -161,12 +186,15 @@ object_id_type account_create_evaluator::do_apply( const account_create_operatio referrer_percent = GRAPHENE_100_PERCENT; } - const auto& new_acnt_object = db().create( [&]( account_object& obj ){ + const auto& global_properties = d.get_global_properties(); + + const auto& new_acnt_object = d.create( [&o,&d,&global_properties,referrer_percent]( account_object& obj ) + { obj.registrar = o.registrar; obj.referrer = o.referrer; - obj.lifetime_referrer = o.referrer(db()).lifetime_referrer; + obj.lifetime_referrer = o.referrer(d).lifetime_referrer; - auto& params = db().get_global_properties().parameters; + const auto& params = global_properties.parameters; obj.network_fee_percentage = params.network_percent_of_fee; obj.lifetime_referrer_fee_percentage = params.lifetime_referrer_percent_of_fee; obj.referrer_rewards_percentage = referrer_percent; @@ -175,7 +203,11 @@ object_id_type account_create_evaluator::do_apply( const account_create_operatio obj.owner = o.owner; obj.active = o.active; obj.options = o.options; - obj.statistics = db().create([&](account_statistics_object& s){s.owner = obj.id;}).id; + obj.statistics = d.create([&obj](account_statistics_object& s){ + s.owner = obj.id; + s.name = obj.name; + s.is_voting = obj.options.is_voting(); + }).id; if( o.extensions.value.owner_special_authority.valid() ) obj.owner_special_authority = *(o.extensions.value.owner_special_authority); @@ -188,6 +220,7 @@ object_id_type account_create_evaluator::do_apply( const account_create_operatio } }); + /* if( has_small_percent ) { wlog( "Account affected by #453 registered in block ${n}: 
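After HARDFORK_CORE_143, every vote ID in the account options must refer to a committee member, witness, or worker that actually exists. A reduced sketch of that check, with the chain indexes replaced by plain std::set lookups (all names below are stand-ins, not the real index types):

    #include <cstdint>
    #include <set>
    #include <stdexcept>
    #include <vector>

    enum class vote_type { committee, witness, worker };

    struct vote_id { vote_type type; uint32_t instance; };

    struct chain_state   // stand-ins for the committee / witness / worker vote indexes
    {
        std::set<uint32_t> committee_votes, witness_votes, worker_approve_votes;
    };

    // Mirrors the post-HARDFORK_CORE_143 branch of verify_account_votes.
    void verify_votes( const chain_state& s, const std::vector<vote_id>& votes )
    {
        for( const auto& id : votes )
        {
            switch( id.type )
            {
            case vote_type::committee:
                if( !s.committee_votes.count( id.instance ) )
                    throw std::invalid_argument( "Can not vote for a committee member which does not exist." );
                break;
            case vote_type::witness:
                if( !s.witness_votes.count( id.instance ) )
                    throw std::invalid_argument( "Can not vote for a witness which does not exist." );
                break;
            case vote_type::worker:
                if( !s.worker_approve_votes.count( id.instance ) )
                    throw std::invalid_argument( "Can not vote for a worker which does not exist." );
                break;
            }
        }
    }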
${na} reg=${reg} ref=${ref}:${refp} ltr=${ltr}:${ltrp}", @@ -196,18 +229,20 @@ object_id_type account_create_evaluator::do_apply( const account_create_operatio ("refp", new_acnt_object.referrer_rewards_percentage) ("ltrp", new_acnt_object.lifetime_referrer_fee_percentage) ); wlog( "Affected account object is ${o}", ("o", new_acnt_object) ); } + */ - const auto& dynamic_properties = db().get_dynamic_global_properties(); - db().modify(dynamic_properties, [](dynamic_global_property_object& p) { + const auto& dynamic_properties = d.get_dynamic_global_properties(); + d.modify(dynamic_properties, [](dynamic_global_property_object& p) { ++p.accounts_registered_this_interval; }); - const auto& global_properties = db().get_global_properties(); - if( dynamic_properties.accounts_registered_this_interval % - global_properties.parameters.accounts_per_fee_scale == 0 ) - db().modify(global_properties, [&dynamic_properties](global_property_object& p) { + if( dynamic_properties.accounts_registered_this_interval % global_properties.parameters.accounts_per_fee_scale == 0 + && global_properties.parameters.account_fee_scale_bitshifts != 0 ) + { + d.modify(global_properties, [](global_property_object& p) { p.parameters.current_fees->get().basic_fee <<= p.parameters.account_fee_scale_bitshifts; }); + } if( o.extensions.value.owner_special_authority.valid() || o.extensions.value.active_special_authority.valid() ) @@ -276,8 +311,25 @@ void_result account_update_evaluator::do_evaluate( const account_update_operatio void_result account_update_evaluator::do_apply( const account_update_operation& o ) { try { database& d = db(); - bool sa_before, sa_after; - d.modify( *acnt, [&](account_object& a){ + + bool sa_before = acnt->has_special_authority(); + + // update account statistics + if( o.new_options.valid() ) + { + d.modify( acnt->statistics( d ), [&]( account_statistics_object& aso ) + { + if(o.new_options->is_voting() != acnt->options.is_voting()) + aso.is_voting = !aso.is_voting; + + if((o.new_options->votes != acnt->options.votes || + o.new_options->voting_account != acnt->options.voting_account)) + aso.last_vote_time = d.head_block_time(); + } ); + } + + // update account object + d.modify( *acnt, [&o](account_object& a){ if( o.owner ) { a.owner = *o.owner; @@ -289,7 +341,6 @@ void_result account_update_evaluator::do_apply( const account_update_operation& a.top_n_control_flags = 0; } if( o.new_options ) a.options = *o.new_options; - sa_before = a.has_special_authority(); if( o.extensions.value.owner_special_authority.valid() ) { a.owner_special_authority = *(o.extensions.value.owner_special_authority); @@ -300,17 +351,18 @@ void_result account_update_evaluator::do_apply( const account_update_operation& a.active_special_authority = *(o.extensions.value.active_special_authority); a.top_n_control_flags = 0; } - sa_after = a.has_special_authority(); }); - if( sa_before & (!sa_after) ) + bool sa_after = acnt->has_special_authority(); + + if( sa_before && (!sa_after) ) { const auto& sa_idx = d.get_index_type< special_authority_index >().indices().get(); auto sa_it = sa_idx.find( o.account ); assert( sa_it != sa_idx.end() ); d.remove( *sa_it ); } - else if( (!sa_before) & sa_after ) + else if( (!sa_before) && sa_after ) { d.create< special_authority_object >( [&]( special_authority_object& sa ) { @@ -327,7 +379,7 @@ void_result account_whitelist_evaluator::do_evaluate(const account_whitelist_ope listed_account = &o.account_to_list(d); if( !d.get_global_properties().parameters.allow_non_member_whitelists ) - 
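The registration fee scaling touched above left-shifts the basic fee by account_fee_scale_bitshifts once per full batch of accounts_per_fee_scale registrations, and is now skipped entirely when the shift is zero. A small sketch of the cumulative effect (the evaluator applies the shift incrementally; this helper just computes the result after N registrations):

    #include <cassert>
    #include <cstdint>

    uint64_t scaled_basic_fee( uint64_t basic_fee,
                               uint32_t accounts_registered_this_interval,
                               uint32_t accounts_per_fee_scale,
                               uint8_t  account_fee_scale_bitshifts )
    {
        if( account_fee_scale_bitshifts == 0 )
            return basic_fee;                       // scaling disabled
        uint32_t steps = accounts_registered_this_interval / accounts_per_fee_scale;
        return basic_fee << ( steps * account_fee_scale_bitshifts );
    }

    int main()
    {
        // With a shift of 1 the fee doubles after each full batch of registrations.
        assert( scaled_basic_fee( 100, 0,    1000, 1 ) == 100 );
        assert( scaled_basic_fee( 100, 1000, 1000, 1 ) == 200 );
        assert( scaled_basic_fee( 100, 2000, 1000, 1 ) == 400 );
    }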
FC_ASSERT(o.authorizing_account(d).is_lifetime_member()); + FC_ASSERT( o.authorizing_account(d).is_lifetime_member(), "The authorizing account must be a lifetime member." ); return void_result(); } FC_CAPTURE_AND_RETHROW( (o) ) } diff --git a/libraries/chain/account_object.cpp b/libraries/chain/account_object.cpp index 90d97692a5..7acaf10b21 100644 --- a/libraries/chain/account_object.cpp +++ b/libraries/chain/account_object.cpp @@ -46,6 +46,8 @@ void account_balance_object::adjust_balance(const asset& delta) { assert(delta.asset_id == asset_type); balance += delta.amount; + if( asset_type == asset_id_type() ) // CORE asset + maintenance_flag = true; } void account_statistics_object::process_fees(const account_object& a, database& d) const @@ -57,8 +59,8 @@ void account_statistics_object::process_fees(const account_object& a, database& // Check the referrer -- if he's no longer a member, pay to the lifetime referrer instead. // No need to check the registrar; registrars are required to be lifetime members. if( account.referrer(d).is_basic_account(d.head_block_time()) ) - d.modify(account, [](account_object& a) { - a.referrer = a.lifetime_referrer; + d.modify( account, [](account_object& acc) { + acc.referrer = acc.lifetime_referrer; }); share_type network_cut = cut_fee(core_fee_total, account.network_fee_percentage); @@ -74,8 +76,8 @@ void account_statistics_object::process_fees(const account_object& a, database& share_type lifetime_cut = cut_fee(core_fee_total, account.lifetime_referrer_fee_percentage); share_type referral = core_fee_total - network_cut - lifetime_cut; - d.modify(asset_dynamic_data_id_type()(d), [network_cut](asset_dynamic_data_object& d) { - d.accumulated_fees += network_cut; + d.modify( d.get_core_dynamic_data(), [network_cut](asset_dynamic_data_object& addo) { + addo.accumulated_fees += network_cut; }); // Potential optimization: Skip some of this math and object lookups by special casing on the account type. 
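process_fees splits each account's accumulated core fees three ways: a network cut added to the core asset's accumulated_fees, a lifetime-referrer cut, and the remainder as the referral reward. A sketch of the arithmetic, assuming the usual Graphene convention that percentages are expressed in hundredths of a percent (GRAPHENE_100_PERCENT = 10000):

    #include <cassert>
    #include <cstdint>

    static const uint64_t GRAPHENE_100_PERCENT = 10000;   // 100% in basis points

    // Same rounding as cut_fee: integer multiply, then divide.
    uint64_t cut_fee( uint64_t amount, uint64_t percent )
    {
        return amount * percent / GRAPHENE_100_PERCENT;
    }

    struct fee_split { uint64_t network, lifetime, referral; };

    fee_split split_fees( uint64_t core_fee_total,
                          uint64_t network_fee_percentage,
                          uint64_t lifetime_referrer_fee_percentage )
    {
        fee_split s;
        s.network  = cut_fee( core_fee_total, network_fee_percentage );
        s.lifetime = cut_fee( core_fee_total, lifetime_referrer_fee_percentage );
        s.referral = core_fee_total - s.network - s.lifetime;  // remainder, nothing lost to rounding
        return s;
    }

    int main()
    {
        auto s = split_fees( 1000, 2000 /*20%*/, 3000 /*30%*/ );
        assert( s.network == 200 && s.lifetime == 300 && s.referral == 500 );
    }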
@@ -119,9 +121,9 @@ set account_member_index::get_account_members(const account_obj result.insert(auth.first); return result; } -set account_member_index::get_key_members(const account_object& a)const +set account_member_index::get_key_members(const account_object& a)const { - set result; + set result; for( auto auth : a.owner.key_auths ) result.insert(auth.first); for( auto auth : a.active.key_auths ) @@ -213,7 +215,7 @@ void account_member_index::object_modified(const object& after) { - set after_key_members = get_key_members(a); + set after_key_members = get_key_members(a); vector removed; removed.reserve(before_key_members.size()); std::set_difference(before_key_members.begin(), before_key_members.end(), @@ -267,4 +269,54 @@ void account_referrer_index::object_modified( const object& after ) { } +const uint8_t balances_by_account_index::bits = 20; +const uint64_t balances_by_account_index::mask = (1ULL << balances_by_account_index::bits) - 1; + +void balances_by_account_index::object_inserted( const object& obj ) +{ + const auto& abo = dynamic_cast< const account_balance_object& >( obj ); + while( balances.size() < (abo.owner.instance.value >> bits) + 1 ) + { + balances.reserve( (abo.owner.instance.value >> bits) + 1 ); + balances.resize( balances.size() + 1 ); + balances.back().resize( 1ULL << bits ); + } + balances[abo.owner.instance.value >> bits][abo.owner.instance.value & mask][abo.asset_type] = &abo; +} + +void balances_by_account_index::object_removed( const object& obj ) +{ + const auto& abo = dynamic_cast< const account_balance_object& >( obj ); + if( balances.size() < (abo.owner.instance.value >> bits) + 1 ) return; + balances[abo.owner.instance.value >> bits][abo.owner.instance.value & mask].erase( abo.asset_type ); +} + +void balances_by_account_index::about_to_modify( const object& before ) +{ + ids_being_modified.emplace( before.id ); +} + +void balances_by_account_index::object_modified( const object& after ) +{ + FC_ASSERT( ids_being_modified.top() == after.id, "Modification of ID is not supported!"); + ids_being_modified.pop(); +} + +const map< asset_id_type, const account_balance_object* >& balances_by_account_index::get_account_balances( const account_id_type& acct )const +{ + static const map< asset_id_type, const account_balance_object* > _empty; + + if( balances.size() < (acct.instance.value >> bits) + 1 ) return _empty; + return balances[acct.instance.value >> bits][acct.instance.value & mask]; +} + +const account_balance_object* balances_by_account_index::get_account_balance( const account_id_type& acct, const asset_id_type& asset )const +{ + if( balances.size() < (acct.instance.value >> bits) + 1 ) return nullptr; + const auto& mine = balances[acct.instance.value >> bits][acct.instance.value & mask]; + const auto itr = mine.find( asset ); + if( mine.end() == itr ) return nullptr; + return itr->second; +} + } } // graphene::chain diff --git a/libraries/chain/asset_evaluator.cpp b/libraries/chain/asset_evaluator.cpp index 25daa7cdd9..9194a02d25 100644 --- a/libraries/chain/asset_evaluator.cpp +++ b/libraries/chain/asset_evaluator.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. + * Copyright (c) 2015-2018 Cryptonomex, Inc., and contributors. 
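The new balances_by_account_index shards balances by account instance number: the high bits of the instance select a fixed-size block and the low bits select a slot, each slot holding a per-asset map. A self-contained sketch of the same two-level addressing (the secondary-index plumbing is omitted, and a small block size is used here; the real index uses 2^20 slots per block):

    #include <cassert>
    #include <cstdint>
    #include <map>
    #include <vector>

    struct balance_record { uint64_t owner; uint64_t asset; int64_t amount; };

    class balances_by_account
    {
        static constexpr uint8_t  bits = 8;                 // 20 in the real index
        static constexpr uint64_t mask = (1ULL << bits) - 1;
        // blocks[owner >> bits][owner & mask] -> per-asset balances of that account
        std::vector< std::vector< std::map<uint64_t, const balance_record*> > > blocks;

    public:
        void insert( const balance_record& r )
        {
            while( blocks.size() < (r.owner >> bits) + 1 )
            {
                blocks.resize( blocks.size() + 1 );
                blocks.back().resize( 1ULL << bits );
            }
            blocks[r.owner >> bits][r.owner & mask][r.asset] = &r;
        }

        const balance_record* find( uint64_t owner, uint64_t asset ) const
        {
            if( blocks.size() < (owner >> bits) + 1 ) return nullptr;
            const auto& mine = blocks[owner >> bits][owner & mask];
            auto itr = mine.find( asset );
            return itr == mine.end() ? nullptr : itr->second;
        }
    };

    int main()
    {
        balance_record r{ 42, 0, 1000 };
        balances_by_account idx;
        idx.insert( r );
        assert( idx.find( 42, 0 ) == &r );
        assert( idx.find( 42, 1 ) == nullptr );
    }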
* * The MIT License * @@ -32,6 +32,8 @@ #include +#include + namespace graphene { namespace chain { void_result asset_create_evaluator::do_evaluate( const asset_create_operation& op ) @@ -53,35 +55,31 @@ void_result asset_create_evaluator::do_evaluate( const asset_create_operation& o auto asset_symbol_itr = asset_indx.find( op.symbol ); FC_ASSERT( asset_symbol_itr == asset_indx.end() ); - if( d.head_block_time() <= HARDFORK_409_TIME ) + if( d.head_block_time() > HARDFORK_385_TIME ) { - auto dotpos = op.symbol.find( '.' ); + auto dotpos = op.symbol.rfind( '.' ); if( dotpos != std::string::npos ) { auto prefix = op.symbol.substr( 0, dotpos ); - auto asset_symbol_itr = asset_indx.find( op.symbol ); + auto asset_symbol_itr = asset_indx.find( prefix ); FC_ASSERT( asset_symbol_itr != asset_indx.end(), "Asset ${s} may only be created by issuer of ${p}, but ${p} has not been registered", ("s",op.symbol)("p",prefix) ); FC_ASSERT( asset_symbol_itr->issuer == op.issuer, "Asset ${s} may only be created by issuer of ${p}, ${i}", ("s",op.symbol)("p",prefix)("i", op.issuer(d).name) ); } + + if(d.head_block_time() <= HARDFORK_CORE_620_TIME ) { // TODO: remove this check after hf_620 + static const std::locale& loc = std::locale::classic(); + FC_ASSERT(isalpha(op.symbol.back(), loc), "Asset ${s} must end with alpha character before hardfork 620", ("s",op.symbol)); + } } else { - auto dotpos = op.symbol.rfind( '.' ); + auto dotpos = op.symbol.find( '.' ); if( dotpos != std::string::npos ) - { - auto prefix = op.symbol.substr( 0, dotpos ); - auto asset_symbol_itr = asset_indx.find( prefix ); - FC_ASSERT( asset_symbol_itr != asset_indx.end(), "Asset ${s} may only be created by issuer of ${p}, but ${p} has not been registered", - ("s",op.symbol)("p",prefix) ); - FC_ASSERT( asset_symbol_itr->issuer == op.issuer, "Asset ${s} may only be created by issuer of ${p}, ${i}", - ("s",op.symbol)("p",prefix)("i", op.issuer(d).name) ); - } + wlog( "Asset ${s} has a name which requires hardfork 385", ("s",op.symbol) ); } - core_fee_paid -= core_fee_paid.value/2; - if( op.bitasset_opts ) { const asset_object& backing = op.bitasset_opts->short_backing_asset(d); @@ -108,25 +106,45 @@ void_result asset_create_evaluator::do_evaluate( const asset_create_operation& o return void_result(); } FC_CAPTURE_AND_RETHROW( (op) ) } +void asset_create_evaluator::pay_fee() +{ + fee_is_odd = core_fee_paid.value & 1; + core_fee_paid -= core_fee_paid.value/2; + generic_evaluator::pay_fee(); +} + object_id_type asset_create_evaluator::do_apply( const asset_create_operation& op ) { try { + database& d = db(); + + bool hf_429 = fee_is_odd && d.head_block_time() > HARDFORK_CORE_429_TIME; + const asset_dynamic_data_object& dyn_asset = - db().create( [&]( asset_dynamic_data_object& a ) { + d.create( [hf_429,this]( asset_dynamic_data_object& a ) { a.current_supply = 0; - a.fee_pool = core_fee_paid; //op.calculate_fee(db().current_fee_schedule()).value / 2; + a.fee_pool = core_fee_paid - (hf_429 ? 
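The corrected branch above enforces the post-HARDFORK_385 sub-asset rule: a symbol of the form "PARENT.CHILD" may only be created by the issuer of "PARENT", and the prefix is now taken from the last dot. A simplified stand-alone version of the check (existing_assets is a hypothetical symbol-to-issuer map standing in for the asset index):

    #include <cstdint>
    #include <map>
    #include <stdexcept>
    #include <string>

    void check_subasset_symbol( const std::map<std::string, uint64_t>& existing_assets,
                                const std::string& symbol, uint64_t issuer )
    {
        auto dotpos = symbol.rfind( '.' );
        if( dotpos == std::string::npos )
            return;                                   // not a sub-asset, nothing to check
        std::string prefix = symbol.substr( 0, dotpos );
        auto itr = existing_assets.find( prefix );
        if( itr == existing_assets.end() )
            throw std::invalid_argument( "parent asset " + prefix + " has not been registered" );
        if( itr->second != issuer )
            throw std::invalid_argument( "only the issuer of " + prefix + " may create " + symbol );
    }

    int main()
    {
        std::map<std::string, uint64_t> assets = { { "ABC", 17 } };
        check_subasset_symbol( assets, "ABC.GOLD", 17 );               // ok: issuer matches
        try { check_subasset_symbol( assets, "XYZ.GOLD", 17 ); return 1; }
        catch( const std::invalid_argument& ) { /* expected: XYZ not registered */ }
    }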
1 : 0); }); + if( fee_is_odd && !hf_429 ) + { + d.modify( d.get_core_dynamic_data(), []( asset_dynamic_data_object& dd ) { + dd.current_supply++; + }); + } + asset_bitasset_data_id_type bit_asset_id; + + auto next_asset_id = d.get_index_type().get_next_id(); + if( op.bitasset_opts.valid() ) - bit_asset_id = db().create( [&]( asset_bitasset_data_object& a ) { + bit_asset_id = d.create( [&op,next_asset_id]( asset_bitasset_data_object& a ) { a.options = *op.bitasset_opts; a.is_prediction_market = op.is_prediction_market; + a.asset_id = next_asset_id; }).id; - auto next_asset_id = db().get_index_type().get_next_id(); - const asset_object& new_asset = - db().create( [&]( asset_object& a ) { + d.create( [&op,next_asset_id,&dyn_asset,bit_asset_id]( asset_object& a ) { a.issuer = op.issuer; a.symbol = op.symbol; a.precision = op.precision; @@ -139,7 +157,7 @@ object_id_type asset_create_evaluator::do_apply( const asset_create_operation& o if( op.bitasset_opts.valid() ) a.bitasset_data_id = bit_asset_id; }); - assert( new_asset.id == next_asset_id ); + FC_ASSERT( new_asset.id == next_asset_id, "Unexpected object database error, object id mismatch" ); return new_asset.id; } FC_CAPTURE_AND_RETHROW( (op) ) } @@ -165,7 +183,7 @@ void_result asset_issue_evaluator::do_apply( const asset_issue_operation& o ) { try { db().adjust_balance( o.issue_to_account, o.asset_to_issue ); - db().modify( *asset_dyn_data, [&]( asset_dynamic_data_object& data ){ + db().modify( *asset_dyn_data, [&o]( asset_dynamic_data_object& data ){ data.current_supply += o.asset_to_issue.amount; }); @@ -197,7 +215,7 @@ void_result asset_reserve_evaluator::do_apply( const asset_reserve_operation& o { try { db().adjust_balance( o.payer, -o.amount_to_reserve ); - db().modify( *asset_dyn_data, [&]( asset_dynamic_data_object& data ){ + db().modify( *asset_dyn_data, [&o]( asset_dynamic_data_object& data ){ data.current_supply -= o.amount_to_reserve.amount; }); @@ -219,13 +237,30 @@ void_result asset_fund_fee_pool_evaluator::do_apply(const asset_fund_fee_pool_op { try { db().adjust_balance(o.from_account, -o.amount); - db().modify( *asset_dyn_data, [&]( asset_dynamic_data_object& data ) { + db().modify( *asset_dyn_data, [&o]( asset_dynamic_data_object& data ) { data.fee_pool += o.amount; }); return void_result(); } FC_CAPTURE_AND_RETHROW( (o) ) } +static void validate_new_issuer( const database& d, const asset_object& a, account_id_type new_issuer ) +{ try { + FC_ASSERT(d.find_object(new_issuer)); + if( a.is_market_issued() && new_issuer == GRAPHENE_COMMITTEE_ACCOUNT ) + { + const asset_object& backing = a.bitasset_data(d).options.short_backing_asset(d); + if( backing.is_market_issued() ) + { + const asset_object& backing_backing = backing.bitasset_data(d).options.short_backing_asset(d); + FC_ASSERT( backing_backing.get_id() == asset_id_type(), + "May not create a blockchain-controlled market asset which is not backed by CORE."); + } else + FC_ASSERT( backing.get_id() == asset_id_type(), + "May not create a blockchain-controlled market asset which is not backed by CORE."); + } +} FC_CAPTURE_AND_RETHROW( (a)(new_issuer) ) } + void_result asset_update_evaluator::do_evaluate(const asset_update_operation& o) { try { database& d = db(); @@ -237,19 +272,9 @@ void_result asset_update_evaluator::do_evaluate(const asset_update_operation& o) if( o.new_issuer ) { - FC_ASSERT(d.find_object(*o.new_issuer)); - if( a.is_market_issued() && *o.new_issuer == GRAPHENE_COMMITTEE_ACCOUNT ) - { - const asset_object& backing = 
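asset_create pays half of the creation fee into the new asset's fee pool. When the fee is odd, the two halves no longer add up to the amount paid: before HARDFORK_CORE_429 the stray satoshi was effectively created from nothing (hence the CORE current_supply bump), afterwards it is simply withheld from the pool. A small arithmetic illustration of the split:

    #include <cassert>
    #include <cstdint>

    struct creation_fee_split
    {
        uint64_t through_fee_path;   // half that follows the normal network/referral fee path
        uint64_t to_fee_pool;        // half seeded into the new asset's fee pool
        uint64_t core_supply_bump;   // satoshis created from nothing (pre-HF-429 oddity)
    };

    creation_fee_split split_creation_fee( uint64_t total_fee, bool after_hf_429 )
    {
        bool fee_is_odd = total_fee & 1;
        creation_fee_split s;
        s.through_fee_path = total_fee - total_fee / 2;                              // rounded-up half
        s.to_fee_pool      = s.through_fee_path - ( fee_is_odd && after_hf_429 ? 1 : 0 );
        s.core_supply_bump = ( fee_is_odd && !after_hf_429 ) ? 1 : 0;
        return s;
    }

    int main()
    {
        auto pre  = split_creation_fee( 7, false );
        auto post = split_creation_fee( 7, true );
        assert( pre.through_fee_path  + pre.to_fee_pool  == 7 + pre.core_supply_bump );
        assert( post.through_fee_path + post.to_fee_pool == 7 );
    }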
a.bitasset_data(d).options.short_backing_asset(d); - if( backing.is_market_issued() ) - { - const asset_object& backing_backing = backing.bitasset_data(d).options.short_backing_asset(d); - FC_ASSERT( backing_backing.get_id() == asset_id_type(), - "May not create a blockchain-controlled market asset which is not backed by CORE."); - } else - FC_ASSERT( backing.get_id() == asset_id_type(), - "May not create a blockchain-controlled market asset which is not backed by CORE."); - } + FC_ASSERT( d.head_block_time() < HARDFORK_CORE_199_TIME, + "Since Hardfork #199, updating issuer requires the use of asset_update_issuer_operation."); + validate_new_issuer( d, a, *o.new_issuer ); } if( (d.head_block_time() < HARDFORK_572_TIME) || (a.dynamic_asset_data_id(d).current_supply != 0) ) @@ -264,7 +289,9 @@ void_result asset_update_evaluator::do_evaluate(const asset_update_operation& o) "Flag change is forbidden by issuer permissions"); asset_to_update = &a; - FC_ASSERT( o.issuer == a.issuer, "", ("o.issuer", o.issuer)("a.issuer", a.issuer) ); + FC_ASSERT( o.issuer == a.issuer, + "Incorrect issuer for asset! (${o.issuer} != ${a.issuer})", + ("o.issuer", o.issuer)("a.issuer", a.issuer) ); const auto& chain_parameters = d.get_global_properties().parameters; @@ -283,7 +310,7 @@ void_result asset_update_evaluator::do_apply(const asset_update_operation& o) database& d = db(); // If we are now disabling force settlements, cancel all open force settlement orders - if( o.new_options.flags & disable_force_settle && asset_to_update->can_force_settle() ) + if( (o.new_options.flags & disable_force_settle) && asset_to_update->can_force_settle() ) { const auto& idx = d.get_index_type().indices().get(); // Funky iteration code because we're removing objects as we go. We have to re-initialize itr every loop instead @@ -291,10 +318,24 @@ void_result asset_update_evaluator::do_apply(const asset_update_operation& o) for( auto itr = idx.lower_bound(o.asset_to_update); itr != idx.end() && itr->settlement_asset_id() == o.asset_to_update; itr = idx.lower_bound(o.asset_to_update) ) - d.cancel_order(*itr); + d.cancel_settle_order(*itr); } - d.modify(*asset_to_update, [&](asset_object& a) { + // For market-issued assets, if core change rate changed, update flag in bitasset data + if( asset_to_update->is_market_issued() + && asset_to_update->options.core_exchange_rate != o.new_options.core_exchange_rate ) + { + const auto& bitasset = asset_to_update->bitasset_data(d); + if( !bitasset.asset_cer_updated ) + { + d.modify( bitasset, [](asset_bitasset_data_object& b) + { + b.asset_cer_updated = true; + }); + } + } + + d.modify(*asset_to_update, [&o](asset_object& a) { if( o.new_issuer ) a.issuer = *o.new_issuer; a.options = o.new_options; @@ -303,65 +344,406 @@ void_result asset_update_evaluator::do_apply(const asset_update_operation& o) return void_result(); } FC_CAPTURE_AND_RETHROW( (o) ) } -void_result asset_update_bitasset_evaluator::do_evaluate(const asset_update_bitasset_operation& o) +void_result asset_update_issuer_evaluator::do_evaluate(const asset_update_issuer_operation& o) { try { database& d = db(); const asset_object& a = o.asset_to_update(d); - FC_ASSERT(a.is_market_issued(), "Cannot update BitAsset-specific settings on a non-BitAsset."); + validate_new_issuer( d, a, o.new_issuer ); - const asset_bitasset_data_object& b = a.bitasset_data(d); - FC_ASSERT( !b.has_settlement(), "Cannot update a bitasset after a settlement has executed" ); - if( o.new_options.short_backing_asset != b.options.short_backing_asset ) + 
asset_to_update = &a; + FC_ASSERT( o.issuer == a.issuer, + "Incorrect issuer for asset! (${o.issuer} != ${a.issuer})", + ("o.issuer", o.issuer)("a.issuer", a.issuer) ); + + if( d.head_block_time() < HARDFORK_CORE_199_TIME ) { - FC_ASSERT(a.dynamic_asset_data_id(d).current_supply == 0); - FC_ASSERT(d.find_object(o.new_options.short_backing_asset)); + // TODO: remove after HARDFORK_CORE_199_TIME has passed + FC_ASSERT(false, "Not allowed until hardfork 199"); + } + + return void_result(); +} FC_CAPTURE_AND_RETHROW((o)) } + +void_result asset_update_issuer_evaluator::do_apply(const asset_update_issuer_operation& o) +{ try { + database& d = db(); + d.modify(*asset_to_update, [&](asset_object& a) { + a.issuer = o.new_issuer; + }); + + return void_result(); +} FC_CAPTURE_AND_RETHROW( (o) ) } + +/**************** + * Loop through assets, looking for ones that are backed by the asset being changed. When found, + * perform checks to verify validity + * + * @param d the database + * @param op the bitasset update operation being performed + * @param new_backing_asset + * @param true if after hf 922/931 (if nothing triggers, this and the logic that depends on it + * should be removed). + */ +void check_children_of_bitasset(database& d, const asset_update_bitasset_operation& op, + const asset_object& new_backing_asset, bool after_hf_922_931) +{ + // no need to do these checks if the new backing asset is CORE + if ( new_backing_asset.get_id() == asset_id_type() ) + return; + + // loop through all assets that have this asset as a backing asset + const auto& idx = d.get_index_type() + .indices() + .get(); + auto backed_range = idx.equal_range(op.asset_to_update); + std::for_each( backed_range.first, backed_range.second, + [after_hf_922_931, &new_backing_asset, &d, &op](const asset_bitasset_data_object& bitasset_data) + { + const auto& child = bitasset_data.asset_id(d); + if ( after_hf_922_931 ) + { + FC_ASSERT( child.get_id() != op.new_options.short_backing_asset, + "A BitAsset would be invalidated by changing this backing asset ('A' backed by 'B' backed by 'A')." ); + + FC_ASSERT( child.issuer != GRAPHENE_COMMITTEE_ACCOUNT, + "A blockchain-controlled market asset would be invalidated by changing this backing asset." ); + + FC_ASSERT( !new_backing_asset.is_market_issued(), + "A non-blockchain controlled BitAsset would be invalidated by changing this backing asset."); + + } + else + { + if( child.get_id() == op.new_options.short_backing_asset ) + { + wlog( "Before hf-922-931, modified an asset to be backed by another, but would cause a continuous " + "loop. A cannot be backed by B which is backed by A." ); + return; + } + + if( child.issuer == GRAPHENE_COMMITTEE_ACCOUNT ) + { + wlog( "before hf-922-931, modified an asset to be backed by a non-CORE, but this asset " + "is a backing asset for a committee-issued asset. This occurred at block ${b}", + ("b", d.head_block_num())); + return; + } + else + { + if ( new_backing_asset.is_market_issued() ) // a.k.a. !UIA + { + wlog( "before hf-922-931, modified an asset to be backed by an MPA, but this asset " + "is a backing asset for another MPA, which would cause MPA backed by MPA backed by MPA. 
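check_children_of_bitasset walks every asset backed by the one being updated and rejects changes that would create a backing loop ('A' backed by 'B' backed by 'A') or strand a committee-owned child. A reduced sketch of the loop detection, with a plain map from asset to backing asset standing in for the by_short_backing_asset index:

    #include <cstdint>
    #include <map>
    #include <stdexcept>

    // backing[a] = asset that bitasset `a` is backed by (absent => not a bitasset).
    void check_children( const std::map<uint64_t, uint64_t>& backing,
                         uint64_t asset_to_update, uint64_t new_backing )
    {
        const uint64_t CORE = 0;
        if( new_backing == CORE )
            return;                                // backing by CORE can never create a loop
        for( const auto& entry : backing )
            if( entry.second == asset_to_update && entry.first == new_backing )
                throw std::invalid_argument(
                    "A BitAsset would be invalidated by changing this backing asset "
                    "('A' backed by 'B' backed by 'A')." );
    }

    int main()
    {
        // Asset 1 backed by 0 (CORE); asset 2 backed by 1.
        std::map<uint64_t, uint64_t> backing = { { 1, 0 }, { 2, 1 } };
        check_children( backing, 1, 3 );                        // fine: 3 is not a child of 1
        try { check_children( backing, 1, 2 ); return 1; }      // 1 -> 2 -> 1 loop
        catch( const std::invalid_argument& ) { /* expected */ }
    }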
" + "This occurred at block ${b}", + ("b", d.head_block_num())); + return; + } + } // if child.issuer + } // if hf 922/931 + } ); // end of lambda and std::for_each() +} // check_children_of_bitasset + +void_result asset_update_bitasset_evaluator::do_evaluate(const asset_update_bitasset_operation& op) +{ try { + database& d = db(); + + const asset_object& asset_obj = op.asset_to_update(d); + + FC_ASSERT( asset_obj.is_market_issued(), "Cannot update BitAsset-specific settings on a non-BitAsset." ); + + FC_ASSERT( op.issuer == asset_obj.issuer, "Only asset issuer can update bitasset_data of the asset." ); + + const asset_bitasset_data_object& current_bitasset_data = asset_obj.bitasset_data(d); + + FC_ASSERT( !current_bitasset_data.has_settlement(), "Cannot update a bitasset after a global settlement has executed" ); - if( a.issuer == GRAPHENE_COMMITTEE_ACCOUNT ) + bool after_hf_core_922_931 = ( d.get_dynamic_global_properties().next_maintenance_time > HARDFORK_CORE_922_931_TIME ); + + // Are we changing the backing asset? + if( op.new_options.short_backing_asset != current_bitasset_data.options.short_backing_asset ) + { + FC_ASSERT( asset_obj.dynamic_asset_data_id(d).current_supply == 0, + "Cannot update a bitasset if there is already a current supply." ); + + const asset_object& new_backing_asset = op.new_options.short_backing_asset(d); // check if the asset exists + + if( after_hf_core_922_931 ) // TODO remove this check after hard fork if things in `else` did not occur + { + FC_ASSERT( op.new_options.short_backing_asset != asset_obj.get_id(), + "Cannot update an asset to be backed by itself." ); + + if( current_bitasset_data.is_prediction_market ) + { + FC_ASSERT( asset_obj.precision == new_backing_asset.precision, + "The precision of the asset and backing asset must be equal." ); + } + + if( asset_obj.issuer == GRAPHENE_COMMITTEE_ACCOUNT ) + { + if( new_backing_asset.is_market_issued() ) + { + FC_ASSERT( new_backing_asset.bitasset_data(d).options.short_backing_asset == asset_id_type(), + "May not modify a blockchain-controlled market asset to be backed by an asset which is not " + "backed by CORE." ); + + check_children_of_bitasset( d, op, new_backing_asset, after_hf_core_922_931 ); + } + else + { + FC_ASSERT( new_backing_asset.get_id() == asset_id_type(), + "May not modify a blockchain-controlled market asset to be backed by an asset which is not " + "market issued asset nor CORE." ); + } + } + else + { + // not a committee issued asset + + // If we're changing to a backing_asset that is not CORE, we need to look at any + // asset ( "CHILD" ) that has this one as a backing asset. If CHILD is committee-owned, + // the change is not allowed. If CHILD is user-owned, then this asset's backing + // asset must be either CORE or a UIA. + if ( new_backing_asset.get_id() != asset_id_type() ) // not backed by CORE + { + check_children_of_bitasset( d, op, new_backing_asset, after_hf_core_922_931 ); + } + + } + + // Check if the new backing asset is itself backed by something. 
It must be CORE or a UIA + if ( new_backing_asset.is_market_issued() ) + { + asset_id_type backing_backing_asset_id = new_backing_asset.bitasset_data(d).options.short_backing_asset; + FC_ASSERT( (backing_backing_asset_id == asset_id_type() || !backing_backing_asset_id(d).is_market_issued()), + "A BitAsset cannot be backed by a BitAsset that itself is backed by a BitAsset."); + } + } + else // prior to HF 922 / 931 { - const asset_object& backing = a.bitasset_data(d).options.short_backing_asset(d); - if( backing.is_market_issued() ) + // code to check if issues occurred before hard fork. TODO cleanup after hard fork + if( op.new_options.short_backing_asset == asset_obj.get_id() ) + { + wlog( "before hf-922-931, op.new_options.short_backing_asset == asset_obj.get_id() at block ${b}", + ("b",d.head_block_num()) ); + } + if( current_bitasset_data.is_prediction_market && asset_obj.precision != new_backing_asset.precision ) + { + wlog( "before hf-922-931, for a PM, asset_obj.precision != new_backing_asset.precision at block ${b}", + ("b",d.head_block_num()) ); + } + + if( asset_obj.issuer == GRAPHENE_COMMITTEE_ACCOUNT ) + { + // code to check if issues occurred before hard fork. TODO cleanup after hard fork + if( new_backing_asset.is_market_issued() ) + { + if( new_backing_asset.bitasset_data(d).options.short_backing_asset != asset_id_type() ) + wlog( "before hf-922-931, modified a blockchain-controlled market asset to be backed by an asset " + "which is not backed by CORE at block ${b}", + ("b",d.head_block_num()) ); + + check_children_of_bitasset( d, op, new_backing_asset, after_hf_core_922_931 ); + } + else + { + if( new_backing_asset.get_id() != asset_id_type() ) + wlog( "before hf-922-931, modified a blockchain-controlled market asset to be backed by an asset " + "which is not market issued asset nor CORE at block ${b}", + ("b",d.head_block_num()) ); + } + + //prior to HF 922_931, these checks were mistakenly using the old backing_asset + const asset_object& old_backing_asset = current_bitasset_data.options.short_backing_asset(d); + + if( old_backing_asset.is_market_issued() ) + { + FC_ASSERT( old_backing_asset.bitasset_data(d).options.short_backing_asset == asset_id_type(), + "May not modify a blockchain-controlled market asset to be backed by an asset which is not " + "backed by CORE." ); + } + else + { + FC_ASSERT( old_backing_asset.get_id() == asset_id_type(), + "May not modify a blockchain-controlled market asset to be backed by an asset which is not " + "market issued asset nor CORE." ); + } + } + else { - const asset_object& backing_backing = backing.bitasset_data(d).options.short_backing_asset(d); - FC_ASSERT( backing_backing.get_id() == asset_id_type(), - "May not create a blockchain-controlled market asset which is not backed by CORE."); - } else - FC_ASSERT( backing.get_id() == asset_id_type(), - "May not create a blockchain-controlled market asset which is not backed by CORE."); + // not a committee issued asset + + // If we're changing to a backing_asset that is not CORE, we need to look at any + // asset ( "CHILD" ) that has this one as a backing asset. If CHILD is committee-owned, + // the change is not allowed. If CHILD is user-owned, then this asset's backing + // asset must be either CORE or a UIA. 
+ if ( new_backing_asset.get_id() != asset_id_type() ) // not backed by CORE + { + check_children_of_bitasset( d, op, new_backing_asset, after_hf_core_922_931 ); + } + } + // if the new backing asset is backed by something which is not CORE and not a UIA, this is not allowed + // Check if the new backing asset is itself backed by something. It must be CORE or a UIA + if ( new_backing_asset.is_market_issued() ) + { + asset_id_type backing_backing_asset_id = new_backing_asset.bitasset_data(d).options.short_backing_asset; + if ( backing_backing_asset_id != asset_id_type() && backing_backing_asset_id(d).is_market_issued() ) + { + wlog( "before hf-922-931, a BitAsset cannot be backed by a BitAsset that itself " + "is backed by a BitAsset. This occurred at block ${b}", + ("b", d.head_block_num() ) ); + } // not core, not UIA + } // if market issued } } - bitasset_to_update = &b; - FC_ASSERT( o.issuer == a.issuer, "", ("o.issuer", o.issuer)("a.issuer", a.issuer) ); + const auto& chain_parameters = d.get_global_properties().parameters; + if( after_hf_core_922_931 ) // TODO remove this check after hard fork if things in `else` did not occur + { + FC_ASSERT( op.new_options.feed_lifetime_sec > chain_parameters.block_interval, + "Feed lifetime must exceed block interval." ); + FC_ASSERT( op.new_options.force_settlement_delay_sec > chain_parameters.block_interval, + "Force settlement delay must exceed block interval." ); + } + else // code to check if issues occurred before hard fork. TODO cleanup after hard fork + { + if( op.new_options.feed_lifetime_sec <= chain_parameters.block_interval ) + wlog( "before hf-922-931, op.new_options.feed_lifetime_sec <= chain_parameters.block_interval at block ${b}", + ("b",d.head_block_num()) ); + if( op.new_options.force_settlement_delay_sec <= chain_parameters.block_interval ) + wlog( "before hf-922-931, op.new_options.force_settlement_delay_sec <= chain_parameters.block_interval at block ${b}", + ("b",d.head_block_num()) ); + } + + bitasset_to_update = ¤t_bitasset_data; + asset_to_update = &asset_obj; return void_result(); -} FC_CAPTURE_AND_RETHROW( (o) ) } +} FC_CAPTURE_AND_RETHROW( (op) ) } + +/******* + * @brief Apply requested changes to bitasset options + * + * This applies the requested changes to the bitasset object. 
It also cleans up the + * releated feeds + * + * @param op the requested operation + * @param db the database + * @param bdo the actual database object + * @param asset_to_update the asset_object related to this bitasset_data_object + * @returns true if the feed price is changed, and after hf core-868-890 + */ +static bool update_bitasset_object_options( + const asset_update_bitasset_operation& op, database& db, + asset_bitasset_data_object& bdo, const asset_object& asset_to_update ) +{ + const fc::time_point_sec& next_maint_time = db.get_dynamic_global_properties().next_maintenance_time; + bool after_hf_core_868_890 = ( next_maint_time > HARDFORK_CORE_868_890_TIME ); -void_result asset_update_bitasset_evaluator::do_apply(const asset_update_bitasset_operation& o) -{ try { - bool should_update_feeds = false; // If the minimum number of feeds to calculate a median has changed, we need to recalculate the median - if( o.new_options.minimum_feeds != bitasset_to_update->options.minimum_feeds ) + bool should_update_feeds = false; + if( op.new_options.minimum_feeds != bdo.options.minimum_feeds ) should_update_feeds = true; - db().modify(*bitasset_to_update, [&](asset_bitasset_data_object& b) { - b.options = o.new_options; + // after hardfork core-868-890, we also should call update_median_feeds if the feed_lifetime_sec changed + if( after_hf_core_868_890 + && op.new_options.feed_lifetime_sec != bdo.options.feed_lifetime_sec ) + { + should_update_feeds = true; + } - if( should_update_feeds ) - b.update_median_feeds(db().head_block_time()); - }); + // feeds must be reset if the backing asset is changed after hardfork core-868-890 + bool backing_asset_changed = false; + bool is_witness_or_committee_fed = false; + if( after_hf_core_868_890 + && op.new_options.short_backing_asset != bdo.options.short_backing_asset ) + { + backing_asset_changed = true; + should_update_feeds = true; + if( asset_to_update.options.flags & ( witness_fed_asset | committee_fed_asset ) ) + is_witness_or_committee_fed = true; + } - return void_result(); -} FC_CAPTURE_AND_RETHROW( (o) ) } + bdo.options = op.new_options; + + // are we modifying the underlying? If so, reset the feeds + if( backing_asset_changed ) + { + if( is_witness_or_committee_fed ) + { + bdo.feeds.clear(); + } + else + { + // for non-witness-feeding and non-committee-feeding assets, modify all feeds + // published by producers to nothing, since we can't simply remove them. For more information: + // https://github.com/bitshares/bitshares-core/pull/832#issuecomment-384112633 + for( auto& current_feed : bdo.feeds ) + { + current_feed.second.second.settlement_price = price(); + } + } + } + + if( should_update_feeds ) + { + const auto old_feed = bdo.current_feed; + bdo.update_median_feeds( db.head_block_time() ); + + // TODO review and refactor / cleanup after hard fork: + // 1. if hf_core_868_890 and core-935 occurred at same time + // 2. 
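The decision logic in update_bitasset_object_options can be summarised as: recompute the median if minimum_feeds changed, and (after hardfork core-868-890) also if feed_lifetime_sec or the backing asset changed; a backing-asset change additionally resets existing feeds, either by clearing them (witness/committee-fed assets) or by nulling out their settlement prices (producer-fed assets). A hedged sketch of that decision, with simplified names and only the fields that matter here:

    #include <cassert>
    #include <cstdint>

    struct bitasset_opts { uint8_t minimum_feeds; uint32_t feed_lifetime_sec; uint64_t short_backing_asset; };

    struct update_plan
    {
        bool recompute_median;   // call update_median_feeds()
        bool clear_feeds;        // witness/committee-fed asset whose backing changed
        bool null_out_feeds;     // producer-fed asset whose backing changed
    };

    update_plan plan_update( const bitasset_opts& cur, const bitasset_opts& next,
                             bool witness_or_committee_fed, bool after_hf_868_890 )
    {
        update_plan p{ false, false, false };
        if( next.minimum_feeds != cur.minimum_feeds )
            p.recompute_median = true;
        if( after_hf_868_890 && next.feed_lifetime_sec != cur.feed_lifetime_sec )
            p.recompute_median = true;
        if( after_hf_868_890 && next.short_backing_asset != cur.short_backing_asset )
        {
            p.recompute_median = true;
            if( witness_or_committee_fed ) p.clear_feeds    = true;
            else                           p.null_out_feeds = true;
        }
        return p;
    }

    int main()
    {
        bitasset_opts cur{ 3, 86400, 0 }, next = cur;
        next.short_backing_asset = 7;
        auto p = plan_update( cur, next, true /*witness fed*/, true /*after hf*/ );
        assert( p.recompute_median && p.clear_feeds && !p.null_out_feeds );
    }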
if wlog did not actually get called + + // We need to call check_call_orders if the price feed changes after hardfork core-935 + if( next_maint_time > HARDFORK_CORE_935_TIME ) + return ( !( old_feed == bdo.current_feed ) ); + + // We need to call check_call_orders if the settlement price changes after hardfork core-868-890 + if( after_hf_core_868_890 ) + { + if( old_feed.settlement_price != bdo.current_feed.settlement_price ) + return true; + else + { + if( !( old_feed == bdo.current_feed ) ) + wlog( "Settlement price did not change but current_feed changed at block ${b}", ("b",db.head_block_num()) ); + } + } + } + + return false; +} + +void_result asset_update_bitasset_evaluator::do_apply(const asset_update_bitasset_operation& op) +{ + try + { + auto& db_conn = db(); + const auto& asset_being_updated = (*asset_to_update); + bool to_check_call_orders = false; + + db_conn.modify( *bitasset_to_update, + [&op, &asset_being_updated, &to_check_call_orders, &db_conn]( asset_bitasset_data_object& bdo ) + { + to_check_call_orders = update_bitasset_object_options( op, db_conn, bdo, asset_being_updated ); + }); + + if( to_check_call_orders ) + // Process margin calls, allow black swan, not for a new limit order + db_conn.check_call_orders( asset_being_updated, true, false, bitasset_to_update ); + + return void_result(); + + } FC_CAPTURE_AND_RETHROW( (op) ) +} void_result asset_update_feed_producers_evaluator::do_evaluate(const asset_update_feed_producers_evaluator::operation_type& o) { try { database& d = db(); - FC_ASSERT( o.new_feed_producers.size() <= d.get_global_properties().parameters.maximum_asset_feed_publishers ); - for( auto id : o.new_feed_producers ) - d.get_object(id); + FC_ASSERT( o.new_feed_producers.size() <= d.get_global_properties().parameters.maximum_asset_feed_publishers, + "Cannot specify more feed producers than maximum allowed" ); const asset_object& a = o.asset_to_update(d); @@ -369,18 +751,33 @@ void_result asset_update_feed_producers_evaluator::do_evaluate(const asset_updat FC_ASSERT(!(a.options.flags & committee_fed_asset), "Cannot set feed producers on a committee-fed asset."); FC_ASSERT(!(a.options.flags & witness_fed_asset), "Cannot set feed producers on a witness-fed asset."); - const asset_bitasset_data_object& b = a.bitasset_data(d); - bitasset_to_update = &b; - FC_ASSERT( a.issuer == o.issuer ); + FC_ASSERT( a.issuer == o.issuer, "Only asset issuer can update feed producers of an asset" ); + + asset_to_update = &a; + + // Make sure all producers exist. Check these after asset because account lookup is more expensive + for( auto id : o.new_feed_producers ) + d.get_object(id); + return void_result(); } FC_CAPTURE_AND_RETHROW( (o) ) } void_result asset_update_feed_producers_evaluator::do_apply(const asset_update_feed_producers_evaluator::operation_type& o) { try { - db().modify(*bitasset_to_update, [&](asset_bitasset_data_object& a) { + database& d = db(); + const auto head_time = d.head_block_time(); + const asset_bitasset_data_object& bitasset_to_update = asset_to_update->bitasset_data(d); + d.modify( bitasset_to_update, [&o,head_time](asset_bitasset_data_object& a) { //This is tricky because I have a set of publishers coming in, but a map of publisher to feed is stored. //I need to update the map such that the keys match the new publishers, but not munge the old price feeds from //publishers who are being kept. 
+ + // TODO possible performance optimization: + // Since both the map and the set are ordered by account already, we can iterate through them only once + // and avoid lookups while iterating by maintaining two iterators at same time. + // However, this operation is not used much, and both the set and the map are small, + // so likely we won't gain much with the optimization. + //First, remove any old publishers who are no longer publishers for( auto itr = a.feeds.begin(); itr != a.feeds.end(); ) { @@ -390,12 +787,14 @@ void_result asset_update_feed_producers_evaluator::do_apply(const asset_update_f ++itr; } //Now, add any new publishers - for( auto itr = o.new_feed_producers.begin(); itr != o.new_feed_producers.end(); ++itr ) - if( !a.feeds.count(*itr) ) - a.feeds[*itr]; - a.update_median_feeds(db().head_block_time()); + for( const account_id_type acc : o.new_feed_producers ) + { + a.feeds[acc]; + } + a.update_median_feeds( head_time ); }); - db().check_call_orders( o.asset_to_update(db()) ); + // Process margin calls, allow black swan, not for a new limit order + d.check_call_orders( *asset_to_update, true, false, &bitasset_to_update ); return void_result(); } FC_CAPTURE_AND_RETHROW( (o) ) } @@ -404,15 +803,19 @@ void_result asset_global_settle_evaluator::do_evaluate(const asset_global_settle { try { const database& d = db(); asset_to_settle = &op.asset_to_settle(d); - FC_ASSERT(asset_to_settle->is_market_issued()); - FC_ASSERT(asset_to_settle->can_global_settle()); - FC_ASSERT(asset_to_settle->issuer == op.issuer ); - FC_ASSERT(asset_to_settle->dynamic_data(d).current_supply > 0); + FC_ASSERT( asset_to_settle->is_market_issued(), "Can only globally settle market-issued assets" ); + FC_ASSERT( asset_to_settle->can_global_settle(), "The global_settle permission of this asset is disabled" ); + FC_ASSERT( asset_to_settle->issuer == op.issuer, "Only asset issuer can globally settle an asset" ); + FC_ASSERT( asset_to_settle->dynamic_data(d).current_supply > 0, "Can not globally settle an asset with zero supply" ); + + const asset_bitasset_data_object& _bitasset_data = asset_to_settle->bitasset_data(d); + // if there is a settlement for this asset, then no further global settle may be taken + FC_ASSERT( !_bitasset_data.has_settlement(), "This asset has settlement, cannot global settle again" ); + const auto& idx = d.get_index_type().indices().get(); - assert( !idx.empty() ); - auto itr = idx.lower_bound(boost::make_tuple(price::min(asset_to_settle->bitasset_data(d).options.short_backing_asset, - op.asset_to_settle))); - assert( itr != idx.end() && itr->debt_type() == op.asset_to_settle ); + FC_ASSERT( !idx.empty(), "Internal error: no debt position found" ); + auto itr = idx.lower_bound( price::min( _bitasset_data.options.short_backing_asset, op.asset_to_settle ) ); + FC_ASSERT( itr != idx.end() && itr->debt_type() == op.asset_to_settle, "Internal error: no debt position found" ); const call_order_object& least_collateralized_short = *itr; FC_ASSERT(least_collateralized_short.get_debt() * op.settle_price <= least_collateralized_short.get_collateral(), "Cannot force settle at supplied price: least collateralized short lacks sufficient collateral to settle."); @@ -423,7 +826,7 @@ void_result asset_global_settle_evaluator::do_evaluate(const asset_global_settle void_result asset_global_settle_evaluator::do_apply(const asset_global_settle_evaluator::operation_type& op) { try { database& d = db(); - d.globally_settle_asset( op.asset_to_settle(db()), op.settle_price ); + d.globally_settle_asset( 
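The feed-producer update above has to make the keys of the feed map match the new producer set without disturbing prices already published by producers who remain. A self-contained sketch of that reconciliation, with a string-keyed map and a plain int standing in for the real feed structure:

    #include <cassert>
    #include <map>
    #include <set>
    #include <string>

    void sync_feed_producers( std::map<std::string, int>& feeds,
                              const std::set<std::string>& new_producers )
    {
        for( auto itr = feeds.begin(); itr != feeds.end(); )
        {
            if( !new_producers.count( itr->first ) )
                itr = feeds.erase( itr );      // removed producer: drop the feed
            else
                ++itr;                         // kept producer: keep the old feed
        }
        for( const auto& acc : new_producers )
            feeds[acc];                        // new producer: default (empty) feed
    }

    int main()
    {
        std::map<std::string, int> feeds = { { "alice", 10 }, { "bob", 20 } };
        sync_feed_producers( feeds, { "bob", "carol" } );
        assert( feeds.count( "alice" ) == 0 );     // alice removed
        assert( feeds.at( "bob" ) == 20 );         // bob's existing feed preserved
        assert( feeds.at( "carol" ) == 0 );        // carol added with a default feed
    }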
*asset_to_settle, op.settle_price ); return void_result(); } FC_CAPTURE_AND_RETHROW( (op) ) } @@ -436,7 +839,9 @@ void_result asset_settle_evaluator::do_evaluate(const asset_settle_evaluator::op FC_ASSERT(asset_to_settle->can_force_settle() || bitasset.has_settlement() ); if( bitasset.is_prediction_market ) FC_ASSERT( bitasset.has_settlement(), "global settlement must occur before force settling a prediction market" ); - else if( bitasset.current_feed.settlement_price.is_null() ) + else if( bitasset.current_feed.settlement_price.is_null() + && ( d.head_block_time() <= HARDFORK_CORE_216_TIME + || !bitasset.has_settlement() ) ) FC_THROW_EXCEPTION(insufficient_feeds, "Cannot force settle with no price feed."); FC_ASSERT(d.get_balance(d.get(op.account), *asset_to_settle) >= op.amount); @@ -446,30 +851,54 @@ void_result asset_settle_evaluator::do_evaluate(const asset_settle_evaluator::op operation_result asset_settle_evaluator::do_apply(const asset_settle_evaluator::operation_type& op) { try { database& d = db(); - d.adjust_balance(op.account, -op.amount); const auto& bitasset = asset_to_settle->bitasset_data(d); if( bitasset.has_settlement() ) { - auto settled_amount = op.amount * bitasset.settlement_price; - FC_ASSERT( settled_amount.amount <= bitasset.settlement_fund ); + const auto& mia_dyn = asset_to_settle->dynamic_asset_data_id(d); + + auto settled_amount = op.amount * bitasset.settlement_price; // round down, in favor of global settlement fund + if( op.amount.amount == mia_dyn.current_supply ) + settled_amount.amount = bitasset.settlement_fund; // avoid rounding problems + else + FC_ASSERT( settled_amount.amount <= bitasset.settlement_fund ); // should be strictly < except for PM with zero outcome + + if( settled_amount.amount == 0 && !bitasset.is_prediction_market ) + { + if( d.get_dynamic_global_properties().next_maintenance_time > HARDFORK_CORE_184_TIME ) + FC_THROW( "Settle amount is too small to receive anything due to rounding" ); + else // TODO remove this warning after hard fork core-184 + wlog( "Something for nothing issue (#184, variant F) occurred at block #${block}", ("block",d.head_block_num()) ); + } - d.modify( bitasset, [&]( asset_bitasset_data_object& obj ){ - obj.settlement_fund -= settled_amount.amount; - }); + asset pays = op.amount; + if( op.amount.amount != mia_dyn.current_supply + && settled_amount.amount != 0 + && d.get_dynamic_global_properties().next_maintenance_time > HARDFORK_CORE_342_TIME ) + { + pays = settled_amount.multiply_and_round_up( bitasset.settlement_price ); + } - d.adjust_balance(op.account, settled_amount); + d.adjust_balance( op.account, -pays ); - const auto& mia_dyn = asset_to_settle->dynamic_asset_data_id(d); + if( settled_amount.amount > 0 ) + { + d.modify( bitasset, [&]( asset_bitasset_data_object& obj ){ + obj.settlement_fund -= settled_amount.amount; + }); + + d.adjust_balance( op.account, settled_amount ); + } d.modify( mia_dyn, [&]( asset_dynamic_data_object& obj ){ - obj.current_supply -= op.amount.amount; - }); + obj.current_supply -= pays.amount; + }); return settled_amount; } else { + d.adjust_balance( op.account, -op.amount ); return d.create([&](force_settlement_object& s) { s.owner = op.account; s.balance = op.amount; @@ -484,41 +913,55 @@ void_result asset_publish_feeds_evaluator::do_evaluate(const asset_publish_feed_ const asset_object& base = o.asset_id(d); //Verify that this feed is for a market-issued asset and that asset is backed by the base - FC_ASSERT(base.is_market_issued()); + FC_ASSERT( base.is_market_issued(), "Can 
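Settling against a global settlement fund rounds the collateral payout down (in favour of the fund) and then, after HARDFORK_CORE_342, recomputes the debt actually charged by rounding up from that payout, so the settler never hands over debt that yields no collateral. A small integer-arithmetic illustration of the two roundings (names and the ratio struct are stand-ins, not the real price type):

    #include <cassert>
    #include <cstdint>

    // Price as a ratio: collateral_num units of collateral per debt_den units of debt.
    struct ratio { uint64_t collateral_num, debt_den; };

    uint64_t payout_round_down( uint64_t debt, ratio p )
    {
        return debt * p.collateral_num / p.debt_den;                 // favours the fund
    }

    uint64_t charge_round_up( uint64_t payout, ratio p )
    {
        return ( payout * p.debt_den + p.collateral_num - 1 ) / p.collateral_num;
    }

    int main()
    {
        ratio price{ 3, 7 };                                   // 3 collateral per 7 debt
        uint64_t debt = 10;
        uint64_t out  = payout_round_down( debt, price );      // 10*3/7 = 4, rounded down
        uint64_t pays = charge_round_up( out, price );         // ceil(4*7/3) = 10
        assert( out == 4 );
        assert( pays <= debt );                                // never charged more than offered
    }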
only publish price feeds for market-issued assets" ); const asset_bitasset_data_object& bitasset = base.bitasset_data(d); - FC_ASSERT( !bitasset.has_settlement(), "No further feeds may be published after a settlement event" ); + if( bitasset.is_prediction_market || d.head_block_time() <= HARDFORK_CORE_216_TIME ) + { + FC_ASSERT( !bitasset.has_settlement(), "No further feeds may be published after a settlement event" ); + } + + // the settlement price must be quoted in terms of the backing asset + FC_ASSERT( o.feed.settlement_price.quote.asset_id == bitasset.options.short_backing_asset, + "Quote asset type in settlement price should be same as backing asset of this asset" ); - FC_ASSERT( o.feed.settlement_price.quote.asset_id == bitasset.options.short_backing_asset ); if( d.head_block_time() > HARDFORK_480_TIME ) { if( !o.feed.core_exchange_rate.is_null() ) { - FC_ASSERT( o.feed.core_exchange_rate.quote.asset_id == asset_id_type() ); + FC_ASSERT( o.feed.core_exchange_rate.quote.asset_id == asset_id_type(), + "Quote asset in core exchange rate should be CORE asset" ); } } else { if( (!o.feed.settlement_price.is_null()) && (!o.feed.core_exchange_rate.is_null()) ) { - FC_ASSERT( o.feed.settlement_price.quote.asset_id == o.feed.core_exchange_rate.quote.asset_id ); + // Old buggy code, but we have to live with it + FC_ASSERT( o.feed.settlement_price.quote.asset_id == o.feed.core_exchange_rate.quote.asset_id, "Bad feed" ); } } //Verify that the publisher is authoritative to publish a feed if( base.options.flags & witness_fed_asset ) { - FC_ASSERT( d.get(GRAPHENE_WITNESS_ACCOUNT).active.account_auths.count(o.publisher) ); + FC_ASSERT( d.get(GRAPHENE_WITNESS_ACCOUNT).active.account_auths.count(o.publisher), + "Only active witnesses are allowed to publish price feeds for this asset" ); } else if( base.options.flags & committee_fed_asset ) { - FC_ASSERT( d.get(GRAPHENE_COMMITTEE_ACCOUNT).active.account_auths.count(o.publisher) ); + FC_ASSERT( d.get(GRAPHENE_COMMITTEE_ACCOUNT).active.account_auths.count(o.publisher), + "Only active committee members are allowed to publish price feeds for this asset" ); } else { - FC_ASSERT(bitasset.feeds.count(o.publisher)); + FC_ASSERT( bitasset.feeds.count(o.publisher), + "The account is not in the set of allowed price feed producers of this asset" ); } + asset_ptr = &base; + bitasset_ptr = &bitasset; + return void_result(); } FC_CAPTURE_AND_RETHROW((o)) } @@ -527,8 +970,8 @@ void_result asset_publish_feeds_evaluator::do_apply(const asset_publish_feed_ope database& d = db(); - const asset_object& base = o.asset_id(d); - const asset_bitasset_data_object& bad = base.bitasset_data(d); + const asset_object& base = *asset_ptr; + const asset_bitasset_data_object& bad = *bitasset_ptr; auto old_feed = bad.current_feed; // Store medians for this asset @@ -538,7 +981,20 @@ void_result asset_publish_feeds_evaluator::do_apply(const asset_publish_feed_ope }); if( !(old_feed == bad.current_feed) ) - db().check_call_orders(base); + { + if( bad.has_settlement() ) // implies head_block_time > HARDFORK_CORE_216_TIME + { + const auto& mia_dyn = base.dynamic_asset_data_id(d); + if( !bad.current_feed.settlement_price.is_null() + && ( mia_dyn.current_supply == 0 + || ~price::call_price(asset(mia_dyn.current_supply, o.asset_id), + asset(bad.settlement_fund, bad.options.short_backing_asset), + bad.current_feed.maintenance_collateral_ratio ) < bad.current_feed.settlement_price ) ) + d.revive_bitasset(base); + } + // Process margin calls, allow black swan, not for a new limit order + 
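A plain-integer sketch of the round-down / round-up pairing used in asset_settle_evaluator::do_apply above (an assumed simplification: these are not graphene's price/asset types, and the 3:1 settlement price is made up). The collateral paid out of the global settlement fund is rounded down in the fund's favor, and the debt actually charged is then recomputed from the collateral received and rounded up, so the account keeps any debt that would otherwise be burned for nothing.

#include <cstdint>
#include <iostream>

int main()
{
   const int64_t price_debt       = 3;   // hypothetical settlement price: 3 debt : 1 collateral
   const int64_t price_collateral = 1;
   const int64_t requested        = 10;  // debt the account asked to settle

   // Round down, in favor of the global settlement fund (mirrors op.amount * settlement_price).
   const int64_t receives = requested * price_collateral / price_debt;                       // 10*1/3 = 3

   // Recompute the charge from what was actually received, rounded up
   // (mirrors settled_amount.multiply_and_round_up( settlement_price )).
   const int64_t pays = ( receives * price_debt + price_collateral - 1 ) / price_collateral; // ceil(9/1) = 9

   std::cout << "receives " << receives << " collateral, pays " << pays
             << " of " << requested << " debt\n";                                            // keeps 1 debt
}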
d.check_call_orders( base, true, false, bitasset_ptr ); + } return void_result(); } FC_CAPTURE_AND_RETHROW((o)) } @@ -571,4 +1027,31 @@ void_result asset_claim_fees_evaluator::do_apply( const asset_claim_fees_operati } FC_CAPTURE_AND_RETHROW( (o) ) } +void_result asset_claim_pool_evaluator::do_evaluate( const asset_claim_pool_operation& o ) +{ try { + FC_ASSERT( db().head_block_time() >= HARDFORK_CORE_188_TIME, + "This operation is only available after Hardfork #188!" ); + FC_ASSERT( o.asset_id(db()).issuer == o.issuer, "Asset fee pool may only be claimed by the issuer" ); + + return void_result(); +} FC_CAPTURE_AND_RETHROW( (o) ) } + +void_result asset_claim_pool_evaluator::do_apply( const asset_claim_pool_operation& o ) +{ try { + database& d = db(); + + const asset_object& a = o.asset_id(d); + const asset_dynamic_data_object& addo = a.dynamic_asset_data_id(d); + FC_ASSERT( o.amount_to_claim.amount <= addo.fee_pool, "Attempt to claim more fees than is available", ("addo",addo) ); + + d.modify( addo, [&o]( asset_dynamic_data_object& _addo ) { + _addo.fee_pool -= o.amount_to_claim.amount; + }); + + d.adjust_balance( o.issuer, o.amount_to_claim ); + + return void_result(); +} FC_CAPTURE_AND_RETHROW( (o) ) } + + } } // graphene::chain diff --git a/libraries/chain/asset_object.cpp b/libraries/chain/asset_object.cpp index d5ee605988..47fd3c146b 100644 --- a/libraries/chain/asset_object.cpp +++ b/libraries/chain/asset_object.cpp @@ -43,10 +43,19 @@ share_type asset_bitasset_data_object::max_force_settlement_volume(share_type cu return volume.to_uint64(); } +/****** + * @brief calculate the median feed + * + * This calculates the median feed. It sets the current_feed_publication_time + * and current_feed member variables + * + * @param current_time the time to use in the calculations + */ void graphene::chain::asset_bitasset_data_object::update_median_feeds(time_point_sec current_time) { current_feed_publication_time = current_time; vector> current_feeds; + // find feeds that were alive at current_time for( const pair>& f : feeds ) { if( (current_time - f.second.first).to_seconds() < options.feed_lifetime_sec && @@ -61,12 +70,15 @@ void graphene::chain::asset_bitasset_data_object::update_median_feeds(time_point if( current_feeds.size() < options.minimum_feeds ) { //... 
don't calculate a median, and set a null feed + feed_cer_updated = false; // new median cer is null, won't update asset_object anyway, set to false for better performance current_feed_publication_time = current_time; current_feed = price_feed(); return; } if( current_feeds.size() == 1 ) { + if( current_feed.core_exchange_rate != current_feeds.front().get().core_exchange_rate ) + feed_cer_updated = true; current_feed = std::move(current_feeds.front()); return; } @@ -85,6 +97,8 @@ void graphene::chain::asset_bitasset_data_object::update_median_feeds(time_point #undef CALCULATE_MEDIAN_VALUE // *** End Median Calculations *** + if( current_feed.core_exchange_rate != median_feed.core_exchange_rate ) + feed_cer_updated = true; current_feed = median_feed; } @@ -147,14 +161,15 @@ asset asset_object::amount_from_string(string amount_string) const string asset_object::amount_to_string(share_type amount) const { - share_type scaled_precision = 1; - for( uint8_t i = 0; i < precision; ++i ) - scaled_precision *= 10; - assert(scaled_precision > 0); + share_type scaled_precision = asset::scaled_precision( precision ); string result = fc::to_string(amount.value / scaled_precision.value); - auto decimals = amount.value % scaled_precision.value; + auto decimals = abs( amount.value % scaled_precision.value ); if( decimals ) + { + if( amount < 0 && result == "0" ) + result = "-0"; result += "." + fc::to_string(scaled_precision.value + decimals).erase(0,1); + } return result; } diff --git a/libraries/chain/balance_evaluator.cpp b/libraries/chain/balance_evaluator.cpp index 8d29c01d0f..8c0a48c95c 100644 --- a/libraries/chain/balance_evaluator.cpp +++ b/libraries/chain/balance_evaluator.cpp @@ -41,8 +41,6 @@ void_result balance_claim_evaluator::do_evaluate(const balance_claim_operation& ("op", op.balance_owner_key) ("bal", balance->owner) ); - if( !(d.get_node_properties().skip_flags & (database::skip_authority_check | - database::skip_transaction_signatures)) ) FC_ASSERT(op.total_claimed.asset_id == balance->asset_type()); diff --git a/libraries/chain/block_database.cpp b/libraries/chain/block_database.cpp index 214459f0d4..c5fa6636a8 100644 --- a/libraries/chain/block_database.cpp +++ b/libraries/chain/block_database.cpp @@ -24,7 +24,6 @@ #include #include #include -#include namespace graphene { namespace chain { @@ -45,14 +44,15 @@ void block_database::open( const fc::path& dbdir ) _block_num_to_pos.exceptions(std::ios_base::failbit | std::ios_base::badbit); _blocks.exceptions(std::ios_base::failbit | std::ios_base::badbit); - if( !fc::exists( dbdir/"index" ) ) + _index_filename = dbdir / "index"; + if( !fc::exists( _index_filename ) ) { - _block_num_to_pos.open( (dbdir/"index").generic_string().c_str(), std::fstream::binary | std::fstream::in | std::fstream::out | std::fstream::trunc); + _block_num_to_pos.open( _index_filename.generic_string().c_str(), std::fstream::binary | std::fstream::in | std::fstream::out | std::fstream::trunc); _blocks.open( (dbdir/"blocks").generic_string().c_str(), std::fstream::binary | std::fstream::in | std::fstream::out | std::fstream::trunc); } else { - _block_num_to_pos.open( (dbdir/"index").generic_string().c_str(), std::fstream::binary | std::fstream::in | std::fstream::out ); + _block_num_to_pos.open( _index_filename.generic_string().c_str(), std::fstream::binary | std::fstream::in | std::fstream::out ); _blocks.open( (dbdir/"blocks").generic_string().c_str(), std::fstream::binary | std::fstream::in | std::fstream::out ); } } FC_CAPTURE_AND_RETHROW( (dbdir) ) } @@ -82,8 
+82,7 @@ void block_database::store( const block_id_type& _id, const signed_block& b ) id = b.id(); elog( "id argument of block_database::store() was not initialized for block ${id}", ("id", id) ); } - auto num = block_header::num_from_id(id); - _block_num_to_pos.seekp( sizeof( index_entry ) * num ); + _block_num_to_pos.seekp( sizeof( index_entry ) * int64_t(block_header::num_from_id(id)) ); index_entry e; _blocks.seekp( 0, _blocks.end ); auto vec = fc::raw::pack( b ); @@ -97,7 +96,7 @@ void block_database::store( const block_id_type& _id, const signed_block& b ) void block_database::remove( const block_id_type& id ) { try { index_entry e; - auto index_pos = sizeof(e)*block_header::num_from_id(id); + int64_t index_pos = sizeof(e) * int64_t(block_header::num_from_id(id)); _block_num_to_pos.seekg( 0, _block_num_to_pos.end ); if ( _block_num_to_pos.tellg() <= index_pos ) FC_THROW_EXCEPTION(fc::key_not_found_exception, "Block ${id} not contained in block database", ("id", id)); @@ -108,7 +107,7 @@ void block_database::remove( const block_id_type& id ) if( e.block_id == id ) { e.block_size = 0; - _block_num_to_pos.seekp( sizeof(e)*block_header::num_from_id(id) ); + _block_num_to_pos.seekp( sizeof(e) * int64_t(block_header::num_from_id(id)) ); _block_num_to_pos.write( (char*)&e, sizeof(e) ); } } FC_CAPTURE_AND_RETHROW( (id) ) } @@ -119,9 +118,9 @@ bool block_database::contains( const block_id_type& id )const return false; index_entry e; - auto index_pos = sizeof(e)*block_header::num_from_id(id); + int64_t index_pos = sizeof(e) * int64_t(block_header::num_from_id(id)); _block_num_to_pos.seekg( 0, _block_num_to_pos.end ); - if ( _block_num_to_pos.tellg() <= index_pos ) + if ( _block_num_to_pos.tellg() < int64_t(index_pos + sizeof(e)) ) return false; _block_num_to_pos.seekg( index_pos ); _block_num_to_pos.read( (char*)&e, sizeof(e) ); @@ -133,9 +132,9 @@ block_id_type block_database::fetch_block_id( uint32_t block_num )const { assert( block_num != 0 ); index_entry e; - auto index_pos = sizeof(e)*block_num; + int64_t index_pos = sizeof(e) * int64_t(block_num); _block_num_to_pos.seekg( 0, _block_num_to_pos.end ); - if ( _block_num_to_pos.tellg() <= int64_t(index_pos) ) + if ( _block_num_to_pos.tellg() <= index_pos ) FC_THROW_EXCEPTION(fc::key_not_found_exception, "Block number ${block_num} not contained in block database", ("block_num", block_num)); _block_num_to_pos.seekg( index_pos ); @@ -150,7 +149,7 @@ optional block_database::fetch_optional( const block_id_type& id ) try { index_entry e; - auto index_pos = sizeof(e)*block_header::num_from_id(id); + int64_t index_pos = sizeof(e) * int64_t(block_header::num_from_id(id)); _block_num_to_pos.seekg( 0, _block_num_to_pos.end ); if ( _block_num_to_pos.tellg() <= index_pos ) return {}; @@ -182,7 +181,7 @@ optional block_database::fetch_by_number( uint32_t block_num )cons try { index_entry e; - auto index_pos = sizeof(e)*block_num; + int64_t index_pos = sizeof(e) * int64_t(block_num); _block_num_to_pos.seekg( 0, _block_num_to_pos.end ); if ( _block_num_to_pos.tellg() <= index_pos ) return {}; @@ -206,34 +205,47 @@ optional block_database::fetch_by_number( uint32_t block_num )cons return optional(); } -optional block_database::last()const -{ +optional block_database::last_index_entry()const { try { index_entry e; + _block_num_to_pos.seekg( 0, _block_num_to_pos.end ); + std::streampos pos = _block_num_to_pos.tellg(); + if( pos < long(sizeof(index_entry)) ) + return optional(); - if( _block_num_to_pos.tellp() < sizeof(index_entry) ) - return optional(); + 
pos -= pos % sizeof(index_entry); - _block_num_to_pos.seekg( -sizeof(index_entry), _block_num_to_pos.end ); - _block_num_to_pos.read( (char*)&e, sizeof(e) ); - uint64_t pos = _block_num_to_pos.tellg(); - while( e.block_size == 0 && pos > 0 ) + _blocks.seekg( 0, _block_num_to_pos.end ); + const std::streampos blocks_size = _blocks.tellg(); + while( pos > 0 ) { pos -= sizeof(index_entry); _block_num_to_pos.seekg( pos ); _block_num_to_pos.read( (char*)&e, sizeof(e) ); + if( _block_num_to_pos.gcount() == sizeof(e) && e.block_size > 0 + && int64_t(e.block_pos + e.block_size) <= blocks_size ) + try + { + vector data( e.block_size ); + _blocks.seekg( e.block_pos ); + _blocks.read( data.data(), e.block_size ); + if( _blocks.gcount() == long(e.block_size) ) + { + const signed_block block = fc::raw::unpack(data); + if( block.id() == e.block_id ) + return e; + } + } + catch (const fc::exception&) + { + } + catch (const std::exception&) + { + } + fc::resize_file( _index_filename, pos ); } - - if( e.block_size == 0 ) - return optional(); - - vector data( e.block_size ); - _blocks.seekg( e.block_pos ); - _blocks.read( data.data(), e.block_size ); - auto result = fc::raw::unpack(data); - return result; } catch (const fc::exception&) { @@ -241,42 +253,32 @@ optional block_database::last()const catch (const std::exception&) { } + return optional(); +} + +optional block_database::last()const +{ + optional entry = last_index_entry(); + if( entry.valid() ) return fetch_by_number( block_header::num_from_id(entry->block_id) ); return optional(); } optional block_database::last_id()const { - try - { - index_entry e; - _block_num_to_pos.seekg( 0, _block_num_to_pos.end ); - - if( _block_num_to_pos.tellp() < sizeof(index_entry) ) - return optional(); - - _block_num_to_pos.seekg( -sizeof(index_entry), _block_num_to_pos.end ); - _block_num_to_pos.read( (char*)&e, sizeof(e) ); - uint64_t pos = _block_num_to_pos.tellg(); - while( e.block_size == 0 && pos > 0 ) - { - pos -= sizeof(index_entry); - _block_num_to_pos.seekg( pos ); - _block_num_to_pos.read( (char*)&e, sizeof(e) ); - } - - if( e.block_size == 0 ) - return optional(); - - return e.block_id; - } - catch (const fc::exception&) - { - } - catch (const std::exception&) - { - } + optional entry = last_index_entry(); + if( entry.valid() ) return entry->block_id; return optional(); } +size_t block_database::blocks_current_position()const +{ + return (size_t)_blocks.tellg(); +} + +size_t block_database::total_block_size()const +{ + _blocks.seekg( 0, _blocks.end ); + return (size_t)_blocks.tellg(); +} } } diff --git a/libraries/chain/committee_member_evaluator.cpp b/libraries/chain/committee_member_evaluator.cpp index 4e7eb827e5..b01fa95faa 100644 --- a/libraries/chain/committee_member_evaluator.cpp +++ b/libraries/chain/committee_member_evaluator.cpp @@ -29,8 +29,6 @@ #include #include -#include - namespace graphene { namespace chain { void_result committee_member_create_evaluator::do_evaluate( const committee_member_create_operation& op ) diff --git a/libraries/chain/database.cpp b/libraries/chain/database.cpp index aa9f61273b..788a29f008 100644 --- a/libraries/chain/database.cpp +++ b/libraries/chain/database.cpp @@ -21,7 +21,6 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ -#include #include "db_balance.cpp" #include "db_block.cpp" #include "db_debug.cpp" @@ -32,3 +31,4 @@ #include "db_market.cpp" #include "db_update.cpp" #include "db_witness_schedule.cpp" +#include "db_notify.cpp" \ No newline at end of file diff --git a/libraries/chain/db_balance.cpp b/libraries/chain/db_balance.cpp index a70f077bb6..caa1eff63b 100644 --- a/libraries/chain/db_balance.cpp +++ b/libraries/chain/db_balance.cpp @@ -33,11 +33,11 @@ namespace graphene { namespace chain { asset database::get_balance(account_id_type owner, asset_id_type asset_id) const { - auto& index = get_index_type().indices().get(); - auto itr = index.find(boost::make_tuple(owner, asset_id)); - if( itr == index.end() ) + auto& index = get_index_type< primary_index< account_balance_index > >().get_secondary_index(); + auto abo = index.get_account_balance( owner, asset_id ); + if( !abo ) return asset(0, asset_id); - return itr->get_balance(); + return abo->get_balance(); } asset database::get_balance(const account_object& owner, const asset_object& asset_obj) const @@ -55,9 +55,9 @@ void database::adjust_balance(account_id_type account, asset delta ) if( delta.amount == 0 ) return; - auto& index = get_index_type().indices().get(); - auto itr = index.find(boost::make_tuple(account, delta.asset_id)); - if(itr == index.end()) + auto& index = get_index_type< primary_index< account_balance_index > >().get_secondary_index(); + auto abo = index.get_account_balance( account, delta.asset_id ); + if( !abo ) { FC_ASSERT( delta.amount > 0, "Insufficient Balance: ${a}'s balance of ${b} is less than required ${r}", ("a",account(*this).name) @@ -67,11 +67,14 @@ void database::adjust_balance(account_id_type account, asset delta ) b.owner = account; b.asset_type = delta.asset_id; b.balance = delta.amount.value; + if( b.asset_type == asset_id_type() ) // CORE asset + b.maintenance_flag = true; }); } else { if( delta.amount < 0 ) - FC_ASSERT( itr->get_balance() >= -delta, "Insufficient Balance: ${a}'s balance of ${b} is less than required ${r}", ("a",account(*this).name)("b",to_pretty_string(itr->get_balance()))("r",to_pretty_string(-delta))); - modify(*itr, [delta](account_balance_object& b) { + FC_ASSERT( abo->get_balance() >= -delta, "Insufficient Balance: ${a}'s balance of ${b} is less than required ${r}", + ("a",account(*this).name)("b",to_pretty_string(abo->get_balance()))("r",to_pretty_string(-delta))); + modify(*abo, [delta](account_balance_object& b) { b.adjust_balance(delta); }); } @@ -139,7 +142,7 @@ void database::deposit_cashback(const account_object& acct, share_type amount, b acct.get_id() == GRAPHENE_TEMP_ACCOUNT ) { // The blockchain's accounts do not get cashback; it simply goes to the reserve pool. 
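A minimal sketch of the idea behind the new balance lookup path in get_balance()/adjust_balance() above: a hash map keyed on (owner, asset) gives an O(1) get_account_balance()-style query instead of searching the ordered multi_index. The types below are simplified stand-ins, not graphene's account_balance_object or the real balance_index secondary index.

#include <cstdint>
#include <iostream>
#include <optional>
#include <unordered_map>

struct balance_key
{
   uint64_t owner;
   uint64_t asset;
   bool operator==( const balance_key& other ) const
   { return owner == other.owner && asset == other.asset; }
};

struct balance_key_hash
{
   size_t operator()( const balance_key& k ) const
   { return std::hash<uint64_t>()( k.owner ) ^ ( std::hash<uint64_t>()( k.asset ) << 1 ); }
};

class simple_balance_index
{
public:
   void set_balance( uint64_t owner, uint64_t asset, int64_t amount )
   { _balances[ balance_key{ owner, asset } ] = amount; }

   // Mirrors the shape of get_account_balance(): empty result if the owner never held the asset.
   std::optional<int64_t> get_account_balance( uint64_t owner, uint64_t asset ) const
   {
      auto itr = _balances.find( balance_key{ owner, asset } );
      if( itr == _balances.end() ) return std::nullopt;
      return itr->second;
   }
private:
   std::unordered_map<balance_key, int64_t, balance_key_hash> _balances;
};

int main()
{
   simple_balance_index idx;
   idx.set_balance( 17, 0, 1000 );
   auto bal = idx.get_account_balance( 17, 0 );
   std::cout << ( bal ? *bal : 0 ) << "\n";   // prints 1000
}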
- modify(get(asset_id_type()).dynamic_asset_data_id(*this), [amount](asset_dynamic_data_object& d) { + modify( get_core_dynamic_data(), [amount](asset_dynamic_data_object& d) { d.current_supply -= amount; }); return; @@ -154,10 +157,14 @@ void database::deposit_cashback(const account_object& acct, share_type amount, b if( new_vbid.valid() ) { - modify( acct, [&]( account_object& _acct ) + modify( acct, [&new_vbid]( account_object& _acct ) { _acct.cashback_vb = *new_vbid; } ); + modify( acct.statistics( *this ), []( account_statistics_object& aso ) + { + aso.has_cashback_vb = true; + } ); } return; diff --git a/libraries/chain/db_block.cpp b/libraries/chain/db_block.cpp index 9291e15d15..e340d2dea1 100644 --- a/libraries/chain/db_block.cpp +++ b/libraries/chain/db_block.cpp @@ -29,6 +29,7 @@ #include #include #include + #include #include #include @@ -36,7 +37,7 @@ #include #include -#include +#include namespace graphene { namespace chain { @@ -75,7 +76,6 @@ optional database::fetch_block_by_number( uint32_t num )const return results[0]->data; else return _block_id_to_block.fetch_by_number(num); - return optional(); } const signed_transaction& database::get_recent_transaction(const transaction_id_type& trx_id) const @@ -112,7 +112,7 @@ std::vector database::get_block_ids_on_fork(block_id_type head_of */ bool database::push_block(const signed_block& new_block, uint32_t skip) { - //idump((new_block.block_num())(new_block.id())(new_block.timestamp)(new_block.previous)); +// idump((new_block.block_num())(new_block.id())(new_block.timestamp)(new_block.previous)); bool result; detail::with_skip_flags( *this, skip, [&]() { @@ -128,68 +128,74 @@ bool database::push_block(const signed_block& new_block, uint32_t skip) bool database::_push_block(const signed_block& new_block) { try { uint32_t skip = get_node_properties().skip_flags; - if( !(skip&skip_fork_db) ) - { - /// TODO: if the block is greater than the head block and before the next maitenance interval - // verify that the block signer is in the current set of active witnesses. + // TODO: If the block is greater than the head block and before the next maintenance interval + // verify that the block signer is in the current set of active witnesses. - shared_ptr new_head = _fork_db.push_block(new_block); - //If the head block from the longest chain does not build off of the current head, we need to switch forks. - if( new_head->data.previous != head_block_id() ) + shared_ptr new_head = _fork_db.push_block(new_block); + //If the head block from the longest chain does not build off of the current head, we need to switch forks. 
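A toy model of the fork-switch loop that the restructured _push_block() performs below: pop blocks until the head is the fork point, then apply the new branch oldest-first. The real code iterates branches.first in reverse and wraps each apply_block() in an undo session with exception handling; block IDs here are plain integers for illustration only.

#include <iostream>
#include <vector>

int main()
{
   std::vector<int> chain      = { 1, 2, 3, 4 };  // current chain, head = 4
   const int fork_point        = 2;               // last block shared with the new branch
   std::vector<int> new_branch = { 6, 5 };        // new fork on top of block 2, newest first

   // pop blocks until we hit the forked block
   while( chain.back() != fork_point )
   {
      std::cout << "popping block " << chain.back() << "\n";
      chain.pop_back();
   }

   // push all blocks on the new fork, oldest first
   for( auto ritr = new_branch.rbegin(); ritr != new_branch.rend(); ++ritr )
   {
      std::cout << "pushing block " << *ritr << "\n";
      chain.push_back( *ritr );
   }

   std::cout << "new head: " << chain.back() << "\n";  // 6
}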
+ if( new_head->data.previous != head_block_id() ) + { + //If the newly pushed block is the same height as head, we get head back in new_head + //Only switch forks if new_head is actually higher than head + if( new_head->data.block_num() > head_block_num() ) { - //If the newly pushed block is the same height as head, we get head back in new_head - //Only switch forks if new_head is actually higher than head - if( new_head->data.block_num() > head_block_num() ) + wlog( "Switching to fork: ${id}", ("id",new_head->data.id()) ); + auto branches = _fork_db.fetch_branch_from(new_head->data.id(), head_block_id()); + + // pop blocks until we hit the forked block + while( head_block_id() != branches.second.back()->data.previous ) { - wlog( "Switching to fork: ${id}", ("id",new_head->data.id()) ); - auto branches = _fork_db.fetch_branch_from(new_head->data.id(), head_block_id()); - - // pop blocks until we hit the forked block - while( head_block_id() != branches.second.back()->data.previous ) - pop_block(); - - // push all blocks on the new fork - for( auto ritr = branches.first.rbegin(); ritr != branches.first.rend(); ++ritr ) - { - ilog( "pushing blocks from fork ${n} ${id}", ("n",(*ritr)->data.block_num())("id",(*ritr)->data.id()) ); - optional except; - try { - undo_database::session session = _undo_db.start_undo_session(); - apply_block( (*ritr)->data, skip ); - _block_id_to_block.store( (*ritr)->id, (*ritr)->data ); - session.commit(); - } - catch ( const fc::exception& e ) { except = e; } - if( except ) - { - wlog( "exception thrown while switching forks ${e}", ("e",except->to_detail_string() ) ); - // remove the rest of branches.first from the fork_db, those blocks are invalid - while( ritr != branches.first.rend() ) - { - _fork_db.remove( (*ritr)->data.id() ); - ++ritr; - } - _fork_db.set_head( branches.second.front() ); - - // pop all blocks from the bad fork - while( head_block_id() != branches.second.back()->data.previous ) - pop_block(); - - // restore all blocks from the good fork - for( auto ritr = branches.second.rbegin(); ritr != branches.second.rend(); ++ritr ) - { - auto session = _undo_db.start_undo_session(); - apply_block( (*ritr)->data, skip ); - _block_id_to_block.store( new_block.id(), (*ritr)->data ); - session.commit(); - } - throw *except; - } - } - return true; + ilog( "popping block #${n} ${id}", ("n",head_block_num())("id",head_block_id()) ); + pop_block(); } - else return false; + + // push all blocks on the new fork + for( auto ritr = branches.first.rbegin(); ritr != branches.first.rend(); ++ritr ) + { + ilog( "pushing block from fork #${n} ${id}", ("n",(*ritr)->data.block_num())("id",(*ritr)->id) ); + optional except; + try { + undo_database::session session = _undo_db.start_undo_session(); + apply_block( (*ritr)->data, skip ); + _block_id_to_block.store( (*ritr)->id, (*ritr)->data ); + session.commit(); + } + catch ( const fc::exception& e ) { except = e; } + if( except ) + { + wlog( "exception thrown while switching forks ${e}", ("e",except->to_detail_string() ) ); + // remove the rest of branches.first from the fork_db, those blocks are invalid + while( ritr != branches.first.rend() ) + { + ilog( "removing block from fork_db #${n} ${id}", ("n",(*ritr)->data.block_num())("id",(*ritr)->id) ); + _fork_db.remove( (*ritr)->id ); + ++ritr; + } + _fork_db.set_head( branches.second.front() ); + + // pop all blocks from the bad fork + while( head_block_id() != branches.second.back()->data.previous ) + { + ilog( "popping block #${n} ${id}", 
("n",head_block_num())("id",head_block_id()) ); + pop_block(); + } + + ilog( "Switching back to fork: ${id}", ("id",branches.second.front()->data.id()) ); + // restore all blocks from the good fork + for( auto ritr2 = branches.second.rbegin(); ritr2 != branches.second.rend(); ++ritr2 ) + { + ilog( "pushing block #${n} ${id}", ("n",(*ritr2)->data.block_num())("id",(*ritr2)->id) ); + auto session = _undo_db.start_undo_session(); + apply_block( (*ritr2)->data, skip ); + _block_id_to_block.store( (*ritr2)->id, (*ritr2)->data ); + session.commit(); + } + throw *except; + } + } + return true; } + else return false; } try { @@ -199,7 +205,7 @@ bool database::_push_block(const signed_block& new_block) session.commit(); } catch ( const fc::exception& e ) { elog("Failed to push new block:\n${e}", ("e", e.to_detail_string())); - _fork_db.remove(new_block.id()); + _fork_db.remove( new_block.id() ); throw; } @@ -215,8 +221,10 @@ bool database::_push_block(const signed_block& new_block) * queues full as well, it will be kept in the queue to be propagated later when a new block flushes out the pending * queues. */ -processed_transaction database::push_transaction( const signed_transaction& trx, uint32_t skip ) +processed_transaction database::push_transaction( const precomputable_transaction& trx, uint32_t skip ) { try { + // see https://github.com/bitshares/bitshares-core/issues/1573 + FC_ASSERT( fc::raw::pack_size( trx ) < (1024 * 1024), "Transaction exceeds maximum transaction size." ); processed_transaction result; detail::with_skip_flags( *this, skip, [&]() { @@ -225,7 +233,7 @@ processed_transaction database::push_transaction( const signed_transaction& trx, return result; } FC_CAPTURE_AND_RETHROW( (trx) ) } -processed_transaction database::_push_transaction( const signed_transaction& trx ) +processed_transaction database::_push_transaction( const precomputable_transaction& trx ) { // If this is the first transaction pushed after applying a block, start a new undo session. // This allows us to quickly rewind to the clean state of the head block, in case a new block arrives. @@ -241,12 +249,12 @@ processed_transaction database::_push_transaction( const signed_transaction& trx auto processed_trx = _apply_transaction( trx ); _pending_tx.push_back(processed_trx); - notify_changed_objects(); + // notify_changed_objects(); // The transaction applied successfully. Merge its changes into the pending block session. temp_session.merge(); // notify anyone listening to pending transactions - on_pending_transaction( trx ); + notify_on_pending_transaction( trx ); return processed_trx; } @@ -256,6 +264,24 @@ processed_transaction database::validate_transaction( const signed_transaction& return _apply_transaction( trx ); } +class push_proposal_nesting_guard { +public: + push_proposal_nesting_guard( uint32_t& nesting_counter, const database& db ) + : orig_value(nesting_counter), counter(nesting_counter) + { + FC_ASSERT( counter < db.get_global_properties().active_witnesses.size() * 2, "Max proposal nesting depth exceeded!" 
); + counter++; + } + ~push_proposal_nesting_guard() + { + if( --counter != orig_value ) + elog( "Unexpected proposal nesting count value: ${n} != ${o}", ("n",counter)("o",orig_value) ); + } +private: + const uint32_t orig_value; + uint32_t& counter; +}; + processed_transaction database::push_proposal(const proposal_object& proposal) { try { transaction_evaluation_state eval_state(this); @@ -267,6 +293,9 @@ processed_transaction database::push_proposal(const proposal_object& proposal) size_t old_applied_ops_size = _applied_ops.size(); try { + push_proposal_nesting_guard guard( _push_proposal_nesting_depth, *this ); + if( _undo_db.size() >= _undo_db.max_size() ) + _undo_db.set_max_size( _undo_db.size() + 1 ); auto session = _undo_db.start_undo_session(true); for( auto& op : proposal.proposed_transaction.operations ) eval_state.operation_results.emplace_back(apply_operation(eval_state, op)); @@ -285,7 +314,7 @@ processed_transaction database::push_proposal(const proposal_object& proposal) { _applied_ops.resize( old_applied_ops_size ); } - elog( "e", ("e",e.to_detail_string() ) ); + wlog( "${e}", ("e",e.to_detail_string() ) ); throw; } @@ -321,17 +350,6 @@ signed_block database::_generate_block( witness_id_type scheduled_witness = get_scheduled_witness( slot_num ); FC_ASSERT( scheduled_witness == witness_id ); - const auto& witness_obj = witness_id(*this); - - if( !(skip & skip_witness_signature) ) - FC_ASSERT( witness_obj.signing_key == block_signing_private_key.get_public_key() ); - - static const size_t max_block_header_size = fc::raw::pack_size( signed_block_header() ) + 4; - auto maximum_block_size = get_global_properties().parameters.maximum_block_size; - size_t total_block_size = max_block_header_size; - - signed_block pending_block; - // // The following code throws away existing pending_tx_session and // rebuilds it by re-applying pending transactions. @@ -343,17 +361,40 @@ signed_block database::_generate_block( // the value of the "when" variable is known, which means we need to // re-apply pending transactions in this method. // + + // pop pending state (reset to head block state) _pending_tx_session.reset(); + + // Check witness signing key + if( !(skip & skip_witness_signature) ) + { + // Note: if this check failed (which won't happen in normal situations), + // we would have temporarily broken the invariant that + // _pending_tx_session is the result of applying _pending_tx. + // In this case, when the node received a new block, + // the push_block() call will re-create the _pending_tx_session. 
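A self-contained sketch of the RAII pattern behind push_proposal_nesting_guard above. The depth limit of 3 and the free-standing counter are made up for the example (the real guard derives its limit from the number of active witnesses): the constructor enforces the limit and bumps the counter, and the destructor restores it even when evaluation throws.

#include <cstdint>
#include <iostream>
#include <stdexcept>

class nesting_guard
{
public:
   nesting_guard( uint32_t& counter, uint32_t max_depth )
      : _counter( counter )
   {
      if( _counter >= max_depth )
         throw std::runtime_error( "Max nesting depth exceeded!" );
      ++_counter;
   }
   ~nesting_guard() { --_counter; }   // runs during stack unwinding too
private:
   uint32_t& _counter;
};

static uint32_t depth = 0;

void evaluate_proposal( int nested_levels )
{
   nesting_guard guard( depth, 3 );              // throws once depth would exceed 3
   std::cout << "evaluating, depth is now " << depth << "\n";
   if( nested_levels > 0 )
      evaluate_proposal( nested_levels - 1 );    // proposal containing another proposal
}                                                // guard decrements the counter here

int main()
{
   try { evaluate_proposal( 5 ); }
   catch( const std::exception& e ) { std::cout << e.what() << "\n"; }
   std::cout << "final depth: " << depth << "\n"; // 0
}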
+ FC_ASSERT( witness_id(*this).signing_key == block_signing_private_key.get_public_key() ); + } + + static const size_t max_partial_block_header_size = fc::raw::pack_size( signed_block_header() ) + - fc::raw::pack_size( witness_id_type() ) // witness_id + + 3; // max space to store size of transactions (out of block header), + // +3 means 3*7=21 bits so it's practically safe + const size_t max_block_header_size = max_partial_block_header_size + fc::raw::pack_size( witness_id ); + auto maximum_block_size = get_global_properties().parameters.maximum_block_size; + size_t total_block_size = max_block_header_size; + + signed_block pending_block; + _pending_tx_session = _undo_db.start_undo_session(); uint64_t postponed_tx_count = 0; - // pop pending state (reset to head block state) for( const processed_transaction& tx : _pending_tx ) { size_t new_total_size = total_block_size + fc::raw::pack_size( tx ); // postpone transaction if it would make block too big - if( new_total_size >= maximum_block_size ) + if( new_total_size > maximum_block_size ) { postponed_tx_count++; continue; @@ -363,12 +404,21 @@ signed_block database::_generate_block( { auto temp_session = _undo_db.start_undo_session(); processed_transaction ptx = _apply_transaction( tx ); - temp_session.merge(); // We have to recompute pack_size(ptx) because it may be different // than pack_size(tx) (i.e. if one or more results increased // their size) - total_block_size += fc::raw::pack_size( ptx ); + new_total_size = total_block_size + fc::raw::pack_size( ptx ); + // postpone transaction if it would make block too big + if( new_total_size > maximum_block_size ) + { + postponed_tx_count++; + continue; + } + + temp_session.merge(); + + total_block_size = new_total_size; pending_block.transactions.push_back( ptx ); } catch ( const fc::exception& e ) @@ -399,13 +449,7 @@ signed_block database::_generate_block( if( !(skip & skip_witness_signature) ) pending_block.sign( block_signing_private_key ); - // TODO: Move this to _push_block() so session is restored. - if( !(skip & skip_block_size_check) ) - { - FC_ASSERT( fc::raw::pack_size(pending_block) <= get_global_properties().parameters.maximum_block_size ); - } - - push_block( pending_block, skip ); + push_block( pending_block, skip | skip_transaction_signatures ); // skip authority check when pushing self-generated blocks return pending_block; } FC_CAPTURE_AND_RETHROW( (witness_id) ) } @@ -417,16 +461,17 @@ signed_block database::_generate_block( void database::pop_block() { try { _pending_tx_session.reset(); - auto head_id = head_block_id(); - optional head_block = fetch_block_by_id( head_id ); - GRAPHENE_ASSERT( head_block.valid(), pop_empty_chain, "there are no blocks to pop" ); - - _fork_db.pop_block(); - _block_id_to_block.remove( head_id ); + auto fork_db_head = _fork_db.head(); + FC_ASSERT( fork_db_head, "Trying to pop() from empty fork database!?" ); + if( fork_db_head->id == head_block_id() ) + _fork_db.pop_block(); + else + { + fork_db_head = _fork_db.fetch_block( head_block_id() ); + FC_ASSERT( fork_db_head, "Trying to pop() block that's not in fork database!?" 
); + } pop_undo(); - - _popped_tx.insert( _popped_tx.begin(), head_block->transactions.begin(), head_block->transactions.end() ); - + _popped_tx.insert( _popped_tx.begin(), fork_db_head->data.transactions.begin(), fork_db_head->data.transactions.end() ); } FC_CAPTURE_AND_RETHROW() } void database::clear_pending() @@ -490,16 +535,28 @@ void database::_apply_block( const signed_block& next_block ) uint32_t skip = get_node_properties().skip_flags; _applied_ops.clear(); - FC_ASSERT( (skip & skip_merkle_check) || next_block.transaction_merkle_root == next_block.calculate_merkle_root(), "", ("next_block.transaction_merkle_root",next_block.transaction_merkle_root)("calc",next_block.calculate_merkle_root())("next_block",next_block)("id",next_block.id()) ); + if( !(skip & skip_block_size_check) ) + { + FC_ASSERT( fc::raw::pack_size(next_block) <= get_global_properties().parameters.maximum_block_size ); + } + + FC_ASSERT( (skip & skip_merkle_check) || next_block.transaction_merkle_root == next_block.calculate_merkle_root(), + "", + ("next_block.transaction_merkle_root",next_block.transaction_merkle_root) + ("calc",next_block.calculate_merkle_root()) + ("next_block",next_block) + ("id",next_block.id()) ); const witness_object& signing_witness = validate_block_header(skip, next_block); const auto& global_props = get_global_properties(); - const auto& dynamic_global_props = get(dynamic_global_property_id_type()); + const auto& dynamic_global_props = get_dynamic_global_properties(); bool maint_needed = (dynamic_global_props.next_maintenance_time <= next_block.timestamp); _current_block_num = next_block_num; _current_trx_in_block = 0; + _issue_453_affected_assets.clear(); + for( const auto& trx : next_block.transactions ) { /* We do not need to push the undo state for each transaction @@ -512,7 +569,8 @@ void database::_apply_block( const signed_block& next_block ) ++_current_trx_in_block; } - update_global_dynamic_data(next_block); + const uint32_t missed = update_witness_missed_blocks( next_block ); + update_global_dynamic_data( next_block, missed ); update_signing_witness(signing_witness, next_block); update_last_irreversible_block(); @@ -524,7 +582,8 @@ void database::_apply_block( const signed_block& next_block ) clear_expired_transactions(); clear_expired_proposals(); clear_expired_orders(); - update_expired_feeds(); + update_expired_feeds(); // this will update expired feeds and some core exchange rates + update_core_exchange_rates(); // this will update remaining core exchange rates update_withdraw_permissions(); // n.b., update_maintenance_flag() happens this late @@ -538,30 +597,13 @@ void database::_apply_block( const signed_block& next_block ) apply_debug_updates(); // notify observers that the block has been applied - applied_block( next_block ); //emit + notify_applied_block( next_block ); //emit _applied_ops.clear(); notify_changed_objects(); } FC_CAPTURE_AND_RETHROW( (next_block.block_num()) ) } -void database::notify_changed_objects() -{ try { - if( _undo_db.enabled() ) - { - const auto& head_undo = _undo_db.head(); - vector changed_ids; changed_ids.reserve(head_undo.old_values.size()); - for( const auto& item : head_undo.old_values ) changed_ids.push_back(item.first); - for( const auto& item : head_undo.new_ids ) changed_ids.push_back(item); - vector removed; - removed.reserve( head_undo.removed.size() ); - for( const auto& item : head_undo.removed ) - { - changed_ids.push_back( item.first ); - removed.emplace_back( item.second.get() ); - } - changed_objects(changed_ids); - } -} 
FC_CAPTURE_AND_RETHROW() } + processed_transaction database::apply_transaction(const signed_transaction& trx, uint32_t skip) { @@ -577,19 +619,17 @@ processed_transaction database::_apply_transaction(const signed_transaction& trx { try { uint32_t skip = get_node_properties().skip_flags; - if( true || !(skip&skip_validate) ) /* issue #505 explains why this skip_flag is disabled */ - trx.validate(); + trx.validate(); auto& trx_idx = get_mutable_index_type(); const chain_id_type& chain_id = get_chain_id(); - auto trx_id = trx.id(); - FC_ASSERT( (skip & skip_transaction_dupe_check) || - trx_idx.indices().get().find(trx_id) == trx_idx.indices().get().end() ); + if( !(skip & skip_transaction_dupe_check) ) + FC_ASSERT( trx_idx.indices().get().find(trx.id()) == trx_idx.indices().get().end() ); transaction_evaluation_state eval_state(this); const chain_parameters& chain_parameters = get_global_properties().parameters; eval_state._trx = &trx; - if( !(skip & (skip_transaction_signatures | skip_authority_check) ) ) + if( !(skip & skip_transaction_signatures) ) { auto get_active = [&]( account_id_type id ) { return &id(*this).active; }; auto get_owner = [&]( account_id_type id ) { return &id(*this).owner; }; @@ -618,8 +658,8 @@ processed_transaction database::_apply_transaction(const signed_transaction& trx //Insert transaction into unique transactions database. if( !(skip & skip_transaction_dupe_check) ) { - create([&](transaction_object& transaction) { - transaction.trx_id = trx_id; + create([&trx](transaction_object& transaction) { + transaction.trx_id = trx.id(); transaction.trx = trx; }); } @@ -636,11 +676,6 @@ processed_transaction database::_apply_transaction(const signed_transaction& trx } ptrx.operation_results = std::move(eval_state.operation_results); - //Make sure the temp account has no non-zero balances - const auto& index = get_index_type().indices().get(); - auto range = index.equal_range( boost::make_tuple( GRAPHENE_TEMP_ACCOUNT ) ); - std::for_each(range.first, range.second, [](const account_balance_object& b) { FC_ASSERT(b.balance == 0); }); - return ptrx; } FC_CAPTURE_AND_RETHROW( (trx) ) } @@ -648,18 +683,15 @@ operation_result database::apply_operation(transaction_evaluation_state& eval_st { try { int i_which = op.which(); uint64_t u_which = uint64_t( i_which ); - if( i_which < 0 ) - assert( "Negative operation tag" && false ); - if( u_which >= _operation_evaluators.size() ) - assert( "No registered evaluator for this operation" && false ); + FC_ASSERT( i_which >= 0, "Negative operation tag in operation ${op}", ("op",op) ); + FC_ASSERT( u_which < _operation_evaluators.size(), "No registered evaluator for operation ${op}", ("op",op) ); unique_ptr& eval = _operation_evaluators[ u_which ]; - if( !eval ) - assert( "No registered evaluator for this operation" && false ); + FC_ASSERT( eval, "No registered evaluator for operation ${op}", ("op",op) ); auto op_id = push_applied_operation( op ); auto result = eval->evaluate( eval_state, op, true ); set_applied_operation_result( op_id, result ); return result; -} FC_CAPTURE_AND_RETHROW( ) } +} FC_CAPTURE_AND_RETHROW( (op) ) } const witness_object& database::validate_block_header( uint32_t skip, const signed_block& next_block )const { @@ -703,4 +735,65 @@ bool database::before_last_checkpoint()const return (_checkpoints.size() > 0) && (_checkpoints.rbegin()->first >= head_block_num()); } + +static const uint32_t skip_expensive = database::skip_transaction_signatures | database::skip_witness_signature + | database::skip_merkle_check | 
database::skip_transaction_dupe_check; + +template +void database::_precompute_parallel( const Trx* trx, const size_t count, const uint32_t skip )const +{ + for( size_t i = 0; i < count; ++i, ++trx ) + { + trx->validate(); // TODO - parallelize wrt confidential operations + if( !(skip&skip_transaction_dupe_check) ) + trx->id(); + if( !(skip&skip_transaction_signatures) ) + trx->get_signature_keys( get_chain_id() ); + } +} + +fc::future database::precompute_parallel( const signed_block& block, const uint32_t skip )const +{ try { + std::vector> workers; + if( !block.transactions.empty() ) + { + if( (skip & skip_expensive) == skip_expensive ) + _precompute_parallel( &block.transactions[0], block.transactions.size(), skip ); + else + { + uint32_t chunks = fc::asio::default_io_service_scope::get_num_threads(); + uint32_t chunk_size = ( block.transactions.size() + chunks - 1 ) / chunks; + workers.reserve( chunks + 1 ); + for( size_t base = 0; base < block.transactions.size(); base += chunk_size ) + workers.push_back( fc::do_parallel( [this,&block,base,chunk_size,skip] () { + _precompute_parallel( &block.transactions[base], + base + chunk_size < block.transactions.size() ? chunk_size : block.transactions.size() - base, + skip ); + }) ); + } + } + + if( !(skip&skip_witness_signature) ) + workers.push_back( fc::do_parallel( [&block] () { block.signee(); } ) ); + if( !(skip&skip_merkle_check) ) + block.calculate_merkle_root(); + block.id(); + + if( workers.empty() ) + return fc::future< void >( fc::promise< void >::ptr( new fc::promise< void >( true ) ) ); + + auto first = workers.begin(); + auto worker = first; + while( ++worker != workers.end() ) + worker->wait(); + return *first; +} FC_LOG_AND_RETHROW() } + +fc::future database::precompute_parallel( const precomputable_transaction& trx )const +{ + return fc::do_parallel([this,&trx] () { + _precompute_parallel( &trx, 1, skip_nothing ); + }); +} + } } diff --git a/libraries/chain/db_debug.cpp b/libraries/chain/db_debug.cpp index aa91fd449a..e6ab40fb70 100644 --- a/libraries/chain/db_debug.cpp +++ b/libraries/chain/db_debug.cpp @@ -29,6 +29,7 @@ #include #include #include +#include namespace graphene { namespace chain { @@ -42,7 +43,9 @@ void database::debug_dump() const asset_dynamic_data_object& core_asset_data = db.get_core_asset().dynamic_asset_data_id(db); const auto& balance_index = db.get_index_type().indices(); - const simple_index& statistics_index = db.get_index_type>(); + const auto& statistics_index = db.get_index_type().indices(); + const auto& bids = db.get_index_type().indices(); + const auto& settle_index = db.get_index_type().indices(); map total_balances; map total_debts; share_type core_in_orders; @@ -53,11 +56,21 @@ void database::debug_dump() // idump(("balance")(a)); total_balances[a.asset_type] += a.balance; } + for( const force_settlement_object& s : settle_index ) + { + total_balances[s.balance.asset_id] += s.balance.amount; + } + for( const vesting_balance_object& vbo : db.get_index_type< vesting_balance_index >().indices() ) + total_balances[ vbo.balance.asset_id ] += vbo.balance.amount; + for( const fba_accumulator_object& fba : db.get_index_type< simple_index< fba_accumulator_object > >() ) + total_balances[ asset_id_type() ] += fba.accumulated_fba_fees; for( const account_statistics_object& s : statistics_index ) { // idump(("statistics")(s)); reported_core_in_orders += s.total_core_in_orders; } + for( const collateral_bid_object& b : bids ) + total_balances[b.inv_swan_price.base.asset_id] += 
b.inv_swan_price.base.amount; for( const limit_order_object& o : db.get_index_type().indices() ) { // idump(("limit_order")(o)); @@ -82,7 +95,9 @@ void database::debug_dump() if( total_balances[asset_id_type()].value != core_asset_data.current_supply.value ) { - edump( (total_balances[asset_id_type()].value)(core_asset_data.current_supply.value )); + FC_THROW( "computed balance of CORE mismatch", + ("computed value",total_balances[asset_id_type()].value) + ("current supply",core_asset_data.current_supply.value) ); } @@ -143,25 +158,19 @@ void debug_apply_update( database& db, const fc::variant_object& vo ) switch( action ) { case db_action_create: - /* - idx.create( [&]( object& obj ) - { - idx.object_from_variant( vo, obj ); - } ); - */ FC_ASSERT( false ); break; case db_action_write: db.modify( db.get_object( oid ), [&]( object& obj ) { idx.object_default( obj ); - idx.object_from_variant( vo, obj ); + idx.object_from_variant( vo, obj, GRAPHENE_MAX_NESTED_OBJECTS ); } ); break; case db_action_update: db.modify( db.get_object( oid ), [&]( object& obj ) { - idx.object_from_variant( vo, obj ); + idx.object_from_variant( vo, obj, GRAPHENE_MAX_NESTED_OBJECTS ); } ); break; case db_action_delete: diff --git a/libraries/chain/db_getter.cpp b/libraries/chain/db_getter.cpp index 4af2df3e10..eea3e7ebb8 100644 --- a/libraries/chain/db_getter.cpp +++ b/libraries/chain/db_getter.cpp @@ -28,48 +28,51 @@ #include #include -#include - namespace graphene { namespace chain { const asset_object& database::get_core_asset() const { - return get(asset_id_type()); + return *_p_core_asset_obj; +} + +const asset_dynamic_data_object& database::get_core_dynamic_data() const +{ + return *_p_core_dynamic_data_obj; } const global_property_object& database::get_global_properties()const { - return get( global_property_id_type() ); + return *_p_global_prop_obj; } const chain_property_object& database::get_chain_properties()const { - return get( chain_property_id_type() ); + return *_p_chain_property_obj; } -const dynamic_global_property_object&database::get_dynamic_global_properties() const +const dynamic_global_property_object& database::get_dynamic_global_properties() const { - return get( dynamic_global_property_id_type() ); + return *_p_dyn_global_prop_obj; } const fee_schedule& database::current_fee_schedule()const { - return get_global_properties().parameters.current_fees; + return *get_global_properties().parameters.current_fees; } time_point_sec database::head_block_time()const { - return get( dynamic_global_property_id_type() ).time; + return get_dynamic_global_properties().time; } uint32_t database::head_block_num()const { - return get( dynamic_global_property_id_type() ).head_block_number; + return get_dynamic_global_properties().head_block_number; } block_id_type database::head_block_id()const { - return get( dynamic_global_property_id_type() ).head_block_id; + return get_dynamic_global_properties().head_block_id; } decltype( chain_parameters::block_interval ) database::block_interval( )const @@ -94,8 +97,27 @@ node_property_object& database::node_properties() uint32_t database::last_non_undoable_block_num() const { - return head_block_num() - _undo_db.size(); + //see https://github.com/bitshares/bitshares-core/issues/377 + /* + There is a case when a value of undo_db.size() is greater then head_block_num(), + and as result we get a wrong value for last_non_undoable_block_num. 
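A worked example of the corrected last_non_undoable_block_num() arithmetic that follows (the numbers are made up): an undo session opened for a block that is still being generated is counted by _undo_db.size(), so it has to be subtracted back out via active_sessions().

#include <cstdint>
#include <iostream>

int main()
{
   const uint32_t head_block_num  = 1000;
   const uint32_t undo_db_size    = 7;   // includes one session for a block under generation
   const uint32_t active_sessions = 1;

   const uint32_t old_value = head_block_num - undo_db_size;                        // 993
   const uint32_t new_value = head_block_num - ( undo_db_size - active_sessions );  // 994

   std::cout << old_value << " -> " << new_value << "\n";
}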
+ To resolve it we should take into account a number of active_sessions in calculations of + last_non_undoable_block_num (active sessions are related to a new block which is under generation). + */ + return head_block_num() - ( _undo_db.size() - _undo_db.active_sessions() ); } +const account_statistics_object& database::get_account_stats_by_owner( account_id_type owner )const +{ + auto& idx = get_index_type().indices().get(); + auto itr = idx.find( owner ); + FC_ASSERT( itr != idx.end(), "Can not find account statistics object for owner ${a}", ("a",owner) ); + return *itr; +} + +const witness_schedule_object& database::get_witness_schedule_object()const +{ + return *_p_witness_schedule_obj; +} } } diff --git a/libraries/chain/db_init.cpp b/libraries/chain/db_init.cpp index 914b3fa8a3..8ffa5e8a5c 100644 --- a/libraries/chain/db_init.cpp +++ b/libraries/chain/db_init.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. + * Copyright (c) 2017 Cryptonomex, Inc., and contributors. * * The MIT License * @@ -64,7 +64,6 @@ #include -#include #include #include @@ -150,6 +149,7 @@ void database::initialize_evaluators() register_evaluator(); register_evaluator(); register_evaluator(); + register_evaluator(); register_evaluator(); register_evaluator(); register_evaluator(); @@ -171,6 +171,8 @@ void database::initialize_evaluators() register_evaluator(); register_evaluator(); register_evaluator(); + register_evaluator(); + register_evaluator(); } void database::initialize_indexes() @@ -179,15 +181,16 @@ void database::initialize_indexes() _undo_db.set_max_size( GRAPHENE_MIN_UNDO_HISTORY ); //Protocol object indexes - add_index< primary_index >(); + add_index< primary_index >(); // 8192 assets per chunk add_index< primary_index >(); + add_index< primary_index >(); - auto acnt_index = add_index< primary_index >(); + auto acnt_index = add_index< primary_index >(); // ~1 million accounts per chunk acnt_index->add_secondary_index(); acnt_index->add_secondary_index(); - add_index< primary_index >(); - add_index< primary_index >(); + add_index< primary_index >(); // 256 members per chunk + add_index< primary_index >(); // 1024 witnesses per chunk add_index< primary_index >(); add_index< primary_index >(); @@ -202,18 +205,22 @@ void database::initialize_indexes() //Implementation object indexes add_index< primary_index >(); - add_index< primary_index >(); - add_index< primary_index >(); + + auto bal_idx = add_index< primary_index >(); + bal_idx->add_secondary_index(); + + add_index< primary_index >(); // 8192 add_index< primary_index> >(); add_index< primary_index> >(); - add_index< primary_index> >(); + add_index< primary_index >(); // 1 Mi add_index< primary_index> >(); - add_index< primary_index> >(); + add_index< primary_index> >(); add_index< primary_index > >(); add_index< primary_index > >(); add_index< primary_index > >(); add_index< primary_index< special_authority_index > >(); add_index< primary_index< buyback_index > >(); + add_index< primary_index >(); add_index< primary_index< simple_index< fba_accumulator_object > > >(); } @@ -231,7 +238,7 @@ void database::init_genesis(const genesis_state_type& genesis_state) _undo_db.disable(); struct auth_inhibitor { auth_inhibitor(database& db) : db(db), old_flags(db.node_properties().skip_flags) - { db.node_properties().skip_flags |= skip_authority_check; } + { db.node_properties().skip_flags |= skip_transaction_signatures; } ~auth_inhibitor() { db.node_properties().skip_flags = old_flags; } private: @@ -241,9 +248,6 @@ void 
database::init_genesis(const genesis_state_type& genesis_state) transaction_evaluation_state genesis_eval_state(this); - flat_index& bsi = get_mutable_index_type< flat_index >(); - bsi.resize(0xffff+1); - // Create blockchain accounts fc::ecc::private_key null_private_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("null_key"))); create([](account_balance_object& b) { @@ -257,12 +261,19 @@ void database::init_genesis(const genesis_state_type& genesis_state) n.owner.weight_threshold = 1; n.active.weight_threshold = 1; n.name = "committee-account"; - n.statistics = create( [&](account_statistics_object& s){ s.owner = n.id; }).id; + n.statistics = create( [&n](account_statistics_object& s){ + s.owner = n.id; + s.name = n.name; + s.core_in_balance = GRAPHENE_MAX_SHARE_SUPPLY; + }).id; }); FC_ASSERT(committee_account.get_id() == GRAPHENE_COMMITTEE_ACCOUNT); FC_ASSERT(create([this](account_object& a) { a.name = "witness-account"; - a.statistics = create([&](account_statistics_object& s){s.owner = a.id;}).id; + a.statistics = create([&a](account_statistics_object& s){ + s.owner = a.id; + s.name = a.name; + }).id; a.owner.weight_threshold = 1; a.active.weight_threshold = 1; a.registrar = a.lifetime_referrer = a.referrer = GRAPHENE_WITNESS_ACCOUNT; @@ -272,7 +283,10 @@ void database::init_genesis(const genesis_state_type& genesis_state) }).get_id() == GRAPHENE_WITNESS_ACCOUNT); FC_ASSERT(create([this](account_object& a) { a.name = "relaxed-committee-account"; - a.statistics = create([&](account_statistics_object& s){s.owner = a.id;}).id; + a.statistics = create([&a](account_statistics_object& s){ + s.owner = a.id; + s.name = a.name; + }).id; a.owner.weight_threshold = 1; a.active.weight_threshold = 1; a.registrar = a.lifetime_referrer = a.referrer = GRAPHENE_RELAXED_COMMITTEE_ACCOUNT; @@ -282,7 +296,10 @@ void database::init_genesis(const genesis_state_type& genesis_state) }).get_id() == GRAPHENE_RELAXED_COMMITTEE_ACCOUNT); FC_ASSERT(create([this](account_object& a) { a.name = "null-account"; - a.statistics = create([&](account_statistics_object& s){s.owner = a.id;}).id; + a.statistics = create([&a](account_statistics_object& s){ + s.owner = a.id; + s.name = a.name; + }).id; a.owner.weight_threshold = 1; a.active.weight_threshold = 1; a.registrar = a.lifetime_referrer = a.referrer = GRAPHENE_NULL_ACCOUNT; @@ -292,7 +309,10 @@ void database::init_genesis(const genesis_state_type& genesis_state) }).get_id() == GRAPHENE_NULL_ACCOUNT); FC_ASSERT(create([this](account_object& a) { a.name = "temp-account"; - a.statistics = create([&](account_statistics_object& s){s.owner = a.id;}).id; + a.statistics = create([&a](account_statistics_object& s){ + s.owner = a.id; + s.name = a.name; + }).id; a.owner.weight_threshold = 0; a.active.weight_threshold = 0; a.registrar = a.lifetime_referrer = a.referrer = GRAPHENE_TEMP_ACCOUNT; @@ -302,7 +322,10 @@ void database::init_genesis(const genesis_state_type& genesis_state) }).get_id() == GRAPHENE_TEMP_ACCOUNT); FC_ASSERT(create([this](account_object& a) { a.name = "proxy-to-self"; - a.statistics = create([&](account_statistics_object& s){s.owner = a.id;}).id; + a.statistics = create([&a](account_statistics_object& s){ + s.owner = a.id; + s.name = a.name; + }).id; a.owner.weight_threshold = 1; a.active.weight_threshold = 1; a.registrar = a.lifetime_referrer = a.referrer = GRAPHENE_NULL_ACCOUNT; @@ -317,9 +340,12 @@ void database::init_genesis(const genesis_state_type& genesis_state) uint64_t id = get_index().get_next_id().instance(); if( id >= 
genesis_state.immutable_parameters.num_special_accounts ) break; - const account_object& acct = create([&](account_object& a) { + const account_object& acct = create([this,id](account_object& a) { a.name = "special-account-" + std::to_string(id); - a.statistics = create([&](account_statistics_object& s){s.owner = a.id;}).id; + a.statistics = create([&a](account_statistics_object& s){ + s.owner = a.id; + s.name = a.name; + }).id; a.owner.weight_threshold = 1; a.active.weight_threshold = 1; a.registrar = a.lifetime_referrer = a.referrer = account_id_type(id); @@ -333,11 +359,11 @@ void database::init_genesis(const genesis_state_type& genesis_state) // Create core asset const asset_dynamic_data_object& dyn_asset = - create([&](asset_dynamic_data_object& a) { + create([](asset_dynamic_data_object& a) { a.current_supply = GRAPHENE_MAX_SHARE_SUPPLY; }); const asset_object& core_asset = - create( [&]( asset_object& a ) { + create( [&genesis_state,&dyn_asset]( asset_object& a ) { a.symbol = GRAPHENE_SYMBOL; a.options.max_supply = genesis_state.max_core_supply; a.precision = GRAPHENE_BLOCKCHAIN_PRECISION_DIGITS; @@ -350,8 +376,11 @@ void database::init_genesis(const genesis_state_type& genesis_state) a.options.core_exchange_rate.quote.asset_id = asset_id_type(0); a.dynamic_asset_data_id = dyn_asset.id; }); - assert( asset_id_type(core_asset.id) == asset().asset_id ); - assert( get_balance(account_id_type(), asset_id_type()) == asset(dyn_asset.current_supply) ); + FC_ASSERT( dyn_asset.id == asset_dynamic_data_id_type() ); + FC_ASSERT( asset_id_type(core_asset.id) == asset().asset_id ); + FC_ASSERT( get_balance(account_id_type(), asset_id_type()) == asset(dyn_asset.current_supply) ); + _p_core_asset_obj = &core_asset; + _p_core_dynamic_data_obj = &dyn_asset; // Create more special assets while( true ) { @@ -359,10 +388,10 @@ void database::init_genesis(const genesis_state_type& genesis_state) if( id >= genesis_state.immutable_parameters.num_special_assets ) break; const asset_dynamic_data_object& dyn_asset = - create([&](asset_dynamic_data_object& a) { + create([](asset_dynamic_data_object& a) { a.current_supply = 0; }); - const asset_object& asset_obj = create( [&]( asset_object& a ) { + const asset_object& asset_obj = create( [id,&dyn_asset]( asset_object& a ) { a.symbol = "SPECIAL" + std::to_string( id ); a.options.max_supply = 0; a.precision = GRAPHENE_BLOCKCHAIN_PRECISION_DIGITS; @@ -382,14 +411,14 @@ void database::init_genesis(const genesis_state_type& genesis_state) chain_id_type chain_id = genesis_state.compute_chain_id(); // Create global properties - create([&](global_property_object& p) { + _p_global_prop_obj = & create([&genesis_state](global_property_object& p) { p.parameters = genesis_state.initial_parameters; // Set fees to zero initially, so that genesis initialization needs not pay them // We'll fix it at the end of the function p.parameters.current_fees->zero_all_fees(); }); - create([&](dynamic_global_property_object& p) { + _p_dyn_global_prop_obj = & create([&genesis_state](dynamic_global_property_object& p) { p.time = genesis_state.initial_timestamp; p.dynamic_flags = 0; p.witness_budget = 0; @@ -399,12 +428,13 @@ void database::init_genesis(const genesis_state_type& genesis_state) FC_ASSERT( (genesis_state.immutable_parameters.min_witness_count & 1) == 1, "min_witness_count must be odd" ); FC_ASSERT( (genesis_state.immutable_parameters.min_committee_member_count & 1) == 1, "min_committee_member_count must be odd" ); - create([&](chain_property_object& p) + _p_chain_property_obj 
= & create([chain_id,&genesis_state](chain_property_object& p) { p.chain_id = chain_id; p.immutable_parameters = genesis_state.immutable_parameters; } ); - create([&](block_summary_object&) {}); + for (uint32_t i = 0; i <= 0x10000; i++) + create( [&]( block_summary_object&) {}); // Create initial accounts for( const auto& account : genesis_state.initial_accounts ) @@ -448,11 +478,6 @@ void database::init_genesis(const genesis_state_type& genesis_state) const auto& assets_by_symbol = get_index_type().indices().get(); const auto get_asset_id = [&assets_by_symbol](const string& symbol) { auto itr = assets_by_symbol.find(symbol); - - // TODO: This is temporary for handling BTS snapshot - if( symbol == "BTS" ) - itr = assets_by_symbol.find(GRAPHENE_SYMBOL); - FC_ASSERT(itr != assets_by_symbol.end(), "Unable to find asset '${sym}'. Did you forget to add a record for it to initial_assets?", ("sym", symbol)); @@ -484,9 +509,9 @@ void database::init_genesis(const genesis_state_type& genesis_state) cop.active = cop.owner; account_id_type owner_account_id = apply_operation(genesis_eval_state, cop).get(); - modify( owner_account_id(*this).statistics(*this), [&]( account_statistics_object& o ) { - o.total_core_in_orders = collateral_rec.collateral; - }); + modify( owner_account_id(*this).statistics(*this), [&collateral_rec]( account_statistics_object& o ) { + o.total_core_in_orders = collateral_rec.collateral; + }); create([&](call_order_object& c) { c.borrower = owner_account_id; @@ -502,13 +527,14 @@ void database::init_genesis(const genesis_state_type& genesis_state) ++collateral_holder_number; } - bitasset_data_id = create([&](asset_bitasset_data_object& b) { + bitasset_data_id = create([&core_asset,new_asset_id](asset_bitasset_data_object& b) { b.options.short_backing_asset = core_asset.id; b.options.minimum_feeds = GRAPHENE_DEFAULT_MINIMUM_FEEDS; + b.asset_id = new_asset_id; }).id; } - dynamic_data_id = create([&](asset_dynamic_data_object& d) { + dynamic_data_id = create([&asset](asset_dynamic_data_object& d) { d.accumulated_fees = asset.accumulated_fees; }).id; @@ -522,7 +548,8 @@ void database::init_genesis(const genesis_state_type& genesis_state) a.issuer = get_account_id(issuer_name); a.options.max_supply = asset.max_supply; a.options.flags = witness_fed_asset; - a.options.issuer_permissions = charge_market_fee | global_settle | witness_fed_asset | committee_fed_asset; + a.options.issuer_permissions = charge_market_fee | override_authority | white_list | transfer_restricted | disable_confidential | + ( asset.is_bitasset ? 
disable_force_settle | global_settle | witness_fed_asset | committee_fed_asset : 0 ); a.dynamic_asset_data_id = dynamic_data_id; a.bitasset_data_id = bitasset_data_id; }); @@ -533,7 +560,7 @@ void database::init_genesis(const genesis_state_type& genesis_state) for( const auto& handout : genesis_state.initial_balances ) { const auto asset_id = get_asset_id(handout.asset_symbol); - create([&handout,&get_asset_id,total_allocation,asset_id](balance_object& b) { + create([&handout,total_allocation,asset_id](balance_object& b) { b.balance = asset(handout.amount, asset_id); b.owner = handout.owner; }); @@ -588,6 +615,7 @@ void database::init_genesis(const genesis_state_type& genesis_state) elog( "Genesis for asset ${aname} is not balanced\n" " Debt is ${debt}\n" " Supply is ${supply}\n", + ("aname", it->symbol) ("debt", debt_itr->second) ("supply", supply_itr->second) ); @@ -648,7 +676,7 @@ void database::init_genesis(const genesis_state_type& genesis_state) }); // Set active witnesses - modify(get_global_properties(), [&](global_property_object& p) { + modify(get_global_properties(), [&genesis_state](global_property_object& p) { for( uint32_t i = 1; i <= genesis_state.initial_active_witnesses; ++i ) { p.active_witnesses.insert(witness_id_type(i)); @@ -661,7 +689,7 @@ void database::init_genesis(const genesis_state_type& genesis_state) }); // Create witness scheduler - create([&]( witness_schedule_object& wso ) + _p_witness_schedule_obj = & create([this]( witness_schedule_object& wso ) { for( const witness_id_type& wid : get_global_properties().active_witnesses ) wso.current_shuffled_witnesses.push_back( wid ); @@ -697,7 +725,7 @@ void database::init_genesis(const genesis_state_type& genesis_state) FC_ASSERT( get_index().get_next_id() == fba_accumulator_id_type( fba_accumulator_id_count ) ); - debug_dump(); + //debug_dump(); _undo_db.enable(); } FC_CAPTURE_AND_RETHROW() } diff --git a/libraries/chain/db_maint.cpp b/libraries/chain/db_maint.cpp index d515a961b2..a0ddba36f8 100644 --- a/libraries/chain/db_maint.cpp +++ b/libraries/chain/db_maint.cpp @@ -24,7 +24,6 @@ #include -#include #include #include @@ -72,12 +71,45 @@ vector> database::sort return refs; } -template -void database::perform_account_maintenance(std::tuple helpers) +template +void database::perform_account_maintenance(Type tally_helper) { - const auto& idx = get_index_type().indices().get(); - for( const account_object& a : idx ) - detail::for_each(helpers, a, detail::gen_seq()); + const auto& bal_idx = get_index_type< account_balance_index >().indices().get< by_maintenance_flag >(); + if( bal_idx.begin() != bal_idx.end() ) + { + auto bal_itr = bal_idx.rbegin(); + while( bal_itr->maintenance_flag ) + { + const account_balance_object& bal_obj = *bal_itr; + + modify( get_account_stats_by_owner( bal_obj.owner ), [&bal_obj](account_statistics_object& aso) { + aso.core_in_balance = bal_obj.balance; + }); + + modify( bal_obj, []( account_balance_object& abo ) { + abo.maintenance_flag = false; + }); + + bal_itr = bal_idx.rbegin(); + } + } + + const auto& stats_idx = get_index_type< account_stats_index >().indices().get< by_maintenance_seq >(); + auto stats_itr = stats_idx.lower_bound( true ); + + while( stats_itr != stats_idx.end() ) + { + const account_statistics_object& acc_stat = *stats_itr; + const account_object& acc_obj = acc_stat.owner( *this ); + ++stats_itr; + + if( acc_stat.has_some_core_voting() ) + tally_helper( acc_obj, acc_stat ); + + if( acc_stat.has_pending_fees() ) + acc_stat.process_fees( acc_obj, *this ); + } + } /// 
@brief A visitor for @ref worker_type which calls pay_worker on the worker within @@ -98,14 +130,17 @@ struct worker_pay_visitor worker.pay_worker(pay, db); } }; + void database::update_worker_votes() { - auto& idx = get_index_type(); - auto itr = idx.indices().get().begin(); + const auto& idx = get_index_type().indices().get(); + auto itr = idx.begin(); + auto itr_end = idx.end(); bool allow_negative_votes = (head_block_time() < HARDFORK_607_TIME); - while( itr != idx.indices().get().end() ) + while( itr != itr_end ) { - modify( *itr, [&]( worker_object& obj ){ + modify( *itr, [this,allow_negative_votes]( worker_object& obj ) + { obj.total_votes_for = _vote_tally_buffer[obj.vote_for]; obj.total_votes_against = allow_negative_votes ? _vote_tally_buffer[obj.vote_against] : 0; }); @@ -115,18 +150,19 @@ void database::update_worker_votes() void database::pay_workers( share_type& budget ) { + const auto head_time = head_block_time(); // ilog("Processing payroll! Available budget is ${b}", ("b", budget)); vector> active_workers; - get_index_type().inspect_all_objects([this, &active_workers](const object& o) { + // TODO optimization: add by_expiration index to avoid iterating through all objects + get_index_type().inspect_all_objects([head_time, &active_workers](const object& o) { const worker_object& w = static_cast(o); - auto now = head_block_time(); - if( w.is_active(now) && w.approving_stake() > 0 ) + if( w.is_active(head_time) && w.approving_stake() > 0 ) active_workers.emplace_back(w); }); // worker with more votes is preferred // if two workers exactly tie for votes, worker with lower ID is preferred - std::sort(active_workers.begin(), active_workers.end(), [this](const worker_object& wa, const worker_object& wb) { + std::sort(active_workers.begin(), active_workers.end(), [](const worker_object& wa, const worker_object& wb) { share_type wa_vote = wa.approving_stake(); share_type wb_vote = wb.approving_stake(); if( wa_vote != wb_vote ) @@ -134,17 +170,22 @@ void database::pay_workers( share_type& budget ) return wa.id < wb.id; }); + const auto last_budget_time = get_dynamic_global_properties().last_budget_time; + const auto passed_time_ms = head_time - last_budget_time; + const auto passed_time_count = passed_time_ms.count(); + const auto day_count = fc::days(1).count(); for( uint32_t i = 0; i < active_workers.size() && budget > 0; ++i ) { const worker_object& active_worker = active_workers[i]; share_type requested_pay = active_worker.daily_pay; - if( head_block_time() - get_dynamic_global_properties().last_budget_time != fc::days(1) ) - { - fc::uint128 pay(requested_pay.value); - pay *= (head_block_time() - get_dynamic_global_properties().last_budget_time).count(); - pay /= fc::days(1).count(); - requested_pay = pay.to_uint64(); - } + + // Note: if there is a good chance that passed_time_count == day_count, + // for better performance, can avoid the 128 bit calculation by adding a check. + // Since it's not the case on BitShares mainnet, we're not using a check here. 
+ fc::uint128 pay(requested_pay.value); + pay *= passed_time_count; + pay /= day_count; + requested_pay = pay.to_uint64(); share_type actual_pay = std::min(budget, requested_pay); //ilog(" ==> Paying ${a} to worker ${w}", ("w", active_worker.id)("a", actual_pay)); @@ -177,21 +218,37 @@ void database::update_active_witnesses() } const chain_property_object& cpo = get_chain_properties(); - auto wits = sort_votable_objects(std::max(witness_count*2+1, (size_t)cpo.immutable_parameters.min_witness_count)); + + witness_count = std::max( witness_count*2+1, (size_t)cpo.immutable_parameters.min_witness_count ); + auto wits = sort_votable_objects( witness_count ); const global_property_object& gpo = get_global_properties(); - const auto& all_witnesses = get_index_type().indices(); + auto update_witness_total_votes = [this]( const witness_object& wit ) { + modify( wit, [this]( witness_object& obj ) + { + obj.total_votes = _vote_tally_buffer[obj.vote_id]; + }); + }; - for( const witness_object& wit : all_witnesses ) + if( _track_standby_votes ) + { + const auto& all_witnesses = get_index_type().indices(); + for( const witness_object& wit : all_witnesses ) + { + update_witness_total_votes( wit ); + } + } + else { - modify( wit, [&]( witness_object& obj ){ - obj.total_votes = _vote_tally_buffer[wit.vote_id]; - }); + for( const witness_object& wit : wits ) + { + update_witness_total_votes( wit ); + } } // Update witness authority - modify( get(GRAPHENE_WITNESS_ACCOUNT), [&]( account_object& a ) + modify( get(GRAPHENE_WITNESS_ACCOUNT), [this,&wits]( account_object& a ) { if( head_block_time() < HARDFORK_533_TIME ) { @@ -229,7 +286,8 @@ void database::update_active_witnesses() } } ); - modify(gpo, [&]( global_property_object& gp ){ + modify( gpo, [&wits]( global_property_object& gp ) + { gp.active_witnesses.clear(); gp.active_witnesses.reserve(wits.size()); std::transform(wits.begin(), wits.end(), @@ -244,31 +302,54 @@ void database::update_active_witnesses() void database::update_active_committee_members() { try { assert( _committee_count_histogram_buffer.size() > 0 ); - share_type stake_target = (_total_voting_stake-_witness_count_histogram_buffer[0]) / 2; + share_type stake_target = (_total_voting_stake-_committee_count_histogram_buffer[0]) / 2; /// accounts that vote for 0 or 1 witness do not get to express an opinion on /// the number of witnesses to have (they abstain and are non-voting accounts) uint64_t stake_tally = 0; // _committee_count_histogram_buffer[0]; size_t committee_member_count = 0; if( stake_target > 0 ) + { while( (committee_member_count < _committee_count_histogram_buffer.size() - 1) - && (stake_tally <= stake_target) ) + && (stake_tally <= stake_target.value) ) + { stake_tally += _committee_count_histogram_buffer[++committee_member_count]; + } + } const chain_property_object& cpo = get_chain_properties(); - auto committee_members = sort_votable_objects(std::max(committee_member_count*2+1, (size_t)cpo.immutable_parameters.min_committee_member_count)); - for( const committee_member_object& del : committee_members ) + committee_member_count = std::max( committee_member_count*2+1, (size_t)cpo.immutable_parameters.min_committee_member_count ); + auto committee_members = sort_votable_objects( committee_member_count ); + + auto update_committee_member_total_votes = [this]( const committee_member_object& cm ) { + modify( cm, [this]( committee_member_object& obj ) + { + obj.total_votes = _vote_tally_buffer[obj.vote_id]; + }); + }; + + if( _track_standby_votes ) + { + const auto& 
all_committee_members = get_index_type().indices(); + for( const committee_member_object& cm : all_committee_members ) + { + update_committee_member_total_votes( cm ); + } + } + else { - modify( del, [&]( committee_member_object& obj ){ - obj.total_votes = _vote_tally_buffer[del.vote_id]; - }); + for( const committee_member_object& cm : committee_members ) + { + update_committee_member_total_votes( cm ); + } } // Update committee authorities if( !committee_members.empty() ) { - modify(get(GRAPHENE_COMMITTEE_ACCOUNT), [&](account_object& a) + const account_object& committee_account = get(GRAPHENE_COMMITTEE_ACCOUNT); + modify( committee_account, [this,&committee_members](account_object& a) { if( head_block_time() < HARDFORK_533_TIME ) { @@ -277,10 +358,10 @@ void database::update_active_committee_members() a.active.weight_threshold = 0; a.active.clear(); - for( const committee_member_object& del : committee_members ) + for( const committee_member_object& cm : committee_members ) { - weights.emplace(del.committee_member_account, _vote_tally_buffer[del.vote_id]); - total_votes += _vote_tally_buffer[del.vote_id]; + weights.emplace( cm.committee_member_account, _vote_tally_buffer[cm.vote_id] ); + total_votes += _vote_tally_buffer[cm.vote_id]; } // total_votes is 64 bits. Subtract the number of leading low bits from 64 to get the number of useful bits, @@ -304,12 +385,14 @@ void database::update_active_committee_members() vc.add( cm.committee_member_account, _vote_tally_buffer[cm.vote_id] ); vc.finish( a.active ); } - } ); - modify(get(GRAPHENE_RELAXED_COMMITTEE_ACCOUNT), [&](account_object& a) { - a.active = get(GRAPHENE_COMMITTEE_ACCOUNT).active; + }); + modify( get(GRAPHENE_RELAXED_COMMITTEE_ACCOUNT), [&committee_account](account_object& a) + { + a.active = committee_account.active; }); } - modify(get_global_properties(), [&](global_property_object& gp) { + modify( get_global_properties(), [&committee_members](global_property_object& gp) + { gp.active_committee_members.clear(); std::transform(committee_members.begin(), committee_members.end(), std::inserter(gp.active_committee_members, gp.active_committee_members.begin()), @@ -320,8 +403,8 @@ void database::update_active_committee_members() void database::initialize_budget_record( fc::time_point_sec now, budget_record& rec )const { const dynamic_global_property_object& dpo = get_dynamic_global_properties(); - const asset_object& core = asset_id_type(0)(*this); - const asset_dynamic_data_object& core_dd = core.dynamic_asset_data_id(*this); + const asset_object& core = get_core_asset(); + const asset_dynamic_data_object& core_dd = get_core_dynamic_data(); rec.from_initial_reserve = core.reserved(*this); rec.from_accumulated_fees = core_dd.accumulated_fees; @@ -356,7 +439,6 @@ void database::initialize_budget_record( fc::time_point_sec now, budget_record& // be able to use the entire reserve budget_u128 += ((uint64_t(1) << GRAPHENE_CORE_ASSET_CYCLE_RATE_BITS) - 1); budget_u128 >>= GRAPHENE_CORE_ASSET_CYCLE_RATE_BITS; - share_type budget; if( budget_u128 < reserve.value ) rec.total_budget = share_type(budget_u128.to_uint64()); else @@ -374,8 +456,7 @@ void database::process_budget() { const global_property_object& gpo = get_global_properties(); const dynamic_global_property_object& dpo = get_dynamic_global_properties(); - const asset_dynamic_data_object& core = - asset_id_type(0)(*this).dynamic_asset_data_id(*this); + const asset_dynamic_data_object& core = get_core_dynamic_data(); fc::time_point_sec now = head_block_time(); int64_t time_to_maint 
= (dpo.next_maintenance_time - now).to_seconds(); @@ -535,8 +616,7 @@ void split_fba_balance( if( fba.accumulated_fba_fees == 0 ) return; - const asset_object& core = asset_id_type(0)(db); - const asset_dynamic_data_object& core_dd = core.dynamic_asset_data_id(db); + const asset_dynamic_data_object& core_dd = db.get_core_dynamic_data(); if( !fba.is_configured(db) ) { @@ -612,7 +692,7 @@ void distribute_fba_balances( database& db ) void create_buyback_orders( database& db ) { const auto& bbo_idx = db.get_index_type< buyback_index >().indices().get(); - const auto& bal_idx = db.get_index_type< account_balance_index >().indices().get< by_account_asset >(); + const auto& bal_idx = db.get_index_type< primary_index< account_balance_index > >().get_secondary_index< balances_by_account_index >(); for( const buyback_object& bbo : bbo_idx ) { @@ -620,7 +700,6 @@ void create_buyback_orders( database& db ) assert( asset_to_buy.buyback_account.valid() ); const account_object& buyback_account = (*(asset_to_buy.buyback_account))(db); - asset_id_type next_asset = asset_id_type(); if( !buyback_account.allowed_assets.valid() ) { @@ -628,16 +707,11 @@ void create_buyback_orders( database& db ) continue; } - while( true ) + for( const auto& entry : bal_idx.get_account_balances( buyback_account.id ) ) { - auto it = bal_idx.lower_bound( boost::make_tuple( buyback_account.id, next_asset ) ); - if( it == bal_idx.end() ) - break; - if( it->owner != buyback_account.id ) - break; + const auto* it = entry.second; asset_id_type asset_to_sell = it->asset_type; share_type amount_to_sell = it->balance; - next_asset = asset_to_sell + 1; if( asset_to_sell == asset_to_buy.id ) continue; if( amount_to_sell == 0 ) @@ -716,6 +790,290 @@ void deprecate_annual_members( database& db ) return; } +void database::process_bids( const asset_bitasset_data_object& bad ) +{ + if( bad.is_prediction_market ) return; + if( bad.current_feed.settlement_price.is_null() ) return; + + asset_id_type to_revive_id = (asset( 0, bad.options.short_backing_asset ) * bad.settlement_price).asset_id; + const asset_object& to_revive = to_revive_id( *this ); + const asset_dynamic_data_object& bdd = to_revive.dynamic_data( *this ); + + const auto& bid_idx = get_index_type< collateral_bid_index >().indices().get(); + const auto start = bid_idx.lower_bound( boost::make_tuple( to_revive_id, price::max( bad.options.short_backing_asset, to_revive_id ), collateral_bid_id_type() ) ); + + share_type covered = 0; + auto itr = start; + while( covered < bdd.current_supply && itr != bid_idx.end() && itr->inv_swan_price.quote.asset_id == to_revive_id ) + { + const collateral_bid_object& bid = *itr; + asset debt_in_bid = bid.inv_swan_price.quote; + if( debt_in_bid.amount > bdd.current_supply ) + debt_in_bid.amount = bdd.current_supply; + asset total_collateral = debt_in_bid * bad.settlement_price; + total_collateral += bid.inv_swan_price.base; + price call_price = price::call_price( debt_in_bid, total_collateral, bad.current_feed.maintenance_collateral_ratio ); + if( ~call_price >= bad.current_feed.settlement_price ) break; + covered += debt_in_bid.amount; + ++itr; + } + if( covered < bdd.current_supply ) return; + + const auto end = itr; + share_type to_cover = bdd.current_supply; + share_type remaining_fund = bad.settlement_fund; + for( itr = start; itr != end; ) + { + const collateral_bid_object& bid = *itr; + ++itr; + asset debt_in_bid = bid.inv_swan_price.quote; + if( debt_in_bid.amount > bdd.current_supply ) + debt_in_bid.amount = bdd.current_supply; + share_type 
debt = debt_in_bid.amount; + share_type collateral = (debt_in_bid * bad.settlement_price).amount; + if( debt >= to_cover ) + { + debt = to_cover; + collateral = remaining_fund; + } + to_cover -= debt; + remaining_fund -= collateral; + execute_bid( bid, debt, collateral, bad.current_feed ); + } + FC_ASSERT( remaining_fund == 0 ); + FC_ASSERT( to_cover == 0 ); + + _cancel_bids_and_revive_mpa( to_revive, bad ); +} + +void update_and_match_call_orders( database& db ) +{ + // Update call_price + wlog( "Updating all call orders for hardfork core-343 at block ${n}", ("n",db.head_block_num()) ); + asset_id_type current_asset; + const asset_bitasset_data_object* abd = nullptr; + // by_collateral index won't change after call_price updated, so it's safe to iterate + for( const auto& call_obj : db.get_index_type().indices().get() ) + { + if( current_asset != call_obj.debt_type() ) // debt type won't be asset_id_type(), abd will always get initialized + { + current_asset = call_obj.debt_type(); + abd = &current_asset(db).bitasset_data(db); + } + if( !abd || abd->is_prediction_market ) // nothing to do with PM's; check !abd just to be safe + continue; + db.modify( call_obj, [abd]( call_order_object& call ) { + call.call_price = price::call_price( call.get_debt(), call.get_collateral(), + abd->current_feed.maintenance_collateral_ratio ); + }); + } + // Match call orders + const auto& asset_idx = db.get_index_type().indices().get(); + auto itr = asset_idx.lower_bound( true /** market issued */ ); + while( itr != asset_idx.end() ) + { + const asset_object& a = *itr; + ++itr; + // be here, next_maintenance_time should have been updated already + db.check_call_orders( a, true, false ); // allow black swan, and call orders are taker + } + wlog( "Done updating all call orders for hardfork core-343 at block ${n}", ("n",db.head_block_num()) ); +} + +void database::process_bitassets() +{ + time_point_sec head_time = head_block_time(); + uint32_t head_epoch_seconds = head_time.sec_since_epoch(); + bool after_hf_core_518 = ( head_time >= HARDFORK_CORE_518_TIME ); // clear expired feeds + + const auto update_bitasset = [this,head_time,head_epoch_seconds,after_hf_core_518]( asset_bitasset_data_object &o ) + { + o.force_settled_volume = 0; // Reset all BitAsset force settlement volumes to zero + + // clear expired feeds + if( after_hf_core_518 ) + { + const auto &asset = get( o.asset_id ); + auto flags = asset.options.flags; + if ( ( flags & ( witness_fed_asset | committee_fed_asset ) ) && + o.options.feed_lifetime_sec < head_epoch_seconds ) // if smartcoin && check overflow + { + fc::time_point_sec calculated = head_time - o.options.feed_lifetime_sec; + for( auto itr = o.feeds.rbegin(); itr != o.feeds.rend(); ) // loop feeds + { + auto feed_time = itr->second.first; + std::advance( itr, 1 ); + if( feed_time < calculated ) + o.feeds.erase( itr.base() ); // delete expired feed + } + } + } + }; + + for( const auto& d : get_index_type().indices() ) + { + modify( d, update_bitasset ); + if( d.has_settlement() ) + process_bids(d); + } +} + +/****** + * @brief one-time data process for hard fork core-868-890 + * + * Prior to hardfork 868, switching a bitasset's shorting asset would not reset its + * feeds. This method will run at the hardfork time, and erase (or nullify) feeds + * that have incorrect backing assets. + * https://github.com/bitshares/bitshares-core/issues/868 + * + * Prior to hardfork 890, changing a bitasset's feed expiration time would not + trigger a median feed update.
This method will run at the hardfork time, and + * correct all median feed data. + * https://github.com/bitshares/bitshares-core/issues/890 + * + * @param db the database + * @param skip_check_call_orders true if check_call_orders() should not be called + */ +// TODO: for better performance, this function can be removed if it actually updated nothing at hf time. +// * Also need to update related test cases +// * NOTE: the removal can't be applied to testnet +void process_hf_868_890( database& db, bool skip_check_call_orders ) +{ + const auto head_time = db.head_block_time(); + const auto head_num = db.head_block_num(); + wlog( "Processing hard fork core-868-890 at block ${n}", ("n",head_num) ); + // for each market issued asset + const auto& asset_idx = db.get_index_type().indices().get(); + for( auto asset_itr = asset_idx.lower_bound(true); asset_itr != asset_idx.end(); ++asset_itr ) + { + const auto& current_asset = *asset_itr; + // Incorrect witness & committee feeds can simply be removed. + // For non-witness-fed and non-committee-fed assets, set incorrect + // feeds to price(), since we can't simply remove them. For more information: + // https://github.com/bitshares/bitshares-core/pull/832#issuecomment-384112633 + bool is_witness_or_committee_fed = false; + if ( current_asset.options.flags & ( witness_fed_asset | committee_fed_asset ) ) + is_witness_or_committee_fed = true; + + // for each feed + const asset_bitasset_data_object& bitasset_data = current_asset.bitasset_data(db); + // NOTE: We'll only need old_feed if HF343 hasn't rolled out yet + auto old_feed = bitasset_data.current_feed; + bool feeds_changed = false; // did any feed change + auto itr = bitasset_data.feeds.begin(); + while( itr != bitasset_data.feeds.end() ) + { + // If the feed is invalid + if ( itr->second.second.settlement_price.quote.asset_id != bitasset_data.options.short_backing_asset + && ( is_witness_or_committee_fed || itr->second.second.settlement_price != price() ) ) + { + feeds_changed = true; + db.modify( bitasset_data, [&itr, is_witness_or_committee_fed]( asset_bitasset_data_object& obj ) + { + if( is_witness_or_committee_fed ) + { + // erase the invalid feed + itr = obj.feeds.erase(itr); + } + else + { + // nullify the invalid feed + obj.feeds[itr->first].second.settlement_price = price(); + ++itr; + } + }); + } + else + { + // Feed is valid. Skip it. + ++itr; + } + } // end loop of each feed + + // if any feed was modified, print a warning message + if( feeds_changed ) + { + wlog( "Found invalid feed for asset ${asset_sym} (${asset_id}) during hardfork core-868-890", + ("asset_sym", current_asset.symbol)("asset_id", current_asset.id) ); + } + + // always update the median feed due to https://github.com/bitshares/bitshares-core/issues/890 + db.modify( bitasset_data, [&head_time]( asset_bitasset_data_object &obj ) { + obj.update_median_feeds( head_time ); + }); + + bool median_changed = ( old_feed.settlement_price != bitasset_data.current_feed.settlement_price ); + bool median_feed_changed = ( !( old_feed == bitasset_data.current_feed ) ); + if( median_feed_changed ) + { + wlog( "Median feed for asset ${asset_sym} (${asset_id}) changed during hardfork core-868-890", + ("asset_sym", current_asset.symbol)("asset_id", current_asset.id) ); + } + + // Note: due to bitshares-core issue #935, the check below (using median_changed) is incorrect. + // However, `skip_check_call_orders` will likely be true in both testnet and mainnet, + // so effectively the incorrect code won't make a difference. 
+ Additionally, we have code to update all call orders again during hardfork core-935 + // TODO cleanup after hard fork + if( !skip_check_call_orders && median_changed ) // check_call_orders should be called + { + db.check_call_orders( current_asset ); + } + else if( !skip_check_call_orders && median_feed_changed ) + { + wlog( "Incorrectly skipped check_call_orders for asset ${asset_sym} (${asset_id}) during hardfork core-868-890", + ("asset_sym", current_asset.symbol)("asset_id", current_asset.id) ); + } + } // for each market issued asset + wlog( "Done processing hard fork core-868-890 at block ${n}", ("n",head_num) ); +} + +/****** + * @brief one-time data process for hard fork core-935 + * + * Prior to hardfork 935, `check_call_orders` may be unintendedly skipped when + * median price feed has changed. This method will run at the hardfork time, and + * call `check_call_orders` for all markets. + * https://github.com/bitshares/bitshares-core/issues/935 + * + * @param db the database + */ +// TODO: for better performance, this function can be removed if it actually updated nothing at hf time. +// * Also need to update related test cases +// * NOTE: perhaps the removal can't be applied to testnet +void process_hf_935( database& db ) +{ + bool changed_something = false; + const asset_bitasset_data_object* bitasset = nullptr; + bool settled_before_check_call; + bool settled_after_check_call; + // for each market issued asset + const auto& asset_idx = db.get_index_type().indices().get(); + for( auto asset_itr = asset_idx.lower_bound(true); asset_itr != asset_idx.end(); ++asset_itr ) + { + const auto& current_asset = *asset_itr; + + if( !changed_something ) + { + bitasset = &current_asset.bitasset_data( db ); + settled_before_check_call = bitasset->has_settlement(); // whether already force settled + } + + bool called_some = db.check_call_orders( current_asset ); + + if( !changed_something ) + { + settled_after_check_call = bitasset->has_settlement(); // whether already force settled + + if( settled_before_check_call != settled_after_check_call || called_some ) + { + changed_something = true; + wlog( "process_hf_935 changed something" ); + } + } + } +} + void database::perform_chain_maintenance(const signed_block& next_block, const global_property_object& global_props) { const auto& gpo = get_global_properties(); @@ -736,7 +1094,8 @@ void database::perform_chain_maintenance(const signed_block& next_block, const g d._total_voting_stake = 0; } - void operator()(const account_object& stake_account) { + void operator()( const account_object& stake_account, const account_statistics_object& stats ) + { if( props.parameters.count_non_member_votes || stake_account.is_member(d.head_block_time()) ) { // There may be a difference between the account whose stake is voting and the one specifying opinions. @@ -747,10 +1106,9 @@ void database::perform_chain_maintenance(const signed_block& next_block, const g GRAPHENE_PROXY_TO_SELF_ACCOUNT)? stake_account : d.get(stake_account.options.voting_account); - const auto& stats = stake_account.statistics(d); uint64_t voting_stake = stats.total_core_in_orders.value + (stake_account.cashback_vb.valid() ?
(*stake_account.cashback_vb)(d).balance.amount.value: 0) - + d.get_balance(stake_account.get_id(), asset_id_type()).amount.value; + + stats.core_in_balance.value; for( vote_id_type id : opinion_account.options.votes ) { @@ -787,22 +1145,8 @@ void database::perform_chain_maintenance(const signed_block& next_block, const g } } } tally_helper(*this, gpo); - struct process_fees_helper { - database& d; - const global_property_object& props; - - process_fees_helper(database& d, const global_property_object& gpo) - : d(d), props(gpo) {} - void operator()(const account_object& a) { - a.statistics(d).process_fees(a, d); - } - } fee_helper(*this, gpo); - - perform_account_maintenance(std::tie( - tally_helper, - fee_helper - )); + perform_account_maintenance( tally_helper ); struct clear_canary { clear_canary(vector& target): target(target){} @@ -819,9 +1163,10 @@ void database::perform_chain_maintenance(const signed_block& next_block, const g update_active_committee_members(); update_worker_votes(); - modify(gpo, [this](global_property_object& p) { + const auto& dgpo = get_dynamic_global_properties(); + + modify(gpo, [&dgpo](global_property_object& p) { // Remove scaling of account registration fee - const auto& dgpo = get_dynamic_global_properties(); p.parameters.current_fees->get().basic_fee >>= p.parameters.account_fee_scale_bitshifts * (dgpo.accounts_registered_this_interval / p.parameters.accounts_per_fee_scale); @@ -832,7 +1177,7 @@ void database::perform_chain_maintenance(const signed_block& next_block, const g } }); - auto next_maintenance_time = get(dynamic_global_property_id_type()).next_maintenance_time; + auto next_maintenance_time = dgpo.next_maintenance_time; auto maintenance_interval = gpo.parameters.maintenance_interval; if( next_maintenance_time <= next_block.timestamp ) @@ -862,19 +1207,33 @@ void database::perform_chain_maintenance(const signed_block& next_block, const g } } - const dynamic_global_property_object& dgpo = get_dynamic_global_properties(); - if( (dgpo.next_maintenance_time < HARDFORK_613_TIME) && (next_maintenance_time >= HARDFORK_613_TIME) ) deprecate_annual_members(*this); + // To reset call_price of all call orders, then match by new rule + bool to_update_and_match_call_orders = false; + if( (dgpo.next_maintenance_time <= HARDFORK_CORE_343_TIME) && (next_maintenance_time > HARDFORK_CORE_343_TIME) ) + to_update_and_match_call_orders = true; + + // Process inconsistent price feeds + if( (dgpo.next_maintenance_time <= HARDFORK_CORE_868_890_TIME) && (next_maintenance_time > HARDFORK_CORE_868_890_TIME) ) + process_hf_868_890( *this, to_update_and_match_call_orders ); + + // Explicitly call check_call_orders of all markets + if( (dgpo.next_maintenance_time <= HARDFORK_CORE_935_TIME) && (next_maintenance_time > HARDFORK_CORE_935_TIME) + && !to_update_and_match_call_orders ) + process_hf_935( *this ); + modify(dgpo, [next_maintenance_time](dynamic_global_property_object& d) { d.next_maintenance_time = next_maintenance_time; d.accounts_registered_this_interval = 0; }); - // Reset all BitAsset force settlement volumes to zero - for( const asset_bitasset_data_object* d : get_index_type() ) - modify(*d, [](asset_bitasset_data_object& d) { d.force_settled_volume = 0; }); + // We need to do it after updated next_maintenance_time, to apply new rules here + if( to_update_and_match_call_orders ) + update_and_match_call_orders(*this); + + process_bitassets(); // process_budget needs to run at the bottom because // it needs to know the next_maintenance_time diff --git 
a/libraries/chain/db_management.cpp b/libraries/chain/db_management.cpp index e348fc953e..d09533b92c 100644 --- a/libraries/chain/db_management.cpp +++ b/libraries/chain/db_management.cpp @@ -24,6 +24,9 @@ #include +#include +#include +#include #include #include @@ -32,6 +35,8 @@ #include #include #include +#include +#include namespace graphene { namespace chain { @@ -46,53 +51,104 @@ database::~database() clear_pending(); } -void database::reindex(fc::path data_dir, const genesis_state_type& initial_allocation) +void database::reindex( fc::path data_dir ) { try { - ilog( "reindexing blockchain" ); - wipe(data_dir, false); - open(data_dir, [&initial_allocation]{return initial_allocation;}); - - auto start = fc::time_point::now(); auto last_block = _block_id_to_block.last(); if( !last_block ) { elog( "!no last block" ); edump((last_block)); return; } + if( last_block->block_num() <= head_block_num()) return; + ilog( "reindexing blockchain" ); + auto start = fc::time_point::now(); const auto last_block_num = last_block->block_num(); + uint32_t flush_point = last_block_num < 10000 ? 0 : last_block_num - 10000; + uint32_t undo_point = last_block_num < 50 ? 0 : last_block_num - 50; - ilog( "Replaying blocks..." ); - _undo_db.disable(); - for( uint32_t i = 1; i <= last_block_num; ++i ) + ilog( "Replaying blocks, starting at ${next}...", ("next",head_block_num() + 1) ); + if( head_block_num() >= undo_point ) { - if( i % 2000 == 0 ) std::cerr << " " << double(i*100)/last_block_num << "% "< block = _block_id_to_block.fetch_by_number(i); - if( !block.valid() ) + if( head_block_num() > 0 ) + _fork_db.start_block( *fetch_block_by_number( head_block_num() ) ); + } + else + _undo_db.disable(); + + uint32_t skip = node_properties().skip_flags; + + size_t total_block_size = _block_id_to_block.total_block_size(); + const auto& gpo = get_global_properties(); + std::queue< std::tuple< size_t, signed_block, fc::future< void > > > blocks; + uint32_t next_block_num = head_block_num() + 1; + uint32_t i = next_block_num; + while( next_block_num <= last_block_num || !blocks.empty() ) + { + if( next_block_num <= last_block_num && blocks.size() < 20 ) { - wlog( "Reindexing terminated due to gap: Block ${i} does not exist!", ("i", i) ); - uint32_t dropped_count = 0; - while( true ) + const size_t processed_block_size = _block_id_to_block.blocks_current_position(); + fc::optional< signed_block > block = _block_id_to_block.fetch_by_number( next_block_num++ ); + if( block.valid() ) { - fc::optional< block_id_type > last_id = _block_id_to_block.last_id(); - // this can trigger if we attempt to e.g. read a file that has block #2 but no block #1 - if( !last_id.valid() ) - break; - // we've caught up to the gap - if( block_header::num_from_id( *last_id ) <= i ) - break; - _block_id_to_block.remove( *last_id ); - dropped_count++; + if( block->timestamp >= last_block->timestamp - gpo.parameters.maximum_time_until_expiration ) + skip &= ~skip_transaction_dupe_check; + blocks.emplace( processed_block_size, std::move(*block), fc::future() ); + std::get<2>(blocks.back()) = precompute_parallel( std::get<1>(blocks.back()), skip ); + } + else + { + wlog( "Reindexing terminated due to gap: Block ${i} does not exist!", ("i", i) ); + uint32_t dropped_count = 0; + while( true ) + { + fc::optional< block_id_type > last_id = _block_id_to_block.last_id(); + // this can trigger if we attempt to e.g. 
read a file that has block #2 but no block #1 + if( !last_id.valid() ) + break; + // we've caught up to the gap + if( block_header::num_from_id( *last_id ) <= i ) + break; + _block_id_to_block.remove( *last_id ); + dropped_count++; + } + wlog( "Dropped ${n} blocks from after the gap", ("n", dropped_count) ); + next_block_num = last_block_num + 1; // don't load more blocks } - wlog( "Dropped ${n} blocks from after the gap", ("n", dropped_count) ); - break; } - apply_block(*block, skip_witness_signature | - skip_transaction_signatures | - skip_transaction_dupe_check | - skip_tapos_check | - skip_witness_schedule_check | - skip_authority_check); + else + { + std::get<2>(blocks.front()).wait(); + const signed_block& block = std::get<1>(blocks.front()); + + if( i % 10000 == 0 ) + { + ilog( + " [by size: ${size}% ${processed} of ${total}] [by num: ${num}% ${i} of ${last}]", + ("size", double(std::get<0>(blocks.front())) / total_block_size * 100) + ("processed", std::get<0>(blocks.front())) + ("total", total_block_size) + ("num", double(i*100)/last_block_num) + ("i", i) + ("last", last_block_num) + ); + } + if( i == flush_point ) + { + ilog( "Writing database to disk at block ${i}", ("i",i) ); + flush(); + ilog( "Done" ); + } + if( i < undo_point ) + apply_block( block, skip ); + else + { + _undo_db.enable(); + push_block( block, skip ); + } + blocks.pop(); + i++; + } } _undo_db.enable(); auto end = fc::time_point::now(); @@ -102,7 +158,9 @@ void database::reindex(fc::path data_dir, const genesis_state_type& initial_allo void database::wipe(const fc::path& data_dir, bool include_blocks) { ilog("Wiping database", ("include_blocks", include_blocks)); - close(); + if (_opened) { + close(); + } object_database::wipe(data_dir); if( include_blocks ) fc::remove_all( data_dir / "database" ); @@ -110,34 +168,63 @@ void database::wipe(const fc::path& data_dir, bool include_blocks) void database::open( const fc::path& data_dir, - std::function genesis_loader) + std::function genesis_loader, + const std::string& db_version) { try { + bool wipe_object_db = false; + if( !fc::exists( data_dir / "db_version" ) ) + wipe_object_db = true; + else + { + std::string version_string; + fc::read_file_contents( data_dir / "db_version", version_string ); + wipe_object_db = ( version_string != db_version ); + } + if( wipe_object_db ) { + ilog("Wiping object_database due to missing or wrong version"); + object_database::wipe( data_dir ); + std::ofstream version_file( (data_dir / "db_version").generic_string().c_str(), + std::ios::out | std::ios::binary | std::ios::trunc ); + version_file.write( db_version.c_str(), db_version.size() ); + version_file.close(); + } + object_database::open(data_dir); _block_id_to_block.open(data_dir / "database" / "block_num_to_block"); if( !find(global_property_id_type()) ) init_genesis(genesis_loader()); + else + { + _p_core_asset_obj = &get( asset_id_type() ); + _p_core_dynamic_data_obj = &get( asset_dynamic_data_id_type() ); + _p_global_prop_obj = &get( global_property_id_type() ); + _p_chain_property_obj = &get( chain_property_id_type() ); + _p_dyn_global_prop_obj = &get( dynamic_global_property_id_type() ); + _p_witness_schedule_obj = &get( witness_schedule_id_type() ); + } - fc::optional last_block = _block_id_to_block.last(); + fc::optional last_block = _block_id_to_block.last_id(); if( last_block.valid() ) { - _fork_db.start_block( *last_block ); - idump((last_block->id())(last_block->block_num())); - if( last_block->id() != head_block_id() ) - { - FC_ASSERT( head_block_num() == 0, 
"last block ID does not match current chain state" ); - } + FC_ASSERT( *last_block >= head_block_id(), + "last block ID does not match current chain state", + ("last_block->id", last_block)("head_block_id",head_block_num()) ); + reindex( data_dir ); } - //idump((head_block_id())(head_block_num())); + _opened = true; } FC_CAPTURE_LOG_AND_RETHROW( (data_dir) ) } void database::close(bool rewind) { + if (!_opened) + return; + // TODO: Save pending tx's on close() clear_pending(); @@ -149,23 +236,17 @@ void database::close(bool rewind) { uint32_t cutoff = get_dynamic_global_properties().last_irreversible_block_num; + ilog( "Rewinding from ${head} to ${cutoff}", ("head",head_block_num())("cutoff",cutoff) ); while( head_block_num() > cutoff ) { - // elog("pop"); block_id_type popped_block_id = head_block_id(); pop_block(); _fork_db.remove(popped_block_id); // doesn't throw on missing - try - { - _block_id_to_block.remove(popped_block_id); - } - catch (const fc::key_not_found_exception&) - { - } } } - catch (...) + catch ( const fc::exception& e ) { + wlog( "Database close unexpected exception: ${e}", ("e", e) ); } } @@ -181,6 +262,8 @@ void database::close(bool rewind) _block_id_to_block.close(); _fork_db.reset(); + + _opened = false; } } } diff --git a/libraries/chain/db_market.cpp b/libraries/chain/db_market.cpp index 2c8251ca53..6b8f67ea1c 100644 --- a/libraries/chain/db_market.cpp +++ b/libraries/chain/db_market.cpp @@ -37,18 +37,11 @@ namespace graphene { namespace chain { * All margin positions are force closed at the swan price * Collateral received goes into a force-settlement fund * No new margin positions can be created for this asset - * No more price feed updates * Force settlement happens without delay at the swan price, deducting from force-settlement fund * No more asset updates may be issued. */ void database::globally_settle_asset( const asset_object& mia, const price& settlement_price ) { try { - /* - elog( "BLACK SWAN!" 
); - debug_dump(); - edump( (mia.symbol)(settlement_price) ); - */ - const asset_bitasset_data_object& bitasset = mia.bitasset_data(*this); FC_ASSERT( !bitasset.has_settlement(), "black swan already occurred, it should not happen again" ); @@ -61,12 +54,25 @@ void database::globally_settle_asset( const asset_object& mia, const price& sett const call_order_index& call_index = get_index_type(); const auto& call_price_index = call_index.indices().get(); + auto maint_time = get_dynamic_global_properties().next_maintenance_time; + bool before_core_hardfork_342 = ( maint_time <= HARDFORK_CORE_342_TIME ); // better rounding + // cancel all call orders and accumulate it into collateral_gathered auto call_itr = call_price_index.lower_bound( price::min( bitasset.options.short_backing_asset, mia.id ) ); auto call_end = call_price_index.upper_bound( price::max( bitasset.options.short_backing_asset, mia.id ) ); + asset pays; while( call_itr != call_end ) { - auto pays = call_itr->get_debt() * settlement_price; + if( before_core_hardfork_342 ) + { + pays = call_itr->get_debt() * settlement_price; // round down, in favor of call order + + // Be here, the call order can be paying nothing + if( pays.amount == 0 && !bitasset.is_prediction_market ) // TODO remove this warning after hard fork core-342 + wlog( "Something for nothing issue (#184, variant E) occurred at block #${block}", ("block",head_block_num()) ); + } + else + pays = call_itr->get_debt().multiply_and_round_up( settlement_price ); // round up, in favor of global settlement fund if( pays > call_itr->get_collateral() ) pays = call_itr->get_collateral(); @@ -74,7 +80,7 @@ void database::globally_settle_asset( const asset_object& mia, const price& sett collateral_gathered += pays; const auto& order = *call_itr; ++call_itr; - FC_ASSERT( fill_order( order, pays, order.get_debt() ) ); + FC_ASSERT( fill_call_order( order, pays, order.get_debt(), settlement_price, true ) ); // call order is maker } modify( bitasset, [&]( asset_bitasset_data_object& obj ){ @@ -93,7 +99,95 @@ void database::globally_settle_asset( const asset_object& mia, const price& sett } FC_CAPTURE_AND_RETHROW( (mia)(settlement_price) ) } -void database::cancel_order(const force_settlement_object& order, bool create_virtual_op) +void database::revive_bitasset( const asset_object& bitasset ) +{ try { + FC_ASSERT( bitasset.is_market_issued() ); + const asset_bitasset_data_object& bad = bitasset.bitasset_data(*this); + FC_ASSERT( bad.has_settlement() ); + const asset_dynamic_data_object& bdd = bitasset.dynamic_asset_data_id(*this); + FC_ASSERT( !bad.is_prediction_market ); + FC_ASSERT( !bad.current_feed.settlement_price.is_null() ); + + if( bdd.current_supply > 0 ) + { + // Create + execute a "bid" with 0 additional collateral + const collateral_bid_object& pseudo_bid = create([&](collateral_bid_object& bid) { + bid.bidder = bitasset.issuer; + bid.inv_swan_price = asset(0, bad.options.short_backing_asset) + / asset(bdd.current_supply, bitasset.id); + }); + execute_bid( pseudo_bid, bdd.current_supply, bad.settlement_fund, bad.current_feed ); + } else + FC_ASSERT( bad.settlement_fund == 0 ); + + _cancel_bids_and_revive_mpa( bitasset, bad ); +} FC_CAPTURE_AND_RETHROW( (bitasset) ) } + +void database::_cancel_bids_and_revive_mpa( const asset_object& bitasset, const asset_bitasset_data_object& bad ) +{ try { + FC_ASSERT( bitasset.is_market_issued() ); + FC_ASSERT( bad.has_settlement() ); + FC_ASSERT( !bad.is_prediction_market ); + + // cancel remaining bids + const auto& bid_idx = 
get_index_type< collateral_bid_index >().indices().get(); + auto itr = bid_idx.lower_bound( boost::make_tuple( bitasset.id, + price::max( bad.options.short_backing_asset, bitasset.id ), + collateral_bid_id_type() ) ); + while( itr != bid_idx.end() && itr->inv_swan_price.quote.asset_id == bitasset.id ) + { + const collateral_bid_object& bid = *itr; + ++itr; + cancel_bid( bid ); + } + + // revive + modify( bad, [&]( asset_bitasset_data_object& obj ){ + obj.settlement_price = price(); + obj.settlement_fund = 0; + }); +} FC_CAPTURE_AND_RETHROW( (bitasset) ) } + +void database::cancel_bid(const collateral_bid_object& bid, bool create_virtual_op) +{ + adjust_balance(bid.bidder, bid.inv_swan_price.base); + + if( create_virtual_op ) + { + bid_collateral_operation vop; + vop.bidder = bid.bidder; + vop.additional_collateral = bid.inv_swan_price.base; + vop.debt_covered = asset( 0, bid.inv_swan_price.quote.asset_id ); + push_applied_operation( vop ); + } + remove(bid); +} + +void database::execute_bid( const collateral_bid_object& bid, share_type debt_covered, share_type collateral_from_fund, + const price_feed& current_feed ) +{ + const call_order_object& call_obj = create( [&](call_order_object& call ){ + call.borrower = bid.bidder; + call.collateral = bid.inv_swan_price.base.amount + collateral_from_fund; + call.debt = debt_covered; + call.call_price = price::call_price(asset(debt_covered, bid.inv_swan_price.quote.asset_id), + asset(call.collateral, bid.inv_swan_price.base.asset_id), + current_feed.maintenance_collateral_ratio); + }); + + // Note: CORE asset in collateral_bid_object is not counted in account_stats.total_core_in_orders + if( bid.inv_swan_price.base.asset_id == asset_id_type() ) + modify( get_account_stats_by_owner(bid.bidder), [&](account_statistics_object& stats) { + stats.total_core_in_orders += call_obj.collateral; + }); + + push_applied_operation( execute_bid_operation( bid.bidder, asset( call_obj.collateral, bid.inv_swan_price.base.asset_id ), + asset( debt_covered, bid.inv_swan_price.quote.asset_id ) ) ); + + remove(bid); +} + +void database::cancel_settle_order(const force_settlement_object& order, bool create_virtual_op) { adjust_balance(order.owner, order.balance); @@ -108,27 +202,99 @@ void database::cancel_order(const force_settlement_object& order, bool create_vi remove(order); } -void database::cancel_order( const limit_order_object& order, bool create_virtual_op ) +void database::cancel_limit_order( const limit_order_object& order, bool create_virtual_op, bool skip_cancel_fee ) { - auto refunded = order.amount_for_sale(); - - modify( order.seller(*this).statistics(*this),[&]( account_statistics_object& obj ){ - if( refunded.asset_id == asset_id_type() ) + // if need to create a virtual op, try deduct a cancellation fee here. + // there are two scenarios when order is cancelled and need to create a virtual op: + // 1. due to expiration: always deduct a fee if there is any fee deferred + // 2. 
due to cull_small: deduct a fee after hard fork 604, but not before (will set skip_cancel_fee) + const account_statistics_object* seller_acc_stats = nullptr; + const asset_dynamic_data_object* fee_asset_dyn_data = nullptr; + limit_order_cancel_operation vop; + share_type deferred_fee = order.deferred_fee; + asset deferred_paid_fee = order.deferred_paid_fee; + if( create_virtual_op ) + { + vop.order = order.id; + vop.fee_paying_account = order.seller; + // only deduct fee if not skipping fee, and there is any fee deferred + if( !skip_cancel_fee && deferred_fee > 0 ) { - obj.total_core_in_orders -= refunded.amount; + asset core_cancel_fee = current_fee_schedule().calculate_fee( vop ); + // cap the fee + if( core_cancel_fee.amount > deferred_fee ) + core_cancel_fee.amount = deferred_fee; + // if there is any CORE fee to deduct, redirect it to referral program + if( core_cancel_fee.amount > 0 ) + { + seller_acc_stats = &order.seller( *this ).statistics( *this ); + modify( *seller_acc_stats, [&]( account_statistics_object& obj ) { + obj.pay_fee( core_cancel_fee.amount, get_global_properties().parameters.cashback_vesting_threshold ); + } ); + deferred_fee -= core_cancel_fee.amount; + // handle originally paid fee if any: + // to_deduct = round_up( paid_fee * core_cancel_fee / deferred_core_fee_before_deduct ) + if( deferred_paid_fee.amount == 0 ) + { + vop.fee = core_cancel_fee; + } + else + { + fc::uint128 fee128( deferred_paid_fee.amount.value ); + fee128 *= core_cancel_fee.amount.value; + // to round up + fee128 += order.deferred_fee.value; + fee128 -= 1; + fee128 /= order.deferred_fee.value; + share_type cancel_fee_amount = fee128.to_uint64(); + // cancel_fee should be positive, pay it to asset's accumulated_fees + fee_asset_dyn_data = &deferred_paid_fee.asset_id(*this).dynamic_asset_data_id(*this); + modify( *fee_asset_dyn_data, [&](asset_dynamic_data_object& addo) { + addo.accumulated_fees += cancel_fee_amount; + }); + // cancel_fee should be no more than deferred_paid_fee + deferred_paid_fee.amount -= cancel_fee_amount; + vop.fee = asset( cancel_fee_amount, deferred_paid_fee.asset_id ); + } + } } - }); + } + + // refund funds in order + auto refunded = order.amount_for_sale(); + if( refunded.asset_id == asset_id_type() ) + { + if( seller_acc_stats == nullptr ) + seller_acc_stats = &order.seller( *this ).statistics( *this ); + modify( *seller_acc_stats, [&]( account_statistics_object& obj ) { + obj.total_core_in_orders -= refunded.amount; + }); + } adjust_balance(order.seller, refunded); - adjust_balance(order.seller, order.deferred_fee); - if( create_virtual_op ) + // refund fee + // could be virtual op or real op here + if( order.deferred_paid_fee.amount == 0 ) { - limit_order_cancel_operation vop; - vop.order = order.id; - vop.fee_paying_account = order.seller; - push_applied_operation( vop ); + // be here, order.create_time <= HARDFORK_CORE_604_TIME, or fee paid in CORE, or no fee to refund. 
+ // if order was created before hard fork 604 then cancelled no matter before or after hard fork 604, + // see it as fee paid in CORE, deferred_fee should be refunded to order owner but not fee pool + adjust_balance( order.seller, deferred_fee ); + } + else // need to refund fee in originally paid asset + { + adjust_balance(order.seller, deferred_paid_fee); + // be here, must have: fee_asset != CORE + if( fee_asset_dyn_data == nullptr ) + fee_asset_dyn_data = &deferred_paid_fee.asset_id(*this).dynamic_asset_data_id(*this); + modify( *fee_asset_dyn_data, [&](asset_dynamic_data_object& addo) { + addo.fee_pool += deferred_fee; + }); } + if( create_virtual_op ) + push_applied_operation( vop ); + remove(order); } @@ -146,14 +312,19 @@ bool maybe_cull_small_order( database& db, const limit_order_object& order ) */ if( order.amount_to_receive().amount == 0 ) { - ilog( "applied epsilon logic" ); - db.cancel_order(order); + if( order.deferred_fee > 0 && db.head_block_time() <= HARDFORK_CORE_604_TIME ) + { // TODO remove this warning after hard fork core-604 + wlog( "At block ${n}, cancelling order without charging a fee: ${o}", ("n",db.head_block_num())("o",order) ); + db.cancel_limit_order( order, true, true ); + } + else + db.cancel_limit_order( order ); return true; } return false; } -bool database::apply_order(const limit_order_object& new_order_object, bool allow_black_swan) +bool database::apply_order_before_hardfork_625(const limit_order_object& new_order_object, bool allow_black_swan) { auto order_id = new_order_object.id; const asset_object& sell_asset = get(new_order_object.amount_for_sale().asset_id); @@ -162,8 +333,8 @@ bool database::apply_order(const limit_order_object& new_order_object, bool allo // Possible optimization: We only need to check calls if both are true: // - The new order is at the front of the book // - The new order is below the call limit price - bool called_some = check_call_orders(sell_asset, allow_black_swan); - called_some |= check_call_orders(receive_asset, allow_black_swan); + bool called_some = check_call_orders(sell_asset, allow_black_swan, true); // the first time when checking, call order is maker + called_some |= check_call_orders(receive_asset, allow_black_swan, true); // the other side, same as above if( called_some && !find_object(order_id) ) // then we were filled by call order return true; @@ -189,8 +360,9 @@ bool database::apply_order(const limit_order_object& new_order_object, bool allo //Possible optimization: only check calls if the new order completely filled some old order //Do I need to check both assets? 
- check_call_orders(sell_asset, allow_black_swan); - check_call_orders(receive_asset, allow_black_swan); + check_call_orders(sell_asset, allow_black_swan); // after the new limit order filled some orders on the book, + // if a call order matches another order, the call order is taker + check_call_orders(receive_asset, allow_black_swan); // the other side, same as above const limit_order_object* updated_order_object = find< limit_order_object >( order_id ); if( updated_order_object == nullptr ) @@ -203,32 +375,178 @@ bool database::apply_order(const limit_order_object& new_order_object, bool allo return maybe_cull_small_order( *this, *updated_order_object ); } +bool database::apply_order(const limit_order_object& new_order_object, bool allow_black_swan) +{ + auto order_id = new_order_object.id; + asset_id_type sell_asset_id = new_order_object.sell_asset_id(); + asset_id_type recv_asset_id = new_order_object.receive_asset_id(); + + // We only need to check if the new order will match with others if it is at the front of the book + const auto& limit_price_idx = get_index_type().indices().get(); + auto limit_itr = limit_price_idx.lower_bound( boost::make_tuple( new_order_object.sell_price, order_id ) ); + if( limit_itr != limit_price_idx.begin() ) + { + --limit_itr; + if( limit_itr->sell_asset_id() == sell_asset_id && limit_itr->receive_asset_id() == recv_asset_id ) + return false; + } + + // this is the opposite side (on the book) + auto max_price = ~new_order_object.sell_price; + limit_itr = limit_price_idx.lower_bound( max_price.max() ); + auto limit_end = limit_price_idx.upper_bound( max_price ); + + // Order matching should be in favor of the taker. + // When a new limit order is created, e.g. an ask, need to check if it will match the highest bid. + // We were checking call orders first. However, due to MSSR (maximum_short_squeeze_ratio), + // effective price of call orders may be worse than limit orders, so we should also check limit orders here. + + // Question: will a new limit order trigger a black swan event? + // + // 1. as of writing, it's possible due to the call-order-and-limit-order overlapping issue: + // https://github.com/bitshares/bitshares-core/issues/606 . + // when it happens, a call order can be very big but don't match with the opposite, + // even when price feed is too far away, further than swan price, + // if the new limit order is in the same direction with the call orders, it can eat up all the opposite, + // then the call order will lose support and trigger a black swan event. + // 2. after issue 606 is fixed, there will be no limit order on the opposite side "supporting" the call order, + // so a new order in the same direction with the call order won't trigger a black swan event. + // 3. calling is one direction. if the new limit order is on the opposite direction, + // no matter if matches with the call, it won't trigger a black swan event. + // (if a match at MSSP caused a black swan event, it means the call order is already undercollateralized, + // which should trigger a black swan event earlier.) + // + // Since it won't trigger a black swan, no need to check here. + + // currently we don't do cross-market (triangle) matching. + // the limit order will only match with a call order if meet all of these: + // 1. it's buying collateral, which means sell_asset is the MIA, receive_asset is the backing asset. + // 2. sell_asset is not a prediction market + // 3. sell_asset is not globally settled + // 4. sell_asset has a valid price feed + // 5. 
the call order's collateral ratio is below or equals to MCR + // 6. the limit order provided a good price + + bool to_check_call_orders = false; + const asset_object& sell_asset = sell_asset_id( *this ); + const asset_bitasset_data_object* sell_abd = nullptr; + price call_match_price; + if( sell_asset.is_market_issued() ) + { + sell_abd = &sell_asset.bitasset_data( *this ); + if( sell_abd->options.short_backing_asset == recv_asset_id + && !sell_abd->is_prediction_market + && !sell_abd->has_settlement() + && !sell_abd->current_feed.settlement_price.is_null() ) + { + call_match_price = ~sell_abd->current_feed.max_short_squeeze_price(); + if( ~new_order_object.sell_price <= call_match_price ) // new limit order price is good enough to match a call + to_check_call_orders = true; + } + } + + bool finished = false; // whether the new order is gone + if( to_check_call_orders ) + { + // check limit orders first, match the ones with better price in comparison to call orders + while( !finished && limit_itr != limit_end && limit_itr->sell_price > call_match_price ) + { + auto old_limit_itr = limit_itr; + ++limit_itr; + // match returns 2 when only the old order was fully filled. In this case, we keep matching; otherwise, we stop. + finished = ( match( new_order_object, *old_limit_itr, old_limit_itr->sell_price ) != 2 ); + } + + if( !finished ) + { + // check if there are margin calls + const auto& call_price_idx = get_index_type().indices().get(); + auto call_min = price::min( recv_asset_id, sell_asset_id ); + while( !finished ) + { + // assume hard fork core-343 and core-625 will take place at same time, always check call order with least call_price + auto call_itr = call_price_idx.lower_bound( call_min ); + if( call_itr == call_price_idx.end() + || call_itr->debt_type() != sell_asset_id + // feed protected https://github.com/cryptonomex/graphene/issues/436 + || call_itr->call_price > ~sell_abd->current_feed.settlement_price ) + break; + // assume hard fork core-338 and core-625 will take place at same time, not checking HARDFORK_CORE_338_TIME here. + int match_result = match( new_order_object, *call_itr, call_match_price, + sell_abd->current_feed.settlement_price, + sell_abd->current_feed.maintenance_collateral_ratio ); + // match returns 1 or 3 when the new order was fully filled. In this case, we stop matching; otherwise keep matching. + // since match can return 0 due to BSIP38 (hard fork core-834), we no longer only check if the result is 2. + if( match_result == 1 || match_result == 3 ) + finished = true; + } + } + } + + // still need to check limit orders + while( !finished && limit_itr != limit_end ) + { + auto old_limit_itr = limit_itr; + ++limit_itr; + // match returns 2 when only the old order was fully filled. In this case, we keep matching; otherwise, we stop. + finished = ( match( new_order_object, *old_limit_itr, old_limit_itr->sell_price ) != 2 ); + } + + const limit_order_object* updated_order_object = find< limit_order_object >( order_id ); + if( updated_order_object == nullptr ) + return true; + + // before #555 we would have done maybe_cull_small_order() logic as a result of fill_order() being called by match() above + // however after #555 we need to get rid of small orders -- #555 hardfork defers logic that was done too eagerly before, and + // this is the point it's deferred to. + return maybe_cull_small_order( *this, *updated_order_object ); +} + /** - * Matches the two orders, + * Matches the two orders, the first parameter is taker, the second is maker. 
* * @return a bit field indicating which orders were filled (and thus removed) * * 0 - no orders were matched - * 1 - bid was filled - * 2 - ask was filled + * 1 - taker was filled + * 2 - maker was filled * 3 - both were filled */ -template -int database::match( const limit_order_object& usd, const OrderType& core, const price& match_price ) +int database::match( const limit_order_object& usd, const limit_order_object& core, const price& match_price ) { - assert( usd.sell_price.quote.asset_id == core.sell_price.base.asset_id ); - assert( usd.sell_price.base.asset_id == core.sell_price.quote.asset_id ); - assert( usd.for_sale > 0 && core.for_sale > 0 ); + FC_ASSERT( usd.sell_price.quote.asset_id == core.sell_price.base.asset_id ); + FC_ASSERT( usd.sell_price.base.asset_id == core.sell_price.quote.asset_id ); + FC_ASSERT( usd.for_sale > 0 && core.for_sale > 0 ); auto usd_for_sale = usd.amount_for_sale(); auto core_for_sale = core.amount_for_sale(); asset usd_pays, usd_receives, core_pays, core_receives; - if( usd_for_sale <= core_for_sale * match_price ) + auto maint_time = get_dynamic_global_properties().next_maintenance_time; + bool before_core_hardfork_342 = ( maint_time <= HARDFORK_CORE_342_TIME ); // better rounding + + bool cull_taker = false; + if( usd_for_sale <= core_for_sale * match_price ) // rounding down here should be fine { - core_receives = usd_for_sale; - usd_receives = usd_for_sale * match_price; + usd_receives = usd_for_sale * match_price; // round down, in favor of bigger order + + // Be here, it's possible that taker is paying something for nothing due to partially filled in last loop. + // In this case, we see it as filled and cancel it later + if( usd_receives.amount == 0 && maint_time > HARDFORK_CORE_184_TIME ) + return 1; + + if( before_core_hardfork_342 ) + core_receives = usd_for_sale; + else + { + // The remaining amount in order `usd` would be too small, + // so we should cull the order in fill_limit_order() below. + // The order would receive 0 even at `match_price`, so it would receive 0 at its own price, + // so calling maybe_cull_small() will always cull it. + core_receives = usd_receives.multiply_and_round_up( match_price ); + cull_taker = true; + } } else { @@ -236,42 +554,186 @@ int database::match( const limit_order_object& usd, const OrderType& core, const //This assert is not always true -- see trade_amount_equals_zero in operation_tests.cpp //Although usd_for_sale is greater than core_for_sale * match_price, core_for_sale == usd_for_sale * match_price //Removing the assert seems to be safe -- apparently no asset is created or destroyed. - usd_receives = core_for_sale; - core_receives = core_for_sale * match_price; + + // The maker won't be paying something for nothing, since if it would, it would have been cancelled already. 
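// A minimal sketch of the better-rounding scheme (hard fork core-342) described in the comments
// above, for the case where the taker is the smaller order: the taker's proceeds are computed at
// the match price rounding down (in favor of the bigger order), and what the taker pays is then
// derived by multiplying back and rounding up, so no value is created and the unmatched remainder
// stays with the taker as dust to be culled. Price, mul_round_down and mul_round_up_inverse are
// invented names for this illustration; this is not graphene's price arithmetic.
#include <cassert>
#include <cstdint>

namespace rounding_sketch {

struct Price { int64_t num; int64_t den; };   // den units of asset A trade for num units of asset B

inline int64_t mul_round_down( int64_t a_amount, const Price& p )       // A -> B, rounded down
{ return static_cast<int64_t>( static_cast<__int128>(a_amount) * p.num / p.den ); }

inline int64_t mul_round_up_inverse( int64_t b_amount, const Price& p ) // B -> A, rounded up
{ return static_cast<int64_t>( ( static_cast<__int128>(b_amount) * p.den + p.num - 1 ) / p.num ); }

inline void example()
{
   Price match_price{ 3, 7 };          // 7 A == 3 B
   int64_t taker_for_sale = 9;         // taker sells 9 A; the maker (bigger order) buys A with B

   int64_t taker_receives = mul_round_down( taker_for_sale, match_price );        // 27/7 -> 3 B
   int64_t taker_pays     = mul_round_up_inverse( taker_receives, match_price );  // ceil(21/3) -> 7 A

   assert( taker_receives == 3 && taker_pays == 7 );
   assert( taker_pays <= taker_for_sale );   // the taker never pays more than it offered
   // Before hard fork core-342 the taker would have paid all 9 A for the same 3 B;
   // with the round-up-back scheme it pays 7 A, and the 2 A remainder is dust that gets culled.
}

} // namespace rounding_sketch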
+ core_receives = core_for_sale * match_price; // round down, in favor of bigger order + if( before_core_hardfork_342 ) + usd_receives = core_for_sale; + else + // The remaining amount in order `core` would be too small, + // so the order will be culled in fill_limit_order() below + usd_receives = core_receives.multiply_and_round_up( match_price ); } core_pays = usd_receives; usd_pays = core_receives; - assert( usd_pays == usd.amount_for_sale() || - core_pays == core.amount_for_sale() ); + if( before_core_hardfork_342 ) + FC_ASSERT( usd_pays == usd.amount_for_sale() || + core_pays == core.amount_for_sale() ); int result = 0; - result |= fill_order( usd, usd_pays, usd_receives, false ); - result |= fill_order( core, core_pays, core_receives, true ) << 1; - assert( result != 0 ); + result |= fill_limit_order( usd, usd_pays, usd_receives, cull_taker, match_price, false ); // the first param is taker + result |= fill_limit_order( core, core_pays, core_receives, true, match_price, true ) << 1; // the second param is maker + FC_ASSERT( result != 0 ); return result; } -int database::match( const limit_order_object& bid, const limit_order_object& ask, const price& match_price ) +int database::match( const limit_order_object& bid, const call_order_object& ask, const price& match_price, + const price& feed_price, const uint16_t maintenance_collateral_ratio ) { - return match( bid, ask, match_price ); + FC_ASSERT( bid.sell_asset_id() == ask.debt_type() ); + FC_ASSERT( bid.receive_asset_id() == ask.collateral_type() ); + FC_ASSERT( bid.for_sale > 0 && ask.debt > 0 && ask.collateral > 0 ); + + auto maint_time = get_dynamic_global_properties().next_maintenance_time; + // TODO remove when we're sure it's always false + bool before_core_hardfork_184 = ( maint_time <= HARDFORK_CORE_184_TIME ); // something-for-nothing + // TODO remove when we're sure it's always false + bool before_core_hardfork_342 = ( maint_time <= HARDFORK_CORE_342_TIME ); // better rounding + // TODO remove when we're sure it's always false + bool before_core_hardfork_834 = ( maint_time <= HARDFORK_CORE_834_TIME ); // target collateral ratio option + if( before_core_hardfork_184 ) + ilog( "match(limit,call) is called before hardfork core-184 at block #${block}", ("block",head_block_num()) ); + if( before_core_hardfork_342 ) + ilog( "match(limit,call) is called before hardfork core-342 at block #${block}", ("block",head_block_num()) ); + if( before_core_hardfork_834 ) + ilog( "match(limit,call) is called before hardfork core-834 at block #${block}", ("block",head_block_num()) ); + + bool cull_taker = false; + + asset usd_for_sale = bid.amount_for_sale(); + // TODO if we're sure `before_core_hardfork_834` is always false, remove the check + asset usd_to_buy = ( before_core_hardfork_834 ? + ask.get_debt() : + asset( ask.get_max_debt_to_cover( match_price, feed_price, maintenance_collateral_ratio ), + ask.debt_type() ) ); + + asset call_pays, call_receives, order_pays, order_receives; + if( usd_to_buy > usd_for_sale ) + { // fill limit order + order_receives = usd_for_sale * match_price; // round down here, in favor of call order + + // Be here, it's possible that taker is paying something for nothing due to partially filled in last loop. 
+ // In this case, we see it as filled and cancel it later + // TODO remove hardfork check when we're sure it's always after hard fork (but keep the zero amount check) + if( order_receives.amount == 0 && !before_core_hardfork_184 ) + return 1; + + if( before_core_hardfork_342 ) // TODO remove this "if" when we're sure it's always false (keep the code in else) + call_receives = usd_for_sale; + else + { + // The remaining amount in the limit order would be too small, + // so we should cull the order in fill_limit_order() below. + // The order would receive 0 even at `match_price`, so it would receive 0 at its own price, + // so calling maybe_cull_small() will always cull it. + call_receives = order_receives.multiply_and_round_up( match_price ); + cull_taker = true; + } + } + else + { // fill call order + call_receives = usd_to_buy; + if( before_core_hardfork_342 ) // TODO remove this "if" when we're sure it's always false (keep the code in else) + { + order_receives = usd_to_buy * match_price; // round down here, in favor of call order + // TODO remove hardfork check when we're sure it's always after hard fork (but keep the zero amount check) + if( order_receives.amount == 0 && !before_core_hardfork_184 ) + return 1; + } + else // has hardfork core-342 + order_receives = usd_to_buy.multiply_and_round_up( match_price ); // round up here, in favor of limit order + } + + call_pays = order_receives; + order_pays = call_receives; + + int result = 0; + result |= fill_limit_order( bid, order_pays, order_receives, cull_taker, match_price, false ); // the limit order is taker + result |= fill_call_order( ask, call_pays, call_receives, match_price, true ) << 1; // the call order is maker + // result can be 0 when call order has target_collateral_ratio option set. + + return result; } asset database::match( const call_order_object& call, const force_settlement_object& settle, const price& match_price, - asset max_settlement ) + asset max_settlement, + const price& fill_price ) { try { FC_ASSERT(call.get_debt().asset_id == settle.balance.asset_id ); FC_ASSERT(call.debt > 0 && call.collateral > 0 && settle.balance.amount > 0); + auto maint_time = get_dynamic_global_properties().next_maintenance_time; + bool before_core_hardfork_342 = ( maint_time <= HARDFORK_CORE_342_TIME ); // better rounding + auto settle_for_sale = std::min(settle.balance, max_settlement); auto call_debt = call.get_debt(); asset call_receives = std::min(settle_for_sale, call_debt); - asset call_pays = call_receives * match_price; + asset call_pays = call_receives * match_price; // round down here, in favor of call order, for first check + // TODO possible optimization: check need to round up or down first + + // Be here, the call order may be paying nothing. 
+ bool cull_settle_order = false; // whether need to cancel dust settle order + if( call_pays.amount == 0 ) + { + if( maint_time > HARDFORK_CORE_184_TIME ) + { + if( call_receives == call_debt ) // the call order is smaller than or equal to the settle order + { + wlog( "Something for nothing issue (#184, variant C-1) handled at block #${block}", ("block",head_block_num()) ); + call_pays.amount = 1; + } + else + { + if( call_receives == settle.balance ) // the settle order is smaller + { + wlog( "Something for nothing issue (#184, variant C-2) handled at block #${block}", ("block",head_block_num()) ); + cancel_settle_order( settle ); + } + // else do nothing: neither order will be completely filled, perhaps due to max_settlement too small + + return asset( 0, settle.balance.asset_id ); + } + } + else // TODO remove this warning after hard fork core-184 + wlog( "Something for nothing issue (#184, variant C) occurred at block #${block}", ("block",head_block_num()) ); + } + else // the call order is not paying nothing, but still possible it's paying more than minimum required due to rounding + { + if( !before_core_hardfork_342 ) + { + if( call_receives == call_debt ) // the call order is smaller than or equal to the settle order + { + call_pays = call_receives.multiply_and_round_up( match_price ); // round up here, in favor of settle order + // be here, we should have: call_pays <= call_collateral + } + else + { + // be here, call_pays has been rounded down + + // be here, we should have: call_pays <= call_collateral + + if( call_receives == settle.balance ) // the settle order will be completely filled, assuming we need to cull it + cull_settle_order = true; + // else do nothing, since we can't cull the settle order + + call_receives = call_pays.multiply_and_round_up( match_price ); // round up here to mitigate rounding issue (core-342). + // It is important to understand here that the newly + // rounded up call_receives won't be greater than the + // old call_receives. + + if( call_receives == settle.balance ) // the settle order will be completely filled, no need to cull + cull_settle_order = false; + // else do nothing, since we still need to cull the settle order or still can't cull the settle order + } + } + } + asset settle_pays = call_receives; asset settle_receives = call_pays; @@ -282,17 +744,28 @@ asset database::match( const call_order_object& call, * can trigger a black swan. So now we must cancel the forced settlement * object. 
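// A small sketch of the "#184 something-for-nothing" decision handled above, using plain
// integers instead of graphene's asset objects; the enum and function name are invented for
// this illustration.
#include <cstdint>

namespace dust_payout_sketch {

enum class DustAction
{
   proceed,               // payout is non-zero: fill as usual
   pay_one_satoshi,       // the whole call is being closed: round the zero payout up to 1 (variant C-1)
   cancel_settle_order,   // the settle order itself is dust: cancel it, fill nothing this round (variant C-2)
   skip                   // neither side fully fills (e.g. max_settlement too small): fill nothing this round
};

inline DustAction handle_zero_payout( int64_t call_pays_rounded_down,  // collateral owed after rounding down
                                      int64_t call_receives,           // debt covered by this match
                                      int64_t call_debt,               // total debt of the call order
                                      int64_t settle_balance )         // remaining settle order amount
{
   if( call_pays_rounded_down != 0 )
      return DustAction::proceed;
   if( call_receives == call_debt )
      return DustAction::pay_one_satoshi;
   if( call_receives == settle_balance )
      return DustAction::cancel_settle_order;
   return DustAction::skip;
}

} // namespace dust_payout_sketch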
*/ - GRAPHENE_ASSERT( call_pays < call.get_collateral(), black_swan_exception, "" ); + if( before_core_hardfork_342 ) + { + auto call_collateral = call.get_collateral(); + if( call_pays == call_collateral ) // TODO remove warning after hard fork core-342 + wlog( "Incorrectly captured black swan event at block #${block}", ("block",head_block_num()) ); + GRAPHENE_ASSERT( call_pays < call_collateral, black_swan_exception, "" ); - assert( settle_pays == settle_for_sale || call_receives == call.get_debt() ); + assert( settle_pays == settle_for_sale || call_receives == call.get_debt() ); + } + // else do nothing, since black swan event won't happen, and the assertion is no longer true + + fill_call_order( call, call_pays, call_receives, fill_price, true ); // call order is maker + fill_settle_order( settle, settle_pays, settle_receives, fill_price, false ); // force settlement order is taker - fill_order(call, call_pays, call_receives); - fill_order(settle, settle_pays, settle_receives); + if( cull_settle_order ) + cancel_settle_order( settle ); return call_receives; } FC_CAPTURE_AND_RETHROW( (call)(settle)(match_price)(max_settlement) ) } -bool database::fill_order( const limit_order_object& order, const asset& pays, const asset& receives, bool cull_if_small ) +bool database::fill_limit_order( const limit_order_object& order, const asset& pays, const asset& receives, bool cull_if_small, + const price& fill_price, const bool is_maker ) { try { cull_if_small |= (head_block_time() < HARDFORK_555_TIME); @@ -306,7 +779,7 @@ bool database::fill_order( const limit_order_object& order, const asset& pays, c pay_order( seller, receives - issuer_fees, pays ); assert( pays.asset_id != receives.asset_id ); - push_applied_operation( fill_order_operation( order.id, order.seller, pays, receives, issuer_fees ) ); + push_applied_operation( fill_order_operation( order.id, order.seller, pays, receives, issuer_fees, fill_price, is_maker ) ); // conditional because cheap integer comparison may allow us to avoid two expensive modify() and object lookups if( order.deferred_fee > 0 ) @@ -317,6 +790,14 @@ bool database::fill_order( const limit_order_object& order, const asset& pays, c } ); } + if( order.deferred_paid_fee.amount > 0 ) // implies head_block_time() > HARDFORK_CORE_604_TIME + { + const auto& fee_asset_dyn_data = order.deferred_paid_fee.asset_id(*this).dynamic_asset_data_id(*this); + modify( fee_asset_dyn_data, [&](asset_dynamic_data_object& addo) { + addo.accumulated_fees += order.deferred_paid_fee.amount; + }); + } + if( pays == order.amount_for_sale() ) { remove( order ); @@ -327,6 +808,7 @@ bool database::fill_order( const limit_order_object& order, const asset& pays, c modify( order, [&]( limit_order_object& b ) { b.for_sale -= pays.amount; b.deferred_fee = 0; + b.deferred_paid_fee.amount = 0; }); if( cull_if_small ) return maybe_cull_small_order( *this, order ); @@ -335,12 +817,16 @@ bool database::fill_order( const limit_order_object& order, const asset& pays, c } FC_CAPTURE_AND_RETHROW( (order)(pays)(receives) ) } -bool database::fill_order( const call_order_object& order, const asset& pays, const asset& receives ) +bool database::fill_call_order( const call_order_object& order, const asset& pays, const asset& receives, + const price& fill_price, const bool is_maker ) { try { - //idump((pays)(receives)(order)); - FC_ASSERT( order.get_debt().asset_id == receives.asset_id ); - FC_ASSERT( order.get_collateral().asset_id == pays.asset_id ); - FC_ASSERT( order.get_collateral() >= pays ); + FC_ASSERT( 
order.debt_type() == receives.asset_id ); + FC_ASSERT( order.collateral_type() == pays.asset_id ); + FC_ASSERT( order.collateral >= pays.amount ); + + // TODO pass in mia and bitasset_data for better performance + const asset_object& mia = receives.asset_id(*this); + FC_ASSERT( mia.is_market_issued() ); optional collateral_freed; modify( order, [&]( call_order_object& o ){ @@ -351,44 +837,43 @@ bool database::fill_order( const call_order_object& order, const asset& pays, co collateral_freed = o.get_collateral(); o.collateral = 0; } - }); - const asset_object& mia = receives.asset_id(*this); - assert( mia.is_market_issued() ); + else if( get_dynamic_global_properties().next_maintenance_time > HARDFORK_CORE_343_TIME ) + o.call_price = price::call_price( o.get_debt(), o.get_collateral(), + mia.bitasset_data(*this).current_feed.maintenance_collateral_ratio ); + }); + // update current supply const asset_dynamic_data_object& mia_ddo = mia.dynamic_asset_data_id(*this); - modify( mia_ddo, [&]( asset_dynamic_data_object& ao ){ - //idump((receives)); - ao.current_supply -= receives.amount; + modify( mia_ddo, [&receives]( asset_dynamic_data_object& ao ){ + ao.current_supply -= receives.amount; }); - const account_object& borrower = order.borrower(*this); - if( collateral_freed || pays.asset_id == asset_id_type() ) - { - const account_statistics_object& borrower_statistics = borrower.statistics(*this); - if( collateral_freed ) - adjust_balance(borrower.get_id(), *collateral_freed); - - modify( borrower_statistics, [&]( account_statistics_object& b ){ - if( collateral_freed && collateral_freed->amount > 0 ) - b.total_core_in_orders -= collateral_freed->amount; - if( pays.asset_id == asset_id_type() ) - b.total_core_in_orders -= pays.amount; + // Adjust balance + if( collateral_freed.valid() ) + adjust_balance( order.borrower, *collateral_freed ); - assert( b.total_core_in_orders >= 0 ); - }); + // Update account statistics. 
We know that order.collateral_type() == pays.asset_id + if( pays.asset_id == asset_id_type() ) + { + modify( get_account_stats_by_owner(order.borrower), [&collateral_freed,&pays]( account_statistics_object& b ){ + b.total_core_in_orders -= pays.amount; + if( collateral_freed.valid() ) + b.total_core_in_orders -= collateral_freed->amount; + }); } - assert( pays.asset_id != receives.asset_id ); - push_applied_operation( fill_order_operation{ order.id, order.borrower, pays, receives, asset(0, pays.asset_id) } ); + push_applied_operation( fill_order_operation( order.id, order.borrower, pays, receives, + asset(0, pays.asset_id), fill_price, is_maker ) ); - if( collateral_freed ) + if( collateral_freed.valid() ) remove( order ); return collateral_freed.valid(); } FC_CAPTURE_AND_RETHROW( (order)(pays)(receives) ) } -bool database::fill_order(const force_settlement_object& settle, const asset& pays, const asset& receives) +bool database::fill_settle_order( const force_settlement_object& settle, const asset& pays, const asset& receives, + const price& fill_price, const bool is_maker ) { try { bool filled = false; @@ -406,7 +891,7 @@ bool database::fill_order(const force_settlement_object& settle, const asset& pa adjust_balance(settle.owner, receives - issuer_fees); assert( pays.asset_id != receives.asset_id ); - push_applied_operation( fill_order_operation{ settle.id, settle.owner, pays, receives, issuer_fees } ); + push_applied_operation( fill_order_operation( settle.id, settle.owner, pays, receives, issuer_fees, fill_price, is_maker ) ); if (filled) remove(settle); @@ -423,23 +908,29 @@ bool database::fill_order(const force_settlement_object& settle, const asset& pa * @param mia - the market issued asset that should be called. * @param enable_black_swan - when adjusting collateral, triggering a black swan is invalid and will throw * if enable_black_swan is not set to true. + * @param for_new_limit_order - true if this function is called when matching call orders with a new limit order + * @param bitasset_ptr - an optional pointer to the bitasset_data object of the asset * * @return true if a margin call was executed. */ -bool database::check_call_orders(const asset_object& mia, bool enable_black_swan) +bool database::check_call_orders( const asset_object& mia, bool enable_black_swan, bool for_new_limit_order, + const asset_bitasset_data_object* bitasset_ptr ) { try { + const auto& dyn_prop = get_dynamic_global_properties(); + auto maint_time = dyn_prop.next_maintenance_time; + if( for_new_limit_order ) + FC_ASSERT( maint_time <= HARDFORK_CORE_625_TIME ); // `for_new_limit_order` is only true before HF 338 / 625 + if( !mia.is_market_issued() ) return false; - if( check_for_blackswan( mia, enable_black_swan ) ) + const asset_bitasset_data_object& bitasset = ( bitasset_ptr ? 
*bitasset_ptr : mia.bitasset_data(*this) ); + + if( check_for_blackswan( mia, enable_black_swan, &bitasset ) ) return false; - const asset_bitasset_data_object& bitasset = mia.bitasset_data(*this); if( bitasset.is_prediction_market ) return false; if( bitasset.current_feed.settlement_price.is_null() ) return false; - const call_order_index& call_index = get_index_type(); - const auto& call_price_index = call_index.indices().get(); - const limit_order_index& limit_index = get_index_type(); const auto& limit_price_index = limit_index.indices().get(); @@ -456,6 +947,9 @@ bool database::check_call_orders(const asset_object& mia, bool enable_black_swan if( limit_itr == limit_end ) return false; + const call_order_index& call_index = get_index_type(); + const auto& call_price_index = call_index.indices().get(); + auto call_min = price::min( bitasset.options.short_backing_asset, mia.id ); auto call_max = price::max( bitasset.options.short_backing_asset, mia.id ); auto call_itr = call_price_index.lower_bound( call_min ); @@ -464,84 +958,132 @@ bool database::check_call_orders(const asset_object& mia, bool enable_black_swan bool filled_limit = false; bool margin_called = false; - while( !check_for_blackswan( mia, enable_black_swan ) && call_itr != call_end ) + auto head_time = head_block_time(); + auto head_num = head_block_num(); + + bool before_hardfork_615 = ( head_time < HARDFORK_615_TIME ); + bool after_hardfork_436 = ( head_time > HARDFORK_436_TIME ); + + bool before_core_hardfork_184 = ( maint_time <= HARDFORK_CORE_184_TIME ); // something-for-nothing + bool before_core_hardfork_342 = ( maint_time <= HARDFORK_CORE_342_TIME ); // better rounding + bool before_core_hardfork_343 = ( maint_time <= HARDFORK_CORE_343_TIME ); // update call_price after partially filled + bool before_core_hardfork_453 = ( maint_time <= HARDFORK_CORE_453_TIME ); // multiple matching issue + bool before_core_hardfork_606 = ( maint_time <= HARDFORK_CORE_606_TIME ); // feed always trigger call + bool before_core_hardfork_834 = ( maint_time <= HARDFORK_CORE_834_TIME ); // target collateral ratio option + + while( !check_for_blackswan( mia, enable_black_swan, &bitasset ) // TODO perhaps improve performance by passing in iterators + && call_itr != call_end + && limit_itr != limit_end ) { bool filled_call = false; - price match_price; - asset usd_for_sale; - if( limit_itr != limit_end ) - { - assert( limit_itr != limit_price_index.end() ); - match_price = limit_itr->sell_price; - usd_for_sale = limit_itr->amount_for_sale(); - } - else return margin_called; - - match_price.validate(); - // would be margin called, but there is no matching order #436 - bool feed_protected = ( bitasset.current_feed.settlement_price > ~call_itr->call_price ); - if( feed_protected && (head_block_time() > HARDFORK_436_TIME) ) - return margin_called; + const call_order_object& call_order = *call_itr; - // would be margin called, but there is no matching order - if( match_price > ~call_itr->call_price ) + // Feed protected (don't call if CR>MCR) https://github.com/cryptonomex/graphene/issues/436 + if( after_hardfork_436 && bitasset.current_feed.settlement_price > ~call_order.call_price ) return margin_called; - if( feed_protected ) - { - ilog( "Feed protected margin call executing (HARDFORK_436_TIME not here yet)" ); - idump( (*call_itr) ); - idump( (*limit_itr) ); - } + const limit_order_object& limit_order = *limit_itr; + price match_price = limit_order.sell_price; + // There was a check `match_price.validate();` here, which is removed now because 
it always passes - // idump((*call_itr)); - // idump((*limit_itr)); - - // ilog( "match_price <= ~call_itr->call_price performing a margin call" ); + // Old rule: margin calls can only buy high https://github.com/bitshares/bitshares-core/issues/606 + if( before_core_hardfork_606 && match_price > ~call_order.call_price ) + return margin_called; margin_called = true; - auto usd_to_buy = call_itr->get_debt(); - - if( usd_to_buy * match_price > call_itr->get_collateral() ) + auto usd_to_buy = call_order.get_debt(); + if( usd_to_buy * match_price > call_order.get_collateral() ) { - elog( "black swan detected" ); + elog( "black swan detected on asset ${symbol} (${id}) at block ${b}", + ("id",mia.id)("symbol",mia.symbol)("b",head_num) ); edump((enable_black_swan)); FC_ASSERT( enable_black_swan ); globally_settle_asset(mia, bitasset.current_feed.settlement_price ); return true; } + if( !before_core_hardfork_834 ) + usd_to_buy.amount = call_order.get_max_debt_to_cover( match_price, + bitasset.current_feed.settlement_price, + bitasset.current_feed.maintenance_collateral_ratio ); + + asset usd_for_sale = limit_order.amount_for_sale(); asset call_pays, call_receives, order_pays, order_receives; - if( usd_to_buy >= usd_for_sale ) + if( usd_to_buy > usd_for_sale ) { // fill order - call_receives = usd_for_sale; - order_receives = usd_for_sale * match_price; - call_pays = order_receives; - order_pays = usd_for_sale; + order_receives = usd_for_sale * match_price; // round down, in favor of call order + + // Be here, the limit order won't be paying something for nothing, since if it would, it would have + // been cancelled elsewhere already (a maker limit order won't be paying something for nothing): + // * after hard fork core-625, the limit order will be always a maker if entered this function; + // * before hard fork core-625, + // * when the limit order is a taker, it could be paying something for nothing only when + // the call order is smaller and is too small + // * when the limit order is a maker, it won't be paying something for nothing + if( order_receives.amount == 0 ) // TODO this should not happen. remove the warning after confirmed + { + if( before_core_hardfork_184 ) + wlog( "Something for nothing issue (#184, variant D-1) occurred at block #${block}", ("block",head_num) ); + else + wlog( "Something for nothing issue (#184, variant D-2) occurred at block #${block}", ("block",head_num) ); + } + + if( before_core_hardfork_342 ) + call_receives = usd_for_sale; + else + // The remaining amount in the limit order would be too small, + // so we should cull the order in fill_limit_order() below. + // The order would receive 0 even at `match_price`, so it would receive 0 at its own price, + // so calling maybe_cull_small() will always cull it. 
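// The culling rule referred to above, as a tiny self-contained sketch: an order whose remaining
// amount would buy zero of the other asset even at its own price is dust and gets cancelled.
// The helper name and the num/den price encoding are invented for this illustration.
#include <cstdint>

namespace cull_sketch {

// own_price: num units of the receive asset per den units of the sell asset
inline bool is_dust( int64_t remaining_for_sale, int64_t own_price_num, int64_t own_price_den )
{
   return ( static_cast<__int128>(remaining_for_sale) * own_price_num / own_price_den ) == 0;
}

} // namespace cull_sketch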
+ call_receives = order_receives.multiply_and_round_up( match_price ); filled_limit = true; - filled_call = (usd_to_buy == usd_for_sale); + } else { // fill call call_receives = usd_to_buy; - order_receives = usd_to_buy * match_price; - call_pays = order_receives; - order_pays = usd_to_buy; - filled_call = true; + if( before_core_hardfork_342 ) + { + order_receives = usd_to_buy * match_price; // round down, in favor of call order + + // Be here, the limit order would be paying something for nothing + if( order_receives.amount == 0 ) // TODO remove warning after hard fork core-342 + wlog( "Something for nothing issue (#184, variant D) occurred at block #${block}", ("block",head_num) ); + } + else + order_receives = usd_to_buy.multiply_and_round_up( match_price ); // round up, in favor of limit order + + filled_call = true; // this is safe, since BSIP38 (hard fork core-834) depends on BSIP31 (hard fork core-343) + + if( usd_to_buy == usd_for_sale ) + filled_limit = true; + else if( filled_limit && maint_time <= HARDFORK_CORE_453_TIME ) // TODO remove warning after hard fork core-453 + { + wlog( "Multiple limit match problem (issue 453) occurred at block #${block}", ("block",head_num) ); + if( before_hardfork_615 ) + _issue_453_affected_assets.insert( bitasset.asset_id ); + } } - FC_ASSERT( filled_call || filled_limit ); + call_pays = order_receives; + order_pays = call_receives; - auto old_call_itr = call_itr; - if( filled_call ) ++call_itr; - fill_order(*old_call_itr, call_pays, call_receives); + if( filled_call && before_core_hardfork_343 ) + ++call_itr; + // when for_new_limit_order is true, the call order is maker, otherwise the call order is taker + fill_call_order( call_order, call_pays, call_receives, match_price, for_new_limit_order ); + if( !before_core_hardfork_343 ) + call_itr = call_price_index.lower_bound( call_min ); - auto old_limit_itr = filled_limit ? 
limit_itr++ : limit_itr; - fill_order(*old_limit_itr, order_pays, order_receives, true); + auto next_limit_itr = std::next( limit_itr ); + // when for_new_limit_order is true, the limit order is taker, otherwise the limit order is maker + bool really_filled = fill_limit_order( limit_order, order_pays, order_receives, true, match_price, !for_new_limit_order ); + if( really_filled || ( filled_limit && before_core_hardfork_453 ) ) + limit_itr = next_limit_itr; - } // whlie call_itr != call_end + } // while call_itr != call_end return margin_called; } FC_CAPTURE_AND_RETHROW() } diff --git a/libraries/chain/db_notify.cpp b/libraries/chain/db_notify.cpp new file mode 100644 index 0000000000..39ff36ff4f --- /dev/null +++ b/libraries/chain/db_notify.cpp @@ -0,0 +1,484 @@ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace fc; +using namespace graphene::chain; + +// TODO: Review all of these, especially no-ops +struct get_impacted_account_visitor +{ + flat_set& _impacted; + get_impacted_account_visitor( flat_set& impact ):_impacted(impact) {} + typedef void result_type; + + void operator()( const transfer_operation& op ) + { + _impacted.insert( op.to ); + _impacted.insert( op.fee_payer() ); // from + } + void operator()( const asset_claim_fees_operation& op ) + { + _impacted.insert( op.fee_payer() ); // issuer + } + void operator()( const asset_claim_pool_operation& op ) + { + _impacted.insert( op.fee_payer() ); // issuer + } + void operator()( const limit_order_create_operation& op ) + { + _impacted.insert( op.fee_payer() ); // seller + } + void operator()( const limit_order_cancel_operation& op ) + { + _impacted.insert( op.fee_payer() ); // fee_paying_account + } + void operator()( const call_order_update_operation& op ) + { + _impacted.insert( op.fee_payer() ); // funding_account + } + void operator()( const bid_collateral_operation& op ) + { + _impacted.insert( op.fee_payer() ); // bidder + } + void operator()( const fill_order_operation& op ) + { + _impacted.insert( op.fee_payer() ); // account_id + } + void operator()( const execute_bid_operation& op ) + { + _impacted.insert( op.fee_payer() ); // bidder + } + void operator()( const account_create_operation& op ) + { + _impacted.insert( op.fee_payer() ); // registrar + _impacted.insert( op.referrer ); + add_authority_accounts( _impacted, op.owner ); + add_authority_accounts( _impacted, op.active ); + } + void operator()( const account_update_operation& op ) + { + _impacted.insert( op.fee_payer() ); // account + if( op.owner ) + add_authority_accounts( _impacted, *(op.owner) ); + if( op.active ) + add_authority_accounts( _impacted, *(op.active) ); + } + void operator()( const account_whitelist_operation& op ) + { + _impacted.insert( op.fee_payer() ); // authorizing_account + _impacted.insert( op.account_to_list ); + } + void operator()( const account_upgrade_operation& op ) + { + _impacted.insert( op.fee_payer() ); // account_to_upgrade + } + void operator()( const account_transfer_operation& op ) + { + _impacted.insert( op.fee_payer() ); // account_id + } + void operator()( const asset_create_operation& op ) + { + _impacted.insert( op.fee_payer() ); // issuer + } + void operator()( const asset_update_operation& op ) + { + _impacted.insert( op.fee_payer() ); // issuer + if( op.new_issuer ) + _impacted.insert( *(op.new_issuer) ); + } + void operator()( const asset_update_issuer_operation& op ) + { + _impacted.insert( 
op.fee_payer() ); // issuer + _impacted.insert( op.new_issuer ); + } + void operator()( const asset_update_bitasset_operation& op ) + { + _impacted.insert( op.fee_payer() ); // issuer + } + void operator()( const asset_update_feed_producers_operation& op ) + { + _impacted.insert( op.fee_payer() ); // issuer + } + void operator()( const asset_issue_operation& op ) + { + _impacted.insert( op.fee_payer() ); // issuer + _impacted.insert( op.issue_to_account ); + } + void operator()( const asset_reserve_operation& op ) + { + _impacted.insert( op.fee_payer() ); // payer + } + void operator()( const asset_fund_fee_pool_operation& op ) + { + _impacted.insert( op.fee_payer() ); // from_account + } + void operator()( const asset_settle_operation& op ) + { + _impacted.insert( op.fee_payer() ); // account + } + void operator()( const asset_global_settle_operation& op ) + { + _impacted.insert( op.fee_payer() ); // issuer + } + void operator()( const asset_publish_feed_operation& op ) + { + _impacted.insert( op.fee_payer() ); // publisher + } + void operator()( const witness_create_operation& op ) + { + _impacted.insert( op.fee_payer() ); // witness_account + } + void operator()( const witness_update_operation& op ) + { + _impacted.insert( op.fee_payer() ); // witness_account + } + void operator()( const proposal_create_operation& op ) + { + _impacted.insert( op.fee_payer() ); // fee_paying_account + vector other; + for( const auto& proposed_op : op.proposed_ops ) + operation_get_required_authorities( proposed_op.op, _impacted, _impacted, other ); + for( auto& o : other ) + add_authority_accounts( _impacted, o ); + } + void operator()( const proposal_update_operation& op ) + { + _impacted.insert( op.fee_payer() ); // fee_paying_account + } + void operator()( const proposal_delete_operation& op ) + { + _impacted.insert( op.fee_payer() ); // fee_paying_account + } + void operator()( const withdraw_permission_create_operation& op ) + { + _impacted.insert( op.fee_payer() ); // withdraw_from_account + _impacted.insert( op.authorized_account ); + } + void operator()( const withdraw_permission_update_operation& op ) + { + _impacted.insert( op.fee_payer() ); // withdraw_from_account + _impacted.insert( op.authorized_account ); + } + void operator()( const withdraw_permission_claim_operation& op ) + { + _impacted.insert( op.fee_payer() ); // withdraw_to_account + _impacted.insert( op.withdraw_from_account ); + } + void operator()( const withdraw_permission_delete_operation& op ) + { + _impacted.insert( op.fee_payer() ); // withdraw_from_account + _impacted.insert( op.authorized_account ); + } + void operator()( const committee_member_create_operation& op ) + { + _impacted.insert( op.fee_payer() ); // committee_member_account + } + void operator()( const committee_member_update_operation& op ) + { + _impacted.insert( op.fee_payer() ); // committee_member_account + } + void operator()( const committee_member_update_global_parameters_operation& op ) + { + _impacted.insert( op.fee_payer() ); // account_id_type() + } + void operator()( const vesting_balance_create_operation& op ) + { + _impacted.insert( op.fee_payer() ); // creator + _impacted.insert( op.owner ); + } + void operator()( const vesting_balance_withdraw_operation& op ) + { + _impacted.insert( op.fee_payer() ); // owner + } + void operator()( const worker_create_operation& op ) + { + _impacted.insert( op.fee_payer() ); // owner + } + void operator()( const custom_operation& op ) + { + _impacted.insert( op.fee_payer() ); // payer + } + void operator()( 
const assert_operation& op ) + { + _impacted.insert( op.fee_payer() ); // fee_paying_account + } + void operator()( const balance_claim_operation& op ) + { + _impacted.insert( op.fee_payer() ); // deposit_to_account + } + void operator()( const override_transfer_operation& op ) + { + _impacted.insert( op.to ); + _impacted.insert( op.from ); + _impacted.insert( op.fee_payer() ); // issuer + } + void operator()( const transfer_to_blind_operation& op ) + { + _impacted.insert( op.fee_payer() ); // from + for( const auto& out : op.outputs ) + add_authority_accounts( _impacted, out.owner ); + } + void operator()( const blind_transfer_operation& op ) + { + _impacted.insert( op.fee_payer() ); // GRAPHENE_TEMP_ACCOUNT + for( const auto& in : op.inputs ) + add_authority_accounts( _impacted, in.owner ); + for( const auto& out : op.outputs ) + add_authority_accounts( _impacted, out.owner ); + } + void operator()( const transfer_from_blind_operation& op ) + { + _impacted.insert( op.fee_payer() ); // GRAPHENE_TEMP_ACCOUNT + _impacted.insert( op.to ); + for( const auto& in : op.inputs ) + add_authority_accounts( _impacted, in.owner ); + } + void operator()( const asset_settle_cancel_operation& op ) + { + _impacted.insert( op.fee_payer() ); // account + } + void operator()( const fba_distribute_operation& op ) + { + _impacted.insert( op.fee_payer() ); // account_id + } +}; + +void graphene::chain::operation_get_impacted_accounts( const operation& op, flat_set& result ) +{ + get_impacted_account_visitor vtor = get_impacted_account_visitor( result ); + op.visit( vtor ); +} + +void graphene::chain::transaction_get_impacted_accounts( const transaction& tx, flat_set& result ) +{ + for( const auto& op : tx.operations ) + operation_get_impacted_accounts( op, result ); +} + +void get_relevant_accounts( const object* obj, flat_set& accounts ) +{ + if( obj->id.space() == protocol_ids ) + { + switch( (object_type)obj->id.type() ) + { + case null_object_type: + case base_object_type: + case OBJECT_TYPE_COUNT: + return; + case account_object_type:{ + accounts.insert( obj->id ); + break; + } case asset_object_type:{ + const auto& aobj = dynamic_cast(obj); + FC_ASSERT( aobj != nullptr ); + accounts.insert( aobj->issuer ); + break; + } case force_settlement_object_type:{ + const auto& aobj = dynamic_cast(obj); + FC_ASSERT( aobj != nullptr ); + accounts.insert( aobj->owner ); + break; + } case committee_member_object_type:{ + const auto& aobj = dynamic_cast(obj); + FC_ASSERT( aobj != nullptr ); + accounts.insert( aobj->committee_member_account ); + break; + } case witness_object_type:{ + const auto& aobj = dynamic_cast(obj); + FC_ASSERT( aobj != nullptr ); + accounts.insert( aobj->witness_account ); + break; + } case limit_order_object_type:{ + const auto& aobj = dynamic_cast(obj); + FC_ASSERT( aobj != nullptr ); + accounts.insert( aobj->seller ); + break; + } case call_order_object_type:{ + const auto& aobj = dynamic_cast(obj); + FC_ASSERT( aobj != nullptr ); + accounts.insert( aobj->borrower ); + break; + } case custom_object_type:{ + break; + } case proposal_object_type:{ + const auto& aobj = dynamic_cast(obj); + FC_ASSERT( aobj != nullptr ); + transaction_get_impacted_accounts( aobj->proposed_transaction, accounts ); + break; + } case operation_history_object_type:{ + const auto& aobj = dynamic_cast(obj); + FC_ASSERT( aobj != nullptr ); + operation_get_impacted_accounts( aobj->op, accounts ); + break; + } case withdraw_permission_object_type:{ + const auto& aobj = dynamic_cast(obj); + FC_ASSERT( aobj != nullptr ); + 
accounts.insert( aobj->withdraw_from_account ); + accounts.insert( aobj->authorized_account ); + break; + } case vesting_balance_object_type:{ + const auto& aobj = dynamic_cast(obj); + FC_ASSERT( aobj != nullptr ); + accounts.insert( aobj->owner ); + break; + } case worker_object_type:{ + const auto& aobj = dynamic_cast(obj); + FC_ASSERT( aobj != nullptr ); + accounts.insert( aobj->worker_account ); + break; + } case balance_object_type:{ + /** these are free from any accounts */ + break; + } + } + } + else if( obj->id.space() == implementation_ids ) + { + switch( (impl_object_type)obj->id.type() ) + { + case impl_global_property_object_type: + break; + case impl_dynamic_global_property_object_type: + break; + case impl_reserved0_object_type: + break; + case impl_asset_dynamic_data_type: + break; + case impl_asset_bitasset_data_type: + break; + case impl_account_balance_object_type:{ + const auto& aobj = dynamic_cast(obj); + FC_ASSERT( aobj != nullptr ); + accounts.insert( aobj->owner ); + break; + } case impl_account_statistics_object_type:{ + const auto& aobj = dynamic_cast(obj); + FC_ASSERT( aobj != nullptr ); + accounts.insert( aobj->owner ); + break; + } case impl_transaction_object_type:{ + const auto& aobj = dynamic_cast(obj); + FC_ASSERT( aobj != nullptr ); + transaction_get_impacted_accounts( aobj->trx, accounts ); + break; + } case impl_blinded_balance_object_type:{ + const auto& aobj = dynamic_cast(obj); + FC_ASSERT( aobj != nullptr ); + for( const auto& a : aobj->owner.account_auths ) + accounts.insert( a.first ); + break; + } case impl_block_summary_object_type: + break; + case impl_account_transaction_history_object_type: { + const auto& aobj = dynamic_cast(obj); + FC_ASSERT( aobj != nullptr ); + accounts.insert( aobj->account ); + break; + } case impl_chain_property_object_type: + break; + case impl_witness_schedule_object_type: + break; + case impl_budget_record_object_type: + break; + case impl_special_authority_object_type: + break; + case impl_buyback_object_type: + break; + case impl_fba_accumulator_object_type: + break; + case impl_collateral_bid_object_type:{ + const auto& aobj = dynamic_cast(obj); + FC_ASSERT( aobj != nullptr ); + accounts.insert( aobj->bidder ); + break; + } + } + } +} // end get_relevant_accounts( const object* obj, flat_set& accounts ) + +namespace graphene { namespace chain { + +void database::notify_applied_block( const signed_block& block ) +{ + GRAPHENE_TRY_NOTIFY( applied_block, block ) +} + +void database::notify_on_pending_transaction( const signed_transaction& tx ) +{ + GRAPHENE_TRY_NOTIFY( on_pending_transaction, tx ) +} + +void database::notify_changed_objects() +{ try { + if( _undo_db.enabled() ) + { + const auto& head_undo = _undo_db.head(); + + // New + if( !new_objects.empty() ) + { + vector new_ids; new_ids.reserve(head_undo.new_ids.size()); + flat_set new_accounts_impacted; + for( const auto& item : head_undo.new_ids ) + { + new_ids.push_back(item); + auto obj = find_object(item); + if(obj != nullptr) + get_relevant_accounts(obj, new_accounts_impacted); + } + + if( new_ids.size() ) + GRAPHENE_TRY_NOTIFY( new_objects, new_ids, new_accounts_impacted) + } + + // Changed + if( !changed_objects.empty() ) + { + vector changed_ids; changed_ids.reserve(head_undo.old_values.size()); + flat_set changed_accounts_impacted; + for( const auto& item : head_undo.old_values ) + { + changed_ids.push_back(item.first); + get_relevant_accounts(item.second.get(), changed_accounts_impacted); + } + + if( changed_ids.size() ) + GRAPHENE_TRY_NOTIFY( 
changed_objects, changed_ids, changed_accounts_impacted) + } + + // Removed + if( !removed_objects.empty() ) + { + vector removed_ids; removed_ids.reserve( head_undo.removed.size() ); + vector removed; removed.reserve( head_undo.removed.size() ); + flat_set removed_accounts_impacted; + for( const auto& item : head_undo.removed ) + { + removed_ids.emplace_back( item.first ); + auto obj = item.second.get(); + removed.emplace_back( obj ); + get_relevant_accounts(obj, removed_accounts_impacted); + } + + if( removed_ids.size() ) + GRAPHENE_TRY_NOTIFY( removed_objects, removed_ids, removed, removed_accounts_impacted) + } + } +} FC_CAPTURE_AND_LOG( (0) ) } + +} } diff --git a/libraries/chain/db_update.cpp b/libraries/chain/db_update.cpp index 2219136e1c..9a5bcad1eb 100644 --- a/libraries/chain/db_update.cpp +++ b/libraries/chain/db_update.cpp @@ -40,34 +40,16 @@ namespace graphene { namespace chain { -void database::update_global_dynamic_data( const signed_block& b ) +void database::update_global_dynamic_data( const signed_block& b, const uint32_t missed_blocks ) { - const dynamic_global_property_object& _dgp = - dynamic_global_property_id_type(0)(*this); - - uint32_t missed_blocks = get_slot_at_time( b.timestamp ); - assert( missed_blocks != 0 ); - missed_blocks--; - for( uint32_t i = 0; i < missed_blocks; ++i ) { - const auto& witness_missed = get_scheduled_witness( i+1 )(*this); - if( witness_missed.id != b.witness ) { - /* - const auto& witness_account = witness_missed.witness_account(*this); - if( (fc::time_point::now() - b.timestamp) < fc::seconds(30) ) - wlog( "Witness ${name} missed block ${n} around ${t}", ("name",witness_account.name)("n",b.block_num())("t",b.timestamp) ); - */ - - modify( witness_missed, [&]( witness_object& w ) { - w.total_missed++; - }); - } - } + const dynamic_global_property_object& _dgp = get_dynamic_global_properties(); // dynamic global properties updating - modify( _dgp, [&]( dynamic_global_property_object& dgp ){ - if( BOOST_UNLIKELY( b.block_num() == 1 ) ) + modify( _dgp, [&b,this,missed_blocks]( dynamic_global_property_object& dgp ){ + const uint32_t block_num = b.block_num(); + if( BOOST_UNLIKELY( block_num == 1 ) ) dgp.recently_missed_count = 0; - else if( _checkpoints.size() && _checkpoints.rbegin()->first >= b.block_num() ) + else if( _checkpoints.size() && _checkpoints.rbegin()->first >= block_num ) dgp.recently_missed_count = 0; else if( missed_blocks ) dgp.recently_missed_count += GRAPHENE_RECENTLY_MISSED_COUNT_INCREMENT*missed_blocks; @@ -76,7 +58,7 @@ void database::update_global_dynamic_data( const signed_block& b ) else if( dgp.recently_missed_count > 0 ) dgp.recently_missed_count--; - dgp.head_block_number = b.block_num(); + dgp.head_block_number = block_num; dgp.head_block_id = b.id(); dgp.time = b.timestamp; dgp.current_witness = b.witness; @@ -126,6 +108,7 @@ void database::update_last_irreversible_block() const global_property_object& gpo = get_global_properties(); const dynamic_global_property_object& dpo = get_dynamic_global_properties(); + // TODO for better performance, move this to db_maint, because only need to do it once per maintenance interval vector< const witness_object* > wit_objs; wit_objs.reserve( gpo.active_witnesses.size() ); for( const witness_id_type& wid : gpo.active_witnesses ) @@ -133,9 +116,10 @@ void database::update_last_irreversible_block() static_assert( GRAPHENE_IRREVERSIBLE_THRESHOLD > 0, "irreversible threshold must be nonzero" ); - // 1 1 1 2 2 2 2 2 2 2 -> 2 .7*10 = 7 + // 1 1 1 2 2 2 2 2 2 2 -> 2 .3*10 = 3 // 1 
1 1 1 1 1 1 2 2 2 -> 1 // 3 3 3 3 3 3 3 3 3 3 -> 3 + // 3 3 3 4 4 4 4 4 4 4 -> 4 size_t offset = ((GRAPHENE_100_PERCENT - GRAPHENE_IRREVERSIBLE_THRESHOLD) * wit_objs.size() / GRAPHENE_100_PERCENT); @@ -162,8 +146,8 @@ void database::clear_expired_transactions() //Transactions must have expired by at least two forking windows in order to be removed. auto& transaction_idx = static_cast(get_mutable_index(implementation_ids, impl_transaction_object_type)); const auto& dedupe_index = transaction_idx.indices().get(); - while( (!dedupe_index.empty()) && (head_block_time() > dedupe_index.rbegin()->trx.expiration) ) - transaction_idx.remove(*dedupe_index.rbegin()); + while( (!dedupe_index.empty()) && (head_block_time() > dedupe_index.begin()->trx.expiration) ) + transaction_idx.remove(*dedupe_index.begin()); } FC_CAPTURE_AND_RETHROW() } void database::clear_expired_proposals() @@ -197,11 +181,12 @@ void database::clear_expired_proposals() * * A black swan occurs if MAX(HB,SP) <= LC */ -bool database::check_for_blackswan( const asset_object& mia, bool enable_black_swan ) +bool database::check_for_blackswan( const asset_object& mia, bool enable_black_swan, + const asset_bitasset_data_object* bitasset_ptr ) { if( !mia.is_market_issued() ) return false; - const asset_bitasset_data_object& bitasset = mia.bitasset_data(*this); + const asset_bitasset_data_object& bitasset = ( bitasset_ptr ? *bitasset_ptr : mia.bitasset_data(*this) ); if( bitasset.has_settlement() ) return true; // already force settled auto settle_price = bitasset.current_feed.settlement_price; if( settle_price.is_null() ) return false; // no feed @@ -209,6 +194,21 @@ bool database::check_for_blackswan( const asset_object& mia, bool enable_black_s const call_order_index& call_index = get_index_type(); const auto& call_price_index = call_index.indices().get(); + auto call_min = price::min( bitasset.options.short_backing_asset, mia.id ); + auto call_max = price::max( bitasset.options.short_backing_asset, mia.id ); + auto call_itr = call_price_index.lower_bound( call_min ); + auto call_end = call_price_index.upper_bound( call_max ); + + if( call_itr == call_end ) return false; // no call orders + + price highest = settle_price; + + const auto& dyn_prop = get_dynamic_global_properties(); + auto maint_time = dyn_prop.next_maintenance_time; + if( maint_time > HARDFORK_CORE_338_TIME ) + // due to #338, we won't check for black swan on incoming limit order, so need to check with MSSP here + highest = bitasset.current_feed.max_short_squeeze_price(); + const limit_order_index& limit_index = get_index_type(); const auto& limit_price_index = limit_index.indices().get(); @@ -217,38 +217,37 @@ bool database::check_for_blackswan( const asset_object& mia, bool enable_black_s // stop when limit orders are selling too little USD for too much CORE auto lowest_possible_bid = price::min( mia.id, bitasset.options.short_backing_asset ); - assert( highest_possible_bid.base.asset_id == lowest_possible_bid.base.asset_id ); + FC_ASSERT( highest_possible_bid.base.asset_id == lowest_possible_bid.base.asset_id ); // NOTE limit_price_index is sorted from greatest to least auto limit_itr = limit_price_index.lower_bound( highest_possible_bid ); auto limit_end = limit_price_index.upper_bound( lowest_possible_bid ); - auto call_min = price::min( bitasset.options.short_backing_asset, mia.id ); - auto call_max = price::max( bitasset.options.short_backing_asset, mia.id ); - auto call_itr = call_price_index.lower_bound( call_min ); - auto call_end = 
call_price_index.upper_bound( call_max ); - - if( call_itr == call_end ) return false; // no call orders - - price highest = settle_price; if( limit_itr != limit_end ) { - assert( settle_price.base.asset_id == limit_itr->sell_price.base.asset_id ); - highest = std::max( limit_itr->sell_price, settle_price ); + FC_ASSERT( highest.base.asset_id == limit_itr->sell_price.base.asset_id ); + highest = std::max( limit_itr->sell_price, highest ); } auto least_collateral = call_itr->collateralization(); if( ~least_collateral >= highest ) { - elog( "Black Swan detected: \n" + wdump( (*call_itr) ); + elog( "Black Swan detected on asset ${symbol} (${id}) at block ${b}: \n" " Least collateralized call: ${lc} ${~lc}\n" // " Highest Bid: ${hb} ${~hb}\n" - " Settle Price: ${sp} ${~sp}\n" - " Max: ${h} ${~h}\n", + " Settle Price: ${~sp} ${sp}\n" + " Max: ${~h} ${h}\n", + ("id",mia.id)("symbol",mia.symbol)("b",head_block_num()) ("lc",least_collateral.to_real())("~lc",(~least_collateral).to_real()) // ("hb",limit_itr->sell_price.to_real())("~hb",(~limit_itr->sell_price).to_real()) ("sp",settle_price.to_real())("~sp",(~settle_price).to_real()) ("h",highest.to_real())("~h",(~highest).to_real()) ); + edump((enable_black_swan)); FC_ASSERT( enable_black_swan, "Black swan was detected during a margin update which is not allowed to trigger a blackswan" ); - globally_settle_asset(mia, ~least_collateral ); + if( maint_time > HARDFORK_CORE_338_TIME && ~least_collateral <= settle_price ) + // global settle at feed price if possible + globally_settle_asset(mia, settle_price ); + else + globally_settle_asset(mia, ~least_collateral ); return true; } return false; @@ -256,33 +255,32 @@ bool database::check_for_blackswan( const asset_object& mia, bool enable_black_s void database::clear_expired_orders() { try { - detail::with_skip_flags( *this, - get_node_properties().skip_flags | skip_authority_check, [&](){ - transaction_evaluation_state cancel_context(this); - //Cancel expired limit orders + auto head_time = head_block_time(); + auto maint_time = get_dynamic_global_properties().next_maintenance_time; + + bool before_core_hardfork_184 = ( maint_time <= HARDFORK_CORE_184_TIME ); // something-for-nothing + bool before_core_hardfork_342 = ( maint_time <= HARDFORK_CORE_342_TIME ); // better rounding + bool before_core_hardfork_606 = ( maint_time <= HARDFORK_CORE_606_TIME ); // feed always trigger call + auto& limit_index = get_index_type().indices().get(); - while( !limit_index.empty() && limit_index.begin()->expiration <= head_block_time() ) + while( !limit_index.empty() && limit_index.begin()->expiration <= head_time ) { - limit_order_cancel_operation canceler; const limit_order_object& order = *limit_index.begin(); - canceler.fee_paying_account = order.seller; - canceler.order = order.id; - canceler.fee = current_fee_schedule().calculate_fee( canceler ); - if( canceler.fee.amount > order.deferred_fee ) + auto base_asset = order.sell_price.base.asset_id; + auto quote_asset = order.sell_price.quote.asset_id; + cancel_limit_order( order ); + if( before_core_hardfork_606 ) { - // Cap auto-cancel fees at deferred_fee; see #549 - wlog( "At block ${b}, fee for clearing expired order ${oid} was capped at deferred_fee ${fee}", ("b", head_block_num())("oid", order.id)("fee", order.deferred_fee) ); - canceler.fee = asset( order.deferred_fee, asset_id_type() ); + // check call orders + // Comments below are copied from limit_order_cancel_evaluator::do_apply(...) 
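// A sketch of the black-swan test used above: the least-collateralized call order forces a
// global settlement when selling all of its collateral at the best available price (the higher
// of the highest bid and the squeeze-protected feed price) still cannot buy back its whole debt.
// Plain integer ratios stand in for graphene's price type; the names are invented for this
// illustration.
#include <cstdint>

namespace black_swan_sketch {

// best_price is expressed as best_num units of debt per best_den units of collateral
inline bool is_black_swan( int64_t collateral, int64_t debt, int64_t best_num, int64_t best_den )
{
   // collateral * best_price <= debt  <=>  the position cannot be covered even at the best price
   return static_cast<__int128>(collateral) * best_num <= static_cast<__int128>(debt) * best_den;
}

} // namespace black_swan_sketch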
+ // Possible optimization: order can be called by cancelling a limit order + // if the canceled order was at the top of the book. + // Do I need to check calls in both assets? + check_call_orders( base_asset( *this ) ); + check_call_orders( quote_asset( *this ) ); } - // we know the fee for this op is set correctly since it is set by the chain. - // this allows us to avoid a hung chain: - // - if #549 case above triggers - // - if the fee is incorrect, which may happen due to #435 (although since cancel is a fixed-fee op, it shouldn't) - cancel_context.skip_fee_schedule_check = true; - apply_operation(cancel_context, canceler); } - }); //Process expired force settlement orders auto& settlement_index = get_index_type().indices().get(); @@ -290,9 +288,12 @@ void database::clear_expired_orders() { asset_id_type current_asset = settlement_index.begin()->settlement_asset_id(); asset max_settlement_volume; + price settlement_fill_price; + price settlement_price; + bool current_asset_finished = false; bool extra_dump = false; - auto next_asset = [¤t_asset, &settlement_index, &extra_dump] { + auto next_asset = [¤t_asset, ¤t_asset_finished, &settlement_index, &extra_dump] { auto bound = settlement_index.upper_bound(current_asset); if( bound == settlement_index.end() ) { @@ -307,6 +308,7 @@ void database::clear_expired_orders() ilog( "next_asset returning true, bound is ${b}", ("b", *bound) ); } current_asset = bound->settlement_asset_id(); + current_asset_finished = false; return true; }; @@ -335,12 +337,12 @@ void database::clear_expired_orders() if( mia.has_settlement() ) { ilog( "Canceling a force settlement because of black swan" ); - cancel_order( order ); + cancel_settle_order( order ); continue; } // Has this order not reached its settlement date? - if( order.settlement_date > head_block_time() ) + if( order.settlement_date > head_time ) { if( next_asset() ) { @@ -357,12 +359,14 @@ void database::clear_expired_orders() { ilog("Canceling a force settlement in ${asset} because settlement price is null", ("asset", mia_object.symbol)); - cancel_order(order); + cancel_settle_order(order); continue; } if( max_settlement_volume.asset_id != current_asset ) max_settlement_volume = mia_object.amount(mia.max_force_settlement_volume(mia_object.dynamic_data(*this).current_supply)); - if( mia.force_settled_volume >= max_settlement_volume.amount ) + // When current_asset_finished is true, this would be the 2nd time processing the same order. + // In this case, we move to the next asset. 
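// A sketch of the per-asset settlement cap consulted above, assuming the cap is a simple
// percentage of the current supply expressed in GRAPHENE_100_PERCENT-style units
// (10000 == 100%); the function name is invented and the exact graphene formula may differ.
#include <cstdint>

namespace settle_volume_sketch {

inline int64_t max_settlement_volume( int64_t current_supply, int64_t max_volume_percent )
{
   return static_cast<int64_t>( static_cast<__int128>(current_supply) * max_volume_percent / 10000 );
}

// A settle order is skipped (and the loop moves on to the next asset) once the asset's
// force_settled_volume has reached this cap within the current maintenance interval.

} // namespace settle_volume_sketch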
+ if( mia.force_settled_volume >= max_settlement_volume.amount || current_asset_finished ) { /* ilog("Skipping force settlement in ${asset}; settled ${settled_volume} / ${max_volume}", @@ -380,13 +384,23 @@ void database::clear_expired_orders() break; } - auto& pays = order.balance; - auto receives = (order.balance * mia.current_feed.settlement_price); - receives.amount = (fc::uint128_t(receives.amount.value) * - (GRAPHENE_100_PERCENT - mia.options.force_settlement_offset_percent) / GRAPHENE_100_PERCENT).to_uint64(); - assert(receives <= order.balance * mia.current_feed.settlement_price); + if( settlement_fill_price.base.asset_id != current_asset ) // only calculate once per asset + settlement_fill_price = mia.current_feed.settlement_price + / ratio_type( GRAPHENE_100_PERCENT - mia.options.force_settlement_offset_percent, + GRAPHENE_100_PERCENT ); - price settlement_price = pays / receives; + if( before_core_hardfork_342 ) + { + auto& pays = order.balance; + auto receives = (order.balance * mia.current_feed.settlement_price); + receives.amount = ( fc::uint128_t(receives.amount.value) * + (GRAPHENE_100_PERCENT - mia.options.force_settlement_offset_percent) / + GRAPHENE_100_PERCENT ).to_uint64(); + assert(receives <= order.balance * mia.current_feed.settlement_price); + settlement_price = pays / receives; + } + else if( settlement_price.base.asset_id != current_asset ) // only calculate once per asset + settlement_price = settlement_fill_price; auto& call_index = get_index_type().indices().get(); asset settled = mia_object.amount(mia.force_settled_volume); @@ -402,15 +416,33 @@ void database::clear_expired_orders() if( order.balance.amount == 0 ) { wlog( "0 settlement detected" ); - cancel_order( order ); + cancel_settle_order( order ); break; } try { - settled += match(*itr, order, settlement_price, max_settlement); + asset new_settled = match(*itr, order, settlement_price, max_settlement, settlement_fill_price); + if( !before_core_hardfork_184 && new_settled.amount == 0 ) // unable to fill this settle order + { + if( find_object( order_id ) ) // the settle order hasn't been cancelled + current_asset_finished = true; + break; + } + settled += new_settled; + // before hard fork core-342, `new_settled > 0` is always true, we'll have: + // * call order is completely filled (thus itr will change in next loop), or + // * settle order is completely filled (thus find_object(order_id) will be false so will break out), or + // * reached max_settlement_volume limit (thus new_settled == max_settlement so will break out). + // + // after hard fork core-342, if new_settled > 0, we'll have: + // * call order is completely filled (thus itr will change in next loop), or + // * settle order is completely filled (thus find_object(order_id) will be false so will break out), or + // * reached max_settlement_volume limit, but it's possible that new_settled < max_settlement, + // in this case, new_settled will be zero in next iteration of the loop, so no need to check here. 
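The settlement_fill_price / receives arithmetic above applies the force-settlement offset as a fixed-point percentage. A minimal sketch with invented amounts; the real code uses fc::uint128_t for the intermediate product and graphene price types.

```cpp
#include <cstdint>
#include <iostream>

int main()
{
   const uint64_t GRAPHENE_100_PERCENT = 10000;
   const uint64_t offset_percent       = 100;   // force_settlement_offset_percent: 1%
   const uint64_t balance              = 1000;  // debt amount being force-settled
   const uint64_t at_feed_price        = 1000;  // collateral due at the raw feed (1:1 assumed)

   // receives = receives * (GRAPHENE_100_PERCENT - offset) / GRAPHENE_100_PERCENT
   uint64_t receives = at_feed_price * ( GRAPHENE_100_PERCENT - offset_percent )
                       / GRAPHENE_100_PERCENT;

   std::cout << "settling " << balance << " debt pays out " << receives
             << " collateral (1% below the feed)\n";   // prints 990
}
```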
} catch ( const black_swan_exception& e ) { - wlog( "black swan detected: ${e}", ("e", e.to_detail_string() ) ); - cancel_order( order ); + wlog( "Cancelling a settle_order since it may trigger a black swan: ${o}, ${e}", + ("o", order)("e", e.to_detail_string()) ); + cancel_settle_order( order ); break; } } @@ -426,32 +458,84 @@ void database::clear_expired_orders() void database::update_expired_feeds() { - auto& asset_idx = get_index_type().indices().get(); - auto itr = asset_idx.lower_bound( true /** market issued */ ); - while( itr != asset_idx.end() ) + const auto head_time = head_block_time(); + bool after_hardfork_615 = ( head_time >= HARDFORK_615_TIME ); + + const auto& idx = get_index_type().indices().get(); + auto itr = idx.begin(); + while( itr != idx.end() && itr->feed_is_expired( head_time ) ) { - const asset_object& a = *itr; - ++itr; - assert( a.is_market_issued() ); - - const asset_bitasset_data_object& b = a.bitasset_data(*this); - bool feed_is_expired; - if( head_block_time() < HARDFORK_615_TIME ) - feed_is_expired = b.feed_is_expired_before_hardfork_615( head_block_time() ); - else - feed_is_expired = b.feed_is_expired( head_block_time() ); - if( feed_is_expired ) + const asset_bitasset_data_object& b = *itr; + ++itr; // not always process begin() because old code skipped updating some assets before hf 615 + bool update_cer = false; // for better performance, to only update bitasset once, also check CER in this function + const asset_object* asset_ptr = nullptr; + // update feeds, check margin calls + if( after_hardfork_615 || b.feed_is_expired_before_hardfork_615( head_time ) ) { - modify(b, [this](asset_bitasset_data_object& a) { - a.update_median_feeds(head_block_time()); + auto old_median_feed = b.current_feed; + modify( b, [head_time,&update_cer]( asset_bitasset_data_object& abdo ) + { + abdo.update_median_feeds( head_time ); + if( abdo.need_to_update_cer() ) + { + update_cer = true; + abdo.asset_cer_updated = false; + abdo.feed_cer_updated = false; + } }); - check_call_orders(b.current_feed.settlement_price.base.asset_id(*this)); + if( !b.current_feed.settlement_price.is_null() && !( b.current_feed == old_median_feed ) ) // `==` check is safe here + { + asset_ptr = &b.asset_id( *this ); + check_call_orders( *asset_ptr, true, false, &b ); + } + } + // update CER + if( update_cer ) + { + if( !asset_ptr ) + asset_ptr = &b.asset_id( *this ); + if( asset_ptr->options.core_exchange_rate != b.current_feed.core_exchange_rate ) + { + modify( *asset_ptr, [&b]( asset_object& ao ) + { + ao.options.core_exchange_rate = b.current_feed.core_exchange_rate; + }); + } } - if( !b.current_feed.core_exchange_rate.is_null() && - a.options.core_exchange_rate != b.current_feed.core_exchange_rate ) - modify(a, [&b](asset_object& a) { - a.options.core_exchange_rate = b.current_feed.core_exchange_rate; + } // for each asset whose feed is expired + + // process assets affected by bitshares-core issue 453 before hard fork 615 + if( !after_hardfork_615 ) + { + for( asset_id_type a : _issue_453_affected_assets ) + { + check_call_orders( a(*this) ); + } + } +} + +void database::update_core_exchange_rates() +{ + const auto& idx = get_index_type().indices().get(); + if( idx.begin() != idx.end() ) + { + for( auto itr = idx.rbegin(); itr->need_to_update_cer(); itr = idx.rbegin() ) + { + const asset_bitasset_data_object& b = *itr; + const asset_object& a = b.asset_id( *this ); + if( a.options.core_exchange_rate != b.current_feed.core_exchange_rate ) + { + modify( a, [&b]( asset_object& ao ) + { + 
ao.options.core_exchange_rate = b.current_feed.core_exchange_rate; + }); + } + modify( b, []( asset_bitasset_data_object& abdo ) + { + abdo.asset_cer_updated = false; + abdo.feed_cer_updated = false; }); + } } } diff --git a/libraries/chain/db_witness_schedule.cpp b/libraries/chain/db_witness_schedule.cpp index 66db87eefe..5fd5cfc2a1 100644 --- a/libraries/chain/db_witness_schedule.cpp +++ b/libraries/chain/db_witness_schedule.cpp @@ -34,7 +34,7 @@ using boost::container::flat_set; witness_id_type database::get_scheduled_witness( uint32_t slot_num )const { const dynamic_global_property_object& dpo = get_dynamic_global_properties(); - const witness_schedule_object& wso = witness_schedule_id_type()(*this); + const witness_schedule_object& wso = get_witness_schedule_object(); uint64_t current_aslot = dpo.current_aslot + slot_num; return wso.current_shuffled_witnesses[ current_aslot % wso.current_shuffled_witnesses.size() ]; } @@ -77,6 +77,22 @@ uint32_t database::get_slot_at_time(fc::time_point_sec when)const return (when - first_slot_time).to_seconds() / block_interval() + 1; } +uint32_t database::update_witness_missed_blocks( const signed_block& b ) +{ + uint32_t missed_blocks = get_slot_at_time( b.timestamp ); + FC_ASSERT( missed_blocks != 0, "Trying to push double-produced block onto current block?!" ); + missed_blocks--; + const auto& witnesses = witness_schedule_id_type()(*this).current_shuffled_witnesses; + if( missed_blocks < witnesses.size() ) + for( uint32_t i = 0; i < missed_blocks; ++i ) { + const auto& witness_missed = get_scheduled_witness( i+1 )(*this); + modify( witness_missed, []( witness_object& w ) { + w.total_missed++; + }); + } + return missed_blocks; +} + uint32_t database::witness_participation_rate()const { const dynamic_global_property_object& dpo = get_dynamic_global_properties(); @@ -85,7 +101,7 @@ uint32_t database::witness_participation_rate()const void database::update_witness_schedule() { - const witness_schedule_object& wso = witness_schedule_id_type()(*this); + const witness_schedule_object& wso = get_witness_schedule_object(); const global_property_object& gpo = get_global_properties(); if( head_block_num() % gpo.active_witnesses.size() == 0 ) diff --git a/libraries/chain/fork_database.cpp b/libraries/chain/fork_database.cpp index 83d4880c4f..c77b7ee58e 100644 --- a/libraries/chain/fork_database.cpp +++ b/libraries/chain/fork_database.cpp @@ -23,8 +23,6 @@ */ #include #include -#include -#include namespace graphene { namespace chain { fork_database::fork_database() @@ -38,10 +36,10 @@ void fork_database::reset() void fork_database::pop_block() { - FC_ASSERT( _head, "no blocks to pop" ); + FC_ASSERT( _head, "no block to pop" ); auto prev = _head->prev.lock(); - FC_ASSERT( prev, "poping block would leave head block null" ); - _head = prev; + FC_ASSERT( prev, "popping block would leave head block null" ); + _head = prev; } void fork_database::start_block(signed_block b) @@ -52,7 +50,7 @@ void fork_database::start_block(signed_block b) } /** - * Pushes the block into the fork database and caches it if it doesn't link + * Pushes the block into the fork database * */ shared_ptr fork_database::push_block(const signed_block& b) @@ -66,7 +64,6 @@ shared_ptr fork_database::push_block(const signed_block& b) wlog( "Pushing block to fork database that failed to link: ${id}, ${num}", ("id",b.id())("num",b.block_num()) ); wlog( "Head: ${num}, ${id}", ("num",_head->data.block_num())("id",_head->data.id()) ); throw; - _unlinked_index.insert( item ); } return _head; } @@ -85,7 
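The new update_witness_missed_blocks() relies on the same slot arithmetic as get_scheduled_witness(): the witness for slot N is current_shuffled_witnesses[(current_aslot + N) % size], and every slot between the head block and the incoming block counts as missed. A sketch with an invented witness list and slot numbers:

```cpp
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

int main()
{
   std::vector<std::string> shuffled = { "init0", "init1", "init2", "init3" };
   uint64_t current_aslot  = 41;     // absolute slot of the current head block
   uint32_t new_block_slot = 3;      // slot at the incoming block's timestamp

   // get_scheduled_witness( slot_num ): index by (current_aslot + slot_num) % size
   auto scheduled = [&]( uint32_t slot_num ) {
      return shuffled[ ( current_aslot + slot_num ) % shuffled.size() ];
   };

   // update_witness_missed_blocks(): every slot before the new block's slot
   // was missed by its scheduled witness.
   uint32_t missed_blocks = new_block_slot - 1;
   for( uint32_t i = 0; i < missed_blocks; ++i )
      std::cout << scheduled( i + 1 ) << " missed a block\n";

   std::cout << scheduled( new_block_slot ) << " produced the new block\n";
}
```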
+82,6 @@ void fork_database::_push_block(const item_ptr& item) auto& index = _index.get(); auto itr = index.find(item->previous_id()); GRAPHENE_ASSERT(itr != index.end(), unlinkable_block_exception, "block does not link to known chain"); - FC_ASSERT(!(*itr)->invalid); item->prev = *itr; } @@ -95,35 +91,10 @@ void fork_database::_push_block(const item_ptr& item) { _head = item; uint32_t min_num = _head->num - std::min( _max_size, _head->num ); -// ilog( "min block in fork DB ${n}, max_size: ${m}", ("n",min_num)("m",_max_size) ); auto& num_idx = _index.get(); while( num_idx.size() && (*num_idx.begin())->num < min_num ) num_idx.erase( num_idx.begin() ); - - _unlinked_index.get().erase(_head->num - _max_size); } - //_push_next( item ); -} - -/** - * Iterate through the unlinked cache and insert anything that - * links to the newly inserted item. This will start a recursive - * set of calls performing a depth-first insertion of pending blocks as - * _push_next(..) calls _push_block(...) which will in turn call _push_next - */ -void fork_database::_push_next( const item_ptr& new_item ) -{ - auto& prev_idx = _unlinked_index.get(); - - auto itr = prev_idx.find( new_item->id ); - while( itr != prev_idx.end() ) - { - auto tmp = *itr; - prev_idx.erase( itr ); - _push_block( tmp ); - - itr = prev_idx.find( new_item->id ); - } } void fork_database::set_max_size( uint32_t s ) @@ -131,29 +102,15 @@ void fork_database::set_max_size( uint32_t s ) _max_size = s; if( !_head ) return; - { /// index - auto& by_num_idx = _index.get(); - auto itr = by_num_idx.begin(); - while( itr != by_num_idx.end() ) - { - if( (*itr)->num < std::max(int64_t(0),int64_t(_head->num) - _max_size) ) - by_num_idx.erase(itr); - else - break; - itr = by_num_idx.begin(); - } - } - { /// unlinked_index - auto& by_num_idx = _unlinked_index.get(); - auto itr = by_num_idx.begin(); - while( itr != by_num_idx.end() ) - { - if( (*itr)->num < std::max(int64_t(0),int64_t(_head->num) - _max_size) ) - by_num_idx.erase(itr); - else - break; - itr = by_num_idx.begin(); - } + auto& by_num_idx = _index.get(); + auto itr = by_num_idx.begin(); + while( itr != by_num_idx.end() ) + { + if( (*itr)->num < std::max(int64_t(0),int64_t(_head->num) - _max_size) ) + by_num_idx.erase(itr); + else + break; + itr = by_num_idx.begin(); } } @@ -161,11 +118,7 @@ bool fork_database::is_known_block(const block_id_type& id)const { auto& index = _index.get(); auto itr = index.find(id); - if( itr != index.end() ) - return true; - auto& unlinked_index = _unlinked_index.get(); - auto unlinked_itr = unlinked_index.find(id); - return unlinked_itr != unlinked_index.end(); + return itr != index.end(); } item_ptr fork_database::fetch_block(const block_id_type& id)const @@ -174,10 +127,6 @@ item_ptr fork_database::fetch_block(const block_id_type& id)const auto itr = index.find(id); if( itr != index.end() ) return *itr; - auto& unlinked_index = _unlinked_index.get(); - auto unlinked_itr = unlinked_index.find(id); - if( unlinked_itr != unlinked_index.end() ) - return *unlinked_itr; return item_ptr(); } @@ -248,6 +197,18 @@ void fork_database::set_head(shared_ptr h) void fork_database::remove(block_id_type id) { _index.get().erase(id); + // If we're removing head, try to pop it + if( _head && _head->id == id ) + { + try + { + pop_block(); + } + catch( fc::exception& e ) // If unable to pop normally, E.G. 
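With the unlinked-block cache removed, fork_database keeps only items within _max_size blocks of the head. A sketch of that pruning rule using a plain multimap keyed by block number (data invented):

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

int main()
{
   std::multimap<uint32_t, std::string> by_num = {
      { 95, "b95" }, { 96, "b96" }, { 97, "b97" }, { 100, "head" } };
   uint32_t head_num = 100;
   uint32_t max_size = 4;

   // Mirrors: min_num = _head->num - std::min( _max_size, _head->num )
   uint32_t min_num = head_num - std::min( max_size, head_num );

   // Mirrors the while-loop erasing entries older than min_num
   while( !by_num.empty() && by_num.begin()->first < min_num )
      by_num.erase( by_num.begin() );

   for( const auto& e : by_num )
      std::cout << e.first << " kept\n";   // 96, 97 and 100 survive
}
```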
if head's prev is null, reset it + { + _head.reset(); + } + } } } } // graphene::chain diff --git a/libraries/chain/genesis_state.cpp b/libraries/chain/genesis_state.cpp index a278b68005..aed80c4831 100644 --- a/libraries/chain/genesis_state.cpp +++ b/libraries/chain/genesis_state.cpp @@ -24,10 +24,6 @@ #include -// these are required to serialize a genesis_state -#include // required for gcc in release mode -#include - namespace graphene { namespace chain { chain_id_type genesis_state_type::compute_chain_id() const diff --git a/libraries/chain/get_config.cpp b/libraries/chain/get_config.cpp index ca8468bf21..b8fc7a93c7 100644 --- a/libraries/chain/get_config.cpp +++ b/libraries/chain/get_config.cpp @@ -39,7 +39,6 @@ fc::variant_object get_config() result[ "GRAPHENE_MIN_ASSET_SYMBOL_LENGTH" ] = GRAPHENE_MIN_ASSET_SYMBOL_LENGTH; result[ "GRAPHENE_MAX_ASSET_SYMBOL_LENGTH" ] = GRAPHENE_MAX_ASSET_SYMBOL_LENGTH; result[ "GRAPHENE_MAX_SHARE_SUPPLY" ] = GRAPHENE_MAX_SHARE_SUPPLY; - result[ "GRAPHENE_MAX_PAY_RATE" ] = GRAPHENE_MAX_PAY_RATE; result[ "GRAPHENE_MAX_SIG_CHECK_DEPTH" ] = GRAPHENE_MAX_SIG_CHECK_DEPTH; result[ "GRAPHENE_MIN_TRANSACTION_SIZE_LIMIT" ] = GRAPHENE_MIN_TRANSACTION_SIZE_LIMIT; result[ "GRAPHENE_MIN_BLOCK_INTERVAL" ] = GRAPHENE_MIN_BLOCK_INTERVAL; @@ -53,11 +52,8 @@ fc::variant_object get_config() result[ "GRAPHENE_MIN_UNDO_HISTORY" ] = GRAPHENE_MIN_UNDO_HISTORY; result[ "GRAPHENE_MAX_UNDO_HISTORY" ] = GRAPHENE_MAX_UNDO_HISTORY; result[ "GRAPHENE_MIN_BLOCK_SIZE_LIMIT" ] = GRAPHENE_MIN_BLOCK_SIZE_LIMIT; - result[ "GRAPHENE_MIN_TRANSACTION_EXPIRATION_LIMIT" ] = GRAPHENE_MIN_TRANSACTION_EXPIRATION_LIMIT; result[ "GRAPHENE_BLOCKCHAIN_PRECISION" ] = GRAPHENE_BLOCKCHAIN_PRECISION; result[ "GRAPHENE_BLOCKCHAIN_PRECISION_DIGITS" ] = GRAPHENE_BLOCKCHAIN_PRECISION_DIGITS; - result[ "GRAPHENE_DEFAULT_TRANSFER_FEE" ] = GRAPHENE_DEFAULT_TRANSFER_FEE; - result[ "GRAPHENE_MAX_INSTANCE_ID" ] = GRAPHENE_MAX_INSTANCE_ID; result[ "GRAPHENE_100_PERCENT" ] = GRAPHENE_100_PERCENT; result[ "GRAPHENE_1_PERCENT" ] = GRAPHENE_1_PERCENT; result[ "GRAPHENE_MAX_MARKET_FEE_PERCENT" ] = GRAPHENE_MAX_MARKET_FEE_PERCENT; @@ -65,7 +61,6 @@ fc::variant_object get_config() result[ "GRAPHENE_DEFAULT_FORCE_SETTLEMENT_OFFSET" ] = GRAPHENE_DEFAULT_FORCE_SETTLEMENT_OFFSET; result[ "GRAPHENE_DEFAULT_FORCE_SETTLEMENT_MAX_VOLUME" ] = GRAPHENE_DEFAULT_FORCE_SETTLEMENT_MAX_VOLUME; result[ "GRAPHENE_DEFAULT_PRICE_FEED_LIFETIME" ] = GRAPHENE_DEFAULT_PRICE_FEED_LIFETIME; - result[ "GRAPHENE_MAX_FEED_PRODUCERS" ] = GRAPHENE_MAX_FEED_PRODUCERS; result[ "GRAPHENE_DEFAULT_MAX_AUTHORITY_MEMBERSHIP" ] = GRAPHENE_DEFAULT_MAX_AUTHORITY_MEMBERSHIP; result[ "GRAPHENE_DEFAULT_MAX_ASSET_WHITELIST_AUTHORITIES" ] = GRAPHENE_DEFAULT_MAX_ASSET_WHITELIST_AUTHORITIES; result[ "GRAPHENE_DEFAULT_MAX_ASSET_FEED_PUBLISHERS" ] = GRAPHENE_DEFAULT_MAX_ASSET_FEED_PUBLISHERS; @@ -81,32 +76,25 @@ fc::variant_object get_config() result[ "GRAPHENE_DEFAULT_COMMITTEE_PROPOSAL_REVIEW_PERIOD_SEC" ] = GRAPHENE_DEFAULT_COMMITTEE_PROPOSAL_REVIEW_PERIOD_SEC; result[ "GRAPHENE_DEFAULT_NETWORK_PERCENT_OF_FEE" ] = GRAPHENE_DEFAULT_NETWORK_PERCENT_OF_FEE; result[ "GRAPHENE_DEFAULT_LIFETIME_REFERRER_PERCENT_OF_FEE" ] = GRAPHENE_DEFAULT_LIFETIME_REFERRER_PERCENT_OF_FEE; - result[ "GRAPHENE_DEFAULT_MAX_BULK_DISCOUNT_PERCENT" ] = GRAPHENE_DEFAULT_MAX_BULK_DISCOUNT_PERCENT; - result[ "GRAPHENE_DEFAULT_BULK_DISCOUNT_THRESHOLD_MIN" ] = GRAPHENE_DEFAULT_BULK_DISCOUNT_THRESHOLD_MIN; - result[ "GRAPHENE_DEFAULT_BULK_DISCOUNT_THRESHOLD_MAX" ] = 
GRAPHENE_DEFAULT_BULK_DISCOUNT_THRESHOLD_MAX; result[ "GRAPHENE_DEFAULT_CASHBACK_VESTING_PERIOD_SEC" ] = GRAPHENE_DEFAULT_CASHBACK_VESTING_PERIOD_SEC; result[ "GRAPHENE_DEFAULT_CASHBACK_VESTING_THRESHOLD" ] = GRAPHENE_DEFAULT_CASHBACK_VESTING_THRESHOLD; result[ "GRAPHENE_DEFAULT_BURN_PERCENT_OF_FEE" ] = GRAPHENE_DEFAULT_BURN_PERCENT_OF_FEE; - result[ "GRAPHENE_WITNESS_PAY_PERCENT_PRECISION" ] = GRAPHENE_WITNESS_PAY_PERCENT_PRECISION; result[ "GRAPHENE_DEFAULT_MAX_ASSERT_OPCODE" ] = GRAPHENE_DEFAULT_MAX_ASSERT_OPCODE; result[ "GRAPHENE_DEFAULT_FEE_LIQUIDATION_THRESHOLD" ] = GRAPHENE_DEFAULT_FEE_LIQUIDATION_THRESHOLD; result[ "GRAPHENE_DEFAULT_ACCOUNTS_PER_FEE_SCALE" ] = GRAPHENE_DEFAULT_ACCOUNTS_PER_FEE_SCALE; result[ "GRAPHENE_DEFAULT_ACCOUNT_FEE_SCALE_BITSHIFTS" ] = GRAPHENE_DEFAULT_ACCOUNT_FEE_SCALE_BITSHIFTS; result[ "GRAPHENE_MAX_WORKER_NAME_LENGTH" ] = GRAPHENE_MAX_WORKER_NAME_LENGTH; result[ "GRAPHENE_MAX_URL_LENGTH" ] = GRAPHENE_MAX_URL_LENGTH; - result[ "GRAPHENE_NEAR_SCHEDULE_CTR_IV" ] = GRAPHENE_NEAR_SCHEDULE_CTR_IV; - result[ "GRAPHENE_FAR_SCHEDULE_CTR_IV" ] = GRAPHENE_FAR_SCHEDULE_CTR_IV; result[ "GRAPHENE_CORE_ASSET_CYCLE_RATE" ] = GRAPHENE_CORE_ASSET_CYCLE_RATE; result[ "GRAPHENE_CORE_ASSET_CYCLE_RATE_BITS" ] = GRAPHENE_CORE_ASSET_CYCLE_RATE_BITS; result[ "GRAPHENE_DEFAULT_WITNESS_PAY_PER_BLOCK" ] = GRAPHENE_DEFAULT_WITNESS_PAY_PER_BLOCK; result[ "GRAPHENE_DEFAULT_WITNESS_PAY_VESTING_SECONDS" ] = GRAPHENE_DEFAULT_WITNESS_PAY_VESTING_SECONDS; result[ "GRAPHENE_DEFAULT_WORKER_BUDGET_PER_DAY" ] = GRAPHENE_DEFAULT_WORKER_BUDGET_PER_DAY; - result[ "GRAPHENE_MAX_INTEREST_APR" ] = GRAPHENE_MAX_INTEREST_APR; - result[ "GRAPHENE_COMMITTEE_ACCOUNT" ] = GRAPHENE_COMMITTEE_ACCOUNT; - result[ "GRAPHENE_WITNESS_ACCOUNT" ] = GRAPHENE_WITNESS_ACCOUNT; - result[ "GRAPHENE_RELAXED_COMMITTEE_ACCOUNT" ] = GRAPHENE_RELAXED_COMMITTEE_ACCOUNT; - result[ "GRAPHENE_NULL_ACCOUNT" ] = GRAPHENE_NULL_ACCOUNT; - result[ "GRAPHENE_TEMP_ACCOUNT" ] = GRAPHENE_TEMP_ACCOUNT; + result[ "GRAPHENE_COMMITTEE_ACCOUNT" ] = fc::variant(GRAPHENE_COMMITTEE_ACCOUNT, GRAPHENE_MAX_NESTED_OBJECTS); + result[ "GRAPHENE_WITNESS_ACCOUNT" ] = fc::variant(GRAPHENE_WITNESS_ACCOUNT, GRAPHENE_MAX_NESTED_OBJECTS); + result[ "GRAPHENE_RELAXED_COMMITTEE_ACCOUNT" ] = fc::variant(GRAPHENE_RELAXED_COMMITTEE_ACCOUNT, GRAPHENE_MAX_NESTED_OBJECTS); + result[ "GRAPHENE_NULL_ACCOUNT" ] = fc::variant(GRAPHENE_NULL_ACCOUNT, GRAPHENE_MAX_NESTED_OBJECTS); + result[ "GRAPHENE_TEMP_ACCOUNT" ] = fc::variant(GRAPHENE_TEMP_ACCOUNT, GRAPHENE_MAX_NESTED_OBJECTS); return result; } diff --git a/libraries/chain/hardfork.d/23.hf b/libraries/chain/hardfork.d/23.hf new file mode 100644 index 0000000000..a44b070590 --- /dev/null +++ b/libraries/chain/hardfork.d/23.hf @@ -0,0 +1,4 @@ +// Issue #23: Withdrawal claims made before the first withdrawal period are incorrectly allowed +#ifndef HARDFORK_23_TIME +#define HARDFORK_23_TIME (fc::time_point_sec( 1512747600 )) +#endif diff --git a/libraries/chain/hardfork.d/385.hf b/libraries/chain/hardfork.d/385.hf new file mode 100644 index 0000000000..43f8ead0f8 --- /dev/null +++ b/libraries/chain/hardfork.d/385.hf @@ -0,0 +1,4 @@ +// #385 October 23 enforce PARENT.CHILD and allow short names +#ifndef HARDFORK_385_TIME +#define HARDFORK_385_TIME (fc::time_point_sec( 1445558400 )) +#endif diff --git a/libraries/chain/hardfork.d/516.hf b/libraries/chain/hardfork.d/516.hf index 445e287110..8085972402 100644 --- a/libraries/chain/hardfork.d/516.hf +++ b/libraries/chain/hardfork.d/516.hf @@ -1,4 +1,4 @@ // #516 Special 
authorities #ifndef HARDFORK_516_TIME -#define HARDFORK_516_TIME (fc::time_point_sec( 1455127200 )) +#define HARDFORK_516_TIME (fc::time_point_sec( 1456250400 )) #endif diff --git a/libraries/chain/hardfork.d/533.hf b/libraries/chain/hardfork.d/533.hf index 3830028b14..32d8ede8e8 100644 --- a/libraries/chain/hardfork.d/533.hf +++ b/libraries/chain/hardfork.d/533.hf @@ -1,4 +1,4 @@ // #533 Improve vote counting implementation #ifndef HARDFORK_533_TIME -#define HARDFORK_533_TIME (fc::time_point_sec( 1455127200 )) +#define HARDFORK_533_TIME (fc::time_point_sec( 1456250400 )) #endif diff --git a/libraries/chain/hardfork.d/538.hf b/libraries/chain/hardfork.d/538.hf index 68da2c43d5..99a27537b1 100644 --- a/libraries/chain/hardfork.d/538.hf +++ b/libraries/chain/hardfork.d/538.hf @@ -1,4 +1,4 @@ // #538 Buyback accounts #ifndef HARDFORK_538_TIME -#define HARDFORK_538_TIME (fc::time_point_sec( 1455127200 )) +#define HARDFORK_538_TIME (fc::time_point_sec( 1456250400 )) #endif diff --git a/libraries/chain/hardfork.d/555.hf b/libraries/chain/hardfork.d/555.hf index 28959f1151..45ef0473d6 100644 --- a/libraries/chain/hardfork.d/555.hf +++ b/libraries/chain/hardfork.d/555.hf @@ -1,4 +1,4 @@ // #555 Buyback accounts #ifndef HARDFORK_555_TIME -#define HARDFORK_555_TIME (fc::time_point_sec( 1455127200 )) +#define HARDFORK_555_TIME (fc::time_point_sec( 1456250400 )) #endif diff --git a/libraries/chain/hardfork.d/563.hf b/libraries/chain/hardfork.d/563.hf index 892214ed9e..001563ca27 100644 --- a/libraries/chain/hardfork.d/563.hf +++ b/libraries/chain/hardfork.d/563.hf @@ -1,4 +1,4 @@ // #563 Stealth fee routing #ifndef HARDFORK_563_TIME -#define HARDFORK_563_TIME (fc::time_point_sec( 1455127200 )) +#define HARDFORK_563_TIME (fc::time_point_sec( 1456250400 )) #endif diff --git a/libraries/chain/hardfork.d/572.hf b/libraries/chain/hardfork.d/572.hf index f054225fe5..a466ef72c5 100644 --- a/libraries/chain/hardfork.d/572.hf +++ b/libraries/chain/hardfork.d/572.hf @@ -1,4 +1,4 @@ // #572 Allow asset to update permission flags when no supply exists #ifndef HARDFORK_572_TIME -#define HARDFORK_572_TIME (fc::time_point_sec( 1450378800 )) +#define HARDFORK_572_TIME (fc::time_point_sec( 1456250400 )) #endif diff --git a/libraries/chain/hardfork.d/599.hf b/libraries/chain/hardfork.d/599.hf index 71f7e94e33..6249101d43 100644 --- a/libraries/chain/hardfork.d/599.hf +++ b/libraries/chain/hardfork.d/599.hf @@ -1,4 +1,4 @@ // #599 Unpacking of extension is incorrect #ifndef HARDFORK_599_TIME -#define HARDFORK_599_TIME (fc::time_point_sec( 1458061200 )) +#define HARDFORK_599_TIME (fc::time_point_sec( 1459789200 )) #endif diff --git a/libraries/chain/hardfork.d/607.hf b/libraries/chain/hardfork.d/607.hf index 135619c2a3..77c8c9e09f 100644 --- a/libraries/chain/hardfork.d/607.hf +++ b/libraries/chain/hardfork.d/607.hf @@ -1,4 +1,4 @@ // #607 Disable negative voting on workers #ifndef HARDFORK_607_TIME -#define HARDFORK_607_TIME (fc::time_point_sec( 1458061200 )) +#define HARDFORK_607_TIME (fc::time_point_sec( 1458752400 )) #endif diff --git a/libraries/chain/hardfork.d/613.hf b/libraries/chain/hardfork.d/613.hf index 74c740fb73..9978d33c44 100644 --- a/libraries/chain/hardfork.d/613.hf +++ b/libraries/chain/hardfork.d/613.hf @@ -1,4 +1,4 @@ // #613 Deprecate annual membership #ifndef HARDFORK_613_TIME -#define HARDFORK_613_TIME (fc::time_point_sec( 1458061200 )) +#define HARDFORK_613_TIME (fc::time_point_sec( 1458752400 )) #endif diff --git a/libraries/chain/hardfork.d/615.hf b/libraries/chain/hardfork.d/615.hf index 
a5599bbcf8..ac0535dc66 100644 --- a/libraries/chain/hardfork.d/615.hf +++ b/libraries/chain/hardfork.d/615.hf @@ -1,4 +1,4 @@ // #615 Fix price feed expiration check, so websocket server will never spam too much data #ifndef HARDFORK_615_TIME -#define HARDFORK_615_TIME (fc::time_point_sec( 1457550000 )) +#define HARDFORK_615_TIME (fc::time_point_sec( 1458752400 )) #endif diff --git a/libraries/chain/hardfork.d/CORE_1040.hf b/libraries/chain/hardfork.d/CORE_1040.hf new file mode 100644 index 0000000000..1689f1e793 --- /dev/null +++ b/libraries/chain/hardfork.d/CORE_1040.hf @@ -0,0 +1,4 @@ +// bitshares-core issue #1040 Remove temp-account balance check +#ifndef HARDFORK_CORE_1040_TIME +#define HARDFORK_CORE_1040_TIME (fc::time_point_sec( 1532008920 )) // Thu, 19 Jul 2018 14:02:00 UTC +#endif diff --git a/libraries/chain/hardfork.d/CORE_1479.hf b/libraries/chain/hardfork.d/CORE_1479.hf new file mode 100644 index 0000000000..2f8ee807e3 --- /dev/null +++ b/libraries/chain/hardfork.d/CORE_1479.hf @@ -0,0 +1,4 @@ +// bitshares-core issue #1479 nodes crashing on self-approving proposal +#ifndef HARDFORK_CORE_1479_TIME +#define HARDFORK_CORE_1479_TIME (fc::time_point_sec( 1545436800 )) // 2018-12-22T00:00:00Z +#endif diff --git a/libraries/chain/hardfork.d/CORE_184.hf b/libraries/chain/hardfork.d/CORE_184.hf new file mode 100644 index 0000000000..1bb6748125 --- /dev/null +++ b/libraries/chain/hardfork.d/CORE_184.hf @@ -0,0 +1,4 @@ +// bitshares-core issue #184 Fix "Potential something-for-nothing fill bug" +#ifndef HARDFORK_CORE_184_TIME +#define HARDFORK_CORE_184_TIME (fc::time_point_sec( 1532008920 )) // Thu, 19 Jul 2018 14:02:00 UTC +#endif diff --git a/libraries/chain/hardfork.d/CORE_188.hf b/libraries/chain/hardfork.d/CORE_188.hf new file mode 100644 index 0000000000..8c1be5c4bc --- /dev/null +++ b/libraries/chain/hardfork.d/CORE_188.hf @@ -0,0 +1,4 @@ +// #188 Add operation to allow claiming of funds in an asset's fee pool +#ifndef HARDFORK_CORE_188_TIME +#define HARDFORK_CORE_188_TIME (fc::time_point_sec( 1532008920 )) // Thu, 19 Jul 2018 14:02:00 UTC +#endif diff --git a/libraries/chain/hardfork.d/CORE_199.hf b/libraries/chain/hardfork.d/CORE_199.hf new file mode 100644 index 0000000000..a909511cc9 --- /dev/null +++ b/libraries/chain/hardfork.d/CORE_199.hf @@ -0,0 +1,4 @@ +// bitshares-core #199 Require owner key for change of asset-issuer (new operation) +#ifndef HARDFORK_CORE_199_TIME +#define HARDFORK_CORE_199_TIME (fc::time_point_sec( 1532008920 )) // Thu, 19 Jul 2018 14:02:00 UTC +#endif diff --git a/libraries/chain/hardfork.d/CORE_214.hf b/libraries/chain/hardfork.d/CORE_214.hf new file mode 100644 index 0000000000..bc51065773 --- /dev/null +++ b/libraries/chain/hardfork.d/CORE_214.hf @@ -0,0 +1,4 @@ +// bitshares-core #214 Proposal cannot contain proposal_update_operation +#ifndef HARDFORK_CORE_214_TIME +#define HARDFORK_CORE_214_TIME (fc::time_point_sec( 1532008920 )) // Thu, 19 Jul 2018 14:02:00 UTC +#endif diff --git a/libraries/chain/hardfork.d/CORE_216.hf b/libraries/chain/hardfork.d/CORE_216.hf new file mode 100644 index 0000000000..4e58108a00 --- /dev/null +++ b/libraries/chain/hardfork.d/CORE_216.hf @@ -0,0 +1,4 @@ +// bitshares-core #216 Process to reset a Smartcoin after a Black Swan +#ifndef HARDFORK_CORE_216_TIME +#define HARDFORK_CORE_216_TIME (fc::time_point_sec( 1512747600 )) +#endif diff --git a/libraries/chain/hardfork.d/CORE_338.hf b/libraries/chain/hardfork.d/CORE_338.hf new file mode 100644 index 0000000000..2584a451d4 --- /dev/null +++ 
b/libraries/chain/hardfork.d/CORE_338.hf @@ -0,0 +1,4 @@ +// bitshares-core issue #338 Fix "margin call order fills at price of matching limit_order" +#ifndef HARDFORK_CORE_338_TIME +#define HARDFORK_CORE_338_TIME (fc::time_point_sec( 1532008920 )) // Thu, 19 Jul 2018 14:02:00 UTC +#endif diff --git a/libraries/chain/hardfork.d/CORE_342.hf b/libraries/chain/hardfork.d/CORE_342.hf new file mode 100644 index 0000000000..85c874530f --- /dev/null +++ b/libraries/chain/hardfork.d/CORE_342.hf @@ -0,0 +1,5 @@ +// bitshares-core issue #342 +// Mitigate rounding issue when matching orders +#ifndef HARDFORK_CORE_342_TIME +#define HARDFORK_CORE_342_TIME (fc::time_point_sec( 1532008920 )) // Thu, 19 Jul 2018 14:02:00 UTC +#endif diff --git a/libraries/chain/hardfork.d/CORE_343.hf b/libraries/chain/hardfork.d/CORE_343.hf new file mode 100644 index 0000000000..b6e46675ae --- /dev/null +++ b/libraries/chain/hardfork.d/CORE_343.hf @@ -0,0 +1,5 @@ +// bitshares-core issue #343 +// Fix "Inconsistent sorting of call orders between matching against a limit order and a force settle order" +#ifndef HARDFORK_CORE_343_TIME +#define HARDFORK_CORE_343_TIME (fc::time_point_sec( 1532008920 )) // Thu, 19 Jul 2018 14:02:00 UTC +#endif diff --git a/libraries/chain/hardfork.d/CORE_429.hf b/libraries/chain/hardfork.d/CORE_429.hf new file mode 100644 index 0000000000..2d9300f3ff --- /dev/null +++ b/libraries/chain/hardfork.d/CORE_429.hf @@ -0,0 +1,4 @@ +// bitshares-core #429 rounding issue when creating assets +#ifndef HARDFORK_CORE_429_TIME +#define HARDFORK_CORE_429_TIME (fc::time_point_sec( 1512747600 )) +#endif diff --git a/libraries/chain/hardfork.d/CORE_453.hf b/libraries/chain/hardfork.d/CORE_453.hf new file mode 100644 index 0000000000..15832f352e --- /dev/null +++ b/libraries/chain/hardfork.d/CORE_453.hf @@ -0,0 +1,4 @@ +// bitshares-core issue #453 Fix "Multiple limit order and call order matching issue" +#ifndef HARDFORK_CORE_453_TIME +#define HARDFORK_CORE_453_TIME (fc::time_point_sec( 1532008920 )) // Thu, 19 Jul 2018 14:02:00 UTC +#endif diff --git a/libraries/chain/hardfork.d/CORE_518.hf b/libraries/chain/hardfork.d/CORE_518.hf new file mode 100644 index 0000000000..ff4cec54bd --- /dev/null +++ b/libraries/chain/hardfork.d/CORE_518.hf @@ -0,0 +1,4 @@ +// bitshares-core issue #518 Clean up bitasset_data during maintenance +#ifndef HARDFORK_CORE_518_TIME +#define HARDFORK_CORE_518_TIME (fc::time_point_sec( 1532008920 )) // Thu, 19 Jul 2018 14:02:00 UTC +#endif diff --git a/libraries/chain/hardfork.d/CORE_583.hf b/libraries/chain/hardfork.d/CORE_583.hf new file mode 100644 index 0000000000..67589efdd3 --- /dev/null +++ b/libraries/chain/hardfork.d/CORE_583.hf @@ -0,0 +1,4 @@ +// bitshares-core issue #583 Always allow updating a call order to higher collateral ratio +#ifndef HARDFORK_CORE_583_TIME +#define HARDFORK_CORE_583_TIME (fc::time_point_sec( 1532008920 )) // Thu, 19 Jul 2018 14:02:00 UTC +#endif diff --git a/libraries/chain/hardfork.d/CORE_588.hf b/libraries/chain/hardfork.d/CORE_588.hf new file mode 100644 index 0000000000..87cd88fd0c --- /dev/null +++ b/libraries/chain/hardfork.d/CORE_588.hf @@ -0,0 +1,4 @@ +// Issue #588: Virtual operations should be excluded from transactions +#ifndef HARDFORK_CORE_588_TIME +#define HARDFORK_CORE_588_TIME (fc::time_point_sec( 1532008920 )) // Thu, 19 Jul 2018 14:02:00 UTC +#endif diff --git a/libraries/chain/hardfork.d/CORE_604.hf b/libraries/chain/hardfork.d/CORE_604.hf new file mode 100644 index 0000000000..7fd077b502 --- /dev/null +++ 
b/libraries/chain/hardfork.d/CORE_604.hf @@ -0,0 +1,5 @@ +// bitshares-core issue #604 +// Implement BSIP 26: refund order creation fee in original paid asset when order is cancelled +#ifndef HARDFORK_CORE_604_TIME +#define HARDFORK_CORE_604_TIME (fc::time_point_sec( 1532008920 )) // Thu, 19 Jul 2018 14:02:00 UTC +#endif diff --git a/libraries/chain/hardfork.d/CORE_606.hf b/libraries/chain/hardfork.d/CORE_606.hf new file mode 100644 index 0000000000..67c27d7488 --- /dev/null +++ b/libraries/chain/hardfork.d/CORE_606.hf @@ -0,0 +1,4 @@ +// bitshares-core issue #606 Fix "Undercollateralized short positions should be called regardless of asks" +#ifndef HARDFORK_CORE_606_TIME +#define HARDFORK_CORE_606_TIME (fc::time_point_sec( 1532008920 )) // Thu, 19 Jul 2018 14:02:00 UTC +#endif diff --git a/libraries/chain/hardfork.d/CORE_620.hf b/libraries/chain/hardfork.d/CORE_620.hf new file mode 100644 index 0000000000..d322b8c934 --- /dev/null +++ b/libraries/chain/hardfork.d/CORE_620.hf @@ -0,0 +1,4 @@ +// bitshares-core issue #620 Allow numbers at the end of asset symbol +#ifndef HARDFORK_CORE_620_TIME +#define HARDFORK_CORE_620_TIME (fc::time_point_sec( 1532008920 )) // Thu, 19 Jul 2018 14:02:00 UTC +#endif diff --git a/libraries/chain/hardfork.d/CORE_625.hf b/libraries/chain/hardfork.d/CORE_625.hf new file mode 100644 index 0000000000..260106a89f --- /dev/null +++ b/libraries/chain/hardfork.d/CORE_625.hf @@ -0,0 +1,4 @@ +// bitshares-core issue #625 Fix "Potential erratic order matching issue involving margin call orders" +#ifndef HARDFORK_CORE_625_TIME +#define HARDFORK_CORE_625_TIME (fc::time_point_sec( 1532008920 )) // Thu, 19 Jul 2018 14:02:00 UTC +#endif diff --git a/libraries/chain/hardfork.d/CORE_834.hf b/libraries/chain/hardfork.d/CORE_834.hf new file mode 100644 index 0000000000..41fd87af24 --- /dev/null +++ b/libraries/chain/hardfork.d/CORE_834.hf @@ -0,0 +1,4 @@ +// bitshares-core issue #834 "BSIP38: add target CR option to short positions" +#ifndef HARDFORK_CORE_834_TIME +#define HARDFORK_CORE_834_TIME (fc::time_point_sec( 1532008920 )) // Thu, 19 Jul 2018 14:02:00 UTC +#endif diff --git a/libraries/chain/hardfork.d/CORE_868_890.hf b/libraries/chain/hardfork.d/CORE_868_890.hf new file mode 100644 index 0000000000..4067e1b16f --- /dev/null +++ b/libraries/chain/hardfork.d/CORE_868_890.hf @@ -0,0 +1,5 @@ +// bitshares-core issue #868 Clear price feed data after updated a bitAsset's backing asset ID +// bitshares-core issue #890 Update median feeds after feed_lifetime_sec changed +#ifndef HARDFORK_CORE_868_890_TIME +#define HARDFORK_CORE_868_890_TIME (fc::time_point_sec( 1532008920 )) // Thu, 19 Jul 2018 14:02:00 UTC +#endif diff --git a/libraries/chain/hardfork.d/CORE_922_931.hf b/libraries/chain/hardfork.d/CORE_922_931.hf new file mode 100644 index 0000000000..f821f8fe9f --- /dev/null +++ b/libraries/chain/hardfork.d/CORE_922_931.hf @@ -0,0 +1,5 @@ +// bitshares-core issue #922 Missing checks when updating an asset's bitasset_data +// bitshares-core issue #931 Changing backing asset ID runs some checks against the old value instead of the new +#ifndef HARDFORK_CORE_922_931_TIME +#define HARDFORK_CORE_922_931_TIME (fc::time_point_sec( 1532008920 )) // Thu, 19 Jul 2018 14:02:00 UTC +#endif diff --git a/libraries/chain/hardfork.d/CORE_935.hf b/libraries/chain/hardfork.d/CORE_935.hf new file mode 100644 index 0000000000..44a979a2cb --- /dev/null +++ b/libraries/chain/hardfork.d/CORE_935.hf @@ -0,0 +1,4 @@ +// bitshares-core issue #935 Call check_call_orders not only when settlement_price 
changed +#ifndef HARDFORK_CORE_935_TIME +#define HARDFORK_CORE_935_TIME (fc::time_point_sec( 1532008920 )) // Thu, 19 Jul 2018 14:02:00 UTC +#endif diff --git a/libraries/chain/hardfork.d/CORE_942.hf b/libraries/chain/hardfork.d/CORE_942.hf new file mode 100644 index 0000000000..085f3cfcd3 --- /dev/null +++ b/libraries/chain/hardfork.d/CORE_942.hf @@ -0,0 +1,5 @@ +// bitshares-core issue #942 +// Incorrectly checking asset authorization for withdraw_from_account in withdraw_permission_claim_evaluator +#ifndef HARDFORK_CORE_942_TIME +#define HARDFORK_CORE_942_TIME (fc::time_point_sec( 1532008920 )) // Thu, 19 Jul 2018 14:02:00 UTC +#endif diff --git a/libraries/chain/hardfork.d/core-143.hf b/libraries/chain/hardfork.d/core-143.hf new file mode 100644 index 0000000000..ded673f855 --- /dev/null +++ b/libraries/chain/hardfork.d/core-143.hf @@ -0,0 +1,4 @@ +// #143 Require voted entities to exist +#ifndef HARDFORK_CORE_143_TIME +#define HARDFORK_CORE_143_TIME (fc::time_point_sec( 1512747600 )) +#endif diff --git a/libraries/chain/include/graphene/chain/account_object.hpp b/libraries/chain/include/graphene/chain/account_object.hpp index faf59e22af..cb52552eae 100644 --- a/libraries/chain/include/graphene/chain/account_object.hpp +++ b/libraries/chain/include/graphene/chain/account_object.hpp @@ -46,11 +46,16 @@ namespace graphene { namespace chain { account_id_type owner; + string name; ///< redundantly store account name here for better maintenance performance + /** * Keep the most recent operation as a root pointer to a linked list of the transaction history. */ account_transaction_history_id_type most_recent_op; - uint32_t total_ops = 0; + /** Total operations related to this account. */ + uint64_t total_ops = 0; + /** Total operations related to this account that has been removed from the database. */ + uint64_t removed_ops = 0; /** * When calculating votes it is necessary to know how much is stored in orders (and thus unavailable for @@ -59,6 +64,20 @@ namespace graphene { namespace chain { */ share_type total_core_in_orders; + share_type core_in_balance = 0; ///< redundantly store core balance here for better maintenance performance + + bool has_cashback_vb = false; ///< redundantly store this for better maintenance performance + + bool is_voting = false; ///< redundately store whether this account is voting for better maintenance performance + + time_point_sec last_vote_time; // add last time voted + + /// Whether this account owns some CORE asset and is voting + inline bool has_some_core_voting() const + { + return is_voting && ( total_core_in_orders > 0 || core_in_balance > 0 || has_cashback_vb ); + } + /** * Tracks the total fees paid by this account for the purpose of calculating bulk discounts. 
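The redundant fields added to account_statistics_object let the maintenance pass decide locally whether an account is voting with any CORE. A stripped-down sketch of that test, with share_type approximated by long long and invented values:

```cpp
#include <iostream>

struct account_stats_sketch
{
   long long total_core_in_orders = 0;
   long long core_in_balance      = 0;
   bool      has_cashback_vb      = false;
   bool      is_voting            = false;

   bool has_some_core_voting() const
   {
      return is_voting &&
             ( total_core_in_orders > 0 || core_in_balance > 0 || has_cashback_vb );
   }
};

int main()
{
   account_stats_sketch idle;            // not voting: maintenance can skip it
   account_stats_sketch voter;
   voter.is_voting       = true;
   voter.core_in_balance = 1000;         // voting with some CORE in balance

   std::cout << idle.has_some_core_voting() << " "
             << voter.has_some_core_voting() << "\n";   // prints "0 1"
}
```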
*/ @@ -79,6 +98,12 @@ namespace graphene { namespace chain { */ share_type pending_vested_fees; + /// Whether this account has pending fees, no matter vested or not + inline bool has_pending_fees() const { return pending_fees > 0 || pending_vested_fees > 0; } + + /// Whether need to process this account during the maintenance interval + inline bool need_maintenance() const { return has_some_core_voting() || has_pending_fees(); } + /// @brief Split up and pay out @ref pending_fees and @ref pending_vested_fees void process_fees(const account_object& a, database& d) const; @@ -104,6 +129,7 @@ namespace graphene { namespace chain { account_id_type owner; asset_id_type asset_type; share_type balance; + bool maintenance_flag = false; ///< Whether need to process this balance object in maintenance interval asset get_balance()const { return asset(balance, asset_type); } void adjust_balance(const asset& delta); @@ -276,20 +302,20 @@ namespace graphene { namespace chain { /** given an account or key, map it to the set of accounts that reference it in an active or owner authority */ - map< account_id_type, set > account_to_account_memberships; - map< public_key_type, set > account_to_key_memberships; + map< account_id_type, set > account_to_account_memberships; + map< public_key_type, set, pubkey_comparator > account_to_key_memberships; /** some accounts use address authorities in the genesis block */ - map< address, set > account_to_address_memberships; + map< address, set > account_to_address_memberships; protected: - set get_account_members( const account_object& a )const; - set get_key_members( const account_object& a )const; - set
get_address_members( const account_object& a )const; + set<account_id_type> get_account_members( const account_object& a )const; + set<public_key_type> get_key_members( const account_object& a )const; + set<address> get_address_members( const account_object& a )const; + - set<account_id_type> before_account_members; - set<public_key_type> before_key_members; - set<address> before_address_members; + set<account_id_type> before_account_members; + set<public_key_type> before_key_members; + set<address>
before_address_members; }; @@ -309,8 +335,32 @@ namespace graphene { namespace chain { map< account_id_type, set > referred_by; }; - struct by_account_asset; + /** + * @brief This secondary index will allow fast access to the balance objects + * that belonging to an account. + */ + class balances_by_account_index : public secondary_index + { + public: + virtual void object_inserted( const object& obj ) override; + virtual void object_removed( const object& obj ) override; + virtual void about_to_modify( const object& before ) override; + virtual void object_modified( const object& after ) override; + + const map< asset_id_type, const account_balance_object* >& get_account_balances( const account_id_type& acct )const; + const account_balance_object* get_account_balance( const account_id_type& acct, const asset_id_type& asset )const; + + private: + static const uint8_t bits; + static const uint64_t mask; + + /** Maps each account to its balance objects */ + vector< vector< map< asset_id_type, const account_balance_object* > > > balances; + std::stack< object_id_type > ids_being_modified; + }; + struct by_asset_balance; + struct by_maintenance_flag; /** * @ingroup object_index */ @@ -318,13 +368,8 @@ namespace graphene { namespace chain { account_balance_object, indexed_by< ordered_unique< tag, member< object, object_id_type, &object::id > >, - ordered_unique< tag, - composite_key< - account_balance_object, - member, - member - > - >, + ordered_non_unique< tag, + member< account_balance_object, bool, &account_balance_object::maintenance_flag > >, ordered_unique< tag, composite_key< account_balance_object, @@ -364,6 +409,33 @@ namespace graphene { namespace chain { */ typedef generic_index account_index; + struct by_owner; + struct by_maintenance_seq; + + /** + * @ingroup object_index + */ + typedef multi_index_container< + account_statistics_object, + indexed_by< + ordered_unique< tag, member< object, object_id_type, &object::id > >, + ordered_unique< tag, + member< account_statistics_object, account_id_type, &account_statistics_object::owner > >, + ordered_unique< tag, + composite_key< + account_statistics_object, + const_mem_fun, + member + > + > + > + > account_stats_multi_index_type; + + /** + * @ingroup object_index + */ + typedef generic_index account_stats_index; + }} FC_REFLECT_DERIVED( graphene::chain::account_object, @@ -380,14 +452,18 @@ FC_REFLECT_DERIVED( graphene::chain::account_object, FC_REFLECT_DERIVED( graphene::chain::account_balance_object, (graphene::db::object), - (owner)(asset_type)(balance) ) + (owner)(asset_type)(balance)(maintenance_flag) ) FC_REFLECT_DERIVED( graphene::chain::account_statistics_object, (graphene::chain::object), - (owner) + (owner)(name) (most_recent_op) - (total_ops) + (total_ops)(removed_ops) (total_core_in_orders) + (core_in_balance) + (has_cashback_vb) + (is_voting) + (last_vote_time) (lifetime_fees_paid) (pending_fees)(pending_vested_fees) ) diff --git a/libraries/chain/include/graphene/chain/asset_evaluator.hpp b/libraries/chain/include/graphene/chain/asset_evaluator.hpp index eb8d5789d5..e2573356b0 100644 --- a/libraries/chain/include/graphene/chain/asset_evaluator.hpp +++ b/libraries/chain/include/graphene/chain/asset_evaluator.hpp @@ -26,6 +26,9 @@ #include #include +#include +#include + namespace graphene { namespace chain { class asset_create_evaluator : public evaluator @@ -35,6 +38,13 @@ namespace graphene { namespace chain { void_result do_evaluate( const asset_create_operation& o ); object_id_type do_apply( const 
asset_create_operation& o ); + + /** override the default behavior defined by generic_evalautor which is to + * post the fee to fee_paying_account_stats.pending_fees + */ + virtual void pay_fee() override; + private: + bool fee_is_odd; }; class asset_issue_evaluator : public evaluator @@ -71,6 +81,17 @@ namespace graphene { namespace chain { const asset_object* asset_to_update = nullptr; }; + class asset_update_issuer_evaluator : public evaluator + { + public: + typedef asset_update_issuer_operation operation_type; + + void_result do_evaluate( const asset_update_issuer_operation& o ); + void_result do_apply( const asset_update_issuer_operation& o ); + + const asset_object* asset_to_update = nullptr; + }; + class asset_update_bitasset_evaluator : public evaluator { public: @@ -80,6 +101,7 @@ namespace graphene { namespace chain { void_result do_apply( const asset_update_bitasset_operation& o ); const asset_bitasset_data_object* bitasset_to_update = nullptr; + const asset_object* asset_to_update = nullptr; }; class asset_update_feed_producers_evaluator : public evaluator @@ -90,7 +112,7 @@ namespace graphene { namespace chain { void_result do_evaluate( const operation_type& o ); void_result do_apply( const operation_type& o ); - const asset_bitasset_data_object* bitasset_to_update = nullptr; + const asset_object* asset_to_update = nullptr; }; class asset_fund_fee_pool_evaluator : public evaluator @@ -133,7 +155,8 @@ namespace graphene { namespace chain { void_result do_evaluate( const asset_publish_feed_operation& o ); void_result do_apply( const asset_publish_feed_operation& o ); - std::map,price_feed> median_feed_values; + const asset_object* asset_ptr = nullptr; + const asset_bitasset_data_object* bitasset_ptr = nullptr; }; class asset_claim_fees_evaluator : public evaluator @@ -145,4 +168,13 @@ namespace graphene { namespace chain { void_result do_apply( const asset_claim_fees_operation& o ); }; + class asset_claim_pool_evaluator : public evaluator + { + public: + typedef asset_claim_pool_operation operation_type; + + void_result do_evaluate( const asset_claim_pool_operation& o ); + void_result do_apply( const asset_claim_pool_operation& o ); + }; + } } // graphene::chain diff --git a/libraries/chain/include/graphene/chain/asset_object.hpp b/libraries/chain/include/graphene/chain/asset_object.hpp index 28d5974eea..5f73e79ddd 100644 --- a/libraries/chain/include/graphene/chain/asset_object.hpp +++ b/libraries/chain/include/graphene/chain/asset_object.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. + * Copyright (c) 2017 Cryptonomex, Inc., and contributors. * * The MIT License * @@ -24,7 +24,6 @@ #pragma once #include #include -#include #include /** @@ -146,7 +145,12 @@ namespace graphene { namespace chain { template const asset_bitasset_data_object& bitasset_data(const DB& db)const - { assert(bitasset_data_id); return db.get(*bitasset_data_id); } + { + FC_ASSERT( bitasset_data_id.valid(), + "Asset ${a} (${id}) is not a market issued asset.", + ("a",this->symbol)("id",this->id) ); + return db.get( *bitasset_data_id ); + } template const asset_dynamic_data_object& dynamic_data(const DB& db)const @@ -172,6 +176,9 @@ namespace graphene { namespace chain { static const uint8_t space_id = implementation_ids; static const uint8_t type_id = impl_asset_bitasset_data_type; + /// The asset this object belong to + asset_id_type asset_id; + /// The tunable options for BitAssets are stored in this field. 
bitasset_options options; @@ -209,8 +216,27 @@ namespace graphene { namespace chain { share_type settlement_fund; ///@} + /// Track whether core_exchange_rate in corresponding asset_object has updated + bool asset_cer_updated = false; + + /// Track whether core exchange rate in current feed has updated + bool feed_cer_updated = false; + + /// Whether need to update core_exchange_rate in asset_object + bool need_to_update_cer() const + { + return ( ( feed_cer_updated || asset_cer_updated ) && !current_feed.core_exchange_rate.is_null() ); + } + + /// The time when @ref current_feed would expire time_point_sec feed_expiration_time()const - { return current_feed_publication_time + options.feed_lifetime_sec; } + { + uint32_t current_feed_seconds = current_feed_publication_time.sec_since_epoch(); + if( std::numeric_limits::max() - current_feed_seconds <= options.feed_lifetime_sec ) + return time_point_sec::maximum(); + else + return current_feed_publication_time + options.feed_lifetime_sec; + } bool feed_is_expired_before_hardfork_615(time_point_sec current_time)const { return feed_expiration_time() >= current_time; } bool feed_is_expired(time_point_sec current_time)const @@ -218,25 +244,47 @@ namespace graphene { namespace chain { void update_median_feeds(time_point_sec current_time); }; + // key extractor for short backing asset + struct bitasset_short_backing_asset_extractor + { + typedef asset_id_type result_type; + result_type operator() (const asset_bitasset_data_object& obj) const + { + return obj.options.short_backing_asset; + } + }; + + struct by_short_backing_asset; struct by_feed_expiration; + struct by_cer_update; + typedef multi_index_container< asset_bitasset_data_object, indexed_by< ordered_unique< tag, member< object, object_id_type, &object::id > >, - ordered_non_unique< tag, - const_mem_fun< asset_bitasset_data_object, time_point_sec, &asset_bitasset_data_object::feed_expiration_time > + ordered_non_unique< tag, bitasset_short_backing_asset_extractor >, + ordered_unique< tag, + composite_key< asset_bitasset_data_object, + const_mem_fun< asset_bitasset_data_object, time_point_sec, &asset_bitasset_data_object::feed_expiration_time >, + member< asset_bitasset_data_object, asset_id_type, &asset_bitasset_data_object::asset_id > + > + >, + ordered_non_unique< tag, + const_mem_fun< asset_bitasset_data_object, bool, &asset_bitasset_data_object::need_to_update_cer > > > > asset_bitasset_data_object_multi_index_type; - typedef flat_index asset_bitasset_data_index; + typedef generic_index asset_bitasset_data_index; struct by_symbol; struct by_type; + struct by_issuer; typedef multi_index_container< asset_object, indexed_by< ordered_unique< tag, member< object, object_id_type, &object::id > >, ordered_unique< tag, member >, + ordered_non_unique< tag, member >, ordered_unique< tag, composite_key< asset_object, const_mem_fun, @@ -253,6 +301,7 @@ FC_REFLECT_DERIVED( graphene::chain::asset_dynamic_data_object, (graphene::db::o (current_supply)(confidential_supply)(accumulated_fees)(fee_pool) ) FC_REFLECT_DERIVED( graphene::chain::asset_bitasset_data_object, (graphene::db::object), + (asset_id) (feeds) (current_feed) (current_feed_publication_time) @@ -261,6 +310,8 @@ FC_REFLECT_DERIVED( graphene::chain::asset_bitasset_data_object, (graphene::db:: (is_prediction_market) (settlement_price) (settlement_fund) + (asset_cer_updated) + (feed_cer_updated) ) FC_REFLECT_DERIVED( graphene::chain::asset_object, (graphene::db::object), diff --git a/libraries/chain/include/graphene/chain/block_database.hpp 
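feed_expiration_time() above saturates instead of wrapping when current_feed_publication_time is close to the 32-bit limit. A sketch of the same guard on raw epoch seconds (the sample timestamps are invented):

```cpp
#include <cstdint>
#include <iostream>
#include <limits>

int main()
{
   uint32_t publication = 4294967000u;  // seconds since epoch, near the limit
   uint32_t lifetime    = 86400;        // feed_lifetime_sec, one day

   uint32_t expiration;
   if( std::numeric_limits<uint32_t>::max() - publication <= lifetime )
      expiration = std::numeric_limits<uint32_t>::max();   // time_point_sec::maximum()
   else
      expiration = publication + lifetime;

   std::cout << "feed expires at " << expiration << "\n";
}
```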
b/libraries/chain/include/graphene/chain/block_database.hpp index d1f613c150..ccae46cf04 100644 --- a/libraries/chain/include/graphene/chain/block_database.hpp +++ b/libraries/chain/include/graphene/chain/block_database.hpp @@ -26,6 +26,8 @@ #include namespace graphene { namespace chain { + struct index_entry; + class block_database { public: @@ -43,7 +45,11 @@ namespace graphene { namespace chain { optional fetch_by_number( uint32_t block_num )const; optional last()const; optional last_id()const; + size_t blocks_current_position()const; + size_t total_block_size()const; private: + optional last_index_entry()const; + fc::path _index_filename; mutable std::fstream _blocks; mutable std::fstream _block_num_to_pos; }; diff --git a/libraries/chain/include/graphene/chain/config.hpp b/libraries/chain/include/graphene/chain/config.hpp index 74b7d2b54b..35594d94e7 100644 --- a/libraries/chain/include/graphene/chain/config.hpp +++ b/libraries/chain/include/graphene/chain/config.hpp @@ -26,14 +26,13 @@ #define GRAPHENE_SYMBOL "CORE" #define GRAPHENE_ADDRESS_PREFIX "GPH" -#define GRAPHENE_MIN_ACCOUNT_NAME_LENGTH 3 +#define GRAPHENE_MIN_ACCOUNT_NAME_LENGTH 1 #define GRAPHENE_MAX_ACCOUNT_NAME_LENGTH 63 #define GRAPHENE_MIN_ASSET_SYMBOL_LENGTH 3 #define GRAPHENE_MAX_ASSET_SYMBOL_LENGTH 16 #define GRAPHENE_MAX_SHARE_SUPPLY int64_t(1000000000000000ll) -#define GRAPHENE_MAX_PAY_RATE 10000 /* 100% */ #define GRAPHENE_MAX_SIG_CHECK_DEPTH 2 /** * Don't allow the committee_members to publish a limit that would @@ -45,7 +44,7 @@ #define GRAPHENE_DEFAULT_BLOCK_INTERVAL 5 /* seconds */ #define GRAPHENE_DEFAULT_MAX_TRANSACTION_SIZE 2048 -#define GRAPHENE_DEFAULT_MAX_BLOCK_SIZE (GRAPHENE_DEFAULT_MAX_TRANSACTION_SIZE*GRAPHENE_DEFAULT_BLOCK_INTERVAL*200000) +#define GRAPHENE_DEFAULT_MAX_BLOCK_SIZE (2*1000*1000) /* < 2 MiB (less than MAX_MESSAGE_SIZE in graphene/net/config.hpp) */ #define GRAPHENE_DEFAULT_MAX_TIME_UNTIL_EXPIRATION (60*60*24) // seconds, aka: 1 day #define GRAPHENE_DEFAULT_MAINTENANCE_INTERVAL (60*60*24) // seconds, aka: 1 day #define GRAPHENE_DEFAULT_MAINTENANCE_SKIP_SLOTS 3 // number of slots to skip for maintenance interval @@ -54,12 +53,9 @@ #define GRAPHENE_MAX_UNDO_HISTORY 10000 #define GRAPHENE_MIN_BLOCK_SIZE_LIMIT (GRAPHENE_MIN_TRANSACTION_SIZE_LIMIT*5) // 5 transactions per block -#define GRAPHENE_MIN_TRANSACTION_EXPIRATION_LIMIT (GRAPHENE_MAX_BLOCK_INTERVAL * 5) // 5 transactions per block #define GRAPHENE_BLOCKCHAIN_PRECISION uint64_t( 100000 ) #define GRAPHENE_BLOCKCHAIN_PRECISION_DIGITS 5 -#define GRAPHENE_DEFAULT_TRANSFER_FEE (1*GRAPHENE_BLOCKCHAIN_PRECISION) -#define GRAPHENE_MAX_INSTANCE_ID (uint64_t(-1)>>16) /** percentage fields are fixed point with a denominator of 10,000 */ #define GRAPHENE_100_PERCENT 10000 #define GRAPHENE_1_PERCENT (GRAPHENE_100_PERCENT/100) @@ -69,7 +65,6 @@ #define GRAPHENE_DEFAULT_FORCE_SETTLEMENT_OFFSET 0 ///< 1% #define GRAPHENE_DEFAULT_FORCE_SETTLEMENT_MAX_VOLUME (20* GRAPHENE_1_PERCENT) ///< 20% #define GRAPHENE_DEFAULT_PRICE_FEED_LIFETIME (60*60*24) ///< 1 day -#define GRAPHENE_MAX_FEED_PRODUCERS 200 #define GRAPHENE_DEFAULT_MAX_AUTHORITY_MEMBERSHIP 10 #define GRAPHENE_DEFAULT_MAX_ASSET_WHITELIST_AUTHORITIES 10 #define GRAPHENE_DEFAULT_MAX_ASSET_FEED_PUBLISHERS 10 @@ -96,13 +91,9 @@ #define GRAPHENE_DEFAULT_COMMITTEE_PROPOSAL_REVIEW_PERIOD_SEC (60*60*24*7*2) // Two weeks #define GRAPHENE_DEFAULT_NETWORK_PERCENT_OF_FEE (20*GRAPHENE_1_PERCENT) #define GRAPHENE_DEFAULT_LIFETIME_REFERRER_PERCENT_OF_FEE (30*GRAPHENE_1_PERCENT) -#define 
GRAPHENE_DEFAULT_MAX_BULK_DISCOUNT_PERCENT (50*GRAPHENE_1_PERCENT) -#define GRAPHENE_DEFAULT_BULK_DISCOUNT_THRESHOLD_MIN ( GRAPHENE_BLOCKCHAIN_PRECISION*int64_t(1000) ) -#define GRAPHENE_DEFAULT_BULK_DISCOUNT_THRESHOLD_MAX ( GRAPHENE_DEFAULT_BULK_DISCOUNT_THRESHOLD_MIN*int64_t(100) ) #define GRAPHENE_DEFAULT_CASHBACK_VESTING_PERIOD_SEC (60*60*24*365) ///< 1 year #define GRAPHENE_DEFAULT_CASHBACK_VESTING_THRESHOLD (GRAPHENE_BLOCKCHAIN_PRECISION*int64_t(100)) #define GRAPHENE_DEFAULT_BURN_PERCENT_OF_FEE (20*GRAPHENE_1_PERCENT) -#define GRAPHENE_WITNESS_PAY_PERCENT_PRECISION (1000000000) #define GRAPHENE_DEFAULT_MAX_ASSERT_OPCODE 1 #define GRAPHENE_DEFAULT_FEE_LIQUIDATION_THRESHOLD GRAPHENE_BLOCKCHAIN_PRECISION * 100; #define GRAPHENE_DEFAULT_ACCOUNTS_PER_FEE_SCALE 1000 @@ -113,18 +104,6 @@ #define GRAPHENE_MAX_URL_LENGTH 127 -// counter initialization values used to derive near and far future seeds for shuffling witnesses -// we use the fractional bits of sqrt(2) in hex -#define GRAPHENE_NEAR_SCHEDULE_CTR_IV ( (uint64_t( 0x6a09 ) << 0x30) \ - | (uint64_t( 0xe667 ) << 0x20) \ - | (uint64_t( 0xf3bc ) << 0x10) \ - | (uint64_t( 0xc908 ) ) ) - -// and the fractional bits of sqrt(3) in hex -#define GRAPHENE_FAR_SCHEDULE_CTR_IV ( (uint64_t( 0xbb67 ) << 0x30) \ - | (uint64_t( 0xae85 ) << 0x20) \ - | (uint64_t( 0x84ca ) << 0x10) \ - | (uint64_t( 0xa73b ) ) ) /** * every second, the fraction of burned core asset which cycles is @@ -139,12 +118,10 @@ #define GRAPHENE_DEFAULT_MINIMUM_FEEDS 7 -#define GRAPHENE_MAX_INTEREST_APR uint16_t( 10000 ) - #define GRAPHENE_RECENTLY_MISSED_COUNT_INCREMENT 4 #define GRAPHENE_RECENTLY_MISSED_COUNT_DECREMENT 3 -#define GRAPHENE_CURRENT_DB_VERSION "GPH2.5" +#define GRAPHENE_CURRENT_DB_VERSION "20190219" #define GRAPHENE_IRREVERSIBLE_THRESHOLD (70 * GRAPHENE_1_PERCENT) @@ -168,5 +145,6 @@ #define GRAPHENE_NULL_WITNESS (graphene::chain::witness_id_type(0)) ///@} -// hack for unit test -#define GRAPHENE_FBA_STEALTH_DESIGNATED_ASSET (asset_id_type(1)) +#define GRAPHENE_FBA_STEALTH_DESIGNATED_ASSET (asset_id_type(743)) + +#define GRAPHENE_MAX_NESTED_OBJECTS (200) diff --git a/libraries/chain/include/graphene/chain/database.hpp b/libraries/chain/include/graphene/chain/database.hpp index 1b721253e6..7fa190cb66 100644 --- a/libraries/chain/include/graphene/chain/database.hpp +++ b/libraries/chain/include/graphene/chain/database.hpp @@ -36,8 +36,6 @@ #include #include -#include - #include #include @@ -68,15 +66,13 @@ namespace graphene { namespace chain { skip_witness_signature = 1 << 0, ///< used while reindexing skip_transaction_signatures = 1 << 1, ///< used by non-witness nodes skip_transaction_dupe_check = 1 << 2, ///< used while reindexing - skip_fork_db = 1 << 3, ///< used while reindexing skip_block_size_check = 1 << 4, ///< used when applying locally generated transactions skip_tapos_check = 1 << 5, ///< used while reindexing -- note this skips expiration check as well - skip_authority_check = 1 << 6, ///< used while reindexing -- disables any checking of authority on transactions + // skip_authority_check = 1 << 6, ///< removed because effectively identical to skip_transaction_signatures skip_merkle_check = 1 << 7, ///< used while reindexing skip_assert_evaluation = 1 << 8, ///< used while reindexing skip_undo_history_check = 1 << 9, ///< used while reindexing - skip_witness_schedule_check = 1 << 10, ///< used while reindexing - skip_validate = 1 << 11 ///< used prior to checkpoint, skips validate() call on transaction + skip_witness_schedule_check = 1 << 10 ///< used 
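The remaining skip flags are still plain bit masks, so callers combine them with bitwise OR and test them with AND. A small illustrative sketch; the flag values mirror the enum above, the combination chosen is just an example:

```cpp
#include <cstdint>
#include <iostream>

enum skip_flags : uint32_t
{
   skip_nothing                = 0,
   skip_witness_signature      = 1 << 0,
   skip_transaction_signatures = 1 << 1,
   skip_transaction_dupe_check = 1 << 2,
   skip_tapos_check            = 1 << 5
};

int main()
{
   uint32_t skip = skip_witness_signature | skip_transaction_dupe_check; // e.g. while reindexing

   if( skip & skip_transaction_dupe_check )
      std::cout << "dupe check will be skipped\n";
   if( !( skip & skip_transaction_signatures ) )
      std::cout << "signatures will still be verified\n";
}
```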
while reindexing }; /** @@ -89,10 +85,12 @@ namespace graphene { namespace chain { * * @param data_dir Path to open or create database in * @param genesis_loader A callable object which returns the genesis state to initialize new databases on + * @param db_version a version string that changes when the internal database format and/or logic is modified */ void open( const fc::path& data_dir, - std::function genesis_loader ); + std::function genesis_loader, + const std::string& db_version ); /** * @brief Rebuild object graph from block history and open database @@ -100,7 +98,7 @@ namespace graphene { namespace chain { * This method may be called after or instead of @ref database::open, and will rebuild the object graph by * replaying blockchain history. When this method exits successfully, the database will be open. */ - void reindex(fc::path data_dir, const genesis_state_type& initial_allocation = genesis_state_type()); + void reindex(fc::path data_dir); /** * @brief wipe Delete database from disk, and potentially the raw chain as well. @@ -136,9 +134,9 @@ namespace graphene { namespace chain { bool before_last_checkpoint()const; bool push_block( const signed_block& b, uint32_t skip = skip_nothing ); - processed_transaction push_transaction( const signed_transaction& trx, uint32_t skip = skip_nothing ); + processed_transaction push_transaction( const precomputable_transaction& trx, uint32_t skip = skip_nothing ); bool _push_block( const signed_block& b ); - processed_transaction _push_transaction( const signed_transaction& trx ); + processed_transaction _push_transaction( const precomputable_transaction& trx ); ///@throws fc::exception if the proposed transaction fails to apply. processed_transaction push_proposal( const proposal_object& proposal ); @@ -193,12 +191,18 @@ namespace graphene { namespace chain { * Emitted after a block has been applied and committed. The callback * should not yield and should execute quickly. */ - fc::signal&)> changed_objects; + fc::signal&, const flat_set&)> new_objects; + + /** + * Emitted after a block has been applied and committed. The callback + * should not yield and should execute quickly. + */ + fc::signal&, const flat_set&)> changed_objects; /** this signal is emitted any time an object is removed and contains a * pointer to the last value of every object that was removed.
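 *
 * A minimal sketch of how a plugin or API server might attach to one of these
 * notification signals; the lambda signature below is an assumption, since the
 * signal template arguments are not legible in this copy of the diff:
 *
 *    db.removed_objects.connect(
 *       []( const vector<object_id_type>& ids,
 *           const vector<const object*>& removed,
 *           const flat_set<account_id_type>& impacted_accounts )
 *       {
 *          // inspect the removed objects here; as noted above, the callback
 *          // must not yield and should return quickly
 *       } );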
*/ - fc::signal&)> removed_objects; + fc::signal&, const vector&, const flat_set&)> removed_objects; //////////////////// db_witness_schedule.cpp //////////////////// @@ -244,11 +248,14 @@ namespace graphene { namespace chain { const chain_id_type& get_chain_id()const; const asset_object& get_core_asset()const; + const asset_dynamic_data_object& get_core_dynamic_data()const; const chain_property_object& get_chain_properties()const; const global_property_object& get_global_properties()const; const dynamic_global_property_object& get_dynamic_global_properties()const; const node_property_object& get_node_properties()const; const fee_schedule& current_fee_schedule()const; + const account_statistics_object& get_account_stats_by_owner( account_id_type owner )const; + const witness_schedule_object& get_witness_schedule_object()const; time_point_sec head_block_time()const; uint32_t head_block_num()const; @@ -329,8 +336,11 @@ namespace graphene { namespace chain { /// @{ @group Market Helpers void globally_settle_asset( const asset_object& bitasset, const price& settle_price ); - void cancel_order(const force_settlement_object& order, bool create_virtual_op = true); - void cancel_order(const limit_order_object& order, bool create_virtual_op = true); + void cancel_settle_order(const force_settlement_object& order, bool create_virtual_op = true); + void cancel_limit_order(const limit_order_object& order, bool create_virtual_op = true, bool skip_cancel_fee = false); + void revive_bitasset( const asset_object& bitasset ); + void cancel_bid(const collateral_bid_object& bid, bool create_virtual_op = true); + void execute_bid( const collateral_bid_object& bid, share_type debt_covered, share_type collateral_from_fund, const price_feed& current_feed ); /** * @brief Process a new limit order through the markets @@ -340,37 +350,43 @@ namespace graphene { namespace chain { * This function takes a new limit order, and runs the markets attempting to match it with existing orders * already on the books. */ + bool apply_order_before_hardfork_625(const limit_order_object& new_order_object, bool allow_black_swan = true); bool apply_order(const limit_order_object& new_order_object, bool allow_black_swan = true); /** - * Matches the two orders, + * Matches the two orders, the first parameter is taker, the second is maker. * * @return a bit field indicating which orders were filled (and thus removed) * * 0 - no orders were matched - * 1 - bid was filled - * 2 - ask was filled + * 1 - taker was filled + * 2 - maker was filled * 3 - both were filled */ ///@{ - template - int match( const limit_order_object& bid, const OrderType& ask, const price& match_price ); - int match( const limit_order_object& bid, const limit_order_object& ask, const price& trade_price ); + int match( const limit_order_object& taker, const limit_order_object& maker, const price& trade_price ); + int match( const limit_order_object& taker, const call_order_object& maker, const price& trade_price, + const price& feed_price, const uint16_t maintenance_collateral_ratio ); /// @return the amount of asset settled asset match(const call_order_object& call, const force_settlement_object& settle, const price& match_price, - asset max_settlement); + asset max_settlement, + const price& fill_price); ///@} /** * @return true if the order was completely filled and thus freed. 
*/ - bool fill_order( const limit_order_object& order, const asset& pays, const asset& receives, bool cull_if_small ); - bool fill_order( const call_order_object& order, const asset& pays, const asset& receives ); - bool fill_order( const force_settlement_object& settle, const asset& pays, const asset& receives ); + bool fill_limit_order( const limit_order_object& order, const asset& pays, const asset& receives, bool cull_if_small, + const price& fill_price, const bool is_maker ); + bool fill_call_order( const call_order_object& order, const asset& pays, const asset& receives, + const price& fill_price, const bool is_maker ); + bool fill_settle_order( const force_settlement_object& settle, const asset& pays, const asset& receives, + const price& fill_price, const bool is_maker ); - bool check_call_orders( const asset_object& mia, bool enable_black_swan = true ); + bool check_call_orders( const asset_object& mia, bool enable_black_swan = true, bool for_new_limit_order = false, + const asset_bitasset_data_object* bitasset_ptr = nullptr ); // helpers to fill_order void pay_order( const account_object& receiver, const asset& receives, const asset& pays ); @@ -379,7 +395,7 @@ namespace graphene { namespace chain { asset pay_market_fees( const asset_object& recv_asset, const asset& receives ); - ///@} + ///@{ /** * This method validates transactions without adding it to the pending state. * @return true if the transaction would validate @@ -389,14 +405,43 @@ namespace graphene { namespace chain { /** when popping a block, the transactions that were removed get cached here so they * can be reapplied at the proper time */ - std::deque< signed_transaction > _popped_tx; + std::deque< precomputable_transaction > _popped_tx; /** * @} */ + + /// Enable or disable tracking of votes of standby witnesses and committee members + inline void enable_standby_votes_tracking(bool enable) { _track_standby_votes = enable; } + + /** Precomputes digests, signatures and operation validations depending + * on skip flags. "Expensive" computations may be done in a parallel + * thread. + * + * @param block the block to preprocess + * @param skip indicates which computations can be skipped + * @return a future that will resolve to the input block with + * precomputations applied + */ + fc::future precompute_parallel( const signed_block& block, const uint32_t skip = skip_nothing )const; + + /** Precomputes digests, signatures and operation validations. + * "Expensive" computations may be done in a parallel thread. 
+ * + * @param trx the transaction to preprocess + * @return a future that will resolve to the input transaction with + * precomputations applied + */ + fc::future precompute_parallel( const precomputable_transaction& trx )const; + private: + template + void _precompute_parallel( const Trx* trx, const size_t count, const uint32_t skip )const; + protected: //Mark pop_undo() as protected -- we do not want outside calling pop_undo(); it should call pop_block() instead void pop_undo() { object_database::pop_undo(); } + void notify_applied_block( const signed_block& block ); + void notify_on_pending_transaction( const signed_transaction& tx ); void notify_changed_objects(); private: @@ -413,9 +458,11 @@ namespace graphene { namespace chain { void apply_block( const signed_block& next_block, uint32_t skip = skip_nothing ); processed_transaction apply_transaction( const signed_transaction& trx, uint32_t skip = skip_nothing ); operation_result apply_operation( transaction_evaluation_state& eval_state, const operation& op ); + private: void _apply_block( const signed_block& next_block ); processed_transaction _apply_transaction( const signed_transaction& trx ); + void _cancel_bids_and_revive_mpa( const asset_object& bitasset, const asset_bitasset_data_object& bad ); ///Steps involved in applying a new block ///@{ @@ -424,17 +471,23 @@ namespace graphene { namespace chain { const witness_object& _validate_block_header( const signed_block& next_block )const; void create_block_summary(const signed_block& next_block); + //////////////////// db_witness_schedule.cpp //////////////////// + + uint32_t update_witness_missed_blocks( const signed_block& b ); + //////////////////// db_update.cpp //////////////////// - void update_global_dynamic_data( const signed_block& b ); + void update_global_dynamic_data( const signed_block& b, const uint32_t missed_blocks ); void update_signing_witness(const witness_object& signing_witness, const signed_block& new_block); void update_last_irreversible_block(); void clear_expired_transactions(); void clear_expired_proposals(); void clear_expired_orders(); void update_expired_feeds(); + void update_core_exchange_rates(); void update_maintenance_flag( bool new_maintenance_flag ); void update_withdraw_permissions(); - bool check_for_blackswan( const asset_object& mia, bool enable_black_swan = true ); + bool check_for_blackswan( const asset_object& mia, bool enable_black_swan = true, + const asset_bitasset_data_object* bitasset_ptr = nullptr ); ///Steps performed only at maintenance intervals ///@{ @@ -448,9 +501,11 @@ namespace graphene { namespace chain { void update_active_witnesses(); void update_active_committee_members(); void update_worker_votes(); + void process_bids( const asset_bitasset_data_object& bad ); + void process_bitassets(); - template - void perform_account_maintenance(std::tuple helpers); + template + void perform_account_maintenance( Type tally_helper ); ///@} ///@} @@ -489,6 +544,35 @@ namespace graphene { namespace chain { flat_map _checkpoints; node_property_object _node_property_object; + + /// Whether to update votes of standby witnesses and committee members when performing chain maintenance. + /// Set it to true to provide accurate data to API clients, set to false to have better performance. + bool _track_standby_votes = true; + + /** + * Whether database is successfully opened or not. 
+ * + * The database is considered open when database::open() completed without + * an exception or assertion failure, and database::close() has either not + * been called or failed during execution. + */ + bool _opened = false; + + // Counts nested proposal updates + uint32_t _push_proposal_nesting_depth = 0; + + /// Tracks assets affected by bitshares-core issue #453 before hard fork #615 in one block + flat_set _issue_453_affected_assets; + + /// Pointers to the core asset object and global objects, which have immutable addresses once created + ///@{ + const asset_object* _p_core_asset_obj = nullptr; + const asset_dynamic_data_object* _p_core_dynamic_data_obj = nullptr; + const global_property_object* _p_global_prop_obj = nullptr; + const dynamic_global_property_object* _p_dyn_global_prop_obj = nullptr; + const chain_property_object* _p_chain_property_obj = nullptr; + const witness_schedule_object* _p_witness_schedule_obj = nullptr; + ///@} }; namespace detail diff --git a/libraries/chain/include/graphene/chain/db_with.hpp b/libraries/chain/include/graphene/chain/db_with.hpp index de93bb15f5..7ae189216f 100644 --- a/libraries/chain/include/graphene/chain/db_with.hpp +++ b/libraries/chain/include/graphene/chain/db_with.hpp @@ -80,11 +80,9 @@ struct pending_transactions_restorer { try { if( !_db.is_known_transaction( tx.id() ) ) { - // since push_transaction() takes a signed_transaction, - // the operation_results field will be ignored. _db._push_transaction( tx ); } - } catch ( const fc::exception& ) { + } catch ( const fc::exception& ) { // ignore invalid transactions } } _db._popped_tx.clear(); @@ -93,17 +91,11 @@ struct pending_transactions_restorer try { if( !_db.is_known_transaction( tx.id() ) ) { - // since push_transaction() takes a signed_transaction, - // the operation_results field will be ignored. _db._push_transaction( tx ); } } - catch( const fc::exception& e ) - { - /* - wlog( "Pending transaction became invalid after switching to block ${b} ${t}", ("b", _db.head_block_id())("t",_db.head_block_time()) ); - wlog( "The invalid pending transaction caused exception ${e}", ("e", e.to_detail_string() ) ); - */ + catch( const fc::exception& ) + { // ignore invalid transactions } } } diff --git a/libraries/chain/include/graphene/chain/evaluator.hpp b/libraries/chain/include/graphene/chain/evaluator.hpp index af90517eca..1ae582bd68 100644 --- a/libraries/chain/include/graphene/chain/evaluator.hpp +++ b/libraries/chain/include/graphene/chain/evaluator.hpp @@ -29,7 +29,7 @@ namespace graphene { namespace chain { class database; - struct signed_transaction; + class signed_transaction; class generic_evaluator; class transaction_evaluation_state; @@ -93,7 +93,7 @@ namespace graphene { namespace chain { * * Rather than returning a value, this method fills in core_fee_paid field. */ - void convert_fee(); + virtual void convert_fee(); object_id_type get_relative_id( object_id_type rel_id )const; diff --git a/libraries/chain/include/graphene/chain/exceptions.hpp b/libraries/chain/include/graphene/chain/exceptions.hpp index 2e07ca26f6..14931bc196 100644 --- a/libraries/chain/include/graphene/chain/exceptions.hpp +++ b/libraries/chain/include/graphene/chain/exceptions.hpp @@ -24,7 +24,7 @@ #pragma once #include -#include +#include #define GRAPHENE_ASSERT( expr, exc_type, FORMAT, ... ) \ FC_MULTILINE_MACRO_BEGIN \ @@ -65,6 +65,21 @@ msg \ ) +#define GRAPHENE_TRY_NOTIFY( signal, ...
) \ + try \ + { \ + signal( __VA_ARGS__ ); \ + } \ + catch( const graphene::chain::plugin_exception& e ) \ + { \ + elog( "Caught plugin exception: ${e}", ("e", e.to_detail_string() ) ); \ + throw; \ + } \ + catch( ... ) \ + { \ + wlog( "Caught unexpected exception in plugin" ); \ + } + namespace graphene { namespace chain { FC_DECLARE_EXCEPTION( chain_exception, 3000000, "blockchain exception" ) @@ -77,6 +92,7 @@ namespace graphene { namespace chain { FC_DECLARE_DERIVED_EXCEPTION( undo_database_exception, graphene::chain::chain_exception, 3070000, "undo database exception" ) FC_DECLARE_DERIVED_EXCEPTION( unlinkable_block_exception, graphene::chain::chain_exception, 3080000, "unlinkable block" ) FC_DECLARE_DERIVED_EXCEPTION( black_swan_exception, graphene::chain::chain_exception, 3090000, "black swan" ) + FC_DECLARE_DERIVED_EXCEPTION( plugin_exception, graphene::chain::chain_exception, 3100000, "plugin exception" ) FC_DECLARE_DERIVED_EXCEPTION( tx_missing_active_auth, graphene::chain::transaction_exception, 3030001, "missing required active authority" ) FC_DECLARE_DERIVED_EXCEPTION( tx_missing_owner_auth, graphene::chain::transaction_exception, 3030002, "missing required owner authority" ) diff --git a/libraries/chain/include/graphene/chain/fork_database.hpp b/libraries/chain/include/graphene/chain/fork_database.hpp index 8ca95b5e4b..363a21f2ce 100644 --- a/libraries/chain/include/graphene/chain/fork_database.hpp +++ b/libraries/chain/include/graphene/chain/fork_database.hpp @@ -44,11 +44,6 @@ namespace graphene { namespace chain { weak_ptr< fork_item > prev; uint32_t num; // initialized in ctor - /** - * Used to flag a block as invalid and prevent other blocks from - * building on top of it. - */ - bool invalid = false; block_id_type id; signed_block data; }; @@ -98,12 +93,10 @@ namespace graphene { namespace chain { struct block_id; struct block_num; - struct by_previous; typedef multi_index_container< item_ptr, indexed_by< hashed_unique, member, std::hash>, - hashed_non_unique, const_mem_fun, std::hash>, ordered_non_unique, member> > > fork_multi_index_type; @@ -117,7 +110,6 @@ namespace graphene { namespace chain { uint32_t _max_size = 1024; - fork_multi_index_type _unlinked_index; fork_multi_index_type _index; shared_ptr _head; }; diff --git a/libraries/app/include/graphene/app/impacted.hpp b/libraries/chain/include/graphene/chain/impacted.hpp similarity index 96% rename from libraries/app/include/graphene/app/impacted.hpp rename to libraries/chain/include/graphene/chain/impacted.hpp index 2e59b91047..2a22cbd123 100644 --- a/libraries/app/include/graphene/app/impacted.hpp +++ b/libraries/chain/include/graphene/chain/impacted.hpp @@ -28,7 +28,7 @@ #include #include -namespace graphene { namespace app { +namespace graphene { namespace chain { void operation_get_impacted_accounts( const graphene::chain::operation& op, @@ -39,4 +39,4 @@ void transaction_get_impacted_accounts( fc::flat_set& result ); -} } // graphene::app +} } // graphene::app \ No newline at end of file diff --git a/libraries/chain/include/graphene/chain/market_evaluator.hpp b/libraries/chain/include/graphene/chain/market_evaluator.hpp index 9d653c0737..96a4ac07ed 100644 --- a/libraries/chain/include/graphene/chain/market_evaluator.hpp +++ b/libraries/chain/include/graphene/chain/market_evaluator.hpp @@ -31,6 +31,7 @@ namespace graphene { namespace chain { class asset_object; class asset_bitasset_data_object; class call_order_object; + struct bid_collateral_operation; struct call_order_update_operation; struct 
limit_order_cancel_operation; struct limit_order_create_operation; @@ -45,12 +46,17 @@ namespace graphene { namespace chain { asset calculate_market_fee( const asset_object* aobj, const asset& trade_amount ); + /** override the default behavior defined by generic_evaluator + */ + virtual void convert_fee() override; + /** override the default behavior defined by generic_evaluator which is to * post the fee to fee_paying_account_stats.pending_fees */ virtual void pay_fee() override; share_type _deferred_fee = 0; + asset _deferred_paid_fee; const limit_order_create_operation* _op = nullptr; const account_object* _seller = nullptr; const asset_object* _sell_asset = nullptr; @@ -74,7 +80,7 @@ namespace graphene { namespace chain { typedef call_order_update_operation operation_type; void_result do_evaluate( const call_order_update_operation& o ); - void_result do_apply( const call_order_update_operation& o ); + object_id_type do_apply( const call_order_update_operation& o ); bool _closing_order = false; const asset_object* _debt_asset = nullptr; @@ -83,4 +89,18 @@ namespace graphene { namespace chain { const asset_bitasset_data_object* _bitasset_data = nullptr; }; + class bid_collateral_evaluator : public evaluator + { + public: + typedef bid_collateral_operation operation_type; + + void_result do_evaluate( const bid_collateral_operation& o ); + void_result do_apply( const bid_collateral_operation& o ); + + const asset_object* _debt_asset = nullptr; + const asset_bitasset_data_object* _bitasset_data = nullptr; + const account_object* _paying_account = nullptr; + const collateral_bid_object* _bid = nullptr; + }; + } } // graphene::chain diff --git a/libraries/chain/include/graphene/chain/market_object.hpp b/libraries/chain/include/graphene/chain/market_object.hpp index c41def13e1..706d5ed313 100644 --- a/libraries/chain/include/graphene/chain/market_object.hpp +++ b/libraries/chain/include/graphene/chain/market_object.hpp @@ -52,7 +52,8 @@ class limit_order_object : public abstract_object account_id_type seller; share_type for_sale; ///< asset id is sell_price.base.asset_id price sell_price; - share_type deferred_fee; + share_type deferred_fee; ///< fee converted to CORE + asset deferred_paid_fee; ///< originally paid fee pair get_market()const { @@ -63,6 +64,8 @@ class limit_order_object : public abstract_object asset amount_for_sale()const { return asset( for_sale, sell_price.base.asset_id ); } asset amount_to_receive()const { return amount_for_sale() * sell_price; } + asset_id_type sell_asset_id()const { return sell_price.base.asset_id; } + asset_id_type receive_asset_id()const { return sell_price.quote.asset_id; } }; struct by_id; @@ -89,8 +92,10 @@ typedef multi_index_container< ordered_unique< tag, composite_key< limit_order_object, member, + member, member - > + >, + composite_key_compare, std::greater, std::less> > > > limit_order_multi_index_type; @@ -114,12 +119,25 @@ class call_order_object : public abstract_object asset get_debt()const { return asset( debt, debt_type() ); } asset amount_to_receive()const { return get_debt(); } asset_id_type debt_type()const { return call_price.quote.asset_id; } + asset_id_type collateral_type()const { return call_price.base.asset_id; } price collateralization()const { return get_collateral() / get_debt(); } account_id_type borrower; share_type collateral; ///< call_price.base.asset_id, access via get_collateral - share_type debt;
///< call_price.quote.asset_id, access via get_debt + price call_price; ///< Collateral / Debt + + optional target_collateral_ratio; ///< maximum CR to maintain when selling collateral on margin call + + pair get_market()const + { + auto tmp = std::make_pair( call_price.base.asset_id, call_price.quote.asset_id ); + if( tmp.first > tmp.second ) std::swap( tmp.first, tmp.second ); + return tmp; + } + + /// Calculate maximum quantity of debt to cover to satisfy @ref target_collateral_ratio. + share_type get_max_debt_to_cover( price match_price, price feed_price, const uint16_t maintenance_collateral_ratio )const; }; /** @@ -142,6 +160,27 @@ class force_settlement_object : public abstract_object { return balance.asset_id; } }; +/** + * @class collateral_bid_object + * @brief bids of collateral for debt after a black swan + * + * There should only be one collateral_bid_object per asset per account, and + * only for smartcoin assets that have a global settlement_price. + */ +class collateral_bid_object : public abstract_object +{ + public: + static const uint8_t space_id = implementation_ids; + static const uint8_t type_id = impl_collateral_bid_object_type; + + asset get_additional_collateral()const { return inv_swan_price.base; } + asset get_debt_covered()const { return inv_swan_price.quote; } + asset_id_type debt_type()const { return inv_swan_price.quote.asset_id; } + + account_id_type bidder; + price inv_swan_price; // Collateral / Debt +}; + struct by_collateral; struct by_account; struct by_price; @@ -193,20 +232,46 @@ typedef multi_index_container< > > force_settlement_object_multi_index_type; +typedef multi_index_container< + collateral_bid_object, + indexed_by< + ordered_unique< tag, + member< object, object_id_type, &object::id > >, + ordered_unique< tag, + composite_key< collateral_bid_object, + const_mem_fun< collateral_bid_object, asset_id_type, &collateral_bid_object::debt_type>, + member< collateral_bid_object, account_id_type, &collateral_bid_object::bidder> + > + >, + ordered_unique< tag, + composite_key< collateral_bid_object, + const_mem_fun< collateral_bid_object, asset_id_type, &collateral_bid_object::debt_type>, + member< collateral_bid_object, price, &collateral_bid_object::inv_swan_price >, + member< object, object_id_type, &object::id > + >, + composite_key_compare< std::less, std::greater, std::less > + > + > +> collateral_bid_object_multi_index_type; + typedef generic_index call_order_index; typedef generic_index force_settlement_index; +typedef generic_index collateral_bid_index; } } // graphene::chain FC_REFLECT_DERIVED( graphene::chain::limit_order_object, (graphene::db::object), - (expiration)(seller)(for_sale)(sell_price)(deferred_fee) + (expiration)(seller)(for_sale)(sell_price)(deferred_fee)(deferred_paid_fee) ) FC_REFLECT_DERIVED( graphene::chain::call_order_object, (graphene::db::object), - (borrower)(collateral)(debt)(call_price) ) + (borrower)(collateral)(debt)(call_price)(target_collateral_ratio) ) FC_REFLECT_DERIVED( graphene::chain::force_settlement_object, (graphene::db::object), (owner)(balance)(settlement_date) ) + +FC_REFLECT_DERIVED( graphene::chain::collateral_bid_object, (graphene::db::object), + (bidder)(inv_swan_price) ) diff --git a/libraries/chain/include/graphene/chain/operation_history_object.hpp b/libraries/chain/include/graphene/chain/operation_history_object.hpp index ecbbc58d04..3aeaab1923 100644 --- a/libraries/chain/include/graphene/chain/operation_history_object.hpp +++ 
b/libraries/chain/include/graphene/chain/operation_history_object.hpp @@ -92,38 +92,53 @@ namespace graphene { namespace chain { static const uint8_t type_id = impl_account_transaction_history_object_type; account_id_type account; /// the account this operation applies to operation_history_id_type operation_id; - uint32_t sequence = 0; /// the operation position within the given account + uint64_t sequence = 0; /// the operation position within the given account account_transaction_history_id_type next; //std::pair account_op()const { return std::tie( account, operation_id ); } //std::pair account_seq()const { return std::tie( account, sequence ); } }; - + struct by_id; -struct by_seq; -struct by_op; -typedef multi_index_container< - account_transaction_history_object, - indexed_by< - ordered_unique< tag, member< object, object_id_type, &object::id > >, - ordered_unique< tag, - composite_key< account_transaction_history_object, - member< account_transaction_history_object, account_id_type, &account_transaction_history_object::account>, - member< account_transaction_history_object, uint32_t, &account_transaction_history_object::sequence> - > - >, - ordered_unique< tag, - composite_key< account_transaction_history_object, - member< account_transaction_history_object, account_id_type, &account_transaction_history_object::account>, + + typedef multi_index_container< + operation_history_object, + indexed_by< + ordered_unique< tag, member< object, object_id_type, &object::id > > + > + > operation_history_multi_index_type; + + typedef generic_index operation_history_index; + + struct by_seq; + struct by_op; + struct by_opid; + + typedef multi_index_container< + account_transaction_history_object, + indexed_by< + ordered_unique< tag, member< object, object_id_type, &object::id > >, + ordered_unique< tag, + composite_key< account_transaction_history_object, + member< account_transaction_history_object, account_id_type, &account_transaction_history_object::account>, + member< account_transaction_history_object, uint64_t, &account_transaction_history_object::sequence> + > + >, + ordered_unique< tag, + composite_key< account_transaction_history_object, + member< account_transaction_history_object, account_id_type, &account_transaction_history_object::account>, + member< account_transaction_history_object, operation_history_id_type, &account_transaction_history_object::operation_id> + > + >, + ordered_non_unique< tag, member< account_transaction_history_object, operation_history_id_type, &account_transaction_history_object::operation_id> > > - > -> account_transaction_history_multi_index_type; + > account_transaction_history_multi_index_type; + + typedef generic_index account_transaction_history_index; -typedef generic_index account_transaction_history_index; - } } // graphene::chain FC_REFLECT_DERIVED( graphene::chain::operation_history_object, (graphene::chain::object), diff --git a/libraries/chain/include/graphene/chain/proposal_evaluator.hpp b/libraries/chain/include/graphene/chain/proposal_evaluator.hpp index bf6fc54721..04b5c62d22 100644 --- a/libraries/chain/include/graphene/chain/proposal_evaluator.hpp +++ b/libraries/chain/include/graphene/chain/proposal_evaluator.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. + * Copyright (c) 2015-2018 Cryptonomex, Inc., and contributors. 
* * The MIT License * @@ -25,11 +25,28 @@ #include #include -#include -#include namespace graphene { namespace chain { + class hardfork_visitor_1479 + { + public: + typedef void result_type; + + uint64_t max_update_instance = 0; + uint64_t nested_update_count = 0; + + template + void operator()(const T &v) const {} + + void operator()(const proposal_update_operation &v); + + void operator()(const proposal_delete_operation &v); + + // loop and self visit in proposals + void operator()(const graphene::chain::proposal_create_operation &v); + }; + class proposal_create_evaluator : public evaluator { public: @@ -39,6 +56,8 @@ namespace graphene { namespace chain { object_id_type do_apply( const proposal_create_operation& o ); transaction _proposed_trx; + + hardfork_visitor_1479 vtor_1479; }; class proposal_update_evaluator : public evaluator diff --git a/libraries/chain/include/graphene/chain/proposal_object.hpp b/libraries/chain/include/graphene/chain/proposal_object.hpp index 97d98a5ba7..12fae3c89e 100644 --- a/libraries/chain/include/graphene/chain/proposal_object.hpp +++ b/libraries/chain/include/graphene/chain/proposal_object.hpp @@ -27,6 +27,7 @@ #include #include +#include namespace graphene { namespace chain { @@ -50,8 +51,10 @@ class proposal_object : public abstract_object flat_set required_owner_approvals; flat_set available_owner_approvals; flat_set available_key_approvals; + account_id_type proposer; + std::string fail_reason; - bool is_authorized_to_execute(database& db)const; + bool is_authorized_to_execute(database& db) const; }; /** @@ -83,7 +86,13 @@ typedef boost::multi_index_container< proposal_object, indexed_by< ordered_unique< tag< by_id >, member< object, object_id_type, &object::id > >, - ordered_non_unique< tag< by_expiration >, member< proposal_object, time_point_sec, &proposal_object::expiration_time > > + //ordered_non_unique< tag< by_expiration >, member< proposal_object, time_point_sec, &proposal_object::expiration_time > > + ordered_unique, + composite_key, + member< object, object_id_type, &object::id > + > + > > > proposal_multi_index_container; typedef generic_index proposal_index; @@ -93,4 +102,4 @@ typedef generic_index proposal_ FC_REFLECT_DERIVED( graphene::chain::proposal_object, (graphene::chain::object), (expiration_time)(review_period_time)(proposed_transaction)(required_active_approvals) (available_active_approvals)(required_owner_approvals)(available_owner_approvals) - (available_key_approvals) ) + (available_key_approvals)(proposer) ) diff --git a/libraries/chain/include/graphene/chain/protocol/account.hpp b/libraries/chain/include/graphene/chain/protocol/account.hpp index 329d03e290..f2be53837b 100644 --- a/libraries/chain/include/graphene/chain/protocol/account.hpp +++ b/libraries/chain/include/graphene/chain/protocol/account.hpp @@ -57,6 +57,12 @@ namespace graphene { namespace chain { flat_set votes; extensions_type extensions; + /// Whether this account is voting + inline bool is_voting() const + { + return ( voting_account != GRAPHENE_PROXY_TO_SELF_ACCOUNT || !votes.empty() ); + } + void validate()const; }; @@ -264,11 +270,11 @@ namespace graphene { namespace chain { } } // graphene::chain FC_REFLECT(graphene::chain::account_options, (memo_key)(voting_account)(num_witness)(num_committee)(votes)(extensions)) -FC_REFLECT_TYPENAME( graphene::chain::account_whitelist_operation::account_listing) FC_REFLECT_ENUM( graphene::chain::account_whitelist_operation::account_listing, (no_listing)(white_listed)(black_listed)(white_and_black_listed)) 
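The new account_options::is_voting() helper above treats an account as voting when it either proxies its vote to another account or has cast at least one vote of its own. A minimal sketch of that behaviour, assuming the graphene::chain headers from this patch (the vote id used is only a placeholder):

#include <graphene/chain/protocol/account.hpp>

// Illustrative only: what account_options::is_voting() reports.
bool is_voting_example()
{
   graphene::chain::account_options opts;
   opts.voting_account = GRAPHENE_PROXY_TO_SELF_ACCOUNT;  // proxy to self ...
   opts.votes.clear();                                    // ... and no direct votes
   const bool idle = opts.is_voting();                    // false: not voting at all

   opts.votes.insert( graphene::chain::vote_id_type() );  // cast one placeholder vote
   const bool active = opts.is_voting();                  // true

   return !idle && active;
}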
FC_REFLECT(graphene::chain::account_create_operation::ext, (null_ext)(owner_special_authority)(active_special_authority)(buyback_options) ) +FC_REFLECT_TYPENAME(graphene::chain::extension) FC_REFLECT( graphene::chain::account_create_operation, (fee)(registrar) (referrer)(referrer_percent) @@ -276,6 +282,7 @@ FC_REFLECT( graphene::chain::account_create_operation, ) FC_REFLECT(graphene::chain::account_update_operation::ext, (null_ext)(owner_special_authority)(active_special_authority) ) +FC_REFLECT_TYPENAME(graphene::chain::extension) FC_REFLECT( graphene::chain::account_update_operation, (fee)(account)(owner)(active)(new_options)(extensions) ) diff --git a/libraries/chain/include/graphene/chain/protocol/address.hpp b/libraries/chain/include/graphene/chain/protocol/address.hpp index 00331c0813..b225b42caf 100644 --- a/libraries/chain/include/graphene/chain/protocol/address.hpp +++ b/libraries/chain/include/graphene/chain/protocol/address.hpp @@ -78,8 +78,8 @@ namespace graphene { namespace chain { namespace fc { - void to_variant( const graphene::chain::address& var, fc::variant& vo ); - void from_variant( const fc::variant& var, graphene::chain::address& vo ); + void to_variant( const graphene::chain::address& var, fc::variant& vo, uint32_t max_depth = 1 ); + void from_variant( const fc::variant& var, graphene::chain::address& vo, uint32_t max_depth = 1 ); } namespace std diff --git a/libraries/chain/include/graphene/chain/protocol/asset.hpp b/libraries/chain/include/graphene/chain/protocol/asset.hpp index a938129ac5..86d17892ff 100644 --- a/libraries/chain/include/graphene/chain/protocol/asset.hpp +++ b/libraries/chain/include/graphene/chain/protocol/asset.hpp @@ -29,6 +29,8 @@ namespace graphene { namespace chain { extern const int64_t scaled_precision_lut[]; + struct price; + struct asset { asset( share_type a = 0, asset_id_type id = asset_id_type() ) @@ -60,20 +62,20 @@ namespace graphene { namespace chain { FC_ASSERT( a.asset_id == b.asset_id ); return a.amount < b.amount; } - friend bool operator <= ( const asset& a, const asset& b ) + friend inline bool operator <= ( const asset& a, const asset& b ) { - return (a == b) || (a < b); + return !(b < a); } - friend bool operator != ( const asset& a, const asset& b ) + friend inline bool operator != ( const asset& a, const asset& b ) { return !(a == b); } - friend bool operator > ( const asset& a, const asset& b ) + friend inline bool operator > ( const asset& a, const asset& b ) { - return !(a <= b); + return (b < a); } - friend bool operator >= ( const asset& a, const asset& b ) + friend inline bool operator >= ( const asset& a, const asset& b ) { return !(a < b); } @@ -94,6 +96,8 @@ namespace graphene { namespace chain { FC_ASSERT( precision < 19 ); return scaled_precision_lut[ precision ]; } + + asset multiply_and_round_up( const price& p )const; ///< Multiply and round up }; /** @@ -110,8 +114,8 @@ namespace graphene { namespace chain { */ struct price { - price(const asset& base = asset(), const asset quote = asset()) - : base(base),quote(quote){} + explicit price(const asset& _base = asset(), const asset& _quote = asset()) + : base(_base),quote(_quote){} asset base; asset quote; @@ -136,15 +140,23 @@ namespace graphene { namespace chain { price operator / ( const asset& base, const asset& quote ); inline price operator~( const price& p ) { return price{p.quote,p.base}; } - bool operator < ( const asset& a, const asset& b ); - bool operator <= ( const asset& a, const asset& b ); bool operator < ( const price& a, const price& b ); - 
bool operator <= ( const price& a, const price& b ); - bool operator > ( const price& a, const price& b ); - bool operator >= ( const price& a, const price& b ); bool operator == ( const price& a, const price& b ); - bool operator != ( const price& a, const price& b ); - asset operator * ( const asset& a, const price& b ); + + inline bool operator > ( const price& a, const price& b ) { return (b < a); } + inline bool operator <= ( const price& a, const price& b ) { return !(b < a); } + inline bool operator >= ( const price& a, const price& b ) { return !(a < b); } + inline bool operator != ( const price& a, const price& b ) { return !(a == b); } + + asset operator * ( const asset& a, const price& b ); ///< Multiply and round down + + price operator * ( const price& p, const ratio_type& r ); + price operator / ( const price& p, const ratio_type& r ); + + inline price& operator *= ( price& p, const ratio_type& r ) + { return p = p * r; } + inline price& operator /= ( price& p, const ratio_type& r ) + { return p = p / r; } /** * @class price_feed diff --git a/libraries/chain/include/graphene/chain/protocol/asset_ops.hpp b/libraries/chain/include/graphene/chain/protocol/asset_ops.hpp index 3f5ede199e..3a045a30c9 100644 --- a/libraries/chain/include/graphene/chain/protocol/asset_ops.hpp +++ b/libraries/chain/include/graphene/chain/protocol/asset_ops.hpp @@ -54,7 +54,7 @@ namespace graphene { namespace chain { /// order to accept the fee. If this asset's fee pool is funded, the chain will automatically deposite fees /// in this asset to its accumulated fees, and withdraw from the fee pool the same amount as converted at /// the core exchange rate. - price core_exchange_rate; + price core_exchange_rate = price(asset(), asset(0, asset_id_type(1))); /// A set of accounts which maintain whitelists to consult for this asset. If whitelist_authorities /// is non-empty, then only accounts in whitelist_authorities are allowed to hold, use, or transfer the asset. @@ -442,11 +442,74 @@ namespace graphene { namespace chain { void validate()const; }; + /** + * @brief Update issuer of an asset + * @ingroup operations + * + * An issuer has general administrative power of an asset and in some cases + * also its shares issued to individuals. Thus, changing the issuer today + * requires the use of a separate operation that needs to be signed by the + * owner authority. 
+ * + */ + struct asset_update_issuer_operation : public base_operation + { + struct fee_parameters_type { + uint64_t fee = 20 * GRAPHENE_BLOCKCHAIN_PRECISION; + }; + + asset fee; + account_id_type issuer; + asset_id_type asset_to_update; + account_id_type new_issuer; + extensions_type extensions; + + account_id_type fee_payer()const { return issuer; } + void validate()const; + + void get_required_owner_authorities( flat_set& a )const + { a.insert( issuer ); } + + void get_required_active_authorities( flat_set& a )const + { } + + }; + + /** + * @brief Transfers BTS from the fee pool of a specified asset back to the issuer's balance + + * @param fee Payment for the operation execution + * @param issuer Account which will be used for transferring BTS + * @param asset_id Id of the asset whose fee pool is going to be drained + * @param amount_to_claim Amount of BTS to claim from the fee pool + * @param extensions Field for future expansion + + * @pre @ref fee must be paid in an asset other than the one whose pool is being drained + * @pre @ref amount_to_claim should be specified in the core asset + * @pre @ref amount_to_claim should be nonnegative + */ + struct asset_claim_pool_operation : public base_operation + { + struct fee_parameters_type { + uint64_t fee = 20 * GRAPHENE_BLOCKCHAIN_PRECISION; + }; + + asset fee; + account_id_type issuer; + asset_id_type asset_id; /// fee.asset_id must != asset_id + asset amount_to_claim; /// core asset + extensions_type extensions; + + account_id_type fee_payer()const { return issuer; } + void validate()const; + }; } } // graphene::chain FC_REFLECT( graphene::chain::asset_claim_fees_operation, (fee)(issuer)(amount_to_claim)(extensions) ) FC_REFLECT( graphene::chain::asset_claim_fees_operation::fee_parameters_type, (fee) ) +FC_REFLECT( graphene::chain::asset_claim_pool_operation, (fee)(issuer)(asset_id)(amount_to_claim)(extensions) ) +FC_REFLECT( graphene::chain::asset_claim_pool_operation::fee_parameters_type, (fee) ) FC_REFLECT( graphene::chain::asset_options, (max_supply) @@ -479,6 +542,7 @@ FC_REFLECT( graphene::chain::asset_settle_operation::fee_parameters_type, (fee) ) FC_REFLECT( graphene::chain::asset_settle_cancel_operation::fee_parameters_type, ) FC_REFLECT( graphene::chain::asset_fund_fee_pool_operation::fee_parameters_type, (fee) ) FC_REFLECT( graphene::chain::asset_update_operation::fee_parameters_type, (fee)(price_per_kbyte) ) +FC_REFLECT( graphene::chain::asset_update_issuer_operation::fee_parameters_type, (fee) ) FC_REFLECT( graphene::chain::asset_update_bitasset_operation::fee_parameters_type, (fee) ) FC_REFLECT( graphene::chain::asset_update_feed_producers_operation::fee_parameters_type, (fee) ) FC_REFLECT( graphene::chain::asset_publish_feed_operation::fee_parameters_type, (fee) ) @@ -504,6 +568,13 @@ FC_REFLECT( graphene::chain::asset_update_operation, (new_options) (extensions) ) +FC_REFLECT( graphene::chain::asset_update_issuer_operation, + (fee) + (issuer) + (asset_to_update) + (new_issuer) + (extensions) + ) FC_REFLECT( graphene::chain::asset_update_bitasset_operation, (fee) (issuer) diff --git a/libraries/chain/include/graphene/chain/protocol/authority.hpp b/libraries/chain/include/graphene/chain/protocol/authority.hpp index b6ef60d772..145c24b5c5 100644 --- a/libraries/chain/include/graphene/chain/protocol/authority.hpp +++ b/libraries/chain/include/graphene/chain/protocol/authority.hpp @@ -132,5 +132,4 @@ void add_authority_accounts( } } // namespace graphene::chain FC_REFLECT( graphene::chain::authority,
(weight_threshold)(account_auths)(key_auths)(address_auths) ) -FC_REFLECT_TYPENAME( graphene::chain::authority::classification ) FC_REFLECT_ENUM( graphene::chain::authority::classification, (owner)(active)(key) ) diff --git a/libraries/chain/include/graphene/chain/protocol/base.hpp b/libraries/chain/include/graphene/chain/protocol/base.hpp index 52240b934a..73209a1861 100644 --- a/libraries/chain/include/graphene/chain/protocol/base.hpp +++ b/libraries/chain/include/graphene/chain/protocol/base.hpp @@ -27,6 +27,8 @@ #include #include +#include + namespace graphene { namespace chain { /** @@ -94,6 +96,7 @@ namespace graphene { namespace chain { void get_required_active_authorities( flat_set& )const{} void get_required_owner_authorities( flat_set& )const{} void validate()const{} + fc::optional< fc::future > validate_parallel( uint32_t skip )const; static uint64_t calculate_data_fee( uint64_t bytes, uint64_t price_per_kbyte ); }; diff --git a/libraries/chain/include/graphene/chain/protocol/block.hpp b/libraries/chain/include/graphene/chain/protocol/block.hpp index e2dcd0383c..aa8c46052f 100644 --- a/libraries/chain/include/graphene/chain/protocol/block.hpp +++ b/libraries/chain/include/graphene/chain/protocol/block.hpp @@ -26,33 +26,43 @@ namespace graphene { namespace chain { - struct block_header + class block_header { + public: digest_type digest()const; block_id_type previous; uint32_t block_num()const { return num_from_id(previous) + 1; } fc::time_point_sec timestamp; witness_id_type witness; checksum_type transaction_merkle_root; + // Note: when we need to add data to `extensions`, remember to review `database::_generate_block()`. + // More info in https://github.com/bitshares/bitshares-core/issues/1136 extensions_type extensions; static uint32_t num_from_id(const block_id_type& id); }; - struct signed_block_header : public block_header + class signed_block_header : public block_header { - block_id_type id()const; - fc::ecc::public_key signee()const; + public: + const block_id_type& id()const; + const fc::ecc::public_key& signee()const; void sign( const fc::ecc::private_key& signer ); bool validate_signee( const fc::ecc::public_key& expected_signee )const; signature_type witness_signature; + protected: + mutable fc::ecc::public_key _signee; + mutable block_id_type _block_id; }; - struct signed_block : public signed_block_header + class signed_block : public signed_block_header { - checksum_type calculate_merkle_root()const; + public: + const checksum_type& calculate_merkle_root()const; vector transactions; + protected: + mutable checksum_type _calculated_merkle_root; }; } } // graphene::chain diff --git a/libraries/chain/include/graphene/chain/protocol/chain_parameters.hpp b/libraries/chain/include/graphene/chain/protocol/chain_parameters.hpp index 4dbd6c15ff..7869d14a35 100644 --- a/libraries/chain/include/graphene/chain/protocol/chain_parameters.hpp +++ b/libraries/chain/include/graphene/chain/protocol/chain_parameters.hpp @@ -22,25 +22,20 @@ * THE SOFTWARE. 
*/ #pragma once +#include #include #include -#include - -namespace graphene { namespace chain { struct fee_schedule; } } -/* -namespace fc { - template inline void pack( Stream& s, const graphene::chain::fee_schedule& value ); - template inline void unpack( Stream& s, graphene::chain::fee_schedule& value ); -} // namespace fc -*/ namespace graphene { namespace chain { typedef static_variant<> parameter_extension; + + struct fee_schedule; + struct chain_parameters { - /** using a smart ref breaks the circular dependency created between operations and the fee schedule */ - smart_ref current_fees; ///< current schedule of fees + /** using a shared_ptr breaks the circular dependency created between operations and the fee schedule */ + std::shared_ptr current_fees; ///< current schedule of fees uint8_t block_interval = GRAPHENE_DEFAULT_BLOCK_INTERVAL; ///< interval in seconds between blocks uint32_t maintenance_interval = GRAPHENE_DEFAULT_MAINTENANCE_INTERVAL; ///< interval in sections between blockchain maintenance events uint8_t maintenance_skip_slots = GRAPHENE_DEFAULT_MAINTENANCE_SKIP_SLOTS; ///< number of block_intervals to skip at maintenance time @@ -73,6 +68,15 @@ namespace graphene { namespace chain { /** defined in fee_schedule.cpp */ void validate()const; + + chain_parameters(); + chain_parameters(const chain_parameters& other); + chain_parameters(chain_parameters&& other); + chain_parameters& operator=(const chain_parameters& other); + chain_parameters& operator=(chain_parameters&& other); + + private: + static void safe_copy(chain_parameters& to, const chain_parameters& from); }; } } // graphene::chain diff --git a/libraries/chain/include/graphene/chain/protocol/ext.hpp b/libraries/chain/include/graphene/chain/protocol/ext.hpp index ac7755353d..f868fa0b23 100644 --- a/libraries/chain/include/graphene/chain/protocol/ext.hpp +++ b/libraries/chain/include/graphene/chain/protocol/ext.hpp @@ -28,6 +28,8 @@ namespace graphene { namespace chain { +using fc::unsigned_int; + template< typename T > struct extension { @@ -54,15 +56,19 @@ struct graphene_extension_pack_count_visitor template< typename Stream, typename T > struct graphene_extension_pack_read_visitor { - graphene_extension_pack_read_visitor( Stream& s, const T& v ) : stream(s), value(v) {} + graphene_extension_pack_read_visitor( Stream& s, const T& v, uint32_t _max_depth ) + : stream(s), value(v), max_depth(_max_depth - 1) + { + FC_ASSERT( _max_depth > 0 ); + } template void operator()( const char* name )const { if( (value.*member).valid() ) { - fc::raw::pack( stream, unsigned_int( which ) ); - fc::raw::pack( stream, *(value.*member) ); + fc::raw::pack( stream, unsigned_int( which ), max_depth ); + fc::raw::pack( stream, *(value.*member), max_depth ); } ++which; } @@ -70,27 +76,19 @@ struct graphene_extension_pack_read_visitor Stream& stream; const T& value; mutable uint32_t which = 0; + const uint32_t max_depth; }; -template< typename Stream, class T > -void operator<<( Stream& stream, const graphene::chain::extension& value ) -{ - graphene_extension_pack_count_visitor count_vtor( value.value ); - fc::reflector::visit( count_vtor ); - fc::raw::pack( stream, unsigned_int( count_vtor.count ) ); - graphene_extension_pack_read_visitor read_vtor( stream, value.value ); - fc::reflector::visit( read_vtor ); -} - - template< typename Stream, typename T > struct graphene_extension_unpack_visitor { - graphene_extension_unpack_visitor( Stream& s, T& v ) : stream(s), value(v) + graphene_extension_unpack_visitor( Stream& s, T& v, uint32_t 
_max_depth ) + : stream(s), value(v), max_depth(_max_depth - 1) { + FC_ASSERT( _max_depth > 0 ); unsigned_int c; - fc::raw::unpack( stream, c ); + fc::raw::unpack( stream, c, max_depth ); count_left = c.value; maybe_read_next_which(); } @@ -100,7 +98,7 @@ struct graphene_extension_unpack_visitor if( count_left > 0 ) { unsigned_int w; - fc::raw::unpack( stream, w ); + fc::raw::unpack( stream, w, max_depth ); next_which = w.value; } } @@ -111,7 +109,7 @@ struct graphene_extension_unpack_visitor if( (count_left > 0) && (which == next_which) ) { typename Member::value_type temp; - fc::raw::unpack( stream, temp ); + fc::raw::unpack( stream, temp, max_depth ); (value.*member) = temp; --count_left; maybe_read_next_which(); @@ -127,17 +125,9 @@ struct graphene_extension_unpack_visitor Stream& stream; T& value; + const uint32_t max_depth; }; -template< typename Stream, typename T > -void operator>>( Stream& s, graphene::chain::extension& value ) -{ - value = graphene::chain::extension(); - graphene_extension_unpack_visitor vtor( s, value.value ); - fc::reflector::visit( vtor ); - FC_ASSERT( vtor.count_left == 0 ); // unrecognized extension throws here -} - } } // graphene::chain namespace fc { @@ -145,9 +135,10 @@ namespace fc { template< typename T > struct graphene_extension_from_variant_visitor { - graphene_extension_from_variant_visitor( const variant_object& v, T& val ) - : vo( v ), value( val ) + graphene_extension_from_variant_visitor( const variant_object& v, T& val, uint32_t max_depth ) + : vo( v ), value( val ), _max_depth(max_depth - 1) { + FC_ASSERT( max_depth > 0, "Recursion depth exceeded!" ); count_left = vo.size(); } @@ -157,7 +148,7 @@ struct graphene_extension_from_variant_visitor auto it = vo.find(name); if( it != vo.end() ) { - from_variant( it->value(), (value.*member) ); + from_variant( it->value(), (value.*member), _max_depth ); assert( count_left > 0 ); // x.find(k) returns true for n distinct values of k only if x.size() >= n --count_left; } @@ -165,11 +156,12 @@ struct graphene_extension_from_variant_visitor const variant_object& vo; T& value; + const uint32_t _max_depth; mutable uint32_t count_left = 0; }; template< typename T > -void from_variant( const fc::variant& var, graphene::chain::extension& value ) +void from_variant( const fc::variant& var, graphene::chain::extension& value, uint32_t max_depth ) { value = graphene::chain::extension(); if( var.is_null() ) @@ -180,7 +172,7 @@ void from_variant( const fc::variant& var, graphene::chain::extension& value return; } - graphene_extension_from_variant_visitor vtor( var.get_object(), value.value ); + graphene_extension_from_variant_visitor vtor( var.get_object(), value.value, max_depth ); fc::reflector::visit( vtor ); FC_ASSERT( vtor.count_left == 0 ); // unrecognized extension throws here } @@ -188,25 +180,64 @@ void from_variant( const fc::variant& var, graphene::chain::extension& value template< typename T > struct graphene_extension_to_variant_visitor { - graphene_extension_to_variant_visitor( const T& v ) : value(v) {} + graphene_extension_to_variant_visitor( const T& v, uint32_t max_depth ) : value(v), mvo(max_depth) {} template void operator()( const char* name )const { if( (value.*member).valid() ) - mvo[ name ] = (value.*member); + mvo( name, value.*member ); } const T& value; - mutable mutable_variant_object mvo; + mutable limited_mutable_variant_object mvo; }; template< typename T > -void to_variant( const graphene::chain::extension& value, fc::variant& var ) +void to_variant( const graphene::chain::extension& 
value, fc::variant& var, uint32_t max_depth ) { - graphene_extension_to_variant_visitor vtor( value.value ); + graphene_extension_to_variant_visitor vtor( value.value, max_depth ); fc::reflector::visit( vtor ); var = vtor.mvo; } +namespace raw { + +template< typename Stream, typename T > +void pack( Stream& stream, const graphene::chain::extension& value, uint32_t _max_depth=FC_PACK_MAX_DEPTH ) +{ + FC_ASSERT( _max_depth > 0 ); + --_max_depth; + graphene::chain::graphene_extension_pack_count_visitor count_vtor( value.value ); + fc::reflector::visit( count_vtor ); + fc::raw::pack( stream, unsigned_int( count_vtor.count ), _max_depth ); + graphene::chain::graphene_extension_pack_read_visitor read_vtor( stream, value.value, _max_depth ); + fc::reflector::visit( read_vtor ); +} + + +template< typename Stream, typename T > +void unpack( Stream& s, graphene::chain::extension& value, uint32_t _max_depth=FC_PACK_MAX_DEPTH ) +{ + FC_ASSERT( _max_depth > 0 ); + --_max_depth; + value = graphene::chain::extension(); + graphene::chain::graphene_extension_unpack_visitor vtor( s, value.value, _max_depth ); + fc::reflector::visit( vtor ); + FC_ASSERT( vtor.count_left == 0 ); // unrecognized extension throws here +} + +} // fc::raw + +template struct get_typename< graphene::chain::extension > +{ + static const char* name() + { + static std::string n = std::string("graphene::chain::extension<") + + fc::get_typename::name() + std::string(">"); + return n.c_str(); + } +}; + + } // fc diff --git a/libraries/chain/include/graphene/chain/protocol/fee_schedule.hpp b/libraries/chain/include/graphene/chain/protocol/fee_schedule.hpp index e250ab1737..f635dbe97c 100644 --- a/libraries/chain/include/graphene/chain/protocol/fee_schedule.hpp +++ b/libraries/chain/include/graphene/chain/protocol/fee_schedule.hpp @@ -34,6 +34,79 @@ namespace graphene { namespace chain { }; typedef transform_to_fee_parameters::type fee_parameters; + template + class fee_helper { + public: + const typename Operation::fee_parameters_type& cget(const flat_set& parameters)const + { + auto itr = parameters.find( typename Operation::fee_parameters_type() ); + FC_ASSERT( itr != parameters.end() ); + return itr->template get(); + } + }; + + template<> + class fee_helper { + public: + const account_create_operation::fee_parameters_type& cget(const flat_set& parameters)const + { + auto itr = parameters.find( account_create_operation::fee_parameters_type() ); + FC_ASSERT( itr != parameters.end() ); + return itr->get(); + } + typename account_create_operation::fee_parameters_type& get(flat_set& parameters)const + { + auto itr = parameters.find( account_create_operation::fee_parameters_type() ); + FC_ASSERT( itr != parameters.end() ); + return itr->get(); + } + }; + + template<> + class fee_helper { + public: + const bid_collateral_operation::fee_parameters_type& cget(const flat_set& parameters)const + { + auto itr = parameters.find( bid_collateral_operation::fee_parameters_type() ); + if ( itr != parameters.end() ) + return itr->get(); + + static bid_collateral_operation::fee_parameters_type bid_collateral_dummy; + bid_collateral_dummy.fee = fee_helper().cget(parameters).fee; + return bid_collateral_dummy; + } + }; + + template<> + class fee_helper { + public: + const asset_update_issuer_operation::fee_parameters_type& cget(const flat_set& parameters)const + { + auto itr = parameters.find( asset_update_issuer_operation::fee_parameters_type() ); + if ( itr != parameters.end() ) + return itr->get(); + + static 
asset_update_issuer_operation::fee_parameters_type dummy; + dummy.fee = fee_helper().cget(parameters).fee; + return dummy; + } + }; + + template<> + class fee_helper { + public: + const asset_claim_pool_operation::fee_parameters_type& cget(const flat_set& parameters)const + { + auto itr = parameters.find( asset_claim_pool_operation::fee_parameters_type() ); + if ( itr != parameters.end() ) + return itr->get(); + + static asset_claim_pool_operation::fee_parameters_type asset_claim_pool_dummy; + asset_claim_pool_dummy.fee = fee_helper().cget(parameters).fee; + return asset_claim_pool_dummy; + } + }; + /** * @brief contains all of the parameters necessary to calculate the fee for any operation */ @@ -45,9 +118,18 @@ namespace graphene { namespace chain { /** * Finds the appropriate fee parameter struct for the operation - * and then calculates the appropriate fee. + * and then calculates the appropriate fee in CORE asset. + */ + asset calculate_fee( const operation& op )const; + /** + * Finds the appropriate fee parameter struct for the operation + * and then calculates the appropriate fee in an asset specified + * implicitly by core_exchange_rate. + */ + asset calculate_fee( const operation& op, const price& core_exchange_rate )const; + /** + * Updates the operation with appropriate fee and returns the fee. */ - asset calculate_fee( const operation& op, const price& core_exchange_rate = price::unit_price() )const; asset set_fee( operation& op, const price& core_exchange_rate = price::unit_price() )const; void zero_all_fees(); @@ -60,16 +142,12 @@ namespace graphene { namespace chain { template const typename Operation::fee_parameters_type& get()const { - auto itr = parameters.find( typename Operation::fee_parameters_type() ); - FC_ASSERT( itr != parameters.end() ); - return itr->template get(); + return fee_helper().cget(parameters); } template typename Operation::fee_parameters_type& get() { - auto itr = parameters.find( typename Operation::fee_parameters_type() ); - FC_ASSERT( itr != parameters.end() ); - return itr->template get(); + return fee_helper().get(parameters); } /** @@ -77,11 +155,17 @@ namespace graphene { namespace chain { */ flat_set parameters; uint32_t scale = GRAPHENE_100_PERCENT; ///< fee * scale / GRAPHENE_100_PERCENT + private: + static void set_fee_parameters(fee_schedule& sched); }; typedef fee_schedule fee_schedule_type; } } // graphene::chain +namespace fc { + template<> struct get_typename> { static const char* name() { return "shared_ptr"; } }; +} + FC_REFLECT_TYPENAME( graphene::chain::fee_parameters ) FC_REFLECT( graphene::chain::fee_schedule, (parameters)(scale) ) diff --git a/libraries/chain/include/graphene/chain/protocol/market.hpp b/libraries/chain/include/graphene/chain/protocol/market.hpp index 56352c604e..55438d7cc5 100644 --- a/libraries/chain/include/graphene/chain/protocol/market.hpp +++ b/libraries/chain/include/graphene/chain/protocol/market.hpp @@ -23,6 +23,7 @@ */ #pragma once #include +#include namespace graphene { namespace chain { @@ -94,8 +95,6 @@ namespace graphene { namespace chain { void validate()const; }; - - /** * @ingroup operations * @@ -110,6 +109,16 @@ namespace graphene { namespace chain { */ struct call_order_update_operation : public base_operation { + /** + * Options to be used in @ref call_order_update_operation. + * + * @note this struct can be expanded by adding more options in the end. 
+ */ + struct options_type + { + optional target_collateral_ratio; ///< maximum CR to maintain when selling collateral on margin call + }; + /** this is slightly more expensive than limit orders, this pricing impacts prediction markets */ struct fee_parameters_type { uint64_t fee = 20 * GRAPHENE_BLOCKCHAIN_PRECISION; }; @@ -117,6 +126,8 @@ namespace graphene { namespace chain { account_id_type funding_account; ///< pays fee, collateral, and cover asset delta_collateral; ///< the amount of collateral to add to the margin position asset delta_debt; ///< the amount of the debt to be paid off, may be negative to issue new debt + + typedef extension extensions_type; // note: this will be jsonified to {...} but no longer [...] extensions_type extensions; account_id_type fee_payer()const { return funding_account; } @@ -135,15 +146,16 @@ namespace graphene { namespace chain { struct fee_parameters_type {}; fill_order_operation(){} - fill_order_operation( object_id_type o, account_id_type a, asset p, asset r, asset f ) - :order_id(o),account_id(a),pays(p),receives(r),fee(f){} + fill_order_operation( object_id_type o, account_id_type a, asset p, asset r, asset f, price fp, bool m ) + :order_id(o),account_id(a),pays(p),receives(r),fee(f),fill_price(fp),is_maker(m) {} object_id_type order_id; account_id_type account_id; asset pays; asset receives; asset fee; // paid by receiving account - + price fill_price; + bool is_maker; pair get_market()const { @@ -158,16 +170,68 @@ namespace graphene { namespace chain { share_type calculate_fee(const fee_parameters_type& k)const { return 0; } }; + /** + * @ingroup operations + * + * This operation can be used after a black swan to bid collateral for + * taking over part of the debt and the settlement_fund (see BSIP-0018). + */ + struct bid_collateral_operation : public base_operation + { + /** should be equivalent to call_order_update fee */ + struct fee_parameters_type { uint64_t fee = 20 * GRAPHENE_BLOCKCHAIN_PRECISION; }; + + asset fee; + account_id_type bidder; ///< pays fee and additional collateral + asset additional_collateral; ///< the amount of collateral to bid for the debt + asset debt_covered; ///< the amount of debt to take over + extensions_type extensions; + + account_id_type fee_payer()const { return bidder; } + void validate()const; + }; + + /** + * @ingroup operations + * + * @note This is a virtual operation that is created while reviving a + * bitasset from collateral bids. + */ + struct execute_bid_operation : public base_operation + { + struct fee_parameters_type {}; + + execute_bid_operation(){} + execute_bid_operation( account_id_type a, asset d, asset c ) + : bidder(a), debt(d), collateral(c) {} + + account_id_type bidder; + asset debt; + asset collateral; + asset fee; + + account_id_type fee_payer()const { return bidder; } + void validate()const { FC_ASSERT( !"virtual operation" ); } + + /// This is a virtual operation; there is no fee + share_type calculate_fee(const fee_parameters_type& k)const { return 0; } + }; } } // graphene::chain FC_REFLECT( graphene::chain::limit_order_create_operation::fee_parameters_type, (fee) ) FC_REFLECT( graphene::chain::limit_order_cancel_operation::fee_parameters_type, (fee) ) FC_REFLECT( graphene::chain::call_order_update_operation::fee_parameters_type, (fee) ) -/// THIS IS THE ONLY VIRTUAL OPERATION THUS FAR... 
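The fee_helper specializations earlier in this hunk let operations introduced after a hardfork (such as bid_collateral_operation) borrow the fee of an older, related operation until the committee configures a dedicated fee parameter. Below is a minimal standalone sketch of that fallback idea; it uses a hypothetical string-keyed fee table instead of the real flat_set of fee_parameters, so the names are illustrative only.

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

// Hypothetical fee table keyed by operation name (not the real fee_schedule type).
using fee_table = std::map<std::string, uint64_t>;

// Strict lookup: the operation must already have a configured fee.
uint64_t get_fee(const fee_table& fees, const std::string& op)
{
    return fees.at(op); // throws std::out_of_range if no fee is configured
}

// Fallback lookup, mirroring the idea behind fee_helper<bid_collateral_operation>:
// if the new operation has no entry yet, reuse a related operation's fee.
uint64_t get_fee_or_fallback(const fee_table& fees, const std::string& op,
                             const std::string& fallback_op)
{
    auto itr = fees.find(op);
    return itr != fees.end() ? itr->second : get_fee(fees, fallback_op);
}

int main()
{
    fee_table fees{ { "call_order_update", 20 } };
    // "bid_collateral" is not configured yet, so it reuses call_order_update's fee.
    std::cout << get_fee_or_fallback(fees, "bid_collateral", "call_order_update") << "\n";
}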
-FC_REFLECT( graphene::chain::fill_order_operation::fee_parameters_type, ) +FC_REFLECT( graphene::chain::bid_collateral_operation::fee_parameters_type, (fee) ) +FC_REFLECT( graphene::chain::fill_order_operation::fee_parameters_type, ) // VIRTUAL +FC_REFLECT( graphene::chain::execute_bid_operation::fee_parameters_type, ) // VIRTUAL + +FC_REFLECT( graphene::chain::call_order_update_operation::options_type, (target_collateral_ratio) ) +FC_REFLECT_TYPENAME( graphene::chain::call_order_update_operation::extensions_type ) FC_REFLECT( graphene::chain::limit_order_create_operation,(fee)(seller)(amount_to_sell)(min_to_receive)(expiration)(fill_or_kill)(extensions)) FC_REFLECT( graphene::chain::limit_order_cancel_operation,(fee)(fee_paying_account)(order)(extensions) ) FC_REFLECT( graphene::chain::call_order_update_operation, (fee)(funding_account)(delta_collateral)(delta_debt)(extensions) ) -FC_REFLECT( graphene::chain::fill_order_operation, (fee)(order_id)(account_id)(pays)(receives) ) +FC_REFLECT( graphene::chain::fill_order_operation, (fee)(order_id)(account_id)(pays)(receives)(fill_price)(is_maker) ) +FC_REFLECT( graphene::chain::bid_collateral_operation, (fee)(bidder)(additional_collateral)(debt_covered)(extensions) ) +FC_REFLECT( graphene::chain::execute_bid_operation, (fee)(bidder)(debt)(collateral) ) diff --git a/libraries/chain/include/graphene/chain/protocol/operations.hpp b/libraries/chain/include/graphene/chain/protocol/operations.hpp index 7f2639f15a..de2cfa7fd9 100644 --- a/libraries/chain/include/graphene/chain/protocol/operations.hpp +++ b/libraries/chain/include/graphene/chain/protocol/operations.hpp @@ -91,7 +91,11 @@ namespace graphene { namespace chain { transfer_from_blind_operation, asset_settle_cancel_operation, // VIRTUAL asset_claim_fees_operation, - fba_distribute_operation // VIRTUAL + fba_distribute_operation, // VIRTUAL + bid_collateral_operation, + execute_bid_operation, // VIRTUAL + asset_claim_pool_operation, + asset_update_issuer_operation > operation; /// @} // operations group diff --git a/libraries/chain/include/graphene/chain/protocol/transaction.hpp b/libraries/chain/include/graphene/chain/protocol/transaction.hpp index 4d529a277d..915358207a 100644 --- a/libraries/chain/include/graphene/chain/protocol/transaction.hpp +++ b/libraries/chain/include/graphene/chain/protocol/transaction.hpp @@ -62,8 +62,10 @@ namespace graphene { namespace chain { /** * @brief groups operations that should be applied atomically */ - struct transaction + class transaction { + public: + virtual ~transaction() = default; /** * Least significant 16 bits from the reference block number. If @ref relative_expiration is zero, this field * must be zero as well. 
@@ -85,11 +87,9 @@ namespace graphene { namespace chain { extensions_type extensions; /// Calculate the digest for a transaction - digest_type digest()const; - transaction_id_type id()const; - void validate() const; - /// Calculate the digest used for signature validation - digest_type sig_digest( const chain_id_type& chain_id )const; + digest_type digest()const; + virtual const transaction_id_type& id()const; + virtual void validate() const; void set_expiration( fc::time_point_sec expiration_time ); void set_reference_block( const block_id_type& reference_block ); @@ -113,15 +113,22 @@ namespace graphene { namespace chain { } void get_required_authorities( flat_set& active, flat_set& owner, vector& other )const; + + protected: + // Calculate the digest used for signature validation + digest_type sig_digest( const chain_id_type& chain_id )const; + mutable transaction_id_type _tx_id_buffer; }; /** * @brief adds a signature to a transaction */ - struct signed_transaction : public transaction + class signed_transaction : public transaction { + public: signed_transaction( const transaction& trx = transaction() ) : transaction(trx){} + virtual ~signed_transaction() = default; /** signs and appends to signatures */ const signature_type& sign( const private_key_type& key, const chain_id_type& chain_id ); @@ -165,12 +172,48 @@ namespace graphene { namespace chain { uint32_t max_recursion = GRAPHENE_MAX_SIG_CHECK_DEPTH ) const; - flat_set get_signature_keys( const chain_id_type& chain_id )const; + /** + * @brief Extract public keys from signatures with given chain ID. + * @param chain_id A chain ID + * @return Public keys + * @note If @ref signees is empty, E.G. when it's the first time calling + * this function for the signed transaction, public keys will be + * extracted with given chain ID, and be stored into the mutable + * @ref signees field, then @ref signees will be returned; + * otherwise, the @ref chain_id parameter will be ignored, and + * @ref signees will be returned directly. + */ + virtual const flat_set& get_signature_keys( const chain_id_type& chain_id )const; + /** Signatures */ vector signatures; - /// Removes all operations and signatures + /** Removes all operations and signatures */ void clear() { operations.clear(); signatures.clear(); } + + /** Removes all signatures */ + void clear_signatures() { signatures.clear(); } + protected: + /** Public keys extracted from signatures */ + mutable flat_set _signees; + }; + + /** This represents a signed transaction that will never have its operations, + * signatures etc. modified again, after initial creation. It is therefore + * safe to cache results from various calls. + */ + class precomputable_transaction : public signed_transaction { + public: + precomputable_transaction() {} + precomputable_transaction( const signed_transaction& tx ) : signed_transaction(tx) {}; + precomputable_transaction( signed_transaction&& tx ) : signed_transaction( std::move(tx) ) {}; + virtual ~precomputable_transaction() = default; + + virtual const transaction_id_type& id()const override; + virtual void validate()const override; + virtual const flat_set& get_signature_keys( const chain_id_type& chain_id )const override; + protected: + mutable bool _validated = false; }; void verify_authority( const vector& ops, const flat_set& sigs, @@ -194,10 +237,11 @@ namespace graphene { namespace chain { * If an operation did not create any new object IDs then 0 * should be returned. 
*/ - struct processed_transaction : public signed_transaction + struct processed_transaction : public precomputable_transaction { processed_transaction( const signed_transaction& trx = signed_transaction() ) - : signed_transaction(trx){} + : precomputable_transaction(trx){} + virtual ~processed_transaction() = default; vector operation_results; @@ -209,5 +253,7 @@ namespace graphene { namespace chain { } } // graphene::chain FC_REFLECT( graphene::chain::transaction, (ref_block_num)(ref_block_prefix)(expiration)(operations)(extensions) ) +// Note: not reflecting signees field for backward compatibility; in addition, it should not be in p2p messages FC_REFLECT_DERIVED( graphene::chain::signed_transaction, (graphene::chain::transaction), (signatures) ) -FC_REFLECT_DERIVED( graphene::chain::processed_transaction, (graphene::chain::signed_transaction), (operation_results) ) +FC_REFLECT_DERIVED( graphene::chain::precomputable_transaction, (graphene::chain::signed_transaction), ) +FC_REFLECT_DERIVED( graphene::chain::processed_transaction, (graphene::chain::precomputable_transaction), (operation_results) ) diff --git a/libraries/chain/include/graphene/chain/protocol/types.hpp b/libraries/chain/include/graphene/chain/protocol/types.hpp index 5237fcad53..4e96abf9f9 100644 --- a/libraries/chain/include/graphene/chain/protocol/types.hpp +++ b/libraries/chain/include/graphene/chain/protocol/types.hpp @@ -33,10 +33,12 @@ #include #include #include + +#include + #include #include #include -#include #include #include @@ -46,6 +48,8 @@ #include #include +#include + namespace graphene { namespace chain { using namespace graphene::db; @@ -63,13 +67,11 @@ namespace graphene { namespace chain { using std::tie; using std::make_pair; - using fc::smart_ref; using fc::variant_object; using fc::variant; using fc::enum_type; using fc::optional; using fc::unsigned_int; - using fc::signed_int; using fc::time_point_sec; using fc::time_point; using fc::safe; @@ -84,6 +86,8 @@ namespace graphene { namespace chain { typedef fc::ecc::private_key private_key_type; typedef fc::sha256 chain_id_type; + typedef boost::rational< int32_t > ratio_type; + enum asset_issuer_permission_flags { charge_market_fee = 0x01, /**< an issuer-specified percentage of all market trades in this asset is paid to the issuer */ @@ -155,7 +159,8 @@ namespace graphene { namespace chain { impl_budget_record_object_type, impl_special_authority_object_type, impl_buyback_object_type, - impl_fba_accumulator_object_type + impl_fba_accumulator_object_type, + impl_collateral_bid_object_type }; //typedef fc::unsigned_int object_id_type; @@ -179,7 +184,7 @@ namespace graphene { namespace chain { typedef object_id< protocol_ids, account_object_type, account_object> account_id_type; typedef object_id< protocol_ids, asset_object_type, asset_object> asset_id_type; typedef object_id< protocol_ids, force_settlement_object_type, force_settlement_object> force_settlement_id_type; - typedef object_id< protocol_ids, committee_member_object_type, committee_member_object> committee_member_id_type; + typedef object_id< protocol_ids, committee_member_object_type, committee_member_object> committee_member_id_type; typedef object_id< protocol_ids, witness_object_type, witness_object> witness_id_type; typedef object_id< protocol_ids, limit_order_object_type, limit_order_object> limit_order_id_type; typedef object_id< protocol_ids, call_order_object_type, call_order_object> call_order_id_type; @@ -207,6 +212,7 @@ namespace graphene { namespace chain { class 
special_authority_object; class buyback_object; class fba_accumulator_object; + class collateral_bid_object; typedef object_id< implementation_ids, impl_global_property_object_type, global_property_object> global_property_id_type; typedef object_id< implementation_ids, impl_dynamic_global_property_object_type, dynamic_global_property_object> dynamic_global_property_id_type; @@ -227,8 +233,8 @@ namespace graphene { namespace chain { typedef object_id< implementation_ids, impl_special_authority_object_type, special_authority_object > special_authority_id_type; typedef object_id< implementation_ids, impl_buyback_object_type, buyback_object > buyback_id_type; typedef object_id< implementation_ids, impl_fba_accumulator_object_type, fba_accumulator_object > fba_accumulator_id_type; + typedef object_id< implementation_ids, impl_collateral_bid_object_type, collateral_bid_object > collateral_bid_id_type; - typedef fc::array symbol_type; typedef fc::ripemd160 block_id_type; typedef fc::ripemd160 checksum_type; typedef fc::ripemd160 transaction_id_type; @@ -256,8 +262,14 @@ namespace graphene { namespace chain { friend bool operator == ( const public_key_type& p1, const fc::ecc::public_key& p2); friend bool operator == ( const public_key_type& p1, const public_key_type& p2); friend bool operator != ( const public_key_type& p1, const public_key_type& p2); - // TODO: This is temporary for testing - bool is_valid_v1( const std::string& base58str ); + }; + + class pubkey_comparator { + public: + inline bool operator()( const public_key_type& a, const public_key_type& b )const + { + return a.key_data < b.key_data; + } }; struct extended_public_key_type @@ -307,12 +319,12 @@ namespace graphene { namespace chain { namespace fc { - void to_variant( const graphene::chain::public_key_type& var, fc::variant& vo ); - void from_variant( const fc::variant& var, graphene::chain::public_key_type& vo ); - void to_variant( const graphene::chain::extended_public_key_type& var, fc::variant& vo ); - void from_variant( const fc::variant& var, graphene::chain::extended_public_key_type& vo ); - void to_variant( const graphene::chain::extended_private_key_type& var, fc::variant& vo ); - void from_variant( const fc::variant& var, graphene::chain::extended_private_key_type& vo ); + void to_variant( const graphene::chain::public_key_type& var, fc::variant& vo, uint32_t max_depth = 2 ); + void from_variant( const fc::variant& var, graphene::chain::public_key_type& vo, uint32_t max_depth = 2 ); + void to_variant( const graphene::chain::extended_public_key_type& var, fc::variant& vo, uint32_t max_depth = 2 ); + void from_variant( const fc::variant& var, graphene::chain::extended_public_key_type& vo, uint32_t max_depth = 2 ); + void to_variant( const graphene::chain::extended_private_key_type& var, fc::variant& vo, uint32_t max_depth = 2 ); + void from_variant( const fc::variant& var, graphene::chain::extended_private_key_type& vo, uint32_t max_depth = 2 ); } FC_REFLECT( graphene::chain::public_key_type, (key_data) ) @@ -359,6 +371,7 @@ FC_REFLECT_ENUM( graphene::chain::impl_object_type, (impl_special_authority_object_type) (impl_buyback_object_type) (impl_fba_accumulator_object_type) + (impl_collateral_bid_object_type) ) FC_REFLECT_TYPENAME( graphene::chain::share_type ) @@ -390,6 +403,7 @@ FC_REFLECT_TYPENAME( graphene::chain::budget_record_id_type ) FC_REFLECT_TYPENAME( graphene::chain::special_authority_id_type ) FC_REFLECT_TYPENAME( graphene::chain::buyback_id_type ) FC_REFLECT_TYPENAME( 
graphene::chain::fba_accumulator_id_type ) +FC_REFLECT_TYPENAME( graphene::chain::collateral_bid_id_type ) FC_REFLECT( graphene::chain::void_t, ) diff --git a/libraries/chain/include/graphene/chain/protocol/vote.hpp b/libraries/chain/include/graphene/chain/protocol/vote.hpp index b93b9d037f..ec2ebd1640 100644 --- a/libraries/chain/include/graphene/chain/protocol/vote.hpp +++ b/libraries/chain/include/graphene/chain/protocol/vote.hpp @@ -75,11 +75,11 @@ struct vote_id_type {} /// Construct this vote_id_type from a serial string in the form "type:instance" explicit vote_id_type(const std::string& serial) - { + { try { auto colon = serial.find(':'); - if( colon != std::string::npos ) - *this = vote_id_type(vote_type(std::stoul(serial.substr(0, colon))), std::stoul(serial.substr(colon+1))); - } + FC_ASSERT( colon != std::string::npos ); + *this = vote_id_type(vote_type(std::stoul(serial.substr(0, colon))), std::stoul(serial.substr(colon+1))); + } FC_CAPTURE_AND_RETHROW( (serial) ) } /// Set the type of this vote_id_type void set_type(vote_type type) @@ -141,12 +141,11 @@ namespace fc class variant; -void to_variant( const graphene::chain::vote_id_type& var, fc::variant& vo ); -void from_variant( const fc::variant& var, graphene::chain::vote_id_type& vo ); +void to_variant( const graphene::chain::vote_id_type& var, fc::variant& vo, uint32_t max_depth = 1 ); +void from_variant( const fc::variant& var, graphene::chain::vote_id_type& vo, uint32_t max_depth = 1 ); } // fc -FC_REFLECT_TYPENAME( graphene::chain::vote_id_type::vote_type ) FC_REFLECT_TYPENAME( fc::flat_set ) FC_REFLECT_ENUM( graphene::chain::vote_id_type::vote_type, (witness)(committee)(worker)(VOTE_TYPE_COUNT) ) diff --git a/libraries/chain/include/graphene/chain/pts_address.hpp b/libraries/chain/include/graphene/chain/pts_address.hpp index 8c53fb2e17..636e2f114e 100644 --- a/libraries/chain/include/graphene/chain/pts_address.hpp +++ b/libraries/chain/include/graphene/chain/pts_address.hpp @@ -73,6 +73,6 @@ FC_REFLECT( graphene::chain::pts_address, (addr) ) namespace fc { - void to_variant( const graphene::chain::pts_address& var, fc::variant& vo ); - void from_variant( const fc::variant& var, graphene::chain::pts_address& vo ); + void to_variant( const graphene::chain::pts_address& var, fc::variant& vo, uint32_t max_depth = 1 ); + void from_variant( const fc::variant& var, graphene::chain::pts_address& vo, uint32_t max_depth = 1 ); } diff --git a/libraries/chain/include/graphene/chain/transaction_evaluation_state.hpp b/libraries/chain/include/graphene/chain/transaction_evaluation_state.hpp index 5ffb4bb7d1..1b8fa95780 100644 --- a/libraries/chain/include/graphene/chain/transaction_evaluation_state.hpp +++ b/libraries/chain/include/graphene/chain/transaction_evaluation_state.hpp @@ -26,7 +26,7 @@ namespace graphene { namespace chain { class database; - struct signed_transaction; + class signed_transaction; /** * Place holder for state tracked while processing a transaction. 
This class provides helper methods that are diff --git a/libraries/chain/include/graphene/chain/withdraw_permission_object.hpp b/libraries/chain/include/graphene/chain/withdraw_permission_object.hpp index 000573bd35..f202ee1b66 100644 --- a/libraries/chain/include/graphene/chain/withdraw_permission_object.hpp +++ b/libraries/chain/include/graphene/chain/withdraw_permission_object.hpp @@ -27,7 +27,6 @@ #include namespace graphene { namespace chain { - /** * @class withdraw_permission_object * @brief Grants another account authority to withdraw a limited amount of funds per interval @@ -52,14 +51,23 @@ namespace graphene { namespace chain { asset withdrawal_limit; /// The duration of a withdrawal period in seconds uint32_t withdrawal_period_sec = 0; - /// The beginning of the next withdrawal period + /*** + * The beginning of the next withdrawal period + * WARNING: Due to caching, this value does not always represent the start of the next or current period (because it is only updated after a withdrawal operation such as claim). For the latest current period, use current_period(). + */ time_point_sec period_start_time; /// The time at which this withdraw permission expires time_point_sec expiration; - /// tracks the total amount + /*** + * Tracks the total amount + * WARNING: Due to caching, this value does not always represent the total amount claimed during the current period; it may represent what was claimed during the last claimed period (because it is only updated after a withdrawal operation such as claim). For the latest current period, use current_period(). + */ share_type claimed_this_period; - /// True if the permission may still be claimed for this period; false if it has already been used + + /*** + * Determine how much is still available to be claimed during the period that contains a time of interest. This object and function is mainly intended to be used with the "current" time as a parameter. The current time can be obtained from the time of the current head of the blockchain. + */ asset available_this_period( fc::time_point_sec current_time )const { if( current_time >= period_start_time + withdrawal_period_sec ) diff --git a/libraries/chain/include/graphene/chain/worker_object.hpp b/libraries/chain/include/graphene/chain/worker_object.hpp index 1219fc1c6d..b1e2b7c107 100644 --- a/libraries/chain/include/graphene/chain/worker_object.hpp +++ b/libraries/chain/include/graphene/chain/worker_object.hpp @@ -153,7 +153,6 @@ typedef multi_index_container< > > worker_object_multi_index_type; -//typedef flat_index worker_index; using worker_index = generic_index; } } // graphene::chain diff --git a/libraries/chain/market_evaluator.cpp b/libraries/chain/market_evaluator.cpp index 27c31ae42e..270c7d3fc6 100644 --- a/libraries/chain/market_evaluator.cpp +++ b/libraries/chain/market_evaluator.cpp @@ -48,9 +48,13 @@ void_result limit_order_create_evaluator::do_evaluate(const limit_order_create_o _receive_asset = &op.min_to_receive.asset_id(d); if( _sell_asset->options.whitelist_markets.size() ) - FC_ASSERT( _sell_asset->options.whitelist_markets.find(_receive_asset->id) != _sell_asset->options.whitelist_markets.end() ); + FC_ASSERT( _sell_asset->options.whitelist_markets.find(_receive_asset->id) + != _sell_asset->options.whitelist_markets.end(), + "This market has not been whitelisted." 
); if( _sell_asset->options.blacklist_markets.size() ) - FC_ASSERT( _sell_asset->options.blacklist_markets.find(_receive_asset->id) == _sell_asset->options.blacklist_markets.end() ); + FC_ASSERT( _sell_asset->options.blacklist_markets.find(_receive_asset->id) + == _sell_asset->options.blacklist_markets.end(), + "This market has been blacklisted." ); FC_ASSERT( is_authorized_asset( d, *_seller, *_sell_asset ) ); FC_ASSERT( is_authorized_asset( d, *_seller, *_receive_asset ) ); @@ -61,23 +65,42 @@ void_result limit_order_create_evaluator::do_evaluate(const limit_order_create_o return void_result(); } FC_CAPTURE_AND_RETHROW( (op) ) } +void limit_order_create_evaluator::convert_fee() +{ + if( db().head_block_time() <= HARDFORK_CORE_604_TIME ) + generic_evaluator::convert_fee(); + else + if( !trx_state->skip_fee ) + { + if( fee_asset->get_id() != asset_id_type() ) + { + db().modify(*fee_asset_dyn_data, [this](asset_dynamic_data_object& d) { + d.fee_pool -= core_fee_paid; + }); + } + } +} + void limit_order_create_evaluator::pay_fee() { if( db().head_block_time() <= HARDFORK_445_TIME ) generic_evaluator::pay_fee(); else + { _deferred_fee = core_fee_paid; + if( db().head_block_time() > HARDFORK_CORE_604_TIME && fee_asset->get_id() != asset_id_type() ) + _deferred_paid_fee = fee_from_account; + } } object_id_type limit_order_create_evaluator::do_apply(const limit_order_create_operation& op) { try { - const auto& seller_stats = _seller->statistics(db()); - db().modify(seller_stats, [&](account_statistics_object& bal) { - if( op.amount_to_sell.asset_id == asset_id_type() ) - { - bal.total_core_in_orders += op.amount_to_sell.amount; - } - }); + if( op.amount_to_sell.asset_id == asset_id_type() ) + { + db().modify( _seller->statistics(db()), [&op](account_statistics_object& bal) { + bal.total_core_in_orders += op.amount_to_sell.amount; + }); + } db().adjust_balance(op.seller, -op.amount_to_sell); @@ -87,9 +110,14 @@ object_id_type limit_order_create_evaluator::do_apply(const limit_order_create_o obj.sell_price = op.get_price(); obj.expiration = op.expiration; obj.deferred_fee = _deferred_fee; + obj.deferred_paid_fee = _deferred_paid_fee; }); limit_order_id_type order_id = new_order_object.id; // save this because we may remove the object by filling it - bool filled = db().apply_order(new_order_object); + bool filled; + if( db().get_dynamic_global_properties().next_maintenance_time <= HARDFORK_CORE_625_TIME ) + filled = db().apply_order_before_hardfork_625( new_order_object ); + else + filled = db().apply_order( new_order_object ); FC_ASSERT( !op.fill_or_kill || filled ); @@ -114,12 +142,15 @@ asset limit_order_cancel_evaluator::do_apply(const limit_order_cancel_operation& auto quote_asset = _order->sell_price.quote.asset_id; auto refunded = _order->amount_for_sale(); - d.cancel_order(*_order, false /* don't create a virtual op*/); + d.cancel_limit_order(*_order, false /* don't create a virtual op*/); - // Possible optimization: order can be called by canceling a limit order iff the canceled order was at the top of the book. - // Do I need to check calls in both assets? - d.check_call_orders(base_asset(d)); - d.check_call_orders(quote_asset(d)); + if( d.get_dynamic_global_properties().next_maintenance_time <= HARDFORK_CORE_606_TIME ) + { + // Possible optimization: order can be called by canceling a limit order iff the canceled order was at the top of the book. + // Do I need to check calls in both assets? 
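The do_evaluate changes earlier in this hunk add explicit error messages to the market whitelist and blacklist checks. The following is a compact sketch of that rule using plain standard-library containers; the asset types, database access and FC_ASSERT machinery are simplified away, so the names here are illustrative, not the real Graphene API.

#include <cstdint>
#include <iostream>
#include <set>
#include <stdexcept>

using asset_id = uint32_t; // stand-in for graphene's asset_id_type

struct market_options {
    std::set<asset_id> whitelist_markets; // if non-empty, only these counter-assets may be traded against
    std::set<asset_id> blacklist_markets; // these counter-assets may never be traded against
};

void check_market_allowed(const market_options& sell_asset_opts, asset_id receive_asset)
{
    if (!sell_asset_opts.whitelist_markets.empty() &&
        sell_asset_opts.whitelist_markets.count(receive_asset) == 0)
        throw std::invalid_argument("This market has not been whitelisted.");
    if (sell_asset_opts.blacklist_markets.count(receive_asset) != 0)
        throw std::invalid_argument("This market has been blacklisted.");
}

int main()
{
    market_options opts;
    opts.blacklist_markets.insert(7);
    check_market_allowed(opts, 3);            // ok: no whitelist, not blacklisted
    try { check_market_allowed(opts, 7); }    // rejected: blacklisted market
    catch (const std::exception& e) { std::cout << e.what() << "\n"; }
}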
+ d.check_call_orders(base_asset(d)); + d.check_call_orders(quote_asset(d)); + } return refunded; } FC_CAPTURE_AND_RETHROW( (o) ) } @@ -128,6 +159,11 @@ void_result call_order_update_evaluator::do_evaluate(const call_order_update_ope { try { database& d = db(); + // TODO: remove this check and the assertion after hf_834 + if( d.get_dynamic_global_properties().next_maintenance_time <= HARDFORK_CORE_834_TIME ) + FC_ASSERT( !o.extensions.value.target_collateral_ratio.valid(), + "Can not set target_collateral_ratio in call_order_update_operation before hardfork 834." ); + _paying_account = &o.funding_account(d); _debt_asset = &o.delta_debt.asset_id(d); FC_ASSERT( _debt_asset->is_market_issued(), "Unable to cover ${sym} as it is not a collateralized asset.", @@ -137,45 +173,36 @@ void_result call_order_update_evaluator::do_evaluate(const call_order_update_ope /// if there is a settlement for this asset, then no further margin positions may be taken and /// all existing margin positions should have been closed va database::globally_settle_asset - FC_ASSERT( !_bitasset_data->has_settlement() ); + FC_ASSERT( !_bitasset_data->has_settlement(), "Cannot update debt position when the asset has been globally settled" ); - FC_ASSERT( o.delta_collateral.asset_id == _bitasset_data->options.short_backing_asset ); + FC_ASSERT( o.delta_collateral.asset_id == _bitasset_data->options.short_backing_asset, + "Collateral asset type should be same as backing asset of debt asset" ); if( _bitasset_data->is_prediction_market ) - FC_ASSERT( o.delta_collateral.amount == o.delta_debt.amount ); + FC_ASSERT( o.delta_collateral.amount == o.delta_debt.amount, + "Debt amount and collateral amount should be same when updating debt position in a prediction market" ); else if( _bitasset_data->current_feed.settlement_price.is_null() ) FC_THROW_EXCEPTION(insufficient_feeds, "Cannot borrow asset with no price feed."); - if( o.delta_debt.amount < 0 ) - { - FC_ASSERT( d.get_balance(*_paying_account, *_debt_asset) >= o.delta_debt, - "Cannot cover by ${c} when payer only has ${b}", - ("c", o.delta_debt.amount)("b", d.get_balance(*_paying_account, *_debt_asset).amount) ); - } - - if( o.delta_collateral.amount > 0 ) - { - FC_ASSERT( d.get_balance(*_paying_account, _bitasset_data->options.short_backing_asset(d)) >= o.delta_collateral, - "Cannot increase collateral by ${c} when payer only has ${b}", ("c", o.delta_collateral.amount) - ("b", d.get_balance(*_paying_account, o.delta_collateral.asset_id(d)).amount) ); - } + // Note: there was code here checking whether the account has enough balance to increase delta collateral, + // which is now removed since the check is implicitly done later by `adjust_balance()` in `do_apply()`. return void_result(); } FC_CAPTURE_AND_RETHROW( (o) ) } -void_result call_order_update_evaluator::do_apply(const call_order_update_operation& o) +object_id_type call_order_update_evaluator::do_apply(const call_order_update_operation& o) { try { database& d = db(); if( o.delta_debt.amount != 0 ) { - d.adjust_balance( o.funding_account, o.delta_debt ); + d.adjust_balance( o.funding_account, o.delta_debt ); // Deduct the debt paid from the total supply of the debt asset. 
d.modify(_debt_asset->dynamic_asset_data_id(d), [&](asset_dynamic_data_object& dynamic_asset) { dynamic_asset.current_supply += o.delta_debt.amount; - assert(dynamic_asset.current_supply >= 0); + FC_ASSERT(dynamic_asset.current_supply >= 0); }); } @@ -192,61 +219,73 @@ void_result call_order_update_evaluator::do_apply(const call_order_update_operat } } - auto& call_idx = d.get_index_type().indices().get(); auto itr = call_idx.find( boost::make_tuple(o.funding_account, o.delta_debt.asset_id) ); const call_order_object* call_obj = nullptr; + call_order_id_type call_order_id; + + optional old_collateralization; + optional old_debt; - if( itr == call_idx.end() ) + if( itr == call_idx.end() ) // creating new debt position { - FC_ASSERT( o.delta_collateral.amount > 0 ); - FC_ASSERT( o.delta_debt.amount > 0 ); + FC_ASSERT( o.delta_collateral.amount > 0, "Delta collateral amount of new debt position should be positive" ); + FC_ASSERT( o.delta_debt.amount > 0, "Delta debt amount of new debt position should be positive" ); - call_obj = &d.create( [&](call_order_object& call ){ + call_obj = &d.create( [&o,this]( call_order_object& call ){ call.borrower = o.funding_account; call.collateral = o.delta_collateral.amount; call.debt = o.delta_debt.amount; call.call_price = price::call_price(o.delta_debt, o.delta_collateral, _bitasset_data->current_feed.maintenance_collateral_ratio); - + call.target_collateral_ratio = o.extensions.value.target_collateral_ratio; }); + call_order_id = call_obj->id; } - else + else // updating existing debt position { call_obj = &*itr; + auto new_collateral = call_obj->collateral + o.delta_collateral.amount; + auto new_debt = call_obj->debt + o.delta_debt.amount; + call_order_id = call_obj->id; - d.modify( *call_obj, [&]( call_order_object& call ){ - call.collateral += o.delta_collateral.amount; - call.debt += o.delta_debt.amount; - if( call.debt > 0 ) - { - call.call_price = price::call_price(call.get_debt(), call.get_collateral(), - _bitasset_data->current_feed.maintenance_collateral_ratio); - } - }); - } + if( new_debt == 0 ) + { + FC_ASSERT( new_collateral == 0, "Should claim all collateral when closing debt position" ); + d.remove( *call_obj ); + return call_order_id; + } - auto debt = call_obj->get_debt(); - if( debt.amount == 0 ) - { - FC_ASSERT( call_obj->collateral == 0 ); - d.remove( *call_obj ); - return void_result(); - } + FC_ASSERT( new_collateral > 0 && new_debt > 0, + "Both collateral and debt should be positive after updated a debt position if not to close it" ); - FC_ASSERT(call_obj->collateral > 0 && call_obj->debt > 0); + old_collateralization = call_obj->collateralization(); + old_debt = call_obj->debt; + + d.modify( *call_obj, [&o,new_debt,new_collateral,this]( call_order_object& call ){ + call.collateral = new_collateral; + call.debt = new_debt; + call.call_price = price::call_price( call.get_debt(), call.get_collateral(), + _bitasset_data->current_feed.maintenance_collateral_ratio ); + call.target_collateral_ratio = o.extensions.value.target_collateral_ratio; + }); + } // then we must check for margin calls and other issues if( !_bitasset_data->is_prediction_market ) { - call_order_id_type call_order_id = call_obj->id; - // check to see if the order needs to be margin called now, but don't allow black swans and require there to be // limit orders available that could be used to fill the order. 
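The branch above distinguishes creating, updating and closing a margin position: a new position must start with positive collateral and debt, closing a position must claim all of its collateral, and otherwise both values must remain positive. Here is a compact sketch of those invariants with plain integers; the real code operates on call_order_object and graphene asset amounts, so these types are simplified stand-ins.

#include <cstdint>
#include <iostream>
#include <optional>
#include <stdexcept>

struct position { int64_t collateral = 0; int64_t debt = 0; };

// Returns the updated position, or std::nullopt if the position was closed.
std::optional<position> apply_delta(const std::optional<position>& current,
                                    int64_t delta_collateral, int64_t delta_debt)
{
    if (!current) { // creating a new debt position
        if (delta_collateral <= 0 || delta_debt <= 0)
            throw std::invalid_argument("delta collateral and debt of a new position must be positive");
        return position{ delta_collateral, delta_debt };
    }
    const int64_t new_collateral = current->collateral + delta_collateral;
    const int64_t new_debt       = current->debt + delta_debt;
    if (new_debt == 0) {
        if (new_collateral != 0)
            throw std::invalid_argument("should claim all collateral when closing a debt position");
        return std::nullopt; // position removed
    }
    if (new_collateral <= 0 || new_debt <= 0)
        throw std::invalid_argument("collateral and debt must stay positive unless the position is closed");
    return position{ new_collateral, new_debt };
}

int main()
{
    auto p = apply_delta(std::nullopt, 100, 10); // create: 100 collateral, 10 debt
    p = apply_delta(p, -100, -10);               // close: claims all collateral
    std::cout << (p ? "open" : "closed") << "\n";
}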
- if( d.check_call_orders( *_debt_asset, false ) ) + // Note: due to https://github.com/bitshares/bitshares-core/issues/649, before core-343 hard fork, + // the first call order may be unable to be updated if the second one is undercollateralized. + if( d.check_call_orders( *_debt_asset, false, false, _bitasset_data ) ) // don't allow black swan, not for new limit order { - const auto call_obj = d.find(call_order_id); - // if we filled at least one call order, we are OK if we totally filled. + call_obj = d.find(call_order_id); + // before hard fork core-583: if we filled at least one call order, we are OK if we totally filled. + // after hard fork core-583: we want to allow increasing collateral + // Note: increasing collateral won't get the call order itself matched (instantly margin called) + // if there is at least a call order get matched but didn't cause a black swan event, + // current order must have got matched. in this case, it's OK if it's totally filled. GRAPHENE_ASSERT( !call_obj, call_order_update_unfilled_margin_call, @@ -256,21 +295,104 @@ void_result call_order_update_evaluator::do_apply(const call_order_update_operat } else { - const auto call_obj = d.find(call_order_id); + call_obj = d.find(call_order_id); + // we know no black swan event has occurred FC_ASSERT( call_obj, "no margin call was executed and yet the call object was deleted" ); - //edump( (~call_obj->call_price) ("<")( _bitasset_data->current_feed.settlement_price) ); - // We didn't fill any call orders. This may be because we - // aren't in margin call territory, or it may be because there - // were no matching orders. In the latter case, we throw. - GRAPHENE_ASSERT( - ~call_obj->call_price < _bitasset_data->current_feed.settlement_price, - call_order_update_unfilled_margin_call, - "Updating call order would trigger a margin call that cannot be fully filled", - ("a", ~call_obj->call_price )("b", _bitasset_data->current_feed.settlement_price) - ); + if( d.head_block_time() <= HARDFORK_CORE_583_TIME ) // TODO remove after hard fork core-583 + { + // We didn't fill any call orders. This may be because we + // aren't in margin call territory, or it may be because there + // were no matching orders. In the latter case, we throw. + GRAPHENE_ASSERT( + ~call_obj->call_price < _bitasset_data->current_feed.settlement_price, + call_order_update_unfilled_margin_call, + "Updating call order would trigger a margin call that cannot be fully filled", + ("a", ~call_obj->call_price )("b", _bitasset_data->current_feed.settlement_price) + ); + } + else // after hard fork, always allow call order to be updated if collateral ratio is increased and debt is not increased + { + // We didn't fill any call orders. This may be because we + // aren't in margin call territory, or it may be because there + // were no matching orders. In the latter case, + // if collateral ratio is not increased or debt is increased, we throw. 
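The assertion that follows this comment (active after HARDFORK_CORE_583) only rejects an update when the position would trigger a margin call that cannot be filled and the update does not strictly improve the position. A simplified sketch of that predicate is shown below; the ~call_price versus settlement_price comparison is abstracted into a single boolean and collateralization is expressed as a plain ratio, so this is a reading aid rather than the actual implementation.

#include <stdexcept>

struct call_state {
    long long debt;
    double collateralization; // collateral / debt, simplified to a double
};

// 'unfillable_margin_call' stands in for the feed-price comparison in the real code.
void check_update_allowed(const call_state& before, const call_state& after,
                          bool unfillable_margin_call)
{
    const bool improves_position = after.debt <= before.debt &&
                                   after.collateralization > before.collateralization;
    if (unfillable_margin_call && !improves_position)
        throw std::invalid_argument(
            "Can only increase collateral ratio without increasing debt if the update "
            "would trigger a margin call that cannot be fully filled");
}

// Usage: check_update_allowed({100, 1.75}, {100, 2.0}, true) passes; lowering the
// ratio or adding debt while a margin call cannot be filled throws.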
+ // be here, we know no margin call was executed, + // so call_obj's collateral ratio should be set only by op + FC_ASSERT( ( old_collateralization.valid() && call_obj->debt <= *old_debt + && call_obj->collateralization() > *old_collateralization ) + || ~call_obj->call_price < _bitasset_data->current_feed.settlement_price, + "Can only increase collateral ratio without increasing debt if would trigger a margin call that " + "cannot be fully filled", + ("new_call_price", ~call_obj->call_price ) + ("settlement_price", _bitasset_data->current_feed.settlement_price) + ("old_debt", old_debt) + ("new_debt", call_obj->debt) + ("old_collateralization", old_collateralization) + ("new_collateralization", call_obj->collateralization() ) + ); + } } } + return call_order_id; +} FC_CAPTURE_AND_RETHROW( (o) ) } + +void_result bid_collateral_evaluator::do_evaluate(const bid_collateral_operation& o) +{ try { + database& d = db(); + + FC_ASSERT( d.head_block_time() > HARDFORK_CORE_216_TIME, "Not yet!" ); + + _paying_account = &o.bidder(d); + _debt_asset = &o.debt_covered.asset_id(d); + FC_ASSERT( _debt_asset->is_market_issued(), "Unable to cover ${sym} as it is not a collateralized asset.", + ("sym", _debt_asset->symbol) ); + + _bitasset_data = &_debt_asset->bitasset_data(d); + + FC_ASSERT( _bitasset_data->has_settlement() ); + + FC_ASSERT( o.additional_collateral.asset_id == _bitasset_data->options.short_backing_asset ); + + FC_ASSERT( !_bitasset_data->is_prediction_market, "Cannot bid on a prediction market!" ); + + if( o.additional_collateral.amount > 0 ) + { + FC_ASSERT( d.get_balance(*_paying_account, _bitasset_data->options.short_backing_asset(d)) >= o.additional_collateral, + "Cannot bid ${c} collateral when payer only has ${b}", ("c", o.additional_collateral.amount) + ("b", d.get_balance(*_paying_account, o.additional_collateral.asset_id(d)).amount) ); + } + + const collateral_bid_index& bids = d.get_index_type(); + const auto& index = bids.indices().get(); + const auto& bid = index.find( boost::make_tuple( o.debt_covered.asset_id, o.bidder ) ); + if( bid != index.end() ) + _bid = &(*bid); + else + FC_ASSERT( o.debt_covered.amount > 0, "Can't find bid to cancel?!"); + + return void_result(); +} FC_CAPTURE_AND_RETHROW( (o) ) } + + +void_result bid_collateral_evaluator::do_apply(const bid_collateral_operation& o) +{ try { + database& d = db(); + + if( _bid ) + d.cancel_bid( *_bid, false ); + + if( o.debt_covered.amount == 0 ) return void_result(); + + d.adjust_balance( o.bidder, -o.additional_collateral ); + + _bid = &d.create([&]( collateral_bid_object& bid ) { + bid.bidder = o.bidder; + bid.inv_swan_price = o.additional_collateral / o.debt_covered; + }); + + // Note: CORE asset in collateral_bid_object is not counted in account_stats.total_core_in_orders + return void_result(); } FC_CAPTURE_AND_RETHROW( (o) ) } diff --git a/libraries/chain/market_object.cpp b/libraries/chain/market_object.cpp new file mode 100644 index 0000000000..993df7924f --- /dev/null +++ b/libraries/chain/market_object.cpp @@ -0,0 +1,266 @@ +/* + * Copyright (c) 2018 Abit More, and contributors. 
+ * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include + +#include + +using namespace graphene::chain; + +/* +target_CR = max( target_CR, MCR ) + +target_CR = new_collateral / ( new_debt / feed_price ) + = ( collateral - max_amount_to_sell ) * feed_price + / ( debt - amount_to_get ) + = ( collateral - max_amount_to_sell ) * feed_price + / ( debt - round_down(max_amount_to_sell * match_price ) ) + = ( collateral - max_amount_to_sell ) * feed_price + / ( debt - (max_amount_to_sell * match_price - x) ) + +Note: x is the fraction, 0 <= x < 1 + +=> + +max_amount_to_sell = ( (debt + x) * target_CR - collateral * feed_price ) + / (target_CR * match_price - feed_price) + = ( (debt + x) * tCR / DENOM - collateral * fp_debt_amt / fp_coll_amt ) + / ( (tCR / DENOM) * (mp_debt_amt / mp_coll_amt) - fp_debt_amt / fp_coll_amt ) + = ( (debt + x) * tCR * fp_coll_amt * mp_coll_amt - collateral * fp_debt_amt * DENOM * mp_coll_amt) + / ( tCR * mp_debt_amt * fp_coll_amt - fp_debt_amt * DENOM * mp_coll_amt ) + +max_debt_to_cover = max_amount_to_sell * match_price + = max_amount_to_sell * mp_debt_amt / mp_coll_amt + = ( (debt + x) * tCR * fp_coll_amt * mp_debt_amt - collateral * fp_debt_amt * DENOM * mp_debt_amt) + / (tCR * mp_debt_amt * fp_coll_amt - fp_debt_amt * DENOM * mp_coll_amt) +*/ +share_type call_order_object::get_max_debt_to_cover( price match_price, + price feed_price, + const uint16_t maintenance_collateral_ratio )const +{ try { + // be defensive here, make sure feed_price is in collateral / debt format + if( feed_price.base.asset_id != call_price.base.asset_id ) + feed_price = ~feed_price; + + FC_ASSERT( feed_price.base.asset_id == call_price.base.asset_id + && feed_price.quote.asset_id == call_price.quote.asset_id ); + + if( call_price > feed_price ) // feed protected. 
be defensive here, although this should be guaranteed by caller + return 0; + + if( !target_collateral_ratio.valid() ) // target cr is not set + return debt; + + uint16_t tcr = std::max( *target_collateral_ratio, maintenance_collateral_ratio ); // use mcr if target cr is too small + + // be defensive here, make sure match_price is in collateral / debt format + if( match_price.base.asset_id != call_price.base.asset_id ) + match_price = ~match_price; + + FC_ASSERT( match_price.base.asset_id == call_price.base.asset_id + && match_price.quote.asset_id == call_price.quote.asset_id ); + + typedef boost::multiprecision::int256_t i256; + i256 mp_debt_amt = match_price.quote.amount.value; + i256 mp_coll_amt = match_price.base.amount.value; + i256 fp_debt_amt = feed_price.quote.amount.value; + i256 fp_coll_amt = feed_price.base.amount.value; + + // firstly we calculate without the fraction (x), the result could be a bit too small + i256 numerator = fp_coll_amt * mp_debt_amt * debt.value * tcr + - fp_debt_amt * mp_debt_amt * collateral.value * GRAPHENE_COLLATERAL_RATIO_DENOM; + if( numerator < 0 ) // feed protected, actually should not be true here, just check to be safe + return 0; + + i256 denominator = fp_coll_amt * mp_debt_amt * tcr - fp_debt_amt * mp_coll_amt * GRAPHENE_COLLATERAL_RATIO_DENOM; + if( denominator <= 0 ) // black swan + return debt; + + // note: if add 1 here, will result in 1.5x imperfection rate; + // however, due to rounding, the result could still be a bit too big, thus imperfect. + i256 to_cover_i256 = ( numerator / denominator ); + if( to_cover_i256 >= debt.value ) // avoid possible overflow + return debt; + share_type to_cover_amt = static_cast< int64_t >( to_cover_i256 ); + + // stabilize + // note: rounding up-down results in 3x imperfection rate in comparison to down-down-up + asset to_pay = asset( to_cover_amt, debt_type() ) * match_price; + asset to_cover = to_pay * match_price; + to_pay = to_cover.multiply_and_round_up( match_price ); + + if( to_cover.amount >= debt || to_pay.amount >= collateral ) // to be safe + return debt; + FC_ASSERT( to_pay.amount < collateral && to_cover.amount < debt ); + + // check collateral ratio after filled, if it's OK, we return + price new_call_price = price::call_price( get_debt() - to_cover, get_collateral() - to_pay, tcr ); + if( new_call_price > feed_price ) + return to_cover.amount; + + // be here, to_cover is too small due to rounding. deal with the fraction + numerator += fp_coll_amt * mp_debt_amt * tcr; // plus the fraction + to_cover_i256 = ( numerator / denominator ) + 1; + if( to_cover_i256 >= debt.value ) // avoid possible overflow + to_cover_i256 = debt.value; + to_cover_amt = static_cast< int64_t >( to_cover_i256 ); + + asset max_to_pay = ( ( to_cover_amt == debt.value ) ? get_collateral() + : asset( to_cover_amt, debt_type() ).multiply_and_round_up( match_price ) ); + if( max_to_pay.amount > collateral ) + max_to_pay.amount = collateral; + + asset max_to_cover = ( ( max_to_pay.amount == collateral ) ? get_debt() : ( max_to_pay * match_price ) ); + if( max_to_cover.amount >= debt ) // to be safe + { + max_to_pay.amount = collateral; + max_to_cover.amount = debt; + } + + if( max_to_pay <= to_pay || max_to_cover <= to_cover ) // strange data. 
should skip binary search and go on, but doesn't help much + return debt; + FC_ASSERT( max_to_pay > to_pay && max_to_cover > to_cover ); + + asset min_to_pay = to_pay; + asset min_to_cover = to_cover; + + // try with binary search to find a good value + // note: actually binary search can not always provide perfect result here, + // due to rounding, collateral ratio is not always increasing while to_pay or to_cover is increasing + bool max_is_ok = false; + while( true ) + { + // get the mean + if( match_price.base.amount < match_price.quote.amount ) // step of collateral is smaller + { + to_pay.amount = ( min_to_pay.amount + max_to_pay.amount + 1 ) / 2; // should not overflow. round up here + if( to_pay.amount == max_to_pay.amount ) + to_cover.amount = max_to_cover.amount; + else + { + to_cover = to_pay * match_price; + if( to_cover.amount >= max_to_cover.amount ) // can be true when max_is_ok is false + { + to_pay.amount = max_to_pay.amount; + to_cover.amount = max_to_cover.amount; + } + else + { + to_pay = to_cover.multiply_and_round_up( match_price ); // stabilization, no change or become smaller + FC_ASSERT( to_pay.amount < max_to_pay.amount ); + } + } + } + else // step of debt is smaller or equal + { + to_cover.amount = ( min_to_cover.amount + max_to_cover.amount ) / 2; // should not overflow. round down here + if( to_cover.amount == max_to_cover.amount ) + to_pay.amount = max_to_pay.amount; + else + { + to_pay = to_cover.multiply_and_round_up( match_price ); + if( to_pay.amount >= max_to_pay.amount ) // can be true when max_is_ok is false + { + to_pay.amount = max_to_pay.amount; + to_cover.amount = max_to_cover.amount; + } + else + { + to_cover = to_pay * match_price; // stabilization, to_cover should have increased + if( to_cover.amount >= max_to_cover.amount ) // to be safe + { + to_pay.amount = max_to_pay.amount; + to_cover.amount = max_to_cover.amount; + } + } + } + } + + // check again to see if we've moved away from the minimums, if not, use the maximums directly + if( to_pay.amount <= min_to_pay.amount || to_cover.amount <= min_to_cover.amount + || to_pay.amount > max_to_pay.amount || to_cover.amount > max_to_cover.amount ) + { + to_pay.amount = max_to_pay.amount; + to_cover.amount = max_to_cover.amount; + } + + // check the mean + if( to_pay.amount == max_to_pay.amount && ( max_is_ok || to_pay.amount == collateral ) ) + return to_cover.amount; + FC_ASSERT( to_pay.amount < collateral && to_cover.amount < debt ); + + new_call_price = price::call_price( get_debt() - to_cover, get_collateral() - to_pay, tcr ); + if( new_call_price > feed_price ) // good + { + if( to_pay.amount == max_to_pay.amount ) + return to_cover.amount; + max_to_pay.amount = to_pay.amount; + max_to_cover.amount = to_cover.amount; + max_is_ok = true; + } + else // not good + { + if( to_pay.amount == max_to_pay.amount ) + break; + min_to_pay.amount = to_pay.amount; + min_to_cover.amount = to_cover.amount; + } + } + + // be here, max_to_cover is too small due to rounding. search forward + for( uint64_t d1 = 0, d2 = 1, d3 = 1; ; d1 = d2, d2 = d3, d3 = d1 + d2 ) // 1,1,2,3,5,8,... 
+ { + if( match_price.base.amount > match_price.quote.amount ) // step of debt is smaller + { + to_pay.amount += d2; + if( to_pay.amount >= collateral ) + return debt; + to_cover = to_pay * match_price; + if( to_cover.amount >= debt ) + return debt; + to_pay = to_cover.multiply_and_round_up( match_price ); // stabilization + if( to_pay.amount >= collateral ) + return debt; + } + else // step of collateral is smaller or equal + { + to_cover.amount += d2; + if( to_cover.amount >= debt ) + return debt; + to_pay = to_cover.multiply_and_round_up( match_price ); + if( to_pay.amount >= collateral ) + return debt; + to_cover = to_pay * match_price; // stabilization + if( to_cover.amount >= debt ) + return debt; + } + + // check + FC_ASSERT( to_pay.amount < collateral && to_cover.amount < debt ); + + new_call_price = price::call_price( get_debt() - to_cover, get_collateral() - to_pay, tcr ); + if( new_call_price > feed_price ) // good + return to_cover.amount; + } + +} FC_CAPTURE_AND_RETHROW( (*this)(feed_price)(match_price)(maintenance_collateral_ratio) ) } diff --git a/libraries/chain/proposal_evaluator.cpp b/libraries/chain/proposal_evaluator.cpp index a64a38ddc6..348629d9ba 100644 --- a/libraries/chain/proposal_evaluator.cpp +++ b/libraries/chain/proposal_evaluator.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. + * Copyright (c) 2015-2018 Cryptonomex, Inc., and contributors. * * The MIT License * @@ -21,25 +21,140 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ +#include #include #include -#include -#include -#include - -#include +#include namespace graphene { namespace chain { + +struct proposal_operation_hardfork_visitor +{ + typedef void result_type; + const fc::time_point_sec block_time; + const fc::time_point_sec next_maintenance_time; + + proposal_operation_hardfork_visitor( const fc::time_point_sec bt, const fc::time_point_sec nmt ) + : block_time(bt), next_maintenance_time(nmt) {} + + template + void operator()(const T &v) const {} + + // TODO review and cleanup code below after hard fork + // hf_834 + void operator()(const graphene::chain::call_order_update_operation &v) const { + if (next_maintenance_time <= HARDFORK_CORE_834_TIME) { + FC_ASSERT( !v.extensions.value.target_collateral_ratio.valid(), + "Can not set target_collateral_ratio in call_order_update_operation before hardfork 834." ); + } + } + // hf_620 + void operator()(const graphene::chain::asset_create_operation &v) const { + if (block_time < HARDFORK_CORE_620_TIME) { + static const std::locale &loc = std::locale::classic(); + FC_ASSERT(isalpha(v.symbol.back(), loc), "Asset ${s} must end with alpha character before hardfork 620", ("s", v.symbol)); + } + } + // hf_199 + void operator()(const graphene::chain::asset_update_issuer_operation &v) const { + if (block_time < HARDFORK_CORE_199_TIME) { + FC_ASSERT(false, "Not allowed until hardfork 199"); + } + } + // hf_188 + void operator()(const graphene::chain::asset_claim_pool_operation &v) const { + if (block_time < HARDFORK_CORE_188_TIME) { + FC_ASSERT(false, "Not allowed until hardfork 188"); + } + } + // hf_588 + // issue #588 + // + // As a virtual operation which has no evaluator `asset_settle_cancel_operation` + // originally won't be packed into blocks, yet its loose `validate()` method + // make it able to slip into blocks. 
+ // + // We need to forbid this operation being packed into blocks via proposal but + // this will lead to a hardfork (this operation in proposal will denied by new + // node while accept by old node), so a hardfork guard code needed and a + // consensus upgrade over all nodes needed in future. And because the + // `validate()` method not suitable to check database status, so we put the + // code here. + // + // After the hardfork, all nodes will deny packing this operation into a block, + // and then we will check whether exists a proposal containing this kind of + // operation, if not exists, we can harden the `validate()` method to deny + // it in a earlier stage. + // + void operator()(const graphene::chain::asset_settle_cancel_operation &v) const { + if (block_time > HARDFORK_CORE_588_TIME) { + FC_ASSERT(!"Virtual operation"); + } + } + // loop and self visit in proposals + void operator()(const graphene::chain::proposal_create_operation &v) const { + for (const op_wrapper &op : v.proposed_ops) + op.op.visit(*this); + } +}; + +struct hardfork_visitor_214 // non-recursive proposal visitor +{ + typedef void result_type; + + template + void operator()(const T &v) const {} + + void operator()(const proposal_update_operation &v) const { + FC_ASSERT(false, "Not allowed until hardfork 214"); + } +}; + +void hardfork_visitor_1479::operator()(const proposal_update_operation &v) +{ + if( nested_update_count == 0 || v.proposal.instance.value > max_update_instance ) + max_update_instance = v.proposal.instance.value; + nested_update_count++; +} + +void hardfork_visitor_1479::operator()(const proposal_delete_operation &v) +{ + if( nested_update_count == 0 || v.proposal.instance.value > max_update_instance ) + max_update_instance = v.proposal.instance.value; + nested_update_count++; +} + +// loop and self visit in proposals +void hardfork_visitor_1479::operator()(const graphene::chain::proposal_create_operation &v) +{ + for (const op_wrapper &op : v.proposed_ops) + op.op.visit(*this); +} + void_result proposal_create_evaluator::do_evaluate(const proposal_create_operation& o) { try { const database& d = db(); + + // Calling the proposal hardfork visitor + const fc::time_point_sec block_time = d.head_block_time(); + const fc::time_point_sec next_maint_time = d.get_dynamic_global_properties().next_maintenance_time; + proposal_operation_hardfork_visitor vtor( block_time, next_maint_time ); + vtor( o ); + if( block_time < HARDFORK_CORE_214_TIME ) + { // cannot be removed after hf, unfortunately + hardfork_visitor_214 hf214; + for (const op_wrapper &op : o.proposed_ops) + op.op.visit( hf214 ); + } + vtor_1479( o ); + const auto& global_parameters = d.get_global_properties().parameters; - FC_ASSERT( o.expiration_time > d.head_block_time(), "Proposal has already expired on creation." ); - FC_ASSERT( o.expiration_time <= d.head_block_time() + global_parameters.maximum_proposal_lifetime, + FC_ASSERT( o.expiration_time > block_time, "Proposal has already expired on creation." ); + FC_ASSERT( o.expiration_time <= block_time + global_parameters.maximum_proposal_lifetime, "Proposal expiration time is too far in the future."); - FC_ASSERT( !o.review_period_seconds || fc::seconds(*o.review_period_seconds) < (o.expiration_time - d.head_block_time()), + FC_ASSERT( !o.review_period_seconds || fc::seconds(*o.review_period_seconds) < (o.expiration_time - block_time), "Proposal review period must be less than its overall lifetime." 
); { @@ -73,6 +188,7 @@ void_result proposal_create_evaluator::do_evaluate(const proposal_create_operati for( const op_wrapper& op : o.proposed_ops ) _proposed_trx.operations.push_back(op.op); + _proposed_trx.validate(); return void_result(); @@ -86,6 +202,7 @@ object_id_type proposal_create_evaluator::do_apply(const proposal_create_operati _proposed_trx.expiration = o.expiration_time; proposal.proposed_transaction = _proposed_trx; proposal.expiration_time = o.expiration_time; + proposal.proposer = o.fee_paying_account; if( o.review_period_seconds ) proposal.review_period_time = o.expiration_time - *o.review_period_seconds; @@ -102,6 +219,20 @@ object_id_type proposal_create_evaluator::do_apply(const proposal_create_operati std::set_difference(required_active.begin(), required_active.end(), proposal.required_owner_approvals.begin(), proposal.required_owner_approvals.end(), std::inserter(proposal.required_active_approvals, proposal.required_active_approvals.begin())); + + if( d.head_block_time() > HARDFORK_CORE_1479_TIME ) + FC_ASSERT( vtor_1479.nested_update_count == 0 || proposal.id.instance() > vtor_1479.max_update_instance, + "Cannot update/delete a proposal with a future id!" ); + else if( vtor_1479.nested_update_count > 0 && proposal.id.instance() <= vtor_1479.max_update_instance ) + { + // prevent approval + transfer_operation top; + top.from = GRAPHENE_NULL_ACCOUNT; + top.to = GRAPHENE_RELAXED_COMMITTEE_ACCOUNT; + top.amount = asset( GRAPHENE_MAX_SHARE_SUPPLY ); + proposal.proposed_transaction.operations.emplace_back( top ); + wlog( "Issue 1479: ${p}", ("p",proposal) ); + } }); return proposal.id; @@ -128,20 +259,6 @@ void_result proposal_update_evaluator::do_evaluate(const proposal_update_operati "", ("id", id)("available", _proposal->available_owner_approvals) ); } - /* All authority checks happen outside of evaluators - if( (d.get_node_properties().skip_flags & database::skip_authority_check) == 0 ) - { - for( const auto& id : o.key_approvals_to_add ) - { - FC_ASSERT( trx_state->signed_by(id) ); - } - for( const auto& id : o.key_approvals_to_remove ) - { - FC_ASSERT( trx_state->signed_by(id) ); - } - } - */ - return void_result(); } FC_CAPTURE_AND_RETHROW( (o) ) } @@ -152,7 +269,7 @@ void_result proposal_update_evaluator::do_apply(const proposal_update_operation& // Potential optimization: if _executed_proposal is true, we can skip the modify step and make push_proposal skip // signature checks. This isn't done now because I just wrote all the proposals code, and I'm not yet 100% sure the // required approvals are sufficient to authorize the transaction. 
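The lifetime checks added to proposal_create_evaluator::do_evaluate earlier in this hunk constrain a proposal's expiration and review period relative to the current head block time. The sketch below mirrors those three checks with plain integer timestamps; the timestamp type and the maximum-lifetime parameter are simplified stand-ins for the real fc::time_point_sec and chain parameters.

#include <cstdint>
#include <optional>
#include <stdexcept>

// All times are in seconds since epoch; maximum_proposal_lifetime is in seconds.
void check_proposal_lifetime(int64_t head_block_time, int64_t expiration_time,
                             std::optional<int64_t> review_period_seconds,
                             int64_t maximum_proposal_lifetime)
{
    if (expiration_time <= head_block_time)
        throw std::invalid_argument("Proposal has already expired on creation.");
    if (expiration_time > head_block_time + maximum_proposal_lifetime)
        throw std::invalid_argument("Proposal expiration time is too far in the future.");
    if (review_period_seconds &&
        *review_period_seconds >= expiration_time - head_block_time)
        throw std::invalid_argument("Proposal review period must be less than its overall lifetime.");
}

// Usage: check_proposal_lifetime(1000, 2000, 500, 86400) passes; a review period of
// 1000 seconds or more would throw, since it is not shorter than the proposal's lifetime.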
- d.modify(*_proposal, [&o, &d](proposal_object& p) { + d.modify(*_proposal, [&o](proposal_object& p) { p.available_active_approvals.insert(o.active_approvals_to_add.begin(), o.active_approvals_to_add.end()); p.available_owner_approvals.insert(o.owner_approvals_to_add.begin(), o.owner_approvals_to_add.end()); for( account_id_type id : o.active_approvals_to_remove ) @@ -177,6 +294,9 @@ void_result proposal_update_evaluator::do_apply(const proposal_update_operation& try { _processed_transaction = d.push_proposal(*_proposal); } catch(fc::exception& e) { + d.modify(*_proposal, [&e](proposal_object& p) { + p.fail_reason = e.to_string(fc::log_level(fc::log_level::all)); + }); wlog("Proposed transaction ${id} failed to apply once approved with exception:\n----\n${reason}\n----\nWill try again when it expires.", ("id", o.proposal)("reason", e.to_detail_string())); _proposal_failed = true; @@ -208,4 +328,5 @@ void_result proposal_delete_evaluator::do_apply(const proposal_delete_operation& return void_result(); } FC_CAPTURE_AND_RETHROW( (o) ) } + } } // graphene::chain diff --git a/libraries/chain/proposal_object.cpp b/libraries/chain/proposal_object.cpp index 565964a51b..7d7884e1c4 100644 --- a/libraries/chain/proposal_object.cpp +++ b/libraries/chain/proposal_object.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. + * Copyright (c) 2015-2018 Cryptonomex, Inc., and contributors. * * The MIT License * @@ -22,7 +22,6 @@ * THE SOFTWARE. */ #include -#include #include namespace graphene { namespace chain { @@ -43,14 +42,11 @@ bool proposal_object::is_authorized_to_execute(database& db) const } catch ( const fc::exception& e ) { - //idump((available_active_approvals)); - //wlog((e.to_detail_string())); return false; } return true; } - void required_approval_index::object_inserted( const object& obj ) { assert( dynamic_cast(&obj) ); diff --git a/libraries/chain/protocol/account.cpp b/libraries/chain/protocol/account.cpp index b3ad9e00d5..9d281e4caf 100644 --- a/libraries/chain/protocol/account.cpp +++ b/libraries/chain/protocol/account.cpp @@ -56,17 +56,18 @@ namespace graphene { namespace chain { * - Length is between (inclusive) GRAPHENE_MIN_ACCOUNT_NAME_LENGTH and GRAPHENE_MAX_ACCOUNT_NAME_LENGTH */ bool is_valid_name( const string& name ) -{ -#if GRAPHENE_MIN_ACCOUNT_NAME_LENGTH < 3 -#error This is_valid_name implementation implicitly enforces minimum name length of 3. 
-#endif - +{ try { const size_t len = name.size(); + if( len < GRAPHENE_MIN_ACCOUNT_NAME_LENGTH ) + { return false; + } if( len > GRAPHENE_MAX_ACCOUNT_NAME_LENGTH ) + { return false; + } size_t begin = 0; while( true ) @@ -74,8 +75,10 @@ bool is_valid_name( const string& name ) size_t end = name.find_first_of( '.', begin ); if( end == std::string::npos ) end = len; - if( end - begin < 3 ) + if( (end - begin) < GRAPHENE_MIN_ACCOUNT_NAME_LENGTH ) + { return false; + } switch( name[begin] ) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': @@ -119,7 +122,7 @@ bool is_valid_name( const string& name ) begin = end+1; } return true; -} +} FC_CAPTURE_AND_RETHROW( (name) ) } bool is_cheap_name( const string& n ) { diff --git a/libraries/chain/protocol/address.cpp b/libraries/chain/protocol/address.cpp index 444f9e32bd..19bb4df569 100644 --- a/libraries/chain/protocol/address.cpp +++ b/libraries/chain/protocol/address.cpp @@ -50,17 +50,20 @@ namespace graphene { std::vector v; try { - v = fc::from_base58( base58str.substr( prefix_len ) ); - } - catch( const fc::parse_error_exception& e ) - { - return false; - } + v = fc::from_base58( base58str.substr( prefix_len ) ); + } + catch( const fc::parse_error_exception& e ) + { + return false; + } + if( v.size() != sizeof( fc::ripemd160 ) + 4 ) return false; + const fc::ripemd160 checksum = fc::ripemd160::hash( v.data(), v.size() - 4 ); if( memcmp( v.data() + 20, (char*)checksum._hash, 4 ) != 0 ) return false; + return true; } @@ -98,11 +101,11 @@ namespace graphene { namespace fc { - void to_variant( const graphene::chain::address& var, variant& vo ) + void to_variant( const graphene::chain::address& var, variant& vo, uint32_t max_depth ) { vo = std::string(var); } - void from_variant( const variant& var, graphene::chain::address& vo ) + void from_variant( const variant& var, graphene::chain::address& vo, uint32_t max_depth ) { vo = graphene::chain::address( var.as_string() ); } diff --git a/libraries/chain/protocol/assert.cpp b/libraries/chain/protocol/assert.cpp index 60f26e3f0e..2c15a44563 100644 --- a/libraries/chain/protocol/assert.cpp +++ b/libraries/chain/protocol/assert.cpp @@ -21,7 +21,7 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ -#include +#include namespace graphene { namespace chain { diff --git a/libraries/chain/protocol/asset.cpp b/libraries/chain/protocol/asset.cpp index e1169b0ce6..531ea7f6f6 100644 --- a/libraries/chain/protocol/asset.cpp +++ b/libraries/chain/protocol/asset.cpp @@ -32,7 +32,7 @@ namespace graphene { namespace chain { bool operator == ( const price& a, const price& b ) { if( std::tie( a.base.asset_id, a.quote.asset_id ) != std::tie( b.base.asset_id, b.quote.asset_id ) ) - return false; + return false; const auto amult = uint128_t( b.quote.amount.value ) * a.base.amount.value; const auto bmult = uint128_t( a.quote.amount.value ) * b.base.amount.value; @@ -53,26 +53,6 @@ namespace graphene { namespace chain { return amult < bmult; } - bool operator <= ( const price& a, const price& b ) - { - return (a == b) || (a < b); - } - - bool operator != ( const price& a, const price& b ) - { - return !(a == b); - } - - bool operator > ( const price& a, const price& b ) - { - return !(a <= b); - } - - bool operator >= ( const price& a, const price& b ) - { - return !(a < b); - } - asset operator * ( const asset& a, const price& b ) { if( a.asset_id == b.base.asset_id ) @@ -92,6 +72,26 @@ namespace graphene { namespace chain { FC_THROW_EXCEPTION( fc::assert_exception, "invalid asset * price", ("asset",a)("price",b) ); } + asset asset::multiply_and_round_up( const price& b )const + { + const asset& a = *this; + if( a.asset_id == b.base.asset_id ) + { + FC_ASSERT( b.base.amount.value > 0 ); + uint128_t result = (uint128_t(a.amount.value) * b.quote.amount.value + b.base.amount.value - 1)/b.base.amount.value; + FC_ASSERT( result <= GRAPHENE_MAX_SHARE_SUPPLY ); + return asset( result.convert_to(), b.quote.asset_id ); + } + else if( a.asset_id == b.quote.asset_id ) + { + FC_ASSERT( b.quote.amount.value > 0 ); + uint128_t result = (uint128_t(a.amount.value) * b.base.amount.value + b.quote.amount.value - 1)/b.quote.amount.value; + FC_ASSERT( result <= GRAPHENE_MAX_SHARE_SUPPLY ); + return asset( result.convert_to(), b.base.asset_id ); + } + FC_THROW_EXCEPTION( fc::assert_exception, "invalid asset::multiply_and_round_up(price)", ("asset",a)("price",b) ); + } + price operator / ( const asset& base, const asset& quote ) { try { FC_ASSERT( base.asset_id != quote.asset_id ); @@ -101,6 +101,93 @@ namespace graphene { namespace chain { price price::max( asset_id_type base, asset_id_type quote ) { return asset( share_type(GRAPHENE_MAX_SHARE_SUPPLY), base ) / asset( share_type(1), quote); } price price::min( asset_id_type base, asset_id_type quote ) { return asset( 1, base ) / asset( GRAPHENE_MAX_SHARE_SUPPLY, quote); } + price operator * ( const price& p, const ratio_type& r ) + { try { + p.validate(); + + FC_ASSERT( r.numerator() > 0 && r.denominator() > 0 ); + + if( r.numerator() == r.denominator() ) return p; + + boost::rational p128( p.base.amount.value, p.quote.amount.value ); + boost::rational r128( r.numerator(), r.denominator() ); + auto cp = p128 * r128; + auto ocp = cp; + + bool shrinked = false; + bool using_max = false; + static const int128_t max( GRAPHENE_MAX_SHARE_SUPPLY ); + while( cp.numerator() > max || cp.denominator() > max ) + { + if( cp.numerator() == 1 ) + { + cp = boost::rational( 1, max ); + using_max = true; + break; + } + else if( cp.denominator() == 1 ) + { + cp = boost::rational( max, 1 ); + using_max = true; + break; + } + else + { + cp = boost::rational( cp.numerator() >> 1, cp.denominator() >> 1 ); + shrinked = true; + } + } + if( shrinked ) // maybe not accurate enough due to 
rounding, do additional checks here + { + int128_t num = ocp.numerator(); + int128_t den = ocp.denominator(); + if( num > den ) + { + num /= den; + if( num > max ) + num = max; + den = 1; + } + else + { + den /= num; + if( den > max ) + den = max; + num = 1; + } + boost::rational ncp( num, den ); + if( num == max || den == max ) // it's on the edge, we know it's accurate enough + cp = ncp; + else + { + // from the accurate ocp, now we have ncp and cp. use the one which is closer to ocp. + // TODO improve performance + auto diff1 = abs( ncp - ocp ); + auto diff2 = abs( cp - ocp ); + if( diff1 < diff2 ) cp = ncp; + } + } + + price np = asset( cp.numerator().convert_to(), p.base.asset_id ) + / asset( cp.denominator().convert_to(), p.quote.asset_id ); + + if( shrinked || using_max ) + { + if( ( r.numerator() > r.denominator() && np < p ) + || ( r.numerator() < r.denominator() && np > p ) ) + // even with an accurate result, if p is out of valid range, return it + np = p; + } + + np.validate(); + return np; + } FC_CAPTURE_AND_RETHROW( (p)(r.numerator())(r.denominator()) ) } + + price operator / ( const price& p, const ratio_type& r ) + { try { + return p * ratio_type( r.denominator(), r.numerator() ); + } FC_CAPTURE_AND_RETHROW( (p)(r.numerator())(r.denominator()) ) } + /** * The black swan price is defined as debt/collateral, we want to perform a margin call * before debt == collateral. Given a debt/collateral ratio of 1 USD / CORE and @@ -119,7 +206,7 @@ namespace graphene { namespace chain { */ price price::call_price( const asset& debt, const asset& collateral, uint16_t collateral_ratio) { try { - //wdump((debt)(collateral)(collateral_ratio)); + // TODO replace the calculation with new operator*() and/or operator/(), could be a hardfork change due to edge cases boost::rational swan(debt.amount.value,collateral.amount.value); boost::rational ratio( collateral_ratio, GRAPHENE_COLLATERAL_RATIO_DENOM ); auto cp = swan * ratio; @@ -127,10 +214,15 @@ namespace graphene { namespace chain { while( cp.numerator() > GRAPHENE_MAX_SHARE_SUPPLY || cp.denominator() > GRAPHENE_MAX_SHARE_SUPPLY ) cp = boost::rational( (cp.numerator() >> 1)+1, (cp.denominator() >> 1)+1 ); - return ~(asset( cp.numerator().convert_to(), debt.asset_id ) / asset( cp.denominator().convert_to(), collateral.asset_id )); + return ( asset( cp.denominator().convert_to(), collateral.asset_id ) + / asset( cp.numerator().convert_to(), debt.asset_id ) ); } FC_CAPTURE_AND_RETHROW( (debt)(collateral)(collateral_ratio) ) } - bool price::is_null() const { return *this == price(); } + bool price::is_null() const + { + // Effectively same as "return *this == price();" but perhaps faster + return ( base.asset_id == asset_id_type() && quote.asset_id == asset_id_type() ); + } void price::validate() const { try { @@ -168,6 +260,7 @@ namespace graphene { namespace chain { price price_feed::max_short_squeeze_price()const { + // TODO replace the calculation with new operator*() and/or operator/(), could be a hardfork change due to edge cases boost::rational sp( settlement_price.base.amount.value, settlement_price.quote.amount.value ); //debt.amount.value,collateral.amount.value); boost::rational ratio( GRAPHENE_COLLATERAL_RATIO_DENOM, maximum_short_squeeze_ratio ); auto cp = sp * ratio; diff --git a/libraries/chain/protocol/asset_ops.cpp b/libraries/chain/protocol/asset_ops.cpp index 1626a2a926..c88eb9bd8a 100644 --- a/libraries/chain/protocol/asset_ops.cpp +++ b/libraries/chain/protocol/asset_ops.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 
Cryptonomex, Inc., and contributors. + * Copyright (c) 2015-2018 Cryptonomex, Inc., and contributors. * * The MIT License * @@ -23,32 +23,38 @@ */ #include +#include + namespace graphene { namespace chain { /** * Valid symbols can contain [A-Z0-9], and '.' * They must start with [A, Z] - * They must end with [A, Z] + * They must end with [A, Z] before HF_620 or [A-Z0-9] after it * They can contain a maximum of one '.' */ bool is_valid_symbol( const string& symbol ) { + static const std::locale& loc = std::locale::classic(); if( symbol.size() < GRAPHENE_MIN_ASSET_SYMBOL_LENGTH ) return false; + if( symbol.substr(0,3) == "BIT" ) + return false; + if( symbol.size() > GRAPHENE_MAX_ASSET_SYMBOL_LENGTH ) return false; - if( !isalpha( symbol.front() ) ) + if( !isalpha( symbol.front(), loc ) ) return false; - if( !isalpha( symbol.back() ) ) + if( !isalnum( symbol.back(), loc ) ) return false; bool dot_already_present = false; for( const auto c : symbol ) { - if( (isalpha( c ) && isupper( c )) || isdigit(c) ) + if( (isalpha( c, loc ) && isupper( c, loc )) || isdigit( c, loc ) ) continue; if( c == '.' ) @@ -120,6 +126,12 @@ void asset_update_operation::validate()const FC_ASSERT(dummy.asset_id == asset_id_type()); } +void asset_update_issuer_operation::validate()const +{ + FC_ASSERT( fee.amount >= 0 ); + FC_ASSERT( issuer != new_issuer ); +} + share_type asset_update_operation::calculate_fee(const asset_update_operation::fee_parameters_type& k)const { return k.fee + calculate_data_fee( fc::raw::pack_size(*this), k.price_per_kbyte ); @@ -231,4 +243,11 @@ void asset_claim_fees_operation::validate()const { FC_ASSERT( amount_to_claim.amount > 0 ); } +void asset_claim_pool_operation::validate()const { + FC_ASSERT( fee.amount >= 0 ); + FC_ASSERT( fee.asset_id != asset_id); + FC_ASSERT( amount_to_claim.amount > 0 ); + FC_ASSERT( amount_to_claim.asset_id == asset_id_type()); +} + } } // namespace graphene::chain diff --git a/libraries/chain/protocol/block.cpp b/libraries/chain/protocol/block.cpp index d32365dd08..9fdf4707eb 100644 --- a/libraries/chain/protocol/block.cpp +++ b/libraries/chain/protocol/block.cpp @@ -37,19 +37,23 @@ namespace graphene { namespace chain { return fc::endian_reverse_u32(id._hash[0]); } - block_id_type signed_block_header::id()const + const block_id_type& signed_block_header::id()const { - auto tmp = fc::sha224::hash( *this ); - tmp._hash[0] = fc::endian_reverse_u32(block_num()); // store the block num in the ID, 160 bits is plenty for the hash - static_assert( sizeof(tmp._hash[0]) == 4, "should be 4 bytes" ); - block_id_type result; - memcpy(result._hash, tmp._hash, std::min(sizeof(result), sizeof(tmp))); - return result; + if( !_block_id._hash[0] ) + { + auto tmp = fc::sha224::hash( *this ); + tmp._hash[0] = fc::endian_reverse_u32(block_num()); // store the block num in the ID, 160 bits is plenty for the hash + static_assert( sizeof(tmp._hash[0]) == 4, "should be 4 bytes" ); + memcpy(_block_id._hash, tmp._hash, std::min(sizeof(_block_id), sizeof(tmp))); + } + return _block_id; } - fc::ecc::public_key signed_block_header::signee()const + const fc::ecc::public_key& signed_block_header::signee()const { - return fc::ecc::public_key( witness_signature, digest(), true/*enforce canonical*/ ); + if( !_signee.valid() ) + _signee = fc::ecc::public_key( witness_signature, digest(), true/*enforce canonical*/ ); + return _signee; } void signed_block_header::sign( const fc::ecc::private_key& signer ) @@ -62,31 +66,35 @@ namespace graphene { namespace chain { return signee() == 
expected_signee; } - checksum_type signed_block::calculate_merkle_root()const + const checksum_type& signed_block::calculate_merkle_root()const { + static const checksum_type empty_checksum; if( transactions.size() == 0 ) - return checksum_type(); - - vector ids; - ids.resize( transactions.size() ); - for( uint32_t i = 0; i < transactions.size(); ++i ) - ids[i] = transactions[i].merkle_digest(); + return empty_checksum; - vector::size_type current_number_of_hashes = ids.size(); - while( current_number_of_hashes > 1 ) + if( !_calculated_merkle_root._hash[0] ) { - // hash ID's in pairs - uint32_t i_max = current_number_of_hashes - (current_number_of_hashes&1); - uint32_t k = 0; + vector ids; + ids.resize( transactions.size() ); + for( uint32_t i = 0; i < transactions.size(); ++i ) + ids[i] = transactions[i].merkle_digest(); - for( uint32_t i = 0; i < i_max; i += 2 ) - ids[k++] = digest_type::hash( std::make_pair( ids[i], ids[i+1] ) ); + vector::size_type current_number_of_hashes = ids.size(); + while( current_number_of_hashes > 1 ) + { + // hash ID's in pairs + uint32_t i_max = current_number_of_hashes - (current_number_of_hashes&1); + uint32_t k = 0; - if( current_number_of_hashes&1 ) - ids[k++] = ids[i_max]; - current_number_of_hashes = k; + for( uint32_t i = 0; i < i_max; i += 2 ) + ids[k++] = digest_type::hash( std::make_pair( ids[i], ids[i+1] ) ); + + if( current_number_of_hashes&1 ) + ids[k++] = ids[i_max]; + current_number_of_hashes = k; + } + _calculated_merkle_root = checksum_type::hash( ids[0] ); } - return checksum_type::hash( ids[0] ); + return _calculated_merkle_root; } - } } diff --git a/libraries/chain/protocol/chain_parameters.cpp b/libraries/chain/protocol/chain_parameters.cpp new file mode 100644 index 0000000000..8e70d624e0 --- /dev/null +++ b/libraries/chain/protocol/chain_parameters.cpp @@ -0,0 +1,79 @@ +#include +#include + +namespace graphene { namespace chain { + chain_parameters::chain_parameters() { + current_fees = std::make_shared(); + } + + // copy constructor + chain_parameters::chain_parameters(const chain_parameters& other) + { + current_fees = std::make_shared(*other.current_fees); + safe_copy(*this, other); + } + + // copy assignment + chain_parameters& chain_parameters::operator=(const chain_parameters& other) + { + if (&other != this) + { + current_fees = std::make_shared(*other.current_fees); + safe_copy(*this, other); + } + return *this; + } + + // copies the easy stuff + void chain_parameters::safe_copy(chain_parameters& to, const chain_parameters& from) + { + to.block_interval = from.block_interval; + to.maintenance_interval = from.maintenance_interval; + to.maintenance_skip_slots = from.maintenance_skip_slots; + to.committee_proposal_review_period = from.committee_proposal_review_period; + to.maximum_transaction_size = from.maximum_transaction_size; + to.maximum_block_size = from.maximum_block_size; + to.maximum_time_until_expiration = from.maximum_time_until_expiration; + to.maximum_proposal_lifetime = from.maximum_proposal_lifetime; + to.maximum_asset_whitelist_authorities = from.maximum_asset_whitelist_authorities; + to.maximum_asset_feed_publishers = from.maximum_asset_feed_publishers; + to.maximum_witness_count = from.maximum_witness_count; + to.maximum_committee_count = from.maximum_committee_count; + to.maximum_authority_membership = from.maximum_authority_membership; + to.reserve_percent_of_fee = from.reserve_percent_of_fee; + to.network_percent_of_fee = from.network_percent_of_fee; + to.lifetime_referrer_percent_of_fee = 
from.lifetime_referrer_percent_of_fee; + to.cashback_vesting_period_seconds = from.cashback_vesting_period_seconds; + to.cashback_vesting_threshold = from.cashback_vesting_threshold; + to.count_non_member_votes = from.count_non_member_votes; + to.allow_non_member_whitelists = from.allow_non_member_whitelists; + to.witness_pay_per_block = from.witness_pay_per_block; + to.witness_pay_vesting_seconds = from.witness_pay_vesting_seconds; + to.worker_budget_per_day = from.worker_budget_per_day; + to.max_predicate_opcode = from.max_predicate_opcode; + to.fee_liquidation_threshold = from.fee_liquidation_threshold; + to.accounts_per_fee_scale = from.accounts_per_fee_scale; + to.account_fee_scale_bitshifts = from.account_fee_scale_bitshifts; + to.max_authority_depth = from.max_authority_depth; + to.extensions = from.extensions; + } + + // move constructor + chain_parameters::chain_parameters(chain_parameters&& other) + { + current_fees = std::move(other.current_fees); + safe_copy(*this, other); + } + + // move assignment + chain_parameters& chain_parameters::operator=(chain_parameters&& other) + { + if (&other != this) + { + current_fees = std::move(other.current_fees); + safe_copy(*this, other); + } + return *this; + } + +}} diff --git a/libraries/chain/protocol/confidential.cpp b/libraries/chain/protocol/confidential.cpp index 7ec5a49aa8..603befa122 100644 --- a/libraries/chain/protocol/confidential.cpp +++ b/libraries/chain/protocol/confidential.cpp @@ -133,6 +133,7 @@ void blind_transfer_operation::validate()const FC_ASSERT( info.max_value <= GRAPHENE_MAX_SHARE_SUPPLY ); } } + FC_ASSERT( fc::ecc::verify_sum( in, out, net_public ), "", ("net_public", net_public) ); } FC_CAPTURE_AND_RETHROW( (*this) ) } share_type blind_transfer_operation::calculate_fee( const fee_parameters_type& k )const diff --git a/libraries/chain/protocol/fee_schedule.cpp b/libraries/chain/protocol/fee_schedule.cpp index ab8f6532f8..24efe2ce8c 100644 --- a/libraries/chain/protocol/fee_schedule.cpp +++ b/libraries/chain/protocol/fee_schedule.cpp @@ -23,26 +23,11 @@ */ #include #include -#include - -namespace fc -{ - // explicitly instantiate the smart_ref, gcc fails to instantiate it in some release builds - //template graphene::chain::fee_schedule& smart_ref::operator=(smart_ref&&); - //template graphene::chain::fee_schedule& smart_ref::operator=(U&&); - //template graphene::chain::fee_schedule& smart_ref::operator=(const smart_ref&); - //template smart_ref::smart_ref(); - //template const graphene::chain::fee_schedule& smart_ref::operator*() const; -} #define MAX_FEE_STABILIZATION_ITERATION 4 namespace graphene { namespace chain { - typedef fc::smart_ref smart_fee_schedule; - - static smart_fee_schedule tmp; - fee_schedule::fee_schedule() { } @@ -79,13 +64,21 @@ namespace graphene { namespace chain { { typedef uint64_t result_type; - const fee_parameters& param; - calc_fee_visitor( const fee_parameters& p ):param(p){} + const fee_schedule& param; + const int current_op; + calc_fee_visitor( const fee_schedule& p, const operation& op ):param(p),current_op(op.which()){} template - result_type operator()( const OpType& op )const + result_type operator()( const OpType& op )const { - return op.calculate_fee( param.get() ).value; + try { + return op.calculate_fee( param.get() ).value; + } catch (fc::assert_exception& e) { + fee_parameters params; params.set_which(current_op); + auto itr = param.parameters.find(params); + if( itr != param.parameters.end() ) params = *itr; + return op.calculate_fee( params.get() ).value; + } } }; @@ 
-122,25 +115,23 @@ namespace graphene { namespace chain { this->scale = 0; } + asset fee_schedule::calculate_fee( const operation& op )const + { + uint64_t required_fee = op.visit( calc_fee_visitor( *this, op ) ); + if( scale != GRAPHENE_100_PERCENT ) + { + auto scaled = fc::uint128(required_fee) * scale; + scaled /= GRAPHENE_100_PERCENT; + FC_ASSERT( scaled <= GRAPHENE_MAX_SHARE_SUPPLY, + "Required fee after scaling would exceed maximum possible supply" ); + required_fee = scaled.to_uint64(); + } + return asset( required_fee ); + } + asset fee_schedule::calculate_fee( const operation& op, const price& core_exchange_rate )const { - //idump( (op)(core_exchange_rate) ); - fee_parameters params; params.set_which(op.which()); - auto itr = parameters.find(params); - if( itr != parameters.end() ) params = *itr; - auto base_value = op.visit( calc_fee_visitor( params ) ); - auto scaled = fc::uint128(base_value) * scale; - scaled /= GRAPHENE_100_PERCENT; - FC_ASSERT( scaled <= GRAPHENE_MAX_SHARE_SUPPLY ); - //idump( (base_value)(scaled)(core_exchange_rate) ); - auto result = asset( scaled.to_uint64(), asset_id_type(0) ) * core_exchange_rate; - //FC_ASSERT( result * core_exchange_rate >= asset( scaled.to_uint64()) ); - - while( result * core_exchange_rate < asset( scaled.to_uint64()) ) - result.amount++; - - FC_ASSERT( result.amount <= GRAPHENE_MAX_SHARE_SUPPLY ); - return result; + return calculate_fee( op ).multiply_and_round_up( core_exchange_rate ); } asset fee_schedule::set_fee( operation& op, const price& core_exchange_rate )const diff --git a/libraries/chain/protocol/market.cpp b/libraries/chain/protocol/market.cpp index 923f4763f0..fd12fa4e7d 100644 --- a/libraries/chain/protocol/market.cpp +++ b/libraries/chain/protocol/market.cpp @@ -43,6 +43,15 @@ void call_order_update_operation::validate()const FC_ASSERT( fee.amount >= 0 ); FC_ASSERT( delta_collateral.asset_id != delta_debt.asset_id ); FC_ASSERT( delta_collateral.amount != 0 || delta_debt.amount != 0 ); + + // note: no validation is needed for extensions so far: the only attribute inside is target_collateral_ratio + +} FC_CAPTURE_AND_RETHROW((*this)) } + +void bid_collateral_operation::validate()const +{ try { + FC_ASSERT( fee.amount >= 0 ); + FC_ASSERT( debt_covered.amount == 0 || (debt_covered.amount > 0 && additional_collateral.amount > 0) ); } FC_CAPTURE_AND_RETHROW((*this)) } } } // graphene::chain diff --git a/libraries/chain/protocol/operations.cpp b/libraries/chain/protocol/operations.cpp index 40a37eba3a..48a65f6fed 100644 --- a/libraries/chain/protocol/operations.cpp +++ b/libraries/chain/protocol/operations.cpp @@ -32,6 +32,12 @@ uint64_t base_operation::calculate_data_fee( uint64_t bytes, uint64_t price_per_ return result.to_uint64(); } +fc::optional< fc::future > base_operation::validate_parallel( uint32_t skip )const +{ + validate(); + return fc::optional< fc::future >(); +} + void balance_claim_operation::validate()const { FC_ASSERT( fee == asset() ); diff --git a/libraries/chain/protocol/proposal.cpp b/libraries/chain/protocol/proposal.cpp index 069824af74..7d072e4a90 100644 --- a/libraries/chain/protocol/proposal.cpp +++ b/libraries/chain/protocol/proposal.cpp @@ -23,7 +23,6 @@ */ #include #include -#include namespace graphene { namespace chain { @@ -89,7 +88,8 @@ void proposal_update_operation::get_required_authorities( vector& o ) auth.key_auths[k] = 1; auth.weight_threshold = auth.key_auths.size(); - o.emplace_back( std::move(auth) ); + if( auth.weight_threshold > 0 ) + o.emplace_back( std::move(auth) ); } void 
proposal_update_operation::get_required_active_authorities( flat_set& a )const diff --git a/libraries/chain/protocol/transaction.cpp b/libraries/chain/protocol/transaction.cpp index 5de878eadb..b642dea72e 100644 --- a/libraries/chain/protocol/transaction.cpp +++ b/libraries/chain/protocol/transaction.cpp @@ -23,9 +23,9 @@ */ #include #include +#include #include #include -#include #include namespace graphene { namespace chain { @@ -56,15 +56,14 @@ void transaction::validate() const { FC_ASSERT( operations.size() > 0, "A transaction must have at least one operation", ("trx",*this) ); for( const auto& op : operations ) - operation_validate(op); + operation_validate(op); } -graphene::chain::transaction_id_type graphene::chain::transaction::id() const +const transaction_id_type& transaction::id() const { auto h = digest(); - transaction_id_type result; - memcpy(result._hash, h._hash, std::min(sizeof(result), sizeof(h))); - return result; + memcpy(_tx_id_buffer._hash, h._hash, std::min(sizeof(_tx_id_buffer), sizeof(h))); + return _tx_id_buffer; } const signature_type& graphene::chain::signed_transaction::sign(const private_key_type& key, const chain_id_type& chain_id) @@ -97,15 +96,18 @@ void transaction::get_required_authorities( flat_set& active, f { for( const auto& op : operations ) operation_get_required_authorities( op, active, owner, other ); + for( const auto& account : owner ) + active.erase( account ); } +const flat_set empty_keyset; struct sign_state { - /** returns true if we have a signature for this key or can - * produce a signature for this key, else returns false. + /** returns true if we have a signature for this key or can + * produce a signature for this key, else returns false. */ bool signed_by( const public_key_type& k ) { @@ -164,7 +166,7 @@ struct sign_state /** * Checks to see if we have signatures of the active authorites of - * the accounts specified in authority or the keys specified. + * the accounts specified in authority or the keys specified. 
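    *
    * Illustrative note, not part of the patch: check_authority() walks nested
    * account authorities recursively, up to sign_state::max_recursion levels deep.
    * With the change just below, reaching that depth limit for one nested account
    * no longer fails the whole authority check (`return false`); the offending
    * account is simply skipped (`continue`), so the remaining account and key
    * auths can still satisfy the weight threshold.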
*/ bool check_authority( const authority* au, uint32_t depth = 0 ) { @@ -193,7 +195,7 @@ struct sign_state if( approved_by.find(a.first) == approved_by.end() ) { if( depth == max_recursion ) - return false; + continue; if( check_authority( get_active( a.first ), depth+1 ) ) { approved_by.insert( a.first ); @@ -226,7 +228,7 @@ struct sign_state sign_state( const flat_set& sigs, const std::function& a, - const flat_set& keys = flat_set() ) + const flat_set& keys = empty_keyset ) :get_active(a),available_keys(keys) { for( const auto& key : sigs ) @@ -243,7 +245,7 @@ struct sign_state }; -void verify_authority( const vector& ops, const flat_set& sigs, +void verify_authority( const vector& ops, const flat_set& sigs, const std::function& get_active, const std::function& get_owner, uint32_t max_recursion_depth, @@ -275,20 +277,21 @@ void verify_authority( const vector& ops, const flat_set& ops, const flat_set signed_transaction::get_signature_keys( const chain_id_type& chain_id )const +const flat_set& signed_transaction::get_signature_keys( const chain_id_type& chain_id )const { try { auto d = sig_digest( chain_id ); flat_set result; @@ -305,14 +308,14 @@ flat_set signed_transaction::get_signature_keys( const chain_id { GRAPHENE_ASSERT( result.insert( fc::ecc::public_key(sig,d) ).second, - tx_duplicate_sig, - "Duplicate Signature detected" ); + tx_duplicate_sig, + "Duplicate Signature detected" ); } - return result; + _signees = std::move( result ); + return _signees; } FC_CAPTURE_AND_RETHROW() } - set signed_transaction::get_required_signatures( const chain_id_type& chain_id, const flat_set& available_keys, @@ -325,8 +328,8 @@ set signed_transaction::get_required_signatures( vector other; get_required_authorities( required_active, required_owner, other ); - - sign_state s(get_signature_keys( chain_id ),get_active,available_keys); + const flat_set& signature_keys = get_signature_keys( chain_id ); + sign_state s( signature_keys, get_active, available_keys ); s.max_recursion = max_recursion_depth; for( const auto& auth : other ) @@ -334,14 +337,15 @@ set signed_transaction::get_required_signatures( for( auto& owner : required_owner ) s.check_authority( get_owner( owner ) ); for( auto& active : required_active ) - s.check_authority( active ); + s.check_authority( active ) || s.check_authority( get_owner( active ) ); s.remove_unused_signatures(); set result; for( auto& provided_sig : s.provided_signatures ) - if( available_keys.find( provided_sig.first ) != available_keys.end() ) + if( available_keys.find( provided_sig.first ) != available_keys.end() + && signature_keys.find( provided_sig.first ) == signature_keys.end() ) result.insert( provided_sig.first ); return result; @@ -374,6 +378,29 @@ set signed_transaction::minimize_required_signatures( return set( result.begin(), result.end() ); } +const transaction_id_type& precomputable_transaction::id()const +{ + if( !_tx_id_buffer._hash[0] ) + transaction::id(); + return _tx_id_buffer; +} + +void precomputable_transaction::validate() const +{ + if( _validated ) return; + transaction::validate(); + _validated = true; +} + +const flat_set& precomputable_transaction::get_signature_keys( const chain_id_type& chain_id )const +{ + // Strictly we should check whether the given chain ID is same as the one used to initialize the `signees` field. + // However, we don't pass in another chain ID so far, for better performance, we skip the check. 
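   // Illustrative note, not part of the patch: _signees acts as a lazily-filled
   // cache. The first call falls through to signed_transaction::get_signature_keys(),
   // which recovers a public key from each signature and stores the result in
   // _signees; later calls return the cached set and skip the relatively expensive
   // key recovery.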
+ if( _signees.empty() ) + signed_transaction::get_signature_keys( chain_id ); + return _signees; +} + void signed_transaction::verify_authority( const chain_id_type& chain_id, const std::function& get_active, diff --git a/libraries/chain/protocol/types.cpp b/libraries/chain/protocol/types.cpp index ac5ad8c83a..a51474f0da 100644 --- a/libraries/chain/protocol/types.cpp +++ b/libraries/chain/protocol/types.cpp @@ -44,17 +44,6 @@ namespace graphene { namespace chain { // TODO: Refactor syntactic checks into static is_valid() // to make public_key_type API more similar to address API std::string prefix( GRAPHENE_ADDRESS_PREFIX ); - - // TODO: This is temporary for testing - try - { - if( is_valid_v1( base58str ) ) - prefix = std::string( "BTS" ); - } - catch( ... ) - { - } - const size_t prefix_len = prefix.size(); FC_ASSERT( base58str.size() > prefix_len ); FC_ASSERT( base58str.substr( 0, prefix_len ) == prefix , "", ("base58str", base58str) ); @@ -64,20 +53,6 @@ namespace graphene { namespace chain { FC_ASSERT( fc::ripemd160::hash( key_data.data, key_data.size() )._hash[0] == bin_key.check ); }; - // TODO: This is temporary for testing - bool public_key_type::is_valid_v1( const std::string& base58str ) - { - std::string prefix( "BTS" ); - const size_t prefix_len = prefix.size(); - FC_ASSERT( base58str.size() > prefix_len ); - FC_ASSERT( base58str.substr( 0, prefix_len ) == prefix , "", ("base58str", base58str) ); - auto bin = fc::from_base58( base58str.substr( prefix_len ) ); - auto bin_key = fc::raw::unpack(bin); - fc::ecc::public_key_data key_data = bin_key.data; - FC_ASSERT( fc::ripemd160::hash( key_data.data, key_data.size() )._hash[0] == bin_key.check ); - return true; - } - public_key_type::operator fc::ecc::public_key_data() const { return key_data; @@ -225,32 +200,32 @@ namespace graphene { namespace chain { namespace fc { using namespace std; - void to_variant( const graphene::chain::public_key_type& var, fc::variant& vo ) + void to_variant( const graphene::chain::public_key_type& var, fc::variant& vo, uint32_t max_depth ) { vo = std::string( var ); } - void from_variant( const fc::variant& var, graphene::chain::public_key_type& vo ) + void from_variant( const fc::variant& var, graphene::chain::public_key_type& vo, uint32_t max_depth ) { vo = graphene::chain::public_key_type( var.as_string() ); } - void to_variant( const graphene::chain::extended_public_key_type& var, fc::variant& vo ) + void to_variant( const graphene::chain::extended_public_key_type& var, fc::variant& vo, uint32_t max_depth ) { vo = std::string( var ); } - void from_variant( const fc::variant& var, graphene::chain::extended_public_key_type& vo ) + void from_variant( const fc::variant& var, graphene::chain::extended_public_key_type& vo, uint32_t max_depth ) { vo = graphene::chain::extended_public_key_type( var.as_string() ); } - void to_variant( const graphene::chain::extended_private_key_type& var, fc::variant& vo ) + void to_variant( const graphene::chain::extended_private_key_type& var, fc::variant& vo, uint32_t max_depth ) { vo = std::string( var ); } - void from_variant( const fc::variant& var, graphene::chain::extended_private_key_type& vo ) + void from_variant( const fc::variant& var, graphene::chain::extended_private_key_type& vo, uint32_t max_depth ) { vo = graphene::chain::extended_private_key_type( var.as_string() ); } diff --git a/libraries/chain/protocol/vote.cpp b/libraries/chain/protocol/vote.cpp index 44be9bcaae..f78f2b4f11 100644 --- a/libraries/chain/protocol/vote.cpp +++ 
b/libraries/chain/protocol/vote.cpp @@ -38,12 +38,12 @@ vote_id_type get_next_vote_id( global_property_object& gpo, vote_id_type::vote_t namespace fc { -void to_variant(const graphene::chain::vote_id_type& var, variant& vo) +void to_variant( const graphene::chain::vote_id_type& var, variant& vo, uint32_t max_depth ) { vo = string(var); } -void from_variant(const variant& var, graphene::chain::vote_id_type& vo) +void from_variant( const variant& var, graphene::chain::vote_id_type& vo, uint32_t max_depth ) { vo = graphene::chain::vote_id_type(var.as_string()); } diff --git a/libraries/chain/pts_address.cpp b/libraries/chain/pts_address.cpp index d2b8c33c34..27f3d256cc 100644 --- a/libraries/chain/pts_address.cpp +++ b/libraries/chain/pts_address.cpp @@ -89,11 +89,11 @@ namespace graphene { namespace chain { namespace fc { - void to_variant( const graphene::chain::pts_address& var, variant& vo ) + void to_variant( const graphene::chain::pts_address& var, variant& vo, uint32_t max_depth ) { vo = std::string(var); } - void from_variant( const variant& var, graphene::chain::pts_address& vo ) + void from_variant( const variant& var, graphene::chain::pts_address& vo, uint32_t max_depth ) { vo = graphene::chain::pts_address( var.as_string() ); } diff --git a/libraries/chain/transaction_object.cpp b/libraries/chain/transaction_object.cpp deleted file mode 100644 index fb4f75dff7..0000000000 --- a/libraries/chain/transaction_object.cpp +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ -#include - -namespace graphene { namespace chain { - -const object* transaction_index::create(const std::function& constructor, object_id_type) -{ - transaction_object obj; - - obj.id = get_next_available_id(); - constructor(&obj); - - auto result = _index.insert(std::move(obj)); - FC_ASSERT(result.second, "Could not create transaction_object! 
Most likely a uniqueness constraint is violated."); - return &*result.first; -} - -void transaction_index::modify(const object* obj, - const std::function& m) -{ - assert(obj != nullptr); - FC_ASSERT(obj->id < _index.size()); - - const transaction_object* t = dynamic_cast(obj); - assert(t != nullptr); - - auto itr = _index.find(obj->id.instance()); - assert(itr != _index.end()); - _index.modify(itr, [&m](transaction_object& o) { m(&o); }); -} - -void transaction_index::add(unique_ptr o) -{ - assert(o); - object_id_type id = o->id; - assert(id.space() == transaction_object::space_id); - assert(id.type() == transaction_object::type_id); - assert(id.instance() == size()); - - auto trx = dynamic_cast(o.get()); - assert(trx != nullptr); - o.release(); - - auto result = _index.insert(std::move(*trx)); - FC_ASSERT(result.second, "Could not insert transaction_object! Most likely a uniqueness constraint is violated."); -} - -void transaction_index::remove(object_id_type id) -{ - auto& index = _index.get(); - auto itr = index.find(id.instance()); - if( itr == index.end() ) - return; - - assert(id.space() == transaction_object::space_id); - assert(id.type() == transaction_object::type_id); - - index.erase(itr); -} - -const object*transaction_index::get(object_id_type id) const -{ - if( id.type() != transaction_object::type_id || - id.space() != transaction_object::space_id ) - return nullptr; - - auto itr = _index.find(id.instance()); - if( itr == _index.end() ) - return nullptr; - return &*itr; -} - -} } // graphene::chain diff --git a/libraries/chain/transfer_evaluator.cpp b/libraries/chain/transfer_evaluator.cpp index accc6ca3d1..60bfbdba99 100644 --- a/libraries/chain/transfer_evaluator.cpp +++ b/libraries/chain/transfer_evaluator.cpp @@ -59,7 +59,7 @@ void_result transfer_evaluator::do_evaluate( const transfer_operation& op ) GRAPHENE_ASSERT( from_account.id == asset_type.issuer || to_account.id == asset_type.issuer, transfer_restricted_transfer_asset, - "Asset {asset} has transfer_restricted flag enabled", + "Asset ${asset} has transfer_restricted flag enabled", ("asset", op.amount.asset_id) ); } diff --git a/libraries/chain/vesting_balance_object.cpp b/libraries/chain/vesting_balance_object.cpp index 8414840c28..73448e04c8 100644 --- a/libraries/chain/vesting_balance_object.cpp +++ b/libraries/chain/vesting_balance_object.cpp @@ -64,7 +64,7 @@ asset linear_vesting_policy::get_allowed_withdraw( const vesting_policy_context& } } - return asset( allowed_withdraw, ctx.amount.asset_id ); + return asset( allowed_withdraw, ctx.balance.asset_id ); } void linear_vesting_policy::on_deposit(const vesting_policy_context& ctx) diff --git a/libraries/chain/withdraw_permission_evaluator.cpp b/libraries/chain/withdraw_permission_evaluator.cpp index d001b441ff..4c503c47da 100644 --- a/libraries/chain/withdraw_permission_evaluator.cpp +++ b/libraries/chain/withdraw_permission_evaluator.cpp @@ -59,21 +59,44 @@ object_id_type withdraw_permission_create_evaluator::do_apply(const operation_ty void_result withdraw_permission_claim_evaluator::do_evaluate(const withdraw_permission_claim_evaluator::operation_type& op) { try { const database& d = db(); + time_point_sec head_block_time = d.head_block_time(); const withdraw_permission_object& permit = op.withdraw_permission(d); - FC_ASSERT(permit.expiration > d.head_block_time() ); + FC_ASSERT(permit.expiration > head_block_time); FC_ASSERT(permit.authorized_account == op.withdraw_to_account); FC_ASSERT(permit.withdraw_from_account == op.withdraw_from_account); - 
FC_ASSERT(op.amount_to_withdraw <= permit.available_this_period( d.head_block_time() ) ); + if (head_block_time >= HARDFORK_23_TIME) { + FC_ASSERT(permit.period_start_time <= head_block_time); + } + FC_ASSERT(op.amount_to_withdraw <= permit.available_this_period( head_block_time ) ); FC_ASSERT(d.get_balance(op.withdraw_from_account, op.amount_to_withdraw.asset_id) >= op.amount_to_withdraw); const asset_object& _asset = op.amount_to_withdraw.asset_id(d); - if( _asset.is_transfer_restricted() ) FC_ASSERT( _asset.issuer == permit.authorized_account || _asset.issuer == permit.withdraw_from_account ); + if( _asset.is_transfer_restricted() ) + { + FC_ASSERT( _asset.issuer == permit.authorized_account || _asset.issuer == permit.withdraw_from_account, + "Asset ${a} '${sym}' has transfer_restricted flag enabled", + ("a", _asset.id)("sym", _asset.symbol) ); + } - const account_object& from = op.withdraw_to_account(d); const account_object& to = permit.authorized_account(d); - FC_ASSERT( is_authorized_asset( d, to, _asset ) ); - FC_ASSERT( is_authorized_asset( d, from, _asset ) ); + FC_ASSERT( is_authorized_asset( d, to, _asset ), + "Account ${acct} '${name}' is unauthorized to transact asset ${a} '${sym}' due to whitelist / blacklist", + ("acct", to.id)("name", to.name)("a", _asset.id)("sym", _asset.symbol) ); + + const account_object& from = op.withdraw_from_account(d); + bool from_is_authorized = ( is_authorized_asset( d, from, _asset ) ); + if( head_block_time > HARDFORK_CORE_942_TIME ) // TODO remove this check after hard fork if things in `else` did not occur + { + FC_ASSERT( from_is_authorized, + "Account ${acct} '${name}' is unauthorized to withdraw asset ${a} '${sym}' due to whitelist / blacklist", + ("acct", from.id)("name", from.name)("a", _asset.id)("sym", _asset.symbol) ); + } + else + { + if( !from_is_authorized ) + wlog( "Unauthorized asset withdrawal (issue #942) occurred at block ${b}", ("b", d.head_block_num()) ); + } return void_result(); } FC_CAPTURE_AND_RETHROW( (op) ) } diff --git a/libraries/chain/witness_evaluator.cpp b/libraries/chain/witness_evaluator.cpp index 93785bdc9c..e5853eb8c4 100644 --- a/libraries/chain/witness_evaluator.cpp +++ b/libraries/chain/witness_evaluator.cpp @@ -38,12 +38,13 @@ void_result witness_create_evaluator::do_evaluate( const witness_create_operatio object_id_type witness_create_evaluator::do_apply( const witness_create_operation& op ) { try { + database& _db = db(); vote_id_type vote_id; - db().modify(db().get_global_properties(), [&vote_id](global_property_object& p) { + _db.modify( _db.get_global_properties(), [&vote_id](global_property_object& p) { vote_id = get_next_vote_id(p, vote_id_type::witness); }); - const auto& new_witness_object = db().create( [&]( witness_object& obj ){ + const auto& new_witness_object = _db.create( [&op,&vote_id]( witness_object& obj ){ obj.witness_account = op.witness_account; obj.signing_key = op.block_signing_key; obj.vote_id = vote_id; @@ -63,7 +64,7 @@ void_result witness_update_evaluator::do_apply( const witness_update_operation& database& _db = db(); _db.modify( _db.get(op.witness), - [&]( witness_object& wit ) + [&op]( witness_object& wit ) { if( op.new_url.valid() ) wit.url = *op.new_url; diff --git a/libraries/chain/worker_evaluator.cpp b/libraries/chain/worker_evaluator.cpp index cf6f0e0007..b5aea8f3b4 100644 --- a/libraries/chain/worker_evaluator.cpp +++ b/libraries/chain/worker_evaluator.cpp @@ -106,7 +106,7 @@ object_id_type worker_create_evaluator::do_apply(const worker_create_evaluator:: void 
refund_worker_type::pay_worker(share_type pay, database& db) { total_burned += pay; - db.modify(db.get(asset_id_type()).dynamic_data(db), [pay](asset_dynamic_data_object& d) { + db.modify( db.get_core_dynamic_data(), [pay](asset_dynamic_data_object& d) { d.current_supply -= pay; }); } diff --git a/libraries/db/CMakeLists.txt b/libraries/db/CMakeLists.txt index 986fe9cbfc..6feb985c6f 100644 --- a/libraries/db/CMakeLists.txt +++ b/libraries/db/CMakeLists.txt @@ -10,3 +10,4 @@ install( TARGETS LIBRARY DESTINATION lib ARCHIVE DESTINATION lib ) +install( FILES ${HEADERS} DESTINATION "include/graphene/db" ) diff --git a/libraries/db/include/graphene/db/flat_index.hpp b/libraries/db/include/graphene/db/flat_index.hpp deleted file mode 100644 index f1b7912ef7..0000000000 --- a/libraries/db/include/graphene/db/flat_index.hpp +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ -#pragma once -#include - -namespace graphene { namespace db { - - /** - * @class flat_index - * @brief A flat index uses a vector to store data - * - * This index is preferred in situations where the data will never be - * removed from main memory and when lots of small objects that - * are accessed in order are required. 
- */ - template - class flat_index : public index - { - public: - typedef T object_type; - - virtual const object& create( const std::function& constructor ) override - { - auto id = get_next_id(); - auto instance = id.instance(); - if( instance >= _objects.size() ) _objects.resize( instance + 1 ); - _objects[instance].id = id; - constructor( _objects[instance] ); - use_next_id(); - return _objects[instance]; - } - - virtual void modify( const object& obj, const std::function& modify_callback ) override - { - assert( obj.id.instance() < _objects.size() ); - modify_callback( _objects[obj.id.instance()] ); - } - - virtual const object& insert( object&& obj )override - { - auto instance = obj.id.instance(); - assert( nullptr != dynamic_cast(&obj) ); - if( _objects.size() <= instance ) _objects.resize( instance+1 ); - _objects[instance] = std::move( static_cast(obj) ); - return _objects[instance]; - } - - virtual void remove( const object& obj ) override - { - assert( nullptr != dynamic_cast(&obj) ); - const auto instance = obj.id.instance(); - _objects[instance] = T(); - } - - virtual const object* find( object_id_type id )const override - { - assert( id.space() == T::space_id ); - assert( id.type() == T::type_id ); - - const auto instance = id.instance(); - if( instance >= _objects.size() ) return nullptr; - return &_objects[instance]; - } - - virtual void inspect_all_objects(std::function inspector)const override - { - try { - for( const auto& ptr : _objects ) - { - inspector(ptr); - } - } FC_CAPTURE_AND_RETHROW() - } - - virtual fc::uint128 hash()const override { - fc::uint128 result; - for( const auto& ptr : _objects ) - result += ptr.hash(); - - return result; - } - - class const_iterator - { - public: - const_iterator(){} - const_iterator( const typename vector::const_iterator& a ):_itr(a){} - friend bool operator==( const const_iterator& a, const const_iterator& b ) { return a._itr == b._itr; } - friend bool operator!=( const const_iterator& a, const const_iterator& b ) { return a._itr != b._itr; } - const T* operator*()const { return static_cast(&*_itr); } - const_iterator& operator++(int){ ++_itr; return *this; } - const_iterator& operator++() { ++_itr; return *this; } - private: - typename vector::const_iterator _itr; - }; - const_iterator begin()const { return const_iterator(_objects.begin()); } - const_iterator end()const { return const_iterator(_objects.end()); } - - size_t size()const{ return _objects.size(); } - - void resize( uint32_t s ) { - _objects.resize(s); - for( uint32_t i = 0; i < s; ++i ) - _objects[i].id = object_id_type(object_type::space_id,object_type::type_id,i); - } - - private: - vector< T > _objects; - }; - -} } // graphene::db diff --git a/libraries/db/include/graphene/db/generic_index.hpp b/libraries/db/include/graphene/db/generic_index.hpp index 8a433264e1..fb11d44a37 100644 --- a/libraries/db/include/graphene/db/generic_index.hpp +++ b/libraries/db/include/graphene/db/generic_index.hpp @@ -67,10 +67,25 @@ namespace graphene { namespace chain { virtual void modify( const object& obj, const std::function& m )override { - assert( nullptr != dynamic_cast(&obj) ); - auto ok = _indices.modify( _indices.iterator_to( static_cast(obj) ), - [&m]( ObjectType& o ){ m(o); } ); - FC_ASSERT( ok, "Could not modify object, most likely a index constraint was violated" ); + assert(nullptr != dynamic_cast(&obj)); + std::exception_ptr exc; + auto ok = _indices.modify(_indices.iterator_to(static_cast(obj)), + [&m, &exc](ObjectType& o) mutable { + try { + m(o); + } catch 
(fc::exception& e) { + exc = std::current_exception(); + elog("Exception while modifying object: ${e} -- object may be corrupted", + ("e", e)); + } catch (...) { + exc = std::current_exception(); + elog("Unknown exception while modifying object"); + } + } + ); + if (exc) + std::rethrow_exception(exc); + FC_ASSERT(ok, "Could not modify object, most likely an index constraint was violated"); } virtual void remove( const object& obj )override diff --git a/libraries/db/include/graphene/db/index.hpp b/libraries/db/include/graphene/db/index.hpp index 7ecf5a668a..2ba0c76a3c 100644 --- a/libraries/db/include/graphene/db/index.hpp +++ b/libraries/db/include/graphene/db/index.hpp @@ -23,11 +23,14 @@ */ #pragma once #include + #include #include #include #include + #include +#include namespace graphene { namespace db { class object_database; @@ -108,7 +111,7 @@ namespace graphene { namespace db { const object& get( object_id_type id )const { auto maybe_found = find( id ); - FC_ASSERT( maybe_found != nullptr, "Unable to find Object", ("id",id) ); + FC_ASSERT( maybe_found != nullptr, "Unable to find Object ${id}", ("id",id) ); return *maybe_found; } @@ -130,7 +133,7 @@ namespace graphene { namespace db { virtual fc::uint128 hash()const = 0; virtual void add_observer( const shared_ptr& ) = 0; - virtual void object_from_variant( const fc::variant& var, object& obj )const = 0; + virtual void object_from_variant( const fc::variant& var, object& obj, uint32_t max_depth )const = 0; virtual void object_default( object& obj )const = 0; }; @@ -164,10 +167,11 @@ namespace graphene { namespace db { /** called just after obj is modified */ void on_modify( const object& obj ); - template - void add_secondary_index() + template + T* add_secondary_index(Args... args) { - _sindex.emplace_back( new T() ); + _sindex.emplace_back( new T(args...) ); + return static_cast(_sindex.back().get()); } template @@ -189,6 +193,111 @@ namespace graphene { namespace db { object_database& _db; }; + /** @class direct_index + * @brief A secondary index that tracks objects in vectors indexed by object + * id. It is meant for fully (or almost fully) populated indexes only (will + * fail when loading an object_database with large gaps). + * + * WARNING! If any of the methods called on insertion, removal or + * modification throws, subsequent behaviour is undefined! Such exceptions + * indicate that this index type is not appropriate for the use-case. + */ + template + class direct_index : public secondary_index + { + static_assert( chunkbits < 64, "Do you really want arrays with more than 2^63 elements???" ); + + // private + static const size_t MAX_HOLE = 100; + static const size_t _mask = ((1 << chunkbits) - 1); + uint64_t next = 0; + vector< vector< const Object* > > content; + std::stack< object_id_type > ids_being_modified; + + public: + direct_index() { + FC_ASSERT( (1ULL << chunkbits) > MAX_HOLE, "Small chunkbits is inefficient." 
); + } + + virtual ~direct_index(){} + + virtual void object_inserted( const object& obj ) + { + uint64_t instance = obj.id.instance(); + if( instance == next ) + { + if( !(next & _mask) ) + { + content.resize((next >> chunkbits) + 1); + content[next >> chunkbits].resize( 1 << chunkbits, nullptr ); + } + next++; + } + else if( instance < next ) + FC_ASSERT( !content[instance >> chunkbits][instance & _mask], "Overwriting insert at {id}!", ("id",obj.id) ); + else // instance > next, allow small "holes" + { + FC_ASSERT( instance <= next + MAX_HOLE, "Out-of-order insert: {id} > {next}!", ("id",obj.id)("next",next) ); + if( !(next & _mask) || (next & (~_mask)) != (instance & (~_mask)) ) + { + content.resize((instance >> chunkbits) + 1); + content[instance >> chunkbits].resize( 1 << chunkbits, nullptr ); + } + while( next <= instance ) + { + content[next >> chunkbits][next & _mask] = nullptr; + next++; + } + } + FC_ASSERT( nullptr != dynamic_cast(&obj), "Wrong object type!" ); + content[instance >> chunkbits][instance & _mask] = static_cast( &obj ); + } + + virtual void object_removed( const object& obj ) + { + FC_ASSERT( nullptr != dynamic_cast(&obj), "Wrong object type!" ); + uint64_t instance = obj.id.instance(); + FC_ASSERT( instance < next, "Removing out-of-range object: {id} > {next}!", ("id",obj.id)("next",next) ); + FC_ASSERT( content[instance >> chunkbits][instance & _mask], "Removing non-existent object {id}!", ("id",obj.id) ); + content[instance >> chunkbits][instance & _mask] = nullptr; + } + + virtual void about_to_modify( const object& before ) + { + ids_being_modified.emplace( before.id ); + } + + virtual void object_modified( const object& after ) + { + FC_ASSERT( ids_being_modified.top() == after.id, "Modification of ID is not supported!"); + ids_being_modified.pop(); + } + + template< typename object_id > + const Object* find( const object_id& id )const + { + static_assert( object_id::space_id == Object::space_id, "Space ID mismatch!" ); + static_assert( object_id::type_id == Object::type_id, "Type_ID mismatch!" ); + if( id.instance >= next ) return nullptr; + return content[id.instance.value >> chunkbits][id.instance.value & _mask]; + }; + + template< typename object_id > + const Object& get( const object_id& id )const + { + const Object* ptr = find( id ); + FC_ASSERT( ptr != nullptr, "Object not found!" ); + return *ptr; + }; + + const Object* find( const object_id_type& id )const + { + FC_ASSERT( id.space() == Object::space_id, "Space ID mismatch!" ); + FC_ASSERT( id.type() == Object::type_id, "Type_ID mismatch!" 
); + if( id.instance() >= next ) return nullptr; + return content[id.instance() >> chunkbits][id.instance() & ((1 << chunkbits) - 1)]; + }; + }; /** * @class primary_index @@ -197,14 +306,18 @@ namespace graphene { namespace db { * * @see http://en.wikipedia.org/wiki/Curiously_recurring_template_pattern */ - template + template class primary_index : public DerivedIndex, public base_primary_index { public: typedef typename DerivedIndex::object_type object_type; primary_index( object_database& db ) - :base_primary_index(db),_next_id(object_type::space_id,object_type::type_id,0) {} + :base_primary_index(db),_next_id(object_type::space_id,object_type::type_id,0) + { + if( DirectBits > 0 ) + _direct_by_id = add_secondary_index< direct_index< object_type, DirectBits > >(); + } virtual uint8_t object_space_id()const override { return object_type::space_id; } @@ -216,6 +329,14 @@ namespace graphene { namespace db { virtual void use_next_id()override { ++_next_id.number; } virtual void set_next_id( object_id_type id )override { _next_id = id; } + /** @return the object with id or nullptr if not found */ + virtual const object* find( object_id_type id )const override + { + if( DirectBits > 0 ) + return _direct_by_id->find( id ); + return DerivedIndex::find( id ); + } + fc::sha256 get_object_version()const { std::string desc = "1.0";//get_type_description(); @@ -233,14 +354,12 @@ namespace graphene { namespace db { fc::raw::unpack(ds, _next_id); fc::raw::unpack(ds, open_ver); FC_ASSERT( open_ver == get_object_version(), "Incompatible Version, the serialization of objects in this index has changed" ); - try { - vector tmp; - while( true ) - { - fc::raw::unpack( ds, tmp ); - load( tmp ); - } - } catch ( const fc::exception& ){} + vector tmp; + while( ds.remaining() > 0 ) + { + fc::raw::unpack( ds, tmp ); + load( tmp ); + } } virtual void save( const path& db ) override @@ -276,6 +395,15 @@ namespace graphene { namespace db { return result; } + virtual const object& insert( object&& obj ) override + { + const auto& result = DerivedIndex::insert( std::move( obj ) ); + for( const auto& item : _sindex ) + item->object_inserted( result ); + on_add( result ); + return result; + } + virtual void remove( const object& obj ) override { for( const auto& item : _sindex ) @@ -300,12 +428,12 @@ namespace graphene { namespace db { _observers.emplace_back( o ); } - virtual void object_from_variant( const fc::variant& var, object& obj )const override + virtual void object_from_variant( const fc::variant& var, object& obj, uint32_t max_depth )const override { object_id_type id = obj.id; object_type* result = dynamic_cast( &obj ); FC_ASSERT( result != nullptr ); - fc::from_variant( var, *result ); + fc::from_variant( var, *result, max_depth ); obj.id = id; } @@ -319,7 +447,8 @@ namespace graphene { namespace db { } private: - object_id_type _next_id; + object_id_type _next_id; + const direct_index< object_type, DirectBits >* _direct_by_id = nullptr; }; } } // graphene::db diff --git a/libraries/db/include/graphene/db/object.hpp b/libraries/db/include/graphene/db/object.hpp index d8d16c3317..c410e273ee 100644 --- a/libraries/db/include/graphene/db/object.hpp +++ b/libraries/db/include/graphene/db/object.hpp @@ -27,6 +27,8 @@ #include #include +#define MAX_NESTING (200) + namespace graphene { namespace db { /** @@ -98,7 +100,7 @@ namespace graphene { namespace db { { static_cast(*this) = std::move( static_cast(obj) ); } - virtual variant to_variant()const { return variant( static_cast(*this) ); } + virtual variant 
to_variant()const { return variant( static_cast(*this), MAX_NESTING ); } virtual vector pack()const { return fc::raw::pack( static_cast(*this) ); } virtual fc::uint128 hash()const { auto tmp = this->pack(); diff --git a/libraries/db/include/graphene/db/object_database.hpp b/libraries/db/include/graphene/db/object_database.hpp index fa2109aab3..b5e85d76a3 100644 --- a/libraries/db/include/graphene/db/object_database.hpp +++ b/libraries/db/include/graphene/db/object_database.hpp @@ -139,6 +139,12 @@ namespace graphene { namespace db { return static_cast(_index[ObjectType::space_id][ObjectType::type_id].get()); } + template + SecondaryIndexType* add_secondary_index( Args... args ) + { + return get_mutable_index_type().template add_secondary_index(args...); + } + void pop_undo(); fc::path get_data_dir()const { return _data_dir; } diff --git a/libraries/db/include/graphene/db/object_id.hpp b/libraries/db/include/graphene/db/object_id.hpp index 598ff3dee8..acada38d14 100644 --- a/libraries/db/include/graphene/db/object_id.hpp +++ b/libraries/db/include/graphene/db/object_id.hpp @@ -34,13 +34,11 @@ namespace graphene { namespace db { using fc::flat_map; using fc::variant; using fc::unsigned_int; - using fc::signed_int; struct object_id_type { object_id_type( uint8_t s, uint8_t t, uint64_t i ) { - assert( i>>48 == 0 ); FC_ASSERT( i >> 48 == 0, "instance overflow", ("instance",i) ); number = (uint64_t(s)<<56) | (uint64_t(t)<<48) | i; } @@ -169,12 +167,12 @@ struct reflector > }; - inline void to_variant( const graphene::db::object_id_type& var, fc::variant& vo ) + inline void to_variant( const graphene::db::object_id_type& var, fc::variant& vo, uint32_t max_depth = 1 ) { vo = std::string( var ); } - inline void from_variant( const fc::variant& var, graphene::db::object_id_type& vo ) + inline void from_variant( const fc::variant& var, graphene::db::object_id_type& vo, uint32_t max_depth = 1 ) { try { vo.number = 0; const auto& s = var.get_string(); @@ -191,12 +189,12 @@ struct reflector > vo.number |= (space_id << 56) | (type_id << 48); } FC_CAPTURE_AND_RETHROW( (var) ) } template - void to_variant( const graphene::db::object_id& var, fc::variant& vo ) + void to_variant( const graphene::db::object_id& var, fc::variant& vo, uint32_t max_depth = 1 ) { vo = fc::to_string(SpaceID) + "." + fc::to_string(TypeID) + "." + fc::to_string(var.instance.value); } template - void from_variant( const fc::variant& var, graphene::db::object_id& vo ) + void from_variant( const fc::variant& var, graphene::db::object_id& vo, uint32_t max_depth = 1 ) { try { const auto& s = var.get_string(); auto first_dot = s.find('.'); diff --git a/libraries/db/include/graphene/db/undo_database.hpp b/libraries/db/include/graphene/db/undo_database.hpp index 9f10486960..5234ac65aa 100644 --- a/libraries/db/include/graphene/db/undo_database.hpp +++ b/libraries/db/include/graphene/db/undo_database.hpp @@ -59,17 +59,7 @@ namespace graphene { namespace db { { mv._apply_undo = false; } - ~session() { - try { - if( _apply_undo ) _db.undo(); - } - catch ( const fc::exception& e ) - { - elog( "${e}", ("e",e.to_detail_string() ) ); - throw; // maybe crash.. 
- } - if( _disable_on_exit ) _db.disable(); - } + ~session(); void commit() { _apply_undo = false; _db.commit(); } void undo() { if( _apply_undo ) _db.undo(); _apply_undo = false; } void merge() { if( _apply_undo ) _db.merge(); _apply_undo = false; } @@ -129,6 +119,7 @@ namespace graphene { namespace db { std::size_t size()const { return _stack.size(); } void set_max_size(size_t new_max_size) { _max_size = new_max_size; } size_t max_size()const { return _max_size; } + uint32_t active_sessions()const { return _active_sessions; } const undo_state& head()const; diff --git a/libraries/db/object_database.cpp b/libraries/db/object_database.cpp index 29d83ae722..5b026c08cf 100644 --- a/libraries/db/object_database.cpp +++ b/libraries/db/object_database.cpp @@ -25,6 +25,7 @@ #include #include +#include #include namespace graphene { namespace db { @@ -71,14 +72,26 @@ index& object_database::get_mutable_index(uint8_t space_id, uint8_t type_id) void object_database::flush() { // ilog("Save object_database in ${d}", ("d", _data_dir)); + fc::create_directories( _data_dir / "object_database.tmp" / "lock" ); + std::vector> tasks; + tasks.reserve(200); for( uint32_t space = 0; space < _index.size(); ++space ) { - fc::create_directories( _data_dir / "object_database" / fc::to_string(space) ); + fc::create_directories( _data_dir / "object_database.tmp" / fc::to_string(space) ); const auto types = _index[space].size(); for( uint32_t type = 0; type < types; ++type ) if( _index[space][type] ) - _index[space][type]->save( _data_dir / "object_database" / fc::to_string(space)/fc::to_string(type) ); + tasks.push_back( fc::do_parallel( [this,space,type] () { + _index[space][type]->save( _data_dir / "object_database.tmp" / fc::to_string(space)/fc::to_string(type) ); + } ) ); } + for( auto& task : tasks ) + task.wait(); + fc::remove_all( _data_dir / "object_database.tmp" / "lock" ); + if( fc::exists( _data_dir / "object_database" ) ) + fc::rename( _data_dir / "object_database", _data_dir / "object_database.old" ); + fc::rename( _data_dir / "object_database.tmp", _data_dir / "object_database" ); + fc::remove_all( _data_dir / "object_database.old" ); } void object_database::wipe(const fc::path& data_dir) @@ -91,12 +104,23 @@ void object_database::wipe(const fc::path& data_dir) void object_database::open(const fc::path& data_dir) { try { - ilog("Opening object database from ${d} ...", ("d", data_dir)); _data_dir = data_dir; + if( fc::exists( _data_dir / "object_database" / "lock" ) ) + { + wlog("Ignoring locked object_database"); + return; + } + std::vector> tasks; + tasks.reserve(200); + ilog("Opening object database from ${d} ...", ("d", data_dir)); for( uint32_t space = 0; space < _index.size(); ++space ) for( uint32_t type = 0; type < _index[space].size(); ++type ) if( _index[space][type] ) - _index[space][type]->open( _data_dir / "object_database" / fc::to_string(space)/fc::to_string(type) ); + tasks.push_back( fc::do_parallel( [this,space,type] () { + _index[space][type]->open( _data_dir / "object_database" / fc::to_string(space)/fc::to_string(type) ); + } ) ); + for( auto& task : tasks ) + task.wait(); ilog( "Done opening object database." 
); } FC_CAPTURE_AND_RETHROW( (data_dir) ) } diff --git a/libraries/db/undo_database.cpp b/libraries/db/undo_database.cpp index b37b2c7dbd..3e340728e3 100644 --- a/libraries/db/undo_database.cpp +++ b/libraries/db/undo_database.cpp @@ -30,6 +30,19 @@ namespace graphene { namespace db { void undo_database::enable() { _disabled = false; } void undo_database::disable() { _disabled = true; } +undo_database::session::~session() +{ + try { + if( _apply_undo ) _db.undo(); + } + catch ( const fc::exception& e ) + { + elog( "${e}", ("e",e.to_detail_string() ) ); + std::terminate(); + } + if( _disable_on_exit ) _db.disable(); +} + undo_database::session undo_database::start_undo_session( bool force_enable ) { if( _disabled && !force_enable ) return session(*this); @@ -118,8 +131,6 @@ void undo_database::undo() _db.insert( std::move(*item.second) ); _stack.pop_back(); - if( _stack.empty() ) - _stack.emplace_back(); enable(); --_active_sessions; } FC_CAPTURE_AND_RETHROW() } @@ -127,6 +138,12 @@ void undo_database::undo() void undo_database::merge() { FC_ASSERT( _active_sessions > 0 ); + if( _active_sessions == 1 && _stack.size() == 1 ) + { + _stack.pop_back(); + --_active_sessions; + return; + } FC_ASSERT( _stack.size() >=2 ); auto& state = _stack.back(); auto& prev_state = _stack[_stack.size()-2]; diff --git a/libraries/deterministic_openssl_rand/CMakeLists.txt b/libraries/deterministic_openssl_rand/CMakeLists.txt deleted file mode 100644 index 13ef69a077..0000000000 --- a/libraries/deterministic_openssl_rand/CMakeLists.txt +++ /dev/null @@ -1,27 +0,0 @@ - -file(GLOB headers "include/graphene/utilities/*.hpp") - -set(sources deterministic_openssl_rand.cpp - ${headers}) - -add_library( deterministic_openssl_rand - ${sources} - ${HEADERS} ) -target_link_libraries( deterministic_openssl_rand fc ) -target_include_directories( deterministic_openssl_rand - PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" - "${CMAKE_CURRENT_SOURCE_DIR}/../blockchain/include" - ) - -if (USE_PCH) - set_target_properties(deterministic_openssl_rand PROPERTIES COTIRE_ADD_UNITY_BUILD FALSE) - cotire(deterministic_openssl_rand) -endif(USE_PCH) - -install( TARGETS - deterministic_openssl_rand - - RUNTIME DESTINATION bin - LIBRARY DESTINATION lib - ARCHIVE DESTINATION lib -) diff --git a/libraries/deterministic_openssl_rand/deterministic_openssl_rand.cpp b/libraries/deterministic_openssl_rand/deterministic_openssl_rand.cpp deleted file mode 100644 index e88e5c26c7..0000000000 --- a/libraries/deterministic_openssl_rand/deterministic_openssl_rand.cpp +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ -#include -#include -#include -#include -#include -#include -#include -//#include - -#include -#include -#include -#include - -static bool deterministic_rand_warning_shown = false; - -static void _warn() -{ - if (!deterministic_rand_warning_shown) - { - std::cerr << "********************************************************************************\n" - << "DETERMINISTIC RANDOM NUMBER GENERATION ENABLED\n" - << "********************************************************************************\n" - << "TESTING PURPOSES ONLY -- NOT SUITABLE FOR PRODUCTION USE\n" - << "DO NOT USE PRIVATE KEYS GENERATED WITH THIS PROGRAM FOR LIVE FUNDS\n" - << "********************************************************************************\n"; - deterministic_rand_warning_shown = true; - } -#ifndef GRAPHENE_TEST_NETWORK - std::cerr << "This program looks like a production application, but is calling the deterministic RNG.\n" - << "Perhaps the compile-time options in config.hpp were misconfigured?\n"; - exit(1); -#else - return; -#endif -} - -// These don't need to do anything if you don't have anything for them to do. -static void deterministic_rand_cleanup() { _warn(); } -static void deterministic_rand_add(const void *buf, int num, double add_entropy) { _warn(); } -static int deterministic_rand_status() { _warn(); return 1; } -static void deterministic_rand_seed(const void *buf, int num) { _warn(); } - -static fc::sha512 seed; - -static int deterministic_rand_bytes(unsigned char *buf, int num) -{ - _warn(); - while (num) - { - seed = fc::sha512::hash(seed); - - int bytes_to_copy = std::min(num, sizeof(seed)); - memcpy(buf, &seed, bytes_to_copy); - num -= bytes_to_copy; - buf += bytes_to_copy; - } - return 1; -} - -// Create the table that will link OpenSSL's rand API to our functions. -static RAND_METHOD deterministic_rand_vtable = { - deterministic_rand_seed, - deterministic_rand_bytes, - deterministic_rand_cleanup, - deterministic_rand_add, - deterministic_rand_bytes, - deterministic_rand_status -}; - -namespace graphene { namespace utilities { - -void set_random_seed_for_testing(const fc::sha512& new_seed) -{ - _warn(); - RAND_set_rand_method(&deterministic_rand_vtable); - seed = new_seed; - return; -} - -} } diff --git a/libraries/egenesis/README-dev.md b/libraries/egenesis/README-dev.md new file mode 100644 index 0000000000..3548a25cd6 --- /dev/null +++ b/libraries/egenesis/README-dev.md @@ -0,0 +1,6 @@ +Use this key to produce blocks with the `genesis-dev.json` Genesis. +The following line may be added directly to `config.ini`: + +``` +private-key = ["BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV","5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"] +``` diff --git a/libraries/egenesis/egenesis_brief.cpp.tmpl b/libraries/egenesis/egenesis_brief.cpp.tmpl index 8ee2ba3b72..d026c599ad 100644 --- a/libraries/egenesis/egenesis_brief.cpp.tmpl +++ b/libraries/egenesis/egenesis_brief.cpp.tmpl @@ -1,20 +1,26 @@ ${generated_file_banner} /* - * Copyright (c) 2015, Cryptonomex, Inc. - * All rights reserved. + * Copyright (c) 2015 Cryptonomex, Inc., and contributors. * - * This source code is provided for evaluation in private test networks only, until September 8, 2015. 
After this date, this license expires and - * the code may not be used, modified or distributed for any purpose. Redistribution and use in source and binary forms, with or without modification, - * are permitted until September 8, 2015, provided that the following conditions are met: + * The MIT License * - * 1. The code and/or derivative works are used only for private test networks consisting of no more than 10 P2P nodes. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, - * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. */ #include @@ -26,7 +32,7 @@ using namespace graphene::chain; chain_id_type get_egenesis_chain_id() { - return chain_id_type( "${chain_id}$" ); + return chain_id_type( "${chain_id}" ); } void compute_egenesis_json( std::string& result ) diff --git a/libraries/egenesis/egenesis_full.cpp.tmpl b/libraries/egenesis/egenesis_full.cpp.tmpl index 7054e20f83..d89a694bc5 100644 --- a/libraries/egenesis/egenesis_full.cpp.tmpl +++ b/libraries/egenesis/egenesis_full.cpp.tmpl @@ -1,20 +1,26 @@ ${generated_file_banner} /* - * Copyright (c) 2015, Cryptonomex, Inc. - * All rights reserved. + * Copyright (c) 2015 Cryptonomex, Inc., and contributors. * - * This source code is provided for evaluation in private test networks only, until September 8, 2015. After this date, this license expires and - * the code may not be used, modified or distributed for any purpose. Redistribution and use in source and binary forms, with or without modification, - * are permitted until September 8, 2015, provided that the following conditions are met: + * The MIT License * - * 1. The code and/or derivative works are used only for private test networks consisting of no more than 10 P2P nodes. 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, - * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
*/ #include @@ -24,26 +30,25 @@ namespace graphene { namespace egenesis { using namespace graphene::chain; -static const char genesis_json_array[${genesis_json_array_height}$][${genesis_json_array_width}$+1] = +static const char genesis_json_array[${genesis_json_array_height}][${genesis_json_array_width}+1] = { -${genesis_json_array}$ +${genesis_json_array} }; chain_id_type get_egenesis_chain_id() { - return chain_id_type( "${chain_id}$" ); + return chain_id_type( "${chain_id}" ); } void compute_egenesis_json( std::string& result ) { - result.reserve( ${genesis_json_length}$ ); + result.reserve( ${genesis_json_length} ); result.resize(0); - for( size_t i=0; i<${genesis_json_array_height}$-1; i++ ) + for( size_t i=0; i<${genesis_json_array_height}-1; i++ ) { - result.append( genesis_json_array[i], ${genesis_json_array_width}$ ); + result.append( genesis_json_array[i], ${genesis_json_array_width} ); } - result.append( std::string( genesis_json_array[ ${genesis_json_array_height}$-1 ] ) ); - return; + result.append( std::string( genesis_json_array[ ${genesis_json_array_height}-1 ] ) ); } fc::sha256 get_egenesis_json_hash() diff --git a/libraries/egenesis/embed_genesis.cpp b/libraries/egenesis/embed_genesis.cpp index 9f8eb0f272..6854e9f6d3 100644 --- a/libraries/egenesis/embed_genesis.cpp +++ b/libraries/egenesis/embed_genesis.cpp @@ -30,7 +30,6 @@ #include #include -#include // required for gcc in release mode #include #include #include @@ -166,7 +165,7 @@ struct egenesis_info else if( genesis_json.valid() ) { // If genesis not exist, generate from genesis_json - genesis = fc::json::from_string( *genesis_json ).as< genesis_state_type >(); + genesis = fc::json::from_string( *genesis_json ).as< genesis_state_type >( 20 ); } else { @@ -215,7 +214,6 @@ void load_genesis( std::cerr << "embed_genesis: Genesis ID from argument is " << chain_id_str << "\n"; info.chain_id = chain_id_str; } - return; } int main( int argc, char** argv ) diff --git a/libraries/egenesis/genesis-dev.json b/libraries/egenesis/genesis-dev.json new file mode 100644 index 0000000000..7d6f03a46c --- /dev/null +++ b/libraries/egenesis/genesis-dev.json @@ -0,0 +1,379 @@ +{ + "initial_timestamp": "2019-02-14T20:32:55", + "max_core_supply": "1000000000000000", + "initial_parameters": { + "current_fees": { + "parameters": [[ + 0,{ + "fee": 2000000, + "price_per_kbyte": 1000000 + } + ],[ + 1,{ + "fee": 500000 + } + ],[ + 2,{ + "fee": 0 + } + ],[ + 3,{ + "fee": 2000000 + } + ],[ + 4,{} + ],[ + 5,{ + "basic_fee": 500000, + "premium_fee": 200000000, + "price_per_kbyte": 100000 + } + ],[ + 6,{ + "fee": 2000000, + "price_per_kbyte": 100000 + } + ],[ + 7,{ + "fee": 300000 + } + ],[ + 8,{ + "membership_annual_fee": 200000000, + "membership_lifetime_fee": 1000000000 + } + ],[ + 9,{ + "fee": 50000000 + } + ],[ + 10,{ + "symbol3": "50000000000", + "symbol4": "30000000000", + "long_symbol": 500000000, + "price_per_kbyte": 10 + } + ],[ + 11,{ + "fee": 50000000, + "price_per_kbyte": 10 + } + ],[ + 12,{ + "fee": 50000000 + } + ],[ + 13,{ + "fee": 50000000 + } + ],[ + 14,{ + "fee": 2000000, + "price_per_kbyte": 100000 + } + ],[ + 15,{ + "fee": 2000000 + } + ],[ + 16,{ + "fee": 100000 + } + ],[ + 17,{ + "fee": 10000000 + } + ],[ + 18,{ + "fee": 50000000 + } + ],[ + 19,{ + "fee": 100000 + } + ],[ + 20,{ + "fee": 500000000 + } + ],[ + 21,{ + "fee": 2000000 + } + ],[ + 22,{ + "fee": 2000000, + "price_per_kbyte": 10 + } + ],[ + 23,{ + "fee": 2000000, + "price_per_kbyte": 10 + } + ],[ + 24,{ + "fee": 100000 + } + ],[ + 25,{ + "fee": 100000 + } + ],[ + 
26,{ + "fee": 100000 + } + ],[ + 27,{ + "fee": 2000000, + "price_per_kbyte": 10 + } + ],[ + 28,{ + "fee": 0 + } + ],[ + 29,{ + "fee": 500000000 + } + ],[ + 30,{ + "fee": 2000000 + } + ],[ + 31,{ + "fee": 100000 + } + ],[ + 32,{ + "fee": 100000 + } + ],[ + 33,{ + "fee": 2000000 + } + ],[ + 34,{ + "fee": 500000000 + } + ],[ + 35,{ + "fee": 100000, + "price_per_kbyte": 10 + } + ],[ + 36,{ + "fee": 100000 + } + ],[ + 37,{} + ],[ + 38,{ + "fee": 2000000, + "price_per_kbyte": 10 + } + ],[ + 39,{ + "fee": 500000, + "price_per_output": 500000 + } + ],[ + 40,{ + "fee": 500000, + "price_per_output": 500000 + } + ],[ + 41,{ + "fee": 500000 + } + ],[ + 42,{} + ],[ + 43,{ + "fee": 2000000 + } + ],[ + 44,{} + ],[ + 45,{ + "fee": 2000000 + } + ],[ + 46,{} + ],[ + 47,{ + "fee": 2000000 + } + ],[ + 48,{ + "fee": 2000000 + } + ] + ], + "scale": 10000 + }, + "block_interval": 5, + "maintenance_interval": 86400, + "maintenance_skip_slots": 3, + "committee_proposal_review_period": 1209600, + "maximum_transaction_size": 2048, + "maximum_block_size": 2000000, + "maximum_time_until_expiration": 86400, + "maximum_proposal_lifetime": 2419200, + "maximum_asset_whitelist_authorities": 10, + "maximum_asset_feed_publishers": 10, + "maximum_witness_count": 1001, + "maximum_committee_count": 1001, + "maximum_authority_membership": 10, + "reserve_percent_of_fee": 2000, + "network_percent_of_fee": 2000, + "lifetime_referrer_percent_of_fee": 3000, + "cashback_vesting_period_seconds": 31536000, + "cashback_vesting_threshold": 10000000, + "count_non_member_votes": true, + "allow_non_member_whitelists": false, + "witness_pay_per_block": 1000000, + "worker_budget_per_day": "50000000000", + "max_predicate_opcode": 1, + "fee_liquidation_threshold": 10000000, + "accounts_per_fee_scale": 1000, + "account_fee_scale_bitshifts": 4, + "max_authority_depth": 2, + "extensions": [] + }, + "initial_accounts": [{ + "name": "init0", + "owner_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "active_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "is_lifetime_member": true + },{ + "name": "init1", + "owner_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "active_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "is_lifetime_member": true + },{ + "name": "init2", + "owner_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "active_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "is_lifetime_member": true + },{ + "name": "init3", + "owner_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "active_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "is_lifetime_member": true + },{ + "name": "init4", + "owner_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "active_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "is_lifetime_member": true + },{ + "name": "init5", + "owner_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "active_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "is_lifetime_member": true + },{ + "name": "init6", + "owner_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "active_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "is_lifetime_member": true + },{ + "name": "init7", + "owner_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "active_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "is_lifetime_member": true + },{ + "name": "init8", + "owner_key": 
"BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "active_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "is_lifetime_member": true + },{ + "name": "init9", + "owner_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "active_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "is_lifetime_member": true + },{ + "name": "init10", + "owner_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "active_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "is_lifetime_member": true + },{ + "name": "nathan", + "owner_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "active_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "is_lifetime_member": false + } + ], + "initial_assets": [], + "initial_balances": [{ + "owner": "BTSFAbAx7yuxt725qSZvfwWqkdCwp9ZnUama", + "asset_symbol": "BTS", + "amount": "1000000000000000" + } + ], + "initial_vesting_balances": [], + "initial_active_witnesses": 11, + "initial_witness_candidates": [{ + "owner_name": "init0", + "block_signing_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + },{ + "owner_name": "init1", + "block_signing_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + },{ + "owner_name": "init2", + "block_signing_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + },{ + "owner_name": "init3", + "block_signing_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + },{ + "owner_name": "init4", + "block_signing_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + },{ + "owner_name": "init5", + "block_signing_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + },{ + "owner_name": "init6", + "block_signing_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + },{ + "owner_name": "init7", + "block_signing_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + },{ + "owner_name": "init8", + "block_signing_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + },{ + "owner_name": "init9", + "block_signing_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + },{ + "owner_name": "init10", + "block_signing_key": "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + } + ], + "initial_committee_candidates": [{ + "owner_name": "init0" + },{ + "owner_name": "init1" + },{ + "owner_name": "init2" + },{ + "owner_name": "init3" + },{ + "owner_name": "init4" + },{ + "owner_name": "init5" + },{ + "owner_name": "init6" + },{ + "owner_name": "init7" + },{ + "owner_name": "init8" + },{ + "owner_name": "init9" + },{ + "owner_name": "init10" + } + ], + "initial_worker_candidates": [], + "immutable_parameters": { + "min_committee_member_count": 11, + "min_witness_count": 11, + "num_special_accounts": 0, + "num_special_assets": 0 + } +} diff --git a/libraries/egenesis/genesis.json b/libraries/egenesis/genesis.json new file mode 100644 index 0000000000..6c9963bd82 --- /dev/null +++ b/libraries/egenesis/genesis.json @@ -0,0 +1,380 @@ +{ + "initial_timestamp": "2018-11-01T23:07:30", + "max_core_supply": "1000000000000000", + "initial_parameters": { + "current_fees": { + "parameters": [[ + 0,{ + "fee": 2000000, + "price_per_kbyte": 1000000 + } + ],[ + 1,{ + "fee": 500000 + } + ],[ + 2,{ + "fee": 0 + } + ],[ + 3,{ + "fee": 2000000 + } + ],[ + 4,{} + ],[ + 5,{ + "basic_fee": 500000, + "premium_fee": 200000000, + "price_per_kbyte": 100000 + } + ],[ + 6,{ + "fee": 2000000, + "price_per_kbyte": 100000 + } + ],[ + 7,{ + "fee": 300000 + } + ],[ + 8,{ + "membership_annual_fee": 
200000000, + "membership_lifetime_fee": 1000000000 + } + ],[ + 9,{ + "fee": 50000000 + } + ],[ + 10,{ + "symbol3": "50000000000", + "symbol4": "30000000000", + "long_symbol": 500000000, + "price_per_kbyte": 10 + } + ],[ + 11,{ + "fee": 50000000, + "price_per_kbyte": 10 + } + ],[ + 12,{ + "fee": 50000000 + } + ],[ + 13,{ + "fee": 50000000 + } + ],[ + 14,{ + "fee": 2000000, + "price_per_kbyte": 100000 + } + ],[ + 15,{ + "fee": 2000000 + } + ],[ + 16,{ + "fee": 100000 + } + ],[ + 17,{ + "fee": 10000000 + } + ],[ + 18,{ + "fee": 50000000 + } + ],[ + 19,{ + "fee": 100000 + } + ],[ + 20,{ + "fee": 500000000 + } + ],[ + 21,{ + "fee": 2000000 + } + ],[ + 22,{ + "fee": 2000000, + "price_per_kbyte": 10 + } + ],[ + 23,{ + "fee": 2000000, + "price_per_kbyte": 10 + } + ],[ + 24,{ + "fee": 100000 + } + ],[ + 25,{ + "fee": 100000 + } + ],[ + 26,{ + "fee": 100000 + } + ],[ + 27,{ + "fee": 2000000, + "price_per_kbyte": 10 + } + ],[ + 28,{ + "fee": 0 + } + ],[ + 29,{ + "fee": 500000000 + } + ],[ + 30,{ + "fee": 2000000 + } + ],[ + 31,{ + "fee": 100000 + } + ],[ + 32,{ + "fee": 100000 + } + ],[ + 33,{ + "fee": 2000000 + } + ],[ + 34,{ + "fee": 500000000 + } + ],[ + 35,{ + "fee": 100000, + "price_per_kbyte": 10 + } + ],[ + 36,{ + "fee": 100000 + } + ],[ + 37,{} + ],[ + 38,{ + "fee": 2000000, + "price_per_kbyte": 10 + } + ],[ + 39,{ + "fee": 500000, + "price_per_output": 500000 + } + ],[ + 40,{ + "fee": 500000, + "price_per_output": 500000 + } + ],[ + 41,{ + "fee": 500000 + } + ],[ + 42,{} + ],[ + 43,{ + "fee": 2000000 + } + ],[ + 44,{} + ],[ + 45,{ + "fee": 2000000 + } + ],[ + 46,{} + ],[ + 47,{ + "fee": 2000000 + } + ],[ + 48,{ + "fee": 2000000 + } + ] + ], + "scale": 10000 + }, + "block_interval": 5, + "maintenance_interval": 86400, + "maintenance_skip_slots": 3, + "committee_proposal_review_period": 1209600, + "maximum_transaction_size": 2048, + "maximum_block_size": 2000000, + "maximum_time_until_expiration": 86400, + "maximum_proposal_lifetime": 2419200, + "maximum_asset_whitelist_authorities": 10, + "maximum_asset_feed_publishers": 10, + "maximum_witness_count": 1001, + "maximum_committee_count": 1001, + "maximum_authority_membership": 10, + "reserve_percent_of_fee": 2000, + "network_percent_of_fee": 2000, + "lifetime_referrer_percent_of_fee": 3000, + "cashback_vesting_period_seconds": 31536000, + "cashback_vesting_threshold": 10000000, + "count_non_member_votes": true, + "allow_non_member_whitelists": false, + "witness_pay_per_block": 1000000, + "worker_budget_per_day": "50000000000", + "max_predicate_opcode": 1, + "fee_liquidation_threshold": 10000000, + "accounts_per_fee_scale": 1000, + "account_fee_scale_bitshifts": 4, + "max_authority_depth": 2, + "extensions": [] + }, + "initial_accounts": [{ + "name": "init0", + "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "is_lifetime_member": true + },{ + "name": "init1", + "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "is_lifetime_member": true + },{ + "name": "init2", + "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "is_lifetime_member": true + },{ + "name": "init3", + "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "is_lifetime_member": true + },{ + "name": "init4", + 
"owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "is_lifetime_member": true + },{ + "name": "init5", + "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "is_lifetime_member": true + },{ + "name": "init6", + "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "is_lifetime_member": true + },{ + "name": "init7", + "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "is_lifetime_member": true + },{ + "name": "init8", + "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "is_lifetime_member": true + },{ + "name": "init9", + "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "is_lifetime_member": true + },{ + "name": "init10", + "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "is_lifetime_member": true + },{ + "name": "nathan", + "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "is_lifetime_member": false + } + ], + "initial_assets": [], + "initial_balances": [{ + "owner": "GPHFAbAx7yuxt725qSZvfwWqkdCwp9ZnUama", + "asset_symbol": "CORE", + "amount": "1000000000000000" + } + ], + "initial_vesting_balances": [], + "initial_active_witnesses": 11, + "initial_witness_candidates": [{ + "owner_name": "init0", + "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + },{ + "owner_name": "init1", + "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + },{ + "owner_name": "init2", + "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + },{ + "owner_name": "init3", + "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + },{ + "owner_name": "init4", + "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + },{ + "owner_name": "init5", + "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + },{ + "owner_name": "init6", + "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + },{ + "owner_name": "init7", + "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + },{ + "owner_name": "init8", + "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + },{ + "owner_name": "init9", + "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + },{ + "owner_name": "init10", + "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + } + ], + "initial_committee_candidates": [{ + "owner_name": "init0" + },{ + "owner_name": "init1" + },{ + "owner_name": "init2" + },{ + "owner_name": "init3" + },{ + "owner_name": "init4" + },{ + "owner_name": "init5" + },{ + "owner_name": "init6" + },{ + "owner_name": "init7" + },{ + "owner_name": "init8" + },{ + "owner_name": "init9" + },{ + "owner_name": "init10" + } + ], + "initial_worker_candidates": [], + "initial_chain_id": 
"aa34045518f1469a28fa4578240d5f039afa9959c0b95ce3b39674efa691fb21", + "immutable_parameters": { + "min_committee_member_count": 11, + "min_witness_count": 11, + "num_special_accounts": 0, + "num_special_assets": 0 + } +} diff --git a/libraries/fc b/libraries/fc index 622ff58039..4e5e471827 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 622ff58039f2388433272a44fe416f5b8025589a +Subproject commit 4e5e471827e37bc5df234a4d0d2fa5f1e475ef7e diff --git a/libraries/net/CMakeLists.txt b/libraries/net/CMakeLists.txt index cabf26a67e..4110098a1f 100644 --- a/libraries/net/CMakeLists.txt +++ b/libraries/net/CMakeLists.txt @@ -32,3 +32,4 @@ install( TARGETS LIBRARY DESTINATION lib ARCHIVE DESTINATION lib ) +install( FILES ${HEADERS} DESTINATION "include/graphene/net" ) diff --git a/libraries/net/include/graphene/net/config.hpp b/libraries/net/include/graphene/net/config.hpp index d14bd825d1..a9ca55c9fc 100644 --- a/libraries/net/include/graphene/net/config.hpp +++ b/libraries/net/include/graphene/net/config.hpp @@ -49,6 +49,9 @@ #define GRAPHENE_NET_PEER_DISCONNECT_TIMEOUT 20 +/* uncomment next line to use testnet seed ip and port */ +//#define GRAPHENE_TEST_NETWORK 1 + #define GRAPHENE_NET_TEST_SEED_IP "104.236.44.210" // autogenerated #define GRAPHENE_NET_TEST_P2P_PORT 1700 #define GRAPHENE_NET_DEFAULT_P2P_PORT 1776 @@ -103,3 +106,7 @@ #define GRAPHENE_NET_MIN_BLOCK_IDS_TO_PREFETCH 10000 #define GRAPHENE_NET_MAX_TRX_PER_SECOND 1000 + +#define GRAPHENE_NET_MAX_NESTED_OBJECTS (250) + +#define MAXIMUM_PEERDB_SIZE 1000 diff --git a/libraries/net/include/graphene/net/core_messages.hpp b/libraries/net/include/graphene/net/core_messages.hpp index 8af0c3443c..76f74bd253 100644 --- a/libraries/net/include/graphene/net/core_messages.hpp +++ b/libraries/net/include/graphene/net/core_messages.hpp @@ -95,9 +95,9 @@ namespace graphene { namespace net { { static const core_message_type_enum type; - signed_transaction trx; + graphene::chain::precomputable_transaction trx; trx_message() {} - trx_message(signed_transaction transaction) : + trx_message(graphene::chain::signed_transaction transaction) : trx(std::move(transaction)) {} }; diff --git a/libraries/net/include/graphene/net/node.hpp b/libraries/net/include/graphene/net/node.hpp index 1e01904f65..fd111ce879 100644 --- a/libraries/net/include/graphene/net/node.hpp +++ b/libraries/net/include/graphene/net/node.hpp @@ -158,9 +158,6 @@ namespace graphene { namespace net { */ virtual fc::time_point_sec get_block_time(const item_hash_t& block_id) = 0; - /** returns graphene::blockchain::now() */ - virtual fc::time_point_sec get_blockchain_now() = 0; - virtual item_hash_t get_head_block_id() const = 0; virtual uint32_t estimate_last_known_fork_from_git_revision_timestamp(uint32_t unix_timestamp) const = 0; @@ -196,7 +193,7 @@ namespace graphene { namespace net { { public: node(const std::string& user_agent); - ~node(); + virtual ~node(); void close(); diff --git a/libraries/net/include/graphene/net/peer_connection.hpp b/libraries/net/include/graphene/net/peer_connection.hpp index 7cfa316a3f..b6c24ef3de 100644 --- a/libraries/net/include/graphene/net/peer_connection.hpp +++ b/libraries/net/include/graphene/net/peer_connection.hpp @@ -226,7 +226,8 @@ namespace graphene { namespace net bool peer_needs_sync_items_from_us; bool we_need_sync_items_from_peer; fc::optional, fc::time_point> > item_ids_requested_from_peer; /// we check this to detect a timed-out request and in busy() - item_to_time_map_type sync_items_requested_from_peer; /// ids of 
blocks we've requested from this peer during sync. fetch from another peer if this peer disconnects + fc::time_point last_sync_item_received_time; /// the time we received the last sync item or the time we sent the last batch of sync item requests to this peer + std::set sync_items_requested_from_peer; /// ids of blocks we've requested from this peer during sync. fetch from another peer if this peer disconnects item_hash_t last_block_delegate_has_seen; /// the hash of the last block this peer has told us about that the peer knows fc::time_point_sec last_block_time_delegate_has_seen; bool inhibit_fetching_sync_blocks; @@ -269,6 +270,7 @@ namespace graphene { namespace net fc::thread* _thread; unsigned _send_message_queue_tasks_running; // temporary debugging #endif + bool _currently_handling_message; // true while we're in the middle of handling a message from the remote system private: peer_connection(peer_connection_delegate* delegate); void destroy(); @@ -299,8 +301,9 @@ namespace graphene { namespace net fc::ip::endpoint get_local_endpoint(); void set_remote_endpoint(fc::optional new_remote_endpoint); - bool busy(); - bool idle(); + bool busy() const; + bool idle() const; + bool is_currently_handling_message() const; bool is_transaction_fetching_inhibited() const; fc::sha512 get_shared_secret() const; diff --git a/libraries/net/message_oriented_connection.cpp b/libraries/net/message_oriented_connection.cpp index 5808a03881..858f615d3f 100644 --- a/libraries/net/message_oriented_connection.cpp +++ b/libraries/net/message_oriented_connection.cpp @@ -180,8 +180,8 @@ namespace graphene { namespace net { _delegate->on_message(_self, m); } /// Dedicated catches needed to distinguish from general fc::exception - catch ( const fc::canceled_exception& e ) { throw e; } - catch ( const fc::eof_exception& e ) { throw e; } + catch ( const fc::canceled_exception& e ) { throw; } + catch ( const fc::eof_exception& e ) { throw; } catch ( const fc::exception& e) { /// Here loop should be continued so exception should be just caught locally. 
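Note on the hunk above in message_oriented_connection.cpp: it replaces `throw e;` with a bare `throw;` in the dedicated catch clauses for fc::canceled_exception and fc::eof_exception. The difference matters because `throw e;` throws a fresh copy of the caught object at the declared (static) type of `e`, discarding any more-derived dynamic type, while `throw;` re-raises the original exception object unchanged. The following standalone sketch uses only the standard library (it is an assumed example, not code from this patch) to show the two behaviours side by side:

```
// Standalone illustration (assumed example, not from the patch): rethrowing
// with "throw e;" copies the exception at its static type, while "throw;"
// re-raises the original object with its dynamic type intact.
#include <iostream>
#include <stdexcept>

// Stand-in for a "special" exception type (analogous to fc::canceled_exception)
// that outer code wants to recognise by its dynamic type.
struct canceled_error : std::runtime_error
{
    canceled_error() : std::runtime_error("canceled") {}
};

template<typename Rethrow>
void run(Rethrow rethrow)
{
    try
    {
        try
        {
            throw canceled_error();
        }
        catch (const std::runtime_error& e)
        {
            // The caller supplies either "throw e;" or "throw;".
            rethrow(e);
        }
    }
    catch (const canceled_error&)
    {
        std::cout << "caught canceled_error (dynamic type preserved)\n";
    }
    catch (const std::runtime_error&)
    {
        std::cout << "caught plain runtime_error (original type lost)\n";
    }
}

int main()
{
    run([](const std::runtime_error& e) { throw e; });  // prints: original type lost
    run([](const std::runtime_error&)   { throw; });    // prints: dynamic type preserved
    return 0;
}
```

Under this reading, the patched catch clauses keep their only purpose: letting fc::canceled_exception and fc::eof_exception propagate exactly as they were thrown, rather than as fresh copies.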
diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 6fb212c7f3..b2fc5009b7 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -69,7 +69,6 @@ #include #include #include -#include #include #include @@ -248,520 +247,9 @@ FC_REFLECT(graphene::net::detail::node_configuration, (listen_endpoint) (wait_if_endpoint_is_busy) (private_key)); -namespace graphene { namespace net { namespace detail { - - // when requesting items from peers, we want to prioritize any blocks before - // transactions, but otherwise request items in the order we heard about them - struct prioritized_item_id - { - item_id item; - unsigned sequence_number; - fc::time_point timestamp; // the time we last heard about this item in an inventory message - - prioritized_item_id(const item_id& item, unsigned sequence_number) : - item(item), - sequence_number(sequence_number), - timestamp(fc::time_point::now()) - {} - bool operator<(const prioritized_item_id& rhs) const - { - static_assert(graphene::net::block_message_type > graphene::net::trx_message_type, - "block_message_type must be greater than trx_message_type for prioritized_item_ids to sort correctly"); - if (item.item_type != rhs.item.item_type) - return item.item_type > rhs.item.item_type; - return (signed)(rhs.sequence_number - sequence_number) > 0; - } - }; - -///////////////////////////////////////////////////////////////////////////////////////////////////////// - class statistics_gathering_node_delegate_wrapper : public node_delegate - { - private: - node_delegate *_node_delegate; - fc::thread *_thread; - - typedef boost::accumulators::accumulator_set > call_stats_accumulator; -#define NODE_DELEGATE_METHOD_NAMES (has_item) \ - (handle_message) \ - (handle_block) \ - (handle_transaction) \ - (get_block_ids) \ - (get_item) \ - (get_chain_id) \ - (get_blockchain_synopsis) \ - (sync_status) \ - (connection_count_changed) \ - (get_block_number) \ - (get_block_time) \ - (get_head_block_id) \ - (estimate_last_known_fork_from_git_revision_timestamp) \ - (error_encountered) \ - (get_current_block_interval_in_seconds) - - -#define DECLARE_ACCUMULATOR(r, data, method_name) \ - mutable call_stats_accumulator BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _execution_accumulator)); \ - mutable call_stats_accumulator BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _delay_before_accumulator)); \ - mutable call_stats_accumulator BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _delay_after_accumulator)); - BOOST_PP_SEQ_FOR_EACH(DECLARE_ACCUMULATOR, unused, NODE_DELEGATE_METHOD_NAMES) -#undef DECLARE_ACCUMULATOR - - class call_statistics_collector - { - private: - fc::time_point _call_requested_time; - fc::time_point _begin_execution_time; - fc::time_point _execution_completed_time; - const char* _method_name; - call_stats_accumulator* _execution_accumulator; - call_stats_accumulator* _delay_before_accumulator; - call_stats_accumulator* _delay_after_accumulator; - public: - class actual_execution_measurement_helper - { - call_statistics_collector &_collector; - public: - actual_execution_measurement_helper(call_statistics_collector& collector) : - _collector(collector) - { - _collector.starting_execution(); - } - ~actual_execution_measurement_helper() - { - _collector.execution_completed(); - } - }; - call_statistics_collector(const char* method_name, - call_stats_accumulator* execution_accumulator, - call_stats_accumulator* delay_before_accumulator, - call_stats_accumulator* delay_after_accumulator) : - _call_requested_time(fc::time_point::now()), - 
_method_name(method_name), - _execution_accumulator(execution_accumulator), - _delay_before_accumulator(delay_before_accumulator), - _delay_after_accumulator(delay_after_accumulator) - {} - ~call_statistics_collector() - { - fc::time_point end_time(fc::time_point::now()); - fc::microseconds actual_execution_time(_execution_completed_time - _begin_execution_time); - fc::microseconds delay_before(_begin_execution_time - _call_requested_time); - fc::microseconds delay_after(end_time - _execution_completed_time); - fc::microseconds total_duration(actual_execution_time + delay_before + delay_after); - (*_execution_accumulator)(actual_execution_time.count()); - (*_delay_before_accumulator)(delay_before.count()); - (*_delay_after_accumulator)(delay_after.count()); - if (total_duration > fc::milliseconds(500)) - { - ilog("Call to method node_delegate::${method} took ${total_duration}us, longer than our target maximum of 500ms", - ("method", _method_name) - ("total_duration", total_duration.count())); - ilog("Actual execution took ${execution_duration}us, with a ${delegate_delay}us delay before the delegate thread started " - "executing the method, and a ${p2p_delay}us delay after it finished before the p2p thread started processing the response", - ("execution_duration", actual_execution_time) - ("delegate_delay", delay_before) - ("p2p_delay", delay_after)); - } - } - void starting_execution() - { - _begin_execution_time = fc::time_point::now(); - } - void execution_completed() - { - _execution_completed_time = fc::time_point::now(); - } - }; - public: - statistics_gathering_node_delegate_wrapper(node_delegate* delegate, fc::thread* thread_for_delegate_calls); - - fc::variant_object get_call_statistics(); - - bool has_item( const net::item_id& id ) override; - void handle_message( const message& ) override; - bool handle_block( const graphene::net::block_message& block_message, bool sync_mode, std::vector& contained_transaction_message_ids ) override; - void handle_transaction( const graphene::net::trx_message& transaction_message ) override; - std::vector get_block_ids(const std::vector& blockchain_synopsis, - uint32_t& remaining_item_count, - uint32_t limit = 2000) override; - message get_item( const item_id& id ) override; - chain_id_type get_chain_id() const override; - std::vector get_blockchain_synopsis(const item_hash_t& reference_point, - uint32_t number_of_blocks_after_reference_point) override; - void sync_status( uint32_t item_type, uint32_t item_count ) override; - void connection_count_changed( uint32_t c ) override; - uint32_t get_block_number(const item_hash_t& block_id) override; - fc::time_point_sec get_block_time(const item_hash_t& block_id) override; - fc::time_point_sec get_blockchain_now() override; - item_hash_t get_head_block_id() const override; - uint32_t estimate_last_known_fork_from_git_revision_timestamp(uint32_t unix_timestamp) const override; - void error_encountered(const std::string& message, const fc::oexception& error) override; - uint8_t get_current_block_interval_in_seconds() const override; - }; - -///////////////////////////////////////////////////////////////////////////////////////////////////////// - - class node_impl : public peer_connection_delegate - { - public: -#ifdef P2P_IN_DEDICATED_THREAD - std::shared_ptr _thread; -#endif // P2P_IN_DEDICATED_THREAD - std::unique_ptr _delegate; - fc::sha256 _chain_id; - -#define NODE_CONFIGURATION_FILENAME "node_config.json" -#define POTENTIAL_PEER_DATABASE_FILENAME "peers.json" - fc::path 
_node_configuration_directory; - node_configuration _node_configuration; - - /// stores the endpoint we're listening on. This will be the same as - // _node_configuration.listen_endpoint, unless that endpoint was already - // in use. - fc::ip::endpoint _actual_listening_endpoint; - - /// we determine whether we're firewalled by asking other nodes. Store the result here: - firewalled_state _is_firewalled; - /// if we're behind NAT, our listening endpoint address will appear different to the rest of the world. store it here. - fc::optional _publicly_visible_listening_endpoint; - fc::time_point _last_firewall_check_message_sent; - - /// used by the task that manages connecting to peers - // @{ - std::list _add_once_node_list; /// list of peers we want to connect to as soon as possible - - peer_database _potential_peer_db; - fc::promise::ptr _retrigger_connect_loop_promise; - bool _potential_peer_database_updated; - fc::future _p2p_network_connect_loop_done; - // @} - - /// used by the task that fetches sync items during synchronization - // @{ - fc::promise::ptr _retrigger_fetch_sync_items_loop_promise; - bool _sync_items_to_fetch_updated; - fc::future _fetch_sync_items_loop_done; - - typedef std::unordered_map active_sync_requests_map; - - active_sync_requests_map _active_sync_requests; /// list of sync blocks we've asked for from peers but have not yet received - std::list _new_received_sync_items; /// list of sync blocks we've just received but haven't yet tried to process - std::list _received_sync_items; /// list of sync blocks we've received, but can't yet process because we are still missing blocks that come earlier in the chain - // @} - - fc::future _process_backlog_of_sync_blocks_done; - bool _suspend_fetching_sync_blocks; - - /// used by the task that fetches items during normal operation - // @{ - fc::promise::ptr _retrigger_fetch_item_loop_promise; - bool _items_to_fetch_updated; - fc::future _fetch_item_loop_done; - - struct item_id_index{}; - typedef boost::multi_index_container >, - boost::multi_index::hashed_unique, - boost::multi_index::member, - std::hash > > - > items_to_fetch_set_type; - unsigned _items_to_fetch_sequence_counter; - items_to_fetch_set_type _items_to_fetch; /// list of items we know another peer has and we want - peer_connection::timestamped_items_set_type _recently_failed_items; /// list of transactions we've recently pushed and had rejected by the delegate - // @} - - /// used by the task that advertises inventory during normal operation - // @{ - fc::promise::ptr _retrigger_advertise_inventory_loop_promise; - fc::future _advertise_inventory_loop_done; - std::unordered_set _new_inventory; /// list of items we have received but not yet advertised to our peers - // @} - - fc::future _terminate_inactive_connections_loop_done; - uint8_t _recent_block_interval_in_seconds; // a cached copy of the block interval, to avoid a thread hop to the blockchain to get the current value - - std::string _user_agent_string; - /** _node_public_key is a key automatically generated when the client is first run, stored in - * node_config.json. It doesn't really have much of a purpose yet, there was just some thought - * that we might someday have a use for nodes having a private key (sent in hello messages) - */ - node_id_t _node_public_key; - /** - * _node_id is a random number generated each time the client is launched, used to prevent us - * from connecting to the same client multiple times (sent in hello messages). 
- * Since this was introduced after the hello_message was finalized, this is sent in the - * user_data field. - * While this shares the same underlying type as a public key, it is really just a random - * number. - */ - node_id_t _node_id; - - /** if we have less than `_desired_number_of_connections`, we will try to connect with more nodes */ - uint32_t _desired_number_of_connections; - /** if we have _maximum_number_of_connections or more, we will refuse any inbound connections */ - uint32_t _maximum_number_of_connections; - /** retry connections to peers that have failed or rejected us this often, in seconds */ - uint32_t _peer_connection_retry_timeout; - /** how many seconds of inactivity are permitted before disconnecting a peer */ - uint32_t _peer_inactivity_timeout; - - fc::tcp_server _tcp_server; - fc::future _accept_loop_complete; - - /** Stores all connections which have not yet finished key exchange or are still sending initial handshaking messages - * back and forth (not yet ready to initiate syncing) */ - std::unordered_set _handshaking_connections; - /** stores fully established connections we're either syncing with or in normal operation with */ - std::unordered_set _active_connections; - /** stores connections we've closed (sent closing message, not actually closed), but are still waiting for the remote end to close before we delete them */ - std::unordered_set _closing_connections; - /** stores connections we've closed, but are still waiting for the OS to notify us that the socket is really closed */ - std::unordered_set _terminating_connections; - - boost::circular_buffer _most_recent_blocks_accepted; // the /n/ most recent blocks we've accepted (currently tuned to the max number of connections) - - uint32_t _sync_item_type; - uint32_t _total_number_of_unfetched_items; /// the number of items we still need to fetch while syncing - std::vector _hard_fork_block_numbers; /// list of all block numbers where there are hard forks - - blockchain_tied_message_cache _message_cache; /// cache message we have received and might be required to provide to other peers via inventory requests - - fc::rate_limiting_group _rate_limiter; - - uint32_t _last_reported_number_of_connections; // number of connections last reported to the client (to avoid sending duplicate messages) - - bool _peer_advertising_disabled; - - fc::future _fetch_updated_peer_lists_loop_done; - - boost::circular_buffer _average_network_read_speed_seconds; - boost::circular_buffer _average_network_write_speed_seconds; - boost::circular_buffer _average_network_read_speed_minutes; - boost::circular_buffer _average_network_write_speed_minutes; - boost::circular_buffer _average_network_read_speed_hours; - boost::circular_buffer _average_network_write_speed_hours; - unsigned _average_network_usage_second_counter; - unsigned _average_network_usage_minute_counter; - - fc::time_point_sec _bandwidth_monitor_last_update_time; - fc::future _bandwidth_monitor_loop_done; - - fc::future _dump_node_status_task_done; - - /* We have two alternate paths through the schedule_peer_for_deletion code -- one that - * uses a mutex to prevent one fiber from adding items to the queue while another is deleting - * items from it, and one that doesn't. The one that doesn't is simpler and more efficient - * code, but we're keeping around the version that uses the mutex because it crashes, and - * this crash probably indicates a bug in our underlying threading code that needs - * fixing. 
To produce the bug, define USE_PEERS_TO_DELETE_MUTEX and then connect up - * to the network and set your desired/max connection counts high - */ -//#define USE_PEERS_TO_DELETE_MUTEX 1 -#ifdef USE_PEERS_TO_DELETE_MUTEX - fc::mutex _peers_to_delete_mutex; -#endif - std::list _peers_to_delete; - fc::future _delayed_peer_deletion_task_done; - -#ifdef ENABLE_P2P_DEBUGGING_API - std::set _allowed_peers; -#endif // ENABLE_P2P_DEBUGGING_API - - bool _node_is_shutting_down; // set to true when we begin our destructor, used to prevent us from starting new tasks while we're shutting down - - unsigned _maximum_number_of_blocks_to_handle_at_one_time; - unsigned _maximum_number_of_sync_blocks_to_prefetch; - unsigned _maximum_blocks_per_peer_during_syncing; - - std::list > _handle_message_calls_in_progress; - - node_impl(const std::string& user_agent); - virtual ~node_impl(); - - void save_node_configuration(); - - void p2p_network_connect_loop(); - void trigger_p2p_network_connect_loop(); - - bool have_already_received_sync_item( const item_hash_t& item_hash ); - void request_sync_item_from_peer( const peer_connection_ptr& peer, const item_hash_t& item_to_request ); - void request_sync_items_from_peer( const peer_connection_ptr& peer, const std::vector& items_to_request ); - void fetch_sync_items_loop(); - void trigger_fetch_sync_items_loop(); - - bool is_item_in_any_peers_inventory(const item_id& item) const; - void fetch_items_loop(); - void trigger_fetch_items_loop(); - - void advertise_inventory_loop(); - void trigger_advertise_inventory_loop(); - - void terminate_inactive_connections_loop(); +#include "node_impl.hxx" - void fetch_updated_peer_lists_loop(); - void update_bandwidth_data(uint32_t bytes_read_this_second, uint32_t bytes_written_this_second); - void bandwidth_monitor_loop(); - void dump_node_status_task(); - - bool is_accepting_new_connections(); - bool is_wanting_new_connections(); - uint32_t get_number_of_connections(); - peer_connection_ptr get_peer_by_node_id(const node_id_t& id); - - bool is_already_connected_to_id(const node_id_t& node_id); - bool merge_address_info_with_potential_peer_database( const std::vector addresses ); - void display_current_connections(); - uint32_t calculate_unsynced_block_count_from_all_peers(); - std::vector create_blockchain_synopsis_for_peer( const peer_connection* peer ); - void fetch_next_batch_of_item_ids_from_peer( peer_connection* peer, bool reset_fork_tracking_data_for_peer = false ); - - fc::variant_object generate_hello_user_data(); - void parse_hello_user_data_for_peer( peer_connection* originating_peer, const fc::variant_object& user_data ); - - void on_message( peer_connection* originating_peer, - const message& received_message ) override; - - void on_hello_message( peer_connection* originating_peer, - const hello_message& hello_message_received ); - - void on_connection_accepted_message( peer_connection* originating_peer, - const connection_accepted_message& connection_accepted_message_received ); - - void on_connection_rejected_message( peer_connection* originating_peer, - const connection_rejected_message& connection_rejected_message_received ); - - void on_address_request_message( peer_connection* originating_peer, - const address_request_message& address_request_message_received ); - - void on_address_message( peer_connection* originating_peer, - const address_message& address_message_received ); - - void on_fetch_blockchain_item_ids_message( peer_connection* originating_peer, - const fetch_blockchain_item_ids_message& 
fetch_blockchain_item_ids_message_received ); - - void on_blockchain_item_ids_inventory_message( peer_connection* originating_peer, - const blockchain_item_ids_inventory_message& blockchain_item_ids_inventory_message_received ); - - void on_fetch_items_message( peer_connection* originating_peer, - const fetch_items_message& fetch_items_message_received ); - - void on_item_not_available_message( peer_connection* originating_peer, - const item_not_available_message& item_not_available_message_received ); - - void on_item_ids_inventory_message( peer_connection* originating_peer, - const item_ids_inventory_message& item_ids_inventory_message_received ); - - void on_closing_connection_message( peer_connection* originating_peer, - const closing_connection_message& closing_connection_message_received ); - - void on_current_time_request_message( peer_connection* originating_peer, - const current_time_request_message& current_time_request_message_received ); - - void on_current_time_reply_message( peer_connection* originating_peer, - const current_time_reply_message& current_time_reply_message_received ); - - void forward_firewall_check_to_next_available_peer(firewall_check_state_data* firewall_check_state); - - void on_check_firewall_message(peer_connection* originating_peer, - const check_firewall_message& check_firewall_message_received); - - void on_check_firewall_reply_message(peer_connection* originating_peer, - const check_firewall_reply_message& check_firewall_reply_message_received); - - void on_get_current_connections_request_message(peer_connection* originating_peer, - const get_current_connections_request_message& get_current_connections_request_message_received); - - void on_get_current_connections_reply_message(peer_connection* originating_peer, - const get_current_connections_reply_message& get_current_connections_reply_message_received); - - void on_connection_closed(peer_connection* originating_peer) override; - - void send_sync_block_to_node_delegate(const graphene::net::block_message& block_message_to_send); - void process_backlog_of_sync_blocks(); - void trigger_process_backlog_of_sync_blocks(); - void process_block_during_sync(peer_connection* originating_peer, const graphene::net::block_message& block_message, const message_hash_type& message_hash); - void process_block_during_normal_operation(peer_connection* originating_peer, const graphene::net::block_message& block_message, const message_hash_type& message_hash); - void process_block_message(peer_connection* originating_peer, const message& message_to_process, const message_hash_type& message_hash); - - void process_ordinary_message(peer_connection* originating_peer, const message& message_to_process, const message_hash_type& message_hash); - - void start_synchronizing(); - void start_synchronizing_with_peer(const peer_connection_ptr& peer); - - void new_peer_just_added(const peer_connection_ptr& peer); /// called after a peer finishes handshaking, kicks off syncing - - void close(); - - void accept_connection_task(peer_connection_ptr new_peer); - void accept_loop(); - void send_hello_message(const peer_connection_ptr& peer); - void connect_to_task(peer_connection_ptr new_peer, const fc::ip::endpoint& remote_endpoint); - bool is_connection_to_endpoint_in_progress(const fc::ip::endpoint& remote_endpoint); - - void move_peer_to_active_list(const peer_connection_ptr& peer); - void move_peer_to_closing_list(const peer_connection_ptr& peer); - void move_peer_to_terminating_list(const peer_connection_ptr& peer); - - 
peer_connection_ptr get_connection_to_endpoint( const fc::ip::endpoint& remote_endpoint ); - - void dump_node_status(); - - void delayed_peer_deletion_task(); - void schedule_peer_for_deletion(const peer_connection_ptr& peer_to_delete); - - void disconnect_from_peer( peer_connection* originating_peer, - const std::string& reason_for_disconnect, - bool caused_by_error = false, - const fc::oexception& additional_data = fc::oexception() ); - - // methods implementing node's public interface - void set_node_delegate(node_delegate* del, fc::thread* thread_for_delegate_calls); - void load_configuration( const fc::path& configuration_directory ); - void listen_to_p2p_network(); - void connect_to_p2p_network(); - void add_node( const fc::ip::endpoint& ep ); - void initiate_connect_to(const peer_connection_ptr& peer); - void connect_to_endpoint(const fc::ip::endpoint& ep); - void listen_on_endpoint(const fc::ip::endpoint& ep , bool wait_if_not_available); - void accept_incoming_connections(bool accept); - void listen_on_port( uint16_t port, bool wait_if_not_available ); - - fc::ip::endpoint get_actual_listening_endpoint() const; - std::vector get_connected_peers() const; - uint32_t get_connection_count() const; - - void broadcast(const message& item_to_broadcast, const message_propagation_data& propagation_data); - void broadcast(const message& item_to_broadcast); - void sync_from(const item_id& current_head_block, const std::vector& hard_fork_block_numbers); - bool is_connected() const; - std::vector get_potential_peers() const; - void set_advanced_node_parameters( const fc::variant_object& params ); - - fc::variant_object get_advanced_node_parameters(); - message_propagation_data get_transaction_propagation_data( const graphene::net::transaction_id_type& transaction_id ); - message_propagation_data get_block_propagation_data( const graphene::net::block_id_type& block_id ); - - node_id_t get_node_id() const; - void set_allowed_peers( const std::vector& allowed_peers ); - void clear_peer_database(); - void set_total_bandwidth_limit( uint32_t upload_bytes_per_second, uint32_t download_bytes_per_second ); - void disable_peer_advertising(); - fc::variant_object get_call_statistics() const; - message get_message_for_item(const item_id& item) override; - - fc::variant_object network_get_info() const; - fc::variant_object network_get_usage_stats() const; - - bool is_hard_fork_block(uint32_t block_number) const; - uint32_t get_next_known_hard_fork_block_number(uint32_t block_number) const; - }; // end class node_impl - - //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +namespace graphene { namespace net { namespace detail { void node_impl_deleter::operator()(node_impl* impl_to_delete) { @@ -828,7 +316,7 @@ namespace graphene { namespace net { namespace detail { _maximum_blocks_per_peer_during_syncing(GRAPHENE_NET_MAX_BLOCKS_PER_PEER_DURING_SYNCING) { _rate_limiter.set_actual_rate_time_constant(fc::seconds(2)); - fc::rand_pseudo_bytes(&_node_id.data[0], (int)_node_id.size()); + fc::rand_bytes(&_node_id.data[0], (int)_node_id.size()); } node_impl::~node_impl() @@ -975,10 +463,7 @@ namespace graphene { namespace net { namespace detail { { throw; } - catch (const fc::exception& e) - { - elog("${e}", ("e", e)); - } + FC_CAPTURE_AND_LOG( (0) ) }// while(!canceled) } @@ -1006,8 +491,8 @@ namespace graphene { namespace 
net { namespace detail { dlog( "requesting item ${item_hash} from peer ${endpoint}", ("item_hash", item_to_request )("endpoint", peer->get_remote_endpoint() ) ); item_id item_id_to_request( graphene::net::block_message_type, item_to_request ); _active_sync_requests.insert( active_sync_requests_map::value_type(item_to_request, fc::time_point::now() ) ); - peer->sync_items_requested_from_peer.insert( peer_connection::item_to_time_map_type::value_type(item_id_to_request, fc::time_point::now() ) ); - std::vector items_to_fetch; + peer->last_sync_item_received_time = fc::time_point::now(); + peer->sync_items_requested_from_peer.insert(item_to_request); peer->send_message( fetch_items_message(item_id_to_request.item_type, std::vector{item_id_to_request.item_hash} ) ); } @@ -1019,8 +504,8 @@ namespace graphene { namespace net { namespace detail { for (const item_hash_t& item_to_request : items_to_request) { _active_sync_requests.insert( active_sync_requests_map::value_type(item_to_request, fc::time_point::now() ) ); - item_id item_id_to_request( graphene::net::block_message_type, item_to_request ); - peer->sync_items_requested_from_peer.insert( peer_connection::item_to_time_map_type::value_type(item_id_to_request, fc::time_point::now() ) ); + peer->last_sync_item_received_time = fc::time_point::now(); + peer->sync_items_requested_from_peer.insert(item_to_request); } peer->send_message(fetch_items_message(graphene::net::block_message_type, items_to_request)); } @@ -1253,7 +738,7 @@ namespace graphene { namespace net { namespace detail { for (const peer_connection_ptr& peer : _active_connections) { // only advertise to peers who are in sync with us - wdump((peer->peer_needs_sync_items_from_us)); + idump((peer->peer_needs_sync_items_from_us)); if( !peer->peer_needs_sync_items_from_us ) { std::map > items_to_advertise_by_type; @@ -1261,16 +746,14 @@ namespace graphene { namespace net { namespace detail { // or anything it has advertised to us // group the items we need to send by type, because we'll need to send one inventory message per type unsigned total_items_to_send_to_this_peer = 0; - wdump((inventory_to_advertise)); + idump((inventory_to_advertise)); for (const item_id& item_to_advertise : inventory_to_advertise) { - if (peer->inventory_advertised_to_peer.find(item_to_advertise) != peer->inventory_advertised_to_peer.end() ) - wdump((*peer->inventory_advertised_to_peer.find(item_to_advertise))); - if (peer->inventory_peer_advertised_to_us.find(item_to_advertise) != peer->inventory_peer_advertised_to_us.end() ) - wdump((*peer->inventory_peer_advertised_to_us.find(item_to_advertise))); + auto adv_to_peer = peer->inventory_advertised_to_peer.find(item_to_advertise); + auto adv_to_us = peer->inventory_peer_advertised_to_us.find(item_to_advertise); - if (peer->inventory_advertised_to_peer.find(item_to_advertise) == peer->inventory_advertised_to_peer.end() && - peer->inventory_peer_advertised_to_us.find(item_to_advertise) == peer->inventory_peer_advertised_to_us.end()) + if (adv_to_peer == peer->inventory_advertised_to_peer.end() && + adv_to_us == peer->inventory_peer_advertised_to_us.end()) { items_to_advertise_by_type[item_to_advertise.item_type].push_back(item_to_advertise.item_hash); peer->inventory_advertised_to_peer.insert(peer_connection::timestamped_item_id(item_to_advertise, fc::time_point::now())); @@ -1279,6 +762,13 @@ namespace graphene { namespace net { namespace detail { testnetlog("advertising transaction ${id} to peer ${endpoint}", ("id", item_to_advertise.item_hash)("endpoint", 
peer->get_remote_endpoint())); dlog("advertising item ${id} to peer ${endpoint}", ("id", item_to_advertise.item_hash)("endpoint", peer->get_remote_endpoint())); } + else + { + if (adv_to_peer != peer->inventory_advertised_to_peer.end() ) + idump( (*adv_to_peer) ); + if (adv_to_us != peer->inventory_peer_advertised_to_us.end() ) + idump( (*adv_to_us) ); + } } dlog("advertising ${count} new item(s) of ${types} type(s) to peer ${endpoint}", ("count", total_items_to_send_to_this_peer) @@ -1386,14 +876,13 @@ namespace graphene { namespace net { namespace detail { else { bool disconnect_due_to_request_timeout = false; - for (const peer_connection::item_to_time_map_type::value_type& item_and_time : active_peer->sync_items_requested_from_peer) - if (item_and_time.second < active_ignored_request_threshold) - { - wlog("Disconnecting peer ${peer} because they didn't respond to my request for sync item ${id}", - ("peer", active_peer->get_remote_endpoint())("id", item_and_time.first.item_hash)); - disconnect_due_to_request_timeout = true; - break; - } + if (!active_peer->sync_items_requested_from_peer.empty() && + active_peer->last_sync_item_received_time < active_ignored_request_threshold) + { + wlog("Disconnecting peer ${peer} because they haven't made any progress on my remaining ${count} sync item requests", + ("peer", active_peer->get_remote_endpoint())("count", active_peer->sync_items_requested_from_peer.size())); + disconnect_due_to_request_timeout = true; + } if (!disconnect_due_to_request_timeout && active_peer->item_ids_requested_from_peer && active_peer->item_ids_requested_from_peer->get<1>() < active_ignored_request_threshold) @@ -1425,6 +914,19 @@ namespace graphene { namespace net { namespace detail { wlog( "Sending a keepalive message to peer ${peer} who hasn't sent us any messages in the last ${timeout} seconds", ( "peer", active_peer->get_remote_endpoint() )("timeout", active_send_keepalive_timeout ) ); peers_to_send_keep_alive.push_back(active_peer); + } + else if (active_peer->we_need_sync_items_from_peer && + !active_peer->is_currently_handling_message() && + !active_peer->item_ids_requested_from_peer && + active_peer->ids_of_items_to_get.empty()) + { + // This is a state we should never get into in the first place, but if we do, we should disconnect the peer + // to re-establish the connection. 
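The hunks above replace the per-request `item_to_time_map_type` timestamps with a plain set of outstanding sync-item hashes plus a single `last_sync_item_received_time`, so the inactivity check only has to ask whether anything is outstanding and when the peer last made progress. A minimal standalone sketch of that bookkeeping, where `sync_peer` and `block_hash` are hypothetical stand-ins for the fc/graphene types:

```cpp
// Sketch of the simplified sync-request bookkeeping in this patch (standalone, not the
// BitShares sources): a set of outstanding hashes plus one "last progress" timestamp.
#include <chrono>
#include <cstdint>
#include <set>

using clock_type = std::chrono::steady_clock;
using block_hash = std::uint64_t;                  // stand-in for item_hash_t

struct sync_peer
{
    std::set<block_hash>   sync_items_requested;   // hashes we have asked this peer for
    clock_type::time_point last_sync_item_received_time{};

    void record_request(block_hash h)
    {
        // As in the patch, issuing a request also restarts the progress clock.
        last_sync_item_received_time = clock_type::now();
        sync_items_requested.insert(h);
    }

    void record_response(block_hash h)
    {
        sync_items_requested.erase(h);
        last_sync_item_received_time = clock_type::now();
    }

    // Disconnect candidate: requests outstanding, but no sync item received within the timeout.
    bool timed_out(std::chrono::seconds timeout) const
    {
        return !sync_items_requested.empty() &&
               clock_type::now() - last_sync_item_received_time > timeout;
    }
};
```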
+ fc_wlog(fc::logger::get("sync"), "Disconnecting peer ${peer} because we think we need blocks from them but sync has stalled.", + ("peer", active_peer->get_remote_endpoint())); + wlog("Disconnecting peer ${peer} because we think we need blocks from them but sync has stalled.", + ("peer", active_peer->get_remote_endpoint())); + peers_to_disconnect_forcibly.push_back(active_peer); } } } @@ -1844,10 +1346,10 @@ namespace graphene { namespace net { namespace detail { #endif user_data["bitness"] = sizeof(void*) * 8; - user_data["node_id"] = _node_id; + user_data["node_id"] = fc::variant( _node_id, 1 ); item_hash_t head_block_id = _delegate->get_head_block_id(); - user_data["last_known_block_hash"] = head_block_id; + user_data["last_known_block_hash"] = fc::variant( head_block_id, 1 ); user_data["last_known_block_number"] = _delegate->get_block_number(head_block_id); user_data["last_known_block_time"] = _delegate->get_block_time(head_block_id); @@ -1863,19 +1365,19 @@ namespace graphene { namespace net { namespace detail { if (user_data.contains("graphene_git_revision_sha")) originating_peer->graphene_git_revision_sha = user_data["graphene_git_revision_sha"].as_string(); if (user_data.contains("graphene_git_revision_unix_timestamp")) - originating_peer->graphene_git_revision_unix_timestamp = fc::time_point_sec(user_data["graphene_git_revision_unix_timestamp"].as()); + originating_peer->graphene_git_revision_unix_timestamp = fc::time_point_sec(user_data["graphene_git_revision_unix_timestamp"].as(1)); if (user_data.contains("fc_git_revision_sha")) originating_peer->fc_git_revision_sha = user_data["fc_git_revision_sha"].as_string(); if (user_data.contains("fc_git_revision_unix_timestamp")) - originating_peer->fc_git_revision_unix_timestamp = fc::time_point_sec(user_data["fc_git_revision_unix_timestamp"].as()); + originating_peer->fc_git_revision_unix_timestamp = fc::time_point_sec(user_data["fc_git_revision_unix_timestamp"].as(1)); if (user_data.contains("platform")) originating_peer->platform = user_data["platform"].as_string(); if (user_data.contains("bitness")) - originating_peer->bitness = user_data["bitness"].as(); + originating_peer->bitness = user_data["bitness"].as(1); if (user_data.contains("node_id")) - originating_peer->node_id = user_data["node_id"].as(); + originating_peer->node_id = user_data["node_id"].as(1); if (user_data.contains("last_known_fork_block_number")) - originating_peer->last_known_fork_block_number = user_data["last_known_fork_block_number"].as(); + originating_peer->last_known_fork_block_number = user_data["last_known_fork_block_number"].as(1); } void node_impl::on_hello_message( peer_connection* originating_peer, const hello_message& hello_message_received ) @@ -1885,7 +1387,7 @@ namespace graphene { namespace net { namespace detail { node_id_t peer_node_id = hello_message_received.node_public_key; try { - peer_node_id = hello_message_received.user_data["node_id"].as(); + peer_node_id = hello_message_received.user_data["node_id"].as(1); } catch (const fc::exception&) { @@ -2264,7 +1766,7 @@ namespace graphene { namespace net { namespace detail { bool disconnect_from_inhibited_peer = false; // if our client doesn't have any items after the item the peer requested, it will send back // a list containing the last item the peer requested - wdump((reply_message)(fetch_blockchain_item_ids_message_received.blockchain_synopsis)); + idump((reply_message)(fetch_blockchain_item_ids_message_received.blockchain_synopsis)); if( reply_message.item_hashes_available.empty() ) 
originating_peer->peer_needs_sync_items_from_us = false; /* I have no items in my blockchain */ else if( !fetch_blockchain_item_ids_message_received.blockchain_synopsis.empty() && @@ -2360,7 +1862,6 @@ namespace graphene { namespace net { namespace detail { { VERIFY_CORRECT_THREAD(); item_hash_t reference_point = peer->last_block_delegate_has_seen; - uint32_t reference_point_block_num = _delegate->get_block_number(peer->last_block_delegate_has_seen); // when we call _delegate->get_blockchain_synopsis(), we may yield and there's a // chance this peer's state will change before we get control back. Save off @@ -2520,180 +2021,203 @@ namespace graphene { namespace net { namespace detail { } originating_peer->item_ids_requested_from_peer.reset(); - dlog( "sync: received a list of ${count} available items from ${peer_endpoint}", - ( "count", blockchain_item_ids_inventory_message_received.item_hashes_available.size() ) - ( "peer_endpoint", originating_peer->get_remote_endpoint() ) ); - //for( const item_hash_t& item_hash : blockchain_item_ids_inventory_message_received.item_hashes_available ) - //{ - // dlog( "sync: ${hash}", ("hash", item_hash ) ); - //} - - // if the peer doesn't have any items after the one we asked for - if( blockchain_item_ids_inventory_message_received.total_remaining_item_count == 0 && - ( blockchain_item_ids_inventory_message_received.item_hashes_available.empty() || // there are no items in the peer's blockchain. this should only happen if our blockchain was empty when we requested, might want to verify that. - ( blockchain_item_ids_inventory_message_received.item_hashes_available.size() == 1 && - _delegate->has_item( item_id(blockchain_item_ids_inventory_message_received.item_type, - blockchain_item_ids_inventory_message_received.item_hashes_available.front() ) ) ) ) && // we've already seen the last item in the peer's blockchain - originating_peer->ids_of_items_to_get.empty() && - originating_peer->number_of_unfetched_item_ids == 0 ) // <-- is the last check necessary? + // if exceptions are throw after clearing the item_ids_requested_from_peer (above), + // it could leave our sync in a stalled state. Wrap a try/catch around the rest + // of the function so we can log if this ever happens. + try { - dlog( "sync: peer said we're up-to-date, entering normal operation with this peer" ); - originating_peer->we_need_sync_items_from_peer = false; + dlog( "sync: received a list of ${count} available items from ${peer_endpoint}", + ( "count", blockchain_item_ids_inventory_message_received.item_hashes_available.size() ) + ( "peer_endpoint", originating_peer->get_remote_endpoint() ) ); + //for( const item_hash_t& item_hash : blockchain_item_ids_inventory_message_received.item_hashes_available ) + //{ + // dlog( "sync: ${hash}", ("hash", item_hash ) ); + //} + + // if the peer doesn't have any items after the one we asked for + if( blockchain_item_ids_inventory_message_received.total_remaining_item_count == 0 && + ( blockchain_item_ids_inventory_message_received.item_hashes_available.empty() || // there are no items in the peer's blockchain. this should only happen if our blockchain was empty when we requested, might want to verify that. 
+ ( blockchain_item_ids_inventory_message_received.item_hashes_available.size() == 1 && + _delegate->has_item( item_id(blockchain_item_ids_inventory_message_received.item_type, + blockchain_item_ids_inventory_message_received.item_hashes_available.front() ) ) ) ) && // we've already seen the last item in the peer's blockchain + originating_peer->ids_of_items_to_get.empty() && + originating_peer->number_of_unfetched_item_ids == 0 ) // <-- is the last check necessary? + { + dlog( "sync: peer said we're up-to-date, entering normal operation with this peer" ); + originating_peer->we_need_sync_items_from_peer = false; - uint32_t new_number_of_unfetched_items = calculate_unsynced_block_count_from_all_peers(); - _total_number_of_unfetched_items = new_number_of_unfetched_items; - if( new_number_of_unfetched_items == 0 ) - _delegate->sync_status( blockchain_item_ids_inventory_message_received.item_type, 0 ); + uint32_t new_number_of_unfetched_items = calculate_unsynced_block_count_from_all_peers(); + _total_number_of_unfetched_items = new_number_of_unfetched_items; + if( new_number_of_unfetched_items == 0 ) + _delegate->sync_status( blockchain_item_ids_inventory_message_received.item_type, 0 ); - return; - } + return; + } - std::deque item_hashes_received( blockchain_item_ids_inventory_message_received.item_hashes_available.begin(), - blockchain_item_ids_inventory_message_received.item_hashes_available.end() ); - originating_peer->number_of_unfetched_item_ids = blockchain_item_ids_inventory_message_received.total_remaining_item_count; - // flush any items this peer sent us that we've already received and processed from another peer - if (!item_hashes_received.empty() && - originating_peer->ids_of_items_to_get.empty()) - { - bool is_first_item_for_other_peer = false; - for (const peer_connection_ptr& peer : _active_connections) - if (peer != originating_peer->shared_from_this() && - !peer->ids_of_items_to_get.empty() && - peer->ids_of_items_to_get.front() == blockchain_item_ids_inventory_message_received.item_hashes_available.front()) + std::deque item_hashes_received( blockchain_item_ids_inventory_message_received.item_hashes_available.begin(), + blockchain_item_ids_inventory_message_received.item_hashes_available.end() ); + originating_peer->number_of_unfetched_item_ids = blockchain_item_ids_inventory_message_received.total_remaining_item_count; + // flush any items this peer sent us that we've already received and processed from another peer + if (!item_hashes_received.empty() && + originating_peer->ids_of_items_to_get.empty()) + { + bool is_first_item_for_other_peer = false; + for (const peer_connection_ptr& peer : _active_connections) + if (peer != originating_peer->shared_from_this() && + !peer->ids_of_items_to_get.empty() && + peer->ids_of_items_to_get.front() == blockchain_item_ids_inventory_message_received.item_hashes_available.front()) + { + dlog("The item ${newitem} is the first item for peer ${peer}", + ("newitem", blockchain_item_ids_inventory_message_received.item_hashes_available.front()) + ("peer", peer->get_remote_endpoint())); + is_first_item_for_other_peer = true; + break; + } + dlog("is_first_item_for_other_peer: ${is_first}. 
item_hashes_received.size() = ${size}", + ("is_first", is_first_item_for_other_peer)("size", item_hashes_received.size())); + if (!is_first_item_for_other_peer) { - dlog("The item ${newitem} is the first item for peer ${peer}", - ("newitem", blockchain_item_ids_inventory_message_received.item_hashes_available.front()) - ("peer", peer->get_remote_endpoint())); - is_first_item_for_other_peer = true; - break; + while (!item_hashes_received.empty() && + _delegate->has_item(item_id(blockchain_item_ids_inventory_message_received.item_type, + item_hashes_received.front()))) + { + assert(item_hashes_received.front() != item_hash_t()); + originating_peer->last_block_delegate_has_seen = item_hashes_received.front(); + originating_peer->last_block_time_delegate_has_seen = _delegate->get_block_time(item_hashes_received.front()); + dlog("popping item because delegate has already seen it. peer ${peer}'s last block the delegate has seen is now ${block_id} (actual block #${actual_block_num})", + ("peer", originating_peer->get_remote_endpoint()) + ("block_id", originating_peer->last_block_delegate_has_seen) + ("actual_block_num", _delegate->get_block_number(item_hashes_received.front()))); + + item_hashes_received.pop_front(); + } + dlog("after removing all items we have already seen, item_hashes_received.size() = ${size}", ("size", item_hashes_received.size())); } - dlog("is_first_item_for_other_peer: ${is_first}. item_hashes_received.size() = ${size}", - ("is_first", is_first_item_for_other_peer)("size", item_hashes_received.size())); - if (!is_first_item_for_other_peer) + } + else if (!item_hashes_received.empty()) { - while (!item_hashes_received.empty() && - _delegate->has_item(item_id(blockchain_item_ids_inventory_message_received.item_type, - item_hashes_received.front()))) + // we received a list of items and we already have a list of items to fetch from this peer. + // In the normal case, this list will immediately follow the existing list, meaning the + // last hash of our existing list will match the first hash of the new list. + + // In the much less likely case, we've received a partial list of items from the peer, then + // the peer switched forks before sending us the remaining list. In this case, the first + // hash in the new list may not be the last hash in the existing list (it may be earlier, or + // it may not exist at all. + + // In either case, pop items off the back of our existing list until we find our first + // item, then append our list. + while (!originating_peer->ids_of_items_to_get.empty()) + { + if (item_hashes_received.front() != originating_peer->ids_of_items_to_get.back()) + originating_peer->ids_of_items_to_get.pop_back(); + else + break; + } + if (originating_peer->ids_of_items_to_get.empty()) { - assert(item_hashes_received.front() != item_hash_t()); + // this happens when the peer has switched forks between the last inventory message and + // this one, and there weren't any unfetched items in common + // We don't know where in the blockchain the new front() actually falls, all we can + // expect is that it is a block that we knew about because it should be one of the + // blocks we sent in the initial synopsis. + assert(_delegate->has_item(item_id(_sync_item_type, item_hashes_received.front()))); originating_peer->last_block_delegate_has_seen = item_hashes_received.front(); originating_peer->last_block_time_delegate_has_seen = _delegate->get_block_time(item_hashes_received.front()); - dlog("popping item because delegate has already seen it. 
peer ${peer}'s last block the delegate has seen is now ${block_id} (actual block #${actual_block_num})", - ("peer", originating_peer->get_remote_endpoint()) - ("block_id", originating_peer->last_block_delegate_has_seen) - ("actual_block_num", _delegate->get_block_number(item_hashes_received.front()))); - item_hashes_received.pop_front(); } - dlog("after removing all items we have already seen, item_hashes_received.size() = ${size}", ("size", item_hashes_received.size())); + else + { + // the common simple case: the new list extends the old. pop off the duplicate element + originating_peer->ids_of_items_to_get.pop_back(); + } } - } - else if (!item_hashes_received.empty()) - { - // we received a list of items and we already have a list of items to fetch from this peer. - // In the normal case, this list will immediately follow the existing list, meaning the - // last hash of our existing list will match the first hash of the new list. - - // In the much less likely case, we've received a partial list of items from the peer, then - // the peer switched forks before sending us the remaining list. In this case, the first - // hash in the new list may not be the last hash in the existing list (it may be earlier, or - // it may not exist at all. - - // In either case, pop items off the back of our existing list until we find our first - // item, then append our list. - while (!originating_peer->ids_of_items_to_get.empty()) + + if (!item_hashes_received.empty() && !originating_peer->ids_of_items_to_get.empty()) + assert(item_hashes_received.front() != originating_peer->ids_of_items_to_get.back()); + + // at any given time, there's a maximum number of blocks that can possibly be out there + // [(now - genesis time) / block interval]. If they offer us more blocks than that, + // they must be an attacker or have a buggy client. + fc::time_point_sec minimum_time_of_last_offered_block = + originating_peer->last_block_time_delegate_has_seen + // timestamp of the block immediately before the first unfetched block + originating_peer->number_of_unfetched_item_ids * GRAPHENE_MIN_BLOCK_INTERVAL; + fc::time_point_sec now = fc::time_point::now(); + if (minimum_time_of_last_offered_block > now + GRAPHENE_NET_FUTURE_SYNC_BLOCKS_GRACE_PERIOD_SEC) { - if (item_hashes_received.front() != originating_peer->ids_of_items_to_get.back()) - originating_peer->ids_of_items_to_get.pop_back(); - else - break; + wlog("Disconnecting from peer ${peer} who offered us an implausible number of blocks, their last block would be in the future (${timestamp})", + ("peer", originating_peer->get_remote_endpoint()) + ("timestamp", minimum_time_of_last_offered_block)); + fc::exception error_for_peer(FC_LOG_MESSAGE(error, "You offered me a list of more sync blocks than could possibly exist. 
Total blocks offered: ${blocks}, Minimum time of the last block you offered: ${minimum_time_of_last_offered_block}, Now: ${now}", + ("blocks", originating_peer->number_of_unfetched_item_ids) + ("minimum_time_of_last_offered_block", minimum_time_of_last_offered_block) + ("now", now))); + disconnect_from_peer(originating_peer, + "You offered me a list of more sync blocks than could possibly exist", + true, error_for_peer); + return; } - if (originating_peer->ids_of_items_to_get.empty()) + + // append the remaining items to the peer's list + boost::push_back(originating_peer->ids_of_items_to_get, item_hashes_received); + + uint32_t new_number_of_unfetched_items = calculate_unsynced_block_count_from_all_peers(); + if (new_number_of_unfetched_items != _total_number_of_unfetched_items) + _delegate->sync_status(blockchain_item_ids_inventory_message_received.item_type, + new_number_of_unfetched_items); + _total_number_of_unfetched_items = new_number_of_unfetched_items; + + if (blockchain_item_ids_inventory_message_received.total_remaining_item_count != 0) { - // this happens when the peer has switched forks between the last inventory message and - // this one, and there weren't any unfetched items in common - // We don't know where in the blockchain the new front() actually falls, all we can - // expect is that it is a block that we knew about because it should be one of the - // blocks we sent in the initial synopsis. - assert(_delegate->has_item(item_id(_sync_item_type, item_hashes_received.front()))); - originating_peer->last_block_delegate_has_seen = item_hashes_received.front(); - originating_peer->last_block_time_delegate_has_seen = _delegate->get_block_time(item_hashes_received.front()); - item_hashes_received.pop_front(); + // the peer hasn't sent us all the items it knows about. + if (originating_peer->ids_of_items_to_get.size() > GRAPHENE_NET_MIN_BLOCK_IDS_TO_PREFETCH) + { + // we have a good number of item ids from this peer, start fetching blocks from it; + // we'll switch back later to finish the job. + trigger_fetch_sync_items_loop(); + } + else + { + // keep fetching the peer's list of sync items until we get enough to switch into block- + // fetchimg mode + fetch_next_batch_of_item_ids_from_peer(originating_peer); + } } else { - // the common simple case: the new list extends the old. pop off the duplicate element - originating_peer->ids_of_items_to_get.pop_back(); + // the peer has told us about all of the items it knows + if (!originating_peer->ids_of_items_to_get.empty()) + { + // we now know about all of the items the peer knows about, and there are some items on the list + // that we should try to fetch. Kick off the fetch loop. + trigger_fetch_sync_items_loop(); + } + else + { + // If we get here, the peer has sent us a non-empty list of items, but we have already + // received all of the items from other peers. 
Send a new request to the peer to + // see if we're really in sync + fetch_next_batch_of_item_ids_from_peer(originating_peer); + } } } - - if (!item_hashes_received.empty() && !originating_peer->ids_of_items_to_get.empty()) - assert(item_hashes_received.front() != originating_peer->ids_of_items_to_get.back()); - - // append the remaining items to the peer's list - boost::push_back(originating_peer->ids_of_items_to_get, item_hashes_received); - - originating_peer->number_of_unfetched_item_ids = blockchain_item_ids_inventory_message_received.total_remaining_item_count; - - // at any given time, there's a maximum number of blocks that can possibly be out there - // [(now - genesis time) / block interval]. If they offer us more blocks than that, - // they must be an attacker or have a buggy client. - fc::time_point_sec minimum_time_of_last_offered_block = - originating_peer->last_block_time_delegate_has_seen + // timestamp of the block immediately before the first unfetched block - originating_peer->number_of_unfetched_item_ids * GRAPHENE_MIN_BLOCK_INTERVAL; - if (minimum_time_of_last_offered_block > _delegate->get_blockchain_now() + GRAPHENE_NET_FUTURE_SYNC_BLOCKS_GRACE_PERIOD_SEC) + catch (const fc::canceled_exception&) { - wlog("Disconnecting from peer ${peer} who offered us an implausible number of blocks, their last block would be in the future (${timestamp})", - ("peer", originating_peer->get_remote_endpoint()) - ("timestamp", minimum_time_of_last_offered_block)); - fc::exception error_for_peer(FC_LOG_MESSAGE(error, "You offered me a list of more sync blocks than could possibly exist. Total blocks offered: ${blocks}, Minimum time of the last block you offered: ${minimum_time_of_last_offered_block}, Now: ${now}", - ("blocks", originating_peer->number_of_unfetched_item_ids) - ("minimum_time_of_last_offered_block", minimum_time_of_last_offered_block) - ("now", _delegate->get_blockchain_now()))); - disconnect_from_peer(originating_peer, - "You offered me a list of more sync blocks than could possibly exist", - true, error_for_peer); - return; + throw; } - - uint32_t new_number_of_unfetched_items = calculate_unsynced_block_count_from_all_peers(); - if (new_number_of_unfetched_items != _total_number_of_unfetched_items) - _delegate->sync_status(blockchain_item_ids_inventory_message_received.item_type, - new_number_of_unfetched_items); - _total_number_of_unfetched_items = new_number_of_unfetched_items; - - if (blockchain_item_ids_inventory_message_received.total_remaining_item_count != 0) + catch (const fc::exception& e) { - // the peer hasn't sent us all the items it knows about. - if (originating_peer->ids_of_items_to_get.size() > GRAPHENE_NET_MIN_BLOCK_IDS_TO_PREFETCH) - { - // we have a good number of item ids from this peer, start fetching blocks from it; - // we'll switch back later to finish the job. - trigger_fetch_sync_items_loop(); - } - else - { - // keep fetching the peer's list of sync items until we get enough to switch into block- - // fetchimg mode - fetch_next_batch_of_item_ids_from_peer(originating_peer); - } + elog("Caught unexpected exception: ${e}", ("e", e)); + assert(false && "exceptions not expected here"); } - else + catch (const std::exception& e) { - // the peer has told us about all of the items it knows - if (!originating_peer->ids_of_items_to_get.empty()) - { - // we now know about all of the items the peer knows about, and there are some items on the list - // that we should try to fetch. Kick off the fetch loop. 
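The plausibility check in this hunk bounds how many sync blocks a peer may legitimately offer: with at most one block per GRAPHENE_MIN_BLOCK_INTERVAL, the newest offered block cannot be timestamped later than the current time plus a grace period (the patch also switches the reference clock from `_delegate->get_blockchain_now()` to `fc::time_point::now()`). A standalone sketch of the arithmetic, with illustrative constants rather than the project's configured values:

```cpp
// Sketch of the "implausible number of offered blocks" test (standalone; the interval and
// grace-period constants below are illustrative, not the GRAPHENE_* values).
#include <cstdint>
#include <ctime>
#include <iostream>

constexpr std::uint32_t MIN_BLOCK_INTERVAL_SEC  = 3;    // illustrative block interval
constexpr std::uint32_t FUTURE_BLOCKS_GRACE_SEC = 30;   // illustrative grace period

// True when the peer's offer cannot fit into real time and it should be disconnected.
bool offer_is_implausible(std::time_t last_block_time_seen,
                          std::uint32_t number_of_unfetched_items,
                          std::time_t now)
{
    // Earliest possible timestamp of the last offered block: one block per minimum
    // interval on top of the newest block we have already seen from this peer.
    const std::time_t minimum_time_of_last_offered_block =
        last_block_time_seen +
        static_cast<std::time_t>(number_of_unfetched_items) * MIN_BLOCK_INTERVAL_SEC;

    return minimum_time_of_last_offered_block > now + FUTURE_BLOCKS_GRACE_SEC;
}

int main()
{
    const std::time_t now = std::time(nullptr);
    std::cout << offer_is_implausible(now - 3600, 1000000, now) << '\n';  // 1: impossible offer
    std::cout << offer_is_implausible(now - 3600, 100, now)     << '\n';  // 0: plausible offer
}
```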
- trigger_fetch_sync_items_loop(); - } - else - { - // If we get here, the peer has sent us a non-empty list of items, but we have already - // received all of the items from other peers. Send a new request to the peer to - // see if we're really in sync - fetch_next_batch_of_item_ids_from_peer(originating_peer); - } + elog("Caught unexpected exception: ${e}", ("e", e.what())); + assert(false && "exceptions not expected here"); + } + catch (...) + { + elog("Caught unexpected exception, could break sync operation"); } } else @@ -2802,7 +2326,7 @@ namespace graphene { namespace net { namespace detail { return; } - auto sync_item_iter = originating_peer->sync_items_requested_from_peer.find(requested_item); + auto sync_item_iter = originating_peer->sync_items_requested_from_peer.find(requested_item.item_hash); if (sync_item_iter != originating_peer->sync_items_requested_from_peer.end()) { originating_peer->sync_items_requested_from_peer.erase(sync_item_iter); @@ -2812,7 +2336,7 @@ namespace graphene { namespace net { namespace detail { else disconnect_from_peer(originating_peer, "You are missing a sync item you claim to have, your database is probably corrupted. Try --rebuild-index.",true, fc::exception(FC_LOG_MESSAGE(error,"You are missing a sync item you claim to have, your database is probably corrupted. Try --rebuild-index.", - ("item_id",requested_item)))); + ("item_id", requested_item)))); wlog("Peer doesn't have the requested sync item. This really shouldn't happen"); trigger_fetch_sync_items_loop(); return; @@ -2896,12 +2420,12 @@ namespace graphene { namespace net { namespace detail { if( closing_connection_message_received.closing_due_to_error ) { - elog( "Peer ${peer} is disconnecting us because of an error: ${msg}, exception: ${error}", + wlog( "Peer ${peer} is disconnecting us because of an error: ${msg}, exception: ${error}", ( "peer", originating_peer->get_remote_endpoint() ) ( "msg", closing_connection_message_received.reason_for_closing ) ( "error", closing_connection_message_received.error ) ); std::ostringstream message; - message << "Peer " << fc::variant( originating_peer->get_remote_endpoint() ).as_string() << + message << "Peer " << fc::variant( originating_peer->get_remote_endpoint(), GRAPHENE_NET_MAX_NESTED_OBJECTS ).as_string() << " disconnected us: " << closing_connection_message_received.reason_for_closing; fc::exception detailed_error(FC_LOG_MESSAGE(warn, "Peer ${peer} is disconnecting us because of an error: ${msg}, exception: ${error}", ( "peer", originating_peer->get_remote_endpoint() ) @@ -2990,8 +2514,8 @@ namespace graphene { namespace net { namespace detail { // received yet, reschedule them to be fetched from another peer if (!originating_peer->sync_items_requested_from_peer.empty()) { - for (auto sync_item_and_time : originating_peer->sync_items_requested_from_peer) - _active_sync_requests.erase(sync_item_and_time.first.item_hash); + for (auto sync_item : originating_peer->sync_items_requested_from_peer) + _active_sync_requests.erase(sync_item); trigger_fetch_sync_items_loop(); } @@ -3270,7 +2794,33 @@ namespace graphene { namespace net { namespace detail { block_processed_this_iteration = true; } else + { dlog("Already received and accepted this block (presumably through normal inventory mechanism), treating it as accepted"); + std::vector< peer_connection_ptr > peers_needing_next_batch; + for (const peer_connection_ptr& peer : _active_connections) + { + auto items_being_processed_iter = 
peer->ids_of_items_being_processed.find(received_block_iter->block_id); + if (items_being_processed_iter != peer->ids_of_items_being_processed.end()) + { + peer->ids_of_items_being_processed.erase(items_being_processed_iter); + dlog("Removed item from ${endpoint}'s list of items being processed, still processing ${len} blocks", + ("endpoint", peer->get_remote_endpoint())("len", peer->ids_of_items_being_processed.size())); + + // if we just processed the last item in our list from this peer, we will want to + // send another request to find out if we are now in sync (this is normally handled in + // send_sync_block_to_node_delegate) + if (peer->ids_of_items_to_get.empty() && + peer->number_of_unfetched_item_ids == 0 && + peer->ids_of_items_being_processed.empty()) + { + dlog("We received last item in our list for peer ${endpoint}, setup to do a sync check", ("endpoint", peer->get_remote_endpoint())); + peers_needing_next_batch.push_back( peer ); + } + } + } + for( const peer_connection_ptr& peer : peers_needing_next_batch ) + fetch_next_batch_of_item_ids_from_peer(peer.get()); + } break; // start iterating _received_sync_items from the beginning } // end if potential_first_block @@ -3347,7 +2897,7 @@ namespace graphene { namespace net { namespace detail { bool new_transaction_discovered = false; for (const item_hash_t& transaction_message_hash : contained_transaction_message_ids) { - size_t items_erased = _items_to_fetch.get().erase(item_id(trx_message_type, transaction_message_hash)); + /*size_t items_erased =*/ _items_to_fetch.get().erase(item_id(trx_message_type, transaction_message_hash)); // there are two ways we could behave here: we could either act as if we received // the transaction outside the block and offer it to our peers, or we could just // forget about it (we would still advertise this block to our peers so they should @@ -3481,24 +3031,48 @@ namespace graphene { namespace net { namespace detail { else { // not during normal operation. see if we requested it during sync - auto sync_item_iter = originating_peer->sync_items_requested_from_peer.find(item_id(graphene::net::block_message_type, - block_message_to_process.block_id)); + auto sync_item_iter = originating_peer->sync_items_requested_from_peer.find( block_message_to_process.block_id); if (sync_item_iter != originating_peer->sync_items_requested_from_peer.end()) { originating_peer->sync_items_requested_from_peer.erase(sync_item_iter); - _active_sync_requests.erase(block_message_to_process.block_id); - process_block_during_sync(originating_peer, block_message_to_process, message_hash); - if (originating_peer->idle()) + // if exceptions are throw here after removing the sync item from the list (above), + // it could leave our sync in a stalled state. Wrap a try/catch around the rest + // of the function so we can log if this ever happens. + try { - // we have finished fetching a batch of items, so we either need to grab another batch of items - // or we need to get another list of item ids. 
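The guard added in this hunk wraps the remaining work in try/catch because, once the request has been erased from `sync_items_requested_from_peer`, an escaping exception would leave syncing stalled; only cancellation is rethrown, everything else is logged (and asserted on in debug builds). A minimal standalone sketch of that pattern, with `task_canceled` and `log_error` as hypothetical stand-ins for `fc::canceled_exception` and `elog`, and with the fc::exception and std::exception branches collapsed into one:

```cpp
// Sketch of the exception guard used around the sync bookkeeping in this patch (standalone).
#include <cassert>
#include <exception>
#include <iostream>

struct task_canceled : std::exception {};    // stand-in for fc::canceled_exception

inline void log_error(const char* what) { std::cerr << "unexpected exception: " << what << '\n'; }

template <typename Fn>
void run_sync_step_guarded(Fn&& step)
{
    try
    {
        step();
    }
    catch (const task_canceled&)
    {
        throw;                               // cancellation must still unwind the calling task
    }
    catch (const std::exception& e)
    {
        log_error(e.what());                 // swallow: propagating would stall the sync state
        assert(false && "exceptions not expected here");
    }
    catch (...)
    {
        log_error("unknown exception, could break sync operation");
    }
}
```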
- if (originating_peer->number_of_unfetched_item_ids > 0 && - originating_peer->ids_of_items_to_get.size() < GRAPHENE_NET_MIN_BLOCK_IDS_TO_PREFETCH) - fetch_next_batch_of_item_ids_from_peer(originating_peer); - else - trigger_fetch_sync_items_loop(); + originating_peer->last_sync_item_received_time = fc::time_point::now(); + _active_sync_requests.erase(block_message_to_process.block_id); + process_block_during_sync(originating_peer, block_message_to_process, message_hash); + if (originating_peer->idle()) + { + // we have finished fetching a batch of items, so we either need to grab another batch of items + // or we need to get another list of item ids. + if (originating_peer->number_of_unfetched_item_ids > 0 && + originating_peer->ids_of_items_to_get.size() < GRAPHENE_NET_MIN_BLOCK_IDS_TO_PREFETCH) + fetch_next_batch_of_item_ids_from_peer(originating_peer); + else + trigger_fetch_sync_items_loop(); + } + return; + } + catch (const fc::canceled_exception& e) + { + throw; + } + catch (const fc::exception& e) + { + elog("Caught unexpected exception: ${e}", ("e", e)); + assert(false && "exceptions not expected here"); + } + catch (const std::exception& e) + { + elog("Caught unexpected exception: ${e}", ("e", e.what())); + assert(false && "exceptions not expected here"); + } + catch (...) + { + elog("Caught unexpected exception, could break sync operation"); } - return; } } @@ -3757,7 +3331,7 @@ namespace graphene { namespace net { namespace detail { user_data["bitness"] = *peer->bitness; user_data["user_agent"] = peer->user_agent; - user_data["last_known_block_hash"] = peer->last_block_delegate_has_seen; + user_data["last_known_block_hash"] = fc::variant( peer->last_block_delegate_has_seen, 1 ); user_data["last_known_block_number"] = _delegate->get_block_number(peer->last_block_delegate_has_seen); user_data["last_known_block_time"] = peer->last_block_time_delegate_has_seen; @@ -4192,7 +3766,7 @@ namespace graphene { namespace net { namespace detail { // limit the rate at which we accept connections to mitigate DOS attacks fc::usleep( fc::milliseconds(10) ); - } FC_CAPTURE_AND_RETHROW() + } FC_CAPTURE_AND_LOG( (0) ) } } // accept_loop() @@ -4368,7 +3942,7 @@ namespace graphene { namespace net { namespace detail { { try { - _node_configuration = fc::json::from_file( configuration_file_name ).as(); + _node_configuration = fc::json::from_file( configuration_file_name ).as(GRAPHENE_NET_MAX_NESTED_OBJECTS); ilog( "Loaded configuration from file ${filename}", ("filename", configuration_file_name ) ); if( _node_configuration.private_key == fc::ecc::private_key() ) @@ -4393,7 +3967,7 @@ namespace graphene { namespace net { namespace detail { _node_configuration = detail::node_configuration(); #ifdef GRAPHENE_TEST_NETWORK - uint32_t port = GRAPHENE_NET_TEST_P2P_PORT + GRAPHENE_TEST_NETWORK_VERSION; + uint32_t port = GRAPHENE_NET_TEST_P2P_PORT; #else uint32_t port = GRAPHENE_NET_DEFAULT_P2P_PORT; #endif @@ -4481,7 +4055,7 @@ namespace graphene { namespace net { namespace detail { error_message_stream << "Unable to listen for connections on port " << listen_endpoint.port() << ", retrying in a few seconds\n"; error_message_stream << "You can wait for it to become available, or restart this program using\n"; - error_message_stream << "the --p2p-port option to specify another port\n"; + error_message_stream << "the --p2p-endpoint option to specify another port\n"; first = false; } else @@ -4489,7 +4063,8 @@ namespace graphene { namespace net { namespace detail { error_message_stream << "\nStill waiting for 
port " << listen_endpoint.port() << " to become available\n"; } std::string error_message = error_message_stream.str(); - ulog(error_message); + wlog(error_message); + std::cout << "\033[31m" << error_message; _delegate->error_encountered( error_message, fc::oexception() ); fc::usleep( fc::seconds(5 ) ); } @@ -4731,20 +4306,19 @@ namespace graphene { namespace net { namespace detail { peer_to_disconnect->send_message( closing_message ); } - // notify the user. This will be useful in testing, but we might want to remove it later; - // it makes good sense to notify the user if other nodes think she is behaving badly, but + // notify the user. This will be useful in testing, but we might want to remove it later. + // It makes good sense to notify the user if other nodes think she is behaving badly, but // if we're just detecting and dissconnecting other badly-behaving nodes, they don't really care. if (caused_by_error) { std::ostringstream error_message; - error_message << "I am disconnecting peer " << fc::variant( peer_to_disconnect->get_remote_endpoint() ).as_string() << + error_message << "I am disconnecting peer " << fc::variant( peer_to_disconnect->get_remote_endpoint(), GRAPHENE_NET_MAX_NESTED_OBJECTS ).as_string() << " for reason: " << reason_for_disconnect; _delegate->error_encountered(error_message.str(), fc::oexception()); dlog(error_message.str()); } else dlog("Disconnecting from ${peer} for ${reason}", ("peer",peer_to_disconnect->get_remote_endpoint()) ("reason",reason_for_disconnect)); - // peer_to_disconnect->close_connection(); } void node_impl::listen_on_endpoint( const fc::ip::endpoint& ep, bool wait_if_not_available ) @@ -4803,7 +4377,7 @@ namespace graphene { namespace net { namespace detail { peer_details["version"] = ""; peer_details["subver"] = peer->user_agent; peer_details["inbound"] = peer->direction == peer_connection_direction::inbound; - peer_details["firewall_status"] = peer->is_firewalled; + peer_details["firewall_status"] = fc::variant( peer->is_firewalled, 1 ); peer_details["startingheight"] = ""; peer_details["banscore"] = ""; peer_details["syncnode"] = ""; @@ -4837,7 +4411,7 @@ namespace graphene { namespace net { namespace detail { // provide these for debugging // warning: these are just approximations, if the peer is "downstream" of us, they may // have received blocks from other peers that we are unaware of - peer_details["current_head_block"] = peer->last_block_delegate_has_seen; + peer_details["current_head_block"] = fc::variant( peer->last_block_delegate_has_seen, 1 ); peer_details["current_head_block_number"] = _delegate->get_block_number(peer->last_block_delegate_has_seen); peer_details["current_head_block_time"] = peer->last_block_time_delegate_has_seen; @@ -4913,17 +4487,17 @@ namespace graphene { namespace net { namespace detail { { VERIFY_CORRECT_THREAD(); if (params.contains("peer_connection_retry_timeout")) - _peer_connection_retry_timeout = params["peer_connection_retry_timeout"].as(); + _peer_connection_retry_timeout = params["peer_connection_retry_timeout"].as(1); if (params.contains("desired_number_of_connections")) - _desired_number_of_connections = params["desired_number_of_connections"].as(); + _desired_number_of_connections = params["desired_number_of_connections"].as(1); if (params.contains("maximum_number_of_connections")) - _maximum_number_of_connections = params["maximum_number_of_connections"].as(); + _maximum_number_of_connections = params["maximum_number_of_connections"].as(1); if 
(params.contains("maximum_number_of_blocks_to_handle_at_one_time")) - _maximum_number_of_blocks_to_handle_at_one_time = params["maximum_number_of_blocks_to_handle_at_one_time"].as(); + _maximum_number_of_blocks_to_handle_at_one_time = params["maximum_number_of_blocks_to_handle_at_one_time"].as(1); if (params.contains("maximum_number_of_sync_blocks_to_prefetch")) - _maximum_number_of_sync_blocks_to_prefetch = params["maximum_number_of_sync_blocks_to_prefetch"].as(); + _maximum_number_of_sync_blocks_to_prefetch = params["maximum_number_of_sync_blocks_to_prefetch"].as(1); if (params.contains("maximum_blocks_per_peer_during_syncing")) - _maximum_blocks_per_peer_during_syncing = params["maximum_blocks_per_peer_during_syncing"].as(); + _maximum_blocks_per_peer_during_syncing = params["maximum_blocks_per_peer_during_syncing"].as(1); _desired_number_of_connections = std::min(_desired_number_of_connections, _maximum_number_of_connections); @@ -5008,9 +4582,9 @@ namespace graphene { namespace net { namespace detail { VERIFY_CORRECT_THREAD(); fc::mutable_variant_object info; info["listening_on"] = _actual_listening_endpoint; - info["node_public_key"] = _node_public_key; - info["node_id"] = _node_id; - info["firewalled"] = _is_firewalled; + info["node_public_key"] = fc::variant( _node_public_key, 1 ); + info["node_id"] = fc::variant( _node_id, 1 ); + info["firewalled"] = fc::variant( _is_firewalled, 1 ); return info; } fc::variant_object node_impl::network_get_usage_stats() const @@ -5038,9 +4612,9 @@ namespace graphene { namespace net { namespace detail { std::plus()); fc::mutable_variant_object result; - result["usage_by_second"] = network_usage_by_second; - result["usage_by_minute"] = network_usage_by_minute; - result["usage_by_hour"] = network_usage_by_hour; + result["usage_by_second"] = fc::variant( network_usage_by_second, 2 ); + result["usage_by_minute"] = fc::variant( network_usage_by_minute, 2 ); + result["usage_by_hour"] = fc::variant( network_usage_by_hour, 2 ); return result; } @@ -5447,14 +5021,6 @@ namespace graphene { namespace net { namespace detail { INVOKE_AND_COLLECT_STATISTICS(get_block_time, block_id); } - /** returns graphene::blockchain::now() */ - fc::time_point_sec statistics_gathering_node_delegate_wrapper::get_blockchain_now() - { - // this function doesn't need to block, - ASSERT_TASK_NOT_PREEMPTED(); - return _node_delegate->get_blockchain_now(); - } - item_hash_t statistics_gathering_node_delegate_wrapper::get_head_block_id() const { INVOKE_AND_COLLECT_STATISTICS(get_head_block_id); diff --git a/libraries/net/node_impl.hxx b/libraries/net/node_impl.hxx new file mode 100644 index 0000000000..6cebda8f8f --- /dev/null +++ b/libraries/net/node_impl.hxx @@ -0,0 +1,521 @@ +#pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace graphene { namespace net { namespace detail { + +// when requesting items from peers, we want to prioritize any blocks before +// transactions, but otherwise request items in the order we heard about them +struct prioritized_item_id +{ + item_id item; + unsigned sequence_number; + fc::time_point timestamp; // the time we last heard about this item in an inventory message + + prioritized_item_id(const item_id& item, unsigned sequence_number) : + item(item), + sequence_number(sequence_number), + timestamp(fc::time_point::now()) + {} + bool operator<(const prioritized_item_id& rhs) const + { + static_assert(graphene::net::block_message_type > graphene::net::trx_message_type, + "block_message_type 
must be greater than trx_message_type for prioritized_item_ids to sort correctly"); + if (item.item_type != rhs.item.item_type) + return item.item_type > rhs.item.item_type; + return (signed)(rhs.sequence_number - sequence_number) > 0; + } +}; + +class statistics_gathering_node_delegate_wrapper : public node_delegate +{ +private: + node_delegate *_node_delegate; + fc::thread *_thread; + + typedef boost::accumulators::accumulator_set > call_stats_accumulator; +#define NODE_DELEGATE_METHOD_NAMES (has_item) \ + (handle_message) \ + (handle_block) \ + (handle_transaction) \ + (get_block_ids) \ + (get_item) \ + (get_chain_id) \ + (get_blockchain_synopsis) \ + (sync_status) \ + (connection_count_changed) \ + (get_block_number) \ + (get_block_time) \ + (get_head_block_id) \ + (estimate_last_known_fork_from_git_revision_timestamp) \ + (error_encountered) \ + (get_current_block_interval_in_seconds) + + + +#define DECLARE_ACCUMULATOR(r, data, method_name) \ + mutable call_stats_accumulator BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _execution_accumulator)); \ + mutable call_stats_accumulator BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _delay_before_accumulator)); \ + mutable call_stats_accumulator BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _delay_after_accumulator)); + BOOST_PP_SEQ_FOR_EACH(DECLARE_ACCUMULATOR, unused, NODE_DELEGATE_METHOD_NAMES) +#undef DECLARE_ACCUMULATOR + + class call_statistics_collector + { + private: + fc::time_point _call_requested_time; + fc::time_point _begin_execution_time; + fc::time_point _execution_completed_time; + const char* _method_name; + call_stats_accumulator* _execution_accumulator; + call_stats_accumulator* _delay_before_accumulator; + call_stats_accumulator* _delay_after_accumulator; + public: + class actual_execution_measurement_helper + { + call_statistics_collector &_collector; + public: + actual_execution_measurement_helper(call_statistics_collector& collector) : + _collector(collector) + { + _collector.starting_execution(); + } + ~actual_execution_measurement_helper() + { + _collector.execution_completed(); + } + }; + call_statistics_collector(const char* method_name, + call_stats_accumulator* execution_accumulator, + call_stats_accumulator* delay_before_accumulator, + call_stats_accumulator* delay_after_accumulator) : + _call_requested_time(fc::time_point::now()), + _method_name(method_name), + _execution_accumulator(execution_accumulator), + _delay_before_accumulator(delay_before_accumulator), + _delay_after_accumulator(delay_after_accumulator) + {} + ~call_statistics_collector() + { + fc::time_point end_time(fc::time_point::now()); + fc::microseconds actual_execution_time(_execution_completed_time - _begin_execution_time); + fc::microseconds delay_before(_begin_execution_time - _call_requested_time); + fc::microseconds delay_after(end_time - _execution_completed_time); + fc::microseconds total_duration(actual_execution_time + delay_before + delay_after); + (*_execution_accumulator)(actual_execution_time.count()); + (*_delay_before_accumulator)(delay_before.count()); + (*_delay_after_accumulator)(delay_after.count()); + if (total_duration > fc::milliseconds(500)) + { + ilog("Call to method node_delegate::${method} took ${total_duration}us, longer than our target maximum of 500ms", + ("method", _method_name) + ("total_duration", total_duration.count())); + ilog("Actual execution took ${execution_duration}us, with a ${delegate_delay}us delay before the delegate thread started " + "executing the method, and a ${p2p_delay}us delay after it finished before the 
p2p thread started processing the response", + ("execution_duration", actual_execution_time) + ("delegate_delay", delay_before) + ("p2p_delay", delay_after)); + } + } + void starting_execution() + { + _begin_execution_time = fc::time_point::now(); + } + void execution_completed() + { + _execution_completed_time = fc::time_point::now(); + } + }; + public: + statistics_gathering_node_delegate_wrapper(node_delegate* delegate, fc::thread* thread_for_delegate_calls); + + fc::variant_object get_call_statistics(); + + bool has_item( const graphene::net::item_id& id ) override; + void handle_message( const message& ) override; + bool handle_block( const graphene::net::block_message& block_message, bool sync_mode, std::vector& contained_transaction_message_ids ) override; + void handle_transaction( const graphene::net::trx_message& transaction_message ) override; + std::vector get_block_ids(const std::vector& blockchain_synopsis, + uint32_t& remaining_item_count, + uint32_t limit = 2000) override; + message get_item( const item_id& id ) override; + graphene::chain::chain_id_type get_chain_id() const override; + std::vector get_blockchain_synopsis(const item_hash_t& reference_point, + uint32_t number_of_blocks_after_reference_point) override; + void sync_status( uint32_t item_type, uint32_t item_count ) override; + void connection_count_changed( uint32_t c ) override; + uint32_t get_block_number(const item_hash_t& block_id) override; + fc::time_point_sec get_block_time(const item_hash_t& block_id) override; + item_hash_t get_head_block_id() const override; + uint32_t estimate_last_known_fork_from_git_revision_timestamp(uint32_t unix_timestamp) const override; + void error_encountered(const std::string& message, const fc::oexception& error) override; + uint8_t get_current_block_interval_in_seconds() const override; + }; + +class node_impl : public peer_connection_delegate +{ + public: +#ifdef P2P_IN_DEDICATED_THREAD + std::shared_ptr _thread; +#endif // P2P_IN_DEDICATED_THREAD + std::unique_ptr _delegate; + fc::sha256 _chain_id; + +#define NODE_CONFIGURATION_FILENAME "node_config.json" +#define POTENTIAL_PEER_DATABASE_FILENAME "peers.json" + fc::path _node_configuration_directory; + node_configuration _node_configuration; + + /// stores the endpoint we're listening on. This will be the same as + // _node_configuration.listen_endpoint, unless that endpoint was already + // in use. + fc::ip::endpoint _actual_listening_endpoint; + + /// we determine whether we're firewalled by asking other nodes. Store the result here: + firewalled_state _is_firewalled; + /// if we're behind NAT, our listening endpoint address will appear different to the rest of the world. store it here. 
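The `prioritized_item_id` comparator declared earlier in node_impl.hxx orders block requests ahead of transaction requests and otherwise preserves the order in which items were first heard about, using a signed difference so the unsigned sequence counter can wrap without reordering. A standalone sketch of that comparison, with illustrative enum values:

```cpp
// Sketch of the block-before-transaction, then-FIFO ordering (standalone; enum values illustrative).
#include <cstdint>

enum message_type : std::uint32_t { trx_message_type = 1000, block_message_type = 1001 };

struct prioritized_item
{
    message_type  type;
    std::uint32_t sequence_number;           // taken from a monotonically increasing counter

    bool operator<(const prioritized_item& rhs) const
    {
        static_assert(block_message_type > trx_message_type,
                      "block_message_type must be greater than trx_message_type to sort correctly");
        if (type != rhs.type)
            return type > rhs.type;          // blocks sort ahead of transactions
        // Signed difference of unsigned counters: still "older first" across counter wrap-around.
        return static_cast<std::int32_t>(rhs.sequence_number - sequence_number) > 0;
    }
};
```

Iterated front to back in an ordered container, this places every pending block request before any transaction request, while each group keeps its arrival order.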
+ fc::optional _publicly_visible_listening_endpoint; + fc::time_point _last_firewall_check_message_sent; + + /// used by the task that manages connecting to peers + // @{ + std::list _add_once_node_list; /// list of peers we want to connect to as soon as possible + + peer_database _potential_peer_db; + fc::promise::ptr _retrigger_connect_loop_promise; + bool _potential_peer_database_updated; + fc::future _p2p_network_connect_loop_done; + // @} + + /// used by the task that fetches sync items during synchronization + // @{ + fc::promise::ptr _retrigger_fetch_sync_items_loop_promise; + bool _sync_items_to_fetch_updated; + fc::future _fetch_sync_items_loop_done; + + typedef std::unordered_map active_sync_requests_map; + + active_sync_requests_map _active_sync_requests; /// list of sync blocks we've asked for from peers but have not yet received + std::list _new_received_sync_items; /// list of sync blocks we've just received but haven't yet tried to process + std::list _received_sync_items; /// list of sync blocks we've received, but can't yet process because we are still missing blocks that come earlier in the chain + // @} + + fc::future _process_backlog_of_sync_blocks_done; + bool _suspend_fetching_sync_blocks; + + /// used by the task that fetches items during normal operation + // @{ + fc::promise::ptr _retrigger_fetch_item_loop_promise; + bool _items_to_fetch_updated; + fc::future _fetch_item_loop_done; + + struct item_id_index{}; + typedef boost::multi_index_container >, + boost::multi_index::hashed_unique, + boost::multi_index::member, + std::hash > > + > items_to_fetch_set_type; + unsigned _items_to_fetch_sequence_counter; + items_to_fetch_set_type _items_to_fetch; /// list of items we know another peer has and we want + peer_connection::timestamped_items_set_type _recently_failed_items; /// list of transactions we've recently pushed and had rejected by the delegate + // @} + + /// used by the task that advertises inventory during normal operation + // @{ + fc::promise::ptr _retrigger_advertise_inventory_loop_promise; + fc::future _advertise_inventory_loop_done; + std::unordered_set _new_inventory; /// list of items we have received but not yet advertised to our peers + // @} + + fc::future _terminate_inactive_connections_loop_done; + uint8_t _recent_block_interval_in_seconds; // a cached copy of the block interval, to avoid a thread hop to the blockchain to get the current value + + std::string _user_agent_string; + /** _node_public_key is a key automatically generated when the client is first run, stored in + * node_config.json. It doesn't really have much of a purpose yet, there was just some thought + * that we might someday have a use for nodes having a private key (sent in hello messages) + */ + node_id_t _node_public_key; + /** + * _node_id is a random number generated each time the client is launched, used to prevent us + * from connecting to the same client multiple times (sent in hello messages). + * Since this was introduced after the hello_message was finalized, this is sent in the + * user_data field. + * While this shares the same underlying type as a public key, it is really just a random + * number. 
+ */ + node_id_t _node_id; + + /** if we have less than `_desired_number_of_connections`, we will try to connect with more nodes */ + uint32_t _desired_number_of_connections; + /** if we have _maximum_number_of_connections or more, we will refuse any inbound connections */ + uint32_t _maximum_number_of_connections; + /** retry connections to peers that have failed or rejected us this often, in seconds */ + uint32_t _peer_connection_retry_timeout; + /** how many seconds of inactivity are permitted before disconnecting a peer */ + uint32_t _peer_inactivity_timeout; + + fc::tcp_server _tcp_server; + fc::future _accept_loop_complete; + + /** Stores all connections which have not yet finished key exchange or are still sending initial handshaking messages + * back and forth (not yet ready to initiate syncing) */ + std::unordered_set _handshaking_connections; + /** stores fully established connections we're either syncing with or in normal operation with */ + std::unordered_set _active_connections; + /** stores connections we've closed (sent closing message, not actually closed), but are still waiting for the remote end to close before we delete them */ + std::unordered_set _closing_connections; + /** stores connections we've closed, but are still waiting for the OS to notify us that the socket is really closed */ + std::unordered_set _terminating_connections; + + boost::circular_buffer _most_recent_blocks_accepted; // the /n/ most recent blocks we've accepted (currently tuned to the max number of connections) + + uint32_t _sync_item_type; + uint32_t _total_number_of_unfetched_items; /// the number of items we still need to fetch while syncing + std::vector _hard_fork_block_numbers; /// list of all block numbers where there are hard forks + + blockchain_tied_message_cache _message_cache; /// cache message we have received and might be required to provide to other peers via inventory requests + + fc::rate_limiting_group _rate_limiter; + + uint32_t _last_reported_number_of_connections; // number of connections last reported to the client (to avoid sending duplicate messages) + + bool _peer_advertising_disabled; + + fc::future _fetch_updated_peer_lists_loop_done; + + boost::circular_buffer _average_network_read_speed_seconds; + boost::circular_buffer _average_network_write_speed_seconds; + boost::circular_buffer _average_network_read_speed_minutes; + boost::circular_buffer _average_network_write_speed_minutes; + boost::circular_buffer _average_network_read_speed_hours; + boost::circular_buffer _average_network_write_speed_hours; + unsigned _average_network_usage_second_counter; + unsigned _average_network_usage_minute_counter; + + fc::time_point_sec _bandwidth_monitor_last_update_time; + fc::future _bandwidth_monitor_loop_done; + + fc::future _dump_node_status_task_done; + + /* We have two alternate paths through the schedule_peer_for_deletion code -- one that + * uses a mutex to prevent one fiber from adding items to the queue while another is deleting + * items from it, and one that doesn't. The one that doesn't is simpler and more efficient + * code, but we're keeping around the version that uses the mutex because it crashes, and + * this crash probably indicates a bug in our underlying threading code that needs + * fixing. 
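// The boost::circular_buffer members above keep rolling per-second, per-minute and per-hour
// bandwidth samples, driven by update_bandwidth_data(). One plausible roll-up -- a sketch
// under that assumption, not necessarily the project's exact logic -- averages each full
// minute of second-samples into the minute buffer and each full hour of minutes into the
// hour buffer:
#include <boost/circular_buffer.hpp>
#include <cstdint>
#include <numeric>

struct bandwidth_history
{
   boost::circular_buffer<uint32_t> seconds;
   boost::circular_buffer<uint32_t> minutes;
   boost::circular_buffer<uint32_t> hours;
   unsigned second_counter = 0;
   unsigned minute_counter = 0;

   bandwidth_history() : seconds(60), minutes(60), hours(72) {}

   static uint32_t average(const boost::circular_buffer<uint32_t>& samples)
   {
      return samples.empty() ? 0
           : uint32_t(std::accumulate(samples.begin(), samples.end(), uint64_t(0)) / samples.size());
   }

   void record_second(uint32_t bytes_this_second)
   {
      seconds.push_back(bytes_this_second);
      if (++second_counter % 60 == 0)
      {
         minutes.push_back(average(seconds));
         if (++minute_counter % 60 == 0)
            hours.push_back(average(minutes));
      }
   }
};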
To produce the bug, define USE_PEERS_TO_DELETE_MUTEX and then connect up + * to the network and set your desired/max connection counts high + */ +//#define USE_PEERS_TO_DELETE_MUTEX 1 +#ifdef USE_PEERS_TO_DELETE_MUTEX + fc::mutex _peers_to_delete_mutex; +#endif + std::list _peers_to_delete; + fc::future _delayed_peer_deletion_task_done; + +#ifdef ENABLE_P2P_DEBUGGING_API + std::set _allowed_peers; +#endif // ENABLE_P2P_DEBUGGING_API + + bool _node_is_shutting_down; // set to true when we begin our destructor, used to prevent us from starting new tasks while we're shutting down + + unsigned _maximum_number_of_blocks_to_handle_at_one_time; + unsigned _maximum_number_of_sync_blocks_to_prefetch; + unsigned _maximum_blocks_per_peer_during_syncing; + + std::list > _handle_message_calls_in_progress; + + node_impl(const std::string& user_agent); + virtual ~node_impl(); + + void save_node_configuration(); + + void p2p_network_connect_loop(); + void trigger_p2p_network_connect_loop(); + + bool have_already_received_sync_item( const item_hash_t& item_hash ); + void request_sync_item_from_peer( const peer_connection_ptr& peer, const item_hash_t& item_to_request ); + void request_sync_items_from_peer( const peer_connection_ptr& peer, const std::vector& items_to_request ); + void fetch_sync_items_loop(); + void trigger_fetch_sync_items_loop(); + + bool is_item_in_any_peers_inventory(const item_id& item) const; + void fetch_items_loop(); + void trigger_fetch_items_loop(); + + void advertise_inventory_loop(); + void trigger_advertise_inventory_loop(); + + void terminate_inactive_connections_loop(); + + void fetch_updated_peer_lists_loop(); + void update_bandwidth_data(uint32_t bytes_read_this_second, uint32_t bytes_written_this_second); + void bandwidth_monitor_loop(); + void dump_node_status_task(); + + bool is_accepting_new_connections(); + bool is_wanting_new_connections(); + uint32_t get_number_of_connections(); + peer_connection_ptr get_peer_by_node_id(const node_id_t& id); + + bool is_already_connected_to_id(const node_id_t& node_id); + bool merge_address_info_with_potential_peer_database( const std::vector addresses ); + void display_current_connections(); + uint32_t calculate_unsynced_block_count_from_all_peers(); + std::vector create_blockchain_synopsis_for_peer( const peer_connection* peer ); + void fetch_next_batch_of_item_ids_from_peer( peer_connection* peer, bool reset_fork_tracking_data_for_peer = false ); + + fc::variant_object generate_hello_user_data(); + void parse_hello_user_data_for_peer( peer_connection* originating_peer, const fc::variant_object& user_data ); + + void on_message( peer_connection* originating_peer, + const message& received_message ) override; + + void on_hello_message( peer_connection* originating_peer, + const hello_message& hello_message_received ); + + void on_connection_accepted_message( peer_connection* originating_peer, + const connection_accepted_message& connection_accepted_message_received ); + + void on_connection_rejected_message( peer_connection* originating_peer, + const connection_rejected_message& connection_rejected_message_received ); + + void on_address_request_message( peer_connection* originating_peer, + const address_request_message& address_request_message_received ); + + void on_address_message( peer_connection* originating_peer, + const address_message& address_message_received ); + + void on_fetch_blockchain_item_ids_message( peer_connection* originating_peer, + const fetch_blockchain_item_ids_message& 
fetch_blockchain_item_ids_message_received ); + + void on_blockchain_item_ids_inventory_message( peer_connection* originating_peer, + const blockchain_item_ids_inventory_message& blockchain_item_ids_inventory_message_received ); + + void on_fetch_items_message( peer_connection* originating_peer, + const fetch_items_message& fetch_items_message_received ); + + void on_item_not_available_message( peer_connection* originating_peer, + const item_not_available_message& item_not_available_message_received ); + + void on_item_ids_inventory_message( peer_connection* originating_peer, + const item_ids_inventory_message& item_ids_inventory_message_received ); + + void on_closing_connection_message( peer_connection* originating_peer, + const closing_connection_message& closing_connection_message_received ); + + void on_current_time_request_message( peer_connection* originating_peer, + const current_time_request_message& current_time_request_message_received ); + + void on_current_time_reply_message( peer_connection* originating_peer, + const current_time_reply_message& current_time_reply_message_received ); + + void forward_firewall_check_to_next_available_peer(firewall_check_state_data* firewall_check_state); + + void on_check_firewall_message(peer_connection* originating_peer, + const check_firewall_message& check_firewall_message_received); + + void on_check_firewall_reply_message(peer_connection* originating_peer, + const check_firewall_reply_message& check_firewall_reply_message_received); + + void on_get_current_connections_request_message(peer_connection* originating_peer, + const get_current_connections_request_message& get_current_connections_request_message_received); + + void on_get_current_connections_reply_message(peer_connection* originating_peer, + const get_current_connections_reply_message& get_current_connections_reply_message_received); + + void on_connection_closed(peer_connection* originating_peer) override; + + void send_sync_block_to_node_delegate(const graphene::net::block_message& block_message_to_send); + void process_backlog_of_sync_blocks(); + void trigger_process_backlog_of_sync_blocks(); + void process_block_during_sync(peer_connection* originating_peer, const graphene::net::block_message& block_message, const message_hash_type& message_hash); + void process_block_during_normal_operation(peer_connection* originating_peer, const graphene::net::block_message& block_message, const message_hash_type& message_hash); + void process_block_message(peer_connection* originating_peer, const message& message_to_process, const message_hash_type& message_hash); + + void process_ordinary_message(peer_connection* originating_peer, const message& message_to_process, const message_hash_type& message_hash); + + void start_synchronizing(); + void start_synchronizing_with_peer(const peer_connection_ptr& peer); + + void new_peer_just_added(const peer_connection_ptr& peer); /// called after a peer finishes handshaking, kicks off syncing + + void close(); + + void accept_connection_task(peer_connection_ptr new_peer); + void accept_loop(); + void send_hello_message(const peer_connection_ptr& peer); + void connect_to_task(peer_connection_ptr new_peer, const fc::ip::endpoint& remote_endpoint); + bool is_connection_to_endpoint_in_progress(const fc::ip::endpoint& remote_endpoint); + + void move_peer_to_active_list(const peer_connection_ptr& peer); + void move_peer_to_closing_list(const peer_connection_ptr& peer); + void move_peer_to_terminating_list(const peer_connection_ptr& peer); + + 
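// Peers migrate through the four lifecycle containers declared above (_handshaking_connections,
// then _active_connections, then _closing_connections, then _terminating_connections), which is
// what the move_peer_to_*_list() helpers manage. A minimal sketch of that idea with plain std
// types (peer and peer_set here are illustrative stand-ins, not the project's classes):
#include <initializer_list>
#include <memory>
#include <unordered_set>

struct peer;                                      // stand-in for peer_connection
using peer_ptr = std::shared_ptr<peer>;
using peer_set = std::unordered_set<peer_ptr>;

// Remove the peer from whichever lifecycle set currently holds it, then insert it into the
// destination set -- the same shape as move_peer_to_active_list() and friends.
inline void move_peer(const peer_ptr& p, std::initializer_list<peer_set*> sources, peer_set& destination)
{
   for (peer_set* source : sources)
      source->erase(p);
   destination.insert(p);
}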
peer_connection_ptr get_connection_to_endpoint( const fc::ip::endpoint& remote_endpoint ); + + void dump_node_status(); + + void delayed_peer_deletion_task(); + void schedule_peer_for_deletion(const peer_connection_ptr& peer_to_delete); + + void disconnect_from_peer( peer_connection* originating_peer, + const std::string& reason_for_disconnect, + bool caused_by_error = false, + const fc::oexception& additional_data = fc::oexception() ); + + // methods implementing node's public interface + void set_node_delegate(node_delegate* del, fc::thread* thread_for_delegate_calls); + void load_configuration( const fc::path& configuration_directory ); + void listen_to_p2p_network(); + void connect_to_p2p_network(); + void add_node( const fc::ip::endpoint& ep ); + void initiate_connect_to(const peer_connection_ptr& peer); + void connect_to_endpoint(const fc::ip::endpoint& ep); + void listen_on_endpoint(const fc::ip::endpoint& ep , bool wait_if_not_available); + void accept_incoming_connections(bool accept); + void listen_on_port( uint16_t port, bool wait_if_not_available ); + + fc::ip::endpoint get_actual_listening_endpoint() const; + std::vector get_connected_peers() const; + uint32_t get_connection_count() const; + + void broadcast(const message& item_to_broadcast, const message_propagation_data& propagation_data); + void broadcast(const message& item_to_broadcast); + void sync_from(const item_id& current_head_block, const std::vector& hard_fork_block_numbers); + bool is_connected() const; + std::vector get_potential_peers() const; + void set_advanced_node_parameters( const fc::variant_object& params ); + + fc::variant_object get_advanced_node_parameters(); + message_propagation_data get_transaction_propagation_data( const graphene::net::transaction_id_type& transaction_id ); + message_propagation_data get_block_propagation_data( const graphene::net::block_id_type& block_id ); + + node_id_t get_node_id() const; + void set_allowed_peers( const std::vector& allowed_peers ); + void clear_peer_database(); + void set_total_bandwidth_limit( uint32_t upload_bytes_per_second, uint32_t download_bytes_per_second ); + void disable_peer_advertising(); + fc::variant_object get_call_statistics() const; + message get_message_for_item(const item_id& item) override; + + fc::variant_object network_get_info() const; + fc::variant_object network_get_usage_stats() const; + + bool is_hard_fork_block(uint32_t block_number) const; + uint32_t get_next_known_hard_fork_block_number(uint32_t block_number) const; + }; // end class node_impl + +}}} // end of namespace graphene::net::detail diff --git a/libraries/net/peer_connection.cpp b/libraries/net/peer_connection.cpp index 4dfcec3bb2..23ac403ab0 100644 --- a/libraries/net/peer_connection.cpp +++ b/libraries/net/peer_connection.cpp @@ -29,6 +29,8 @@ #include +#include + #ifdef DEFAULT_LOGGER # undef DEFAULT_LOGGER #endif @@ -86,11 +88,12 @@ namespace graphene { namespace net inhibit_fetching_sync_blocks(false), transaction_fetching_inhibited_until(fc::time_point::min()), last_known_fork_block_number(0), - firewall_check_state(nullptr) + firewall_check_state(nullptr), #ifndef NDEBUG - ,_thread(&fc::thread::current()), - _send_message_queue_tasks_running(0) + _thread(&fc::thread::current()), + _send_message_queue_tasks_running(0), #endif + _currently_handling_message(false) { } @@ -257,7 +260,7 @@ namespace graphene { namespace net } catch ( fc::exception& e ) { - elog( "fatal: error connecting to peer ${remote_endpoint}: ${e}", ("remote_endpoint", remote_endpoint )("e", 
e.to_detail_string() ) ); + wlog( "error connecting to peer ${remote_endpoint}: ${e}", ("remote_endpoint", remote_endpoint )("e", e.to_detail_string() ) ); throw; } } // connect_to() @@ -265,6 +268,10 @@ namespace graphene { namespace net void peer_connection::on_message( message_oriented_connection* originating_connection, const message& received_message ) { VERIFY_CORRECT_THREAD(); + _currently_handling_message = true; + BOOST_SCOPE_EXIT(this_) { + this_->_currently_handling_message = false; + } BOOST_SCOPE_EXIT_END _node->on_message( this, received_message ); } @@ -305,24 +312,24 @@ namespace graphene { namespace net } catch (const fc::exception& send_error) { - elog("Error sending message: ${exception}. Closing connection.", ("exception", send_error)); + wlog("Error sending message: ${exception}. Closing connection.", ("exception", send_error)); try { close_connection(); } catch (const fc::exception& close_error) { - elog("Caught error while closing connection: ${exception}", ("exception", close_error)); + wlog("Caught error while closing connection: ${exception}", ("exception", close_error)); } return; } catch (const std::exception& e) { - elog("message_oriented_exception::send_message() threw a std::exception(): ${what}", ("what", e.what())); + wlog("message_oriented_exception::send_message() threw a std::exception(): ${what}", ("what", e.what())); } catch (...) { - elog("message_oriented_exception::send_message() threw an unhandled exception"); + wlog("message_oriented_exception::send_message() threw an unhandled exception"); } _queued_messages.front()->transmission_finish_time = fc::time_point::now(); _total_queued_messages_size -= _queued_messages.front()->get_size_in_queue(); @@ -338,7 +345,7 @@ namespace graphene { namespace net _queued_messages.emplace(std::move(message_to_send)); if (_total_queued_messages_size > GRAPHENE_NET_MAXIMUM_QUEUED_MESSAGES_IN_BYTES) { - elog("send queue exceeded maximum size of ${max} bytes (current size ${current} bytes)", + wlog("send queue exceeded maximum size of ${max} bytes (current size ${current} bytes)", ("max", GRAPHENE_NET_MAXIMUM_QUEUED_MESSAGES_IN_BYTES)("current", _total_queued_messages_size)); try { @@ -346,7 +353,7 @@ namespace graphene { namespace net } catch (const fc::exception& e) { - elog("Caught error while closing connection: ${exception}", ("exception", e)); + wlog("Caught error while closing connection: ${exception}", ("exception", e)); } return; } @@ -438,18 +445,24 @@ namespace graphene { namespace net _remote_endpoint = new_remote_endpoint; } - bool peer_connection::busy() + bool peer_connection::busy() const { VERIFY_CORRECT_THREAD(); return !items_requested_from_peer.empty() || !sync_items_requested_from_peer.empty() || item_ids_requested_from_peer; } - bool peer_connection::idle() + bool peer_connection::idle() const { VERIFY_CORRECT_THREAD(); return !busy(); } + bool peer_connection::is_currently_handling_message() const + { + VERIFY_CORRECT_THREAD(); + return _currently_handling_message; + } + bool peer_connection::is_transaction_fetching_inhibited() const { VERIFY_CORRECT_THREAD(); diff --git a/libraries/net/peer_database.cpp b/libraries/net/peer_database.cpp index c24568fce5..2b20364e31 100644 --- a/libraries/net/peer_database.cpp +++ b/libraries/net/peer_database.cpp @@ -34,8 +34,7 @@ #include #include - - +#include namespace graphene { namespace net { namespace detail @@ -81,7 +80,7 @@ namespace graphene { namespace net { public: typedef peer_database_impl::potential_peer_set::index::type::iterator 
last_seen_time_index_iterator; last_seen_time_index_iterator _iterator; - peer_database_iterator_impl(const last_seen_time_index_iterator& iterator) : + explicit peer_database_iterator_impl(const last_seen_time_index_iterator& iterator) : _iterator(iterator) {} }; @@ -95,9 +94,8 @@ namespace graphene { namespace net { { try { - std::vector peer_records = fc::json::from_file(_peer_database_filename).as >(); + std::vector peer_records = fc::json::from_file(_peer_database_filename).as >( GRAPHENE_NET_MAX_NESTED_OBJECTS ); std::copy(peer_records.begin(), peer_records.end(), std::inserter(_potential_peer_set, _potential_peer_set.end())); -#define MAXIMUM_PEERDB_SIZE 1000 if (_potential_peer_set.size() > MAXIMUM_PEERDB_SIZE) { // prune database to a reasonable size @@ -125,7 +123,7 @@ namespace graphene { namespace net { fc::path peer_database_filename_dir = _peer_database_filename.parent_path(); if (!fc::exists(peer_database_filename_dir)) fc::create_directories(peer_database_filename_dir); - fc::json::save_to_file(peer_records, _peer_database_filename); + fc::json::save_to_file( peer_records, _peer_database_filename, GRAPHENE_NET_MAX_NESTED_OBJECTS ); } catch (const fc::exception& e) { diff --git a/libraries/p2p/CMakeLists.txt b/libraries/p2p/CMakeLists.txt deleted file mode 100644 index 6b5918d51a..0000000000 --- a/libraries/p2p/CMakeLists.txt +++ /dev/null @@ -1,32 +0,0 @@ -file(GLOB HEADERS "include/graphene/p2p/*.hpp") - -set(SOURCES node.cpp - stcp_socket.cpp - peer_connection.cpp - message_oriented_connection.cpp) - -add_library( graphene_p2p ${SOURCES} ${HEADERS} ) - -target_link_libraries( graphene_p2p - PUBLIC fc graphene_db ) -target_include_directories( graphene_p2p - PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" - PRIVATE "${CMAKE_SOURCE_DIR}/libraries/chain/include" -) - -#if(MSVC) -# set_source_files_properties( node.cpp PROPERTIES COMPILE_FLAGS "/bigobj" ) -#endif(MSVC) - -#if (USE_PCH) -# set_target_properties(graphene_p2p PROPERTIES COTIRE_ADD_UNITY_BUILD FALSE) -# cotire(graphene_p2p ) -#endif(USE_PCH) - -install( TARGETS - graphene_p2p - - RUNTIME DESTINATION bin - LIBRARY DESTINATION lib - ARCHIVE DESTINATION lib -) diff --git a/libraries/p2p/design.md b/libraries/p2p/design.md deleted file mode 100644 index 96653d7e84..0000000000 --- a/libraries/p2p/design.md +++ /dev/null @@ -1,96 +0,0 @@ -# Network Protocol 2 - -Building a low-latency network requires P2P nodes that have low-latency -connections and a protocol designed to minimize latency. for the purpose -of this document we will assume that two nodes are located on opposite -sides of the globe with a ping time of 250ms. - - -## Announce, Request, Send Protocol -Under the prior network archtiecture, transactions and blocks were broadcast -in a manner similar to the Bitcoin protocol: inventory messages notify peers of -transactions and blocks, then peers fetch the transaction or block from one -peer. After validating the item a node will broadcast an inventory message to -its peers. - -Under this model it will take 0.75 seconds for a peer to communicate a transaction -or block to another peer even if their size was 0 and there was no processing overhead. -This level of performance is unacceptable for a network attempting to produce one block -every second. - -This prior protocol also sent every transaction twice: initial broadcast, and again as -part of a block. - - -## Push Protocol -To minimize latency each node needs to immediately broadcast the data it receives -to its peers after validating it. 
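To make the 0.75-second figure above concrete: under announce/request/send an item crosses the
link three times before the second peer holds it (the inventory notice, the fetch request, then
the item itself), whereas a push protocol needs only one crossing. Treating the quoted 250ms as
the per-message latency, a quick check (illustrative C++, not part of the protocol):

    #include <chrono>
    #include <cstdio>

    int main()
    {
       const std::chrono::milliseconds per_message(250);       // one trip across the globe
       const auto announce_request_send = 3 * per_message;     // notice + request + delivery
       const auto push                  = 1 * per_message;     // just the delivery
       std::printf("announce/request/send: %lld ms, push: %lld ms\n",
                   (long long)announce_request_send.count(), (long long)push.count());
    }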
Given the average transaction size is less than -100 bytes, it is almost as effecient to send the transaction as it is to send -the notice (assuming a 20 byte transaction id) - -Each node implements the following protocol: - - - onReceiveTransaction( from_peer, transaction ) - if( isKnown( transaction.id() ) ) - return - - markKnown( transaction.id() ) - - if( !validate( transaction ) ) - return - - for( peer : peers ) - if( peer != from_peer ) - send( peer, transaction ) - - - onReceiveBlock( from_peer, block_summary ) - if( isKnown( block_summary ) - return - - full_block = reconstructFullBlcok( from_peer, block_summary ) - if( !full_block ) disconnect from_peer - - markKnown( block_summary ) - - if( !pushBlock( full_block ) ) disconnect from_peer - - for( peer : peers ) - if( peer != from_peer ) - send( peer, block_summary ) - - - onHello( new_peer, new_peer_head_block_num ) - - replyHello( new_peer ) // ack the hello message with our timestamp to measure latency - - if( peers.size() >= max_peers ) - send( new_peer, peers ) - disconnect( new_peer ) - return - - while( new_peer_head_block_num < our_head_block_num ) - sendFullBlock( new_peer, ++new_peer_head_block_num ) - - new_peer.synced = true - for( peer : peers ) - send( peer, new_peer ) - - onHelloReply( from_peer, hello_reply ) - update_latency_measure, disconnect if too slow - - onReceivePeers( from_peer, peers ) - addToPotentialPeers( peers ) - - onUpdateConnectionsTimer - if( peers.size() < desired_peers ) - connect( random_potential_peer ) - - onFullBlock( from_peer, full_block ) - if( !pushBlock( full_block ) ) disconnect from_peer - - onStartup - init_potential_peers from config - start onUpdateConnectionsTimer - diff --git a/libraries/p2p/include/graphene/p2p/message.hpp b/libraries/p2p/include/graphene/p2p/message.hpp deleted file mode 100644 index 3a913507cf..0000000000 --- a/libraries/p2p/include/graphene/p2p/message.hpp +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
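The onReceiveTransaction pseudocode above compresses the core relay rule; a self-contained C++
sketch of the same check-validate-relay flow (the types and names here are illustrative, not the
removed graphene::p2p API):

    #include <functional>
    #include <memory>
    #include <string>
    #include <unordered_set>
    #include <vector>

    struct transaction { std::string id; };
    struct peer { std::function<void(const transaction&)> send; };
    using peer_ptr = std::shared_ptr<peer>;

    class relay_node
    {
       std::unordered_set<std::string>          _known_ids;
       std::vector<peer_ptr>                    _peers;
       std::function<bool(const transaction&)>  _validate;
    public:
       relay_node(std::vector<peer_ptr> peers, std::function<bool(const transaction&)> validate)
          : _peers(std::move(peers)), _validate(std::move(validate)) {}

       // Mirrors onReceiveTransaction(from_peer, transaction): drop duplicates, mark the id
       // as known before validating, and rebroadcast to every peer except the sender.
       void on_receive_transaction(const peer_ptr& from_peer, const transaction& trx)
       {
          if (!_known_ids.insert(trx.id).second)   // isKnown / markKnown in one step
             return;
          if (!_validate(trx))
             return;
          for (const peer_ptr& p : _peers)
             if (p != from_peer)
                p->send(trx);
       }
    };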
- */ -#pragma once -#include -#include -#include -#include -#include -#include - -namespace graphene { namespace p2p { - using namespace graphene::chain; - - struct message_header - { - uint32_t size; // number of bytes in message, capped at MAX_MESSAGE_SIZE - uint32_t msg_type; - }; - - typedef fc::uint160_t message_hash_type; - - /** - * Abstracts the process of packing/unpacking a message for a - * particular channel. - */ - struct message : public message_header - { - std::vector data; - - message(){} - - message( message&& m ) - :message_header(m),data( std::move(m.data) ){} - - message( const message& m ) - :message_header(m),data( m.data ){} - - /** - * Assumes that T::type specifies the message type - */ - template - message( const T& m ) - { - msg_type = T::type; - data = fc::raw::pack(m); - size = (uint32_t)data.size(); - } - - fc::uint160_t id()const - { - return fc::ripemd160::hash( data.data(), (uint32_t)data.size() ); - } - - /** - * Automatically checks the type and deserializes T in the - * opposite process from the constructor. - */ - template - T as()const - { - try { - FC_ASSERT( msg_type == T::type ); - T tmp; - if( data.size() ) - { - fc::datastream ds( data.data(), data.size() ); - fc::raw::unpack( ds, tmp ); - } - else - { - // just to make sure that tmp shouldn't have any data - fc::datastream ds( nullptr, 0 ); - fc::raw::unpack( ds, tmp ); - } - return tmp; - } FC_RETHROW_EXCEPTIONS( warn, - "error unpacking network message as a '${type}' ${x} !=? ${msg_type}", - ("type", fc::get_typename::name() ) - ("x", T::type) - ("msg_type", msg_type) - ); - } - }; - - enum core_message_type_enum { - hello_message_type = 1000, - transaction_message_type = 1001, - block_message_type = 1002, - peer_message_type = 1003, - error_message_type = 1004 - }; - - struct hello_message - { - static const core_message_type_enum type; - - std::string user_agent; - uint16_t version; - fc::time_point timestamp; - - fc::ip::address inbound_address; - uint16_t inbound_port; - uint16_t outbound_port; - public_key_type node_public_key; - fc::sha256 chain_id; - fc::variant_object user_data; - block_id_type head_block; - }; - - struct hello_reply_message - { - static const core_message_type_enum type; - - fc::time_point hello_timestamp; - fc::time_point reply_timestamp; - }; - - struct transaction_message - { - static const core_message_type_enum type; - signed_transaction trx; - }; - - struct block_summary_message - { - static const core_message_type_enum type; - - signed_block_header header; - vector transaction_ids; - }; - - struct full_block_message - { - static const core_message_type_enum type; - signed_block block; - }; - - struct peers_message - { - static const core_message_type_enum type; - - vector peers; - }; - - struct error_message - { - static const core_message_type_enum type; - string message; - }; - - -} } // graphene::p2p - -FC_REFLECT( graphene::p2p::message_header, (size)(msg_type) ) -FC_REFLECT_DERIVED( graphene::p2p::message, (graphene::p2p::message_header), (data) ) -FC_REFLECT_ENUM( graphene::p2p::core_message_type_enum, - (hello_message_type) - (transaction_message_type) - (block_message_type) - (peer_message_type) - (error_message_type) -) diff --git a/libraries/p2p/include/graphene/p2p/message_oriented_connection.hpp b/libraries/p2p/include/graphene/p2p/message_oriented_connection.hpp deleted file mode 100644 index b2a586d1bb..0000000000 --- a/libraries/p2p/include/graphene/p2p/message_oriented_connection.hpp +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright (c) 2015 
Cryptonomex, Inc., and contributors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ -#pragma once -#include -#include - -namespace graphene { namespace p2p { - - namespace detail { class message_oriented_connection_impl; } - - class message_oriented_connection; - - /** receives incoming messages from a message_oriented_connection object */ - class message_oriented_connection_delegate - { - public: - virtual void on_message( message_oriented_connection* originating_connection, - const message& received_message) = 0; - - virtual void on_connection_closed(message_oriented_connection* originating_connection) = 0; - }; - - /** uses a secure socket to create a connection that reads and writes a stream of `fc::p2p::message` objects */ - class message_oriented_connection - { - public: - message_oriented_connection(message_oriented_connection_delegate* delegate = nullptr); - ~message_oriented_connection(); - fc::tcp_socket& get_socket(); - - void accept(); - void bind(const fc::ip::endpoint& local_endpoint); - void connect_to(const fc::ip::endpoint& remote_endpoint); - - void send_message(const message& message_to_send); - void close_connection(); - void destroy_connection(); - - uint64_t get_total_bytes_sent() const; - uint64_t get_total_bytes_received() const; - fc::time_point get_last_message_sent_time() const; - fc::time_point get_last_message_received_time() const; - fc::time_point get_connection_time() const; - fc::sha512 get_shared_secret() const; - private: - std::unique_ptr my; - }; - typedef std::shared_ptr message_oriented_connection_ptr; - -} } // graphene::net diff --git a/libraries/p2p/include/graphene/p2p/node.hpp b/libraries/p2p/include/graphene/p2p/node.hpp deleted file mode 100644 index b89c1d54a7..0000000000 --- a/libraries/p2p/include/graphene/p2p/node.hpp +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. 
- * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -#pragma once -#include -#include - - - -namespace graphene { namespace p2p { - using namespace graphene::chain; - - struct node_config - { - fc::ip::endpoint server_endpoint; - bool wait_if_not_available = true; - uint32_t desired_peers; - uint32_t max_peers; - /** receive, but don't rebroadcast data */ - bool subscribe_only = false; - public_key_type node_id; - vector seed_nodes; - }; - - struct by_remote_endpoint; - struct by_peer_id; - - /** - * @ingroup object_index - */ - typedef multi_index_container< - peer_connection_ptr, - indexed_by< - ordered_unique< tag, - const_mem_fun< peer_connection, fc::ip::endpoint, &peer_connection::get_remote_endpoint > >, - ordered_unique< tag, member< peer_connection, public_key_type, &peer_connection::node_id > > - > - > peer_connection_index; - - - class node : public std::enable_shared_from_this - { - public: - server( chain_database& db ); - - void add_peer( const fc::ip::endpoint& ep ); - void configure( const node_config& cfg ); - - void on_incomming_connection( peer_connection_ptr new_peer ); - void on_hello( peer_connection_ptr new_peer, hello_message m ); - void on_transaction( peer_connection_ptr from_peer, transaction_message m ); - void on_block( peer_connection_ptr from_peer, block_message m ); - void on_peers( peer_connection_ptr from_peer, peers_message m ); - void on_error( peer_connection_ptr from_peer, error_message m ); - void on_full_block( peer_connection_ptr from_peer, full_block_message m ); - void on_update_connections(); - - private: - /** - * Specifies the network interface and port upon which incoming - * connections should be accepted. - */ - void listen_on_endpoint( fc::ip::endpoint ep, bool wait_if_not_available ); - void accept_loop(); - - graphene::chain::database& _db; - - fc::tcp_server _tcp_server; - fc::ip::endpoint _actual_listening_endpoint; - fc::future _accept_loop_complete; - peer_connection_index _peers; - - }; - -} } /// graphene::p2p diff --git a/libraries/p2p/include/graphene/p2p/peer_connection.hpp b/libraries/p2p/include/graphene/p2p/peer_connection.hpp deleted file mode 100644 index e120c1c0f5..0000000000 --- a/libraries/p2p/include/graphene/p2p/peer_connection.hpp +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. 
- * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ -#pragma once - -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -namespace graphene { namespace p2p { - - class peer_connection; - class peer_connection_delegate - { - public: - virtual void on_message(peer_connection* originating_peer, const message& received_message) = 0; - virtual void on_connection_closed(peer_connection* originating_peer) = 0; - }; - - class peer_connection; - typedef std::shared_ptr peer_connection_ptr; - - - /** - * Each connection maintains its own queue of messages to be sent, when an item - * is first pushed to the queue it starts an async fiber that will sequentially write - * all items until there is nothing left to be sent. - * - * If a particular connection is unable to keep up with the real-time stream of - * messages to be sent then it will be disconnected. The backlog will be measured in - * seconds. - * - * A multi-index container that tracks the - */ - class peer_connection : public message_oriented_connection_delegate, - public std::enable_shared_from_this - { - public: - enum direction_type { inbound, outbound }; - enum connection_state { - connecting = 0, - syncing = 1, - synced = 2 - }; - - fc::time_point connection_initiation_time; - fc::time_point connection_closed_time; - fc::time_point connection_terminated_time; - direction_type direction = outbound; - connection_state state = connecting; - bool is_firewalled = true - - //connection_state state; - fc::microseconds clock_offset; - fc::microseconds round_trip_delay; - - /// data about the peer node - /// @{ - - /** the unique identifier we'll use to refer to the node with. zero-initialized before - * we receive the hello message, at which time it will be filled with either the "node_id" - * from the user_data field of the hello, or if none is present it will be filled with a - * copy of node_public_key */ - public_key_type node_id; - uint32_t core_protocol_version; - std::string user_agent; - - fc::optional graphene_git_revision_sha; - fc::optional graphene_git_revision_unix_timestamp; - fc::optional fc_git_revision_sha; - fc::optional fc_git_revision_unix_timestamp; - fc::optional platform; - fc::optional bitness; - - // for inbound connections, these fields record what the peer sent us in - // its hello message. 
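// The class comment above describes the per-connection send queue: the first push starts a
// single writer that drains the queue in order, and a connection that cannot keep up is
// disconnected. A minimal std::thread-based sketch of that idea (the real code uses fc fibers
// and also enforces a queue-size limit, both omitted here):
#include <deque>
#include <mutex>
#include <string>
#include <thread>

class send_queue
{
   std::deque<std::string> _queue;
   std::mutex              _mutex;
   bool                    _writer_running = false;

   void drain()
   {
      for (;;)
      {
         std::string next;
         {
            std::lock_guard<std::mutex> lock(_mutex);
            if (_queue.empty()) { _writer_running = false; return; }
            next = std::move(_queue.front());
            _queue.pop_front();
         }
         write_to_socket(next);          // stand-in for message_oriented_connection::send_message
      }
   }
   void write_to_socket(const std::string&) { /* blocking socket write elided */ }
public:
   void push(std::string message)
   {
      std::lock_guard<std::mutex> lock(_mutex);
      _queue.push_back(std::move(message));
      if (!_writer_running)
      {
         _writer_running = true;
         std::thread([this] { drain(); }).detach();   // the "async fiber" in the comment above
      }
   }
};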
For outbound, they record what we sent the peer - // in our hello message - fc::ip::address inbound_address; - uint16_t inbound_port; - uint16_t outbound_port; - /// @} - - void send( transaction_message_ptr msg ) - { - // if not in sent_or_received then insert into _pending_send - // if process_send_queue is invalid or complete then - // async process_send_queue - } - - void received_transaction( const transaction_id_type& id ) - { - _sent_or_received.insert(id); - } - - void process_send_queue() - { - // while _pending_send.size() || _pending_blocks.size() - // while there are pending blocks, then take the oldest - // for each transaction id, verify that it exists in _sent_or_received - // else find it in the _pending_send queue and send it - // send one from _pending_send - } - - - std::unordered_map _pending_send; - /// todo: make multi-index that tracks how long items have been cached and removes them - /// after a resasonable period of time (say 10 seconds) - std::unordered_set _sent_or_received; - std::map _pending_blocks; - - - fc::ip::endpoint get_remote_endpoint()const - { return get_socket().get_remote_endpoint(); } - - void on_message(message_oriented_connection* originating_connection, - const message& received_message) override - { - switch( core_message_type_enum( received_message.type ) ) - { - case hello_message_type: - _node->on_hello( shared_from_this(), - received_message.as() ); - break; - case transaction_message_type: - _node->on_transaction( shared_from_this(), - received_message.as() ); - break; - case block_message_type: - _node->on_block( shared_from_this(), - received_message.as() ); - break; - case peer_message_type: - _node->on_peers( shared_from_this(), - received_message.as() ); - break; - } - } - - void on_connection_closed(message_oriented_connection* originating_connection) override - { - _node->on_close( shared_from_this() ); - } - - fc::tcp_socket& get_socket() { return _message_connection.get_socket(); } - - private: - peer_connection_delegate* _node; - fc::optional _remote_endpoint; - message_oriented_connection _message_connection; - - }; - typedef std::shared_ptr peer_connection_ptr; - - - } } // end namespace graphene::p2p - -// not sent over the wire, just reflected for logging -FC_REFLECT_ENUM(graphene::p2p::peer_connection::connection_state, (connecting)(syncing)(synced) ) -FC_REFLECT_ENUM(graphene::p2p::peer_connection::direction_type, (inbound)(outbound) ) diff --git a/libraries/p2p/include/graphene/p2p/stcp_socket.hpp b/libraries/p2p/include/graphene/p2p/stcp_socket.hpp deleted file mode 100644 index 333d96bb82..0000000000 --- a/libraries/p2p/include/graphene/p2p/stcp_socket.hpp +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ -#pragma once -#include -#include -#include - -namespace graphene { namespace p2p { - -/** - * Uses ECDH to negotiate a aes key for communicating - * with other nodes on the network. - */ -class stcp_socket : public virtual fc::iostream -{ - public: - stcp_socket(); - ~stcp_socket(); - fc::tcp_socket& get_socket() { return _sock; } - void accept(); - - void connect_to( const fc::ip::endpoint& remote_endpoint ); - void bind( const fc::ip::endpoint& local_endpoint ); - - virtual size_t readsome( char* buffer, size_t max ); - virtual size_t readsome( const std::shared_ptr& buf, size_t len, size_t offset ); - virtual bool eof()const; - - virtual size_t writesome( const char* buffer, size_t len ); - virtual size_t writesome( const std::shared_ptr& buf, size_t len, size_t offset ); - - virtual void flush(); - virtual void close(); - - using istream::get; - void get( char& c ) { read( &c, 1 ); } - fc::sha512 get_shared_secret() const { return _shared_secret; } - private: - void do_key_exchange(); - - fc::sha512 _shared_secret; - fc::ecc::private_key _priv_key; - fc::array _buf; - //uint32_t _buf_len; - fc::tcp_socket _sock; - fc::aes_encoder _send_aes; - fc::aes_decoder _recv_aes; - std::shared_ptr _read_buffer; - std::shared_ptr _write_buffer; -#ifndef NDEBUG - bool _read_buffer_in_use; - bool _write_buffer_in_use; -#endif -}; - -typedef std::shared_ptr stcp_socket_ptr; - -} } // graphene::p2p diff --git a/libraries/p2p/message_oriented_connection.cpp b/libraries/p2p/message_oriented_connection.cpp deleted file mode 100644 index a9e23c3471..0000000000 --- a/libraries/p2p/message_oriented_connection.cpp +++ /dev/null @@ -1,412 +0,0 @@ -/* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#ifdef DEFAULT_LOGGER -# undef DEFAULT_LOGGER -#endif -#define DEFAULT_LOGGER "p2p" - -#ifndef NDEBUG -# define VERIFY_CORRECT_THREAD() assert(_thread->is_current()) -#else -# define VERIFY_CORRECT_THREAD() do {} while (0) -#endif - -namespace graphene { namespace p2p { - namespace detail - { - class message_oriented_connection_impl - { - private: - message_oriented_connection* _self; - message_oriented_connection_delegate *_delegate; - stcp_socket _sock; - fc::future _read_loop_done; - uint64_t _bytes_received; - uint64_t _bytes_sent; - - fc::time_point _connected_time; - fc::time_point _last_message_received_time; - fc::time_point _last_message_sent_time; - - bool _send_message_in_progress; - -#ifndef NDEBUG - fc::thread* _thread; -#endif - - void read_loop(); - void start_read_loop(); - public: - fc::tcp_socket& get_socket(); - void accept(); - void connect_to(const fc::ip::endpoint& remote_endpoint); - void bind(const fc::ip::endpoint& local_endpoint); - - message_oriented_connection_impl(message_oriented_connection* self, - message_oriented_connection_delegate* delegate = nullptr); - ~message_oriented_connection_impl(); - - void send_message(const message& message_to_send); - void close_connection(); - void destroy_connection(); - - uint64_t get_total_bytes_sent() const; - uint64_t get_total_bytes_received() const; - - fc::time_point get_last_message_sent_time() const; - fc::time_point get_last_message_received_time() const; - fc::time_point get_connection_time() const { return _connected_time; } - fc::sha512 get_shared_secret() const; - }; - - message_oriented_connection_impl::message_oriented_connection_impl(message_oriented_connection* self, - message_oriented_connection_delegate* delegate) - : _self(self), - _delegate(delegate), - _bytes_received(0), - _bytes_sent(0), - _send_message_in_progress(false) -#ifndef NDEBUG - ,_thread(&fc::thread::current()) -#endif - { - } - message_oriented_connection_impl::~message_oriented_connection_impl() - { - VERIFY_CORRECT_THREAD(); - destroy_connection(); - } - - fc::tcp_socket& message_oriented_connection_impl::get_socket() - { - VERIFY_CORRECT_THREAD(); - return _sock.get_socket(); - } - - void message_oriented_connection_impl::accept() - { - VERIFY_CORRECT_THREAD(); - _sock.accept(); - assert(!_read_loop_done.valid()); // check to be sure we never launch two read loops - _read_loop_done = fc::async([=](){ read_loop(); }, "message read_loop"); - } - - void message_oriented_connection_impl::connect_to(const fc::ip::endpoint& remote_endpoint) - { - VERIFY_CORRECT_THREAD(); - _sock.connect_to(remote_endpoint); - assert(!_read_loop_done.valid()); // check to be sure we never launch two read loops - _read_loop_done = fc::async([=](){ read_loop(); }, "message read_loop"); - } - - void message_oriented_connection_impl::bind(const fc::ip::endpoint& local_endpoint) - { - VERIFY_CORRECT_THREAD(); - _sock.bind(local_endpoint); - } - - - void message_oriented_connection_impl::read_loop() - { - VERIFY_CORRECT_THREAD(); - const int BUFFER_SIZE = 16; - const int LEFTOVER = BUFFER_SIZE - sizeof(message_header); - static_assert(BUFFER_SIZE >= sizeof(message_header), "insufficient buffer"); - - _connected_time = fc::time_point::now(); - - fc::oexception exception_to_rethrow; - bool call_on_connection_closed = false; - - try - { - message m; - while( true ) - { - char buffer[BUFFER_SIZE]; - _sock.read(buffer, BUFFER_SIZE); - _bytes_received += BUFFER_SIZE; - 
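// A note on the framing math used by this read loop and mirrored in send_message() below:
// every message goes over the wire as an 8-byte message_header (uint32_t size + uint32_t
// msg_type) plus the payload, padded so the total is a multiple of 16 bytes to match the
// AES block size used by stcp_socket. The first 16-byte read therefore already contains
// LEFTOVER = 16 - sizeof(message_header) = 8 payload bytes. Worked example for m.size == 100:
//   remaining_bytes_with_padding = 16 * ((100 - 8 + 15) / 16) = 96
//   total bytes read             = 16 + 96 = 112 = 8 (header) + 100 (payload) + 4 (padding)
// and on the sending side size_with_padding = 16 * ((8 + 100 + 15) / 16) = 112, so both ends
// agree on the padded length.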
memcpy((char*)&m, buffer, sizeof(message_header)); - - FC_ASSERT( m.size <= MAX_MESSAGE_SIZE, "", ("m.size",m.size)("MAX_MESSAGE_SIZE",MAX_MESSAGE_SIZE) ); - - size_t remaining_bytes_with_padding = 16 * ((m.size - LEFTOVER + 15) / 16); - m.data.resize(LEFTOVER + remaining_bytes_with_padding); //give extra 16 bytes to allow for padding added in send call - std::copy(buffer + sizeof(message_header), buffer + sizeof(buffer), m.data.begin()); - if (remaining_bytes_with_padding) - { - _sock.read(&m.data[LEFTOVER], remaining_bytes_with_padding); - _bytes_received += remaining_bytes_with_padding; - } - m.data.resize(m.size); // truncate off the padding bytes - - _last_message_received_time = fc::time_point::now(); - - try - { - // message handling errors are warnings... - _delegate->on_message(_self, m); - } - /// Dedicated catches needed to distinguish from general fc::exception - catch ( const fc::canceled_exception& e ) { throw e; } - catch ( const fc::eof_exception& e ) { throw e; } - catch ( const fc::exception& e) - { - /// Here loop should be continued so exception should be just caught locally. - wlog( "message transmission failed ${er}", ("er", e.to_detail_string() ) ); - throw; - } - } - } - catch ( const fc::canceled_exception& e ) - { - wlog( "caught a canceled_exception in read_loop. this should mean we're in the process of deleting this object already, so there's no need to notify the delegate: ${e}", ("e", e.to_detail_string() ) ); - throw; - } - catch ( const fc::eof_exception& e ) - { - wlog( "disconnected ${e}", ("e", e.to_detail_string() ) ); - call_on_connection_closed = true; - } - catch ( const fc::exception& e ) - { - elog( "disconnected ${er}", ("er", e.to_detail_string() ) ); - call_on_connection_closed = true; - exception_to_rethrow = fc::unhandled_exception(FC_LOG_MESSAGE(warn, "disconnected: ${e}", ("e", e.to_detail_string()))); - } - catch ( const std::exception& e ) - { - elog( "disconnected ${er}", ("er", e.what() ) ); - call_on_connection_closed = true; - exception_to_rethrow = fc::unhandled_exception(FC_LOG_MESSAGE(warn, "disconnected: ${e}", ("e", e.what()))); - } - catch ( ... 
) - { - elog( "unexpected exception" ); - call_on_connection_closed = true; - exception_to_rethrow = fc::unhandled_exception(FC_LOG_MESSAGE(warn, "disconnected: ${e}", ("e", fc::except_str()))); - } - - if (call_on_connection_closed) - _delegate->on_connection_closed(_self); - - if (exception_to_rethrow) - throw *exception_to_rethrow; - } - - void message_oriented_connection_impl::send_message(const message& message_to_send) - { - VERIFY_CORRECT_THREAD(); -#if 0 // this gets too verbose -#ifndef NDEBUG - fc::optional remote_endpoint; - if (_sock.get_socket().is_open()) - remote_endpoint = _sock.get_socket().remote_endpoint(); - struct scope_logger { - const fc::optional& endpoint; - scope_logger(const fc::optional& endpoint) : endpoint(endpoint) { dlog("entering message_oriented_connection::send_message() for peer ${endpoint}", ("endpoint", endpoint)); } - ~scope_logger() { dlog("leaving message_oriented_connection::send_message() for peer ${endpoint}", ("endpoint", endpoint)); } - } send_message_scope_logger(remote_endpoint); -#endif -#endif - struct verify_no_send_in_progress { - bool& var; - verify_no_send_in_progress(bool& var) : var(var) - { - if (var) - elog("Error: two tasks are calling message_oriented_connection::send_message() at the same time"); - assert(!var); - var = true; - } - ~verify_no_send_in_progress() { var = false; } - } _verify_no_send_in_progress(_send_message_in_progress); - - try - { - size_t size_of_message_and_header = sizeof(message_header) + message_to_send.size; - if( message_to_send.size > MAX_MESSAGE_SIZE ) - elog("Trying to send a message larger than MAX_MESSAGE_SIZE. This probably won't work..."); - //pad the message we send to a multiple of 16 bytes - size_t size_with_padding = 16 * ((size_of_message_and_header + 15) / 16); - std::unique_ptr padded_message(new char[size_with_padding]); - memcpy(padded_message.get(), (char*)&message_to_send, sizeof(message_header)); - memcpy(padded_message.get() + sizeof(message_header), message_to_send.data.data(), message_to_send.size ); - _sock.write(padded_message.get(), size_with_padding); - _sock.flush(); - _bytes_sent += size_with_padding; - _last_message_sent_time = fc::time_point::now(); - } FC_RETHROW_EXCEPTIONS( warn, "unable to send message" ); - } - - void message_oriented_connection_impl::close_connection() - { - VERIFY_CORRECT_THREAD(); - _sock.close(); - } - - void message_oriented_connection_impl::destroy_connection() - { - VERIFY_CORRECT_THREAD(); - - fc::optional remote_endpoint; - if (_sock.get_socket().is_open()) - remote_endpoint = _sock.get_socket().remote_endpoint(); - ilog( "in destroy_connection() for ${endpoint}", ("endpoint", remote_endpoint) ); - - if (_send_message_in_progress) - elog("Error: message_oriented_connection is being destroyed while a send_message is in progress. " - "The task calling send_message() should have been canceled already"); - assert(!_send_message_in_progress); - - try - { - _read_loop_done.cancel_and_wait(__FUNCTION__); - } - catch ( const fc::exception& e ) - { - wlog( "Exception thrown while canceling message_oriented_connection's read_loop, ignoring: ${e}", ("e",e) ); - } - catch (...) 
- { - wlog( "Exception thrown while canceling message_oriented_connection's read_loop, ignoring" ); - } - } - - uint64_t message_oriented_connection_impl::get_total_bytes_sent() const - { - VERIFY_CORRECT_THREAD(); - return _bytes_sent; - } - - uint64_t message_oriented_connection_impl::get_total_bytes_received() const - { - VERIFY_CORRECT_THREAD(); - return _bytes_received; - } - - fc::time_point message_oriented_connection_impl::get_last_message_sent_time() const - { - VERIFY_CORRECT_THREAD(); - return _last_message_sent_time; - } - - fc::time_point message_oriented_connection_impl::get_last_message_received_time() const - { - VERIFY_CORRECT_THREAD(); - return _last_message_received_time; - } - - fc::sha512 message_oriented_connection_impl::get_shared_secret() const - { - VERIFY_CORRECT_THREAD(); - return _sock.get_shared_secret(); - } - - } // end namespace graphene::p2p::detail - - - message_oriented_connection::message_oriented_connection(message_oriented_connection_delegate* delegate) : - my(new detail::message_oriented_connection_impl(this, delegate)) - { - } - - message_oriented_connection::~message_oriented_connection() - { - } - - fc::tcp_socket& message_oriented_connection::get_socket() - { - return my->get_socket(); - } - - void message_oriented_connection::accept() - { - my->accept(); - } - - void message_oriented_connection::connect_to(const fc::ip::endpoint& remote_endpoint) - { - my->connect_to(remote_endpoint); - } - - void message_oriented_connection::bind(const fc::ip::endpoint& local_endpoint) - { - my->bind(local_endpoint); - } - - void message_oriented_connection::send_message(const message& message_to_send) - { - my->send_message(message_to_send); - } - - void message_oriented_connection::close_connection() - { - my->close_connection(); - } - - void message_oriented_connection::destroy_connection() - { - my->destroy_connection(); - } - - uint64_t message_oriented_connection::get_total_bytes_sent() const - { - return my->get_total_bytes_sent(); - } - - uint64_t message_oriented_connection::get_total_bytes_received() const - { - return my->get_total_bytes_received(); - } - - fc::time_point message_oriented_connection::get_last_message_sent_time() const - { - return my->get_last_message_sent_time(); - } - - fc::time_point message_oriented_connection::get_last_message_received_time() const - { - return my->get_last_message_received_time(); - } - fc::time_point message_oriented_connection::get_connection_time() const - { - return my->get_connection_time(); - } - fc::sha512 message_oriented_connection::get_shared_secret() const - { - return my->get_shared_secret(); - } - -} } // end namespace graphene::p2p diff --git a/libraries/p2p/node.cpp b/libraries/p2p/node.cpp deleted file mode 100644 index 3de4104797..0000000000 --- a/libraries/p2p/node.cpp +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ -#include - -namespace graphene { namespace p2p { - - node::node( chain_database& db ) - :_db(db) - { - - } - - node::~node() - { - - } - - void node::add_peer( const fc::ip::endpoint& ep ) - { - - } - - void node::configure( const node_config& cfg ) - { - listen_on_endpoint( cfg.server_endpoint, wait_if_not_available ); - - /** don't allow node to go out of scope until accept loop exits */ - auto self = shared_from_this(); - _accept_loop_complete = fc::async( [self](){ self->accept_loop(); } ) - } - - void node::accept_loop() - { - auto self = shared_from_this(); - while( !_accept_loop_complete.canceled() ) - { - try { - auto new_peer = std::make_shared(self); - _tcp_server.accept( new_peer.get_socket() ); - - if( _accept_loop_complete.canceled() ) - return; - - _peers.insert( new_peer ); - - - - // limit the rate at which we accept connections to mitigate DOS attacks - fc::usleep( fc::milliseconds(10) ); - } FC_CAPTURE_AND_RETHROW() - } - } // accept_loop() - - - - void node::listen_on_endpoint( fc::ip::endpoint ep, bool wait_if_not_available ) - { - if( ep.port() != 0 ) - { - // if the user specified a port, we only want to bind to it if it's not already - // being used by another application. During normal operation, we set the - // SO_REUSEADDR/SO_REUSEPORT flags so that we can bind outbound sockets to the - // same local endpoint as we're listening on here. On some platforms, setting - // those flags will prevent us from detecting that other applications are - // listening on that port. We'd like to detect that, so we'll set up a temporary - // tcp server without that flag to see if we can listen on that port. 
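// A minimal sketch of the probe described in the comment above, using only the
// fc::tcp_server calls that appear in this file (listen on an endpoint or on a bare
// port, and letting the temporary server go out of scope to release the socket).
// The helper name port_is_free is illustrative and not part of the original code.
bool port_is_free( const fc::ip::endpoint& ep )
{
   try
   {
      fc::tcp_server temporary_server;             // no reuse flags set, on purpose
      if( ep.get_address() != fc::ip::address() )
         temporary_server.listen( ep );            // bind the exact address:port
      else
         temporary_server.listen( ep.port() );     // bind the port on all interfaces
      return true;                                 // destructor closes the socket and frees the port
   }
   catch( const fc::exception& )
   {
      return false;                                // another process already owns the port
   }
}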
- bool first = true; - for( ;; ) - { - bool listen_failed = false; - - try - { - fc::tcp_server temporary_server; - if( listen_endpoint.get_address() != fc::ip::address() ) - temporary_server.listen( ep ); - else - temporary_server.listen( ep.port() ); - break; - } - catch ( const fc::exception&) - { - listen_failed = true; - } - - if (listen_failed) - { - if( wait_if_endpoint_is_busy ) - { - std::ostringstream error_message_stream; - if( first ) - { - error_message_stream << "Unable to listen for connections on port " - << ep.port() - << ", retrying in a few seconds\n"; - error_message_stream << "You can wait for it to become available, or restart " - "this program using\n"; - error_message_stream << "the --p2p-port option to specify another port\n"; - first = false; - } - else - { - error_message_stream << "\nStill waiting for port " << listen_endpoint.port() << " to become available\n"; - } - - std::string error_message = error_message_stream.str(); - ulog(error_message); - fc::usleep( fc::seconds(5 ) ); - } - else // don't wait, just find a random port - { - wlog( "unable to bind on the requested endpoint ${endpoint}, " - "which probably means that endpoint is already in use", - ( "endpoint", ep ) ); - ep.set_port( 0 ); - } - } // if (listen_failed) - } // for(;;) - } // if (listen_endpoint.port() != 0) - - - _tcp_server.set_reuse_address(); - try - { - if( ep.get_address() != fc::ip::address() ) - _tcp_server.listen( ep ); - else - _tcp_server.listen( ep.port() ); - - _actual_listening_endpoint = _tcp_server.get_local_endpoint(); - ilog( "listening for connections on endpoint ${endpoint} (our first choice)", - ( "endpoint", _actual_listening_endpoint ) ); - } - catch ( fc::exception& e ) - { - FC_RETHROW_EXCEPTION( e, error, - "unable to listen on ${endpoint}", ("endpoint",listen_endpoint ) ); - } - } - - - -} } diff --git a/libraries/p2p/peer_connection.cpp b/libraries/p2p/peer_connection.cpp deleted file mode 100644 index 0f5b6e0c41..0000000000 --- a/libraries/p2p/peer_connection.cpp +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ -#include - -namespace graphene { namespace p2p { - -} } //graphene::p2p - - diff --git a/libraries/p2p/stcp_socket.cpp b/libraries/p2p/stcp_socket.cpp deleted file mode 100644 index 0a54bc2e05..0000000000 --- a/libraries/p2p/stcp_socket.cpp +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ -#include - -#include - -#include -#include -#include -#include -#include -#include - -#include - -namespace graphene { namespace p2p { - -stcp_socket::stcp_socket() -//:_buf_len(0) -#ifndef NDEBUG - : _read_buffer_in_use(false), - _write_buffer_in_use(false) -#endif -{ -} -stcp_socket::~stcp_socket() -{ -} - -void stcp_socket::do_key_exchange() -{ - _priv_key = fc::ecc::private_key::generate(); - fc::ecc::public_key pub = _priv_key.get_public_key(); - fc::ecc::public_key_data s = pub.serialize(); - std::shared_ptr serialized_key_buffer(new char[sizeof(fc::ecc::public_key_data)], [](char* p){ delete[] p; }); - memcpy(serialized_key_buffer.get(), (char*)&s, sizeof(fc::ecc::public_key_data)); - _sock.write( serialized_key_buffer, sizeof(fc::ecc::public_key_data) ); - _sock.read( serialized_key_buffer, sizeof(fc::ecc::public_key_data) ); - fc::ecc::public_key_data rpub; - memcpy((char*)&rpub, serialized_key_buffer.get(), sizeof(fc::ecc::public_key_data)); - - _shared_secret = _priv_key.get_shared_secret( rpub ); -// ilog("shared secret ${s}", ("s", shared_secret) ); - _send_aes.init( fc::sha256::hash( (char*)&_shared_secret, sizeof(_shared_secret) ), - fc::city_hash_crc_128((char*)&_shared_secret,sizeof(_shared_secret) ) ); - _recv_aes.init( fc::sha256::hash( (char*)&_shared_secret, sizeof(_shared_secret) ), - fc::city_hash_crc_128((char*)&_shared_secret,sizeof(_shared_secret) ) ); -} - - -void stcp_socket::connect_to( const fc::ip::endpoint& remote_endpoint ) -{ - _sock.connect_to( remote_endpoint ); - do_key_exchange(); -} - -void stcp_socket::bind( const fc::ip::endpoint& local_endpoint ) -{ - _sock.bind(local_endpoint); -} - -/** - * This method must read at least 16 bytes at a time from - * the underlying TCP socket so that it can decrypt them. It - * will buffer any left-over. - */ -size_t stcp_socket::readsome( char* buffer, size_t len ) -{ try { - assert( len > 0 && (len % 16) == 0 ); - -#ifndef NDEBUG - // This code was written with the assumption that you'd only be making one call to readsome - // at a time so it reuses _read_buffer. 
If you really need to make concurrent calls to - // readsome(), you'll need to prevent reusing _read_buffer here - struct check_buffer_in_use { - bool& _buffer_in_use; - check_buffer_in_use(bool& buffer_in_use) : _buffer_in_use(buffer_in_use) { assert(!_buffer_in_use); _buffer_in_use = true; } - ~check_buffer_in_use() { assert(_buffer_in_use); _buffer_in_use = false; } - } buffer_in_use_checker(_read_buffer_in_use); -#endif - - const size_t read_buffer_length = 4096; - if (!_read_buffer) - _read_buffer.reset(new char[read_buffer_length], [](char* p){ delete[] p; }); - - len = std::min(read_buffer_length, len); - - size_t s = _sock.readsome( _read_buffer, len, 0 ); - if( s % 16 ) - { - _sock.read(_read_buffer, 16 - (s%16), s); - s += 16-(s%16); - } - _recv_aes.decode( _read_buffer.get(), s, buffer ); - return s; -} FC_RETHROW_EXCEPTIONS( warn, "", ("len",len) ) } - -size_t stcp_socket::readsome( const std::shared_ptr& buf, size_t len, size_t offset ) -{ - return readsome(buf.get() + offset, len); -} - -bool stcp_socket::eof()const -{ - return _sock.eof(); -} - -size_t stcp_socket::writesome( const char* buffer, size_t len ) -{ try { - assert( len > 0 && (len % 16) == 0 ); - -#ifndef NDEBUG - // This code was written with the assumption that you'd only be making one call to writesome - // at a time so it reuses _write_buffer. If you really need to make concurrent calls to - // writesome(), you'll need to prevent reusing _write_buffer here - struct check_buffer_in_use { - bool& _buffer_in_use; - check_buffer_in_use(bool& buffer_in_use) : _buffer_in_use(buffer_in_use) { assert(!_buffer_in_use); _buffer_in_use = true; } - ~check_buffer_in_use() { assert(_buffer_in_use); _buffer_in_use = false; } - } buffer_in_use_checker(_write_buffer_in_use); -#endif - - const std::size_t write_buffer_length = 4096; - if (!_write_buffer) - _write_buffer.reset(new char[write_buffer_length], [](char* p){ delete[] p; }); - len = std::min(write_buffer_length, len); - memset(_write_buffer.get(), 0, len); // just in case aes.encode screws up - /** - * every sizeof(crypt_buf) bytes the aes channel - * has an error and doesn't decrypt properly... disable - * for now because we are going to upgrade to something - * better. 
- */ - uint32_t ciphertext_len = _send_aes.encode( buffer, len, _write_buffer.get() ); - assert(ciphertext_len == len); - _sock.write( _write_buffer, ciphertext_len ); - return ciphertext_len; -} FC_RETHROW_EXCEPTIONS( warn, "", ("len",len) ) } - -size_t stcp_socket::writesome( const std::shared_ptr& buf, size_t len, size_t offset ) -{ - return writesome(buf.get() + offset, len); -} - -void stcp_socket::flush() -{ - _sock.flush(); -} - - -void stcp_socket::close() -{ - try - { - _sock.close(); - }FC_RETHROW_EXCEPTIONS( warn, "error closing stcp socket" ); -} - -void stcp_socket::accept() -{ - do_key_exchange(); -} - - -}} // namespace graphene::p2p - diff --git a/libraries/plugins/CMakeLists.txt b/libraries/plugins/CMakeLists.txt index 7a02ceba30..caacb8bd53 100644 --- a/libraries/plugins/CMakeLists.txt +++ b/libraries/plugins/CMakeLists.txt @@ -1,5 +1,9 @@ add_subdirectory( witness ) add_subdirectory( account_history ) +add_subdirectory( elasticsearch ) add_subdirectory( market_history ) +add_subdirectory( grouped_orders ) add_subdirectory( delayed_node ) -add_subdirectory( debug_witness ) \ No newline at end of file +add_subdirectory( debug_witness ) +add_subdirectory( snapshot ) +add_subdirectory( es_objects ) diff --git a/libraries/plugins/README.md b/libraries/plugins/README.md new file mode 100644 index 0000000000..18037d6c6e --- /dev/null +++ b/libraries/plugins/README.md @@ -0,0 +1,21 @@ +# BitShares Plugins + +The bitshares plugins are a collection of tools that brings new functionality without the need of modifications in the consensus and more sensitive areas of the bitshares-core. + +The main source of I/O of the bitshares blockchain is the API. Plugins are a more powerful alternative to build more complex developments for when the current API is not enough. + +Plugins are optional to run by node operator according to their needs. However, all plugins here will be compiled. 
There are plans for optional build of plugins at: [Issue 533](https://github.com/bitshares/bitshares-core/issues/533) + +# Available Plugins + +Folder | Name | Description | Category | Status | SpaceID +-----------------------------------|--------------------------|-----------------------------------------------------------------------------|----------------|---------------|--------------| +[account_history](account_history) | Account History | Save account history data | History | Stable | 4 +[debug_witness](debug_witness) | Debug Witness | Run "what-if" tests | Debug | Stable | +[delayed_node](delayed_node) | Delayed Node | Avoid forks by running a several times confirmed and delayed blockchain | Business | Stable | +[elasticsearch](elasticsearch) | ElasticSearch Operations | Save account history data into elasticsearch database | History | Experimental | 6 +[es_objects](es_objects) | ElasticSearch Objects | Save selected objects into elasticsearch database | History | Experimental | +[grouped_orders](grouped_orders) | Grouped Orders | Expose api to create a grouped order book of bitshares markets | Market data | Experimental | +[market_history](market_history) | Market History | Save market history data | Market data | Stable | 5 +[snapshot](snapshot) | Snapshot | Get a json of all objects in blockchain at a specificed time or block | Debug | Stable | +[witness](witness) | Witness | Generate and sign blocks | Block producer | Stable | diff --git a/libraries/plugins/account_history/CMakeLists.txt b/libraries/plugins/account_history/CMakeLists.txt index 18fd613539..4af81abb12 100644 --- a/libraries/plugins/account_history/CMakeLists.txt +++ b/libraries/plugins/account_history/CMakeLists.txt @@ -19,3 +19,5 @@ install( TARGETS LIBRARY DESTINATION lib ARCHIVE DESTINATION lib ) +INSTALL( FILES ${HEADERS} DESTINATION "include/graphene/account_history" ) + diff --git a/libraries/plugins/account_history/account_history_plugin.cpp b/libraries/plugins/account_history/account_history_plugin.cpp index bdd1fb230d..4697837faa 100644 --- a/libraries/plugins/account_history/account_history_plugin.cpp +++ b/libraries/plugins/account_history/account_history_plugin.cpp @@ -24,7 +24,7 @@ #include -#include +#include #include #include @@ -34,7 +34,6 @@ #include #include -#include #include namespace graphene { namespace account_history { @@ -64,6 +63,13 @@ class account_history_plugin_impl account_history_plugin& _self; flat_set _tracked_accounts; + bool _partial_operations = false; + primary_index< operation_history_index >* _oho_index; + uint64_t _max_ops_per_account = -1; + private: + /** add one history record, then check and remove the earliest history record */ + void add_account_history( const account_id_type account_id, const operation_history_id_type op_id ); + }; account_history_plugin_impl::~account_history_plugin_impl() @@ -75,80 +81,177 @@ void account_history_plugin_impl::update_account_histories( const signed_block& { graphene::chain::database& db = database(); const vector >& hist = db.get_applied_operations(); + bool is_first = true; + auto skip_oho_id = [&is_first,&db,this]() { + if( is_first && db._undo_db.enabled() ) // this ensures that the current id is rolled back on undo + { + db.remove( db.create( []( operation_history_object& obj) {} ) ); + is_first = false; + } + else + _oho_index->use_next_id(); + }; + for( const optional< operation_history_object >& o_op : hist ) { - // add to the operation history index - const auto& oho = db.create( [&]( operation_history_object& h ) - { - if( 
o_op.valid() ) - h = *o_op; - } ); + optional oho; + + auto create_oho = [&]() { + is_first = false; + return optional( db.create( [&]( operation_history_object& h ) + { + if( o_op.valid() ) + { + h.op = o_op->op; + h.result = o_op->result; + h.block_num = o_op->block_num; + h.trx_in_block = o_op->trx_in_block; + h.op_in_trx = o_op->op_in_trx; + h.virtual_op = o_op->virtual_op; + } + } ) ); + }; - if( !o_op.valid() ) + if( !o_op.valid() || ( _max_ops_per_account == 0 && _partial_operations ) ) { - ilog( "removing failed operation with ID: ${id}", ("id", oho.id) ); - db.remove( oho ); + // Note: the 2nd and 3rd checks above are for better performance, when the db is not clean, + // they will break consistency of account_stats.total_ops and removed_ops and most_recent_op + skip_oho_id(); continue; } + else if( !_partial_operations ) + // add to the operation history index + oho = create_oho(); const operation_history_object& op = *o_op; // get the set of accounts this operation applies to flat_set impacted; vector other; - operation_get_required_authorities( op.op, impacted, impacted, other ); + operation_get_required_authorities( op.op, impacted, impacted, other ); // fee_payer is added here if( op.op.which() == operation::tag< account_create_operation >::value ) - impacted.insert( oho.result.get() ); + impacted.insert( op.result.get() ); else - graphene::app::operation_get_impacted_accounts( op.op, impacted ); + graphene::chain::operation_get_impacted_accounts( op.op, impacted ); for( auto& a : other ) for( auto& item : a.account_auths ) impacted.insert( item.first ); + // be here, either _max_ops_per_account > 0, or _partial_operations == false, or both + // if _partial_operations == false, oho should have been created above + // so the only case should be checked here is: + // whether need to create oho if _max_ops_per_account > 0 and _partial_operations == true + // for each operation this account applies to that is in the config link it into the history - if( _tracked_accounts.size() == 0 ) + if( _tracked_accounts.size() == 0 ) // tracking all accounts { - for( auto& account_id : impacted ) + // if tracking all accounts, when impacted is not empty (although it will always be), + // still need to create oho if _max_ops_per_account > 0 and _partial_operations == true + // so always need to create oho if not done + if (!impacted.empty() && !oho.valid()) { oho = create_oho(); } + + if( _max_ops_per_account > 0 ) { - // we don't do index_account_keys here anymore, because - // that indexing now happens in observers' post_evaluate() - - // add history - const auto& stats_obj = account_id(db).statistics(db); - const auto& ath = db.create( [&]( account_transaction_history_object& obj ){ - obj.operation_id = oho.id; - obj.account = account_id; - obj.sequence = stats_obj.total_ops+1; - obj.next = stats_obj.most_recent_op; - }); - db.modify( stats_obj, [&]( account_statistics_object& obj ){ - obj.most_recent_op = ath.id; - obj.total_ops = ath.sequence; - }); + // Note: the check above is for better performance, when the db is not clean, + // it breaks consistency of account_stats.total_ops and removed_ops and most_recent_op, + // but it ensures it's safe to remove old entries in add_account_history(...) 
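// Sketch only, not part of this patch: how the per-account history chain being
// maintained here can be read back -- start at account_statistics_object::most_recent_op
// and follow the 'next' links of each account_transaction_history_object until the
// default-constructed id that marks the end. 'db' and 'account_id' are assumed in scope.
const auto& stats  = account_id(db).statistics(db);
auto        cursor = stats.most_recent_op;                       // newest entry
while( cursor != account_transaction_history_id_type() )         // default id == end of list
{
   const auto& entry = cursor(db);                               // resolve id -> object
   const auto& oho   = entry.operation_id(db);                   // the stored operation_history_object
   // ... inspect oho.op / oho.result here ...
   cursor = entry.next;                                          // step to the next-older entry
}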
+ for( auto& account_id : impacted ) + { + // we don't do index_account_keys here anymore, because + // that indexing now happens in observers' post_evaluate() + + // add history + add_account_history( account_id, oho->id ); + } } } - else + else // tracking a subset of accounts + { + // whether need to create oho if _max_ops_per_account > 0 and _partial_operations == true ? + // the answer: only need to create oho if a tracked account is impacted and need to save history + + if( _max_ops_per_account > 0 ) + { + // Note: the check above is for better performance, when the db is not clean, + // it breaks consistency of account_stats.total_ops and removed_ops and most_recent_op, + // but it ensures it's safe to remove old entries in add_account_history(...) + for( auto account_id : _tracked_accounts ) + { + if( impacted.find( account_id ) != impacted.end() ) + { + if (!oho.valid()) { oho = create_oho(); } + // add history + add_account_history( account_id, oho->id ); + } + } + } + } + if (_partial_operations && ! oho.valid()) + skip_oho_id(); + } +} + +void account_history_plugin_impl::add_account_history( const account_id_type account_id, const operation_history_id_type op_id ) +{ + graphene::chain::database& db = database(); + const auto& stats_obj = account_id(db).statistics(db); + // add new entry + const auto& ath = db.create( [&]( account_transaction_history_object& obj ){ + obj.operation_id = op_id; + obj.account = account_id; + obj.sequence = stats_obj.total_ops + 1; + obj.next = stats_obj.most_recent_op; + }); + db.modify( stats_obj, [&]( account_statistics_object& obj ){ + obj.most_recent_op = ath.id; + obj.total_ops = ath.sequence; + }); + // remove the earliest account history entry if too many + // _max_ops_per_account is guaranteed to be non-zero outside + if( stats_obj.total_ops - stats_obj.removed_ops > _max_ops_per_account ) + { + // look for the earliest entry + const auto& his_idx = db.get_index_type(); + const auto& by_seq_idx = his_idx.indices().get(); + auto itr = by_seq_idx.lower_bound( boost::make_tuple( account_id, 0 ) ); + // make sure don't remove the one just added + if( itr != by_seq_idx.end() && itr->account == account_id && itr->id != ath.id ) { - for( auto account_id : _tracked_accounts ) + // if found, remove the entry, and adjust account stats object + const auto remove_op_id = itr->operation_id; + const auto itr_remove = itr; + ++itr; + db.remove( *itr_remove ); + db.modify( stats_obj, [&]( account_statistics_object& obj ){ + obj.removed_ops = obj.removed_ops + 1; + }); + // modify previous node's next pointer + // this should be always true, but just have a check here + if( itr != by_seq_idx.end() && itr->account == account_id ) + { + db.modify( *itr, [&]( account_transaction_history_object& obj ){ + obj.next = account_transaction_history_id_type(); + }); + } + // else need to modify the head pointer, but it shouldn't be true + + // remove the operation history entry (1.11.x) if configured and no reference left + if( _partial_operations ) { - if( impacted.find( account_id ) != impacted.end() ) + // check for references + const auto& by_opid_idx = his_idx.indices().get(); + if( by_opid_idx.find( remove_op_id ) == by_opid_idx.end() ) { - // add history - const auto& stats_obj = account_id(db).statistics(db); - const auto& ath = db.create( [&]( account_transaction_history_object& obj ){ - obj.operation_id = oho.id; - obj.next = stats_obj.most_recent_op; - }); - db.modify( stats_obj, [&]( account_statistics_object& obj ){ - obj.most_recent_op = ath.id; - }); + 
// if no reference, remove + db.remove( remove_op_id(db) ); } } } } } + } // end namespace detail @@ -177,6 +280,8 @@ void account_history_plugin::plugin_set_program_options( { cli.add_options() ("track-account", boost::program_options::value>()->composing()->multitoken(), "Account ID to track history for (may specify multiple times)") + ("partial-operations", boost::program_options::value(), "Keep only those operations in memory that are related to account history tracking") + ("max-ops-per-account", boost::program_options::value(), "Maximum number of operations per account will be kept in memory") ; cfg.add(cli); } @@ -184,10 +289,16 @@ void account_history_plugin::plugin_set_program_options( void account_history_plugin::plugin_initialize(const boost::program_options::variables_map& options) { database().applied_block.connect( [&]( const signed_block& b){ my->update_account_histories(b); } ); - database().add_index< primary_index< simple_index< operation_history_object > > >(); + my->_oho_index = database().add_index< primary_index< operation_history_index > >(); database().add_index< primary_index< account_transaction_history_index > >(); - LOAD_VALUE_SET(options, "tracked-accounts", my->_tracked_accounts, graphene::chain::account_id_type); + LOAD_VALUE_SET(options, "track-account", my->_tracked_accounts, graphene::chain::account_id_type); + if (options.count("partial-operations")) { + my->_partial_operations = options["partial-operations"].as(); + } + if (options.count("max-ops-per-account")) { + my->_max_ops_per_account = options["max-ops-per-account"].as(); + } } void account_history_plugin::plugin_startup() diff --git a/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp b/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp index ef89c488f5..7bec37ddfe 100644 --- a/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp +++ b/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp @@ -47,13 +47,12 @@ namespace graphene { namespace account_history { // time. 
// #ifndef ACCOUNT_HISTORY_SPACE_ID -#define ACCOUNT_HISTORY_SPACE_ID 5 +#define ACCOUNT_HISTORY_SPACE_ID 4 #endif enum account_history_object_type { - key_account_object_type = 0, - bucket_object_type = 1 ///< used in market_history_plugin + key_account_object_type = 0 }; diff --git a/libraries/plugins/debug_witness/debug_api.cpp b/libraries/plugins/debug_witness/debug_api.cpp index 5c2d9a3720..43ffd6cd38 100644 --- a/libraries/plugins/debug_witness/debug_api.cpp +++ b/libraries/plugins/debug_witness/debug_api.cpp @@ -21,12 +21,11 @@ namespace detail { class debug_api_impl { public: - debug_api_impl( graphene::app::application& _app ); + explicit debug_api_impl( graphene::app::application& _app ); void debug_push_blocks( const std::string& src_filename, uint32_t count ); void debug_generate_blocks( const std::string& debug_key, uint32_t count ); void debug_update_object( const fc::variant_object& update ); - //void debug_save_db( std::string db_path ); void debug_stream_json_objects( const std::string& filename ); void debug_stream_json_objects_flush(); std::shared_ptr< graphene::debug_witness_plugin::debug_witness_plugin > get_plugin(); @@ -70,7 +69,6 @@ void debug_api_impl::debug_push_blocks( const std::string& src_filename, uint32_ } } ilog( "Completed loading block_database successfully" ); - return; } } @@ -92,7 +90,7 @@ void debug_api_impl::debug_generate_blocks( const std::string& debug_key, uint32 if( scheduled_key != debug_public_key ) { ilog( "Modified key for witness ${w}", ("w", scheduled_witness) ); - fc::mutable_variant_object update; + fc::limited_mutable_variant_object update( GRAPHENE_MAX_NESTED_OBJECTS ); update("_action", "update")("id", scheduled_witness)("signing_key", debug_public_key); db->debug_update( update ); } diff --git a/libraries/plugins/debug_witness/debug_witness.cpp b/libraries/plugins/debug_witness/debug_witness.cpp index 7bb5562dad..7268006d3b 100644 --- a/libraries/plugins/debug_witness/debug_witness.cpp +++ b/libraries/plugins/debug_witness/debug_witness.cpp @@ -25,11 +25,9 @@ #include #include -#include #include -#include #include #include @@ -48,7 +46,7 @@ void debug_witness_plugin::plugin_set_program_options( { auto default_priv_key = fc::ecc::private_key::regenerate(fc::sha256::hash(std::string("nathan"))); command_line_options.add_options() - ("private-key", bpo::value>()->composing()->multitoken()-> + ("debug-private-key", bpo::value>()->composing()->multitoken()-> DEFAULT_VALUE_VECTOR(std::make_pair(chain::public_key_type(default_priv_key.get_public_key()), graphene::utilities::key_to_wif(default_priv_key))), "Tuple of [PublicKey, WIF private key] (may specify multiple times)"); config_file_options.add(command_line_options); @@ -64,12 +62,12 @@ void debug_witness_plugin::plugin_initialize(const boost::program_options::varia ilog("debug_witness plugin: plugin_initialize() begin"); _options = &options; - if( options.count("private-key") ) + if( options.count("debug-private-key") ) { - const std::vector key_id_to_wif_pair_strings = options["private-key"].as>(); + const std::vector key_id_to_wif_pair_strings = options["debug-private-key"].as>(); for (const std::string& key_id_to_wif_pair_string : key_id_to_wif_pair_strings) { - auto key_id_to_wif_pair = graphene::app::dejsonify >(key_id_to_wif_pair_string); + auto key_id_to_wif_pair = graphene::app::dejsonify >(key_id_to_wif_pair_string, GRAPHENE_MAX_NESTED_OBJECTS); idump((key_id_to_wif_pair)); fc::optional private_key = graphene::utilities::wif_to_key(key_id_to_wif_pair.second); if (!private_key) 
@@ -78,7 +76,7 @@ void debug_witness_plugin::plugin_initialize(const boost::program_options::varia // just here to ease the transition, can be removed soon try { - private_key = fc::variant(key_id_to_wif_pair.second).as(); + private_key = fc::variant( key_id_to_wif_pair.second, GRAPHENE_MAX_NESTED_OBJECTS ).as( GRAPHENE_MAX_NESTED_OBJECTS ); } catch (const fc::exception&) { @@ -99,13 +97,13 @@ void debug_witness_plugin::plugin_startup() // connect needed signals _applied_block_conn = db.applied_block.connect([this](const graphene::chain::signed_block& b){ on_applied_block(b); }); - _changed_objects_conn = db.changed_objects.connect([this](const std::vector& ids){ on_changed_objects(ids); }); - _removed_objects_conn = db.removed_objects.connect([this](const std::vector& objs){ on_removed_objects(objs); }); + _changed_objects_conn = db.changed_objects.connect([this](const std::vector& ids, const fc::flat_set& impacted_accounts){ on_changed_objects(ids, impacted_accounts); }); + _removed_objects_conn = db.removed_objects.connect([this](const std::vector& ids, const std::vector& objs, const fc::flat_set& impacted_accounts){ on_removed_objects(ids, objs, impacted_accounts); }); return; } -void debug_witness_plugin::on_changed_objects( const std::vector& ids ) +void debug_witness_plugin::on_changed_objects( const std::vector& ids, const fc::flat_set& impacted_accounts ) { if( _json_object_stream && (ids.size() > 0) ) { @@ -113,11 +111,7 @@ void debug_witness_plugin::on_changed_objects( const std::vectorto_variant() ) << '\n'; } @@ -125,9 +119,8 @@ void debug_witness_plugin::on_changed_objects( const std::vector objs ) +void debug_witness_plugin::on_removed_objects( const std::vector& ids, const std::vector objs, const fc::flat_set& impacted_accounts ) { - /* if( _json_object_stream ) { for( const graphene::db::object* obj : objs ) @@ -135,7 +128,6 @@ void debug_witness_plugin::on_removed_objects( const std::vectorid ) << "}\n"; } } - */ } void debug_witness_plugin::on_applied_block( const graphene::chain::signed_block& b ) diff --git a/libraries/plugins/debug_witness/include/graphene/debug_witness/debug_witness.hpp b/libraries/plugins/debug_witness/include/graphene/debug_witness/debug_witness.hpp index 0e5c173f46..22c71236b5 100644 --- a/libraries/plugins/debug_witness/include/graphene/debug_witness/debug_witness.hpp +++ b/libraries/plugins/debug_witness/include/graphene/debug_witness/debug_witness.hpp @@ -25,8 +25,10 @@ #include #include +#include #include +#include namespace graphene { namespace debug_witness_plugin { @@ -50,13 +52,13 @@ class debug_witness_plugin : public graphene::app::plugin { private: - void on_changed_objects( const std::vector& ids ); - void on_removed_objects( const std::vector objs ); + void on_changed_objects( const std::vector& ids, const fc::flat_set& impacted_accounts ); + void on_removed_objects( const std::vector& ids, const std::vector objs, const fc::flat_set& impacted_accounts ); void on_applied_block( const graphene::chain::signed_block& b ); boost::program_options::variables_map _options; - std::map _private_keys; + std::map _private_keys; std::shared_ptr< std::ofstream > _json_object_stream; boost::signals2::scoped_connection _applied_block_conn; diff --git a/libraries/plugins/delayed_node/delayed_node_plugin.cpp b/libraries/plugins/delayed_node/delayed_node_plugin.cpp index fb70cb6852..c56f8bb6b3 100644 --- a/libraries/plugins/delayed_node/delayed_node_plugin.cpp +++ b/libraries/plugins/delayed_node/delayed_node_plugin.cpp @@ -30,8 +30,6 @@ #include 
#include #include -#include - namespace graphene { namespace delayed_node { namespace bpo = boost::program_options; @@ -49,7 +47,7 @@ struct delayed_node_plugin_impl { } delayed_node_plugin::delayed_node_plugin() - : my(new detail::delayed_node_plugin_impl) + : my(nullptr) {} delayed_node_plugin::~delayed_node_plugin() @@ -58,14 +56,14 @@ delayed_node_plugin::~delayed_node_plugin() void delayed_node_plugin::plugin_set_program_options(bpo::options_description& cli, bpo::options_description& cfg) { cli.add_options() - ("trusted-node", boost::program_options::value()->required(), "RPC endpoint of a trusted validating node (required)") + ("trusted-node", boost::program_options::value(), "RPC endpoint of a trusted validating node (required for delayed_node)") ; cfg.add(cli); } void delayed_node_plugin::connect() { - my->client_connection = std::make_shared(*my->client.connect(my->remote_endpoint)); + my->client_connection = std::make_shared(*my->client.connect(my->remote_endpoint), GRAPHENE_NET_MAX_NESTED_OBJECTS); my->database_api = my->client_connection->get_remote_api(0); my->client_connection_closed = my->client_connection->closed.connect([this] { connection_failed(); @@ -74,6 +72,8 @@ void delayed_node_plugin::connect() void delayed_node_plugin::plugin_initialize(const boost::program_options::variables_map& options) { + FC_ASSERT(options.count("trusted-node") > 0); + my = std::unique_ptr{ new detail::delayed_node_plugin_impl() }; my->remote_endpoint = "ws://" + options.at("trusted-node").as(); } @@ -101,8 +101,10 @@ void delayed_node_plugin::sync_with_trusted_node() while( remote_dpo.last_irreversible_block_num > db.head_block_num() ) { fc::optional block = my->database_api->get_block( db.head_block_num()+1 ); + // TODO: during sync, decouple requesting blocks from preprocessing + applying them FC_ASSERT(block, "Trusted node claims it has blocks it doesn't actually have."); ilog("Pushing block #${n}", ("n", block->block_num())); + db.precompute_parallel( *block, graphene::chain::database::skip_nothing ).wait(); db.push_block(*block); synced_blocks++; } @@ -142,7 +144,7 @@ void delayed_node_plugin::plugin_startup() connect(); my->database_api->set_block_applied_callback([this]( const fc::variant& block_id ) { - fc::from_variant( block_id, my->last_received_remote_head ); + fc::from_variant( block_id, my->last_received_remote_head, GRAPHENE_MAX_NESTED_OBJECTS ); } ); return; } diff --git a/libraries/plugins/elasticsearch/CMakeLists.txt b/libraries/plugins/elasticsearch/CMakeLists.txt new file mode 100644 index 0000000000..ce9c7c1d1d --- /dev/null +++ b/libraries/plugins/elasticsearch/CMakeLists.txt @@ -0,0 +1,29 @@ +file(GLOB HEADERS "include/graphene/elasticsearch/*.hpp") + +add_library( graphene_elasticsearch + elasticsearch_plugin.cpp + ) +find_package(CURL REQUIRED) +include_directories(${CURL_INCLUDE_DIRS}) +if(MSVC) + set_source_files_properties(elasticsearch_plugin.cpp PROPERTIES COMPILE_FLAGS "/bigobj" ) +endif(MSVC) +if(CURL_STATICLIB) + SET_TARGET_PROPERTIES(graphene_elasticsearch PROPERTIES + COMPILE_DEFINITIONS "CURL_STATICLIB") +endif(CURL_STATICLIB) +target_link_libraries( graphene_elasticsearch graphene_chain graphene_app ${CURL_LIBRARIES} ) +target_include_directories( graphene_elasticsearch + PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" + PUBLIC "${CURL_INCLUDE_DIR}" ) + + +install( TARGETS + graphene_elasticsearch + + RUNTIME DESTINATION bin + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib +) +INSTALL( FILES ${HEADERS} DESTINATION "include/graphene/elasticsearch" ) + 
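The debug_witness and delayed_node hunks above replace unbounded fc variant conversions with depth-limited ones: dejsonify and fc::from_variant now take GRAPHENE_MAX_NESTED_OBJECTS, and mutable_variant_object becomes limited_mutable_variant_object. The following is a minimal sketch of that convention; the function name and the two include paths are assumptions, while the fc calls and the depth constant come from the hunks above.

#include <fc/io/json.hpp>
#include <graphene/chain/protocol/block.hpp>

// Round-trip a reflected chain type through fc::variant with an explicit
// nesting-depth limit, as the patched plugins now do throughout.
void depth_limited_roundtrip( const std::string& json_text )
{
   graphene::chain::signed_block block;
   fc::variant v = fc::json::from_string( json_text );            // parse untrusted JSON
   fc::from_variant( v, block, GRAPHENE_MAX_NESTED_OBJECTS );     // bounded-depth decode
   fc::variant back;
   fc::to_variant( block, back, GRAPHENE_MAX_NESTED_OBJECTS );    // bounded-depth encode
}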
diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp new file mode 100644 index 0000000000..c2cfcb9129 --- /dev/null +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -0,0 +1,487 @@ +/* + * Copyright (c) 2017 Cryptonomex, Inc., and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include +#include +#include +#include +#include + +namespace graphene { namespace elasticsearch { + +namespace detail +{ + +class elasticsearch_plugin_impl +{ + public: + elasticsearch_plugin_impl(elasticsearch_plugin& _plugin) + : _self( _plugin ) + { curl = curl_easy_init(); } + virtual ~elasticsearch_plugin_impl(); + + bool update_account_histories( const signed_block& b ); + + graphene::chain::database& database() + { + return _self.database(); + } + + elasticsearch_plugin& _self; + primary_index< operation_history_index >* _oho_index; + + std::string _elasticsearch_node_url = "http://localhost:9200/"; + uint32_t _elasticsearch_bulk_replay = 10000; + uint32_t _elasticsearch_bulk_sync = 100; + bool _elasticsearch_visitor = false; + std::string _elasticsearch_basic_auth = ""; + std::string _elasticsearch_index_prefix = "bitshares-"; + bool _elasticsearch_operation_object = false; + uint32_t _elasticsearch_start_es_after_block = 0; + CURL *curl; // curl handler + vector bulk_lines; // vector of op lines + vector prepare; + + graphene::utilities::ES es; + uint32_t limit_documents; + int16_t op_type; + operation_history_struct os; + block_struct bs; + visitor_struct vs; + bulk_struct bulk_line_struct; + std::string bulk_line; + std::string index_name; + bool is_sync = false; + private: + bool add_elasticsearch( const account_id_type account_id, const optional& oho, const uint32_t block_number ); + const account_transaction_history_object& addNewEntry(const account_statistics_object& stats_obj, + const account_id_type& account_id, + const optional & oho); + const account_statistics_object& getStatsObject(const account_id_type& account_id); + void growStats(const account_statistics_object& stats_obj, const account_transaction_history_object& ath); + void getOperationType(const optional & oho); + void doOperationHistory(const optional & oho); + void doBlock(uint32_t trx_in_block, const signed_block& b); + void doVisitor(const optional & oho); + void checkState(const fc::time_point_sec& block_time); + void cleanObjects(const 
account_transaction_history_id_type& ath, const account_id_type& account_id); + void createBulkLine(const account_transaction_history_object& ath); + void prepareBulk(const account_transaction_history_id_type& ath_id); + void populateESstruct(); +}; + +elasticsearch_plugin_impl::~elasticsearch_plugin_impl() +{ + return; +} + +bool elasticsearch_plugin_impl::update_account_histories( const signed_block& b ) +{ + checkState(b.timestamp); + index_name = graphene::utilities::generateIndexName(b.timestamp, _elasticsearch_index_prefix); + + graphene::chain::database& db = database(); + const vector >& hist = db.get_applied_operations(); + bool is_first = true; + auto skip_oho_id = [&is_first,&db,this]() { + if( is_first && db._undo_db.enabled() ) // this ensures that the current id is rolled back on undo + { + db.remove( db.create( []( operation_history_object& obj) {} ) ); + is_first = false; + } + else + _oho_index->use_next_id(); + }; + for( const optional< operation_history_object >& o_op : hist ) { + optional oho; + + auto create_oho = [&]() { + is_first = false; + return optional( + db.create([&](operation_history_object &h) { + if (o_op.valid()) + { + h.op = o_op->op; + h.result = o_op->result; + h.block_num = o_op->block_num; + h.trx_in_block = o_op->trx_in_block; + h.op_in_trx = o_op->op_in_trx; + h.virtual_op = o_op->virtual_op; + } + })); + }; + + if( !o_op.valid() ) { + skip_oho_id(); + continue; + } + oho = create_oho(); + + // populate what we can before impacted loop + getOperationType(oho); + doOperationHistory(oho); + doBlock(oho->trx_in_block, b); + if(_elasticsearch_visitor) + doVisitor(oho); + + const operation_history_object& op = *o_op; + + // get the set of accounts this operation applies to + flat_set impacted; + vector other; + operation_get_required_authorities( op.op, impacted, impacted, other ); // fee_payer is added here + + if( op.op.which() == operation::tag< account_create_operation >::value ) + impacted.insert( op.result.get() ); + else + graphene::chain::operation_get_impacted_accounts( op.op, impacted ); + + for( auto& a : other ) + for( auto& item : a.account_auths ) + impacted.insert( item.first ); + + for( auto& account_id : impacted ) + { + if(!add_elasticsearch( account_id, oho, b.block_num() )) + return false; + } + } + // we send bulk at end of block when we are in sync for better real time client experience + if(is_sync) + { + populateESstruct(); + if(es.bulk_lines.size() > 0) + { + prepare.clear(); + if(!graphene::utilities::SendBulk(std::move(es))) + return false; + else + bulk_lines.clear(); + } + } + + if(bulk_lines.size() != limit_documents) + bulk_lines.reserve(limit_documents); + + return true; +} + +void elasticsearch_plugin_impl::checkState(const fc::time_point_sec& block_time) +{ + if((fc::time_point::now() - block_time) < fc::seconds(30)) + { + limit_documents = _elasticsearch_bulk_sync; + is_sync = true; + } + else + { + limit_documents = _elasticsearch_bulk_replay; + is_sync = false; + } +} + +void elasticsearch_plugin_impl::getOperationType(const optional & oho) +{ + if (!oho->id.is_null()) + op_type = oho->op.which(); +} + +void elasticsearch_plugin_impl::doOperationHistory(const optional & oho) +{ + os.trx_in_block = oho->trx_in_block; + os.op_in_trx = oho->op_in_trx; + os.operation_result = fc::json::to_string(oho->result); + os.virtual_op = oho->virtual_op; + + if(_elasticsearch_operation_object) { + oho->op.visit(fc::from_static_variant(os.op_object, FC_PACK_MAX_DEPTH)); + adaptor_struct adaptor; + os.op_object = 
adaptor.adapt(os.op_object.get_object()); + } + else + os.op = fc::json::to_string(oho->op); + +} + +void elasticsearch_plugin_impl::doBlock(uint32_t trx_in_block, const signed_block& b) +{ + std::string trx_id = ""; + if(trx_in_block < b.transactions.size()) + trx_id = b.transactions[trx_in_block].id().str(); + bs.block_num = b.block_num(); + bs.block_time = b.timestamp; + bs.trx_id = trx_id; +} + +void elasticsearch_plugin_impl::doVisitor(const optional & oho) +{ + graphene::chain::database& db = database(); + + operation_visitor o_v; + oho->op.visit(o_v); + + auto fee_asset = o_v.fee_asset(db); + vs.fee_data.asset = o_v.fee_asset; + vs.fee_data.asset_name = fee_asset.symbol; + vs.fee_data.amount = o_v.fee_amount; + vs.fee_data.amount_units = (o_v.fee_amount.value)/(double)asset::scaled_precision(fee_asset.precision).value; + + auto transfer_asset = o_v.transfer_asset_id(db); + vs.transfer_data.asset = o_v.transfer_asset_id; + vs.transfer_data.asset_name = transfer_asset.symbol; + vs.transfer_data.amount = o_v.transfer_amount; + vs.transfer_data.amount_units = (o_v.transfer_amount.value)/(double)asset::scaled_precision(transfer_asset.precision).value; + vs.transfer_data.from = o_v.transfer_from; + vs.transfer_data.to = o_v.transfer_to; + + auto fill_pays_asset = o_v.fill_pays_asset_id(db); + auto fill_receives_asset = o_v.fill_receives_asset_id(db); + vs.fill_data.order_id = o_v.fill_order_id; + vs.fill_data.account_id = o_v.fill_account_id; + vs.fill_data.pays_asset_id = o_v.fill_pays_asset_id; + vs.fill_data.pays_asset_name = fill_pays_asset.symbol; + vs.fill_data.pays_amount = o_v.fill_pays_amount; + vs.fill_data.pays_amount_units = (o_v.fill_pays_amount.value)/(double)asset::scaled_precision(fill_pays_asset.precision).value; + vs.fill_data.receives_asset_id = o_v.fill_receives_asset_id; + vs.fill_data.receives_asset_name = fill_receives_asset.symbol; + vs.fill_data.receives_amount = o_v.fill_receives_amount; + vs.fill_data.receives_amount_units = (o_v.fill_receives_amount.value)/(double)asset::scaled_precision(fill_receives_asset.precision).value; + + auto fill_price = (o_v.fill_receives_amount.value/(double)asset::scaled_precision(fill_receives_asset.precision).value) / + (o_v.fill_pays_amount.value/(double)asset::scaled_precision(fill_pays_asset.precision).value); + vs.fill_data.fill_price_units = fill_price; + vs.fill_data.fill_price = o_v.fill_fill_price; + vs.fill_data.is_maker = o_v.fill_is_maker; +} + +bool elasticsearch_plugin_impl::add_elasticsearch( const account_id_type account_id, + const optional & oho, + const uint32_t block_number) +{ + const auto &stats_obj = getStatsObject(account_id); + const auto &ath = addNewEntry(stats_obj, account_id, oho); + growStats(stats_obj, ath); + if(block_number > _elasticsearch_start_es_after_block) { + createBulkLine(ath); + prepareBulk(ath.id); + } + cleanObjects(ath.id, account_id); + + if (curl && bulk_lines.size() >= limit_documents) { // we are in bulk time, ready to add data to elasticsearech + prepare.clear(); + populateESstruct(); + if(!graphene::utilities::SendBulk(std::move(es))) + return false; + else + bulk_lines.clear(); + } + + return true; +} + +const account_statistics_object& elasticsearch_plugin_impl::getStatsObject(const account_id_type& account_id) +{ + graphene::chain::database& db = database(); + const auto &stats_obj = db.get_account_stats_by_owner(account_id); + + return stats_obj; +} + +const account_transaction_history_object& elasticsearch_plugin_impl::addNewEntry(const account_statistics_object& stats_obj, + 
const account_id_type& account_id, + const optional & oho) +{ + graphene::chain::database& db = database(); + const auto &ath = db.create([&](account_transaction_history_object &obj) { + obj.operation_id = oho->id; + obj.account = account_id; + obj.sequence = stats_obj.total_ops + 1; + obj.next = stats_obj.most_recent_op; + }); + + return ath; +} + +void elasticsearch_plugin_impl::growStats(const account_statistics_object& stats_obj, + const account_transaction_history_object& ath) +{ + graphene::chain::database& db = database(); + db.modify(stats_obj, [&](account_statistics_object &obj) { + obj.most_recent_op = ath.id; + obj.total_ops = ath.sequence; + }); +} + +void elasticsearch_plugin_impl::createBulkLine(const account_transaction_history_object& ath) +{ + bulk_line_struct.account_history = ath; + bulk_line_struct.operation_history = os; + bulk_line_struct.operation_type = op_type; + bulk_line_struct.operation_id_num = ath.operation_id.instance.value; + bulk_line_struct.block_data = bs; + if(_elasticsearch_visitor) + bulk_line_struct.additional_data = vs; + bulk_line = fc::json::to_string(bulk_line_struct, fc::json::legacy_generator); +} + +void elasticsearch_plugin_impl::prepareBulk(const account_transaction_history_id_type& ath_id) +{ + const std::string _id = fc::json::to_string(ath_id); + fc::mutable_variant_object bulk_header; + bulk_header["_index"] = index_name; + bulk_header["_type"] = "data"; + bulk_header["_id"] = fc::to_string(ath_id.space_id) + "." + fc::to_string(ath_id.type_id) + "." + + fc::to_string(ath_id.instance.value); + prepare = graphene::utilities::createBulk(bulk_header, std::move(bulk_line)); + std::move(prepare.begin(), prepare.end(), std::back_inserter(bulk_lines)); + prepare.clear(); +} + +void elasticsearch_plugin_impl::cleanObjects(const account_transaction_history_id_type& ath_id, const account_id_type& account_id) +{ + graphene::chain::database& db = database(); + // remove everything except current object from ath + const auto &his_idx = db.get_index_type(); + const auto &by_seq_idx = his_idx.indices().get(); + auto itr = by_seq_idx.lower_bound(boost::make_tuple(account_id, 0)); + if (itr != by_seq_idx.end() && itr->account == account_id && itr->id != ath_id) { + // if found, remove the entry + const auto remove_op_id = itr->operation_id; + const auto itr_remove = itr; + ++itr; + db.remove( *itr_remove ); + // modify previous node's next pointer + // this should be always true, but just have a check here + if( itr != by_seq_idx.end() && itr->account == account_id ) + { + db.modify( *itr, [&]( account_transaction_history_object& obj ){ + obj.next = account_transaction_history_id_type(); + }); + } + // do the same on oho + const auto &by_opid_idx = his_idx.indices().get(); + if (by_opid_idx.find(remove_op_id) == by_opid_idx.end()) { + db.remove(remove_op_id(db)); + } + } +} + +void elasticsearch_plugin_impl::populateESstruct() +{ + es.curl = curl; + es.bulk_lines = std::move(bulk_lines); + es.elasticsearch_url = _elasticsearch_node_url; + es.auth = _elasticsearch_basic_auth; + es.index_prefix = _elasticsearch_index_prefix; + es.endpoint = ""; + es.query = ""; +} + +} // end namespace detail + +elasticsearch_plugin::elasticsearch_plugin() : + my( new detail::elasticsearch_plugin_impl(*this) ) +{ +} + +elasticsearch_plugin::~elasticsearch_plugin() +{ +} + +std::string elasticsearch_plugin::plugin_name()const +{ + return "elasticsearch"; +} +std::string elasticsearch_plugin::plugin_description()const +{ + return "Stores account history data in elasticsearch 
database(EXPERIMENTAL)."; +} + +void elasticsearch_plugin::plugin_set_program_options( + boost::program_options::options_description& cli, + boost::program_options::options_description& cfg + ) +{ + cli.add_options() + ("elasticsearch-node-url", boost::program_options::value(), "Elastic Search database node url(http://localhost:9200/)") + ("elasticsearch-bulk-replay", boost::program_options::value(), "Number of bulk documents to index on replay(10000)") + ("elasticsearch-bulk-sync", boost::program_options::value(), "Number of bulk documents to index on a syncronied chain(100)") + ("elasticsearch-visitor", boost::program_options::value(), "Use visitor to index additional data(slows down the replay(false))") + ("elasticsearch-basic-auth", boost::program_options::value(), "Pass basic auth to elasticsearch database('')") + ("elasticsearch-index-prefix", boost::program_options::value(), "Add a prefix to the index(bitshares-)") + ("elasticsearch-operation-object", boost::program_options::value(), "Save operation as object(false)") + ("elasticsearch-start-es-after-block", boost::program_options::value(), "Start doing ES job after block(0)") + ; + cfg.add(cli); +} + +void elasticsearch_plugin::plugin_initialize(const boost::program_options::variables_map& options) +{ + database().applied_block.connect( [&]( const signed_block& b) { + if (!my->update_account_histories(b)) + FC_THROW_EXCEPTION(graphene::chain::plugin_exception, "Error populating ES database, we are going to keep trying."); + } ); + + my->_oho_index = database().add_index< primary_index< operation_history_index > >(); + database().add_index< primary_index< account_transaction_history_index > >(); + + if (options.count("elasticsearch-node-url")) { + my->_elasticsearch_node_url = options["elasticsearch-node-url"].as(); + } + if (options.count("elasticsearch-bulk-replay")) { + my->_elasticsearch_bulk_replay = options["elasticsearch-bulk-replay"].as(); + } + if (options.count("elasticsearch-bulk-sync")) { + my->_elasticsearch_bulk_sync = options["elasticsearch-bulk-sync"].as(); + } + if (options.count("elasticsearch-visitor")) { + my->_elasticsearch_visitor = options["elasticsearch-visitor"].as(); + } + if (options.count("elasticsearch-basic-auth")) { + my->_elasticsearch_basic_auth = options["elasticsearch-basic-auth"].as(); + } + if (options.count("elasticsearch-index-prefix")) { + my->_elasticsearch_index_prefix = options["elasticsearch-index-prefix"].as(); + } + if (options.count("elasticsearch-operation-object")) { + my->_elasticsearch_operation_object = options["elasticsearch-operation-object"].as(); + } + if (options.count("elasticsearch-start-es-after-block")) { + my->_elasticsearch_start_es_after_block = options["elasticsearch-start-es-after-block"].as(); + } +} + +void elasticsearch_plugin::plugin_startup() +{ + graphene::utilities::ES es; + es.curl = my->curl; + es.elasticsearch_url = my->_elasticsearch_node_url; + es.auth = my->_elasticsearch_basic_auth; + + if(!graphene::utilities::checkES(es)) + FC_THROW_EXCEPTION(fc::exception, "ES database is not up in url ${url}", ("url", my->_elasticsearch_node_url)); + ilog("elasticsearch ACCOUNT HISTORY: plugin_startup() begin"); +} + +} } diff --git a/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp b/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp new file mode 100644 index 0000000000..a5ee7417c7 --- /dev/null +++ b/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp @@ 
-0,0 +1,304 @@ +/* + * Copyright (c) 2017 Cryptonomex, Inc., and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#pragma once + +#include +#include +#include + +namespace graphene { namespace elasticsearch { + using namespace chain; + +// +// Plugins should #define their SPACE_ID's so plugins with +// conflicting SPACE_ID assignments can be compiled into the +// same binary (by simply re-assigning some of the conflicting #defined +// SPACE_ID's in a build script). +// +// Assignment of SPACE_ID's cannot be done at run-time because +// various template automagic depends on them being known at compile +// time. +// +#ifndef ELASTICSEARCH_SPACE_ID +#define ELASTICSEARCH_SPACE_ID 6 +#endif + +namespace detail +{ + class elasticsearch_plugin_impl; +} + +class elasticsearch_plugin : public graphene::app::plugin +{ + public: + elasticsearch_plugin(); + virtual ~elasticsearch_plugin(); + + std::string plugin_name()const override; + std::string plugin_description()const override; + virtual void plugin_set_program_options( + boost::program_options::options_description& cli, + boost::program_options::options_description& cfg) override; + virtual void plugin_initialize(const boost::program_options::variables_map& options) override; + virtual void plugin_startup() override; + + friend class detail::elasticsearch_plugin_impl; + std::unique_ptr my; +}; + +struct operation_visitor +{ + typedef void result_type; + + share_type fee_amount; + asset_id_type fee_asset; + + asset_id_type transfer_asset_id; + share_type transfer_amount; + account_id_type transfer_from; + account_id_type transfer_to; + + void operator()( const graphene::chain::transfer_operation& o ) + { + fee_asset = o.fee.asset_id; + fee_amount = o.fee.amount; + + transfer_asset_id = o.amount.asset_id; + transfer_amount = o.amount.amount; + transfer_from = o.from; + transfer_to = o.to; + } + + object_id_type fill_order_id; + account_id_type fill_account_id; + asset_id_type fill_pays_asset_id; + share_type fill_pays_amount; + asset_id_type fill_receives_asset_id; + share_type fill_receives_amount; + double fill_fill_price; + bool fill_is_maker; + + void operator()( const graphene::chain::fill_order_operation& o ) + { + fee_asset = o.fee.asset_id; + fee_amount = o.fee.amount; + + fill_order_id = o.order_id; + fill_account_id = o.account_id; + fill_pays_asset_id = o.pays.asset_id; + fill_pays_amount = o.pays.amount; + fill_receives_asset_id = o.receives.asset_id; + 
fill_receives_amount = o.receives.amount; + fill_fill_price = o.fill_price.to_real(); + fill_is_maker = o.is_maker; + } + + template + void operator()( const T& o ) + { + fee_asset = o.fee.asset_id; + fee_amount = o.fee.amount; + } +}; + +struct operation_history_struct { + int trx_in_block; + int op_in_trx; + std::string operation_result; + int virtual_op; + std::string op; + variant op_object; +}; + +struct block_struct { + int block_num; + fc::time_point_sec block_time; + std::string trx_id; +}; + +struct fee_struct { + asset_id_type asset; + std::string asset_name; + share_type amount; + double amount_units; +}; + +struct transfer_struct { + asset_id_type asset; + std::string asset_name; + share_type amount; + double amount_units; + account_id_type from; + account_id_type to; +}; + +struct fill_struct { + object_id_type order_id; + account_id_type account_id; + asset_id_type pays_asset_id; + std::string pays_asset_name; + share_type pays_amount; + double pays_amount_units; + asset_id_type receives_asset_id; + std::string receives_asset_name; + share_type receives_amount; + double receives_amount_units; + double fill_price; + double fill_price_units; + bool is_maker; +}; + +struct visitor_struct { + fee_struct fee_data; + transfer_struct transfer_data; + fill_struct fill_data; +}; + +struct bulk_struct { + account_transaction_history_object account_history; + operation_history_struct operation_history; + int operation_type; + int operation_id_num; + block_struct block_data; + optional additional_data; +}; + +struct adaptor_struct { + variant adapt(const variant_object& op) + { + fc::mutable_variant_object o(op); + vector keys_to_rename; + for (auto i = o.begin(); i != o.end(); ++i) + { + auto& element = (*i).value(); + if (element.is_object()) + { + const string& name = (*i).key(); + auto& vo = element.get_object(); + if (vo.contains(name.c_str())) + keys_to_rename.emplace_back(name); + element = adapt(vo); + } + else if (element.is_array()) + adapt(element.get_array()); + } + for (const auto& i : keys_to_rename) + { + string new_name = i + "_"; + o[new_name] = variant(o[i]); + o.erase(i); + } + + if (o.find("memo") != o.end()) + { + auto& memo = o["memo"]; + if (memo.is_string()) + { + o["memo_"] = o["memo"]; + o.erase("memo"); + } + else if (memo.is_object()) + { + fc::mutable_variant_object tmp(memo.get_object()); + if (tmp.find("nonce") != tmp.end()) + { + tmp["nonce"] = tmp["nonce"].as_string(); + o["memo"] = tmp; + } + } + } + if (o.find("new_parameters") != o.end()) + { + auto& tmp = o["new_parameters"]; + if (tmp.is_object()) + { + fc::mutable_variant_object tmp2(tmp.get_object()); + if (tmp2.find("current_fees") != tmp2.end()) + { + tmp2.erase("current_fees"); + o["new_parameters"] = tmp2; + } + } + } + if (o.find("owner") != o.end() && o["owner"].is_string()) + { + o["owner_"] = o["owner"].as_string(); + o.erase("owner"); + } + if (o.find("proposed_ops") != o.end()) + { + o["proposed_ops"] = fc::json::to_string(o["proposed_ops"]); + } + if (o.find("initializer") != o.end()) + { + o["initializer"] = fc::json::to_string(o["initializer"]); + } + if (o.find("policy") != o.end()) + { + o["policy"] = fc::json::to_string(o["policy"]); + } + if (o.find("predicates") != o.end()) + { + o["predicates"] = fc::json::to_string(o["predicates"]); + } + if (o.find("active_special_authority") != o.end()) + { + o["active_special_authority"] = fc::json::to_string(o["active_special_authority"]); + } + if (o.find("owner_special_authority") != o.end()) + { + o["owner_special_authority"] = 
fc::json::to_string(o["owner_special_authority"]); + } + + + variant v; + fc::to_variant(o, v, FC_PACK_MAX_DEPTH); + return v; + } + + void adapt(fc::variants& v) + { + for (auto& array_element : v) + { + if (array_element.is_object()) + array_element = adapt(array_element.get_object()); + else if (array_element.is_array()) + adapt(array_element.get_array()); + else + array_element = array_element.as_string(); + } + } +}; + +} } //graphene::elasticsearch + +FC_REFLECT( graphene::elasticsearch::operation_history_struct, (trx_in_block)(op_in_trx)(operation_result)(virtual_op)(op)(op_object) ) +FC_REFLECT( graphene::elasticsearch::block_struct, (block_num)(block_time)(trx_id) ) +FC_REFLECT( graphene::elasticsearch::fee_struct, (asset)(asset_name)(amount)(amount_units) ) +FC_REFLECT( graphene::elasticsearch::transfer_struct, (asset)(asset_name)(amount)(amount_units)(from)(to) ) +FC_REFLECT( graphene::elasticsearch::fill_struct, (order_id)(account_id)(pays_asset_id)(pays_asset_name)(pays_amount)(pays_amount_units) + (receives_asset_id)(receives_asset_name)(receives_amount)(receives_amount_units)(fill_price) + (fill_price_units)(is_maker)) +FC_REFLECT( graphene::elasticsearch::visitor_struct, (fee_data)(transfer_data)(fill_data) ) +FC_REFLECT( graphene::elasticsearch::bulk_struct, (account_history)(operation_history)(operation_type)(operation_id_num)(block_data)(additional_data) ) diff --git a/libraries/plugins/es_objects/CMakeLists.txt b/libraries/plugins/es_objects/CMakeLists.txt new file mode 100644 index 0000000000..42d18a6580 --- /dev/null +++ b/libraries/plugins/es_objects/CMakeLists.txt @@ -0,0 +1,29 @@ +file(GLOB HEADERS "include/graphene/es_objects/*.hpp") + +add_library( graphene_es_objects + es_objects.cpp + ) +find_package(CURL REQUIRED) +include_directories(${CURL_INCLUDE_DIRS}) +if(CURL_STATICLIB) + SET_TARGET_PROPERTIES(graphene_es_objects PROPERTIES + COMPILE_DEFINITIONS "CURL_STATICLIB") +endif(CURL_STATICLIB) +if(MSVC) + set_source_files_properties(es_objects.cpp PROPERTIES COMPILE_FLAGS "/bigobj" ) +endif(MSVC) + +target_link_libraries( graphene_es_objects graphene_chain graphene_app ${CURL_LIBRARIES} ) +target_include_directories( graphene_es_objects + PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) + + +install( TARGETS + graphene_es_objects + + RUNTIME DESTINATION bin + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib +) +INSTALL( FILES ${HEADERS} DESTINATION "include/graphene/es_objects" ) + diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp new file mode 100644 index 0000000000..21ae19094e --- /dev/null +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -0,0 +1,339 @@ +/* + * Copyright (c) 2018 oxarbitrage, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
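A simplified, self-contained sketch of the renaming rule applied by adaptor_struct::adapt() above: when a nested object is stored under a key that also appears as a field inside that object, the outer key gets a trailing '_' so the resulting Elasticsearch mapping never sees the same name with two different types. The real code works on fc::variant_object, recurses into arrays, and special-cases fields such as memo and proposed_ops; here a plain std::map stands in for the variant types.

#include <iostream>
#include <map>
#include <string>
#include <vector>

// one level of nesting is enough to show the rule: a document maps field names to a
// child object of scalar fields
using child_obj = std::map<std::string, std::string>;
using object    = std::map<std::string, child_obj>;

void adapt(object& o)
{
    std::vector<std::string> keys_to_rename;
    for (const auto& kv : o)
        if (kv.second.count(kv.first))      // child contains a field named like its own key
            keys_to_rename.push_back(kv.first);
    for (const auto& k : keys_to_rename)    // rename with a trailing '_', as the plugin does
    {
        o[k + "_"] = o[k];
        o.erase(k);
    }
}

int main()
{
    // e.g. a transfer's {"amount": {"amount": "100", "asset_id": "1.3.0"}}
    object doc{ { "amount", { { "amount", "100" }, { "asset_id", "1.3.0" } } },
                { "from",   { { "name", "alice" } } } };
    adapt(doc);
    for (const auto& kv : doc)
        std::cout << kv.first << "\n";      // prints "amount_" then "from"
}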
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include + +#include +#include +#include +#include +#include +#include + +#include + +namespace graphene { namespace es_objects { + +namespace detail +{ + +class es_objects_plugin_impl +{ + public: + es_objects_plugin_impl(es_objects_plugin& _plugin) + : _self( _plugin ) + { curl = curl_easy_init(); } + virtual ~es_objects_plugin_impl(); + + bool index_database( const vector& ids, std::string action); + void remove_from_database( object_id_type id, std::string index); + + es_objects_plugin& _self; + std::string _es_objects_elasticsearch_url = "http://localhost:9200/"; + std::string _es_objects_auth = ""; + uint32_t _es_objects_bulk_replay = 10000; + uint32_t _es_objects_bulk_sync = 100; + bool _es_objects_proposals = true; + bool _es_objects_accounts = true; + bool _es_objects_assets = true; + bool _es_objects_balances = true; + bool _es_objects_limit_orders = true; + bool _es_objects_asset_bitasset = true; + std::string _es_objects_index_prefix = "objects-"; + uint32_t _es_objects_start_es_after_block = 0; + CURL *curl; // curl handler + vector bulk; + vector prepare; + + bool _es_objects_keep_only_current = true; + + uint32_t block_number; + fc::time_point_sec block_time; + + private: + template + void prepareTemplate(T blockchain_object, string index_name); +}; + +bool es_objects_plugin_impl::index_database( const vector& ids, std::string action) +{ + graphene::chain::database &db = _self.database(); + + block_time = db.head_block_time(); + block_number = db.head_block_num(); + + if(block_number > _es_objects_start_es_after_block) { + + // check if we are in replay or in sync and change number of bulk documents accordingly + uint32_t limit_documents = 0; + if ((fc::time_point::now() - block_time) < fc::seconds(30)) + limit_documents = _es_objects_bulk_sync; + else + limit_documents = _es_objects_bulk_replay; + + + for (auto const &value: ids) { + if (value.is() && _es_objects_proposals) { + auto obj = db.find_object(value); + auto p = static_cast(obj); + if (p != nullptr) { + if (action == "delete") + remove_from_database(p->id, "proposal"); + else + prepareTemplate(*p, "proposal"); + } + } else if (value.is() && _es_objects_accounts) { + auto obj = db.find_object(value); + auto a = static_cast(obj); + if (a != nullptr) { + if (action == "delete") + remove_from_database(a->id, "account"); + else + prepareTemplate(*a, "account"); + } + } else if (value.is() && _es_objects_assets) { + auto obj = db.find_object(value); + auto a = static_cast(obj); + if (a != nullptr) { + if (action == "delete") + remove_from_database(a->id, "asset"); + else + prepareTemplate(*a, "asset"); + } + } else if (value.is() && _es_objects_balances) { + auto obj = db.find_object(value); + auto b = static_cast(obj); + if (b != nullptr) { + if (action == "delete") + remove_from_database(b->id, "balance"); + else + prepareTemplate(*b, "balance"); + } + } else if (value.is() && _es_objects_limit_orders) { + auto obj = db.find_object(value); + auto l = static_cast(obj); + if (l != nullptr) { + if (action == "delete") + 
remove_from_database(l->id, "limitorder"); + else + prepareTemplate(*l, "limitorder"); + } + } else if (value.is() && _es_objects_asset_bitasset) { + auto obj = db.find_object(value); + auto ba = static_cast(obj); + if (ba != nullptr) { + if (action == "delete") + remove_from_database(ba->id, "bitasset"); + else + prepareTemplate(*ba, "bitasset"); + } + } + } + + if (curl && bulk.size() >= limit_documents) { // we are in bulk time, ready to add data to elasticsearech + + graphene::utilities::ES es; + es.curl = curl; + es.bulk_lines = bulk; + es.elasticsearch_url = _es_objects_elasticsearch_url; + es.auth = _es_objects_auth; + + if (!graphene::utilities::SendBulk(std::move(es))) + return false; + else + bulk.clear(); + } + } + + return true; +} + +void es_objects_plugin_impl::remove_from_database( object_id_type id, std::string index) +{ + if(_es_objects_keep_only_current) + { + fc::mutable_variant_object delete_line; + delete_line["_id"] = string(id); + delete_line["_index"] = _es_objects_index_prefix + index; + delete_line["_type"] = "data"; + fc::mutable_variant_object final_delete_line; + final_delete_line["delete"] = delete_line; + prepare.push_back(fc::json::to_string(final_delete_line)); + std::move(prepare.begin(), prepare.end(), std::back_inserter(bulk)); + prepare.clear(); + } +} + +template +void es_objects_plugin_impl::prepareTemplate(T blockchain_object, string index_name) +{ + fc::mutable_variant_object bulk_header; + bulk_header["_index"] = _es_objects_index_prefix + index_name; + bulk_header["_type"] = "data"; + if(_es_objects_keep_only_current) + { + bulk_header["_id"] = string(blockchain_object.id); + } + + adaptor_struct adaptor; + fc::variant blockchain_object_variant; + fc::to_variant( blockchain_object, blockchain_object_variant, GRAPHENE_NET_MAX_NESTED_OBJECTS ); + fc::mutable_variant_object o = adaptor.adapt(blockchain_object_variant.get_object()); + + o["object_id"] = string(blockchain_object.id); + o["block_time"] = block_time; + o["block_number"] = block_number; + + string data = fc::json::to_string(o, fc::json::legacy_generator); + + prepare = graphene::utilities::createBulk(bulk_header, std::move(data)); + std::move(prepare.begin(), prepare.end(), std::back_inserter(bulk)); + prepare.clear(); +} + +es_objects_plugin_impl::~es_objects_plugin_impl() +{ + return; +} + +} // end namespace detail + +es_objects_plugin::es_objects_plugin() : + my( new detail::es_objects_plugin_impl(*this) ) +{ +} + +es_objects_plugin::~es_objects_plugin() +{ +} + +std::string es_objects_plugin::plugin_name()const +{ + return "es_objects"; +} +std::string es_objects_plugin::plugin_description()const +{ + return "Stores blockchain objects in ES database. 
Experimental."; +} + +void es_objects_plugin::plugin_set_program_options( + boost::program_options::options_description& cli, + boost::program_options::options_description& cfg + ) +{ + cli.add_options() + ("es-objects-elasticsearch-url", boost::program_options::value(), "Elasticsearch node url(http://localhost:9200/)") + ("es-objects-auth", boost::program_options::value(), "Basic auth username:password('')") + ("es-objects-bulk-replay", boost::program_options::value(), "Number of bulk documents to index on replay(10000)") + ("es-objects-bulk-sync", boost::program_options::value(), "Number of bulk documents to index on a synchronized chain(100)") + ("es-objects-proposals", boost::program_options::value(), "Store proposal objects(true)") + ("es-objects-accounts", boost::program_options::value(), "Store account objects(true)") + ("es-objects-assets", boost::program_options::value(), "Store asset objects(true)") + ("es-objects-balances", boost::program_options::value(), "Store balances objects(true)") + ("es-objects-limit-orders", boost::program_options::value(), "Store limit order objects(true)") + ("es-objects-asset-bitasset", boost::program_options::value(), "Store feed data(true)") + ("es-objects-index-prefix", boost::program_options::value(), "Add a prefix to the index(objects-)") + ("es-objects-keep-only-current", boost::program_options::value(), "Keep only current state of the objects(true)") + ("es-objects-start-es-after-block", boost::program_options::value(), "Start doing ES job after block(0)") + ; + cfg.add(cli); +} + +void es_objects_plugin::plugin_initialize(const boost::program_options::variables_map& options) +{ + database().new_objects.connect([&]( const vector& ids, const flat_set& impacted_accounts ) { + if(!my->index_database(ids, "create")) + { + FC_THROW_EXCEPTION(graphene::chain::plugin_exception, "Error creating object from ES database, we are going to keep trying."); + } + }); + database().changed_objects.connect([&]( const vector& ids, const flat_set& impacted_accounts ) { + if(!my->index_database(ids, "update")) + { + FC_THROW_EXCEPTION(graphene::chain::plugin_exception, "Error updating object from ES database, we are going to keep trying."); + } + }); + database().removed_objects.connect([this](const vector& ids, const vector& objs, const flat_set& impacted_accounts) { + if(!my->index_database(ids, "delete")) + { + FC_THROW_EXCEPTION(graphene::chain::plugin_exception, "Error deleting object from ES database, we are going to keep trying."); + } + }); + + + if (options.count("es-objects-elasticsearch-url")) { + my->_es_objects_elasticsearch_url = options["es-objects-elasticsearch-url"].as(); + } + if (options.count("es-objects-auth")) { + my->_es_objects_auth = options["es-objects-auth"].as(); + } + if (options.count("es-objects-bulk-replay")) { + my->_es_objects_bulk_replay = options["es-objects-bulk-replay"].as(); + } + if (options.count("es-objects-bulk-sync")) { + my->_es_objects_bulk_sync = options["es-objects-bulk-sync"].as(); + } + if (options.count("es-objects-proposals")) { + my->_es_objects_proposals = options["es-objects-proposals"].as(); + } + if (options.count("es-objects-accounts")) { + my->_es_objects_accounts = options["es-objects-accounts"].as(); + } + if (options.count("es-objects-assets")) { + my->_es_objects_assets = options["es-objects-assets"].as(); + } + if (options.count("es-objects-balances")) { + my->_es_objects_balances = options["es-objects-balances"].as(); + } + if (options.count("es-objects-limit-orders")) { + 
my->_es_objects_limit_orders = options["es-objects-limit-orders"].as<bool>(); + } + if (options.count("es-objects-asset-bitasset")) { + my->_es_objects_asset_bitasset = options["es-objects-asset-bitasset"].as<bool>(); + } + if (options.count("es-objects-index-prefix")) { + my->_es_objects_index_prefix = options["es-objects-index-prefix"].as<std::string>(); + } + if (options.count("es-objects-keep-only-current")) { + my->_es_objects_keep_only_current = options["es-objects-keep-only-current"].as<bool>(); + } + if (options.count("es-objects-start-es-after-block")) { + my->_es_objects_start_es_after_block = options["es-objects-start-es-after-block"].as<uint32_t>(); + } +} + +void es_objects_plugin::plugin_startup() +{ + graphene::utilities::ES es; + es.curl = my->curl; + es.elasticsearch_url = my->_es_objects_elasticsearch_url; + es.auth = my->_es_objects_auth; + + if(!graphene::utilities::checkES(es)) + FC_THROW_EXCEPTION(fc::exception, "ES database is not up in url ${url}", ("url", my->_es_objects_elasticsearch_url)); + ilog("elasticsearch OBJECTS: plugin_startup() begin"); +} + +} } \ No newline at end of file diff --git a/libraries/plugins/es_objects/include/graphene/es_objects/es_objects.hpp b/libraries/plugins/es_objects/include/graphene/es_objects/es_objects.hpp new file mode 100644 index 0000000000..fa91e3bde4 --- /dev/null +++ b/libraries/plugins/es_objects/include/graphene/es_objects/es_objects.hpp @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2018 oxarbitrage, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE.
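index_database() above switches between two bulk sizes depending on whether the node is replaying old blocks or is in sync with the chain. A minimal sketch of that heuristic, with plain std::time_t in place of fc::time_point and the default sizes taken from the option descriptions (100 in sync, 10000 on replay):

#include <cstdint>
#include <ctime>
#include <iostream>

// mirrors: (fc::time_point::now() - block_time) < fc::seconds(30) ? bulk_sync : bulk_replay
uint32_t bulk_limit(std::time_t head_block_time, std::time_t now,
                    uint32_t bulk_sync = 100, uint32_t bulk_replay = 10000)
{
    // a head block less than 30 seconds old means the node is in sync: flush small bulks
    // so documents show up quickly; otherwise we are replaying and can batch aggressively
    return (now - head_block_time) < 30 ? bulk_sync : bulk_replay;
}

int main()
{
    std::time_t now = std::time(nullptr);
    std::cout << bulk_limit(now - 5,    now) << "\n"; // 100   (in sync)
    std::cout << bulk_limit(now - 3600, now) << "\n"; // 10000 (replaying)
}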
+ */ +#pragma once + +#include +#include + +namespace graphene { namespace es_objects { + +using namespace chain; + +namespace detail +{ + class es_objects_plugin_impl; +} + +class es_objects_plugin : public graphene::app::plugin +{ + public: + es_objects_plugin(); + virtual ~es_objects_plugin(); + + std::string plugin_name()const override; + std::string plugin_description()const override; + virtual void plugin_set_program_options( + boost::program_options::options_description& cli, + boost::program_options::options_description& cfg) override; + virtual void plugin_initialize(const boost::program_options::variables_map& options) override; + virtual void plugin_startup() override; + + friend class detail::es_objects_plugin_impl; + std::unique_ptr my; +}; + +struct adaptor_struct { + fc::mutable_variant_object adapt(const variant_object &obj) { + fc::mutable_variant_object o(obj); + vector keys_to_rename; + for (auto i = o.begin(); i != o.end(); ++i) { + auto &element = (*i).value(); + if (element.is_object()) { + const string &name = (*i).key(); + auto &vo = element.get_object(); + if (vo.contains(name.c_str())) + keys_to_rename.emplace_back(name); + element = adapt(vo); + } else if (element.is_array()) + adapt(element.get_array()); + } + for (const auto &i : keys_to_rename) { + string new_name = i + "_"; + o[new_name] = variant(o[i]); + o.erase(i); + } + if (o.find("owner") != o.end() && o["owner"].is_string()) + { + o["owner_"] = o["owner"].as_string(); + o.erase("owner"); + } + if (o.find("active_special_authority") != o.end()) + { + o["active_special_authority"] = fc::json::to_string(o["active_special_authority"]); + } + if (o.find("owner_special_authority") != o.end()) + { + o["owner_special_authority"] = fc::json::to_string(o["owner_special_authority"]); + } + if (o.find("feeds") != o.end()) + { + o["feeds"] = fc::json::to_string(o["feeds"]); + } + if (o.find("operations") != o.end()) + { + o["operations"] = fc::json::to_string(o["operations"]); + } + + return o; + } + + void adapt(fc::variants &v) { + for (auto &array_element : v) { + if (array_element.is_object()) + array_element = adapt(array_element.get_object()); + else if (array_element.is_array()) + adapt(array_element.get_array()); + else + array_element = array_element.as_string(); + } + } +}; + +} } //graphene::es_objects diff --git a/libraries/plugins/grouped_orders/CMakeLists.txt b/libraries/plugins/grouped_orders/CMakeLists.txt new file mode 100644 index 0000000000..4ec9f64d27 --- /dev/null +++ b/libraries/plugins/grouped_orders/CMakeLists.txt @@ -0,0 +1,23 @@ +file(GLOB HEADERS "include/graphene/grouped_orders/*.hpp") + +add_library( graphene_grouped_orders + grouped_orders_plugin.cpp + ) + +target_link_libraries( graphene_grouped_orders graphene_chain graphene_app ) +target_include_directories( graphene_grouped_orders + PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) + +if(MSVC) + set_source_files_properties( grouped_orders_plugin.cpp PROPERTIES COMPILE_FLAGS "/bigobj" ) +endif(MSVC) + +install( TARGETS + graphene_grouped_orders + + RUNTIME DESTINATION bin + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib +) +INSTALL( FILES ${HEADERS} DESTINATION "include/graphene/grouped_orders" ) + diff --git a/libraries/plugins/grouped_orders/grouped_orders_plugin.cpp b/libraries/plugins/grouped_orders/grouped_orders_plugin.cpp new file mode 100644 index 0000000000..ef1ae04cae --- /dev/null +++ b/libraries/plugins/grouped_orders/grouped_orders_plugin.cpp @@ -0,0 +1,303 @@ +/* + * Copyright (c) 2018 Abit More, and contributors. 
+ * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include + +#include + +namespace graphene { namespace grouped_orders { + +namespace detail +{ + +class grouped_orders_plugin_impl +{ + public: + grouped_orders_plugin_impl(grouped_orders_plugin& _plugin) + :_self( _plugin ) {} + virtual ~grouped_orders_plugin_impl(); + + graphene::chain::database& database() + { + return _self.database(); + } + + grouped_orders_plugin& _self; + flat_set _tracked_groups; +}; + +/** + * @brief This secondary index is used to track changes on limit order objects. + */ +class limit_order_group_index : public secondary_index +{ + public: + limit_order_group_index( const flat_set& groups ) : _tracked_groups( groups ) {}; + + virtual void object_inserted( const object& obj ) override; + virtual void object_removed( const object& obj ) override; + virtual void about_to_modify( const object& before ) override; + virtual void object_modified( const object& after ) override; + + const flat_set& get_tracked_groups() const + { return _tracked_groups; } + + const map< limit_order_group_key, limit_order_group_data >& get_order_groups() const + { return _og_data; } + + private: + void remove_order( const limit_order_object& obj, bool remove_empty = true ); + + /** tracked groups */ + flat_set _tracked_groups; + + /** maps the group key to group data */ + map< limit_order_group_key, limit_order_group_data > _og_data; +}; + +void limit_order_group_index::object_inserted( const object& objct ) +{ try { + const limit_order_object& o = static_cast( objct ); + + auto& idx = _og_data; + + for( uint16_t group : get_tracked_groups() ) + { + auto create_ogo = [&]() { + idx[ limit_order_group_key( group, o.sell_price ) ] = limit_order_group_data( o.sell_price, o.for_sale ); + }; + // if idx is empty, insert this order + // Note: not capped + if( idx.empty() ) + { + create_ogo(); + continue; + } + + // cap the price + price capped_price = o.sell_price; + price max = o.sell_price.max(); + price min = o.sell_price.min(); + bool capped_max = false; + bool capped_min = false; + if( o.sell_price > max ) + { + capped_price = max; + capped_max = true; + } + else if( o.sell_price < min ) + { + capped_price = min; + capped_min = true; + } + // if idx is not empty, find the group that is next to this order + auto itr = idx.lower_bound( limit_order_group_key( group, capped_price ) ); + bool check_previous = false; + if( itr == idx.end() || itr->first.group != group + || 
itr->first.min_price.base.asset_id != o.sell_price.base.asset_id + || itr->first.min_price.quote.asset_id != o.sell_price.quote.asset_id ) + // not same market or group type + check_previous = true; + else // same market and group type + { + bool update_max = false; + if( capped_price > itr->second.max_price ) // implies itr->min_price <= itr->max_price < max + { + update_max = true; + price max_price = itr->first.min_price * ratio_type( GRAPHENE_100_PERCENT + group, GRAPHENE_100_PERCENT ); + // max_price should have been capped here + if( capped_price > max_price ) // new order is out of range + check_previous = true; + } + if( !check_previous ) // new order is within the range + { + if( capped_min && o.sell_price < itr->first.min_price ) + { // need to update itr->min_price here, if itr is below min, and new order is even lower + // TODO improve performance + limit_order_group_data data( itr->second.max_price, o.for_sale + itr->second.total_for_sale ); + idx.erase( itr ); + idx[ limit_order_group_key( group, o.sell_price ) ] = data; + } + else + { + if( update_max || ( capped_max && o.sell_price > itr->second.max_price ) ) + itr->second.max_price = o.sell_price; // store real price here, not capped + itr->second.total_for_sale += o.for_sale; + } + } + } + + if( check_previous ) + { + if( itr == idx.begin() ) // no previous + create_ogo(); + else + { + --itr; // should be valid + if( itr->first.group != group || itr->first.min_price.base.asset_id != o.sell_price.base.asset_id + || itr->first.min_price.quote.asset_id != o.sell_price.quote.asset_id ) + // not same market or group type + create_ogo(); + else // same market and group type + { + // due to lower_bound, always true: capped_price < itr->first.min_price, so no need to check again, + // if new order is in range of itr group, always need to update itr->first.min_price, unless + // o.sell_price is higher than max + price min_price = itr->second.max_price / ratio_type( GRAPHENE_100_PERCENT + group, GRAPHENE_100_PERCENT ); + // min_price should have been capped here + if( capped_price < min_price ) // new order is out of range + create_ogo(); + else if( capped_max && o.sell_price >= itr->first.min_price ) + { // itr is above max, and price of new order is even higher + if( o.sell_price > itr->second.max_price ) + itr->second.max_price = o.sell_price; + itr->second.total_for_sale += o.for_sale; + } + else + { // new order is within the range + // TODO improve performance + limit_order_group_data data( itr->second.max_price, o.for_sale + itr->second.total_for_sale ); + idx.erase( itr ); + idx[ limit_order_group_key( group, o.sell_price ) ] = data; + } + } + } + } + } +} FC_CAPTURE_AND_RETHROW( (objct) ); } + +void limit_order_group_index::object_removed( const object& objct ) +{ try { + const limit_order_object& o = static_cast( objct ); + remove_order( o ); +} FC_CAPTURE_AND_RETHROW( (objct) ); } + +void limit_order_group_index::about_to_modify( const object& objct ) +{ try { + const limit_order_object& o = static_cast( objct ); + remove_order( o, false ); +} FC_CAPTURE_AND_RETHROW( (objct) ); } + +void limit_order_group_index::object_modified( const object& objct ) +{ try { + object_inserted( objct ); +} FC_CAPTURE_AND_RETHROW( (objct) ); } + +void limit_order_group_index::remove_order( const limit_order_object& o, bool remove_empty ) +{ + auto& idx = _og_data; + + for( uint16_t group : get_tracked_groups() ) + { + // find the group that should contain this order + auto itr = idx.lower_bound( limit_order_group_key( group, o.sell_price 
) ); + if( itr == idx.end() || itr->first.group != group + || itr->first.min_price.base.asset_id != o.sell_price.base.asset_id + || itr->first.min_price.quote.asset_id != o.sell_price.quote.asset_id + || itr->second.max_price < o.sell_price ) + { + // can not find corresponding group, should not happen + wlog( "can not find the order group containing order for removing (price dismatch): ${o}", ("o",o) ); + continue; + } + else // found + { + if( itr->second.total_for_sale < o.for_sale ) + // should not happen + wlog( "can not find the order group containing order for removing (amount dismatch): ${o}", ("o",o) ); + else if( !remove_empty || itr->second.total_for_sale > o.for_sale ) + itr->second.total_for_sale -= o.for_sale; + else + // it's the only order in the group and need to be removed + idx.erase( itr ); + } + } +} + +grouped_orders_plugin_impl::~grouped_orders_plugin_impl() +{} + +} // end namespace detail + + +grouped_orders_plugin::grouped_orders_plugin() : + my( new detail::grouped_orders_plugin_impl(*this) ) +{ +} + +grouped_orders_plugin::~grouped_orders_plugin() +{ +} + +std::string grouped_orders_plugin::plugin_name()const +{ + return "grouped_orders"; +} + +void grouped_orders_plugin::plugin_set_program_options( + boost::program_options::options_description& cli, + boost::program_options::options_description& cfg + ) +{ + cli.add_options() + ("tracked-groups", boost::program_options::value()->default_value("[10,100]"), // 0.1% and 1% + "Group orders by percentage increase on price. Specify a JSON array of numbers here, each number is a group, number 1 means 0.01%. ") + ; + cfg.add(cli); +} + +void grouped_orders_plugin::plugin_initialize(const boost::program_options::variables_map& options) +{ try { + + if( options.count( "tracked-groups" ) ) + { + const std::string& groups = options["tracked-groups"].as(); + my->_tracked_groups = fc::json::from_string(groups).as>( 2 ); + my->_tracked_groups.erase( 0 ); + } + else + my->_tracked_groups = fc::json::from_string("[10,100]").as>(2); + + database().add_secondary_index< primary_index, detail::limit_order_group_index >( my->_tracked_groups ); + +} FC_CAPTURE_AND_RETHROW() } + +void grouped_orders_plugin::plugin_startup() +{ +} + +const flat_set& grouped_orders_plugin::tracked_groups() const +{ + return my->_tracked_groups; +} + +const map< limit_order_group_key, limit_order_group_data >& grouped_orders_plugin::limit_order_groups() +{ + const auto& idx = database().get_index_type< limit_order_index >(); + const auto& pidx = dynamic_cast&>(idx); + const auto& logidx = pidx.get_secondary_index< detail::limit_order_group_index >(); + return logidx.get_order_groups(); +} + +} } diff --git a/libraries/plugins/grouped_orders/include/graphene/grouped_orders/grouped_orders_plugin.hpp b/libraries/plugins/grouped_orders/include/graphene/grouped_orders/grouped_orders_plugin.hpp new file mode 100644 index 0000000000..8f91ccbd9c --- /dev/null +++ b/libraries/plugins/grouped_orders/include/graphene/grouped_orders/grouped_orders_plugin.hpp @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2018 Abit More, and contributors. 
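In limit_order_group_index::object_inserted() above, the top of a price band is derived from its bottom with ratio_type( GRAPHENE_100_PERCENT + group, GRAPHENE_100_PERCENT ), and the --tracked-groups help text states that a group value of 1 means 0.01%. A rough sketch of that arithmetic using doubles instead of graphene's integer price and ratio types, assuming GRAPHENE_100_PERCENT is 10000:

#include <cstdint>
#include <iostream>

static const int64_t GRAPHENE_100_PERCENT = 10000; // assumed value of the graphene constant

// top of a band that starts at min_price, for a band width of `group` hundredths of a percent
double band_max(double min_price, uint16_t group)
{
    return min_price * double(GRAPHENE_100_PERCENT + group) / double(GRAPHENE_100_PERCENT);
}

int main()
{
    std::cout << band_max(1.0, 10)  << "\n"; // 1.001  (0.1% band, first default group)
    std::cout << band_max(1.0, 100) << "\n"; // 1.01   (1% band, second default group)
}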
+ * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#pragma once + +#include +#include + +namespace graphene { namespace grouped_orders { +using namespace chain; + +struct limit_order_group_key +{ + limit_order_group_key( const uint16_t g, const price& p ) : group(g), min_price(p) {} + limit_order_group_key() {} + + uint16_t group = 0; ///< percentage, 1 means 1 / 10000 + price min_price; + + friend bool operator < ( const limit_order_group_key& a, const limit_order_group_key& b ) + { + // price is ordered descendingly, same as limit_order_index + return std::tie( a.group, b.min_price ) < std::tie( b.group, a.min_price ); + } + friend bool operator == ( const limit_order_group_key& a, const limit_order_group_key& b ) + { + return std::tie( a.group, a.min_price ) == std::tie( b.group, b.min_price ); + } +}; + +struct limit_order_group_data +{ + limit_order_group_data( const price& p, const share_type s ) : max_price(p), total_for_sale(s) {} + limit_order_group_data() {} + + price max_price; + share_type total_for_sale; ///< asset id is min_price.base.asset_id +}; + +namespace detail +{ + class grouped_orders_plugin_impl; +} + +/** + * The grouped orders plugin can be configured to track any number of price diff percentages via its configuration. + * Every time when there is a change on an order in object database, it will update internal state to reflect the change. 
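The limit_order_group_key comparator above sorts ascending by group but descending by min_price, which it achieves by swapping a and b for the price component inside std::tie. A self-contained illustration of that trick with a simplified key (a double in place of graphene::chain::price):

#include <cstdint>
#include <iostream>
#include <set>
#include <tuple>

struct group_key {
    uint16_t group;     // band width, 1 == 0.01%
    double   min_price;
};

bool operator<(const group_key& a, const group_key& b)
{
    // same shape as the plugin's comparator: group ascending, min_price descending
    return std::tie(a.group, b.min_price) < std::tie(b.group, a.min_price);
}

int main()
{
    std::set<group_key> keys{ {10, 1.0}, {10, 2.0}, {100, 1.5} };
    for (const auto& k : keys)
        std::cout << k.group << " " << k.min_price << "\n";
    // groups come out ascending, and within group 10 the higher price comes first:
    // 10 2
    // 10 1
    // 100 1.5
}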
+ */ +class grouped_orders_plugin : public graphene::app::plugin +{ + public: + grouped_orders_plugin(); + virtual ~grouped_orders_plugin(); + + std::string plugin_name()const override; + virtual void plugin_set_program_options( + boost::program_options::options_description& cli, + boost::program_options::options_description& cfg) override; + virtual void plugin_initialize( + const boost::program_options::variables_map& options) override; + virtual void plugin_startup() override; + + const flat_set& tracked_groups()const; + + const map< limit_order_group_key, limit_order_group_data >& limit_order_groups(); + + private: + friend class detail::grouped_orders_plugin_impl; + std::unique_ptr my; +}; + +} } //graphene::grouped_orders + +FC_REFLECT( graphene::grouped_orders::limit_order_group_key, (group)(min_price) ) +FC_REFLECT( graphene::grouped_orders::limit_order_group_data, (max_price)(total_for_sale) ) diff --git a/libraries/plugins/make_new_plugin.sh b/libraries/plugins/make_new_plugin.sh new file mode 100755 index 0000000000..2f178db806 --- /dev/null +++ b/libraries/plugins/make_new_plugin.sh @@ -0,0 +1,50 @@ +#!/bin/bash + +# Find next available space id by checking all current plugins +next_space_id () { + SPACE_IDS=() + for i in * ; do + if [ -d "$i" ] && [ "$i" != "CMakeFiles" ]; then + cd "$i/include/graphene/$i" + result=$(grep -rnw '.' -e '#define[[:space:]]*[[:alnum:]]*_SPACE_ID') + B=$(echo $result | cut -d ' ' -f 3) + if [[ $B =~ [[:digit:]] ]]; then + SPACE_IDS+=($B) + fi + cd "../../../.." + fi + done + max=$( printf "%d\n" "${SPACE_IDS[@]}" | sort -n | tail -1 ) + next=$(($max + 1)) + return $next +} + +## create new plugin +if [ $# -ne 1 ]; then + echo "Usage: $0 my_new_plugin" + echo "... where my_new_plugin is the name of the plugin you want to create" + exit 1 +fi + +pluginName="$1" + +echo "Copying template..." +cp -r template_plugin "$pluginName" + +echo "Renaming files/directories..." +mv "$pluginName/include/graphene/template_plugin" "$pluginName/include/graphene/$pluginName" +for file in `find "$pluginName" -type f -name '*template_plugin*'`; do mv "$file" `sed s/template_plugin/"$pluginName"/g <<< $file`; done; +echo "Renaming in files..." +find "$pluginName" -type f -exec sed -i "s/template_plugin/$pluginName/g" {} \; +echo "Assigning next available SPACE_ID..." +next_space_id +find "$pluginName" -type f -exec sed -i "s/@SPACE_ID@/$?/g" {} \; + +echo "Done! $pluginName is ready." +echo "Next steps:" +echo "1- Add 'add_subdirectory( $pluginName )' to CmakeLists.txt in this directory." +echo "2- Add 'graphene_$pluginName' to ../../programs/witness_node/CMakeLists.txt with the other plugins." +echo "3- Include plugin header file '#include ' to ../../programs/witness_node/main.cpp." +echo "4- Initialize plugin with the others with 'auto ${pluginName}_plug = node->register_plugin<$pluginName::$pluginName>();' in ../../programs/witness_node/main.cpp" +echo "5- cmake and make" +echo "6- Start plugin with './../programs/witness_node/witness_node --plugins \"$pluginName\"'. 
After the seed nodes are added you start to see see a msgs from the plugin 'onBlock' " diff --git a/libraries/plugins/market_history/CMakeLists.txt b/libraries/plugins/market_history/CMakeLists.txt index 21b8211f09..47410d7488 100644 --- a/libraries/plugins/market_history/CMakeLists.txt +++ b/libraries/plugins/market_history/CMakeLists.txt @@ -19,3 +19,5 @@ install( TARGETS LIBRARY DESTINATION lib ARCHIVE DESTINATION lib ) +INSTALL( FILES ${HEADERS} DESTINATION "include/graphene/market_history" ) + diff --git a/libraries/plugins/market_history/include/graphene/market_history/market_history_plugin.hpp b/libraries/plugins/market_history/include/graphene/market_history/market_history_plugin.hpp index b4d1254ca0..c1a40637e6 100644 --- a/libraries/plugins/market_history/include/graphene/market_history/market_history_plugin.hpp +++ b/libraries/plugins/market_history/include/graphene/market_history/market_history_plugin.hpp @@ -27,6 +27,9 @@ #include #include +#include + +#include namespace graphene { namespace market_history { using namespace chain; @@ -41,10 +44,18 @@ using namespace chain; // various template automagic depends on them being known at compile // time. // -#ifndef ACCOUNT_HISTORY_SPACE_ID -#define ACCOUNT_HISTORY_SPACE_ID 5 +#ifndef MARKET_HISTORY_SPACE_ID +#define MARKET_HISTORY_SPACE_ID 5 #endif +enum market_history_object_type +{ + order_history_object_type = 0, + bucket_object_type = 1, + market_ticker_object_type = 2, + market_ticker_meta_object_type = 3 +}; + struct bucket_key { bucket_key( asset_id_type a, asset_id_type b, uint32_t s, fc::time_point_sec o ) @@ -68,8 +79,8 @@ struct bucket_key struct bucket_object : public abstract_object { - static const uint8_t space_id = ACCOUNT_HISTORY_SPACE_ID; - static const uint8_t type_id = 1; // market_history_plugin type, referenced from account_history_plugin.hpp + static const uint8_t space_id = MARKET_HISTORY_SPACE_ID; + static const uint8_t type_id = bucket_object_type; price high()const { return asset( high_base, key.base ) / asset( high_quote, key.quote ); } price low()const { return asset( low_base, key.base ) / asset( low_quote, key.quote ); } @@ -101,31 +112,108 @@ struct history_key { }; struct order_history_object : public abstract_object { - history_key key; - fc::time_point_sec time; - fill_order_operation op; + static const uint8_t space_id = MARKET_HISTORY_SPACE_ID; + static const uint8_t type_id = order_history_object_type; + + history_key key; + fc::time_point_sec time; + fill_order_operation op; +}; +struct order_history_object_key_base_extractor +{ + typedef asset_id_type result_type; + result_type operator()(const order_history_object& o)const { return o.key.base; } +}; +struct order_history_object_key_quote_extractor +{ + typedef asset_id_type result_type; + result_type operator()(const order_history_object& o)const { return o.key.quote; } +}; +struct order_history_object_key_sequence_extractor +{ + typedef int64_t result_type; + result_type operator()(const order_history_object& o)const { return o.key.sequence; } +}; + +struct market_ticker_object : public abstract_object +{ + static const uint8_t space_id = MARKET_HISTORY_SPACE_ID; + static const uint8_t type_id = market_ticker_object_type; + + asset_id_type base; + asset_id_type quote; + share_type last_day_base; + share_type last_day_quote; + share_type latest_base; + share_type latest_quote; + fc::uint128 base_volume; + fc::uint128 quote_volume; +}; + +struct market_ticker_meta_object : public abstract_object +{ + static const uint8_t space_id = 
MARKET_HISTORY_SPACE_ID; + static const uint8_t type_id = market_ticker_meta_object_type; + + object_id_type rolling_min_order_his_id; + bool skip_min_order_his_id = false; }; struct by_key; typedef multi_index_container< bucket_object, indexed_by< - hashed_unique< tag, member< object, object_id_type, &object::id > >, + ordered_unique< tag, member< object, object_id_type, &object::id > >, ordered_unique< tag, member< bucket_object, bucket_key, &bucket_object::key > > > > bucket_object_multi_index_type; +struct by_market_time; typedef multi_index_container< order_history_object, indexed_by< - hashed_unique< tag, member< object, object_id_type, &object::id > >, - ordered_unique< tag, member< order_history_object, history_key, &order_history_object::key > > + ordered_unique< tag, member< object, object_id_type, &object::id > >, + ordered_unique< tag, member< order_history_object, history_key, &order_history_object::key > >, + ordered_unique< + tag, + composite_key< + order_history_object, + order_history_object_key_base_extractor, + order_history_object_key_quote_extractor, + member, + order_history_object_key_sequence_extractor + >, + composite_key_compare< + std::less< asset_id_type >, + std::less< asset_id_type >, + std::greater< time_point_sec >, + std::less< int64_t > + > + > > > order_history_multi_index_type; +struct by_market; +struct by_volume; +typedef multi_index_container< + market_ticker_object, + indexed_by< + ordered_unique< tag, member< object, object_id_type, &object::id > >, + ordered_non_unique< tag, member< market_ticker_object, fc::uint128, &market_ticker_object::base_volume > >, + ordered_unique< + tag, + composite_key< + market_ticker_object, + member, + member + > + > + > +> market_ticker_object_multi_index_type; typedef generic_index bucket_index; typedef generic_index history_index; +typedef generic_index market_ticker_index; namespace detail @@ -154,6 +242,8 @@ class market_history_plugin : public graphene::app::plugin uint32_t max_history()const; const flat_set& tracked_buckets()const; + uint32_t max_order_his_records_per_market()const; + uint32_t max_order_his_seconds_per_market()const; private: friend class detail::market_history_plugin_impl; @@ -165,11 +255,17 @@ class market_history_plugin : public graphene::app::plugin FC_REFLECT( graphene::market_history::history_key, (base)(quote)(sequence) ) FC_REFLECT_DERIVED( graphene::market_history::order_history_object, (graphene::db::object), (key)(time)(op) ) FC_REFLECT( graphene::market_history::bucket_key, (base)(quote)(seconds)(open) ) -FC_REFLECT_DERIVED( graphene::market_history::bucket_object, (graphene::db::object), +FC_REFLECT_DERIVED( graphene::market_history::bucket_object, (graphene::db::object), (key) (high_base)(high_quote) (low_base)(low_quote) (open_base)(open_quote) (close_base)(close_quote) (base_volume)(quote_volume) ) - +FC_REFLECT_DERIVED( graphene::market_history::market_ticker_object, (graphene::db::object), + (base)(quote) + (last_day_base)(last_day_quote) + (latest_base)(latest_quote) + (base_volume)(quote_volume) ) +FC_REFLECT_DERIVED( graphene::market_history::market_ticker_meta_object, (graphene::db::object), + (rolling_min_order_his_id)(skip_min_order_his_id) ) diff --git a/libraries/plugins/market_history/market_history_plugin.cpp b/libraries/plugins/market_history/market_history_plugin.cpp index 28cbb7c794..f6948dc59e 100644 --- a/libraries/plugins/market_history/market_history_plugin.cpp +++ b/libraries/plugins/market_history/market_history_plugin.cpp @@ -34,7 +34,6 @@ #include #include 
-#include namespace graphene { namespace market_history { @@ -61,16 +60,19 @@ class market_history_plugin_impl market_history_plugin& _self; flat_set _tracked_buckets; uint32_t _maximum_history_per_bucket_size = 1000; + uint32_t _max_order_his_records_per_market = 1000; + uint32_t _max_order_his_seconds_per_market = 259200; }; struct operation_process_fill_order { - market_history_plugin& _plugin; - fc::time_point_sec _now; + market_history_plugin& _plugin; + fc::time_point_sec _now; + const market_ticker_meta_object*& _meta; - operation_process_fill_order( market_history_plugin& mhp, fc::time_point_sec n ) - :_plugin(mhp),_now(n) {} + operation_process_fill_order( market_history_plugin& mhp, fc::time_point_sec n, const market_ticker_meta_object*& meta ) + :_plugin(mhp),_now(n),_meta(meta) {} typedef void result_type; @@ -81,13 +83,12 @@ struct operation_process_fill_order void operator()( const fill_order_operation& o )const { //ilog( "processing ${o}", ("o",o) ); - const auto& buckets = _plugin.tracked_buckets(); auto& db = _plugin.database(); - const auto& bucket_idx = db.get_index_type(); - const auto& history_idx = db.get_index_type().indices().get(); - - auto time = db.head_block_time(); + const auto& order_his_idx = db.get_index_type().indices(); + const auto& history_idx = order_his_idx.get(); + const auto& his_time_idx = order_his_idx.get(); + // To save new filled order data history_key hkey; hkey.base = o.pays.asset_id; hkey.quote = o.receives.asset_id; @@ -97,69 +98,141 @@ struct operation_process_fill_order auto itr = history_idx.lower_bound( hkey ); - if( itr->key.base == hkey.base && itr->key.quote == hkey.quote ) + if( itr != history_idx.end() && itr->key.base == hkey.base && itr->key.quote == hkey.quote ) hkey.sequence = itr->key.sequence - 1; else hkey.sequence = 0; - db.create( [&]( order_history_object& ho ) { + const auto& new_order_his_obj = db.create( [&]( order_history_object& ho ) { ho.key = hkey; - ho.time = time; + ho.time = _now; ho.op = o; }); - hkey.sequence += 200; - itr = history_idx.lower_bound( hkey ); + // save a reference to market ticker meta object + if( _meta == nullptr ) + { + const auto& meta_idx = db.get_index_type>(); + if( meta_idx.size() == 0 ) + _meta = &db.create( [&]( market_ticker_meta_object& mtm ) { + mtm.rolling_min_order_his_id = new_order_his_obj.id; + mtm.skip_min_order_his_id = false; + }); + else + _meta = &( *meta_idx.begin() ); + } - while( itr != history_idx.end() ) + // To remove old filled order data + const auto max_records = _plugin.max_order_his_records_per_market(); + hkey.sequence += max_records; + itr = history_idx.lower_bound( hkey ); + if( itr != history_idx.end() && itr->key.base == hkey.base && itr->key.quote == hkey.quote ) { - if( itr->key.base == hkey.base && itr->key.quote == hkey.quote ) + const auto max_seconds = _plugin.max_order_his_seconds_per_market(); + fc::time_point_sec min_time; + if( min_time + max_seconds < _now ) + min_time = _now - max_seconds; + auto time_itr = his_time_idx.lower_bound( std::make_tuple( hkey.base, hkey.quote, min_time ) ); + if( time_itr != his_time_idx.end() && time_itr->key.base == hkey.base && time_itr->key.quote == hkey.quote ) { - db.remove( *itr ); - itr = history_idx.lower_bound( hkey ); + if( itr->key.sequence >= time_itr->key.sequence ) + { + while( itr != history_idx.end() && itr->key.base == hkey.base && itr->key.quote == hkey.quote ) + { + auto old_itr = itr; + ++itr; + db.remove( *old_itr ); + } + } + else + { + while( time_itr != his_time_idx.end() && time_itr->key.base 
== hkey.base && time_itr->key.quote == hkey.quote ) + { + auto old_itr = time_itr; + ++time_itr; + db.remove( *old_itr ); + } + } } - else break; } + // To update ticker data and buckets data, only update for maker orders + if( !o.is_maker ) + return; - auto max_history = _plugin.max_history(); - for( auto bucket : buckets ) + bucket_key key; + key.base = o.pays.asset_id; + key.quote = o.receives.asset_id; + + price trade_price = o.pays / o.receives; + + if( key.base > key.quote ) { - auto cutoff = (fc::time_point() + fc::seconds( bucket * max_history)); + std::swap( key.base, key.quote ); + trade_price = ~trade_price; + } + + price fill_price = o.fill_price; + if( fill_price.base.asset_id > fill_price.quote.asset_id ) + fill_price = ~fill_price; - bucket_key key; - key.base = o.pays.asset_id; - key.quote = o.receives.asset_id; + // To update ticker data + const auto& ticker_idx = db.get_index_type().indices().get(); + auto ticker_itr = ticker_idx.find( std::make_tuple( key.base, key.quote ) ); + if( ticker_itr == ticker_idx.end() ) + { + db.create( [&]( market_ticker_object& mt ) { + mt.base = key.base; + mt.quote = key.quote; + mt.last_day_base = 0; + mt.last_day_quote = 0; + mt.latest_base = fill_price.base.amount; + mt.latest_quote = fill_price.quote.amount; + mt.base_volume = trade_price.base.amount.value; + mt.quote_volume = trade_price.quote.amount.value; + }); + } + else + { + db.modify( *ticker_itr, [&]( market_ticker_object& mt ) { + mt.latest_base = fill_price.base.amount; + mt.latest_quote = fill_price.quote.amount; + mt.base_volume += trade_price.base.amount.value; // ignore overflow + mt.quote_volume += trade_price.quote.amount.value; // ignore overflow + }); + } + // To update buckets data + const auto max_history = _plugin.max_history(); + if( max_history == 0 ) return; - /** for every matched order there are two fill order operations created, one for - * each side. 
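The new ticker and bucket code above only processes maker fills and first normalizes the market pair: when the pays asset id is greater than the receives asset id, the two ids are swapped and the price inverted with ~price, so every market is keyed by its lower asset id regardless of which side of the fill is being visited. A simplified sketch with integer ids and a numerator/denominator price:

#include <cstdint>
#include <iostream>
#include <utility>

struct simple_price { int64_t base_amount; int64_t quote_amount; };

// stand-in for graphene's '~price', which swaps base and quote
simple_price invert(const simple_price& p) { return { p.quote_amount, p.base_amount }; }

int main()
{
    uint32_t base_id = 5, quote_id = 3;   // e.g. pays asset 1.3.5, receives asset 1.3.3
    simple_price trade{ 200, 10 };        // 200 of asset 5 for 10 of asset 3

    if (base_id > quote_id) {             // same test as `key.base > key.quote` above
        std::swap(base_id, quote_id);
        trade = invert(trade);
    }
    std::cout << base_id << "/" << quote_id << " -> "
              << trade.base_amount << ":" << trade.quote_amount << "\n"; // 3/5 -> 10:200
}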
We can filter the duplicates by only considering the fill operations where - * the base > quote - */ - if( key.base > key.quote ) - { - //ilog( " skipping because base > quote" ); - continue; - } + const auto& buckets = _plugin.tracked_buckets(); + if( buckets.size() == 0 ) return; - price trade_price = o.pays / o.receives; + const auto& bucket_idx = db.get_index_type(); + for( auto bucket : buckets ) + { + auto bucket_num = _now.sec_since_epoch() / bucket; + fc::time_point_sec cutoff; + if( bucket_num > max_history ) + cutoff = cutoff + ( bucket * ( bucket_num - max_history ) ); key.seconds = bucket; - key.open = fc::time_point() + fc::seconds((_now.sec_since_epoch() / key.seconds) * key.seconds); + key.open = fc::time_point_sec() + ( bucket_num * bucket ); const auto& by_key_idx = bucket_idx.indices().get(); - auto itr = by_key_idx.find( key ); - if( itr == by_key_idx.end() ) + auto bucket_itr = by_key_idx.find( key ); + if( bucket_itr == by_key_idx.end() ) { // create new bucket /* const auto& obj = */ db.create( [&]( bucket_object& b ){ b.key = key; - b.quote_volume += trade_price.quote.amount; - b.base_volume += trade_price.base.amount; - b.open_base = trade_price.base.amount; - b.open_quote = trade_price.quote.amount; - b.close_base = trade_price.base.amount; - b.close_quote = trade_price.quote.amount; + b.base_volume = trade_price.base.amount; + b.quote_volume = trade_price.quote.amount; + b.open_base = fill_price.base.amount; + b.open_quote = fill_price.quote.amount; + b.close_base = fill_price.base.amount; + b.close_quote = fill_price.quote.amount; b.high_base = b.close_base; b.high_quote = b.close_quote; b.low_base = b.close_base; @@ -169,41 +242,48 @@ struct operation_process_fill_order } else { // update existing bucket - //wlog( " before updating bucket ${b}", ("b",*itr) ); - db.modify( *itr, [&]( bucket_object& b ){ - b.base_volume += trade_price.base.amount; - b.quote_volume += trade_price.quote.amount; - b.close_base = trade_price.base.amount; - b.close_quote = trade_price.quote.amount; - if( b.high() < trade_price ) + //wlog( " before updating bucket ${b}", ("b",*bucket_itr) ); + db.modify( *bucket_itr, [&]( bucket_object& b ){ + try { + b.base_volume += trade_price.base.amount; + } catch( fc::overflow_exception& ) { + b.base_volume = std::numeric_limits::max(); + } + try { + b.quote_volume += trade_price.quote.amount; + } catch( fc::overflow_exception& ) { + b.quote_volume = std::numeric_limits::max(); + } + b.close_base = fill_price.base.amount; + b.close_quote = fill_price.quote.amount; + if( b.high() < fill_price ) { b.high_base = b.close_base; b.high_quote = b.close_quote; } - if( b.low() > trade_price ) + if( b.low() > fill_price ) { b.low_base = b.close_base; b.low_quote = b.close_quote; } }); - //wlog( " after bucket bucket ${b}", ("b",*itr) ); + //wlog( " after bucket bucket ${b}", ("b",*bucket_itr) ); } - if( max_history != 0 ) { key.open = fc::time_point_sec(); - auto itr = by_key_idx.lower_bound( key ); + bucket_itr = by_key_idx.lower_bound( key ); - while( itr != by_key_idx.end() && - itr->key.base == key.base && - itr->key.quote == key.quote && - itr->key.seconds == bucket && - itr->key.open < cutoff ) + while( bucket_itr != by_key_idx.end() && + bucket_itr->key.base == key.base && + bucket_itr->key.quote == key.quote && + bucket_itr->key.seconds == bucket && + bucket_itr->key.open < cutoff ) { - // elog( " removing old bucket ${b}", ("b", *itr) ); - auto old_itr = itr; - ++itr; - db.remove( *old_itr ); + // elog( " removing old bucket ${b}", ("b", 
*bucket_itr) ); + auto old_bucket_itr = bucket_itr; + ++bucket_itr; + db.remove( *old_bucket_itr ); } } } @@ -215,15 +295,90 @@ market_history_plugin_impl::~market_history_plugin_impl() void market_history_plugin_impl::update_market_histories( const signed_block& b ) { - if( _maximum_history_per_bucket_size == 0 ) return; - if( _tracked_buckets.size() == 0 ) return; - graphene::chain::database& db = database(); + const market_ticker_meta_object* _meta = nullptr; + const auto& meta_idx = db.get_index_type>(); + if( meta_idx.size() > 0 ) + _meta = &( *meta_idx.begin() ); const vector >& hist = db.get_applied_operations(); for( const optional< operation_history_object >& o_op : hist ) { if( o_op.valid() ) - o_op->op.visit( operation_process_fill_order( _self, b.timestamp ) ); + { + try + { + o_op->op.visit( operation_process_fill_order( _self, b.timestamp, _meta ) ); + } FC_CAPTURE_AND_LOG( (o_op) ) + } + } + // roll out expired data from ticker + if( _meta != nullptr ) + { + time_point_sec last_day = b.timestamp - 86400; + object_id_type last_min_his_id = _meta->rolling_min_order_his_id; + bool skip = _meta->skip_min_order_his_id; + + const auto& ticker_idx = db.get_index_type().indices().get(); + const auto& history_idx = db.get_index_type().indices().get(); + auto history_itr = history_idx.lower_bound( _meta->rolling_min_order_his_id ); + while( history_itr != history_idx.end() && history_itr->time < last_day ) + { + const fill_order_operation& o = history_itr->op; + if( skip && history_itr->id == _meta->rolling_min_order_his_id ) + skip = false; + else if( o.is_maker ) + { + bucket_key key; + key.base = o.pays.asset_id; + key.quote = o.receives.asset_id; + + price trade_price = o.pays / o.receives; + + if( key.base > key.quote ) + { + std::swap( key.base, key.quote ); + trade_price = ~trade_price; + } + + price fill_price = o.fill_price; + if( fill_price.base.asset_id > fill_price.quote.asset_id ) + fill_price = ~fill_price; + + auto ticker_itr = ticker_idx.find( std::make_tuple( key.base, key.quote ) ); + if( ticker_itr != ticker_idx.end() ) // should always be true + { + db.modify( *ticker_itr, [&]( market_ticker_object& mt ) { + mt.last_day_base = fill_price.base.amount; + mt.last_day_quote = fill_price.quote.amount; + mt.base_volume -= trade_price.base.amount.value; // ignore underflow + mt.quote_volume -= trade_price.quote.amount.value; // ignore underflow + }); + } + } + last_min_his_id = history_itr->id; + ++history_itr; + } + // update meta + if( history_itr != history_idx.end() ) // if still has some data rolling + { + if( history_itr->id != _meta->rolling_min_order_his_id ) // if rolled out some + { + db.modify( *_meta, [&]( market_ticker_meta_object& mtm ) { + mtm.rolling_min_order_his_id = history_itr->id; + mtm.skip_min_order_his_id = false; + }); + } + } + else // if all data are rolled out + { + if( last_min_his_id != _meta->rolling_min_order_his_id ) // if rolled out some + { + db.modify( *_meta, [&]( market_ticker_meta_object& mtm ) { + mtm.rolling_min_order_his_id = last_min_his_id; + mtm.skip_min_order_his_id = true; + }); + } + } } } @@ -254,27 +409,38 @@ void market_history_plugin::plugin_set_program_options( ) { cli.add_options() - ("bucket-size", boost::program_options::value()->default_value("[15,60,300,3600,86400]"), + ("bucket-size", boost::program_options::value()->default_value("[60,300,900,1800,3600,14400,86400]"), "Track market history by grouping orders into buckets of equal size measured in seconds specified as a JSON array of numbers") - 
("history-per-size", boost::program_options::value()->default_value(1000), + ("history-per-size", boost::program_options::value()->default_value(1000), "How far back in time to track history for each bucket size, measured in the number of buckets (default: 1000)") + ("max-order-his-records-per-market", boost::program_options::value()->default_value(1000), + "Will only store this amount of matched orders for each market in order history for querying, or those meet the other option, which has more data (default: 1000)") + ("max-order-his-seconds-per-market", boost::program_options::value()->default_value(259200), + "Will only store matched orders in last X seconds for each market in order history for querying, or those meet the other option, which has more data (default: 259200 (3 days))") ; cfg.add(cli); } void market_history_plugin::plugin_initialize(const boost::program_options::variables_map& options) { try { - database().applied_block.connect( [&]( const signed_block& b){ my->update_market_histories(b); } ); + database().applied_block.connect( [this]( const signed_block& b){ my->update_market_histories(b); } ); database().add_index< primary_index< bucket_index > >(); database().add_index< primary_index< history_index > >(); + database().add_index< primary_index< market_ticker_index > >(); + database().add_index< primary_index< simple_index< market_ticker_meta_object > > >(); if( options.count( "bucket-size" ) ) { - const std::string& buckets = options["bucket-size"].as(); - my->_tracked_buckets = fc::json::from_string(buckets).as>(); + const std::string& buckets = options["bucket-size"].as(); + my->_tracked_buckets = fc::json::from_string(buckets).as>(2); + my->_tracked_buckets.erase( 0 ); } if( options.count( "history-per-size" ) ) my->_maximum_history_per_bucket_size = options["history-per-size"].as(); + if( options.count( "max-order-his-records-per-market" ) ) + my->_max_order_his_records_per_market = options["max-order-his-records-per-market"].as(); + if( options.count( "max-order-his-seconds-per-market" ) ) + my->_max_order_his_seconds_per_market = options["max-order-his-seconds-per-market"].as(); } FC_CAPTURE_AND_RETHROW() } void market_history_plugin::plugin_startup() @@ -291,4 +457,14 @@ uint32_t market_history_plugin::max_history()const return my->_maximum_history_per_bucket_size; } +uint32_t market_history_plugin::max_order_his_records_per_market()const +{ + return my->_max_order_his_records_per_market; +} + +uint32_t market_history_plugin::max_order_his_seconds_per_market()const +{ + return my->_max_order_his_seconds_per_market; +} + } } diff --git a/libraries/plugins/snapshot/CMakeLists.txt b/libraries/plugins/snapshot/CMakeLists.txt new file mode 100644 index 0000000000..227c386047 --- /dev/null +++ b/libraries/plugins/snapshot/CMakeLists.txt @@ -0,0 +1,17 @@ +file(GLOB HEADERS "include/graphene/snapshot/*.hpp") + +add_library( graphene_snapshot + snapshot.cpp + ) + +target_link_libraries( graphene_snapshot graphene_chain graphene_app ) +target_include_directories( graphene_snapshot + PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) + +install( TARGETS + graphene_snapshot + + RUNTIME DESTINATION bin + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib +) diff --git a/libraries/chain/index.cpp b/libraries/plugins/snapshot/include/graphene/snapshot/snapshot.hpp similarity index 50% rename from libraries/chain/index.cpp rename to libraries/plugins/snapshot/include/graphene/snapshot/snapshot.hpp index 41a469b21a..eb8d3a16cb 100644 --- a/libraries/chain/index.cpp +++ 
b/libraries/plugins/snapshot/include/graphene/snapshot/snapshot.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. + * Copyright (c) 2017 Peter Conrad, and contributors. * * The MIT License * @@ -21,23 +21,37 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ -#include -#include +#pragma once + +#include #include -namespace graphene { namespace chain { - void base_primary_index::save_undo( const object& obj ) - { _db.save_undo( obj ); } +#include + +namespace graphene { namespace snapshot_plugin { + +class snapshot_plugin : public graphene::app::plugin { + public: + ~snapshot_plugin() {} + + std::string plugin_name()const override; + std::string plugin_description()const override; + + virtual void plugin_set_program_options( + boost::program_options::options_description &command_line_options, + boost::program_options::options_description &config_file_options + ) override; + + virtual void plugin_initialize( const boost::program_options::variables_map& options ) override; + virtual void plugin_startup() override; + virtual void plugin_shutdown() override; - void base_primary_index::on_add( const object& obj ) - { - _db.save_undo_add( obj ); - for( auto ob : _observers ) ob->on_add( obj ); - } + private: + void check_snapshot( const graphene::chain::signed_block& b); - void base_primary_index::on_remove( const object& obj ) - { _db.save_undo_remove( obj ); for( auto ob : _observers ) ob->on_remove( obj ); } + uint32_t snapshot_block = -1, last_block = 0; + fc::time_point_sec snapshot_time = fc::time_point_sec::maximum(), last_time = fc::time_point_sec(1); + fc::path dest; +}; - void base_primary_index::on_modify( const object& obj ) - {for( auto ob : _observers ) ob->on_modify( obj ); } -} } // graphene::chain +} } //graphene::snapshot_plugin diff --git a/libraries/plugins/snapshot/snapshot.cpp b/libraries/plugins/snapshot/snapshot.cpp new file mode 100644 index 0000000000..f74ad5894a --- /dev/null +++ b/libraries/plugins/snapshot/snapshot.cpp @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2017 Peter Conrad, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
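A note on the trigger members declared in the header above: with the defaults (snapshot_block = -1, snapshot_time = maximum, last_time = 1) the plugin stays idle until one of the snapshot options is set, and it then fires exactly once, on the first applied block that crosses the configured threshold. The implementation that follows reduces to a simple crossing test, sketched here with assumed numbers (not part of the patch):

#include <cstdint>
#include <iostream>

// true exactly once: on the first block at or past the configured block number
static bool crosses( uint32_t last_block, uint32_t current_block, uint32_t snapshot_block )
{
   return last_block < snapshot_block && snapshot_block <= current_block;
}

int main()
{
   const uint32_t snapshot_block = 1000000;
   std::cout << crosses(  999999, 1000000, snapshot_block ) << std::endl; // 1 -> snapshot taken
   std::cout << crosses( 1000000, 1000001, snapshot_block ) << std::endl; // 0 -> already taken
   return 0;
}

The time-based trigger (snapshot-at-time) works the same way, comparing the previous and current block timestamps against the configured time.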
+ */ +#include + +#include + +#include + +using namespace graphene::snapshot_plugin; +using std::string; +using std::vector; + +namespace bpo = boost::program_options; + +static const char* OPT_BLOCK_NUM = "snapshot-at-block"; +static const char* OPT_BLOCK_TIME = "snapshot-at-time"; +static const char* OPT_DEST = "snapshot-to"; + +void snapshot_plugin::plugin_set_program_options( + boost::program_options::options_description& command_line_options, + boost::program_options::options_description& config_file_options) +{ + command_line_options.add_options() + (OPT_BLOCK_NUM, bpo::value(), "Block number after which to do a snapshot") + (OPT_BLOCK_TIME, bpo::value(), "Block time (ISO format) after which to do a snapshot") + (OPT_DEST, bpo::value(), "Pathname of JSON file where to store the snapshot") + ; + config_file_options.add(command_line_options); +} + +std::string snapshot_plugin::plugin_name()const +{ + return "snapshot"; +} + +std::string snapshot_plugin::plugin_description()const +{ + return "Create snapshots at a specified time or block number."; +} + +void snapshot_plugin::plugin_initialize(const boost::program_options::variables_map& options) +{ try { + ilog("snapshot plugin: plugin_initialize() begin"); + + if( options.count(OPT_BLOCK_NUM) || options.count(OPT_BLOCK_TIME) ) + { + FC_ASSERT( options.count(OPT_DEST), "Must specify snapshot-to in addition to snapshot-at-block or snapshot-at-time!" ); + dest = options[OPT_DEST].as(); + if( options.count(OPT_BLOCK_NUM) ) + snapshot_block = options[OPT_BLOCK_NUM].as(); + if( options.count(OPT_BLOCK_TIME) ) + snapshot_time = fc::time_point_sec::from_iso_string( options[OPT_BLOCK_TIME].as() ); + database().applied_block.connect( [&]( const graphene::chain::signed_block& b ) { + check_snapshot( b ); + }); + } + else + FC_ASSERT( !options.count("snapshot-to"), "Must specify snapshot-at-block or snapshot-at-time in addition to snapshot-to!" 
); + ilog("snapshot plugin: plugin_initialize() end"); +} FC_LOG_AND_RETHROW() } + +void snapshot_plugin::plugin_startup() {} + +void snapshot_plugin::plugin_shutdown() {} + +static void create_snapshot( const graphene::chain::database& db, const fc::path& dest ) +{ + ilog("snapshot plugin: creating snapshot"); + fc::ofstream out; + try + { + out.open( dest ); + } + catch ( fc::exception& e ) + { + wlog( "Failed to open snapshot destination: ${ex}", ("ex",e) ); + return; + } + for( uint32_t space_id = 0; space_id < 256; space_id++ ) + for( uint32_t type_id = 0; type_id < 256; type_id++ ) + { + try + { + db.get_index( (uint8_t)space_id, (uint8_t)type_id ); + } + catch (fc::assert_exception& e) + { + continue; + } + auto& index = db.get_index( (uint8_t)space_id, (uint8_t)type_id ); + index.inspect_all_objects( [&out]( const graphene::db::object& o ) { + out << fc::json::to_string( o.to_variant() ) << '\n'; + }); + } + out.close(); + ilog("snapshot plugin: created snapshot"); +} + +void snapshot_plugin::check_snapshot( const graphene::chain::signed_block& b ) +{ try { + uint32_t current_block = b.block_num(); + if( (last_block < snapshot_block && snapshot_block <= current_block) + || (last_time < snapshot_time && snapshot_time <= b.timestamp) ) + create_snapshot( database(), dest ); + last_block = current_block; + last_time = b.timestamp; +} FC_LOG_AND_RETHROW() } diff --git a/libraries/plugins/template_plugin/CMakeLists.txt b/libraries/plugins/template_plugin/CMakeLists.txt new file mode 100644 index 0000000000..a8a14584fc --- /dev/null +++ b/libraries/plugins/template_plugin/CMakeLists.txt @@ -0,0 +1,22 @@ +file(GLOB HEADERS "include/graphene/template_plugin/*.hpp") + +add_library( graphene_template_plugin + template_plugin.cpp + ) + +target_link_libraries( graphene_template_plugin graphene_chain graphene_app ) +target_include_directories( graphene_template_plugin + PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) + +if(MSVC) + set_source_files_properties(template_plugin.cpp PROPERTIES COMPILE_FLAGS "/bigobj" ) +endif(MSVC) + +install( TARGETS + graphene_template_plugin + + RUNTIME DESTINATION bin + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib +) +INSTALL( FILES ${HEADERS} DESTINATION "include/graphene/template_plugin" ) diff --git a/libraries/plugins/template_plugin/include/graphene/template_plugin/template_plugin.hpp b/libraries/plugins/template_plugin/include/graphene/template_plugin/template_plugin.hpp new file mode 100644 index 0000000000..168dac42bf --- /dev/null +++ b/libraries/plugins/template_plugin/include/graphene/template_plugin/template_plugin.hpp @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2018 template_plugin and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#pragma once + +#include +#include + +namespace graphene { namespace template_plugin { +using namespace chain; + +// +// Plugins should #define their SPACE_ID's so plugins with +// conflicting SPACE_ID assignments can be compiled into the +// same binary (by simply re-assigning some of the conflicting #defined +// SPACE_ID's in a build script). +// +// Assignment of SPACE_ID's cannot be done at run-time because +// various template automagic depends on them being known at compile +// time. +// +#ifndef template_plugin_SPACE_ID +#define template_plugin_SPACE_ID @SPACE_ID@ +#endif + + +namespace detail +{ + class template_plugin_impl; +} + +class template_plugin : public graphene::app::plugin +{ + public: + template_plugin(); + virtual ~template_plugin(); + + std::string plugin_name()const override; + std::string plugin_description()const override; + virtual void plugin_set_program_options( + boost::program_options::options_description& cli, + boost::program_options::options_description& cfg) override; + virtual void plugin_initialize(const boost::program_options::variables_map& options) override; + virtual void plugin_startup() override; + + friend class detail::template_plugin_impl; + std::unique_ptr my; +}; + +} } //graphene::template diff --git a/libraries/plugins/template_plugin/template_plugin.cpp b/libraries/plugins/template_plugin/template_plugin.cpp new file mode 100644 index 0000000000..2296f25cef --- /dev/null +++ b/libraries/plugins/template_plugin/template_plugin.cpp @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2018 template_plugin and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
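The header above is the entire surface a new plugin needs to expose. To actually load a plugin built from this template, a node binary registers it with the application object; a minimal sketch, assuming the usual graphene::app::application setup used by the node programs (the helper function name is made up for illustration):

#include <graphene/app/application.hpp>
#include <graphene/template_plugin/template_plugin.hpp>
#include <memory>

// Registers the template plugin so its options, indexes and callbacks get wired in.
// The returned shared_ptr keeps the plugin alive for the lifetime of the node.
std::shared_ptr<graphene::template_plugin::template_plugin>
register_template_plugin( graphene::app::application& node )
{
   return node.register_plugin< graphene::template_plugin::template_plugin >();
}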
+ */ + +#include + +namespace graphene { namespace template_plugin { + +namespace detail +{ + +class template_plugin_impl +{ + public: + template_plugin_impl(template_plugin& _plugin) + : _self( _plugin ) + { } + virtual ~template_plugin_impl(); + + void onBlock( const signed_block& b ); + + graphene::chain::database& database() + { + return _self.database(); + } + + template_plugin& _self; + + std::string _plugin_option = ""; + + private: + +}; + +void template_plugin_impl::onBlock( const signed_block& b ) +{ + wdump((b.block_num())); +} + +template_plugin_impl::~template_plugin_impl() +{ + return; +} + +} // end namespace detail + +template_plugin::template_plugin() : + my( new detail::template_plugin_impl(*this) ) +{ +} + +template_plugin::~template_plugin() +{ +} + +std::string template_plugin::plugin_name()const +{ + return "template_plugin"; +} +std::string template_plugin::plugin_description()const +{ + return "template_plugin description"; +} + +void template_plugin::plugin_set_program_options( + boost::program_options::options_description& cli, + boost::program_options::options_description& cfg + ) +{ + cli.add_options() + ("template_plugin_option", boost::program_options::value(), "template_plugin option") + ; + cfg.add(cli); +} + +void template_plugin::plugin_initialize(const boost::program_options::variables_map& options) +{ + database().applied_block.connect( [&]( const signed_block& b) { + my->onBlock(b); + } ); + + if (options.count("template_plugin")) { + my->_plugin_option = options["template_plugin"].as(); + } +} + +void template_plugin::plugin_startup() +{ + ilog("template_plugin: plugin_startup() begin"); +} + +} } diff --git a/libraries/plugins/witness/CMakeLists.txt b/libraries/plugins/witness/CMakeLists.txt index c82442fff2..95759bbf24 100644 --- a/libraries/plugins/witness/CMakeLists.txt +++ b/libraries/plugins/witness/CMakeLists.txt @@ -4,7 +4,7 @@ add_library( graphene_witness witness.cpp ) -target_link_libraries( graphene_witness graphene_chain graphene_app graphene_time ) +target_link_libraries( graphene_witness graphene_chain graphene_app ) target_include_directories( graphene_witness PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) diff --git a/libraries/plugins/witness/include/graphene/witness/witness.hpp b/libraries/plugins/witness/include/graphene/witness/witness.hpp index e2f60bf8de..0d2eab27ac 100644 --- a/libraries/plugins/witness/include/graphene/witness/witness.hpp +++ b/libraries/plugins/witness/include/graphene/witness/witness.hpp @@ -41,23 +41,14 @@ namespace block_production_condition no_private_key = 4, low_participation = 5, lag = 6, - consecutive = 7, - exception_producing_block = 8 + exception_producing_block = 7, + shutdown = 8 }; } class witness_plugin : public graphene::app::plugin { public: - ~witness_plugin() { - try { - if( _block_production_task.valid() ) - _block_production_task.cancel_and_wait(__FUNCTION__); - } catch(fc::canceled_exception&) { - //Expected exception. Move along. 
- } catch(fc::exception& e) { - edump((e.to_detail_string())); - } - } + ~witness_plugin() { stop_block_production(); } std::string plugin_name()const override; @@ -67,25 +58,36 @@ class witness_plugin : public graphene::app::plugin { ) override; void set_block_production(bool allow) { _production_enabled = allow; } + void stop_block_production(); virtual void plugin_initialize( const boost::program_options::variables_map& options ) override; virtual void plugin_startup() override; virtual void plugin_shutdown() override; + inline const fc::flat_map< chain::witness_id_type, fc::optional >& get_witness_key_cache() + { return _witness_key_cache; } + private: void schedule_production_loop(); block_production_condition::block_production_condition_enum block_production_loop(); - block_production_condition::block_production_condition_enum maybe_produce_block( fc::mutable_variant_object& capture ); + block_production_condition::block_production_condition_enum maybe_produce_block( fc::limited_mutable_variant_object& capture ); + + /// Fetch signing keys of all witnesses in the cache from object database and update the cache accordingly + void refresh_witness_key_cache(); boost::program_options::variables_map _options; bool _production_enabled = false; - bool _consecutive_production_enabled = false; + bool _shutting_down = false; uint32_t _required_witness_participation = 33 * GRAPHENE_1_PERCENT; uint32_t _production_skip_flags = graphene::chain::database::skip_nothing; - std::map _private_keys; + std::map _private_keys; std::set _witnesses; fc::future _block_production_task; + + /// For tracking signing keys of specified witnesses, only update when applied a block + fc::flat_map< chain::witness_id_type, fc::optional > _witness_key_cache; + }; } } //graphene::witness_plugin diff --git a/libraries/plugins/witness/witness.cpp b/libraries/plugins/witness/witness.cpp index 250438bb4f..af2101e9ea 100644 --- a/libraries/plugins/witness/witness.cpp +++ b/libraries/plugins/witness/witness.cpp @@ -25,11 +25,9 @@ #include #include -#include #include -#include #include #include @@ -42,7 +40,7 @@ namespace bpo = boost::program_options; void new_chain_banner( const graphene::chain::database& db ) { - std::cerr << "\n" + ilog("\n" "********************************\n" "* *\n" "* ------- NEW CHAIN ------ *\n" @@ -50,15 +48,12 @@ void new_chain_banner( const graphene::chain::database& db ) "* ------------------------ *\n" "* *\n" "********************************\n" - "\n"; - if( db.get_slot_at_time( graphene::time::now() ) > 200 ) + "\n"); + if( db.get_slot_at_time( fc::time_point::now() ) > 200 ) { - std::cerr << "Your genesis seems to have an old timestamp\n" - "Please consider using the --genesis-timestamp option to give your genesis a recent timestamp\n" - "\n" - ; + wlog("Your genesis seems to have an old timestamp"); + wlog("Please consider using the --genesis-timestamp option to give your genesis a recent timestamp"); } - return; } void witness_plugin::plugin_set_program_options( @@ -95,8 +90,8 @@ void witness_plugin::plugin_initialize(const boost::program_options::variables_m const std::vector key_id_to_wif_pair_strings = options["private-key"].as>(); for (const std::string& key_id_to_wif_pair_string : key_id_to_wif_pair_strings) { - auto key_id_to_wif_pair = graphene::app::dejsonify >(key_id_to_wif_pair_string); - idump((key_id_to_wif_pair)); + auto key_id_to_wif_pair = graphene::app::dejsonify >(key_id_to_wif_pair_string, 5); + ilog("Public Key: ${public}", ("public", key_id_to_wif_pair.first)); 
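Each private-key entry is a JSON pair of a public key and a WIF-encoded private key; the surrounding hunk recovers the private key from the WIF half, and the matching public key is what later gets checked against the scheduled witness' signing key. A small stand-alone sketch of that conversion (the WIF shown is the well-known test key for private exponent 1 and must never be used on a real chain):

#include <graphene/utilities/key_conversion.hpp>
#include <fc/crypto/elliptic.hpp>
#include <fc/optional.hpp>
#include <iostream>
#include <string>

int main()
{
   // Well-known example WIF; a real value comes from the ["<public-key>", "<wif>"]
   // pair supplied via the witness node's private-key option.
   const std::string wif = "5HpHagT65TZzG1PH3CSu63k8DbpvD8s5ip4nEB3kEsreAnchuDf";

   fc::optional<fc::ecc::private_key> key = graphene::utilities::wif_to_key( wif );
   if( !key )
   {
      std::cerr << "not a valid WIF private key" << std::endl;
      return 1;
   }
   // The corresponding public key must match the witness' on-chain signing key.
   fc::ecc::public_key pub = key->get_public_key();
   (void) pub;
   std::cout << "WIF decoded successfully" << std::endl;
   return 0;
}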
fc::optional private_key = graphene::utilities::wif_to_key(key_id_to_wif_pair.second); if (!private_key) { @@ -104,7 +99,7 @@ void witness_plugin::plugin_initialize(const boost::program_options::variables_m // just here to ease the transition, can be removed soon try { - private_key = fc::variant(key_id_to_wif_pair.second).as(); + private_key = fc::variant(key_id_to_wif_pair.second, 2).as(1); } catch (const fc::exception&) { @@ -121,9 +116,6 @@ void witness_plugin::plugin_startup() { try { ilog("witness plugin: plugin_startup() begin"); chain::database& d = database(); - //Start NTP time client - graphene::time::now(); - if( !_witnesses.empty() ) { ilog("Launching block production for ${n} witnesses.", ("n", _witnesses.size())); @@ -134,31 +126,65 @@ void witness_plugin::plugin_startup() new_chain_banner(d); _production_skip_flags |= graphene::chain::database::skip_undo_history_check; } + refresh_witness_key_cache(); + d.applied_block.connect( [this]( const chain::signed_block& b ) + { + refresh_witness_key_cache(); + }); schedule_production_loop(); - } else - elog("No witnesses configured! Please add witness IDs and private keys to configuration."); + } + else + { + ilog("No witness configured."); + } ilog("witness plugin: plugin_startup() end"); } FC_CAPTURE_AND_RETHROW() } void witness_plugin::plugin_shutdown() { - graphene::time::shutdown_ntp_time(); - return; + stop_block_production(); +} + +void witness_plugin::stop_block_production() +{ + _shutting_down = true; + + try { + if( _block_production_task.valid() ) + _block_production_task.cancel_and_wait(__FUNCTION__); + } catch(fc::canceled_exception&) { + //Expected exception. Move along. + } catch(fc::exception& e) { + edump((e.to_detail_string())); + } +} + +void witness_plugin::refresh_witness_key_cache() +{ + const auto& db = database(); + for( const chain::witness_id_type wit_id : _witnesses ) + { + const chain::witness_object* wit_obj = db.find( wit_id ); + if( wit_obj ) + _witness_key_cache[wit_id] = wit_obj->signing_key; + else + _witness_key_cache[wit_id] = fc::optional(); + } } void witness_plugin::schedule_production_loop() { + if (_shutting_down) return; + //Schedule for the next second's tick regardless of chain state // If we would wait less than 50ms, wait for the whole second. 
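The hunk below replaces the NTP-based clock with the local clock and schedules the next production attempt for the upcoming one-second tick. The wake-up arithmetic is easiest to follow with concrete numbers; a minimal sketch with an assumed timestamp (not from the patch):

#include <cstdint>
#include <iostream>

int main()
{
   // Example "now": 0.987 s into the current second, in microseconds since epoch.
   const int64_t now_us = 1500000000987000LL;

   int64_t time_to_next_second = 1000000 - ( now_us % 1000000 );  // 13000 us away
   if( time_to_next_second < 50000 )      // closer than 50 ms: skip to the following tick
      time_to_next_second += 1000000;     // -> 1013000 us

   std::cout << "wake up in " << time_to_next_second << " microseconds" << std::endl;
   return 0;
}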
- fc::time_point ntp_now = graphene::time::now(); - fc::time_point fc_now = fc::time_point::now(); - int64_t time_to_next_second = 1000000 - (ntp_now.time_since_epoch().count() % 1000000); + fc::time_point now = fc::time_point::now(); + int64_t time_to_next_second = 1000000 - (now.time_since_epoch().count() % 1000000); if( time_to_next_second < 50000 ) // we must sleep for at least 50ms time_to_next_second += 1000000; - fc::time_point next_wakeup( fc_now + fc::microseconds( time_to_next_second ) ); + fc::time_point next_wakeup( now + fc::microseconds( time_to_next_second ) ); - //wdump( (now.time_since_epoch().count())(next_wakeup.time_since_epoch().count()) ); _block_production_task = fc::schedule([this]{block_production_loop();}, next_wakeup, "Witness Block Production"); } @@ -166,35 +192,41 @@ void witness_plugin::schedule_production_loop() block_production_condition::block_production_condition_enum witness_plugin::block_production_loop() { block_production_condition::block_production_condition_enum result; - fc::mutable_variant_object capture; - try - { - result = maybe_produce_block(capture); - } - catch( const fc::canceled_exception& ) + fc::limited_mutable_variant_object capture( GRAPHENE_MAX_NESTED_OBJECTS ); + + if (_shutting_down) { - //We're trying to exit. Go ahead and let this one out. - throw; + result = block_production_condition::shutdown; } - catch( const fc::exception& e ) + else { - elog("Got exception while generating block:\n${e}", ("e", e.to_detail_string())); - result = block_production_condition::exception_producing_block; + try + { + result = maybe_produce_block(capture); + } + catch( const fc::canceled_exception& ) + { + //We're trying to exit. Go ahead and let this one out. + throw; + } + catch( const fc::exception& e ) + { + elog("Got exception while generating block:\n${e}", ("e", e.to_detail_string())); + result = block_production_condition::exception_producing_block; + } } switch( result ) { case block_production_condition::produced: - ilog("Generated block #${n} with timestamp ${t} at time ${c}", (capture)); + ilog("Generated block #${n} with ${x} transaction(s) and timestamp ${t} at time ${c}", (capture)); break; case block_production_condition::not_synced: ilog("Not producing block because production is disabled until we receive a recent block (see: --enable-stale-production)"); break; case block_production_condition::not_my_turn: - //ilog("Not producing block because it isn't my turn"); break; case block_production_condition::not_time_yet: - // ilog("Not producing block because slot has not yet arrived"); break; case block_production_condition::no_private_key: ilog("Not producing block because I don't have the private key for ${scheduled_key}", (capture) ); @@ -203,12 +235,16 @@ block_production_condition::block_production_condition_enum witness_plugin::bloc elog("Not producing block because node appears to be on a minority fork with only ${pct}% witness participation", (capture) ); break; case block_production_condition::lag: - elog("Not producing block because node didn't wake up within 500ms of the slot time."); - break; - case block_production_condition::consecutive: - elog("Not producing block because the last block was generated by the same witness.\nThis node is probably disconnected from the network so block production has been disabled.\nDisable this check with --allow-consecutive option."); + elog("Not producing block because node didn't wake up within 2500ms of the slot time."); break; case block_production_condition::exception_producing_block: 
+ elog( "exception producing block" ); + break; + case block_production_condition::shutdown: + ilog( "shutdown producing block" ); + return result; + default: + elog( "unknown condition ${result} while producing block", ("result", (unsigned char)result) ); break; } @@ -216,10 +252,10 @@ block_production_condition::block_production_condition_enum witness_plugin::bloc return result; } -block_production_condition::block_production_condition_enum witness_plugin::maybe_produce_block( fc::mutable_variant_object& capture ) +block_production_condition::block_production_condition_enum witness_plugin::maybe_produce_block( fc::limited_mutable_variant_object& capture ) { chain::database& db = database(); - fc::time_point now_fine = graphene::time::now(); + fc::time_point now_fine = fc::time_point::now(); fc::time_point_sec now = now_fine + fc::microseconds( 500000 ); // If the next block production opportunity is in the present or future, we're synced. @@ -258,7 +294,7 @@ block_production_condition::block_production_condition_enum witness_plugin::mayb } fc::time_point_sec scheduled_time = db.get_slot_time( slot ); - graphene::chain::public_key_type scheduled_key = scheduled_witness( db ).signing_key; + graphene::chain::public_key_type scheduled_key = *_witness_key_cache[scheduled_witness]; // should be valid auto private_key_itr = _private_keys.find( scheduled_key ); if( private_key_itr == _private_keys.end() ) @@ -274,7 +310,7 @@ block_production_condition::block_production_condition_enum witness_plugin::mayb return block_production_condition::low_participation; } - if( llabs((scheduled_time - now).count()) > fc::milliseconds( 500 ).count() ) + if( llabs((scheduled_time - now).count()) > fc::milliseconds( 2500 ).count() ) { capture("scheduled_time", scheduled_time)("now", now); return block_production_condition::lag; @@ -286,7 +322,7 @@ block_production_condition::block_production_condition_enum witness_plugin::mayb private_key_itr->second, _production_skip_flags ); - capture("n", block.block_num())("t", block.timestamp)("c", now); + capture("n", block.block_num())("t", block.timestamp)("c", now)("x", block.transactions.size()); fc::async( [this,block](){ p2p_node().broadcast(net::block_message(block)); } ); return block_production_condition::produced; diff --git a/libraries/time/CMakeLists.txt b/libraries/time/CMakeLists.txt deleted file mode 100644 index cc8a909d29..0000000000 --- a/libraries/time/CMakeLists.txt +++ /dev/null @@ -1,17 +0,0 @@ -file(GLOB HEADERS "include/graphene/time/*.hpp") - -add_library( graphene_time - time.cpp - ) - -target_link_libraries( graphene_time fc ) -target_include_directories( graphene_time - PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) - -install( TARGETS - graphene_time - - RUNTIME DESTINATION bin - LIBRARY DESTINATION lib - ARCHIVE DESTINATION lib -) diff --git a/libraries/time/include/graphene/time/time.hpp b/libraries/time/include/graphene/time/time.hpp deleted file mode 100644 index 2979369ce7..0000000000 --- a/libraries/time/include/graphene/time/time.hpp +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. 
- * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ -#pragma once - -#include -#include -#include - -namespace graphene { namespace time { - - typedef fc::signal time_discontinuity_signal_type; - extern time_discontinuity_signal_type time_discontinuity_signal; - - fc::optional ntp_time(); - fc::time_point now(); - fc::time_point nonblocking_now(); // identical to now() but guaranteed not to block - void update_ntp_time(); - fc::microseconds ntp_error(); - void shutdown_ntp_time(); - - void start_simulated_time( const fc::time_point sim_time ); - void advance_simulated_time_to( const fc::time_point sim_time ); - void advance_time( int32_t delta_seconds ); - -} } // graphene::time diff --git a/libraries/time/time.cpp b/libraries/time/time.cpp deleted file mode 100644 index 6ba0c126bf..0000000000 --- a/libraries/time/time.cpp +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ - -#include - -#include -#include -#include -#include - -#include - -namespace graphene { namespace time { - -static int32_t simulated_time = 0; -static int32_t adjusted_time_sec = 0; - -time_discontinuity_signal_type time_discontinuity_signal; - -namespace detail -{ - std::atomic ntp_service(nullptr); - fc::mutex ntp_service_initialization_mutex; -} - -fc::optional ntp_time() -{ - fc::ntp* actual_ntp_service = detail::ntp_service.load(); - if (!actual_ntp_service) - { - fc::scoped_lock lock(detail::ntp_service_initialization_mutex); - actual_ntp_service = detail::ntp_service.load(); - if (!actual_ntp_service) - { - actual_ntp_service = new fc::ntp; - detail::ntp_service.store(actual_ntp_service); - } - } - return actual_ntp_service->get_time(); -} - -void shutdown_ntp_time() -{ - fc::ntp* actual_ntp_service = detail::ntp_service.exchange(nullptr); - delete actual_ntp_service; -} - -fc::time_point now() -{ - if( simulated_time ) - return fc::time_point() + fc::seconds( simulated_time + adjusted_time_sec ); - - fc::optional current_ntp_time = ntp_time(); - if( current_ntp_time.valid() ) - return *current_ntp_time + fc::seconds( adjusted_time_sec ); - else - return fc::time_point::now() + fc::seconds( adjusted_time_sec ); -} - -fc::time_point nonblocking_now() -{ - if (simulated_time) - return fc::time_point() + fc::seconds(simulated_time + adjusted_time_sec); - - fc::ntp* actual_ntp_service = detail::ntp_service.load(); - fc::optional current_ntp_time; - if (actual_ntp_service) - current_ntp_time = actual_ntp_service->get_time(); - - if (current_ntp_time) - return *current_ntp_time + fc::seconds(adjusted_time_sec); - else - return fc::time_point::now() + fc::seconds(adjusted_time_sec); -} - -void update_ntp_time() -{ - detail::ntp_service.load()->request_now(); -} - -fc::microseconds ntp_error() -{ - fc::optional current_ntp_time = ntp_time(); - FC_ASSERT( current_ntp_time, "We don't have NTP time!" 
); - return *current_ntp_time - fc::time_point::now(); -} - -void start_simulated_time( const fc::time_point sim_time ) -{ - simulated_time = sim_time.sec_since_epoch(); - adjusted_time_sec = 0; -} -void advance_simulated_time_to( const fc::time_point sim_time ) -{ - simulated_time = sim_time.sec_since_epoch(); - adjusted_time_sec = 0; -} - -void advance_time( int32_t delta_seconds ) -{ - adjusted_time_sec += delta_seconds; - time_discontinuity_signal(); -} - -} } // graphene::time diff --git a/libraries/utilities/CMakeLists.txt b/libraries/utilities/CMakeLists.txt index cce166446e..4311ef46cb 100644 --- a/libraries/utilities/CMakeLists.txt +++ b/libraries/utilities/CMakeLists.txt @@ -7,22 +7,28 @@ if(NOT GRAPHENE_GIT_REVISION_DESCRIPTION) set(GRAPHENE_GIT_REVISION_DESCRIPTION "unknown") endif(NOT GRAPHENE_GIT_REVISION_DESCRIPTION) -file(GLOB headers "include/graphene/utilities/*.hpp") +file(GLOB HEADERS "include/graphene/utilities/*.hpp") set(sources key_conversion.cpp string_escape.cpp tempdir.cpp words.cpp - ${headers}) + elasticsearch.cpp + ${HEADERS}) configure_file("${CMAKE_CURRENT_SOURCE_DIR}/git_revision.cpp.in" "${CMAKE_CURRENT_BINARY_DIR}/git_revision.cpp" @ONLY) list(APPEND sources "${CMAKE_CURRENT_BINARY_DIR}/git_revision.cpp") - +find_package(CURL REQUIRED) +include_directories(${CURL_INCLUDE_DIRS}) add_library( graphene_utilities ${sources} ${HEADERS} ) -target_link_libraries( graphene_utilities fc ) +if(CURL_STATICLIB) + SET_TARGET_PROPERTIES(graphene_utilities PROPERTIES + COMPILE_DEFINITIONS "CURL_STATICLIB") +endif(CURL_STATICLIB) +target_link_libraries( graphene_utilities fc ${CURL_LIBRARIES}) target_include_directories( graphene_utilities PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) if (USE_PCH) @@ -37,3 +43,4 @@ install( TARGETS LIBRARY DESTINATION lib ARCHIVE DESTINATION lib ) +install( FILES ${HEADERS} DESTINATION "include/graphene/utilities" ) diff --git a/libraries/utilities/elasticsearch.cpp b/libraries/utilities/elasticsearch.cpp new file mode 100644 index 0000000000..0b94de50c9 --- /dev/null +++ b/libraries/utilities/elasticsearch.cpp @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2018 oxarbitrage, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include + +#include +#include +#include +#include + +size_t WriteCallback(void *contents, size_t size, size_t nmemb, void *userp) +{ + ((std::string*)userp)->append((char*)contents, size * nmemb); + return size * nmemb; +} + +namespace graphene { namespace utilities { + +bool checkES(ES& es) +{ + graphene::utilities::CurlRequest curl_request; + curl_request.handler = es.curl; + curl_request.url = es.elasticsearch_url + "_nodes"; + curl_request.auth = es.auth; + curl_request.type = "GET"; + + if(doCurl(curl_request).empty()) + return false; + return true; + +} +const std::string simpleQuery(ES& es) +{ + graphene::utilities::CurlRequest curl_request; + curl_request.handler = es.curl; + curl_request.url = es.elasticsearch_url + es.endpoint; + curl_request.auth = es.auth; + curl_request.type = "POST"; + curl_request.query = es.query; + + return doCurl(curl_request); +} + +bool SendBulk(ES&& es) +{ + std::string bulking = joinBulkLines(es.bulk_lines); + + graphene::utilities::CurlRequest curl_request; + curl_request.handler = es.curl; + curl_request.url = es.elasticsearch_url + "_bulk"; + curl_request.auth = es.auth; + curl_request.type = "POST"; + curl_request.query = std::move(bulking); + + auto curlResponse = doCurl(curl_request); + + if(handleBulkResponse(getResponseCode(curl_request.handler), curlResponse)) + return true; + return false; +} + +const std::string joinBulkLines(const std::vector& bulk) +{ + auto bulking = boost::algorithm::join(bulk, "\n"); + bulking = bulking + "\n"; + + return bulking; +} +long getResponseCode(CURL *handler) +{ + long http_code = 0; + curl_easy_getinfo (handler, CURLINFO_RESPONSE_CODE, &http_code); + return http_code; +} + +bool handleBulkResponse(long http_code, const std::string& CurlReadBuffer) +{ + if(http_code == 200) { + // all good, but check errors in response + fc::variant j = fc::json::from_string(CurlReadBuffer); + bool errors = j["errors"].as_bool(); + if(errors == true) { + return false; + } + } + else { + if(http_code == 413) { + elog( "413 error: Can be low disk space" ); + } + else if(http_code == 401) { + elog( "401 error: Unauthorized" ); + } + else { + elog( std::to_string(http_code) + " error: Unknown error" ); + } + return false; + } + return true; +} + +const std::vector createBulk(const fc::mutable_variant_object& bulk_header, std::string&& data) +{ + std::vector bulk; + fc::mutable_variant_object final_bulk_header; + final_bulk_header["index"] = bulk_header; + bulk.push_back(fc::json::to_string(final_bulk_header)); + bulk.push_back(data); + + return bulk; +} + +bool deleteAll(ES& es) +{ + graphene::utilities::CurlRequest curl_request; + curl_request.handler = es.curl; + curl_request.url = es.elasticsearch_url + es.index_prefix + "*"; + curl_request.auth = es.auth; + curl_request.type = "DELETE"; + + auto curl_response = doCurl(curl_request); + if(curl_response.empty()) + return false; + else + return true; +} +const std::string getEndPoint(ES& es) +{ + graphene::utilities::CurlRequest curl_request; + curl_request.handler = es.curl; + curl_request.url = es.elasticsearch_url + es.endpoint; + curl_request.auth = es.auth; + curl_request.type = "GET"; + + return doCurl(curl_request); +} + +const std::string generateIndexName(const fc::time_point_sec& block_date, const std::string& _elasticsearch_index_prefix) +{ + auto block_date_string = block_date.to_iso_string(); + std::vector parts; + boost::split(parts, block_date_string, boost::is_any_of("-")); + std::string index_name = _elasticsearch_index_prefix + parts[0] + "-" + 
parts[1]; + return index_name; +} + +const std::string doCurl(CurlRequest& curl) +{ + std::string CurlReadBuffer; + struct curl_slist *headers = NULL; + headers = curl_slist_append(headers, "Content-Type: application/json"); + + curl_easy_setopt(curl.handler, CURLOPT_HTTPHEADER, headers); + curl_easy_setopt(curl.handler, CURLOPT_URL, curl.url.c_str()); + curl_easy_setopt(curl.handler, CURLOPT_CUSTOMREQUEST, curl.type.c_str()); + if(curl.type == "POST") + { + curl_easy_setopt(curl.handler, CURLOPT_POST, true); + curl_easy_setopt(curl.handler, CURLOPT_POSTFIELDS, curl.query.c_str()); + } + curl_easy_setopt(curl.handler, CURLOPT_WRITEFUNCTION, WriteCallback); + curl_easy_setopt(curl.handler, CURLOPT_WRITEDATA, (void *)&CurlReadBuffer); + curl_easy_setopt(curl.handler, CURLOPT_USERAGENT, "libcrp/0.1"); + if(!curl.auth.empty()) + curl_easy_setopt(curl.handler, CURLOPT_USERPWD, curl.auth.c_str()); + curl_easy_perform(curl.handler); + + return CurlReadBuffer; +} + +} } // end namespace graphene::utilities diff --git a/libraries/utilities/include/graphene/utilities/elasticsearch.hpp b/libraries/utilities/include/graphene/utilities/elasticsearch.hpp new file mode 100644 index 0000000000..e8790c1ea5 --- /dev/null +++ b/libraries/utilities/include/graphene/utilities/elasticsearch.hpp @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2018 oxarbitrage, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
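Taken together, the helpers in elasticsearch.cpp above wrap libcurl for the handful of calls the plugins need: node check, bulk indexing, simple queries and index-per-month naming. A rough usage sketch, assuming a local node at http://localhost:9200/ and illustrative index and field names (error handling trimmed; not part of the patch):

#include <graphene/utilities/elasticsearch.hpp>
#include <fc/variant_object.hpp>
#include <curl/curl.h>
#include <iostream>

int main()
{
   CURL* handle = curl_easy_init();
   if( !handle ) return 1;

   graphene::utilities::ES es;
   es.curl = handle;
   es.elasticsearch_url = "http://localhost:9200/";
   es.index_prefix = "bitshares-";
   es.auth = "";                        // "user:password" if the node requires basic auth

   if( !graphene::utilities::checkES( es ) )
   {
      std::cerr << "Elasticsearch node not reachable" << std::endl;
      curl_easy_cleanup( handle );
      return 1;
   }

   // One document = two bulk lines: the action/metadata header and the JSON source.
   fc::mutable_variant_object bulk_header;
   bulk_header["_index"] = "bitshares-2018-01";   // e.g. generateIndexName( block_time, es.index_prefix )
   bulk_header["_type"]  = "data";
   bulk_header["_id"]    = "2.9.0";
   auto lines = graphene::utilities::createBulk( bulk_header, "{\"account\":\"1.2.0\"}" );
   es.bulk_lines.insert( es.bulk_lines.end(), lines.begin(), lines.end() );

   const bool ok = graphene::utilities::SendBulk( std::move( es ) );
   curl_easy_cleanup( handle );
   std::cout << ( ok ? "indexed" : "bulk insert failed" ) << std::endl;
   return ok ? 0 : 1;
}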
+ */ +#pragma once +#include +#include +#include + +#include +#include +#include + +size_t WriteCallback(void *contents, size_t size, size_t nmemb, void *userp); + +namespace graphene { namespace utilities { + + class ES { + public: + CURL *curl; + std::vector bulk_lines; + std::string elasticsearch_url; + std::string index_prefix; + std::string auth; + std::string endpoint; + std::string query; + }; + class CurlRequest { + public: + CURL *handler; + std::string url; + std::string type; + std::string auth; + std::string query; + }; + + bool SendBulk(ES&& es); + const std::vector createBulk(const fc::mutable_variant_object& bulk_header, std::string&& data); + bool checkES(ES& es); + const std::string simpleQuery(ES& es); + bool deleteAll(ES& es); + bool handleBulkResponse(long http_code, const std::string& CurlReadBuffer); + const std::string getEndPoint(ES& es); + const std::string generateIndexName(const fc::time_point_sec& block_date, const std::string& _elasticsearch_index_prefix); + const std::string doCurl(CurlRequest& curl); + const std::string joinBulkLines(const std::vector& bulk); + long getResponseCode(CURL *handler); + +} } // end namespace graphene::utilities diff --git a/libraries/utilities/key_conversion.cpp b/libraries/utilities/key_conversion.cpp index e41307893a..268b2b5ee0 100644 --- a/libraries/utilities/key_conversion.cpp +++ b/libraries/utilities/key_conversion.cpp @@ -58,7 +58,7 @@ fc::optional wif_to_key( const std::string& wif_key ) if (wif_bytes.size() < 5) return fc::optional(); std::vector key_bytes(wif_bytes.begin() + 1, wif_bytes.end() - 4); - fc::ecc::private_key key = fc::variant(key_bytes).as(); + fc::ecc::private_key key = fc::variant( key_bytes, 1 ).as( 1 ); fc::sha256 check = fc::sha256::hash(wif_bytes.data(), wif_bytes.size() - 4); fc::sha256 check2 = fc::sha256::hash(check); diff --git a/libraries/wallet/CMakeLists.txt b/libraries/wallet/CMakeLists.txt index 3d66c48e64..8ff42dc95f 100644 --- a/libraries/wallet/CMakeLists.txt +++ b/libraries/wallet/CMakeLists.txt @@ -8,13 +8,20 @@ if( PERL_FOUND AND DOXYGEN_FOUND AND NOT "${CMAKE_GENERATOR}" STREQUAL "Ninja" ) add_custom_command( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/doxygen/perlmod/DoxyDocs.pm WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} COMMAND ${DOXYGEN_EXECUTABLE} - DEPENDS Doxyfile include/graphene/wallet/wallet.hpp ) - add_custom_command( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/api_documentation.cpp - COMMAND ${PERL_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/generate_api_documentation.pl ${CMAKE_CURRENT_BINARY_DIR}/api_documentation.cpp.new - - COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_BINARY_DIR}/api_documentation.cpp.new ${CMAKE_CURRENT_BINARY_DIR}/api_documentation.cpp - COMMAND ${CMAKE_COMMAND} -E remove ${CMAKE_CURRENT_BINARY_DIR}/api_documentation.cpp.new - DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/generate_api_documentation.pl ${CMAKE_CURRENT_BINARY_DIR}/doxygen/perlmod/DoxyDocs.pm ) + DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile include/graphene/wallet/wallet.hpp ) + if(MSVC) + add_custom_command( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/api_documentation.cpp + COMMAND ${PERL_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/generate_api_documentation.pl ${CMAKE_CURRENT_BINARY_DIR}/api_documentation.cpp.new + COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_BINARY_DIR}/api_documentation.cpp.new ${CMAKE_CURRENT_BINARY_DIR}/api_documentation.cpp + COMMAND ${CMAKE_COMMAND} -E remove ${CMAKE_CURRENT_BINARY_DIR}/api_documentation.cpp.new + DEPENDS 
${CMAKE_CURRENT_SOURCE_DIR}/generate_api_documentation.pl ${CMAKE_CURRENT_BINARY_DIR}/doxygen/perlmod/DoxyDocs.pm ) + else(MSVC) + add_custom_command( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/api_documentation.cpp + COMMAND PERLLIB=${CMAKE_CURRENT_BINARY_DIR} ${PERL_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/generate_api_documentation.pl ${CMAKE_CURRENT_BINARY_DIR}/api_documentation.cpp.new + COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_BINARY_DIR}/api_documentation.cpp.new ${CMAKE_CURRENT_BINARY_DIR}/api_documentation.cpp + COMMAND ${CMAKE_COMMAND} -E remove ${CMAKE_CURRENT_BINARY_DIR}/api_documentation.cpp.new + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/generate_api_documentation.pl ${CMAKE_CURRENT_BINARY_DIR}/doxygen/perlmod/DoxyDocs.pm ) + endif(MSVC) else() # no perl and doxygen, generate the best docs we can at runtime from reflection add_custom_command( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/api_documentation.cpp @@ -37,3 +44,4 @@ install( TARGETS LIBRARY DESTINATION lib ARCHIVE DESTINATION lib ) +install( FILES ${HEADERS} DESTINATION "include/graphene/wallet" ) diff --git a/libraries/wallet/include/graphene/wallet/reflect_util.hpp b/libraries/wallet/include/graphene/wallet/reflect_util.hpp index 497303c51c..7a9e6e0812 100644 --- a/libraries/wallet/include/graphene/wallet/reflect_util.hpp +++ b/libraries/wallet/include/graphene/wallet/reflect_util.hpp @@ -39,7 +39,6 @@ namespace impl { std::string clean_name( const std::string& name ) { - std::string result; const static std::string prefix = "graphene::chain::"; const static std::string suffix = "_operation"; // graphene::chain::.*_operation @@ -62,14 +61,14 @@ struct static_variant_map_visitor template< typename T > result_type operator()( const T& dummy ) { - assert( which == m.which_to_name.size() ); + FC_ASSERT( which == m.which_to_name.size(), "This should not happen" ); std::string name = clean_name( fc::get_typename::name() ); m.name_to_which[ name ] = which; m.which_to_name.push_back( name ); } static_variant_map m; - int which; + uint16_t which; // 16 bits should be practically enough }; template< typename StaticVariant > @@ -81,24 +80,25 @@ struct from_which_visitor result_type operator()( const Member& dummy ) { Member result; - from_variant( v, result ); + from_variant( v, result, _max_depth ); return result; // converted from StaticVariant to Result automatically due to return type } const variant& v; + const uint32_t _max_depth; - from_which_visitor( const variant& _v ) : v(_v) {} + from_which_visitor( const variant& _v, uint32_t max_depth ) : v(_v), _max_depth(max_depth) {} }; } // namespace impl template< typename T > -T from_which_variant( int which, const variant& v ) +T from_which_variant( int which, const variant& v, uint32_t max_depth ) { // Parse a variant for a known which() T dummy; dummy.set_which( which ); - impl::from_which_visitor< T > vtor(v); + impl::from_which_visitor< T > vtor(v, max_depth); return dummy.visit( vtor ); } @@ -107,6 +107,7 @@ static_variant_map create_static_variant_map() { T dummy; int n = dummy.count(); + FC_ASSERT( n <= std::numeric_limits::max(), "Too many items in this static_variant" ); impl::static_variant_map_visitor vtor; for( int i=0; i derive_owner_keys_from_brain_key(string brain_key, int number_of_desired_keys = 1); + + /** Suggests a safe brain key to use for creating your account. + * \c create_account_with_brain_key() requires you to specify a 'brain key', + * a long passphrase that provides enough entropy to generate cryptographic + * keys.
This function will suggest a suitably random string that should + * be easy to write down (and, with effort, memorize). + * @returns a suggested brain_key + */ + static brain_key_info suggest_brain_key(); +}; + struct operation_detail { string memo; string description; operation_history_object op; }; +struct operation_detail_ex { + string memo; + string description; + operation_history_object op; + transaction_id_type transaction_id; +}; + +struct account_history_operation_detail { + uint32_t total_count = 0; + uint32_t result_count = 0; + vector details; +}; + /** * This wallet assumes it is connected to the database server with a high-bandwidth, low-latency connection and * performs minimal caching. This API could be provided locally to be used by a web interface. @@ -323,24 +365,81 @@ class wallet_api * @returns the list of asset objects, ordered by symbol */ vector list_assets(const string& lowerbound, uint32_t limit)const; - + /** Returns the count of assets registered on the blockchain. + * + * @returns the asset count + */ + uint64_t get_asset_count()const; + /** Returns the most recent operations on the named account. * * This returns a list of operation history objects, which describe activity on the account. * - * @note this API doesn't give a way to retrieve more than the most recent 100 transactions, - * you can interface directly with the blockchain to get more history * @param name the name or id of the account - * @param limit the number of entries to return (starting from the most recent) (max 100) + * @param limit the number of entries to return (starting from the most recent) * @returns a list of \c operation_history_objects */ vector get_account_history(string name, int limit)const; + /** Returns the relative operations on the named account from the given start number. + * + * @param name the name or id of the account + * @param stop Sequence number of earliest operation. + * @param limit the number of entries to return + * @param start the sequence number where to start looping back through the history + * @returns a list of \c operation_history_objects + */ + vector get_relative_account_history(string name, uint32_t stop, int limit, uint32_t start)const; + + /** + * @brief Fetch all objects relevant to the specified account + * @param name_or_id Must be the name or ID of an account to retrieve + * @return All info about the specified account + * + * This function fetches all relevant objects for the given account. If the string + * of \c name_or_id cannot be tied to an account, that input will be ignored. + * + */ + full_account get_full_account( const string& name_or_id); + vector get_market_history(string symbol, string symbol2, uint32_t bucket, fc::time_point_sec start, fc::time_point_sec end)const; + + /** + * @brief Fetch all orders relevant to the specified account, sorted in descending order by price + * + * @param name_or_id The name or ID of an account to retrieve + * @param base Base asset + * @param quote Quote asset + * @param limit The maximum number of items each query can fetch (max: 101) + * @param ostart_id Start order id; fetch orders whose price is lower than or equal to this order's price + * @param ostart_price Fetch orders with price lower than or equal to this price + * + * @return List of limit orders of the account \c name_or_id + * + * @note + * 1. if \c name_or_id cannot be tied to an account, empty result will be returned + * 2.
\c ostart_id and \c ostart_price can be \c null, if so the API will return the "first page" of orders; + * if \c ostart_id is specified and valid, its price will be used to do page query preferentially, + * otherwise the \c ostart_price will be used + */ + vector get_account_limit_orders( const string& name_or_id, + const string &base, + const string &quote, + uint32_t limit = 101, + optional ostart_id = optional(), + optional ostart_price = optional()); - vector get_market_history(string symbol, string symbol2, uint32_t bucket)const; vector get_limit_orders(string a, string b, uint32_t limit)const; vector get_call_orders(string a, uint32_t limit)const; vector get_settle_orders(string a, uint32_t limit)const; + + /** Returns the collateral_bid objects for the given MPA + * + * @param asset the name or id of the asset + * @param limit the number of entries to return + * @param start the sequence number where to start looping back through the history + * @returns a list of \c collateral_bid_objects + */ + vector get_collateral_bids(string asset, uint32_t limit = 100, uint32_t start = 0)const; /** Returns the block chain's slowly-changing settings. * This object contains all of the properties of the blockchain that are fixed * @@ -351,6 +450,17 @@ class wallet_api */ global_property_object get_global_properties() const; + /** + * Get operations relevant to the specified account, filtered by operation type, with transaction id + * + * @param name the name or id of the account whose history should be queried + * @param operation_types The IDs of the operation types to retrieve for the account (0 = transfer, 1 = limit order create, ...) + * @param start the sequence number where to start looping back through the history + * @param limit the maximum number of entries to return (counting from the start number) + * @returns account_history_operation_detail + */ + account_history_operation_detail get_account_history_by_operations(string name, vector operation_types, uint32_t start, int limit); + /** Returns the block chain's rapidly-changing properties. * The returned object contains information that changes every block interval * such as the head block number, the next witness, etc. @@ -447,6 +557,13 @@ class wallet_api * @ingroup Transaction Builder API */ signed_transaction sign_builder_transaction(transaction_handle_type transaction_handle, bool broadcast = true); + + /** Broadcast a signed transaction. + * @param tx the signed transaction + * @returns the transaction ID along with the signed transaction. + */ + pair broadcast_transaction(signed_transaction tx); + /** * @ingroup Transaction Builder API */ @@ -546,6 +663,12 @@ class wallet_api */ bool load_wallet_file(string wallet_filename = ""); + /** Quit the BitShares wallet. + * + * The current wallet will be closed. + */ + void quit(); + /** Saves the current wallet to the given filename. * * @warning This does not change the wallet filename that will be used for future * @@ -576,9 +699,30 @@ class wallet_api */ brain_key_info suggest_brain_key()const; + /** + * Derive any number of *possible* owner keys from a given brain key. + * + * NOTE: These keys may or may not match with the owner keys of any account. + * This function is merely intended to assist with account or key recovery. + * + * @see suggest_brain_key() + * + * @param brain_key Brain key + * @param number_of_desired_keys Number of desired keys + * @return A list of keys that are deterministically derived from the brain key + */ + vector derive_owner_keys_from_brain_key(string brain_key, int number_of_desired_keys = 1) const;
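For background on what these candidate owner keys are: key number N is conventionally derived by hashing the brain key together with the sequence number and regenerating a private key from the digest. A stand-alone sketch of that derivation, assumed to mirror the wallet's internal derive_private_key() helper (the phrase below is a placeholder and must never be used):

#include <fc/crypto/elliptic.hpp>
#include <fc/crypto/sha256.hpp>
#include <fc/crypto/sha512.hpp>
#include <fc/io/raw.hpp>
#include <iostream>
#include <string>

int main()
{
   const std::string brain_key = "EXAMPLE PLACEHOLDER BRAIN KEY DO NOT USE";
   for( int seq = 0; seq < 3; ++seq )
   {
      // candidate N = regenerate( sha256( sha512( brain_key + " " + N ) ) )
      fc::sha512 h = fc::sha512::hash( brain_key + " " + std::to_string( seq ) );
      fc::ecc::private_key candidate = fc::ecc::private_key::regenerate( fc::sha256::hash( h ) );
      // The candidate's public key would be compared against the account's owner authority.
      (void) candidate;
      std::cout << "derived owner-key candidate #" << seq << std::endl;
   }
   return 0;
}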
+ * + * @see suggest_brain_key() + * + * @param brain_key Brain key + * @param number_of_desired_keys Number of desired keys + * @return A list of keys that are deterministically derived from the brainkey + */ + vector derive_owner_keys_from_brain_key(string brain_key, int number_of_desired_keys = 1) const; + + /** + * Determine whether a textual representation of a public key + * (in Base-58 format) is *currently* linked + * to any *registered* (i.e. non-stealth) account on the blockchain + * @param public_key Public key + * @return Whether a public key is known + */ + bool is_public_key_registered(string public_key) const; + /** Converts a signed_transaction in JSON form to its binary representation. - * - * TODO: I don't see a broadcast_transaction() function, do we need one? * * @param tx the transaction to serialize * @returns the binary form of the transaction. It will not be hex encoded, @@ -727,6 +871,22 @@ class wallet_api transaction_id_type get_transaction_id( const signed_transaction& trx )const { return trx.id(); } + /** Sign a memo message. + * + * @param from the name or id of signing account; or a public key. + * @param to the name or id of receiving account; or a public key. + * @param memo text to sign. + */ + memo_data sign_memo(string from, string to, string memo); + + /** Read a memo. + * + * @param memo JSON-enconded memo. + * @returns string with decrypted message.. + */ + string read_memo(const memo_data& memo); + + /** These methods are used for stealth transfers */ ///@{ /** @@ -767,13 +927,15 @@ class wallet_api * that it exists in the blockchain. If it exists then it will report the amount received and * who sent it. * - * @param opt_from - if not empty and the sender is a unknown public key, then the unknown public key will be given the label opt_from - * @param confirmation_receipt - a base58 encoded stealth confirmation + * @param opt_from if not empty and the sender is a unknown public key, + * then the unknown public key will be given the label \c opt_from + * @param confirmation_receipt a base58 encoded stealth confirmation + * @param opt_memo a self-defined label for this transfer to be saved in local wallet file */ blind_receipt receive_blind_transfer( string confirmation_receipt, string opt_from, string opt_memo ); /** - * Transfers a public balance from @from to one or more blinded balances using a + * Transfers a public balance from \c from_account_id_or_name to one or more blinded balances using a * stealth transfer. */ blind_confirmation transfer_to_blind( string from_account_id_or_name, @@ -849,51 +1011,6 @@ class wallet_api uint32_t timeout_sec = 0, bool fill_or_kill = false, bool broadcast = false); - - /** Place a limit order attempting to sell one asset for another. - * - * This API call abstracts away some of the details of the sell_asset call to be more - * user friendly. All orders placed with sell never timeout and will not be killed if they - * cannot be filled immediately. If you wish for one of these parameters to be different, - * then sell_asset should be used instead. - * - * @param seller_account the account providing the asset being sold, and which will - * receive the processed of the sale. - * @param base The name or id of the asset to sell. - * @param quote The name or id of the asset to recieve. - * @param rate The rate in base:quote at which you want to sell. - * @param amount The amount of base you want to sell. - * @param broadcast true to broadcast the transaction on the network. 
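derive_owner_keys_from_brain_key() and is_public_key_registered() are intended to be used together for account or key recovery: derive a handful of candidate owner keys, then ask the node which of them are actually in use. The sketch below shows only that wiring; derive_candidate_key and is_registered are hypothetical stand-ins for the two wallet calls, and the toy hash-based "derivation" is emphatically not the wallet's real scheme.

#include <functional>
#include <iostream>
#include <string>

// Hypothetical stand-in for one derived public key; the real call returns
// brain_key_info records with the WIF private key and matching public key.
static std::string derive_candidate_key( const std::string& brain_key, int index )
{
   // Toy derivation for illustration only -- NOT the wallet's real scheme.
   return "PUB_" + std::to_string( std::hash<std::string>{}( brain_key + "/" + std::to_string( index ) ) );
}

// Hypothetical stand-in for wallet_api::is_public_key_registered().
static bool is_registered( const std::string& public_key )
{
   return public_key.size() % 2 == 0;   // placeholder answer
}

int main()
{
   const std::string brain_key = "EXAMPLE BRAIN KEY WORDS";
   const int number_of_desired_keys = 5;

   for( int i = 0; i < number_of_desired_keys; ++i )
   {
      const std::string pub = derive_candidate_key( brain_key, i );
      if( is_registered( pub ) )
         std::cout << "candidate " << i << " appears to belong to a registered account\n";
      else
         std::cout << "candidate " << i << " is unused\n";
   }
}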
- * @returns The signed transaction selling the funds. - */ - signed_transaction sell( string seller_account, - string base, - string quote, - double rate, - double amount, - bool broadcast ); - - /** Place a limit order attempting to buy one asset with another. - * - * This API call abstracts away some of the details of the sell_asset call to be more - * user friendly. All orders placed with buy never timeout and will not be killed if they - * cannot be filled immediately. If you wish for one of these parameters to be different, - * then sell_asset should be used instead. - * - * @param buyer_account The account buying the asset for another asset. - * @param base The name or id of the asset to buy. - * @param quote The name or id of the assest being offered as payment. - * @param rate The rate in base:quote at which you want to buy. - * @param amount the amount of base you want to buy. - * @param broadcast true to broadcast the transaction on the network. - * @param The signed transaction selling the funds. - */ - signed_transaction buy( string buyer_account, - string base, - string quote, - double rate, - double amount, - bool broadcast ); /** Borrow an asset or update the debt/collateral ratio for the loan. * @@ -912,6 +1029,26 @@ class wallet_api signed_transaction borrow_asset(string borrower_name, string amount_to_borrow, string asset_symbol, string amount_of_collateral, bool broadcast = false); + /** Borrow an asset or update the debt/collateral ratio for the loan, with additional options. + * + * This is the first step in shorting an asset. Call \c sell_asset() to complete the short. + * + * @param borrower_name the name or id of the account associated with the transaction. + * @param amount_to_borrow the amount of the asset being borrowed. Make this value + * negative to pay back debt. + * @param asset_symbol the symbol or id of the asset being borrowed. + * @param amount_of_collateral the amount of the backing asset to add to your collateral + * position. Make this negative to claim back some of your collateral. + * The backing asset is defined in the \c bitasset_options for the asset being borrowed. + * @param extensions additional options + * @param broadcast true to broadcast the transaction on the network + * @returns the signed transaction borrowing the asset + */ + signed_transaction borrow_asset_ext( string borrower_name, string amount_to_borrow, string asset_symbol, + string amount_of_collateral, + call_order_update_operation::extensions_type extensions, + bool broadcast = false ); + /** Cancel an existing order * * @param order_id the id of order to be cancelled @@ -984,6 +1121,21 @@ class wallet_api asset_options new_options, bool broadcast = false); + /** Update the issuer of an asset + * Since this call requires the owner authority of the current issuer to sign the transaction, + * a separated operation is used to change the issuer. This call simplifies the use of this action. + * + * @note This operation requires the owner key to be available in the wallet. + * + * @param symbol the name or id of the asset to update + * @param new_issuer if changing the asset's issuer, the name or id of the new issuer. + * @param broadcast true to broadcast the transaction on the network + * @returns the signed transaction updating the asset + */ + signed_transaction update_asset_issuer(string symbol, + string new_issuer, + bool broadcast = false); + /** Update the options specific to a BitAsset. * * BitAssets have some options which are not relevant to other asset types. 
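Because borrow_asset() and borrow_asset_ext() encode "repay debt" and "withdraw collateral" as negative string amounts, as documented above, a small helper can make the sign convention explicit at call sites. This is a hypothetical convenience wrapper around that convention, not code from this change.

#include <iostream>
#include <string>
#include <utility>

// Builds the (amount_to_borrow, amount_of_collateral) string arguments for
// borrow_asset(): positive values borrow debt / add collateral, negative
// values repay debt / withdraw collateral.
static std::pair<std::string, std::string>
borrow_args( double debt_delta, double collateral_delta )
{
   return { std::to_string( debt_delta ), std::to_string( collateral_delta ) };
}

int main()
{
   const auto open_position  = borrow_args( 100.0, 300.0 );   // borrow 100, lock 300 collateral
   const auto close_position = borrow_args( -40.0, -50.0 );   // repay 40, withdraw 50 collateral

   std::cout << "open:  " << open_position.first  << " / " << open_position.second  << "\n"
             << "close: " << close_position.first << " / " << close_position.second << "\n";
}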
This operation is used to update those @@ -1059,6 +1211,23 @@ class wallet_api string amount, bool broadcast = false); + /** Claim funds from the fee pool for the given asset. + * + * User-issued assets can optionally have a pool of the core asset which is + * automatically used to pay transaction fees for any transaction using that + * asset (using the asset's core exchange rate). + * + * This command allows the issuer to withdraw those funds from the fee pool. + * + * @param symbol the name or id of the asset whose fee pool you wish to claim + * @param amount the amount of the core asset to withdraw + * @param broadcast true to broadcast the transaction on the network + * @returns the signed transaction claiming from the fee pool + */ + signed_transaction claim_asset_fee_pool(string symbol, + string amount, + bool broadcast = false); + /** Burns the given user-issued asset. * * This command burns the user-issued asset to reduce the amount in circulation. @@ -1115,6 +1284,25 @@ class wallet_api string symbol, bool broadcast = false); + /** Creates or updates a bid on an MPA after global settlement. + * + * In order to revive a market-pegged asset after global settlement (aka + * black swan), investors can bid collateral in order to take over part of + * the debt and the settlement fund, see BSIP-0018. Updating an existing + * bid to cover 0 debt will delete the bid. + * + * @param bidder_name the name or id of the account making the bid + * @param debt_amount the amount of debt of the named asset to bid for + * @param debt_symbol the name or id of the MPA to bid for + * @param additional_collateral the amount of additional collateral to bid + * for taking over debt_amount. The asset type of this amount is + * determined automatically from debt_symbol. + * @param broadcast true to broadcast the transaction on the network + * @returns the signed transaction creating/updating the bid + */ + signed_transaction bid_collateral(string bidder_name, string debt_amount, string debt_symbol, + string additional_collateral, bool broadcast = false); + /** Whitelist and blacklist accounts, primarily for transacting in whitelisted assets. * * Accounts can freely specify opinions about other accounts, in the form of either whitelisting or blacklisting @@ -1213,7 +1401,7 @@ class wallet_api /** * Update a witness object owned by the given account. * - * @param witness The name of the witness's owner account. Also accepts the ID of the owner account or the ID of the witness. + * @param witness_name The name of the witness's owner account. Also accepts the ID of the owner account or the ID of the witness. * @param url Same as for create_witness. The empty string makes it remain the same. * @param block_signing_key The new block signing public key. The empty string makes it remain the same. * @param broadcast true if you wish to broadcast the transaction. @@ -1251,7 +1439,7 @@ class wallet_api * Update your votes for a worker * * @param account The account which will pay the fee and update votes. - * @param worker_vote_delta {"vote_for" : [...], "vote_against" : [...], "vote_abstain" : [...]} + * @param delta {"vote_for" : [...], "vote_against" : [...], "vote_abstain" : [...]} * @param broadcast true if you wish to broadcast the transaction. */ signed_transaction update_worker_votes( @@ -1362,7 +1550,8 @@ class wallet_api * set, your preferences will be ignored. 
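The delta argument of update_worker_votes() is the small JSON object shown in its doc comment. Below is a minimal, self-contained sketch of assembling that object as a string; build_worker_vote_delta is a hypothetical helper (the cli_wallet accepts the JSON literal directly), and the "1.14.x" values are example worker ids.

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Assembles {"vote_for":[...],"vote_against":[...],"vote_abstain":[...]}
// in the shape expected by update_worker_votes().
static std::string build_worker_vote_delta( const std::vector<std::string>& vote_for,
                                             const std::vector<std::string>& vote_against,
                                             const std::vector<std::string>& vote_abstain )
{
   const auto list = []( const std::vector<std::string>& ids ) {
      std::ostringstream os;
      os << '[';
      for( size_t i = 0; i < ids.size(); ++i )
         os << ( i ? "," : "" ) << '"' << ids[i] << '"';
      os << ']';
      return os.str();
   };
   return "{\"vote_for\":"     + list( vote_for )
        + ",\"vote_against\":" + list( vote_against )
        + ",\"vote_abstain\":" + list( vote_abstain ) + "}";
}

int main()
{
   std::cout << build_worker_vote_delta( { "1.14.0" }, {}, { "1.14.1" } ) << "\n";
   // prints {"vote_for":["1.14.0"],"vote_against":[],"vote_abstain":["1.14.1"]}
}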
* * @param account_to_modify the name or id of the account to update - * @param number_of_committee_members the number + * @param desired_number_of_witnesses desired number of active witnesses + * @param desired_number_of_committee_members desired number of active committee members * * @param broadcast true if you wish to broadcast the transaction * @return the signed transaction changing your vote proxy settings @@ -1449,6 +1638,20 @@ class wallet_api order_book get_order_book( const string& base, const string& quote, unsigned limit = 50); + /** Signs a transaction. + * + * Given a fully-formed transaction with or without signatures, signs + * the transaction with the owned keys and optionally broadcasts the + * transaction. + * + * @param tx the unsigned transaction + * @param broadcast true if you wish to broadcast the transaction + * + * @return the signed transaction + */ + signed_transaction add_transaction_signature( signed_transaction tx, + bool broadcast = false ); + void dbg_make_uia(string creator, string symbol); void dbg_make_mia(string creator, string symbol); void dbg_push_blocks( std::string src_filename, uint32_t count ); @@ -1505,7 +1708,7 @@ FC_REFLECT( graphene::wallet::brain_key_info, (brain_priv_key) (wif_priv_key) (pub_key) - ); + ) FC_REFLECT( graphene::wallet::exported_account_keys, (account_name)(encrypted_private_keys)(public_keys) ) @@ -1538,6 +1741,12 @@ FC_REFLECT_DERIVED( graphene::wallet::vesting_balance_object_with_info, (graphen FC_REFLECT( graphene::wallet::operation_detail, (memo)(description)(op) ) +FC_REFLECT(graphene::wallet::operation_detail_ex, + (memo)(description)(op)(transaction_id)) + +FC_REFLECT( graphene::wallet::account_history_operation_detail, + (total_count)(result_count)(details)) + FC_API( graphene::wallet::wallet_api, (help) (gethelp) @@ -1549,6 +1758,7 @@ FC_API( graphene::wallet::wallet_api, (set_fees_on_builder_transaction) (preview_builder_transaction) (sign_builder_transaction) + (broadcast_transaction) (propose_builder_transaction) (propose_builder_transaction2) (remove_builder_transaction) @@ -1560,24 +1770,26 @@ FC_API( graphene::wallet::wallet_api, (list_accounts) (list_account_balances) (list_assets) + (get_asset_count) (import_key) (import_accounts) (import_account_keys) (import_balance) (suggest_brain_key) + (derive_owner_keys_from_brain_key) (register_account) (upgrade_account) (create_account_with_brain_key) (sell_asset) - (sell) - (buy) (borrow_asset) + (borrow_asset_ext) (cancel_order) (transfer) (transfer2) (get_transaction_id) (create_asset) (update_asset) + (update_asset_issuer) (update_bitasset) (update_asset_feed_producers) (publish_asset_feed) @@ -1585,9 +1797,11 @@ FC_API( graphene::wallet::wallet_api, (get_asset) (get_bitasset_data) (fund_asset_fee_pool) + (claim_asset_fee_pool) (reserve_asset) (global_settle_asset) (settle_asset) + (bid_collateral) (whitelist_account) (create_committee_member) (get_witness) @@ -1609,6 +1823,11 @@ FC_API( graphene::wallet::wallet_api, (get_block) (get_account_count) (get_account_history) + (get_relative_account_history) + (get_account_history_by_operations) + (get_collateral_bids) + (is_public_key_registered) + (get_full_account) (get_market_history) (get_global_properties) (get_dynamic_global_properties) @@ -1616,12 +1835,14 @@ FC_API( graphene::wallet::wallet_api, (get_private_key) (load_wallet_file) (normalize_brain_key) + (get_account_limit_orders) (get_limit_orders) (get_call_orders) (get_settle_orders) (save_wallet_file) (serialize_transaction) (sign_transaction) + 
(add_transaction_signature) (get_prototype_operation) (propose_parameter_change) (propose_fee_change) @@ -1635,6 +1856,8 @@ FC_API( graphene::wallet::wallet_api, (flood_network) (network_add_nodes) (network_get_connected_peers) + (sign_memo) + (read_memo) (set_key_label) (get_key_label) (get_public_key) @@ -1648,4 +1871,5 @@ FC_API( graphene::wallet::wallet_api, (blind_history) (receive_blind_transfer) (get_order_book) + (quit) ) diff --git a/libraries/wallet/wallet.cpp b/libraries/wallet/wallet.cpp index 3cec70d2fb..db85cbdd65 100644 --- a/libraries/wallet/wallet.cpp +++ b/libraries/wallet/wallet.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. + * Copyright (c) 2017 Cryptonomex, Inc., and contributors. * * The MIT License * @@ -59,10 +59,12 @@ #include #include #include +#include #include #include #include +#include #include #include #include @@ -70,14 +72,22 @@ #include #include #include -#include #ifndef WIN32 # include # include #endif +// explicit instantiation for later use +namespace fc { + template class api; +} + #define BRAIN_KEY_WORD_COUNT 16 +#define RANGE_PROOF_MANTISSA 49 // Minimum mantissa bits to "hide" in the range proof. + // If this number is set too low, then for large value + // commitments the length of the range proof will hint + // strongly at the value amount that is being hidden. namespace graphene { namespace wallet { @@ -86,7 +96,7 @@ namespace detail { struct operation_result_printer { public: - operation_result_printer( const wallet_api_impl& w ) + explicit operation_result_printer( const wallet_api_impl& w ) : _wallet(w) {} const wallet_api_impl& _wallet; typedef std::string result_type; @@ -132,10 +142,10 @@ optional maybe_id( const string& name_or_id ) { try { - return fc::variant(name_or_id).as(); + return fc::variant(name_or_id, 1).as(1); } catch (const fc::exception&) - { + { // not an ID } } return optional(); @@ -255,9 +265,10 @@ class wallet_api_impl private: void claim_registered_account(const account_object& account) { + bool import_keys = false; auto it = _wallet.pending_account_registrations.find( account.name ); FC_ASSERT( it != _wallet.pending_account_registrations.end() ); - for (const std::string& wif_key : it->second) + for (const std::string& wif_key : it->second) { if( !import_key( account.name, wif_key ) ) { // somebody else beat our pending registration, there is @@ -270,8 +281,15 @@ class wallet_api_impl // possibility of migrating to a fork where the // name is available, the user can always // manually re-register) + } else { + import_keys = true; } + } _wallet.pending_account_registrations.erase( it ); + + if( import_keys ) { + save_wallet_file(); + } } // after a witness registration succeeds, this saves the private key in the wallet permanently @@ -330,7 +348,8 @@ class wallet_api_impl for( const fc::optional& optional_account : owner_account_objects ) if (optional_account) { - fc::optional witness_obj = _remote_db->get_witness_by_account(optional_account->id); + std::string account_id = account_id_to_string(optional_account->id); + fc::optional witness_obj = _remote_db->get_witness_by_account(account_id); if (witness_obj) claim_registered_witness(optional_account->name); } @@ -489,13 +508,13 @@ class wallet_api_impl T get_object(object_id id)const { auto ob = _remote_db->get_objects({id}).front(); - return ob.template as(); + return ob.template as( GRAPHENE_MAX_NESTED_OBJECTS ); } - void set_operation_fees( signed_transaction& tx, const fee_schedule& s ) + void set_operation_fees( 
signed_transaction& tx, const std::shared_ptr s ) { for( auto& op : tx.operations ) - s.set_fee(op); + s->set_fee(op); } variant info() const @@ -505,15 +524,15 @@ class wallet_api_impl auto dynamic_props = get_dynamic_global_properties(); fc::mutable_variant_object result; result["head_block_num"] = dynamic_props.head_block_number; - result["head_block_id"] = dynamic_props.head_block_id; + result["head_block_id"] = fc::variant(dynamic_props.head_block_id, 1); result["head_block_age"] = fc::get_approximate_relative_time_string(dynamic_props.time, time_point_sec(time_point::now()), " old"); result["next_maintenance_time"] = fc::get_approximate_relative_time_string(dynamic_props.next_maintenance_time); result["chain_id"] = chain_props.chain_id; result["participation"] = (100*dynamic_props.recent_slots_filled.popcount()) / 128.0; - result["active_witnesses"] = global_props.active_witnesses; - result["active_committee_members"] = global_props.active_committee_members; + result["active_witnesses"] = fc::variant(global_props.active_witnesses, GRAPHENE_MAX_NESTED_OBJECTS); + result["active_committee_members"] = fc::variant(global_props.active_committee_members, GRAPHENE_MAX_NESTED_OBJECTS); return result; } @@ -563,11 +582,18 @@ class wallet_api_impl { return _remote_db->get_dynamic_global_properties(); } + std::string account_id_to_string(account_id_type id) const + { + std::string account_id = fc::to_string(id.space_id) + + "." + fc::to_string(id.type_id) + + "." + fc::to_string(id.instance.value); + return account_id; + } account_object get_account(account_id_type id) const { - if( _wallet.my_accounts.get().count(id) ) - return *_wallet.my_accounts.get().find(id); - auto rec = _remote_db->get_accounts({id}).front(); + std::string account_id = account_id_to_string(id); + + auto rec = _remote_db->get_accounts({account_id}).front(); FC_ASSERT(rec); return *rec; } @@ -580,19 +606,6 @@ class wallet_api_impl // It's an ID return get_account(*id); } else { - // It's a name - if( _wallet.my_accounts.get().count(account_name_or_id) ) - { - auto local_account = *_wallet.my_accounts.get().find(account_name_or_id); - auto blockchain_account = _remote_db->lookup_account_names({account_name_or_id}).front(); - FC_ASSERT( blockchain_account ); - if (local_account.id != blockchain_account->id) - elog("my account id ${id} different from blockchain id ${id2}", ("id", local_account.id)("id2", blockchain_account->id)); - if (local_account.name != blockchain_account->name) - elog("my account name ${id} different from blockchain name ${id2}", ("id", local_account.name)("id2", blockchain_account->name)); - - return *_wallet.my_accounts.get().find(account_name_or_id); - } auto rec = _remote_db->lookup_account_names({account_name_or_id}).front(); FC_ASSERT( rec && rec->name == account_name_or_id ); return *rec; @@ -602,11 +615,16 @@ class wallet_api_impl { return get_account(account_name_or_id).get_id(); } + std::string asset_id_to_string(asset_id_type id) const + { + std::string asset_id = fc::to_string(id.space_id) + + "." + fc::to_string(id.type_id) + + "." 
+ fc::to_string(id.instance.value); + return asset_id; + } optional find_asset(asset_id_type id)const { - auto rec = _remote_db->get_assets({id}).front(); - if( rec ) - _asset_cache[id] = *rec; + auto rec = _remote_db->get_assets({asset_id_to_string(id)}).front(); return rec; } optional find_asset(string asset_symbol_or_id)const @@ -624,8 +642,6 @@ class wallet_api_impl { if( rec->symbol != asset_symbol_or_id ) return optional(); - - _asset_cache[rec->get_id()] = *rec; } return rec; } @@ -648,7 +664,7 @@ class wallet_api_impl FC_ASSERT( asset_symbol_or_id.size() > 0 ); vector> opt_asset; if( std::isdigit( asset_symbol_or_id.front() ) ) - return fc::variant(asset_symbol_or_id).as(); + return fc::variant(asset_symbol_or_id, 1).as( 1 ); opt_asset = _remote_db->lookup_asset_symbols( {asset_symbol_or_id} ); FC_ASSERT( (opt_asset.size() > 0) && (opt_asset[0].valid()) ); return opt_asset[0]->id; @@ -719,14 +735,14 @@ class wallet_api_impl if( ! fc::exists( wallet_filename ) ) return false; - _wallet = fc::json::from_file( wallet_filename ).as< wallet_data >(); + _wallet = fc::json::from_file( wallet_filename ).as< wallet_data >( 2 * GRAPHENE_MAX_NESTED_OBJECTS ); if( _wallet.chain_id != _chain_id ) FC_THROW( "Wallet chain ID does not match", ("wallet.chain_id", _wallet.chain_id) ("chain_id", _chain_id) ); size_t account_pagination = 100; - vector< account_id_type > account_ids_to_send; + vector< std::string > account_ids_to_send; size_t n = _wallet.my_accounts.size(); account_ids_to_send.reserve( std::min( account_pagination, n ) ); auto it = _wallet.my_accounts.begin(); @@ -741,7 +757,8 @@ class wallet_api_impl { assert( it != _wallet.my_accounts.end() ); old_accounts.push_back( *it ); - account_ids_to_send.push_back( old_accounts.back().id ); + std::string account_id = account_id_to_string(old_accounts.back().id); + account_ids_to_send.push_back( account_id ); ++it; } std::vector< optional< account_object > > accounts = _remote_db->get_accounts(account_ids_to_send); @@ -771,6 +788,82 @@ class wallet_api_impl return true; } + + /** + * Get the required public keys to sign the transaction which had been + * owned by us + * + * NOTE, if `erase_existing_sigs` set to true, the original trasaction's + * signatures will be erased + * + * @param tx The transaction to be signed + * @param erase_existing_sigs + * The transaction could have been partially signed already, + * if set to false, the corresponding public key of existing + * signatures won't be returned. + * If set to true, the existing signatures will be erased and + * all required keys returned. 
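As a concrete illustration of the filtering step get_owned_required_keys() performs (described just above), here is a minimal, self-contained sketch: intersect the node's potential signing keys with the keys held in the local wallet. std::string stands in for public_key_type and both inputs are toy data.

#include <algorithm>
#include <iostream>
#include <iterator>
#include <set>
#include <string>

int main()
{
   // Keys the node says could possibly sign the transaction
   // (cf. _remote_db->get_potential_signatures(tx)).
   const std::set<std::string> potential = { "PUB_A", "PUB_B", "PUB_C" };

   // Keys whose private counterparts exist in the local wallet (cf. _keys).
   const std::set<std::string> wallet_keys = { "PUB_B", "PUB_C", "PUB_D" };

   // Keep only the potential keys we actually own -- the same copy_if pattern
   // used by get_owned_required_keys() in this change.
   std::set<std::string> owned;
   std::copy_if( potential.begin(), potential.end(),
                 std::inserter( owned, owned.end() ),
                 [&wallet_keys]( const std::string& pk ) {
                    return wallet_keys.count( pk ) != 0;
                 } );

   for( const auto& k : owned )
      std::cout << k << "\n";   // prints PUB_B and PUB_C
}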
+ */ + set get_owned_required_keys( signed_transaction &tx, + bool erase_existing_sigs = true) + { + set pks = _remote_db->get_potential_signatures( tx ); + flat_set owned_keys; + owned_keys.reserve( pks.size() ); + std::copy_if( pks.begin(), pks.end(), + std::inserter( owned_keys, owned_keys.end() ), + [this]( const public_key_type &pk ) { + return _keys.find( pk ) != _keys.end(); + } ); + + if ( erase_existing_sigs ) + tx.signatures.clear(); + + return _remote_db->get_required_signatures( tx, owned_keys ); + } + + signed_transaction add_transaction_signature( signed_transaction tx, + bool broadcast ) + { + set approving_key_set = get_owned_required_keys(tx, false); + + if ( ( ( tx.ref_block_num == 0 && tx.ref_block_prefix == 0 ) || + tx.expiration == fc::time_point_sec() ) && + tx.signatures.empty() ) + { + auto dyn_props = get_dynamic_global_properties(); + auto parameters = get_global_properties().parameters; + fc::time_point_sec now = dyn_props.time; + tx.set_reference_block( dyn_props.head_block_id ); + tx.set_expiration( now + parameters.maximum_time_until_expiration ); + } + for ( const public_key_type &key : approving_key_set ) + tx.sign( get_private_key( key ), _chain_id ); + + if ( broadcast ) + { + try + { + _remote_net_broadcast->broadcast_transaction( tx ); + } + catch ( const fc::exception &e ) + { + elog( "Caught exception while broadcasting tx ${id}: ${e}", + ( "id", tx.id().str() )( "e", e.to_detail_string() ) ); + FC_THROW( "Caught exception while broadcasting tx" ); + } + } + + return tx; + } + + void quit() + { + ilog( "Quitting Cli Wallet ..." ); + + throw fc::canceled_exception(); + } + void save_wallet_file(string wallet_filename = "") { // @@ -788,6 +881,7 @@ class wallet_api_impl wlog( "saving wallet to file ${fn}", ("fn", wallet_filename) ); string data = fc::json::to_pretty_string( _wallet ); + try { enable_umask_protection(); @@ -797,14 +891,40 @@ class wallet_api_impl // // http://en.wikipedia.org/wiki/Most_vexing_parse // - fc::ofstream outfile{ fc::path( wallet_filename ) }; + std::string tmp_wallet_filename = wallet_filename + ".tmp"; + fc::ofstream outfile{ fc::path( tmp_wallet_filename ) }; outfile.write( data.c_str(), data.length() ); outfile.flush(); outfile.close(); + + wlog( "saved successfully wallet to tmp file ${fn}", ("fn", tmp_wallet_filename) ); + + std::string wallet_file_content; + fc::read_file_contents(tmp_wallet_filename, wallet_file_content); + + if (wallet_file_content == data) { + wlog( "validated successfully tmp wallet file ${fn}", ("fn", tmp_wallet_filename) ); + + fc::rename( tmp_wallet_filename, wallet_filename ); + + wlog( "renamed successfully tmp wallet file ${fn}", ("fn", tmp_wallet_filename) ); + } + else + { + FC_THROW("tmp wallet file cannot be validated ${fn}", ("fn", tmp_wallet_filename) ); + } + + wlog( "successfully saved wallet to file ${fn}", ("fn", wallet_filename) ); + disable_umask_protection(); } catch(...) 
{ + string ws_password = _wallet.ws_password; + _wallet.ws_password = ""; + wlog("wallet file content is next: ${data}", ("data", fc::json::to_pretty_string( _wallet ) ) ); + _wallet.ws_password = ws_password; + disable_umask_protection(); throw; } @@ -866,6 +986,19 @@ class wallet_api_impl return _builder_transactions[transaction_handle] = sign_transaction(_builder_transactions[transaction_handle], broadcast); } + + pair broadcast_transaction(signed_transaction tx) + { + try { + _remote_net_broadcast->broadcast_transaction(tx); + } + catch (const fc::exception& e) { + elog("Caught exception while broadcasting tx ${id}: ${e}", ("id", tx.id().str())("e", e.to_detail_string())); + throw; + } + return std::make_pair(tx.id(),tx); + } + signed_transaction propose_builder_transaction( transaction_handle_type handle, time_point_sec expiration = time_point::now() + fc::minutes(1), @@ -911,7 +1044,6 @@ class wallet_api_impl _builder_transactions.erase(handle); } - signed_transaction register_account(string name, public_key_type owner, public_key_type active, @@ -951,8 +1083,7 @@ class wallet_api_impl tx.operations.push_back( account_create_op ); - auto current_fees = _remote_db->get_global_properties().parameters.current_fees; - set_operation_fees( tx, current_fees ); + set_operation_fees( tx, _remote_db->get_global_properties().parameters.current_fees ); vector paying_keys = registrar_account_object.active.get_keys(); @@ -980,7 +1111,6 @@ class wallet_api_impl return tx; } FC_CAPTURE_AND_RETHROW( (name)(owner)(active)(registrar_account)(referrer_account)(referrer_percent)(broadcast) ) } - signed_transaction upgrade_account(string name, bool broadcast) { try { FC_ASSERT( !self.is_locked() ); @@ -998,7 +1128,6 @@ class wallet_api_impl return sign_transaction( tx, broadcast ); } FC_CAPTURE_AND_RETHROW( (name) ) } - // This function generates derived keys starting with index 0 and keeps incrementing // the index until it finds a key that isn't registered in the block chain. To be // safer, it continues checking for a few more keys to make sure there wasn't a short gap @@ -1160,6 +1289,8 @@ class wallet_api_impl optional new_issuer_account_id; if (new_issuer) { + FC_ASSERT( _remote_db->get_dynamic_global_properties().time < HARDFORK_CORE_199_TIME, + "The use of 'new_issuer' is no longer supported. 
Please use `update_asset_issuer' instead!"); account_object new_issuer_account = get_account(*new_issuer); new_issuer_account_id = new_issuer_account.id; } @@ -1178,6 +1309,29 @@ class wallet_api_impl return sign_transaction( tx, broadcast ); } FC_CAPTURE_AND_RETHROW( (symbol)(new_issuer)(new_options)(broadcast) ) } + signed_transaction update_asset_issuer(string symbol, + string new_issuer, + bool broadcast /* = false */) + { try { + optional asset_to_update = find_asset(symbol); + if (!asset_to_update) + FC_THROW("No asset with that symbol exists!"); + + account_object new_issuer_account = get_account(new_issuer); + + asset_update_issuer_operation update_issuer; + update_issuer.issuer = asset_to_update->issuer; + update_issuer.asset_to_update = asset_to_update->id; + update_issuer.new_issuer = new_issuer_account.id; + + signed_transaction tx; + tx.operations.push_back( update_issuer ); + set_operation_fees( tx, _remote_db->get_global_properties().parameters.current_fees); + tx.validate(); + + return sign_transaction( tx, broadcast ); + } FC_CAPTURE_AND_RETHROW( (symbol)(new_issuer)(broadcast) ) } + signed_transaction update_bitasset(string symbol, bitasset_options new_options, bool broadcast /* = false */) @@ -1269,6 +1423,29 @@ class wallet_api_impl return sign_transaction( tx, broadcast ); } FC_CAPTURE_AND_RETHROW( (from)(symbol)(amount)(broadcast) ) } + signed_transaction claim_asset_fee_pool(string symbol, + string amount, + bool broadcast /* = false */) + { try { + optional asset_pool_to_claim = find_asset(symbol); + if (!asset_pool_to_claim) + FC_THROW("No asset with that symbol exists!"); + asset_object core_asset = get_asset(asset_id_type()); + + asset_claim_pool_operation claim_op; + claim_op.issuer = asset_pool_to_claim->issuer; + claim_op.asset_id = asset_pool_to_claim->id; + claim_op.amount_to_claim = core_asset.amount_from_string(amount).amount; + + signed_transaction tx; + tx.operations.push_back( claim_op ); + set_operation_fees( tx, _remote_db->get_global_properties().parameters.current_fees); + tx.validate(); + + return sign_transaction( tx, broadcast ); + } FC_CAPTURE_AND_RETHROW( (symbol)(amount)(broadcast) ) } + + signed_transaction reserve_asset(string from, string amount, string symbol, @@ -1333,6 +1510,31 @@ class wallet_api_impl return sign_transaction( tx, broadcast ); } FC_CAPTURE_AND_RETHROW( (account_to_settle)(amount_to_settle)(symbol)(broadcast) ) } + signed_transaction bid_collateral(string bidder_name, + string debt_amount, string debt_symbol, + string additional_collateral, + bool broadcast ) + { try { + optional debt_asset = find_asset(debt_symbol); + if (!debt_asset) + FC_THROW("No asset with that symbol exists!"); + + FC_ASSERT(debt_asset->bitasset_data_id.valid(), "Not a bitasset, bidding not possible."); + const asset_object& collateral = get_asset(get_object(*debt_asset->bitasset_data_id).options.short_backing_asset); + + bid_collateral_operation op; + op.bidder = get_account_id(bidder_name); + op.debt_covered = debt_asset->amount_from_string(debt_amount); + op.additional_collateral = collateral.amount_from_string(additional_collateral); + + signed_transaction tx; + tx.operations.push_back( op ); + set_operation_fees( tx, _remote_db->get_global_properties().parameters.current_fees); + tx.validate(); + + return sign_transaction( tx, broadcast ); + } FC_CAPTURE_AND_RETHROW( (bidder_name)(debt_amount)(debt_symbol)(additional_collateral)(broadcast) ) } + signed_transaction whitelist_account(string authorizing_account, string account_to_list, 
account_whitelist_operation::account_listing new_listing_status, @@ -1358,7 +1560,15 @@ class wallet_api_impl committee_member_create_operation committee_member_create_op; committee_member_create_op.committee_member_account = get_account_id(owner_account); committee_member_create_op.url = url; - if (_remote_db->get_committee_member_by_account(committee_member_create_op.committee_member_account)) + + /* + * Compatibility issue + * Current Date: 2018-09-28 More info: https://github.com/bitshares/bitshares-core/issues/1307 + * Todo: remove the next 2 lines and change always_id to name in remote call after next hardfork + */ + auto account = get_account(owner_account); + auto always_id = account_id_to_string(account.id); + if (_remote_db->get_committee_member_by_account(always_id)) FC_THROW("Account ${owner_account} is already a committee_member", ("owner_account", owner_account)); signed_transaction tx; @@ -1388,7 +1598,7 @@ class wallet_api_impl // then maybe it's the owner account try { - account_id_type owner_account_id = get_account_id(owner_account); + std::string owner_account_id = account_id_to_string(get_account_id(owner_account)); fc::optional witness = _remote_db->get_witness_by_account(owner_account_id); if (witness) return *witness; @@ -1423,8 +1633,7 @@ class wallet_api_impl // then maybe it's the owner account try { - account_id_type owner_account_id = get_account_id(owner_account); - fc::optional committee_member = _remote_db->get_committee_member_by_account(owner_account_id); + fc::optional committee_member = _remote_db->get_committee_member_by_account(owner_account); if (committee_member) return *committee_member; else @@ -1454,7 +1663,7 @@ class wallet_api_impl witness_create_op.block_signing_key = witness_public_key; witness_create_op.url = url; - if (_remote_db->get_witness_by_account(witness_create_op.witness_account)) + if (_remote_db->get_witness_by_account(account_id_to_string(witness_create_op.witness_account))) FC_THROW("Account ${owner_account} is already a witness", ("owner_account", owner_account)); signed_transaction tx; @@ -1474,7 +1683,6 @@ class wallet_api_impl { try { witness_object witness = get_witness(witness_name); account_object witness_account = get_account( witness.witness_account ); - fc::ecc::private_key active_private_key = get_private_key_for_account(witness_account); witness_update_operation witness_update_op; witness_update_op.witness = witness.id; @@ -1496,7 +1704,7 @@ class wallet_api_impl static WorkerInit _create_worker_initializer( const variant& worker_settings ) { WorkerInit result; - from_variant( worker_settings, result ); + from_variant( worker_settings, result, GRAPHENE_MAX_NESTED_OBJECTS ); return result; } @@ -1550,7 +1758,6 @@ class wallet_api_impl ) { account_object acct = get_account( account ); - account_update_operation op; // you could probably use a faster algorithm for this, but flat_set is fast enough :) flat_set< worker_id_type > merged; @@ -1584,7 +1791,7 @@ class wallet_api_impl for( const variant& obj : objects ) { worker_object wo; - from_variant( obj, wo ); + from_variant( obj, wo, GRAPHENE_MAX_NESTED_OBJECTS ); new_votes.erase( wo.vote_for ); new_votes.erase( wo.vote_against ); if( delta.vote_for.find( wo.id ) != delta.vote_for.end() ) @@ -1619,13 +1826,15 @@ class wallet_api_impl result.emplace_back( get_object(*vbid), now ); return result; } - - // try casting to avoid a round-trip if we were given an account ID - fc::optional acct_id = maybe_id( account_name ); - if( !acct_id ) - acct_id = get_account( account_name 
).id; - - vector< vesting_balance_object > vbos = _remote_db->get_vesting_balances( *acct_id ); + /* + * Compatibility issue + * Current Date: 2018-09-28 More info: https://github.com/bitshares/bitshares-core/issues/1307 + * Todo: remove the next 2 lines and change always_id to name in remote call after next hardfork + */ + auto account = get_account(account_name); + auto always_id = account_id_to_string(account.id); + + vector< vesting_balance_object > vbos = _remote_db->get_vesting_balances( always_id ); if( vbos.size() == 0 ) return result; @@ -1673,8 +1882,15 @@ class wallet_api_impl bool broadcast /* = false */) { try { account_object voting_account_object = get_account(voting_account); - account_id_type committee_member_owner_account_id = get_account_id(committee_member); - fc::optional committee_member_obj = _remote_db->get_committee_member_by_account(committee_member_owner_account_id); + + /* + * Compatibility issue + * Current Date: 2018-09-28 More info: https://github.com/bitshares/bitshares-core/issues/1307 + * Todo: remove the next 2 lines and change always_id to name in remote call after next hardfork + */ + auto account = get_account(committee_member); + auto always_id = account_id_to_string(account.id); + fc::optional committee_member_obj = _remote_db->get_committee_member_by_account(always_id); if (!committee_member_obj) FC_THROW("Account ${committee_member} is not registered as a committee_member", ("committee_member", committee_member)); if (approve) @@ -1707,8 +1923,15 @@ class wallet_api_impl bool broadcast /* = false */) { try { account_object voting_account_object = get_account(voting_account); - account_id_type witness_owner_account_id = get_account_id(witness); - fc::optional witness_obj = _remote_db->get_witness_by_account(witness_owner_account_id); + + /* + * Compatibility issue + * Current Date: 2018-09-28 More info: https://github.com/bitshares/bitshares-core/issues/1307 + * Todo: remove the next 2 lines and change always_id to name in remote call after next hardfork + */ + auto account = get_account(witness); + auto always_id = account_id_to_string(account.id); + fc::optional witness_obj = _remote_db->get_witness_by_account(always_id); if (!witness_obj) FC_THROW("Account ${witness} is not registered as a witness", ("witness", witness)); if (approve) @@ -1794,74 +2017,8 @@ class wallet_api_impl signed_transaction sign_transaction(signed_transaction tx, bool broadcast = false) { - flat_set req_active_approvals; - flat_set req_owner_approvals; - vector other_auths; - - tx.get_required_authorities( req_active_approvals, req_owner_approvals, other_auths ); - - for( const auto& auth : other_auths ) - for( const auto& a : auth.account_auths ) - req_active_approvals.insert(a.first); - - // std::merge lets us de-duplicate account_id's that occur in both - // sets, and dump them into a vector (as required by remote_db api) - // at the same time - vector v_approving_account_ids; - std::merge(req_active_approvals.begin(), req_active_approvals.end(), - req_owner_approvals.begin() , req_owner_approvals.end(), - std::back_inserter(v_approving_account_ids)); - - /// TODO: fetch the accounts specified via other_auths as well. 
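The compatibility shims above, like account_id_to_string()/asset_id_to_string() earlier in this diff, hand object ids to the node as plain "space.type.instance" strings. A standalone sketch of that formatting follows, with a minimal stand-in id type (the real graphene ids carry the same three components).

#include <cstdint>
#include <iostream>
#include <string>

// Minimal stand-in for a graphene object id.
struct object_id_stub {
   uint8_t  space_id;
   uint8_t  type_id;
   uint64_t instance;
};

// Same formatting as account_id_to_string()/asset_id_to_string() above:
// "space.type.instance", e.g. an account id "1.2.17" or the core asset "1.3.0".
static std::string id_to_string( const object_id_stub& id )
{
   return std::to_string( id.space_id ) + "." +
          std::to_string( id.type_id )  + "." +
          std::to_string( id.instance );
}

int main()
{
   std::cout << id_to_string( { 1, 2, 17 } ) << "\n";   // an account id
   std::cout << id_to_string( { 1, 3, 0 } )  << "\n";   // the core asset id
}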
- - vector< optional > approving_account_objects = - _remote_db->get_accounts( v_approving_account_ids ); - /// TODO: recursively check one layer deeper in the authority tree for keys - - FC_ASSERT( approving_account_objects.size() == v_approving_account_ids.size() ); - - flat_map approving_account_lut; - size_t i = 0; - for( optional& approving_acct : approving_account_objects ) - { - if( !approving_acct.valid() ) - { - wlog( "operation_get_required_auths said approval of non-existing account ${id} was needed", - ("id", v_approving_account_ids[i]) ); - i++; - continue; - } - approving_account_lut[ approving_acct->id ] = &(*approving_acct); - i++; - } - - flat_set approving_key_set; - for( account_id_type& acct_id : req_active_approvals ) - { - const auto it = approving_account_lut.find( acct_id ); - if( it == approving_account_lut.end() ) - continue; - const account_object* acct = it->second; - vector v_approving_keys = acct->active.get_keys(); - for( const public_key_type& approving_key : v_approving_keys ) - approving_key_set.insert( approving_key ); - } - for( account_id_type& acct_id : req_owner_approvals ) - { - const auto it = approving_account_lut.find( acct_id ); - if( it == approving_account_lut.end() ) - continue; - const account_object* acct = it->second; - vector v_approving_keys = acct->owner.get_keys(); - for( const public_key_type& approving_key : v_approving_keys ) - approving_key_set.insert( approving_key ); - } - for( const authority& a : other_auths ) - { - for( const auto& k : a.key_auths ) - approving_key_set.insert( k.first ); - } + set approving_key_set = get_owned_required_keys(tx); auto dyn_props = get_dynamic_global_properties(); tx.set_reference_block( dyn_props.head_block_id ); @@ -1879,22 +2036,10 @@ class wallet_api_impl for (;;) { tx.set_expiration( dyn_props.time + fc::seconds(30 + expiration_time_offset) ); - tx.signatures.clear(); + tx.clear_signatures(); - for( public_key_type& key : approving_key_set ) - { - auto it = _keys.find(key); - if( it != _keys.end() ) - { - fc::optional privkey = wif_to_key( it->second ); - FC_ASSERT( privkey.valid(), "Malformed private key in _keys" ); - tx.sign( *privkey, _chain_id ); - } - /// TODO: if transaction has enough signatures to be "valid" don't add any more, - /// there are cases where the wallet may have more keys than strictly necessary and - /// the transaction will be rejected if the transaction validates without requiring - /// all signatures provided - } + for( const public_key_type& key : approving_key_set ) + tx.sign( get_private_key(key), _chain_id ); graphene::chain::transaction_id_type this_transaction_id = tx.id(); auto iter = _recently_generated_transactions.find(this_transaction_id); @@ -1928,6 +2073,56 @@ class wallet_api_impl return tx; } + memo_data sign_memo(string from, string to, string memo) + { + FC_ASSERT( !self.is_locked() ); + + memo_data md = memo_data(); + + // get account memo key, if that fails, try a pubkey + try { + account_object from_account = get_account(from); + md.from = from_account.options.memo_key; + } catch (const fc::exception& e) { + md.from = self.get_public_key( from ); + } + // same as above, for destination key + try { + account_object to_account = get_account(to); + md.to = to_account.options.memo_key; + } catch (const fc::exception& e) { + md.to = self.get_public_key( to ); + } + + md.set_message(get_private_key(md.from), md.to, memo); + return md; + } + + string read_memo(const memo_data& md) + { + FC_ASSERT(!is_locked()); + std::string clear_text; + + const 
memo_data *memo = &md; + + try { + FC_ASSERT(_keys.count(memo->to) || _keys.count(memo->from), "Memo is encrypted to a key ${to} or ${from} not in this wallet.", ("to", memo->to)("from",memo->from)); + if( _keys.count(memo->to) ) { + auto my_key = wif_to_key(_keys.at(memo->to)); + FC_ASSERT(my_key, "Unable to recover private key to decrypt memo. Wallet may be corrupted."); + clear_text = memo->get_message(*my_key, memo->from); + } else { + auto my_key = wif_to_key(_keys.at(memo->from)); + FC_ASSERT(my_key, "Unable to recover private key to decrypt memo. Wallet may be corrupted."); + clear_text = memo->get_message(*my_key, memo->to); + } + } catch (const fc::exception& e) { + elog("Error when decrypting memo: ${e}", ("e", e.to_detail_string())); + } + + return clear_text; + } + signed_transaction sell_asset(string seller_account, string amount_to_sell, string symbol_to_sell, @@ -1972,7 +2167,30 @@ class wallet_api_impl trx.operations = {op}; set_operation_fees( trx, _remote_db->get_global_properties().parameters.current_fees); trx.validate(); - idump((broadcast)); + + return sign_transaction(trx, broadcast); + } + + signed_transaction borrow_asset_ext( string seller_name, string amount_to_borrow, string asset_symbol, + string amount_of_collateral, + call_order_update_operation::extensions_type extensions, + bool broadcast = false) + { + account_object seller = get_account(seller_name); + asset_object mia = get_asset(asset_symbol); + FC_ASSERT(mia.is_market_issued()); + asset_object collateral = get_asset(get_object(*mia.bitasset_data_id).options.short_backing_asset); + + call_order_update_operation op; + op.funding_account = seller.id; + op.delta_debt = mia.amount_from_string(amount_to_borrow); + op.delta_collateral = collateral.amount_from_string(amount_of_collateral); + op.extensions = extensions; + + signed_transaction trx; + trx.operations = {op}; + set_operation_fees( trx, _remote_db->get_global_properties().parameters.current_fees); + trx.validate(); return sign_transaction(trx, broadcast); } @@ -2003,7 +2221,7 @@ class wallet_api_impl account_object from_account = get_account(from); account_object to_account = get_account(to); account_id_type from_id = from_account.id; - account_id_type to_id = get_account_id(to); + account_id_type to_id = to_account.id; transfer_operation xfer_op; @@ -2073,7 +2291,7 @@ class wallet_api_impl m["get_account_history"] = [this](variant result, const fc::variants& a) { - auto r = result.as>(); + auto r = result.as>( GRAPHENE_MAX_NESTED_OBJECTS ); std::stringstream ss; for( operation_detail& d : r ) @@ -2088,10 +2306,50 @@ class wallet_api_impl return ss.str(); }; + m["get_relative_account_history"] = [this](variant result, const fc::variants& a) + { + auto r = result.as>( GRAPHENE_MAX_NESTED_OBJECTS ); + std::stringstream ss; + + for( operation_detail& d : r ) + { + operation_history_object& i = d.op; + auto b = _remote_db->get_block_header(i.block_num); + FC_ASSERT(b); + ss << b->timestamp.to_iso_string() << " "; + i.op.visit(operation_printer(ss, *this, i.result)); + ss << " \n"; + } + + return ss.str(); + }; + + m["get_account_history_by_operations"] = [this](variant result, const fc::variants& a) { + auto r = result.as( GRAPHENE_MAX_NESTED_OBJECTS ); + std::stringstream ss; + ss << "total_count : "; + ss << r.total_count; + ss << " \n"; + ss << "result_count : "; + ss << r.result_count; + ss << " \n"; + for (operation_detail_ex& d : r.details) { + operation_history_object& i = d.op; + auto b = _remote_db->get_block_header(i.block_num); + 
FC_ASSERT(b); + ss << b->timestamp.to_iso_string() << " "; + i.op.visit(operation_printer(ss, *this, i.result)); + ss << " transaction_id : "; + ss << d.transaction_id.str(); + ss << " \n"; + } + + return ss.str(); + }; m["list_account_balances"] = [this](variant result, const fc::variants& a) { - auto r = result.as>(); + auto r = result.as>( GRAPHENE_MAX_NESTED_OBJECTS ); vector asset_recs; std::transform(r.begin(), r.end(), std::back_inserter(asset_recs), [this](const asset& a) { return get_asset(a.asset_id); @@ -2106,7 +2364,7 @@ class wallet_api_impl m["get_blind_balances"] = [this](variant result, const fc::variants& a) { - auto r = result.as>(); + auto r = result.as>( GRAPHENE_MAX_NESTED_OBJECTS ); vector asset_recs; std::transform(r.begin(), r.end(), std::back_inserter(asset_recs), [this](const asset& a) { return get_asset(a.asset_id); @@ -2120,7 +2378,7 @@ class wallet_api_impl }; m["transfer_to_blind"] = [this](variant result, const fc::variants& a) { - auto r = result.as(); + auto r = result.as( GRAPHENE_MAX_NESTED_OBJECTS ); std::stringstream ss; r.trx.operations[0].visit( operation_printer( ss, *this, operation_result() ) ); ss << "\n"; @@ -2133,7 +2391,7 @@ class wallet_api_impl }; m["blind_transfer"] = [this](variant result, const fc::variants& a) { - auto r = result.as(); + auto r = result.as( GRAPHENE_MAX_NESTED_OBJECTS ); std::stringstream ss; r.trx.operations[0].visit( operation_printer( ss, *this, operation_result() ) ); ss << "\n"; @@ -2146,7 +2404,7 @@ class wallet_api_impl }; m["receive_blind_transfer"] = [this](variant result, const fc::variants& a) { - auto r = result.as(); + auto r = result.as( GRAPHENE_MAX_NESTED_OBJECTS ); std::stringstream ss; asset_object as = get_asset( r.amount.asset_id ); ss << as.amount_to_pretty_string( r.amount ) << " " << r.from_label << " => " << r.to_label << " " << r.memo <<"\n"; @@ -2154,7 +2412,7 @@ class wallet_api_impl }; m["blind_history"] = [this](variant result, const fc::variants& a) { - auto records = result.as>(); + auto records = result.as>( GRAPHENE_MAX_NESTED_OBJECTS ); std::stringstream ss; ss << "WHEN " << " " << "AMOUNT" << " " << "FROM" << " => " << "TO" << " " << "MEMO" <<"\n"; @@ -2167,9 +2425,9 @@ class wallet_api_impl } return ss.str(); }; - m["get_order_book"] = [this](variant result, const fc::variants& a) + m["get_order_book"] = [](variant result, const fc::variants& a) { - auto orders = result.as(); + auto orders = result.as( GRAPHENE_MAX_NESTED_OBJECTS ); auto bids = orders.bids; auto asks = orders.asks; std::stringstream ss; @@ -2179,12 +2437,10 @@ class wallet_api_impl double ask_sum = 0; const int spacing = 20; - auto prettify_num = [&]( double n ) + auto prettify_num = [&ss]( double n ) { - //ss << n; if (abs( round( n ) - n ) < 0.00000000001 ) { - //ss << setiosflags( !ios::fixed ) << (int) n; // doesn't compile on Linux with gcc ss << (int) n; } else if (n - floor(n) < 0.000001) @@ -2196,6 +2452,11 @@ class wallet_api_impl ss << setiosflags( ios::fixed ) << setprecision(6) << n; } }; + auto prettify_num_string = [&]( string& num_string ) + { + double n = fc::to_double( num_string ); + prettify_num( n ); + }; ss << setprecision( 8 ) << setiosflags( ios::fixed ) << setiosflags( ios::left ); @@ -2207,17 +2468,17 @@ class wallet_api_impl << "\n=====================================================================================" << "|=====================================================================================\n"; - for (int i = 0; i < bids.size() || i < asks.size() ; i++) + for (unsigned int i = 0; 
i < bids.size() || i < asks.size() ; i++) { if ( i < bids.size() ) { - bid_sum += bids[i].base; + bid_sum += fc::to_double( bids[i].base ); ss << ' ' << setw( spacing ); - prettify_num( bids[i].price ); + prettify_num_string( bids[i].price ); ss << ' ' << setw( spacing ); - prettify_num( bids[i].quote ); + prettify_num_string( bids[i].quote ); ss << ' ' << setw( spacing ); - prettify_num( bids[i].base ); + prettify_num_string( bids[i].base ); ss << ' ' << setw( spacing ); prettify_num( bid_sum ); ss << ' '; @@ -2231,13 +2492,13 @@ class wallet_api_impl if ( i < asks.size() ) { - ask_sum += asks[i].base; + ask_sum += fc::to_double( asks[i].base ); ss << ' ' << setw( spacing ); - prettify_num( asks[i].price ); + prettify_num_string( asks[i].price ); ss << ' ' << setw( spacing ); - prettify_num( asks[i].quote ); + prettify_num_string( asks[i].quote ); ss << ' ' << setw( spacing ); - prettify_num( asks[i].base ); + prettify_num_string( asks[i].base ); ss << ' ' << setw( spacing ); prettify_num( ask_sum ); } @@ -2266,7 +2527,7 @@ class wallet_api_impl const chain_parameters& current_params = get_global_properties().parameters; chain_parameters new_params = current_params; fc::reflector::visit( - fc::from_variant_visitor( changed_values, new_params ) + fc::from_variant_visitor( changed_values, new_params, GRAPHENE_MAX_NESTED_OBJECTS ) ); committee_member_update_global_parameters_operation update_op; @@ -2316,7 +2577,7 @@ class wallet_api_impl continue; } // is key a number? - auto is_numeric = [&]() -> bool + auto is_numeric = [&key]() -> bool { size_t n = key.size(); for( size_t i=0; isecond; } - fee_parameters fp = from_which_variant< fee_parameters >( which, item.value() ); + fee_parameters fp = from_which_variant< fee_parameters >( which, item.value(), GRAPHENE_MAX_NESTED_OBJECTS ); fee_map[ which ] = fp; } @@ -2349,7 +2610,7 @@ class wallet_api_impl new_fees.scale = scale; chain_parameters new_params = current_params; - new_params.current_fees = new_fees; + new_params.current_fees = std::make_shared(new_fees); committee_member_update_global_parameters_operation update_op; update_op.new_parameters = new_params; @@ -2380,7 +2641,7 @@ class wallet_api_impl proposal_update_operation update_op; update_op.fee_paying_account = get_account(fee_paying_account).id; - update_op.proposal = fc::variant(proposal_id).as(); + update_op.proposal = fc::variant(proposal_id, 1).as( 1 ); // make sure the proposal exists get_object( update_op.proposal ); @@ -2465,7 +2726,7 @@ class wallet_api_impl "to access the network API on the witness_node you are\n" "connecting to. Please follow the instructions in README.md to set up an apiaccess file.\n" "\n"; - throw(e); + throw; } } @@ -2507,7 +2768,7 @@ class wallet_api_impl for( const auto& peer : peers ) { variant v; - fc::to_variant( peer, v ); + fc::to_variant( peer, v, GRAPHENE_MAX_NESTED_OBJECTS ); result.push_back( v ); } return result; @@ -2520,7 +2781,6 @@ class wallet_api_impl const account_object& master = *_wallet.my_accounts.get().lower_bound("import"); int number_of_accounts = number_of_transactions / 3; number_of_transactions -= number_of_accounts; - //auto key = derive_private_key("floodshill", 0); try { dbg_make_uia(master.name, "SHILL"); } catch(...) 
{/* Ignore; the asset probably already exists.*/} @@ -2592,8 +2852,6 @@ class wallet_api_impl mode_t _old_umask; #endif const string _wallet_filename_extension = ".wallet"; - - mutable map _asset_cache; }; std::string operation_printer::fee(const asset& a)const { @@ -2669,7 +2927,6 @@ string operation_printer::operator()(const transfer_operation& op) const } } catch (const fc::exception& e) { out << " -- could not decrypt memo"; - elog("Error when decrypting memo: ${e}", ("e", e.to_detail_string())); } } } @@ -2717,7 +2974,59 @@ std::string operation_result_printer::operator()(const asset& a) }}} +namespace graphene { namespace wallet { + vector utility::derive_owner_keys_from_brain_key(string brain_key, int number_of_desired_keys) + { + // Safety-check + FC_ASSERT( number_of_desired_keys >= 1 ); + + // Create as many derived owner keys as requested + vector results; + brain_key = graphene::wallet::detail::normalize_brain_key(brain_key); + for (int i = 0; i < number_of_desired_keys; ++i) { + fc::ecc::private_key priv_key = graphene::wallet::detail::derive_private_key( brain_key, i ); + + brain_key_info result; + result.brain_priv_key = brain_key; + result.wif_priv_key = key_to_wif( priv_key ); + result.pub_key = priv_key.get_public_key(); + + results.push_back(result); + } + return results; + } + + brain_key_info utility::suggest_brain_key() + { + brain_key_info result; + // create a private key for secure entropy + fc::sha256 sha_entropy1 = fc::ecc::private_key::generate().get_secret(); + fc::sha256 sha_entropy2 = fc::ecc::private_key::generate().get_secret(); + fc::bigint entropy1(sha_entropy1.data(), sha_entropy1.data_size()); + fc::bigint entropy2(sha_entropy2.data(), sha_entropy2.data_size()); + fc::bigint entropy(entropy1); + entropy <<= 8 * sha_entropy1.data_size(); + entropy += entropy2; + string brain_key = ""; + + for (int i = 0; i < BRAIN_KEY_WORD_COUNT; i++) + { + fc::bigint choice = entropy % graphene::words::word_list_size; + entropy /= graphene::words::word_list_size; + if (i > 0) + brain_key += " "; + brain_key += graphene::words::word_list[choice.to_int64()]; + } + + brain_key = detail::normalize_brain_key(brain_key); + fc::ecc::private_key priv_key = detail::derive_private_key(brain_key, 0); + result.brain_priv_key = brain_key; + result.wif_priv_key = key_to_wif(priv_key); + result.pub_key = priv_key.get_public_key(); + return result; + } +}} namespace graphene { namespace wallet { @@ -2757,9 +3066,15 @@ map wallet_api::list_accounts(const string& lowerbound, vector wallet_api::list_account_balances(const string& id) { - if( auto real_id = detail::maybe_id(id) ) - return my->_remote_db->get_account_balances(*real_id, flat_set()); - return my->_remote_db->get_account_balances(get_account(id).id, flat_set()); + /* + * Compatibility issue + * Current Date: 2018-09-13 More info: https://github.com/bitshares/bitshares-core/issues/1307 + * Todo: remove the next 2 lines and change always_id to id in remote call after next hardfork + */ + auto account = get_account(id); + auto always_id = my->account_id_to_string(account.id); + + return my->_remote_db->get_account_balances(always_id, flat_set()); } vector wallet_api::list_assets(const string& lowerbound, uint32_t limit)const @@ -2767,86 +3082,233 @@ vector wallet_api::list_assets(const string& lowerbound, uint32_t return my->_remote_db->list_assets( lowerbound, limit ); } +uint64_t wallet_api::get_asset_count()const +{ + return my->_remote_db->get_asset_count(); +} + vector wallet_api::get_account_history(string name, int 
limit)const { vector result; - auto account_id = get_account(name).get_id(); + + /* + * Compatibility issue + * Current Date: 2018-09-14 More info: https://github.com/bitshares/bitshares-core/issues/1307 + * Todo: remove the next 2 lines and change always_id to name in remote call after next hardfork + */ + auto account = get_account(name); + auto always_id = my->account_id_to_string(account.id); while( limit > 0 ) { + bool skip_first_row = false; operation_history_id_type start; if( result.size() ) { start = result.back().op.id; - start = start + 1; + if( start == operation_history_id_type() ) // no more data + break; + start = start + (-1); + if( start == operation_history_id_type() ) // will return most recent history if directly call remote API with this + { + start = start + 1; + skip_first_row = true; + } } + int page_limit = skip_first_row ? std::min( 100, limit + 1 ) : std::min( 100, limit ); - vector current = my->_remote_hist->get_account_history(account_id, operation_history_id_type(), std::min(100,limit), start); - for( auto& o : current ) { + vector current = my->_remote_hist->get_account_history( + always_id, + operation_history_id_type(), + page_limit, + start ); + bool first_row = true; + for( auto& o : current ) + { + if( first_row ) + { + first_row = false; + if( skip_first_row ) + { + continue; + } + } std::stringstream ss; auto memo = o.op.visit(detail::operation_printer(ss, *my, o.result)); result.push_back( operation_detail{ memo, ss.str(), o } ); } - if( current.size() < std::min(100,limit) ) + + if( int(current.size()) < page_limit ) break; + limit -= current.size(); + if( skip_first_row ) + ++limit; } return result; } +vector wallet_api::get_relative_account_history( + string name, + uint32_t stop, + int limit, + uint32_t start)const +{ + vector result; + auto account_id = get_account(name).get_id(); + + const account_object& account = my->get_account(account_id); + const account_statistics_object& stats = my->get_object(account.statistics); + + /* + * Compatibility issue + * Current Date: 2018-09-14 More info: https://github.com/bitshares/bitshares-core/issues/1307 + * Todo: remove the next line and change always_id to name in remote call after next hardfork + */ + auto always_id = my->account_id_to_string(account_id); + + if(start == 0) + start = stats.total_ops; + else + start = std::min(start, stats.total_ops); + + while( limit > 0 ) + { + vector current = my->_remote_hist->get_relative_account_history( + always_id, + stop, + std::min(100, limit), + start); + for (auto &o : current) { + std::stringstream ss; + auto memo = o.op.visit(detail::operation_printer(ss, *my, o.result)); + result.push_back(operation_detail{memo, ss.str(), o}); + } + if (current.size() < std::min(100, limit)) + break; + limit -= current.size(); + start -= 100; + if( start == 0 ) break; + } + return result; +} -vector wallet_api::get_market_history( string symbol1, string symbol2, uint32_t bucket )const +account_history_operation_detail wallet_api::get_account_history_by_operations( + string name, + vector operation_types, + uint32_t start, + int limit) +{ + account_history_operation_detail result; + auto account_id = get_account(name).get_id(); + + const auto& account = my->get_account(account_id); + const auto& stats = my->get_object(account.statistics); + + /* + * Compatibility issue + * Current Date: 2018-09-14 More info: https://github.com/bitshares/bitshares-core/issues/1307 + * Todo: remove the next line and change always_id to name in remote call after next hardfork + */ + auto 
always_id = my->account_id_to_string(account_id); + + // sequence of account_transaction_history_object start with 1 + start = start == 0 ? 1 : start; + + if (start <= stats.removed_ops) { + start = stats.removed_ops; + result.total_count =stats.removed_ops; + } + + while (limit > 0 && start <= stats.total_ops) { + uint32_t min_limit = std::min (100, limit); + auto current = my->_remote_hist->get_account_history_by_operations(always_id, operation_types, start, min_limit); + for (auto& obj : current.operation_history_objs) { + std::stringstream ss; + auto memo = obj.op.visit(detail::operation_printer(ss, *my, obj.result)); + + transaction_id_type transaction_id; + auto block = get_block(obj.block_num); + if (block.valid() && obj.trx_in_block < block->transaction_ids.size()) { + transaction_id = block->transaction_ids[obj.trx_in_block]; + } + result.details.push_back(operation_detail_ex{memo, ss.str(), obj, transaction_id}); + } + result.result_count += current.operation_history_objs.size(); + result.total_count += current.total_count; + + start += current.total_count > 0 ? current.total_count : min_limit; + limit -= current.operation_history_objs.size(); + } + + return result; +} + +full_account wallet_api::get_full_account( const string& name_or_id) +{ + return my->_remote_db->get_full_accounts({name_or_id}, false)[name_or_id]; +} + +vector wallet_api::get_market_history( + string symbol1, + string symbol2, + uint32_t bucket, + fc::time_point_sec start, + fc::time_point_sec end )const +{ + return my->_remote_hist->get_market_history( symbol1, symbol2, bucket, start, end ); +} + +vector wallet_api::get_account_limit_orders( + const string& name_or_id, + const string &base, + const string "e, + uint32_t limit, + optional ostart_id, + optional ostart_price) +{ + return my->_remote_db->get_account_limit_orders(name_or_id, base, quote, limit, ostart_id, ostart_price); +} + +vector wallet_api::get_limit_orders(std::string a, std::string b, uint32_t limit)const { - return my->_remote_hist->get_market_history( get_asset_id(symbol1), get_asset_id(symbol2), bucket, fc::time_point_sec(), fc::time_point::now() ); + return my->_remote_db->get_limit_orders(a, b, limit); } -vector wallet_api::get_limit_orders(string a, string b, uint32_t limit)const +vector wallet_api::get_call_orders(std::string a, uint32_t limit)const { - return my->_remote_db->get_limit_orders(get_asset(a).id, get_asset(b).id, limit); + return my->_remote_db->get_call_orders(a, limit); } -vector wallet_api::get_call_orders(string a, uint32_t limit)const +vector wallet_api::get_settle_orders(std::string a, uint32_t limit)const { - return my->_remote_db->get_call_orders(get_asset(a).id, limit); + return my->_remote_db->get_settle_orders(a, limit); } -vector wallet_api::get_settle_orders(string a, uint32_t limit)const +vector wallet_api::get_collateral_bids(std::string asset, uint32_t limit, uint32_t start)const { - return my->_remote_db->get_settle_orders(get_asset(a).id, limit); + return my->_remote_db->get_collateral_bids(asset, limit, start); } brain_key_info wallet_api::suggest_brain_key()const { - brain_key_info result; - // create a private key for secure entropy - fc::sha256 sha_entropy1 = fc::ecc::private_key::generate().get_secret(); - fc::sha256 sha_entropy2 = fc::ecc::private_key::generate().get_secret(); - fc::bigint entropy1( sha_entropy1.data(), sha_entropy1.data_size() ); - fc::bigint entropy2( sha_entropy2.data(), sha_entropy2.data_size() ); - fc::bigint entropy(entropy1); - entropy <<= 8*sha_entropy1.data_size(); - 
entropy += entropy2; - string brain_key = ""; - - for( int i=0; i 0 ) - brain_key += " "; - brain_key += graphene::words::word_list[ choice.to_int64() ]; - } + return graphene::wallet::utility::suggest_brain_key(); +} - brain_key = normalize_brain_key(brain_key); - fc::ecc::private_key priv_key = derive_private_key( brain_key, 0 ); - result.brain_priv_key = brain_key; - result.wif_priv_key = key_to_wif( priv_key ); - result.pub_key = priv_key.get_public_key(); - return result; +vector wallet_api::derive_owner_keys_from_brain_key(string brain_key, int number_of_desired_keys) const +{ + return graphene::wallet::utility::derive_owner_keys_from_brain_key(brain_key, number_of_desired_keys); } +bool wallet_api::is_public_key_registered(string public_key) const +{ + bool is_known = my->_remote_db->is_public_key_registered(public_key); + return is_known; +} + + string wallet_api::serialize_transaction( signed_transaction tx )const { return fc::to_hex(fc::raw::pack(tx)); @@ -2892,6 +3354,11 @@ signed_transaction wallet_api::sign_builder_transaction(transaction_handle_type return my->sign_builder_transaction(transaction_handle, broadcast); } +pair wallet_api::broadcast_transaction(signed_transaction tx) +{ + return my->broadcast_transaction(tx); +} + signed_transaction wallet_api::propose_builder_transaction( transaction_handle_type handle, time_point_sec expiration, @@ -2902,11 +3369,11 @@ signed_transaction wallet_api::propose_builder_transaction( } signed_transaction wallet_api::propose_builder_transaction2( - transaction_handle_type handle, - string account_name_or_id, - time_point_sec expiration, - uint32_t review_period_seconds, - bool broadcast) + transaction_handle_type handle, + string account_name_or_id, + time_point_sec expiration, + uint32_t review_period_seconds, + bool broadcast) { return my->propose_builder_transaction2(handle, account_name_or_id, expiration, review_period_seconds, broadcast); } @@ -3150,6 +3617,13 @@ signed_transaction wallet_api::update_asset(string symbol, return my->update_asset(symbol, new_issuer, new_options, broadcast); } +signed_transaction wallet_api::update_asset_issuer(string symbol, + string new_issuer, + bool broadcast /* = false */) +{ + return my->update_asset_issuer(symbol, new_issuer, broadcast); +} + signed_transaction wallet_api::update_bitasset(string symbol, bitasset_options new_options, bool broadcast /* = false */) @@ -3180,6 +3654,13 @@ signed_transaction wallet_api::fund_asset_fee_pool(string from, return my->fund_asset_fee_pool(from, symbol, amount, broadcast); } +signed_transaction wallet_api::claim_asset_fee_pool(string symbol, + string amount, + bool broadcast /* = false */) +{ + return my->claim_asset_fee_pool(symbol, amount, broadcast); +} + signed_transaction wallet_api::reserve_asset(string from, string amount, string symbol, @@ -3203,6 +3684,14 @@ signed_transaction wallet_api::settle_asset(string account_to_settle, return my->settle_asset(account_to_settle, amount_to_settle, symbol, broadcast); } +signed_transaction wallet_api::bid_collateral(string bidder_name, + string debt_amount, string debt_symbol, + string additional_collateral, + bool broadcast ) +{ + return my->bid_collateral(bidder_name, debt_amount, debt_symbol, additional_collateral, broadcast); +} + signed_transaction wallet_api::whitelist_account(string authorizing_account, string account_to_list, account_whitelist_operation::account_listing new_listing_status, @@ -3424,6 +3913,12 @@ dynamic_global_property_object wallet_api::get_dynamic_global_properties() const return 
my->get_dynamic_global_properties(); } +signed_transaction wallet_api::add_transaction_signature( signed_transaction tx, + bool broadcast ) +{ + return my->add_transaction_signature( tx, broadcast ); +} + string wallet_api::help()const { std::vector method_names = my->method_documentation.get_method_names(); @@ -3505,6 +4000,11 @@ bool wallet_api::load_wallet_file( string wallet_filename ) return my->load_wallet_file( wallet_filename ); } +void wallet_api::quit() +{ + my->quit(); +} + void wallet_api::save_wallet_file( string wallet_filename ) { my->save_wallet_file( wallet_filename ); @@ -3622,7 +4122,6 @@ vector< signed_transaction > wallet_api_impl::import_balance( string name_or_id, } vector< balance_object > balances = _remote_db->get_balance_objects( addrs ); - wdump((balances)); addrs.clear(); set bal_types; @@ -3706,39 +4205,38 @@ signed_transaction wallet_api::sell_asset(string seller_account, symbol_to_receive, expiration, fill_or_kill, broadcast); } -signed_transaction wallet_api::sell( string seller_account, - string base, - string quote, - double rate, - double amount, - bool broadcast ) +signed_transaction wallet_api::borrow_asset(string seller_name, string amount_to_sell, + string asset_symbol, string amount_of_collateral, bool broadcast) { - return my->sell_asset( seller_account, std::to_string( amount ), base, - std::to_string( rate * amount ), quote, 0, false, broadcast ); + FC_ASSERT(!is_locked()); + return my->borrow_asset(seller_name, amount_to_sell, asset_symbol, amount_of_collateral, broadcast); } -signed_transaction wallet_api::buy( string buyer_account, - string base, - string quote, - double rate, - double amount, - bool broadcast ) +signed_transaction wallet_api::borrow_asset_ext( string seller_name, string amount_to_sell, + string asset_symbol, string amount_of_collateral, + call_order_update_operation::extensions_type extensions, + bool broadcast) { - return my->sell_asset( buyer_account, std::to_string( rate * amount ), quote, - std::to_string( amount ), base, 0, false, broadcast ); + FC_ASSERT(!is_locked()); + return my->borrow_asset_ext(seller_name, amount_to_sell, asset_symbol, amount_of_collateral, extensions, broadcast); } -signed_transaction wallet_api::borrow_asset(string seller_name, string amount_to_sell, - string asset_symbol, string amount_of_collateral, bool broadcast) +signed_transaction wallet_api::cancel_order(object_id_type order_id, bool broadcast) { FC_ASSERT(!is_locked()); - return my->borrow_asset(seller_name, amount_to_sell, asset_symbol, amount_of_collateral, broadcast); + return my->cancel_order(order_id, broadcast); } -signed_transaction wallet_api::cancel_order(object_id_type order_id, bool broadcast) +memo_data wallet_api::sign_memo(string from, string to, string memo) { FC_ASSERT(!is_locked()); - return my->cancel_order(order_id, broadcast); + return my->sign_memo(from, to, memo); +} + +string wallet_api::read_memo(const memo_data& memo) +{ + FC_ASSERT(!is_locked()); + return my->read_memo(memo); } string wallet_api::get_key_label( public_key_type key )const @@ -3756,7 +4254,7 @@ string wallet_api::get_private_key( public_key_type pubkey )const public_key_type wallet_api::get_public_key( string label )const { - try { return fc::variant(label).as(); } catch ( ... ){} + try { return fc::variant(label, 1).as( 1 ); } catch ( ... 
){} auto key_itr = my->_wallet.labeled_keys.get().find(label); if( key_itr != my->_wallet.labeled_keys.get().end() ) @@ -3886,6 +4384,9 @@ blind_confirmation wallet_api::transfer_from_blind( string from_blind_account_ke ilog( "about to validate" ); conf.trx.validate(); + ilog( "about to broadcast" ); + conf.trx = sign_transaction( conf.trx, broadcast ); + if( broadcast && conf.outputs.size() == 2 ) { // Save the change @@ -3903,9 +4404,6 @@ blind_confirmation wallet_api::transfer_from_blind( string from_blind_account_ke //} catch ( ... ){} } - ilog( "about to broadcast" ); - conf.trx = sign_transaction( conf.trx, broadcast ); - return conf; } FC_CAPTURE_AND_RETHROW( (from_blind_account_key_or_label)(to_account_id_or_name)(amount_in)(symbol) ) } @@ -3978,7 +4476,7 @@ blind_confirmation wallet_api::blind_transfer_help( string from_key_or_label, my->_wallet.blind_receipts.modify( itr, []( blind_receipt& r ){ r.used = true; } ); } - FC_ASSERT( total_amount >= amount+blind_tr.fee, "Insufficent Balance", ("available",total_amount)("amount",amount)("fee",blind_tr.fee) ); + FC_ASSERT( total_amount >= amount+blind_tr.fee, "Insufficient Balance", ("available",total_amount)("amount",amount)("fee",blind_tr.fee) ); auto one_time_key = fc::ecc::private_key::generate(); auto secret = one_time_key.get_shared_secret( to_key ); @@ -4019,12 +4517,14 @@ blind_confirmation wallet_api::blind_transfer_help( string from_key_or_label, if( blind_tr.outputs.size() > 1 ) { - to_out.range_proof = fc::ecc::range_proof_sign( 0, to_out.commitment, blind_factor, nonce, 0, 0, amount.amount.value ); + to_out.range_proof = fc::ecc::range_proof_sign( 0, to_out.commitment, blind_factor, nonce, + 0, RANGE_PROOF_MANTISSA, amount.amount.value ); blind_output change_out; change_out.owner = authority( 1, public_key_type( from_pub_key.child( from_child ) ), 1 ); change_out.commitment = fc::ecc::blind( change_blind_factor, change.amount.value ); - change_out.range_proof = fc::ecc::range_proof_sign( 0, change_out.commitment, change_blind_factor, from_nonce, 0, 0, change.amount.value ); + change_out.range_proof = fc::ecc::range_proof_sign( 0, change_out.commitment, change_blind_factor, from_nonce, + 0, RANGE_PROOF_MANTISSA, change.amount.value ); blind_tr.outputs[1] = change_out; @@ -4097,6 +4597,7 @@ blind_confirmation wallet_api::transfer_to_blind( string from_account_id_or_name bool broadcast ) { try { FC_ASSERT( !is_locked() ); + idump((to_amounts)); blind_confirmation confirm; account_object from_account = my->get_account(from_account_id_or_name); @@ -4131,8 +4632,8 @@ blind_confirmation wallet_api::transfer_to_blind( string from_account_id_or_name out.owner = authority( 1, public_key_type( to_pub_key.child( child ) ), 1 ); out.commitment = fc::ecc::blind( blind_factor, amount.amount.value ); if( to_amounts.size() > 1 ) - out.range_proof = fc::ecc::range_proof_sign( 0, out.commitment, blind_factor, nonce, 0, 0, amount.amount.value ); - + out.range_proof = fc::ecc::range_proof_sign( 0, out.commitment, blind_factor, nonce, + 0, RANGE_PROOF_MANTISSA, amount.amount.value ); blind_confirmation::output conf_output; conf_output.label = item.first; @@ -4291,13 +4792,15 @@ vesting_balance_object_with_info::vesting_balance_object_with_info( const vestin } } // graphene::wallet -void fc::to_variant(const account_multi_index_type& accts, fc::variant& vo) -{ - vo = vector(accts.begin(), accts.end()); -} +namespace fc { + void to_variant( const account_multi_index_type& accts, variant& vo, uint32_t max_depth ) + { + to_variant( 
std::vector<account_object>(accts.begin(), accts.end()), vo, max_depth ); + } -void fc::from_variant(const fc::variant& var, account_multi_index_type& vo) -{ - const vector<account_object>& v = var.as<vector<account_object>>(); - vo = account_multi_index_type(v.begin(), v.end()); + void from_variant( const variant& var, account_multi_index_type& vo, uint32_t max_depth ) + { + const std::vector<account_object>& v = var.as<std::vector<account_object>>( max_depth ); + vo = account_multi_index_type(v.begin(), v.end()); + } } diff --git a/programs/CMakeLists.txt b/programs/CMakeLists.txt index 932e69b777..88894ddf70 100644 --- a/programs/CMakeLists.txt +++ b/programs/CMakeLists.txt @@ -2,7 +2,7 @@ add_subdirectory( build_helpers ) add_subdirectory( cli_wallet ) add_subdirectory( genesis_util ) add_subdirectory( witness_node ) -add_subdirectory( debug_node ) add_subdirectory( delayed_node ) add_subdirectory( js_operation_serializer ) add_subdirectory( size_checker ) +add_subdirectory( network_mapper ) diff --git a/programs/README.md b/programs/README.md new file mode 100644 index 0000000000..24f646ac0d --- /dev/null +++ b/programs/README.md @@ -0,0 +1,24 @@ +# BitShares Programs + +The bitshares programs are a collection of binaries used to run the blockchain, interact with it, or perform related utility tasks. + +The main program is the `witness_node`, used to run a bitshares block producer, API or plugin node. The second most important is the `cli_wallet`, used to interact with the blockchain. These two programs are the ones most used by the community and most actively updated by the developers; the rest of the programs are utilities. + +The programs here are part of the **bitshares-core** project and are maintained by the bitshares core team and contributors. + + +# Available Programs + +Folder | Name | Description | Category | Status | Help +---|---|---|---|---|--- +[witness_node](witness_node) | Witness Node | Main software used to sign blocks or provide services. | Node | Active | `./witness_node --help` +[cli_wallet](cli_wallet) | CLI Wallet | Software to interact with the blockchain by command line. | Wallet | Active | `./cli_wallet --help` +[delayed_node](delayed_node) | Delayed Node | Runs a node with the `delayed_node` plugin loaded. Deprecated in favour of `./witness_node --plugins "delayed_node"`. | Node | Deprecated | `./delayed_node --help` +[js_operation_serializer](js_operation_serializer) | Operation Serializer | Dumps all blockchain operations and types. Used by the UI. | Tool | Old | `./js_operation_serializer` +[size_checker](size_checker) | Size Checker | Returns the average wire size in bytes of all operations. | Tool | Old | `./size_checker` +[cat-parts](build_helpers/cat-parts.cpp) | Cat parts | Used to create `hardfork.hpp` from individual files. | Tool | Active | `./cat-parts` +[check_reflect](build_helpers/check_reflect.py) | Check reflect | Check reflected fields automatically (https://github.com/cryptonomex/graphene/issues/562) | Tool | Old | `doxygen; cp -rf doxygen programs/build_helpers; ./check_reflect.py` +[member_enumerator](build_helpers/member_enumerator.cpp) | Member enumerator | | Tool | Deprecated | `./member_enumerator` +[get_dev_key](genesis_util/get_dev_key.cpp) | Get Dev Key | Creates public, private and address keys. Useful for private testnets, `genesis.json` files, new blockchain creation and more. | Tool | Active | `/programs/genesis_util/get_dev_key -h` +[genesis_util](genesis_util) | Genesis Utils | Other utilities for genesis creation. | Tool | Old | +[network_mapper](network_mapper) | Network Mapper | Generates a .DOT file that can be rendered by graphviz to produce images of node connectivity. 
| Tool | Experimental | `./programs/network_mapper/network_mapper` diff --git a/programs/build_helpers/buildstep b/programs/build_helpers/buildstep new file mode 100755 index 0000000000..44d5458a10 --- /dev/null +++ b/programs/build_helpers/buildstep @@ -0,0 +1,55 @@ +#!/bin/sh + +usage () { + echo Usage: + echo " ${0##*/} [-h | --help] Display this help message" + echo " ${0##*/} -s | --start Initialize timing" + echo " ${0##*/} " + echo "The last form executes build step consisting of shell " + echo "if imated time is still available, otherwise it fails fast." + echo " and must be specified in seconds." + exit $1 +} + +if [ "$#" = 0 -o "$1" = "--help" -o "$1" = "-h" ]; then + usage `test "$#" = 1; echo $?` +fi + +NOW="$(date +%s)" + +if [ "$1" = "--start" -o "$1" = "-s" ]; then + if [ "$#" != 2 ]; then + usage 1 + fi + echo "$2 $NOW" >_start_time + echo "Starting at $(date --date=@$NOW)" + exit 0 +fi + +NAME="$1" +EST="$2" +CMD="$3" + +if [ ! -r _start_time ]; then + echo "Need to initialize with '$0 -s ' first!" 1>&2 + exit 1 +fi + +read max begin prev_name prev_begin <_start_time + +if [ "$prev_name" != "" ]; then + echo "'$prev_name' took $(($NOW - $prev_begin))s" +fi + +if [ "$CMD" != "" ]; then + if [ $(($NOW - $begin + $EST)) -lt $max ]; then + echo "Running '$NAME' at $NOW..." + echo "sh -c '$CMD'" + echo "$max $begin $NAME $NOW" >_start_time + exec bash -c "$CMD" + fi + echo "$(($begin + $max - $NOW))s left - insufficient to run '$NAME', exiting!" 1>&2 + exit 1 +fi + +exit 0 diff --git a/programs/build_helpers/make_with_sonar b/programs/build_helpers/make_with_sonar new file mode 100755 index 0000000000..a91470adf5 --- /dev/null +++ b/programs/build_helpers/make_with_sonar @@ -0,0 +1,8 @@ +#!/bin/sh + +OUT_DIR="$1" +shift +if which build-wrapper-linux-x86-64 >/dev/null; then + exec build-wrapper-linux-x86-64 --out-dir "$OUT_DIR" make "$@" +fi +exec make "$@" diff --git a/programs/build_helpers/member_enumerator.cpp b/programs/build_helpers/member_enumerator.cpp index 001b47bd73..53d1169037 100644 --- a/programs/build_helpers/member_enumerator.cpp +++ b/programs/build_helpers/member_enumerator.cpp @@ -18,7 +18,7 @@ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ -#include +#include #include #include #include @@ -28,7 +28,6 @@ #include #include #include -#include #include using namespace graphene::chain; @@ -37,7 +36,7 @@ namespace graphene { namespace member_enumerator { struct class_processor { - class_processor( std::map< std::string, std::vector< std::string > >& r ) : result(r) {} + explicit class_processor( std::map< std::string, std::vector< std::string > >& r ) : result(r) {} template< typename T > void process_class( const T* dummy ); @@ -84,7 +83,7 @@ struct member_visitor struct static_variant_visitor { - static_variant_visitor( class_processor* p ) : proc(p) {} + explicit static_variant_visitor( class_processor* p ) : proc(p) {} typedef void result_type; @@ -194,13 +193,12 @@ int main( int argc, char** argv ) { std::map< std::string, std::vector< std::string > > result; graphene::member_enumerator::class_processor::process_class(result); - //graphene::member_enumerator::process_class(result); fc::mutable_variant_object mvo; for( const std::pair< std::string, std::vector< std::string > >& e : result ) { variant v; - to_variant( e.second, v ); + to_variant( e.second, v , 1); mvo.set( e.first, v ); } diff --git a/programs/cli_wallet/CMakeLists.txt b/programs/cli_wallet/CMakeLists.txt index 140bdce32e..689601712f 100644 --- a/programs/cli_wallet/CMakeLists.txt +++ b/programs/cli_wallet/CMakeLists.txt @@ -10,7 +10,7 @@ if( GPERFTOOLS_FOUND ) endif() target_link_libraries( cli_wallet - PRIVATE graphene_app graphene_net graphene_chain graphene_egenesis_brief graphene_utilities graphene_wallet fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) + PRIVATE graphene_app graphene_net graphene_chain graphene_egenesis_brief graphene_utilities graphene_wallet fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) if(MSVC) set_source_files_properties( main.cpp PROPERTIES COMPILE_FLAGS "/bigobj" ) diff --git a/programs/cli_wallet/main.cpp b/programs/cli_wallet/main.cpp index 040b6fac40..2a949417ae 100644 --- a/programs/cli_wallet/main.cpp +++ b/programs/cli_wallet/main.cpp @@ -34,10 +34,9 @@ #include #include #include -#include #include -#include +#include #include #include #include @@ -50,6 +49,11 @@ #include #include +#include +#include +#include +#include + #ifdef WIN32 # include #else @@ -79,7 +83,10 @@ int main( int argc, char** argv ) ("rpc-http-endpoint,H", bpo::value()->implicit_value("127.0.0.1:8093"), "Endpoint for wallet HTTP RPC to listen on") ("daemon,d", "Run the wallet in daemon mode" ) ("wallet-file,w", bpo::value()->implicit_value("wallet.json"), "wallet to load") - ("chain-id", bpo::value(), "chain ID to connect to"); + ("chain-id", bpo::value(), "chain ID to connect to") + ("suggest-brain-key", "Suggest a safe brain key to use for creating your account") + ("version,v", "Display version information"); + bpo::variables_map options; @@ -90,6 +97,23 @@ int main( int argc, char** argv ) std::cout << opts << "\n"; return 0; } + if( options.count("version") ) + { + std::cout << "Version: " << graphene::utilities::git_revision_description << "\n"; + std::cout << "SHA: " << graphene::utilities::git_revision_sha << "\n"; + std::cout << "Timestamp: " << fc::get_approximate_relative_time_string(fc::time_point_sec(graphene::utilities::git_revision_unix_timestamp)) << "\n"; + std::cout << "SSL: " << OPENSSL_VERSION_TEXT << "\n"; + std::cout << "Boost: " << boost::replace_all_copy(std::string(BOOST_LIB_VERSION), "_", ".") << "\n"; + std::cout << "Websocket++: " << websocketpp::major_version << "." << websocketpp::minor_version << "." 
<< websocketpp::patch_version << "\n"; + return 0; + } + if( options.count("suggest-brain-key") ) + { + auto keyinfo = graphene::wallet::utility::suggest_brain_key(); + string data = fc::json::to_pretty_string( keyinfo ); + std::cout << data.c_str() << std::endl; + return 0; + } fc::path data_dir; fc::logging_config cfg; @@ -104,8 +128,8 @@ int main( int argc, char** argv ) std::cout << "Logging RPC to file: " << (data_dir / ac.filename).preferred_string() << "\n"; - cfg.appenders.push_back(fc::appender_config( "default", "console", fc::variant(fc::console_appender::config()))); - cfg.appenders.push_back(fc::appender_config( "rpc", "file", fc::variant(ac))); + cfg.appenders.push_back(fc::appender_config( "default", "console", fc::variant(fc::console_appender::config(), 20))); + cfg.appenders.push_back(fc::appender_config( "rpc", "file", fc::variant(ac, 5))); cfg.loggers = { fc::logger_config("default"), fc::logger_config( "rpc") }; cfg.loggers.front().level = fc::log_level::info; @@ -113,8 +137,6 @@ int main( int argc, char** argv ) cfg.loggers.back().level = fc::log_level::debug; cfg.loggers.back().appenders = {"rpc"}; - //fc::configure_logging( cfg ); - fc::ecc::private_key committee_private_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("null_key"))); idump( (key_to_wif( committee_private_key ) ) ); @@ -135,7 +157,7 @@ int main( int argc, char** argv ) fc::path wallet_file( options.count("wallet-file") ? options.at("wallet-file").as() : "wallet.json"); if( fc::exists( wallet_file ) ) { - wdata = fc::json::from_file( wallet_file ).as(); + wdata = fc::json::from_file( wallet_file ).as( GRAPHENE_MAX_NESTED_OBJECTS ); if( options.count("chain-id") ) { // the --chain-id on the CLI must match the chain ID embedded in the wallet file @@ -171,12 +193,11 @@ int main( int argc, char** argv ) fc::http::websocket_client client; idump((wdata.ws_server)); auto con = client.connect( wdata.ws_server ); - auto apic = std::make_shared(*con); + auto apic = std::make_shared(*con, GRAPHENE_MAX_NESTED_OBJECTS); auto remote_api = apic->get_remote_api< login_api >(1); edump((wdata.ws_user)(wdata.ws_password) ); - // TODO: Error message here - FC_ASSERT( remote_api->login( wdata.ws_user, wdata.ws_password ) ); + FC_ASSERT( remote_api->login( wdata.ws_user, wdata.ws_password ), "Failed to log in to API server" ); auto wapiptr = std::make_shared( wdata, remote_api ); wapiptr->set_wallet_filename( wallet_file.generic_string() ); @@ -184,11 +205,11 @@ int main( int argc, char** argv ) fc::api wapi(wapiptr); - auto wallet_cli = std::make_shared(); + auto wallet_cli = std::make_shared( GRAPHENE_MAX_NESTED_OBJECTS ); for( auto& name_formatter : wapiptr->get_result_formatters() ) wallet_cli->format_result( name_formatter.first, name_formatter.second ); - boost::signals2::scoped_connection closed_connection(con->closed.connect([=]{ + boost::signals2::scoped_connection closed_connection(con->closed.connect([wallet_cli]{ cerr << "Server has disconnected us.\n"; wallet_cli->stop(); })); @@ -208,10 +229,8 @@ int main( int argc, char** argv ) auto _websocket_server = std::make_shared(); if( options.count("rpc-endpoint") ) { - _websocket_server->on_connection([&]( const fc::http::websocket_connection_ptr& c ){ - std::cout << "here... \n"; - wlog("." 
); - auto wsc = std::make_shared(*c); + _websocket_server->on_connection([&wapi]( const fc::http::websocket_connection_ptr& c ){ + auto wsc = std::make_shared(*c, GRAPHENE_MAX_NESTED_OBJECTS); wsc->register_api(wapi); c->set_session_data( wsc ); }); @@ -227,8 +246,8 @@ int main( int argc, char** argv ) auto _websocket_tls_server = std::make_shared(cert_pem); if( options.count("rpc-tls-endpoint") ) { - _websocket_tls_server->on_connection([&]( const fc::http::websocket_connection_ptr& c ){ - auto wsc = std::make_shared(*c); + _websocket_tls_server->on_connection([&wapi]( const fc::http::websocket_connection_ptr& c ){ + auto wsc = std::make_shared(*c, GRAPHENE_MAX_NESTED_OBJECTS); wsc->register_api(wapi); c->set_session_data( wsc ); }); @@ -246,10 +265,10 @@ int main( int argc, char** argv ) // due to implementation, on_request() must come AFTER listen() // _http_server->on_request( - [&]( const fc::http::request& req, const fc::http::server::response& resp ) + [&wapi]( const fc::http::request& req, const fc::http::server::response& resp ) { std::shared_ptr< fc::rpc::http_api_connection > conn = - std::make_shared< fc::rpc::http_api_connection>(); + std::make_shared< fc::rpc::http_api_connection >( GRAPHENE_MAX_NESTED_OBJECTS ); conn->register_api( wapi ); conn->on_request( req, resp ); } ); @@ -259,6 +278,17 @@ int main( int argc, char** argv ) { wallet_cli->register_api( wapi ); wallet_cli->start(); + + fc::set_signal_handler([](int signal) { + ilog( "Captured SIGINT not in daemon mode" ); + fclose(stdin); + }, SIGINT); + + fc::set_signal_handler([](int signal) { + ilog( "Captured SIGTERM not in daemon mode" ); + fclose(stdin); + }, SIGTERM); + wallet_cli->wait(); } else @@ -268,6 +298,10 @@ int main( int argc, char** argv ) exit_promise->set_value(signal); }, SIGINT); + fc::set_signal_handler([&exit_promise](int signal) { + exit_promise->set_value(signal); + }, SIGTERM); + ilog( "Entering Daemon Mode, ^C to exit" ); exit_promise->wait(); } diff --git a/programs/debug_node/CMakeLists.txt b/programs/debug_node/CMakeLists.txt deleted file mode 100644 index 8ec7362ba4..0000000000 --- a/programs/debug_node/CMakeLists.txt +++ /dev/null @@ -1,21 +0,0 @@ -add_executable( debug_node main.cpp ) -if( UNIX AND NOT APPLE ) - set(rt_library rt ) -endif() - -find_package( Gperftools QUIET ) -if( GPERFTOOLS_FOUND ) - message( STATUS "Found gperftools; compiling debug_node with TCMalloc") - list( APPEND PLATFORM_SPECIFIC_LIBS tcmalloc ) -endif() - -target_link_libraries( debug_node - PRIVATE graphene_app graphene_account_history graphene_market_history graphene_witness graphene_debug_witness graphene_chain graphene_egenesis_full fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) - -install( TARGETS - debug_node - - RUNTIME DESTINATION bin - LIBRARY DESTINATION lib - ARCHIVE DESTINATION lib -) diff --git a/programs/debug_node/main.cpp b/programs/debug_node/main.cpp deleted file mode 100644 index 4b89c199e2..0000000000 --- a/programs/debug_node/main.cpp +++ /dev/null @@ -1,307 +0,0 @@ -/* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. 
- * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ -#include - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include - -#include -#include - -#ifdef WIN32 -# include -#else -# include -#endif - -using namespace graphene; -namespace bpo = boost::program_options; - -void write_default_logging_config_to_stream(std::ostream& out); -fc::optional load_logging_config_from_ini_file(const fc::path& config_ini_filename); - -int main(int argc, char** argv) { - app::application* node = new app::application(); - fc::oexception unhandled_exception; - try { - bpo::options_description app_options("Graphene Witness Node"); - bpo::options_description cfg_options("Graphene Witness Node"); - app_options.add_options() - ("help,h", "Print this help message and exit.") - ("data-dir,d", bpo::value()->default_value("witness_node_data_dir"), "Directory containing databases, configuration file, etc.") - ; - - bpo::variables_map options; - - auto witness_plug = node->register_plugin(); - auto history_plug = node->register_plugin(); - auto market_history_plug = node->register_plugin(); - - try - { - bpo::options_description cli, cfg; - node->set_program_options(cli, cfg); - app_options.add(cli); - cfg_options.add(cfg); - bpo::store(bpo::parse_command_line(argc, argv, app_options), options); - } - catch (const boost::program_options::error& e) - { - std::cerr << "Error parsing command line: " << e.what() << "\n"; - return 1; - } - - if( options.count("help") ) - { - std::cout << app_options << "\n"; - return 0; - } - - fc::path data_dir; - if( options.count("data-dir") ) - { - data_dir = options["data-dir"].as(); - if( data_dir.is_relative() ) - data_dir = fc::current_path() / data_dir; - } - - fc::path config_ini_path = data_dir / "config.ini"; - if( fc::exists(config_ini_path) ) - { - // get the basic options - bpo::store(bpo::parse_config_file(config_ini_path.preferred_string().c_str(), cfg_options, true), options); - - // try to get logging options from the config file. 
- try - { - fc::optional logging_config = load_logging_config_from_ini_file(config_ini_path); - if (logging_config) - fc::configure_logging(*logging_config); - } - catch (const fc::exception&) - { - wlog("Error parsing logging config from config file ${config}, using default config", ("config", config_ini_path.preferred_string())); - } - } - else - { - ilog("Writing new config file at ${path}", ("path", config_ini_path)); - if( !fc::exists(data_dir) ) - fc::create_directories(data_dir); - - std::ofstream out_cfg(config_ini_path.preferred_string()); - for( const boost::shared_ptr od : cfg_options.options() ) - { - if( !od->description().empty() ) - out_cfg << "# " << od->description() << "\n"; - boost::any store; - if( !od->semantic()->apply_default(store) ) - out_cfg << "# " << od->long_name() << " = \n"; - else { - auto example = od->format_parameter(); - if( example.empty() ) - // This is a boolean switch - out_cfg << od->long_name() << " = " << "false\n"; - else { - // The string is formatted "arg (=)" - example.erase(0, 6); - example.erase(example.length()-1); - out_cfg << od->long_name() << " = " << example << "\n"; - } - } - out_cfg << "\n"; - } - write_default_logging_config_to_stream(out_cfg); - out_cfg.close(); - // read the default logging config we just wrote out to the file and start using it - fc::optional logging_config = load_logging_config_from_ini_file(config_ini_path); - if (logging_config) - fc::configure_logging(*logging_config); - } - - bpo::notify(options); - node->initialize(data_dir, options); - node->initialize_plugins( options ); - - node->startup(); - node->startup_plugins(); - - fc::promise::ptr exit_promise = new fc::promise("UNIX Signal Handler"); - - fc::set_signal_handler([&exit_promise](int signal) { - elog( "Caught SIGINT attempting to exit cleanly" ); - exit_promise->set_value(signal); - }, SIGINT); - - fc::set_signal_handler([&exit_promise](int signal) { - elog( "Caught SIGTERM attempting to exit cleanly" ); - exit_promise->set_value(signal); - }, SIGTERM); - - ilog("Started witness node on a chain with ${h} blocks.", ("h", node->chain_database()->head_block_num())); - ilog("Chain ID is ${id}", ("id", node->chain_database()->get_chain_id()) ); - - int signal = exit_promise->wait(); - ilog("Exiting from signal ${n}", ("n", signal)); - node->shutdown_plugins(); - node->shutdown(); - delete node; - return 0; - } catch( const fc::exception& e ) { - // deleting the node can yield, so do this outside the exception handler - unhandled_exception = e; - } - - if (unhandled_exception) - { - elog("Exiting with error:\n${e}", ("e", unhandled_exception->to_detail_string())); - node->shutdown(); - delete node; - return 1; - } -} - -// logging config is too complicated to be parsed by boost::program_options, -// so we do it by hand -// -// Currently, you can only specify the filenames and logging levels, which -// are all most users would want to change. 
At a later time, options can -// be added to control rotation intervals, compression, and other seldom- -// used features -void write_default_logging_config_to_stream(std::ostream& out) -{ - out << "# declare an appender named \"stderr\" that writes messages to the console\n" - "[log.console_appender.stderr]\n" - "stream=std_error\n\n" - "# declare an appender named \"p2p\" that writes messages to p2p.log\n" - "[log.file_appender.p2p]\n" - "filename=logs/p2p/p2p.log\n" - "# filename can be absolute or relative to this config file\n\n" - "# route any messages logged to the default logger to the \"stderr\" logger we\n" - "# declared above, if they are info level are higher\n" - "[logger.default]\n" - "level=info\n" - "appenders=stderr\n\n" - "# route messages sent to the \"p2p\" logger to the p2p appender declared above\n" - "[logger.p2p]\n" - "level=debug\n" - "appenders=p2p\n\n"; -} - -fc::optional load_logging_config_from_ini_file(const fc::path& config_ini_filename) -{ - try - { - fc::logging_config logging_config; - bool found_logging_config = false; - - boost::property_tree::ptree config_ini_tree; - boost::property_tree::ini_parser::read_ini(config_ini_filename.preferred_string().c_str(), config_ini_tree); - for (const auto& section : config_ini_tree) - { - const std::string& section_name = section.first; - const boost::property_tree::ptree& section_tree = section.second; - - const std::string console_appender_section_prefix = "log.console_appender."; - const std::string file_appender_section_prefix = "log.file_appender."; - const std::string logger_section_prefix = "logger."; - - if (boost::starts_with(section_name, console_appender_section_prefix)) - { - std::string console_appender_name = section_name.substr(console_appender_section_prefix.length()); - std::string stream_name = section_tree.get("stream"); - - // construct a default console appender config here - // stdout/stderr will be taken from ini file, everything else hard-coded here - fc::console_appender::config console_appender_config; - console_appender_config.level_colors.emplace_back( - fc::console_appender::level_color(fc::log_level::debug, - fc::console_appender::color::green)); - console_appender_config.level_colors.emplace_back( - fc::console_appender::level_color(fc::log_level::warn, - fc::console_appender::color::brown)); - console_appender_config.level_colors.emplace_back( - fc::console_appender::level_color(fc::log_level::error, - fc::console_appender::color::cyan)); - console_appender_config.stream = fc::variant(stream_name).as(); - logging_config.appenders.push_back(fc::appender_config(console_appender_name, "console", fc::variant(console_appender_config))); - found_logging_config = true; - } - else if (boost::starts_with(section_name, file_appender_section_prefix)) - { - std::string file_appender_name = section_name.substr(file_appender_section_prefix.length()); - fc::path file_name = section_tree.get("filename"); - if (file_name.is_relative()) - file_name = fc::absolute(config_ini_filename).parent_path() / file_name; - - - // construct a default file appender config here - // filename will be taken from ini file, everything else hard-coded here - fc::file_appender::config file_appender_config; - file_appender_config.filename = file_name; - file_appender_config.flush = true; - file_appender_config.rotate = true; - file_appender_config.rotation_interval = fc::hours(1); - file_appender_config.rotation_limit = fc::days(1); - logging_config.appenders.push_back(fc::appender_config(file_appender_name, "file", 
fc::variant(file_appender_config))); - found_logging_config = true; - } - else if (boost::starts_with(section_name, logger_section_prefix)) - { - std::string logger_name = section_name.substr(logger_section_prefix.length()); - std::string level_string = section_tree.get("level"); - std::string appenders_string = section_tree.get("appenders"); - fc::logger_config logger_config(logger_name); - logger_config.level = fc::variant(level_string).as(); - boost::split(logger_config.appenders, appenders_string, - boost::is_any_of(" ,"), - boost::token_compress_on); - logging_config.loggers.push_back(logger_config); - found_logging_config = true; - } - } - if (found_logging_config) - return logging_config; - else - return fc::optional(); - } - FC_RETHROW_EXCEPTIONS(warn, "") -} diff --git a/programs/delayed_node/main.cpp b/programs/delayed_node/main.cpp index 430fcfa31a..a3bcc2b5e2 100644 --- a/programs/delayed_node/main.cpp +++ b/programs/delayed_node/main.cpp @@ -64,9 +64,11 @@ int main(int argc, char** argv) { bpo::options_description app_options("Graphene Delayed Node"); bpo::options_description cfg_options("Graphene Delayed Node"); app_options.add_options() - ("help,h", "Print this help message and exit.") - ("data-dir,d", bpo::value()->default_value("delayed_node_data_dir"), "Directory containing databases, configuration file, etc.") - ; + ("help,h", "Print this help message and exit.") + ("data-dir,d", bpo::value()->default_value("delayed_node_data_dir"), "Directory containing databases, configuration file, etc.") + ("plugins", bpo::value()->default_value("delayed_node account_history market_history"), + "Space-separated list of plugins to activate"); + ; bpo::variables_map options; @@ -84,8 +86,8 @@ int main(int argc, char** argv) { } catch (const boost::program_options::error& e) { - std::cerr << "Error parsing command line: " << e.what() << "\n"; - return 1; + std::cerr << "Error parsing command line: " << e.what() << "\n"; + return 1; } if( options.count("help") ) @@ -160,15 +162,25 @@ int main(int argc, char** argv) { elog("Error parsing configuration file: ${e}", ("e", e.what())); return 1; } + + std::set plugins; + boost::split(plugins, options.at("plugins").as(), [](char c){return c == ' ';}); + + std::for_each(plugins.begin(), plugins.end(), [&](const std::string& plug) mutable { + if (!plug.empty()) { + node.enable_plugin(plug); + } + }); node.initialize(data_dir, options); node.initialize_plugins( options ); node.startup(); + node.startup_plugins(); fc::promise::ptr exit_promise = new fc::promise("UNIX Signal Handler"); fc::set_signal_handler([&exit_promise](int signal) { - exit_promise->set_value(signal); + exit_promise->set_value(signal); }, SIGINT); ilog("Started delayed node on a chain with ${h} blocks.", ("h", node.chain_database()->head_block_num())); @@ -177,10 +189,11 @@ int main(int argc, char** argv) { int signal = exit_promise->wait(); ilog("Exiting from signal ${n}", ("n", signal)); node.shutdown_plugins(); - return 0; + node.shutdown(); + return EXIT_SUCCESS; } catch( const fc::exception& e ) { elog("Exiting with error:\n${e}", ("e", e.to_detail_string())); - return 1; + return EXIT_FAILURE; } } @@ -207,7 +220,7 @@ void write_default_logging_config_to_stream(std::ostream& out) "appenders=stderr\n\n" "# route messages sent to the \"p2p\" logger to the p2p appender declared above\n" "[logger.p2p]\n" - "level=debug\n" + "level=info\n" "appenders=p2p\n\n"; } @@ -238,16 +251,16 @@ fc::optional load_logging_config_from_ini_file(const fc::pat // stdout/stderr will be taken 
from ini file, everything else hard-coded here fc::console_appender::config console_appender_config; console_appender_config.level_colors.emplace_back( - fc::console_appender::level_color(fc::log_level::debug, - fc::console_appender::color::green)); + fc::console_appender::level_color(fc::log_level::debug, + fc::console_appender::color::green)); console_appender_config.level_colors.emplace_back( - fc::console_appender::level_color(fc::log_level::warn, - fc::console_appender::color::brown)); + fc::console_appender::level_color(fc::log_level::warn, + fc::console_appender::color::brown)); console_appender_config.level_colors.emplace_back( - fc::console_appender::level_color(fc::log_level::error, - fc::console_appender::color::cyan)); - console_appender_config.stream = fc::variant(stream_name).as(); - logging_config.appenders.push_back(fc::appender_config(console_appender_name, "console", fc::variant(console_appender_config))); + fc::console_appender::level_color(fc::log_level::error, + fc::console_appender::color::cyan)); + console_appender_config.stream = fc::variant(stream_name, 1).as(1); + logging_config.appenders.push_back(fc::appender_config(console_appender_name, "console", fc::variant(console_appender_config, GRAPHENE_MAX_NESTED_OBJECTS))); found_logging_config = true; } else if (boost::starts_with(section_name, file_appender_section_prefix)) @@ -266,7 +279,7 @@ fc::optional load_logging_config_from_ini_file(const fc::pat file_appender_config.rotate = true; file_appender_config.rotation_interval = fc::hours(1); file_appender_config.rotation_limit = fc::days(1); - logging_config.appenders.push_back(fc::appender_config(file_appender_name, "file", fc::variant(file_appender_config))); + logging_config.appenders.push_back(fc::appender_config(file_appender_name, "file", fc::variant(file_appender_config, GRAPHENE_MAX_NESTED_OBJECTS))); found_logging_config = true; } else if (boost::starts_with(section_name, logger_section_prefix)) @@ -275,7 +288,7 @@ fc::optional load_logging_config_from_ini_file(const fc::pat std::string level_string = section_tree.get("level"); std::string appenders_string = section_tree.get("appenders"); fc::logger_config logger_config(logger_name); - logger_config.level = fc::variant(level_string).as(); + logger_config.level = fc::variant(level_string, 1).as(1); boost::split(logger_config.appenders, appenders_string, boost::is_any_of(" ,"), boost::token_compress_on); diff --git a/programs/genesis_util/genesis_update.cpp b/programs/genesis_util/genesis_update.cpp index 0dec01654b..7e251de8a7 100644 --- a/programs/genesis_util/genesis_update.cpp +++ b/programs/genesis_util/genesis_update.cpp @@ -30,11 +30,9 @@ #include #include #include -#include #include #include -#include #include #include @@ -108,7 +106,7 @@ int main( int argc, char** argv ) std::cerr << "update_genesis: Reading genesis from file " << genesis_json_filename.preferred_string() << "\n"; std::string genesis_json; read_file_contents( genesis_json_filename, genesis_json ); - genesis = fc::json::from_string( genesis_json ).as< genesis_state_type >(); + genesis = fc::json::from_string( genesis_json ).as< genesis_state_type >(20); } else { @@ -116,9 +114,9 @@ int main( int argc, char** argv ) genesis = graphene::app::detail::create_example_genesis(); } - std::string dev_key_prefix = options["dev-key-prefix"].as(); + const std::string dev_key_prefix = options["dev-key-prefix"].as(); - auto get_dev_key = [&]( std::string prefix, uint32_t i ) -> public_key_type + auto get_dev_key = [&dev_key_prefix]( std::string 
prefix, uint32_t i ) { return fc::ecc::private_key::regenerate( fc::sha256::hash( dev_key_prefix + prefix + std::to_string(i) ) ).get_public_key(); }; diff --git a/programs/genesis_util/get_dev_key.cpp b/programs/genesis_util/get_dev_key.cpp index c82e6a601f..ea7cdf9f0e 100644 --- a/programs/genesis_util/get_dev_key.cpp +++ b/programs/genesis_util/get_dev_key.cpp @@ -70,9 +70,9 @@ int main( int argc, char** argv ) bool comma = false; - auto show_key = [&]( const fc::ecc::private_key& priv_key ) + auto show_key = [&comma]( const fc::ecc::private_key& priv_key ) { - fc::mutable_variant_object mvo; + fc::limited_mutable_variant_object mvo(5); graphene::chain::public_key_type pub_key = priv_key.get_public_key(); mvo( "private_key", graphene::utilities::key_to_wif( priv_key ) ) ( "public_key", std::string( pub_key ) ) @@ -80,7 +80,7 @@ int main( int argc, char** argv ) ; if( comma ) std::cout << ",\n"; - std::cout << fc::json::to_string( mvo ); + std::cout << fc::json::to_string( fc::mutable_variant_object(mvo) ); comma = true; }; @@ -90,7 +90,7 @@ int main( int argc, char** argv ) { std::string arg = argv[i]; std::string prefix; - int lep = -1, rep; + int lep = -1, rep = -1; auto dash_pos = arg.rfind('-'); if( dash_pos != string::npos ) { @@ -104,7 +104,6 @@ int main( int argc, char** argv ) rep = std::stoi( rhs.substr( colon_pos+1 ) ); } } - vector< fc::ecc::private_key > keys; if( lep >= 0 ) { for( int k=lep; k +#include #include #include @@ -37,7 +37,6 @@ #include #include -#include #include using namespace graphene::chain; @@ -110,7 +109,6 @@ struct js_name> template struct js_name> { static std::string name(){ return "bytes "+ fc::to_string(N); }; }; template struct js_name> { static std::string name(){ return "bytes "+ fc::to_string(N); }; }; template struct js_name< fc::optional > { static std::string name(){ return "optional " + js_name::name(); } }; -template struct js_name< fc::smart_ref > { static std::string name(){ return js_name::name(); } }; template<> struct js_name< object_id_type > { static std::string name(){ return "object_id_type"; } }; template struct js_name< fc::flat_set > { static std::string name(){ return "set " + js_name::name(); } }; template struct js_name< std::vector > { static std::string name(){ return "array " + js_name::name(); } }; @@ -121,8 +119,7 @@ template<> struct js_name< std::vector > { static std::string name(){ retu template<> struct js_name { static std::string name(){ return "bytes 20"; } }; template<> struct js_name { static std::string name(){ return "bytes 28"; } }; template<> struct js_name { static std::string name(){ return "bytes 32"; } }; -template<> struct js_name { static std::string name(){ return "varuint32"; } }; -template<> struct js_name { static std::string name(){ return "varint32"; } }; +template<> struct js_name { static std::string name(){ return "varuint64"; } }; template<> struct js_name< vote_id_type > { static std::string name(){ return "vote_id"; } }; template<> struct js_name< time_point_sec > { static std::string name(){ return "time_point_sec"; } }; @@ -238,14 +235,6 @@ struct serializer,false> static void generate() {} }; -template -struct serializer,false> -{ - static void init() { - serializer::init(); } - static void generate() {} -}; - template<> struct serializer,false> { diff --git a/programs/network_mapper/CMakeLists.txt b/programs/network_mapper/CMakeLists.txt new file mode 100644 index 0000000000..7d3326c1eb --- /dev/null +++ b/programs/network_mapper/CMakeLists.txt @@ -0,0 +1,3 @@ +add_executable( 
network_mapper network_mapper.cpp ) +target_link_libraries( network_mapper fc graphene_chain graphene_net ) + diff --git a/programs/network_mapper/network_mapper.cpp b/programs/network_mapper/network_mapper.cpp new file mode 100644 index 0000000000..2ecc724d8c --- /dev/null +++ b/programs/network_mapper/network_mapper.cpp @@ -0,0 +1,317 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +class peer_probe : public graphene::net::peer_connection_delegate +{ +public: + bool _peer_closed_connection; + bool _we_closed_connection; + graphene::net::peer_connection_ptr _connection; + std::vector _peers; + fc::ecc::public_key _node_id; + fc::ip::endpoint _remote; + bool _connection_was_rejected; + bool _done; + fc::promise::ptr _probe_complete_promise; + +public: + peer_probe() : + _peer_closed_connection(false), + _we_closed_connection(false), + _connection(graphene::net::peer_connection::make_shared(this)), + _connection_was_rejected(false), + _done(false), + _probe_complete_promise(fc::promise::ptr(new fc::promise("probe_complete"))) + {} + + void start(const fc::ip::endpoint& endpoint_to_probe, + const fc::ecc::private_key& my_node_id, + const graphene::chain::chain_id_type& chain_id) + { + _remote = endpoint_to_probe; + fc::future connect_task = fc::async([this](){ _connection->connect_to(_remote); }, "connect_task"); + try + { + connect_task.wait(fc::seconds(10)); + } + catch (const fc::timeout_exception&) + { + ilog("timeout connecting to node ${endpoint}", ("endpoint", endpoint_to_probe)); + connect_task.cancel(__FUNCTION__); + throw; + } + + fc::sha256::encoder shared_secret_encoder; + fc::sha512 shared_secret = _connection->get_shared_secret(); + shared_secret_encoder.write(shared_secret.data(), sizeof(shared_secret)); + fc::ecc::compact_signature signature = my_node_id.sign_compact(shared_secret_encoder.result()); + + graphene::net::hello_message hello("network_mapper", + GRAPHENE_NET_PROTOCOL_VERSION, + fc::ip::address(), 0, 0, + my_node_id.get_public_key(), + signature, + chain_id, + fc::variant_object()); + + _connection->send_message(hello); + } + + void on_message(graphene::net::peer_connection* originating_peer, + const graphene::net::message& received_message) override + { + graphene::net::message_hash_type message_hash = received_message.id(); + dlog( "handling message ${type} ${hash} size ${size} from peer ${endpoint}", + ( "type", graphene::net::core_message_type_enum(received_message.msg_type ) )("hash", message_hash )("size", received_message.size )("endpoint", originating_peer->get_remote_endpoint() ) ); + switch ( received_message.msg_type ) + { + case graphene::net::core_message_type_enum::hello_message_type: + on_hello_message( originating_peer, received_message.as() ); + break; + case graphene::net::core_message_type_enum::connection_accepted_message_type: + on_connection_accepted_message( originating_peer, received_message.as() ); + break; + case graphene::net::core_message_type_enum::connection_rejected_message_type: + on_connection_rejected_message( originating_peer, received_message.as() ); + break; + case graphene::net::core_message_type_enum::address_request_message_type: + on_address_request_message( originating_peer, received_message.as() ); + break; + case graphene::net::core_message_type_enum::address_message_type: + on_address_message( originating_peer, received_message.as() ); + break; + case graphene::net::core_message_type_enum::closing_connection_message_type: + 
on_closing_connection_message( originating_peer, received_message.as() ); + break; + default: + break; + } + } + + void on_hello_message(graphene::net::peer_connection* originating_peer, + const graphene::net::hello_message& hello_message_received) + { + _node_id = hello_message_received.node_public_key; + if (hello_message_received.user_data.contains("node_id")) + originating_peer->node_id = hello_message_received.user_data["node_id"].as( 1 ); + originating_peer->send_message(graphene::net::connection_rejected_message()); + } + + void on_connection_accepted_message(graphene::net::peer_connection* originating_peer, + const graphene::net::connection_accepted_message& connection_accepted_message_received) + { + _connection_was_rejected = false; + originating_peer->send_message(graphene::net::address_request_message()); + } + + void on_connection_rejected_message( graphene::net::peer_connection* originating_peer, + const graphene::net::connection_rejected_message& connection_rejected_message_received ) + { + _connection_was_rejected = true; + originating_peer->send_message(graphene::net::address_request_message()); + } + + void on_address_request_message(graphene::net::peer_connection* originating_peer, + const graphene::net::address_request_message& address_request_message_received) + { + originating_peer->send_message(graphene::net::address_message()); + } + + + void on_address_message(graphene::net::peer_connection* originating_peer, + const graphene::net::address_message& address_message_received) + { + _peers = address_message_received.addresses; + originating_peer->send_message(graphene::net::closing_connection_message("Thanks for the info")); + _we_closed_connection = true; + } + + void on_closing_connection_message(graphene::net::peer_connection* originating_peer, + const graphene::net::closing_connection_message& closing_connection_message_received) + { + if (_we_closed_connection) + _connection->close_connection(); + else + _peer_closed_connection = true; + } + + void on_connection_closed(graphene::net::peer_connection* originating_peer) override + { + _done = true; + _probe_complete_promise->set_value(); + } + + graphene::net::message get_message_for_item(const graphene::net::item_id& item) override + { + return graphene::net::item_not_available_message(item); + } + + void wait( const fc::microseconds& timeout_us ) + { + _probe_complete_promise->wait( timeout_us ); + } +}; + +int main(int argc, char** argv) +{ + std::queue nodes_to_visit; + std::set nodes_to_visit_set; + std::set nodes_already_visited; + + if ( argc < 3 ) { + std::cerr << "Usage: " << argv[0] << " [ ...]\n"; + exit(1); + } + + const graphene::chain::chain_id_type chain_id( argv[1] ); + for ( int i = 2; i < argc; i++ ) + { + std::string ep(argv[i]); + uint16_t port; + auto pos = ep.find(':'); + if (pos > 0) + port = boost::lexical_cast( ep.substr( pos+1, ep.size() ) ); + else + port = 1776; + for (const auto& addr : fc::resolve( ep.substr( 0, pos > 0 ? 
pos : ep.size() ), port )) + nodes_to_visit.push( addr ); + } + + fc::path data_dir = fc::temp_directory_path() / ("network_map_" + (fc::string) chain_id); + fc::create_directories(data_dir); + + fc::ip::endpoint seed_node1 = nodes_to_visit.front(); + + fc::ecc::private_key my_node_id = fc::ecc::private_key::generate(); + std::map address_info_by_node_id; + std::map > connections_by_node_id; + std::vector> probes; + + while (!nodes_to_visit.empty() || !probes.empty()) + { + while (!nodes_to_visit.empty()) + { + fc::ip::endpoint remote = nodes_to_visit.front(); + nodes_to_visit.pop(); + nodes_to_visit_set.erase( remote ); + nodes_already_visited.insert( remote ); + + try + { + std::shared_ptr probe(new peer_probe()); + probe->start(remote, my_node_id, chain_id); + probes.emplace_back( std::move( probe ) ); + } + catch (const fc::exception&) + { + std::cerr << "Failed to connect " << fc::string(remote) << " - skipping!" << std::endl; + } + } + + if (!probes.empty()) + { + fc::yield(); + std::vector> running; + for ( auto& probe : probes ) { + if (probe->_probe_complete_promise->error()) + { + std::cerr << fc::string(probe->_remote) << " ran into an error!\n"; + continue; + } + if (!probe->_probe_complete_promise->ready()) + { + running.push_back( probe ); + continue; + } + + if( probe->_node_id.valid() ) + { + graphene::net::address_info this_node_info; + this_node_info.direction = graphene::net::peer_connection_direction::outbound; + this_node_info.firewalled = graphene::net::firewalled_state::not_firewalled; + this_node_info.remote_endpoint = probe->_remote; + this_node_info.node_id = probe->_node_id; + + connections_by_node_id[this_node_info.node_id] = probe->_peers; + if (address_info_by_node_id.find(this_node_info.node_id) == address_info_by_node_id.end()) + address_info_by_node_id[this_node_info.node_id] = this_node_info; + } + + for (const graphene::net::address_info& info : probe->_peers) + { + if (nodes_already_visited.find(info.remote_endpoint) == nodes_already_visited.end() && + info.firewalled == graphene::net::firewalled_state::not_firewalled && + nodes_to_visit_set.find(info.remote_endpoint) == nodes_to_visit_set.end()) + { + nodes_to_visit.push(info.remote_endpoint); + nodes_to_visit_set.insert(info.remote_endpoint); + } + if (address_info_by_node_id.find(info.node_id) == address_info_by_node_id.end()) + address_info_by_node_id[info.node_id] = info; + } + } + probes = std::move( running ); + std::cout << address_info_by_node_id.size() << " checked, " + << probes.size() << " active, " + << nodes_to_visit.size() << " to do\n"; + } + } + + graphene::net::node_id_t seed_node_id; + std::set non_firewalled_nodes_set; + for (const auto& address_info_for_node : address_info_by_node_id) + { + if (address_info_for_node.second.remote_endpoint == seed_node1) + seed_node_id = address_info_for_node.first; + if (address_info_for_node.second.firewalled == graphene::net::firewalled_state::not_firewalled) + non_firewalled_nodes_set.insert(address_info_for_node.first); + } + std::set seed_node_connections; + for (const graphene::net::address_info& info : connections_by_node_id[seed_node_id]) + seed_node_connections.insert(info.node_id); + std::set seed_node_missing_connections; + std::set_difference(non_firewalled_nodes_set.begin(), non_firewalled_nodes_set.end(), + seed_node_connections.begin(), seed_node_connections.end(), + std::inserter(seed_node_missing_connections, seed_node_missing_connections.end())); + seed_node_missing_connections.erase(seed_node_id); + + std::ofstream 
dot_stream((data_dir / "network_graph.dot").string().c_str()); + + dot_stream << "graph G {\n"; + dot_stream << " // Total " << address_info_by_node_id.size() << " nodes, firewalled: " << (address_info_by_node_id.size() - non_firewalled_nodes_set.size()) + << ", non-firewalled: " << non_firewalled_nodes_set.size() << "\n"; + dot_stream << " // Seed node is " << (std::string)address_info_by_node_id[seed_node_id].remote_endpoint << " id: " << fc::variant( seed_node_id, 1 ).as_string() << "\n"; + dot_stream << " // Seed node is connected to " << connections_by_node_id[seed_node_id].size() << " nodes\n"; + dot_stream << " // Seed node is missing connections to " << seed_node_missing_connections.size() << " non-firewalled nodes:\n"; + for (const graphene::net::node_id_t& id : seed_node_missing_connections) + dot_stream << " // " << (std::string)address_info_by_node_id[id].remote_endpoint << "\n"; + + dot_stream << " layout=\"circo\";\n"; + + for (const auto& address_info_for_node : address_info_by_node_id) + { + dot_stream << " \"" << fc::variant( address_info_for_node.first, 1 ).as_string() << "\"[label=\"" << (std::string)address_info_for_node.second.remote_endpoint << "\""; + if (address_info_for_node.second.firewalled != graphene::net::firewalled_state::not_firewalled) + dot_stream << ",shape=rectangle"; + dot_stream << "];\n"; + } + for (auto& node_and_connections : connections_by_node_id) + for (const graphene::net::address_info& this_connection : node_and_connections.second) + dot_stream << " \"" << fc::variant( node_and_connections.first, 2 ).as_string() << "\" -- \"" << fc::variant( this_connection.node_id, 1 ).as_string() << "\";\n"; + + dot_stream << "}\n"; + + return 0; +} diff --git a/programs/size_checker/main.cpp b/programs/size_checker/main.cpp index de071cfcf4..72d7d85f85 100644 --- a/programs/size_checker/main.cpp +++ b/programs/size_checker/main.cpp @@ -23,11 +23,11 @@ */ #include -#include #include #include -#include +#include +#include #include #include diff --git a/programs/witness_node/CMakeLists.txt b/programs/witness_node/CMakeLists.txt index 0509a0af79..4815879a40 100644 --- a/programs/witness_node/CMakeLists.txt +++ b/programs/witness_node/CMakeLists.txt @@ -11,7 +11,8 @@ endif() # We have to link against graphene_debug_witness because deficiency in our API infrastructure doesn't allow plugins to be fully abstracted #246 target_link_libraries( witness_node - PRIVATE graphene_app graphene_account_history graphene_market_history graphene_witness graphene_chain graphene_debug_witness graphene_egenesis_full fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) + +PRIVATE graphene_app graphene_delayed_node graphene_account_history graphene_elasticsearch graphene_market_history graphene_grouped_orders graphene_witness graphene_chain graphene_debug_witness graphene_egenesis_full graphene_snapshot graphene_es_objects fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) install( TARGETS witness_node diff --git a/programs/witness_node/main.cpp b/programs/witness_node/main.cpp index 258a46c85d..cc10ecca01 100644 --- a/programs/witness_node/main.cpp +++ b/programs/witness_node/main.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. + * Copyright (c) 2015-2017 Cryptonomex, Inc., and contributors. * * The MIT License * @@ -22,29 +22,31 @@ * THE SOFTWARE. 
*/ #include +#include #include +#include #include +#include #include +#include +#include +#include +#include -#include #include #include -#include -#include -#include -#include #include - #include -#include -#include -#include -#include +#include +#include + +#include +#include +#include #include -#include #ifdef WIN32 # include @@ -54,9 +56,6 @@ using namespace graphene; namespace bpo = boost::program_options; - -void write_default_logging_config_to_stream(std::ostream& out); -fc::optional load_logging_config_from_ini_file(const fc::path& config_ini_filename); int main(int argc, char** argv) { app::application* node = new app::application(); @@ -66,14 +65,23 @@ int main(int argc, char** argv) { bpo::options_description cfg_options("Graphene Witness Node"); app_options.add_options() ("help,h", "Print this help message and exit.") - ("data-dir,d", bpo::value()->default_value("witness_node_data_dir"), "Directory containing databases, configuration file, etc.") - ; + ("data-dir,d", bpo::value()->default_value("witness_node_data_dir"), + "Directory containing databases, configuration file, etc.") + ("version,v", "Display version information") + ("plugins", bpo::value()->default_value("witness account_history market_history grouped_orders"), + "Space-separated list of plugins to activate"); bpo::variables_map options; auto witness_plug = node->register_plugin(); + auto debug_witness_plug = node->register_plugin(); auto history_plug = node->register_plugin(); + auto elasticsearch_plug = node->register_plugin(); auto market_history_plug = node->register_plugin(); + auto delayed_plug = node->register_plugin(); + auto snapshot_plug = node->register_plugin(); + auto es_objects_plug = node->register_plugin(); + auto grouped_orders_plug = node->register_plugin(); try { @@ -85,15 +93,39 @@ int main(int argc, char** argv) { } catch (const boost::program_options::error& e) { - std::cerr << "Error parsing command line: " << e.what() << "\n"; - return 1; + std::cerr << "Error parsing command line: " << e.what() << "\n"; + return 1; + } + + std::set plugins; + boost::split(plugins, options.at("plugins").as(), [](char c){return c == ' ';}); + + if(plugins.count("account_history") && plugins.count("elasticsearch")) { + std::cerr << "Plugin conflict: Cannot load both account_history plugin and elasticsearch plugin\n"; + return 1; } + std::for_each(plugins.begin(), plugins.end(), [node](const std::string& plug) mutable { + if (!plug.empty()) { + node->enable_plugin(plug); + } + }); + if( options.count("help") ) { std::cout << app_options << "\n"; return 0; } + if( options.count("version") ) + { + std::cout << "Version: " << graphene::utilities::git_revision_description << "\n"; + std::cout << "SHA: " << graphene::utilities::git_revision_sha << "\n"; + std::cout << "Timestamp: " << fc::get_approximate_relative_time_string(fc::time_point_sec(graphene::utilities::git_revision_unix_timestamp)) << "\n"; + std::cout << "SSL: " << OPENSSL_VERSION_TEXT << "\n"; + std::cout << "Boost: " << boost::replace_all_copy(std::string(BOOST_LIB_VERSION), "_", ".") << "\n"; + std::cout << "Websocket++: " << websocketpp::major_version << "." << websocketpp::minor_version << "." 
<< websocketpp::patch_version << "\n"; + return 0; + } fc::path data_dir; if( options.count("data-dir") ) @@ -102,62 +134,10 @@ int main(int argc, char** argv) { if( data_dir.is_relative() ) data_dir = fc::current_path() / data_dir; } - - fc::path config_ini_path = data_dir / "config.ini"; - if( fc::exists(config_ini_path) ) - { - // get the basic options - bpo::store(bpo::parse_config_file(config_ini_path.preferred_string().c_str(), cfg_options, true), options); - - // try to get logging options from the config file. - try - { - fc::optional logging_config = load_logging_config_from_ini_file(config_ini_path); - if (logging_config) - fc::configure_logging(*logging_config); - } - catch (const fc::exception&) - { - wlog("Error parsing logging config from config file ${config}, using default config", ("config", config_ini_path.preferred_string())); - } - } - else - { - ilog("Writing new config file at ${path}", ("path", config_ini_path)); - if( !fc::exists(data_dir) ) - fc::create_directories(data_dir); - - std::ofstream out_cfg(config_ini_path.preferred_string()); - for( const boost::shared_ptr od : cfg_options.options() ) - { - if( !od->description().empty() ) - out_cfg << "# " << od->description() << "\n"; - boost::any store; - if( !od->semantic()->apply_default(store) ) - out_cfg << "# " << od->long_name() << " = \n"; - else { - auto example = od->format_parameter(); - if( example.empty() ) - // This is a boolean switch - out_cfg << od->long_name() << " = " << "false\n"; - else { - // The string is formatted "arg (=)" - example.erase(0, 6); - example.erase(example.length()-1); - out_cfg << od->long_name() << " = " << example << "\n"; - } - } - out_cfg << "\n"; - } - write_default_logging_config_to_stream(out_cfg); - out_cfg.close(); - // read the default logging config we just wrote out to the file and start using it - fc::optional logging_config = load_logging_config_from_ini_file(config_ini_path); - if (logging_config) - fc::configure_logging(*logging_config); - } + app::load_configuration_options(data_dir, cfg_options, options); bpo::notify(options); + node->initialize(data_dir, options); node->initialize_plugins( options ); @@ -176,7 +156,7 @@ int main(int argc, char** argv) { exit_promise->set_value(signal); }, SIGTERM); - ilog("Started witness node on a chain with ${h} blocks.", ("h", node->chain_database()->head_block_num())); + ilog("Started BitShares node on a chain with ${h} blocks.", ("h", node->chain_database()->head_block_num())); ilog("Chain ID is ${id}", ("id", node->chain_database()->get_chain_id()) ); int signal = exit_promise->wait(); @@ -184,7 +164,7 @@ int main(int argc, char** argv) { node->shutdown_plugins(); node->shutdown(); delete node; - return 0; + return EXIT_SUCCESS; } catch( const fc::exception& e ) { // deleting the node can yield, so do this outside the exception handler unhandled_exception = e; @@ -195,113 +175,7 @@ int main(int argc, char** argv) { elog("Exiting with error:\n${e}", ("e", unhandled_exception->to_detail_string())); node->shutdown(); delete node; - return 1; + return EXIT_FAILURE; } } -// logging config is too complicated to be parsed by boost::program_options, -// so we do it by hand -// -// Currently, you can only specify the filenames and logging levels, which -// are all most users would want to change. 
At a later time, options can -// be added to control rotation intervals, compression, and other seldom- -// used features -void write_default_logging_config_to_stream(std::ostream& out) -{ - out << "# declare an appender named \"stderr\" that writes messages to the console\n" - "[log.console_appender.stderr]\n" - "stream=std_error\n\n" - "# declare an appender named \"p2p\" that writes messages to p2p.log\n" - "[log.file_appender.p2p]\n" - "filename=logs/p2p/p2p.log\n" - "# filename can be absolute or relative to this config file\n\n" - "# route any messages logged to the default logger to the \"stderr\" logger we\n" - "# declared above, if they are info level are higher\n" - "[logger.default]\n" - "level=info\n" - "appenders=stderr\n\n" - "# route messages sent to the \"p2p\" logger to the p2p appender declared above\n" - "[logger.p2p]\n" - "level=debug\n" - "appenders=p2p\n\n"; -} - -fc::optional load_logging_config_from_ini_file(const fc::path& config_ini_filename) -{ - try - { - fc::logging_config logging_config; - bool found_logging_config = false; - - boost::property_tree::ptree config_ini_tree; - boost::property_tree::ini_parser::read_ini(config_ini_filename.preferred_string().c_str(), config_ini_tree); - for (const auto& section : config_ini_tree) - { - const std::string& section_name = section.first; - const boost::property_tree::ptree& section_tree = section.second; - - const std::string console_appender_section_prefix = "log.console_appender."; - const std::string file_appender_section_prefix = "log.file_appender."; - const std::string logger_section_prefix = "logger."; - - if (boost::starts_with(section_name, console_appender_section_prefix)) - { - std::string console_appender_name = section_name.substr(console_appender_section_prefix.length()); - std::string stream_name = section_tree.get("stream"); - - // construct a default console appender config here - // stdout/stderr will be taken from ini file, everything else hard-coded here - fc::console_appender::config console_appender_config; - console_appender_config.level_colors.emplace_back( - fc::console_appender::level_color(fc::log_level::debug, - fc::console_appender::color::green)); - console_appender_config.level_colors.emplace_back( - fc::console_appender::level_color(fc::log_level::warn, - fc::console_appender::color::brown)); - console_appender_config.level_colors.emplace_back( - fc::console_appender::level_color(fc::log_level::error, - fc::console_appender::color::cyan)); - console_appender_config.stream = fc::variant(stream_name).as(); - logging_config.appenders.push_back(fc::appender_config(console_appender_name, "console", fc::variant(console_appender_config))); - found_logging_config = true; - } - else if (boost::starts_with(section_name, file_appender_section_prefix)) - { - std::string file_appender_name = section_name.substr(file_appender_section_prefix.length()); - fc::path file_name = section_tree.get("filename"); - if (file_name.is_relative()) - file_name = fc::absolute(config_ini_filename).parent_path() / file_name; - - - // construct a default file appender config here - // filename will be taken from ini file, everything else hard-coded here - fc::file_appender::config file_appender_config; - file_appender_config.filename = file_name; - file_appender_config.flush = true; - file_appender_config.rotate = true; - file_appender_config.rotation_interval = fc::hours(1); - file_appender_config.rotation_limit = fc::days(1); - logging_config.appenders.push_back(fc::appender_config(file_appender_name, "file", 
fc::variant(file_appender_config))); - found_logging_config = true; - } - else if (boost::starts_with(section_name, logger_section_prefix)) - { - std::string logger_name = section_name.substr(logger_section_prefix.length()); - std::string level_string = section_tree.get("level"); - std::string appenders_string = section_tree.get("appenders"); - fc::logger_config logger_config(logger_name); - logger_config.level = fc::variant(level_string).as(); - boost::split(logger_config.appenders, appenders_string, - boost::is_any_of(" ,"), - boost::token_compress_on); - logging_config.loggers.push_back(logger_config); - found_logging_config = true; - } - } - if (found_logging_config) - return logging_config; - else - return fc::optional(); - } - FC_RETHROW_EXCEPTIONS(warn, "") -} diff --git a/sonar-project.properties b/sonar-project.properties new file mode 100644 index 0000000000..a4487be06e --- /dev/null +++ b/sonar-project.properties @@ -0,0 +1,14 @@ +sonar.projectKey=BitShares_Core +sonar.projectName=BitShares Core + +sonar.links.homepage=https://bitshares.org +sonar.links.ci=https://travis-ci.org/bitshares/bitshares-core/ +sonar.links.issue=https://github.com/bitshares/bitshares-core/issues +sonar.links.scm=https://github.com/bitshares/bitshares-core/tree/master + +sonar.tests=tests +sonar.exclusions=programs/build_helper/**/*,libraries/fc/**/*,libraries/egenesis/egenesis_full.cpp +sonar.sources=libraries,programs +sonar.cfamily.build-wrapper-output=bw-output +sonar.cfamily.gcov.reportsPath=. +sonar.cfamily.threads=2 diff --git a/testnet-shared-accounts.txt b/testnet-shared-accounts.txt deleted file mode 100644 index 99392365ca..0000000000 --- a/testnet-shared-accounts.txt +++ /dev/null @@ -1,556 +0,0 @@ - "initial_accounts": [{ - "name": "init0", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init1", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init2", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init3", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init4", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init5", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init6", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init7", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init8", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": 
"init9", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init10", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init11", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init12", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init13", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init14", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init15", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init16", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init17", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init18", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init19", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init20", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init21", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init22", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init23", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init24", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init25", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init26", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", 
- "is_lifetime_member": true - },{ - "name": "init27", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init28", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init29", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init30", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init31", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init32", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init33", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init34", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init35", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init36", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init37", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init38", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init39", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init40", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init41", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init42", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init43", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init44", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": 
"GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init45", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init46", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init47", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init48", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init49", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init50", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init51", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init52", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init53", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init54", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init55", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init56", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init57", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init58", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init59", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init60", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init61", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init62", - "owner_key": 
"GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init63", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init64", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init65", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init66", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init67", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init68", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init69", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init70", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init71", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init72", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init73", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init74", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init75", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init76", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init77", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init78", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init79", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": 
true - },{ - "name": "init80", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init81", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init82", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init83", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init84", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init85", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init86", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init87", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init88", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init89", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init90", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init91", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init92", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init93", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init94", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init95", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init96", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init97", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": 
"GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init98", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init99", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "init100", - "owner_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "active_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", - "is_lifetime_member": true - },{ - "name": "dummy0", - "owner_key": "BTS6qkMe8pHmQ4zUetLV1bbVKoQJYTNb1fSUbkQzuzpscYhonWpgk", - "active_key": "BTS6qkMe8pHmQ4zUetLV1bbVKoQJYTNb1fSUbkQzuzpscYhonWpgk", - "is_lifetime_member": true - },{ - "name": "dummy1", - "owner_key": "BTS7wXsTzBBR2QEetjrgxcSmN7Kuzey3RAzQWNNHwbPQsKYxkP6fp", - "active_key": "BTS7wXsTzBBR2QEetjrgxcSmN7Kuzey3RAzQWNNHwbPQsKYxkP6fp", - "is_lifetime_member": true - },{ - "name": "dummy2", - "owner_key": "BTS7rzifzfJxS8RWhev9aU8HDYoJi1EGwJRHG9B2fJKxnZAiF2Rsh", - "active_key": "BTS7rzifzfJxS8RWhev9aU8HDYoJi1EGwJRHG9B2fJKxnZAiF2Rsh", - "is_lifetime_member": true - },{ - "name": "dummy3", - "owner_key": "BTS6QZdcwFEFMtHsfW27YBGTv9KLaLTvgx5wgGrPHeUxDTrYEQJ2d", - "active_key": "BTS6QZdcwFEFMtHsfW27YBGTv9KLaLTvgx5wgGrPHeUxDTrYEQJ2d", - "is_lifetime_member": true - },{ - "name": "dummy4", - "owner_key": "BTS7q5MqhSP2a6CRTWaJk77ZcGdpnv14JbT4cVzbXaoAsWJoCxFJG", - "active_key": "BTS7q5MqhSP2a6CRTWaJk77ZcGdpnv14JbT4cVzbXaoAsWJoCxFJG", - "is_lifetime_member": true - },{ - "name": "dummy5", - "owner_key": "BTS5sRXxgDCnteHLUS623xtxJM5WKKVygwDMzEso6LigwxvprJqBA", - "active_key": "BTS5sRXxgDCnteHLUS623xtxJM5WKKVygwDMzEso6LigwxvprJqBA", - "is_lifetime_member": true - },{ - "name": "dummy6", - "owner_key": "BTS5V4HEQJbVbMjUWASeknQ42NT3NP9bZaygt83XMuvy6v4QMJuSP", - "active_key": "BTS5V4HEQJbVbMjUWASeknQ42NT3NP9bZaygt83XMuvy6v4QMJuSP", - "is_lifetime_member": true - },{ - "name": "dummy7", - "owner_key": "BTS86ukuPAufzKouerZf1dCxjVSmxQPA5kLwvnYEjn9GRqi5qXBop", - "active_key": "BTS86ukuPAufzKouerZf1dCxjVSmxQPA5kLwvnYEjn9GRqi5qXBop", - "is_lifetime_member": true - },{ - "name": "dummy8", - "owner_key": "BTS7Sdg3kQuz2pPT8mA8Yr3mkBe7zr6293mnBmoR36z9xxtRdiMmJ", - "active_key": "BTS7Sdg3kQuz2pPT8mA8Yr3mkBe7zr6293mnBmoR36z9xxtRdiMmJ", - "is_lifetime_member": true - },{ - "name": "dummy9", - "owner_key": "BTS5WCj1mMiiqEE4QRs7xhaFfSaiFroejUp3GuZE9wvfue9nxhPPn", - "active_key": "BTS5WCj1mMiiqEE4QRs7xhaFfSaiFroejUp3GuZE9wvfue9nxhPPn", - "is_lifetime_member": true - },{ diff --git a/testnet-shared-balances.txt b/testnet-shared-balances.txt deleted file mode 100644 index dc9061fa77..0000000000 --- a/testnet-shared-balances.txt +++ /dev/null @@ -1,41 +0,0 @@ - "initial_balances": [{ - "owner": "BTSHYhQcrjVg5kBzCoeeD38eQdncCC5pBgee", - "asset_symbol": "CORE", - "amount": 100000000000 - },{ - "owner": "BTSPgQZg5929ht1NBdEvsGKqoQ7buRu3nKf4", - "asset_symbol": "CORE", - "amount": 100000000000 - },{ - "owner": "BTSC9zrLXSAPUQaVmQPk1S9dMqSzT7jPqYU7", - "asset_symbol": "CORE", - "amount": 100000000000 - },{ - "owner": "BTS93aQPtbbkXwaSjtHaREsNVcCvbfHo93aZ", - "asset_symbol": "CORE", - "amount": 100000000000 - },{ - "owner": "BTS6RM4UfsYFPDuhbmgkvDS9ip8Kvqundvyk", - "asset_symbol": "CORE", - "amount": 100000000000 - },{ - "owner": "BTSNVkZXdqWWSzqHVxvfetMe347is6kEkC4K", - "asset_symbol": "CORE", - "amount": 100000000000 - 
},{ - "owner": "BTS5GHzWZ64Luoajqsz6JGjTKVMgWYkGV9SQ", - "asset_symbol": "CORE", - "amount": 100000000000 - },{ - "owner": "BTSDCVRFez92bW9doRLjnFCKLJnpM58mgmMb", - "asset_symbol": "CORE", - "amount": 100000000000 - },{ - "owner": "BTS5CCdX3JYLBptYMuCjbsezqGYzN9vG9JCu", - "asset_symbol": "CORE", - "amount": 100000000000 - },{ - "owner": "BTSEQ3yQdr3EMDL2eRqGiceMCpoanaW16Puw", - "asset_symbol": "CORE", - "amount": 100000000000 - },{ diff --git a/testnet-shared-committee-members.txt b/testnet-shared-committee-members.txt deleted file mode 100644 index 7d7ae11b04..0000000000 --- a/testnet-shared-committee-members.txt +++ /dev/null @@ -1,204 +0,0 @@ - "initial_committee_candidates": [{ - "owner_name": "init0" - },{ - "owner_name": "init1" - },{ - "owner_name": "init2" - },{ - "owner_name": "init3" - },{ - "owner_name": "init4" - },{ - "owner_name": "init5" - },{ - "owner_name": "init6" - },{ - "owner_name": "init7" - },{ - "owner_name": "init8" - },{ - "owner_name": "init9" - },{ - "owner_name": "init10" - },{ - "owner_name": "init11" - },{ - "owner_name": "init12" - },{ - "owner_name": "init13" - },{ - "owner_name": "init14" - },{ - "owner_name": "init15" - },{ - "owner_name": "init16" - },{ - "owner_name": "init17" - },{ - "owner_name": "init18" - },{ - "owner_name": "init19" - },{ - "owner_name": "init20" - },{ - "owner_name": "init21" - },{ - "owner_name": "init22" - },{ - "owner_name": "init23" - },{ - "owner_name": "init24" - },{ - "owner_name": "init25" - },{ - "owner_name": "init26" - },{ - "owner_name": "init27" - },{ - "owner_name": "init28" - },{ - "owner_name": "init29" - },{ - "owner_name": "init30" - },{ - "owner_name": "init31" - },{ - "owner_name": "init32" - },{ - "owner_name": "init33" - },{ - "owner_name": "init34" - },{ - "owner_name": "init35" - },{ - "owner_name": "init36" - },{ - "owner_name": "init37" - },{ - "owner_name": "init38" - },{ - "owner_name": "init39" - },{ - "owner_name": "init40" - },{ - "owner_name": "init41" - },{ - "owner_name": "init42" - },{ - "owner_name": "init43" - },{ - "owner_name": "init44" - },{ - "owner_name": "init45" - },{ - "owner_name": "init46" - },{ - "owner_name": "init47" - },{ - "owner_name": "init48" - },{ - "owner_name": "init49" - },{ - "owner_name": "init50" - },{ - "owner_name": "init51" - },{ - "owner_name": "init52" - },{ - "owner_name": "init53" - },{ - "owner_name": "init54" - },{ - "owner_name": "init55" - },{ - "owner_name": "init56" - },{ - "owner_name": "init57" - },{ - "owner_name": "init58" - },{ - "owner_name": "init59" - },{ - "owner_name": "init60" - },{ - "owner_name": "init61" - },{ - "owner_name": "init62" - },{ - "owner_name": "init63" - },{ - "owner_name": "init64" - },{ - "owner_name": "init65" - },{ - "owner_name": "init66" - },{ - "owner_name": "init67" - },{ - "owner_name": "init68" - },{ - "owner_name": "init69" - },{ - "owner_name": "init70" - },{ - "owner_name": "init71" - },{ - "owner_name": "init72" - },{ - "owner_name": "init73" - },{ - "owner_name": "init74" - },{ - "owner_name": "init75" - },{ - "owner_name": "init76" - },{ - "owner_name": "init77" - },{ - "owner_name": "init78" - },{ - "owner_name": "init79" - },{ - "owner_name": "init80" - },{ - "owner_name": "init81" - },{ - "owner_name": "init82" - },{ - "owner_name": "init83" - },{ - "owner_name": "init84" - },{ - "owner_name": "init85" - },{ - "owner_name": "init86" - },{ - "owner_name": "init87" - },{ - "owner_name": "init88" - },{ - "owner_name": "init89" - },{ - "owner_name": "init90" - },{ - "owner_name": "init91" - },{ - "owner_name": 
"init92" - },{ - "owner_name": "init93" - },{ - "owner_name": "init94" - },{ - "owner_name": "init95" - },{ - "owner_name": "init96" - },{ - "owner_name": "init97" - },{ - "owner_name": "init98" - },{ - "owner_name": "init99" - },{ - "owner_name": "init100" - } - ], diff --git a/testnet-shared-private-keys.txt b/testnet-shared-private-keys.txt deleted file mode 100644 index e2a8d7ddc0..0000000000 --- a/testnet-shared-private-keys.txt +++ /dev/null @@ -1,10 +0,0 @@ -5KCNDLVGqvX8p3GcMFun9sMe6XbMvycVTm4bGrkB5aZGWCbAAtr -5HvFQ1bcAWk8H1A2qXj1AqSNp93GUAb6b2w5TVfLb1jWL6yNF3f -5JSxv2kgaBSm9nGseRNhLhgEKTBmoKJ5CkgLbbk5RW4RBCNsLJC -5K5E2TQtrodDFzsqPq3oVFi9rVX15AN8sLE3iTHfVsX1b49y49J -5HxC3fwN7VDZXKVkbbX3SzCczh18Fetx8TXBfJ3z3ovDUSPKvVd -5KSr4w978PDanQDYtftarcfJVvGe4wedYb1sYbdH6HNpi15heRa -5Kan4si6qWvDVpZuqug4c6KQH4zkvDhwspaGQiFKYniJv6qji6t -5KcZri5DDsMcDp1DjNeMkZSijkWurPoAoR7gBKTnnetNQ9CpXoJ -5K5TRZyEhC6GPgi57t5FhiSMRGVTHEbwXngbBEtCA41gM8LPFhF -5KXVG4oP4Vj3RawRpta79UFAg7pWp17FGf4DnrKfkr69ELytDMv diff --git a/testnet-shared-vesting-balances.txt b/testnet-shared-vesting-balances.txt deleted file mode 100644 index 1dd0023014..0000000000 --- a/testnet-shared-vesting-balances.txt +++ /dev/null @@ -1,71 +0,0 @@ - "initial_vesting_balances": [{ - "owner": "BTSHYhQcrjVg5kBzCoeeD38eQdncCC5pBgee", - "asset_symbol": "BTS", - "amount": 50000000000, - "begin_timestamp": "2014-11-06T00:00:00", - "vesting_duration_seconds": 63072000, - "begin_balance": 50000000000 - },{ - "owner": "BTSPgQZg5929ht1NBdEvsGKqoQ7buRu3nKf4", - "asset_symbol": "BTS", - "amount": 50000000000, - "begin_timestamp": "2014-11-06T00:00:00", - "vesting_duration_seconds": 63072000, - "begin_balance": 50000000000 - },{ - "owner": "BTSC9zrLXSAPUQaVmQPk1S9dMqSzT7jPqYU7", - "asset_symbol": "BTS", - "amount": 50000000000, - "begin_timestamp": "2014-11-06T00:00:00", - "vesting_duration_seconds": 63072000, - "begin_balance": 50000000000 - },{ - "owner": "BTS93aQPtbbkXwaSjtHaREsNVcCvbfHo93aZ", - "asset_symbol": "BTS", - "amount": 50000000000, - "begin_timestamp": "2014-11-06T00:00:00", - "vesting_duration_seconds": 63072000, - "begin_balance": 50000000000 - },{ - "owner": "BTS6RM4UfsYFPDuhbmgkvDS9ip8Kvqundvyk", - "asset_symbol": "BTS", - "amount": 50000000000, - "begin_timestamp": "2014-11-06T00:00:00", - "vesting_duration_seconds": 63072000, - "begin_balance": 50000000000 - },{ - "owner": "BTSNVkZXdqWWSzqHVxvfetMe347is6kEkC4K", - "asset_symbol": "BTS", - "amount": 50000000000, - "begin_timestamp": "2014-11-06T00:00:00", - "vesting_duration_seconds": 63072000, - "begin_balance": 50000000000 - },{ - "owner": "BTS5GHzWZ64Luoajqsz6JGjTKVMgWYkGV9SQ", - "asset_symbol": "BTS", - "amount": 50000000000, - "begin_timestamp": "2014-11-06T00:00:00", - "vesting_duration_seconds": 63072000, - "begin_balance": 50000000000 - },{ - "owner": "BTSDCVRFez92bW9doRLjnFCKLJnpM58mgmMb", - "asset_symbol": "BTS", - "amount": 50000000000, - "begin_timestamp": "2014-11-06T00:00:00", - "vesting_duration_seconds": 63072000, - "begin_balance": 50000000000 - },{ - "owner": "BTS5CCdX3JYLBptYMuCjbsezqGYzN9vG9JCu", - "asset_symbol": "BTS", - "amount": 50000000000, - "begin_timestamp": "2014-11-06T00:00:00", - "vesting_duration_seconds": 63072000, - "begin_balance": 50000000000 - },{ - "owner": "BTSEQ3yQdr3EMDL2eRqGiceMCpoanaW16Puw", - "asset_symbol": "BTS", - "amount": 50000000000, - "begin_timestamp": "2014-11-06T00:00:00", - "vesting_duration_seconds": 63072000, - "begin_balance": 50000000000 - },{ diff --git a/testnet-shared-witnesses.txt b/testnet-shared-witnesses.txt deleted file 
mode 100644 index c09b132961..0000000000 --- a/testnet-shared-witnesses.txt +++ /dev/null @@ -1,304 +0,0 @@ - "initial_witness_candidates": [{ - "owner_name": "init0", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init1", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init2", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init3", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init4", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init5", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init6", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init7", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init8", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init9", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init10", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init11", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init12", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init13", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init14", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init15", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init16", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init17", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init18", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init19", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init20", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init21", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init22", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init23", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init24", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init25", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init26", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init27", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init28", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init29", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init30", - "block_signing_key": 
"GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init31", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init32", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init33", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init34", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init35", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init36", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init37", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init38", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init39", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init40", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init41", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init42", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init43", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init44", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init45", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init46", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init47", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init48", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init49", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init50", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init51", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init52", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init53", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init54", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init55", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init56", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init57", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init58", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init59", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init60", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init61", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init62", - "block_signing_key": 
"GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init63", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init64", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init65", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init66", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init67", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init68", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init69", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init70", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init71", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init72", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init73", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init74", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init75", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init76", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init77", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init78", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init79", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init80", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init81", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init82", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init83", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init84", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init85", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init86", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init87", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init88", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init89", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init90", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init91", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init92", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init93", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init94", - "block_signing_key": 
"GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init95", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init96", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init97", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init98", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init99", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ - "owner_name": "init100", - "block_signing_key": "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - },{ diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index b03d58a802..ad7a10e9aa 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -7,26 +7,41 @@ if( GPERFTOOLS_FOUND ) endif() file(GLOB UNIT_TESTS "tests/*.cpp") -add_executable( chain_test ${UNIT_TESTS} ${COMMON_SOURCES} ) -target_link_libraries( chain_test graphene_chain graphene_app graphene_account_history graphene_egenesis_none fc ${PLATFORM_SPECIFIC_LIBS} ) +add_executable( chain_test ${COMMON_SOURCES} ${UNIT_TESTS} ) +target_link_libraries( chain_test graphene_chain graphene_app graphene_witness graphene_account_history graphene_elasticsearch graphene_es_objects graphene_egenesis_none fc graphene_wallet ${PLATFORM_SPECIFIC_LIBS} ) if(MSVC) set_source_files_properties( tests/serialization_tests.cpp PROPERTIES COMPILE_FLAGS "/bigobj" ) + set_source_files_properties( tests/common/database_fixture.cpp PROPERTIES COMPILE_FLAGS "/bigobj" ) endif(MSVC) +file(GLOB SLOW_UNIT_TESTS "slow_tests/*.cpp") +add_executable( slow_chain_test ${SLOW_UNIT_TESTS} ${COMMON_SOURCES} ) +target_link_libraries( slow_chain_test graphene_chain graphene_app graphene_account_history graphene_elasticsearch graphene_es_objects graphene_egenesis_none fc graphene_wallet ${PLATFORM_SPECIFIC_LIBS} ) + file(GLOB PERFORMANCE_TESTS "performance/*.cpp") -add_executable( performance_test ${PERFORMANCE_TESTS} ${COMMON_SOURCES} ) -target_link_libraries( performance_test graphene_chain graphene_app graphene_account_history graphene_egenesis_none fc ${PLATFORM_SPECIFIC_LIBS} ) +add_executable( performance_test ${COMMON_SOURCES} ${PERFORMANCE_TESTS} ) +target_link_libraries( performance_test graphene_chain graphene_app graphene_account_history graphene_elasticsearch graphene_es_objects graphene_egenesis_none fc ${PLATFORM_SPECIFIC_LIBS} ) file(GLOB BENCH_MARKS "benchmarks/*.cpp") -add_executable( chain_bench ${BENCH_MARKS} ${COMMON_SOURCES} ) -target_link_libraries( chain_bench graphene_chain graphene_app graphene_account_history graphene_time graphene_egenesis_none fc ${PLATFORM_SPECIFIC_LIBS} ) +add_executable( chain_bench ${COMMON_SOURCES} ${BENCH_MARKS} ) +target_link_libraries( chain_bench graphene_chain graphene_app graphene_account_history graphene_elasticsearch graphene_es_objects graphene_egenesis_none fc ${PLATFORM_SPECIFIC_LIBS} ) file(GLOB APP_SOURCES "app/*.cpp") add_executable( app_test ${APP_SOURCES} ) -target_link_libraries( app_test graphene_app graphene_account_history graphene_net graphene_chain graphene_time graphene_egenesis_none fc ${PLATFORM_SPECIFIC_LIBS} ) +target_link_libraries( app_test graphene_app graphene_account_history graphene_net graphene_witness graphene_chain graphene_egenesis_none fc ${PLATFORM_SPECIFIC_LIBS} ) + +file(GLOB CLI_SOURCES "cli/*.cpp") +add_executable( cli_test ${CLI_SOURCES} ) +if(WIN32) + 
list(APPEND PLATFORM_SPECIFIC_LIBS ws2_32) +endif() +target_link_libraries( cli_test graphene_app graphene_wallet graphene_witness graphene_account_history graphene_net graphene_chain graphene_egenesis_none fc ${PLATFORM_SPECIFIC_LIBS} ) +if(MSVC) + set_source_files_properties( cli/main.cpp PROPERTIES COMPILE_FLAGS "/bigobj" ) +endif(MSVC) -file(GLOB INTENSE_SOURCES "intense/*.cpp") -add_executable( intense_test ${INTENSE_SOURCES} ${COMMON_SOURCES} ) -target_link_libraries( intense_test graphene_chain graphene_app graphene_account_history graphene_egenesis_none fc ${PLATFORM_SPECIFIC_LIBS} ) +file(GLOB ES_SOURCES "elasticsearch/*.cpp") +add_executable( es_test ${COMMON_SOURCES} ${ES_SOURCES} ) +target_link_libraries( es_test graphene_chain graphene_app graphene_account_history graphene_elasticsearch graphene_es_objects graphene_egenesis_none fc ${PLATFORM_SPECIFIC_LIBS} ) add_subdirectory( generate_empty_blocks ) diff --git a/tests/app/main.cpp b/tests/app/main.cpp index 50ed9f07b7..b85b847ae5 100644 --- a/tests/app/main.cpp +++ b/tests/app/main.cpp @@ -23,62 +23,239 @@ */ #include #include +#include #include -#include - #include #include +#include +#include +#include #include -#include +#include +#include #include #define BOOST_TEST_MODULE Test Application #include +#include "../common/genesis_file_util.hpp" + using namespace graphene; +namespace bpo = boost::program_options; + +namespace fc { + extern std::unordered_map &get_logger_map(); + extern std::unordered_map &get_appender_map(); +} + +BOOST_AUTO_TEST_CASE(load_configuration_options_test_config_logging_files_created) +{ + fc::temp_directory app_dir(graphene::utilities::temp_directory_path()); + auto dir = app_dir.path(); + auto config_ini_file = dir / "config.ini"; + auto logging_ini_file = dir / "logging.ini"; + + /// create default config options + auto node = new app::application(); + bpo::options_description cli, cfg; + node->set_program_options(cli, cfg); + bpo::options_description cfg_options("Graphene Witness Node"); + cfg_options.add(cfg); + + /// check preconditions + BOOST_CHECK(!fc::exists(config_ini_file)); + BOOST_CHECK(!fc::exists(logging_ini_file)); + + bpo::variables_map options; + app::load_configuration_options(dir, cfg_options, options); + + /// check post-conditions + BOOST_CHECK(fc::exists(config_ini_file)); + BOOST_CHECK(fc::exists(logging_ini_file)); + BOOST_CHECK_GT(fc::file_size(config_ini_file), 0u); + BOOST_CHECK_GT(fc::file_size(logging_ini_file), 0u); +} + +BOOST_AUTO_TEST_CASE(load_configuration_options_test_config_ini_options) +{ + fc::temp_directory app_dir(graphene::utilities::temp_directory_path()); + auto dir = app_dir.path(); + auto config_ini_file = dir / "config.ini"; + auto logging_ini_file = dir / "logging.ini"; + + /// create config.ini + bpo::options_description cfg_options("config.ini options"); + cfg_options.add_options() + ("option1", bpo::value(), "") + ("option2", bpo::value(), "") + ; + std::ofstream out(config_ini_file.preferred_string()); + out << "option1=is present\n" + "option2=1\n\n"; + out.close(); + + /// check preconditions + BOOST_CHECK(fc::exists(config_ini_file)); + BOOST_CHECK(!fc::exists(logging_ini_file)); + + bpo::variables_map options; + app::load_configuration_options(dir, cfg_options, options); + + /// check the options values are parsed into the output map + BOOST_CHECK(!options.empty()); + BOOST_CHECK_EQUAL(options.count("option1"), 1u); + BOOST_CHECK_EQUAL(options.count("option2"), 1u); + BOOST_CHECK_EQUAL(options["option1"].as(), "is present"); + 
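// The assertions around this point pin down what load_configuration_options() is
// expected to do: options registered in an options_description and present in
// config.ini must come back typed in the variables_map. A minimal, self-contained
// sketch of that mechanism using standard boost::program_options only; the file
// name and option names below are illustrative, not the node's real option set.
#include <boost/program_options.hpp>
#include <fstream>
#include <iostream>
#include <string>

namespace bpo = boost::program_options;

int main()
{
   bpo::options_description cfg( "config.ini options" );
   cfg.add_options()
      ( "option1", bpo::value<std::string>(), "a string option" )
      ( "option2", bpo::value<int>(),         "an integer option" );

   // write a tiny ini file, then parse it back through program_options
   std::ofstream( "example_config.ini" ) << "option1=is present\noption2=1\n";

   bpo::variables_map vm;
   std::ifstream in( "example_config.ini" );
   bpo::store( bpo::parse_config_file( in, cfg, true /* allow unregistered */ ), vm );
   bpo::notify( vm );

   std::cout << vm["option1"].as<std::string>() << " / "
             << vm["option2"].as<int>() << std::endl;
}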
BOOST_CHECK_EQUAL(options["option2"].as(), 1); + + /// when the config.ini exists and doesn't contain logging configuration while the logging.ini doesn't exist + /// the logging.ini is not created + BOOST_CHECK(!fc::exists(logging_ini_file)); +} + +BOOST_AUTO_TEST_CASE(load_configuration_options_test_logging_ini_options) +{ + fc::temp_directory app_dir(graphene::utilities::temp_directory_path()); + auto dir = app_dir.path(); + auto config_ini_file = dir / "config.ini"; + auto logging_ini_file = dir / "logging.ini"; + + /// create logging.ini + /// configure exactly one logger and appender + std::ofstream out(logging_ini_file.preferred_string()); + out << "[log.file_appender.default]\n" + "filename=test.log\n\n" + "[logger.default]\n" + "level=info\n" + "appenders=default\n\n" + ; + out.close(); + + /// clear logger and appender state + fc::get_logger_map().clear(); + fc::get_appender_map().clear(); + BOOST_CHECK(fc::get_logger_map().empty()); + BOOST_CHECK(fc::get_appender_map().empty()); + + bpo::options_description cfg_options("empty"); + bpo::variables_map options; + app::load_configuration_options(dir, cfg_options, options); + + /// check the options values are parsed into the output map + /// this is a little bit tricky since load_configuration_options() doesn't provide output variable for logging_config + auto logger_map = fc::get_logger_map(); + auto appender_map = fc::get_appender_map(); + BOOST_CHECK_EQUAL(logger_map.size(), 1u); + BOOST_CHECK(logger_map.count("default")); + BOOST_CHECK_EQUAL(appender_map.size(), 1u); + BOOST_CHECK(appender_map.count("default")); +} + +BOOST_AUTO_TEST_CASE(load_configuration_options_test_legacy_config_ini_options) +{ + fc::temp_directory app_dir(graphene::utilities::temp_directory_path()); + auto dir = app_dir.path(); + auto config_ini_file = dir / "config.ini"; + auto logging_ini_file = dir / "logging.ini"; + + /// create config.ini + bpo::options_description cfg_options("config.ini options"); + cfg_options.add_options() + ("option1", bpo::value(), "") + ("option2", bpo::value(), "") + ; + std::ofstream out(config_ini_file.preferred_string()); + out << "option1=is present\n" + "option2=1\n\n" + "[log.file_appender.default]\n" + "filename=test.log\n\n" + "[logger.default]\n" + "level=info\n" + "appenders=default\n\n" + ; + out.close(); + + /// clear logger and appender state + fc::get_logger_map().clear(); + fc::get_appender_map().clear(); + BOOST_CHECK(fc::get_logger_map().empty()); + BOOST_CHECK(fc::get_appender_map().empty()); + + bpo::variables_map options; + app::load_configuration_options(dir, cfg_options, options); + + /// check logging.ini not created + BOOST_CHECK(!fc::exists(logging_ini_file)); + + /// check the options values are parsed into the output map + BOOST_CHECK(!options.empty()); + BOOST_CHECK_EQUAL(options.count("option1"), 1u); + BOOST_CHECK_EQUAL(options.count("option2"), 1u); + BOOST_CHECK_EQUAL(options["option1"].as(), "is present"); + BOOST_CHECK_EQUAL(options["option2"].as(), 1); + + auto logger_map = fc::get_logger_map(); + auto appender_map = fc::get_appender_map(); + BOOST_CHECK_EQUAL(logger_map.size(), 1u); + BOOST_CHECK(logger_map.count("default")); + BOOST_CHECK_EQUAL(appender_map.size(), 1u); + BOOST_CHECK(appender_map.count("default")); +} +///////////// +/// @brief create a 2 node network +///////////// BOOST_AUTO_TEST_CASE( two_node_network ) { using namespace graphene::chain; using namespace graphene::app; try { - BOOST_TEST_MESSAGE( "Creating temporary files" ); + BOOST_TEST_MESSAGE( "Creating and 
initializing app1" ); fc::temp_directory app_dir( graphene::utilities::temp_directory_path() ); - fc::temp_directory app2_dir( graphene::utilities::temp_directory_path() ); - fc::temp_file genesis_json; - - BOOST_TEST_MESSAGE( "Creating and initializing app1" ); graphene::app::application app1; - app1.register_plugin(); + app1.register_plugin< graphene::account_history::account_history_plugin>(); + app1.register_plugin< graphene::market_history::market_history_plugin >(); + app1.register_plugin< graphene::witness_plugin::witness_plugin >(); + app1.register_plugin< graphene::grouped_orders::grouped_orders_plugin>(); + app1.startup_plugins(); boost::program_options::variables_map cfg; cfg.emplace("p2p-endpoint", boost::program_options::variable_value(string("127.0.0.1:3939"), false)); + cfg.emplace("genesis-json", boost::program_options::variable_value(create_genesis_file(app_dir), false)); + cfg.emplace("seed-nodes", boost::program_options::variable_value(string("[]"), false)); app1.initialize(app_dir.path(), cfg); + BOOST_TEST_MESSAGE( "Starting app1 and waiting 500 ms" ); + app1.startup(); + fc::usleep(fc::milliseconds(500)); BOOST_TEST_MESSAGE( "Creating and initializing app2" ); + fc::temp_directory app2_dir( graphene::utilities::temp_directory_path() ); graphene::app::application app2; app2.register_plugin(); + app2.register_plugin< graphene::market_history::market_history_plugin >(); + app2.register_plugin< graphene::witness_plugin::witness_plugin >(); + app2.register_plugin< graphene::grouped_orders::grouped_orders_plugin>(); + app2.startup_plugins(); auto cfg2 = cfg; cfg2.erase("p2p-endpoint"); cfg2.emplace("p2p-endpoint", boost::program_options::variable_value(string("127.0.0.1:4040"), false)); + cfg2.emplace("genesis-json", boost::program_options::variable_value(create_genesis_file(app_dir), false)); cfg2.emplace("seed-node", boost::program_options::variable_value(vector{"127.0.0.1:3939"}, false)); + cfg2.emplace("seed-nodes", boost::program_options::variable_value(string("[]"), false)); app2.initialize(app2_dir.path(), cfg2); - BOOST_TEST_MESSAGE( "Starting app1 and waiting 500 ms" ); - app1.startup(); - fc::usleep(fc::milliseconds(500)); BOOST_TEST_MESSAGE( "Starting app2 and waiting 500 ms" ); app2.startup(); fc::usleep(fc::milliseconds(500)); - BOOST_REQUIRE_EQUAL(app1.p2p_node()->get_connection_count(), 1); + BOOST_REQUIRE_EQUAL(app1.p2p_node()->get_connection_count(), 1u); BOOST_CHECK_EQUAL(std::string(app1.p2p_node()->get_connected_peers().front().host.get_address()), "127.0.0.1"); BOOST_TEST_MESSAGE( "app1 and app2 successfully connected" ); @@ -89,7 +266,7 @@ BOOST_AUTO_TEST_CASE( two_node_network ) BOOST_CHECK_EQUAL( db2->get_balance( GRAPHENE_NULL_ACCOUNT, asset_id_type() ).amount.value, 0 ); BOOST_TEST_MESSAGE( "Creating transfer tx" ); - graphene::chain::signed_transaction trx; + graphene::chain::precomputable_transaction trx; { account_id_type nathan_id = db2->get_index_type().indices().get().find( "nathan" )->id; fc::ecc::private_key nathan_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("nathan"))); @@ -143,8 +320,8 @@ BOOST_AUTO_TEST_CASE( two_node_network ) fc::usleep(fc::milliseconds(500)); BOOST_TEST_MESSAGE( "Verifying nodes are still connected" ); - BOOST_CHECK_EQUAL(app1.p2p_node()->get_connection_count(), 1); - BOOST_CHECK_EQUAL(app1.chain_database()->head_block_num(), 1); + BOOST_CHECK_EQUAL(app1.p2p_node()->get_connection_count(), 1u); + BOOST_CHECK_EQUAL(app1.chain_database()->head_block_num(), 1u); BOOST_TEST_MESSAGE( "Checking 
GRAPHENE_NULL_ACCOUNT has balance" ); } catch( fc::exception& e ) { @@ -152,3 +329,22 @@ BOOST_AUTO_TEST_CASE( two_node_network ) throw; } } + +// a contrived example to test the breaking out of application_impl to a header file + +#include "../../libraries/app/application_impl.hxx" + +BOOST_AUTO_TEST_CASE(application_impl_breakout) { + class test_impl : public graphene::app::detail::application_impl { + // override the constructor, just to test that we can + public: + test_impl() : application_impl(nullptr) {} + bool has_item(const net::item_id& id) override { + return true; + } + }; + + test_impl impl; + graphene::net::item_id id; + BOOST_CHECK(impl.has_item(id)); +} diff --git a/tests/benchmarks/genesis_allocation.cpp b/tests/benchmarks/genesis_allocation.cpp index 470d586d62..63e75db568 100644 --- a/tests/benchmarks/genesis_allocation.cpp +++ b/tests/benchmarks/genesis_allocation.cpp @@ -25,10 +25,7 @@ #include #include -#include - #include -#include #include @@ -70,7 +67,7 @@ BOOST_AUTO_TEST_CASE( genesis_and_persistence_bench ) { database db; - db.open(data_dir.path(), [&]{return genesis_state;}); + db.open(data_dir.path(), [&]{return genesis_state;}, "test"); for( int i = 11; i < account_count + 11; ++i) BOOST_CHECK(db.get_balance(account_id_type(i), asset_id_type()).amount == GRAPHENE_MAX_SHARE_SUPPLY / account_count); @@ -83,7 +80,7 @@ BOOST_AUTO_TEST_CASE( genesis_and_persistence_bench ) database db; fc::time_point start_time = fc::time_point::now(); - db.open(data_dir.path(), [&]{return genesis_state;}); + db.open(data_dir.path(), [&]{return genesis_state;}, "test"); ilog("Opened database in ${t} milliseconds.", ("t", (fc::time_point::now() - start_time).count() / 1000)); for( int i = 11; i < account_count + 11; ++i) @@ -118,7 +115,7 @@ BOOST_AUTO_TEST_CASE( genesis_and_persistence_bench ) auto start_time = fc::time_point::now(); wlog( "about to start reindex..." ); - db.reindex(data_dir.path(), genesis_state); + db.open(data_dir.path(), [&]{return genesis_state;}, "force_wipe"); ilog("Replayed database in ${t} milliseconds.", ("t", (fc::time_point::now() - start_time).count() / 1000)); for( int i = 0; i < blocks_to_produce; ++i ) diff --git a/tests/cli/main.cpp b/tests/cli/main.cpp new file mode 100644 index 0000000000..d489dd080a --- /dev/null +++ b/tests/cli/main.cpp @@ -0,0 +1,754 @@ +/* + * Copyright (c) 2018 John Jones, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#ifdef _WIN32 + #ifndef _WIN32_WINNT + #define _WIN32_WINNT 0x0501 + #endif + #include + #include +#else + #include + #include + #include +#endif + +#include + +#define BOOST_TEST_MODULE Test Application +#include + +/***** + * Global Initialization for Windows + * ( sets up Winsock stuf ) + */ +#ifdef _WIN32 +int sockInit(void) +{ + WSADATA wsa_data; + return WSAStartup(MAKEWORD(1,1), &wsa_data); +} +int sockQuit(void) +{ + return WSACleanup(); +} +#endif + +/********************* + * Helper Methods + *********************/ + +#include "../common/genesis_file_util.hpp" + +#define INVOKE(test) ((struct test*)this)->test_method(); + +////// +/// @brief attempt to find an available port on localhost +/// @returns an available port number, or -1 on error +///// +int get_available_port() +{ + struct sockaddr_in sin; + int socket_fd = socket(AF_INET, SOCK_STREAM, 0); + if (socket_fd == -1) + return -1; + sin.sin_family = AF_INET; + sin.sin_port = 0; + sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK); + if (::bind(socket_fd, (struct sockaddr*)&sin, sizeof(struct sockaddr_in)) == -1) + return -1; + socklen_t len = sizeof(sin); + if (getsockname(socket_fd, (struct sockaddr *)&sin, &len) == -1) + return -1; +#ifdef _WIN32 + closesocket(socket_fd); +#else + close(socket_fd); +#endif + return ntohs(sin.sin_port); +} + +/////////// +/// @brief Start the application +/// @param app_dir the temporary directory to use +/// @param server_port_number to be filled with the rpc endpoint port number +/// @returns the application object +////////// +std::shared_ptr start_application(fc::temp_directory& app_dir, int& server_port_number) { + std::shared_ptr app1(new graphene::app::application{}); + + app1->register_plugin(true); + app1->register_plugin< graphene::market_history::market_history_plugin >(true); + app1->register_plugin< graphene::witness_plugin::witness_plugin >(true); + app1->register_plugin< graphene::grouped_orders::grouped_orders_plugin>(true); + app1->startup_plugins(); + boost::program_options::variables_map cfg; +#ifdef _WIN32 + sockInit(); +#endif + server_port_number = get_available_port(); + cfg.emplace( + "rpc-endpoint", + boost::program_options::variable_value(string("127.0.0.1:" + std::to_string(server_port_number)), false) + ); + cfg.emplace("genesis-json", boost::program_options::variable_value(create_genesis_file(app_dir), false)); + cfg.emplace("seed-nodes", boost::program_options::variable_value(string("[]"), false)); + app1->initialize(app_dir.path(), cfg); + + app1->initialize_plugins(cfg); + app1->startup_plugins(); + + app1->startup(); + fc::usleep(fc::milliseconds(500)); + return app1; +} + +/////////// +/// Send a block to the db +/// @param app the application +/// @returns true on success +/////////// +bool generate_block(std::shared_ptr app) { + try { + fc::ecc::private_key committee_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("nathan"))); + auto db = app->chain_database(); + auto block_1 = db->generate_block( db->get_slot_time(1), + db->get_scheduled_witness(1), + committee_key, + database::skip_nothing ); + return true; + } catch (exception &e) { + return false; + } +} + +/////////// +/// @brief Skip intermediate blocks, and generate a maintenance block +/// @param app the application +/// @returns true on success +/////////// +bool generate_maintenance_block(std::shared_ptr app) { + try { + fc::ecc::private_key 
committee_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("nathan"))); + uint32_t skip = ~0; + auto db = app->chain_database(); + auto maint_time = db->get_dynamic_global_properties().next_maintenance_time; + auto slots_to_miss = db->get_slot_at_time(maint_time); + db->generate_block(db->get_slot_time(slots_to_miss), + db->get_scheduled_witness(slots_to_miss), + committee_key, + skip); + return true; + } catch (exception& e) + { + return false; + } +} + +/////////// +/// @brief a class to make connecting to the application server easier +/////////// +class client_connection +{ +public: + ///////// + // constructor + ///////// + client_connection( + std::shared_ptr app, + const fc::temp_directory& data_dir, + const int server_port_number + ) + { + wallet_data.chain_id = app->chain_database()->get_chain_id(); + wallet_data.ws_server = "ws://127.0.0.1:" + std::to_string(server_port_number); + wallet_data.ws_user = ""; + wallet_data.ws_password = ""; + websocket_connection = websocket_client.connect( wallet_data.ws_server ); + + api_connection = std::make_shared(*websocket_connection, GRAPHENE_MAX_NESTED_OBJECTS); + + remote_login_api = api_connection->get_remote_api< graphene::app::login_api >(1); + BOOST_CHECK(remote_login_api->login( wallet_data.ws_user, wallet_data.ws_password ) ); + + wallet_api_ptr = std::make_shared(wallet_data, remote_login_api); + wallet_filename = data_dir.path().generic_string() + "/wallet.json"; + wallet_api_ptr->set_wallet_filename(wallet_filename); + + wallet_api = fc::api(wallet_api_ptr); + + wallet_cli = std::make_shared(GRAPHENE_MAX_NESTED_OBJECTS); + for( auto& name_formatter : wallet_api_ptr->get_result_formatters() ) + wallet_cli->format_result( name_formatter.first, name_formatter.second ); + + boost::signals2::scoped_connection closed_connection(websocket_connection->closed.connect([=]{ + cerr << "Server has disconnected us.\n"; + wallet_cli->stop(); + })); + (void)(closed_connection); + } + ~client_connection() + { + // wait for everything to finish up + fc::usleep(fc::milliseconds(500)); + } +public: + fc::http::websocket_client websocket_client; + graphene::wallet::wallet_data wallet_data; + fc::http::websocket_connection_ptr websocket_connection; + std::shared_ptr api_connection; + fc::api remote_login_api; + std::shared_ptr wallet_api_ptr; + fc::api wallet_api; + std::shared_ptr wallet_cli; + std::string wallet_filename; +}; + +/////////////////////////////// +// Cli Wallet Fixture +/////////////////////////////// + +struct cli_fixture +{ + class dummy + { + public: + ~dummy() + { + // wait for everything to finish up + fc::usleep(fc::milliseconds(500)); + } + }; + dummy dmy; + int server_port_number; + fc::temp_directory app_dir; + std::shared_ptr app1; + client_connection con; + std::vector nathan_keys; + + cli_fixture() : + server_port_number(0), + app_dir( graphene::utilities::temp_directory_path() ), + app1( start_application(app_dir, server_port_number) ), + con( app1, app_dir, server_port_number ), + nathan_keys( {"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"} ) + { + BOOST_TEST_MESSAGE("Setup cli_wallet::boost_fixture_test_case"); + + using namespace graphene::chain; + using namespace graphene::app; + + try + { + BOOST_TEST_MESSAGE("Setting wallet password"); + con.wallet_api_ptr->set_password("supersecret"); + con.wallet_api_ptr->unlock("supersecret"); + + // import Nathan account + BOOST_TEST_MESSAGE("Importing nathan key"); + BOOST_CHECK_EQUAL(nathan_keys[0], "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"); + 
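// The WIF literal checked just above is not arbitrary: it is intended to be the
// well-known test key regenerated from sha256("nathan"), i.e. the same key
// generate_block() signs with. A small standalone sketch of that relationship,
// assuming graphene::utilities::key_to_wif() from
// libraries/utilities/key_conversion.hpp is available (illustrative only):
#include <fc/crypto/elliptic.hpp>
#include <fc/crypto/sha256.hpp>
#include <graphene/utilities/key_conversion.hpp>
#include <iostream>
#include <string>

int main()
{
   fc::ecc::private_key nathan_key =
         fc::ecc::private_key::regenerate( fc::sha256::hash( std::string( "nathan" ) ) );
   // If the assumption holds, this prints 1: the derived key round-trips to the
   // WIF string imported by the fixture.
   std::cout << ( graphene::utilities::key_to_wif( nathan_key ) ==
                  "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3" ) << std::endl;
}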
BOOST_CHECK(con.wallet_api_ptr->import_key("nathan", nathan_keys[0])); + } catch( fc::exception& e ) { + edump((e.to_detail_string())); + throw; + } + } + + ~cli_fixture() + { + BOOST_TEST_MESSAGE("Cleanup cli_wallet::boost_fixture_test_case"); + + // wait for everything to finish up + fc::usleep(fc::seconds(1)); + + app1->shutdown(); +#ifdef _WIN32 + sockQuit(); +#endif + } +}; + +/////////////////////////////// +// Tests +/////////////////////////////// + +//////////////// +// Start a server and connect using the same calls as the CLI +//////////////// +BOOST_FIXTURE_TEST_CASE( cli_connect, cli_fixture ) +{ + BOOST_TEST_MESSAGE("Testing wallet connection."); +} + +//////////////// +// Start a server and connect using the same calls as the CLI +// Quit wallet and be sure that file was saved correctly +//////////////// +BOOST_FIXTURE_TEST_CASE( cli_quit, cli_fixture ) +{ + BOOST_TEST_MESSAGE("Testing wallet connection and quit command."); + BOOST_CHECK_THROW( con.wallet_api_ptr->quit(), fc::canceled_exception ); +} + +BOOST_FIXTURE_TEST_CASE( upgrade_nathan_account, cli_fixture ) +{ + try + { + BOOST_TEST_MESSAGE("Upgrade Nathan's account"); + + account_object nathan_acct_before_upgrade, nathan_acct_after_upgrade; + std::vector import_txs; + signed_transaction upgrade_tx; + + BOOST_TEST_MESSAGE("Importing nathan's balance"); + import_txs = con.wallet_api_ptr->import_balance("nathan", nathan_keys, true); + nathan_acct_before_upgrade = con.wallet_api_ptr->get_account("nathan"); + + // upgrade nathan + BOOST_TEST_MESSAGE("Upgrading Nathan to LTM"); + upgrade_tx = con.wallet_api_ptr->upgrade_account("nathan", true); + nathan_acct_after_upgrade = con.wallet_api_ptr->get_account("nathan"); + + // verify that the upgrade was successful + BOOST_CHECK_PREDICATE( + std::not_equal_to(), + (nathan_acct_before_upgrade.membership_expiration_date.sec_since_epoch()) + (nathan_acct_after_upgrade.membership_expiration_date.sec_since_epoch()) + ); + BOOST_CHECK(nathan_acct_after_upgrade.is_lifetime_member()); + } catch( fc::exception& e ) { + edump((e.to_detail_string())); + throw; + } +} + +BOOST_FIXTURE_TEST_CASE( create_new_account, cli_fixture ) +{ + try + { + INVOKE(upgrade_nathan_account); + + // create a new account + graphene::wallet::brain_key_info bki = con.wallet_api_ptr->suggest_brain_key(); + BOOST_CHECK(!bki.brain_priv_key.empty()); + signed_transaction create_acct_tx = con.wallet_api_ptr->create_account_with_brain_key( + bki.brain_priv_key, "jmjatlanta", "nathan", "nathan", true + ); + // save the private key for this new account in the wallet file + BOOST_CHECK(con.wallet_api_ptr->import_key("jmjatlanta", bki.wif_priv_key)); + con.wallet_api_ptr->save_wallet_file(con.wallet_filename); + + // attempt to give jmjatlanta some bitsahres + BOOST_TEST_MESSAGE("Transferring bitshares from Nathan to jmjatlanta"); + signed_transaction transfer_tx = con.wallet_api_ptr->transfer( + "nathan", "jmjatlanta", "10000", "1.3.0", "Here are some CORE token for your new account", true + ); + } catch( fc::exception& e ) { + edump((e.to_detail_string())); + throw; + } +} + +/////////////////////// +// Start a server and connect using the same calls as the CLI +// Vote for two witnesses, and make sure they both stay there +// after a maintenance block +/////////////////////// +BOOST_FIXTURE_TEST_CASE( cli_vote_for_2_witnesses, cli_fixture ) +{ + try + { + BOOST_TEST_MESSAGE("Cli Vote Test for 2 Witnesses"); + + INVOKE(create_new_account); + + // get the details for init1 + witness_object init1_obj = 
con.wallet_api_ptr->get_witness("init1"); + int init1_start_votes = init1_obj.total_votes; + // Vote for a witness + signed_transaction vote_witness1_tx = con.wallet_api_ptr->vote_for_witness("jmjatlanta", "init1", true, true); + + // generate a block to get things started + BOOST_CHECK(generate_block(app1)); + // wait for a maintenance interval + BOOST_CHECK(generate_maintenance_block(app1)); + + // Verify that the vote is there + init1_obj = con.wallet_api_ptr->get_witness("init1"); + witness_object init2_obj = con.wallet_api_ptr->get_witness("init2"); + int init1_middle_votes = init1_obj.total_votes; + BOOST_CHECK(init1_middle_votes > init1_start_votes); + + // Vote for a 2nd witness + int init2_start_votes = init2_obj.total_votes; + signed_transaction vote_witness2_tx = con.wallet_api_ptr->vote_for_witness("jmjatlanta", "init2", true, true); + + // send another block to trigger maintenance interval + BOOST_CHECK(generate_maintenance_block(app1)); + + // Verify that both the first vote and the 2nd are there + init2_obj = con.wallet_api_ptr->get_witness("init2"); + init1_obj = con.wallet_api_ptr->get_witness("init1"); + + int init2_middle_votes = init2_obj.total_votes; + BOOST_CHECK(init2_middle_votes > init2_start_votes); + int init1_last_votes = init1_obj.total_votes; + BOOST_CHECK(init1_last_votes > init1_start_votes); + } catch( fc::exception& e ) { + edump((e.to_detail_string())); + throw; + } +} + +/////////////////// +// Start a server and connect using the same calls as the CLI +// Set a voting proxy and be assured that it sticks +/////////////////// +BOOST_FIXTURE_TEST_CASE( cli_set_voting_proxy, cli_fixture ) +{ + try { + INVOKE(create_new_account); + + // grab account for comparison + account_object prior_voting_account = con.wallet_api_ptr->get_account("jmjatlanta"); + // set the voting proxy to nathan + BOOST_TEST_MESSAGE("About to set voting proxy."); + signed_transaction voting_tx = con.wallet_api_ptr->set_voting_proxy("jmjatlanta", "nathan", true); + account_object after_voting_account = con.wallet_api_ptr->get_account("jmjatlanta"); + // see if it changed + BOOST_CHECK(prior_voting_account.options.voting_account != after_voting_account.options.voting_account); + } catch( fc::exception& e ) { + edump((e.to_detail_string())); + throw; + } +} + +/////////////////// +// Test blind transactions and mantissa length of range proofs. 
+/////////////////// +BOOST_FIXTURE_TEST_CASE( cli_confidential_tx_test, cli_fixture ) +{ + using namespace graphene::wallet; + try { + std::vector import_txs; + + BOOST_TEST_MESSAGE("Importing nathan's balance"); + import_txs = con.wallet_api_ptr->import_balance("nathan", nathan_keys, true); + + unsigned int head_block = 0; + auto & W = *con.wallet_api_ptr; // Wallet alias + + BOOST_TEST_MESSAGE("Creating blind accounts"); + graphene::wallet::brain_key_info bki_nathan = W.suggest_brain_key(); + graphene::wallet::brain_key_info bki_alice = W.suggest_brain_key(); + graphene::wallet::brain_key_info bki_bob = W.suggest_brain_key(); + W.create_blind_account("nathan", bki_nathan.brain_priv_key); + W.create_blind_account("alice", bki_alice.brain_priv_key); + W.create_blind_account("bob", bki_bob.brain_priv_key); + BOOST_CHECK(W.get_blind_accounts().size() == 3); + + // ** Block 1: Import Nathan account: + BOOST_TEST_MESSAGE("Importing nathan key and balance"); + std::vector nathan_keys{"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"}; + W.import_key("nathan", nathan_keys[0]); + W.import_balance("nathan", nathan_keys, true); + generate_block(app1); head_block++; + + // ** Block 2: Nathan will blind 100M CORE token: + BOOST_TEST_MESSAGE("Blinding a large balance"); + W.transfer_to_blind("nathan", GRAPHENE_SYMBOL, {{"nathan","100000000"}}, true); + BOOST_CHECK( W.get_blind_balances("nathan")[0].amount == 10000000000000 ); + generate_block(app1); head_block++; + + // ** Block 3: Nathan will send 1M CORE token to alice and 10K CORE token to bob. We + // then confirm that balances are received, and then analyze the range + // prooofs to make sure the mantissa length does not reveal approximate + // balance (issue #480). + std::map to_list = {{"alice",100000000000}, + {"bob", 1000000000}}; + vector bconfs; + asset_object core_asset = W.get_asset("1.3.0"); + BOOST_TEST_MESSAGE("Sending blind transactions to alice and bob"); + for (auto to : to_list) { + string amount = core_asset.amount_to_string(to.second); + bconfs.push_back(W.blind_transfer("nathan",to.first,amount,core_asset.symbol,true)); + BOOST_CHECK( W.get_blind_balances(to.first)[0].amount == to.second ); + } + BOOST_TEST_MESSAGE("Inspecting range proof mantissa lengths"); + vector rp_mantissabits; + for (auto conf : bconfs) { + for (auto out : conf.trx.operations[0].get().outputs) { + rp_mantissabits.push_back(1+out.range_proof[1]); // 2nd byte encodes mantissa length + } + } + // We are checking the mantissa length of the range proofs for several Pedersen + // commitments of varying magnitude. We don't want the mantissa lengths to give + // away magnitude. Deprecated wallet behavior was to use "just enough" mantissa + // bits to prove range, but this gives away value to within a factor of two. As a + // naive test, we assume that if all mantissa lengths are equal, then they are not + // revealing magnitude. However, future more-sophisticated wallet behavior + // *might* randomize mantissa length to achieve some space savings in the range + // proof. The following test will fail in that case and a more sophisticated test + // will be needed. 
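// The check that follows relies on a standard idiom: a range contains all-equal
// elements exactly when std::adjacent_find with std::not_equal_to finds no
// adjacent unequal pair. A minimal standalone illustration of that idiom (the
// values are made up, not real range-proof data):
#include <algorithm>
#include <functional>
#include <iostream>
#include <vector>

int main()
{
   std::vector<int> mantissa_bits{ 49, 49, 49 };
   bool all_equal = std::adjacent_find( mantissa_bits.begin(), mantissa_bits.end(),
                                        std::not_equal_to<int>() ) == mantissa_bits.end();
   std::cout << ( all_equal ? "uniform mantissa lengths" : "lengths differ" ) << std::endl;
}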
+ auto adjacent_unequal = std::adjacent_find(rp_mantissabits.begin(), + rp_mantissabits.end(), // find unequal adjacent values + std::not_equal_to()); + BOOST_CHECK(adjacent_unequal == rp_mantissabits.end()); + generate_block(app1); head_block++; + + // ** Check head block: + BOOST_TEST_MESSAGE("Check that all expected blocks have processed"); + dynamic_global_property_object dgp = W.get_dynamic_global_properties(); + BOOST_CHECK(dgp.head_block_number == head_block); + } catch( fc::exception& e ) { + edump((e.to_detail_string())); + throw; + } +} + +/****** + * Check account history pagination (see bitshares-core/issue/1176) + */ +BOOST_FIXTURE_TEST_CASE( account_history_pagination, cli_fixture ) +{ + try + { + INVOKE(create_new_account); + + // attempt to give jmjatlanta some bitsahres + BOOST_TEST_MESSAGE("Transferring bitshares from Nathan to jmjatlanta"); + for(int i = 1; i <= 199; i++) + { + signed_transaction transfer_tx = con.wallet_api_ptr->transfer("nathan", "jmjatlanta", std::to_string(i), + "1.3.0", "Here are some CORE token for your new account", true); + } + + BOOST_CHECK(generate_block(app1)); + + // now get account history and make sure everything is there (and no duplicates) + std::vector history = con.wallet_api_ptr->get_account_history("jmjatlanta", 300); + BOOST_CHECK_EQUAL(201u, history.size() ); + + std::set operation_ids; + + for(auto& op : history) + { + if( operation_ids.find(op.op.id) != operation_ids.end() ) + { + BOOST_FAIL("Duplicate found"); + } + operation_ids.insert(op.op.id); + } + } catch( fc::exception& e ) { + edump((e.to_detail_string())); + throw; + } +} + + +/////////////////////// +// Create a multi-sig account and verify that only when all signatures are +// signed, the transaction could be broadcast +/////////////////////// +BOOST_AUTO_TEST_CASE( cli_multisig_transaction ) +{ + using namespace graphene::chain; + using namespace graphene::app; + std::shared_ptr app1; + try { + fc::temp_directory app_dir( graphene::utilities::temp_directory_path() ); + + int server_port_number = 0; + app1 = start_application(app_dir, server_port_number); + + // connect to the server + client_connection con(app1, app_dir, server_port_number); + + BOOST_TEST_MESSAGE("Setting wallet password"); + con.wallet_api_ptr->set_password("supersecret"); + con.wallet_api_ptr->unlock("supersecret"); + + // import Nathan account + BOOST_TEST_MESSAGE("Importing nathan key"); + std::vector nathan_keys{"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"}; + BOOST_CHECK_EQUAL(nathan_keys[0], "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"); + BOOST_CHECK(con.wallet_api_ptr->import_key("nathan", nathan_keys[0])); + + BOOST_TEST_MESSAGE("Importing nathan's balance"); + std::vector import_txs = con.wallet_api_ptr->import_balance("nathan", nathan_keys, true); + account_object nathan_acct_before_upgrade = con.wallet_api_ptr->get_account("nathan"); + + // upgrade nathan + BOOST_TEST_MESSAGE("Upgrading Nathan to LTM"); + signed_transaction upgrade_tx = con.wallet_api_ptr->upgrade_account("nathan", true); + account_object nathan_acct_after_upgrade = con.wallet_api_ptr->get_account("nathan"); + + // verify that the upgrade was successful + BOOST_CHECK_PREDICATE( std::not_equal_to(), (nathan_acct_before_upgrade.membership_expiration_date.sec_since_epoch())(nathan_acct_after_upgrade.membership_expiration_date.sec_since_epoch()) ); + BOOST_CHECK(nathan_acct_after_upgrade.is_lifetime_member()); + + // create a new multisig account + graphene::wallet::brain_key_info bki1 = 
con.wallet_api_ptr->suggest_brain_key(); + graphene::wallet::brain_key_info bki2 = con.wallet_api_ptr->suggest_brain_key(); + graphene::wallet::brain_key_info bki3 = con.wallet_api_ptr->suggest_brain_key(); + graphene::wallet::brain_key_info bki4 = con.wallet_api_ptr->suggest_brain_key(); + BOOST_CHECK(!bki1.brain_priv_key.empty()); + BOOST_CHECK(!bki2.brain_priv_key.empty()); + BOOST_CHECK(!bki3.brain_priv_key.empty()); + BOOST_CHECK(!bki4.brain_priv_key.empty()); + + signed_transaction create_multisig_acct_tx; + account_create_operation account_create_op; + + account_create_op.referrer = nathan_acct_after_upgrade.id; + account_create_op.referrer_percent = nathan_acct_after_upgrade.referrer_rewards_percentage; + account_create_op.registrar = nathan_acct_after_upgrade.id; + account_create_op.name = "cifer.test"; + account_create_op.owner = authority(1, bki1.pub_key, 1); + account_create_op.active = authority(2, bki2.pub_key, 1, bki3.pub_key, 1); + account_create_op.options.memo_key = bki4.pub_key; + account_create_op.fee = asset(1000000); // should be enough for creating account + + create_multisig_acct_tx.operations.push_back(account_create_op); + con.wallet_api_ptr->sign_transaction(create_multisig_acct_tx, true); + + // attempt to give cifer.test some bitsahres + BOOST_TEST_MESSAGE("Transferring bitshares from Nathan to cifer.test"); + signed_transaction transfer_tx1 = con.wallet_api_ptr->transfer("nathan", "cifer.test", "10000", "1.3.0", "Here are some BTS for your new account", true); + + // transfer bts from cifer.test to nathan + BOOST_TEST_MESSAGE("Transferring bitshares from cifer.test to nathan"); + auto dyn_props = app1->chain_database()->get_dynamic_global_properties(); + account_object cifer_test = con.wallet_api_ptr->get_account("cifer.test"); + + // construct a transfer transaction + signed_transaction transfer_tx2; + transfer_operation xfer_op; + xfer_op.from = cifer_test.id; + xfer_op.to = nathan_acct_after_upgrade.id; + xfer_op.amount = asset(100000000); + xfer_op.fee = asset(3000000); // should be enough for transfer + transfer_tx2.operations.push_back(xfer_op); + + // case1: sign a transaction without TaPoS and expiration fields + // expect: return a transaction with TaPoS and expiration filled + transfer_tx2 = + con.wallet_api_ptr->add_transaction_signature( transfer_tx2, false ); + BOOST_CHECK( ( transfer_tx2.ref_block_num != 0 && + transfer_tx2.ref_block_prefix != 0 ) || + ( transfer_tx2.expiration != fc::time_point_sec() ) ); + + // case2: broadcast without signature + // expect: exception with missing active authority + BOOST_CHECK_THROW(con.wallet_api_ptr->broadcast_transaction(transfer_tx2), fc::exception); + + // case3: + // import one of the private keys for this new account in the wallet file, + // sign and broadcast with partial signatures + // + // expect: exception with missing active authority + BOOST_CHECK(con.wallet_api_ptr->import_key("cifer.test", bki2.wif_priv_key)); + BOOST_CHECK_THROW(con.wallet_api_ptr->add_transaction_signature(transfer_tx2, true), fc::exception); + + // case4: sign again as signature exists + // expect: num of signatures not increase + transfer_tx2 = con.wallet_api_ptr->add_transaction_signature(transfer_tx2, false); + BOOST_CHECK_EQUAL(transfer_tx2.signatures.size(), 1); + + // case5: + // import another private key, sign and broadcast without full signatures + // + // expect: transaction broadcast successfully + BOOST_CHECK(con.wallet_api_ptr->import_key("cifer.test", bki3.wif_priv_key)); + 
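// Why the partially signed transaction above must fail while the next call
// succeeds: "cifer.test" was created with an active authority of
// weight_threshold 2 over two keys of weight 1 each, so a single signature
// contributes weight 1 < 2 and only both signatures reach the threshold. A
// standalone sketch of that weighted-threshold rule using plain std types
// (satisfies_threshold below is illustrative, not graphene's authority class):
#include <iostream>
#include <map>
#include <set>
#include <string>

static bool satisfies_threshold( const std::map<std::string, int>& key_weights,
                                 int weight_threshold,
                                 const std::set<std::string>& signing_keys )
{
   int total = 0;
   for( const auto& kw : key_weights )
      if( signing_keys.count( kw.first ) )
         total += kw.second;
   return total >= weight_threshold;
}

int main()
{
   const std::map<std::string, int> active{ { "key2", 1 }, { "key3", 1 } };
   std::cout << satisfies_threshold( active, 2, { "key2" } )          // 0: one signature
             << satisfies_threshold( active, 2, { "key2", "key3" } )  // 1: both signatures
             << std::endl;
}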
con.wallet_api_ptr->add_transaction_signature(transfer_tx2, true); + auto balances = con.wallet_api_ptr->list_account_balances( "cifer.test" ); + for (auto b : balances) { + if (b.asset_id == asset_id_type()) { + BOOST_ASSERT(b == asset(900000000 - 3000000)); + } + } + + // wait for everything to finish up + fc::usleep(fc::seconds(1)); + } catch( fc::exception& e ) { + edump((e.to_detail_string())); + throw; + } + app1->shutdown(); +} + +graphene::wallet::plain_keys decrypt_keys( const std::string& password, const vector& cipher_keys ) +{ + auto pw = fc::sha512::hash( password.c_str(), password.size() ); + vector decrypted = fc::aes_decrypt( pw, cipher_keys ); + return fc::raw::unpack( decrypted ); +} + +BOOST_AUTO_TEST_CASE( saving_keys_wallet_test ) { + cli_fixture cli; + + cli.con.wallet_api_ptr->import_balance( "nathan", cli.nathan_keys, true ); + cli.con.wallet_api_ptr->upgrade_account( "nathan", true ); + std::string brain_key( "FICTIVE WEARY MINIBUS LENS HAWKIE MAIDISH MINTY GLYPH GYTE KNOT COCKSHY LENTIGO PROPS BIFORM KHUTBAH BRAZIL" ); + cli.con.wallet_api_ptr->create_account_with_brain_key( brain_key, "account1", "nathan", "nathan", true ); + + BOOST_CHECK_NO_THROW( cli.con.wallet_api_ptr->transfer( "nathan", "account1", "9000", "1.3.0", "", true ) ); + + std::string path( cli.app_dir.path().generic_string() + "/wallet.json" ); + graphene::wallet::wallet_data wallet = fc::json::from_file( path ).as( 2 * GRAPHENE_MAX_NESTED_OBJECTS ); + BOOST_CHECK( wallet.extra_keys.size() == 1 ); // nathan + BOOST_CHECK( wallet.pending_account_registrations.size() == 1 ); // account1 + BOOST_CHECK( wallet.pending_account_registrations["account1"].size() == 2 ); // account1 active key + account1 memo key + + graphene::wallet::plain_keys pk = decrypt_keys( "supersecret", wallet.cipher_keys ); + BOOST_CHECK( pk.keys.size() == 1 ); // nathan key + + BOOST_CHECK( generate_block( cli.app1 ) ); + fc::usleep( fc::seconds(1) ); + + wallet = fc::json::from_file( path ).as( 2 * GRAPHENE_MAX_NESTED_OBJECTS ); + BOOST_CHECK( wallet.extra_keys.size() == 2 ); // nathan + account1 + BOOST_CHECK( wallet.pending_account_registrations.empty() ); + BOOST_CHECK_NO_THROW( cli.con.wallet_api_ptr->transfer( "account1", "nathan", "1000", "1.3.0", "", true ) ); + + pk = decrypt_keys( "supersecret", wallet.cipher_keys ); + BOOST_CHECK( pk.keys.size() == 3 ); // nathan key + account1 active key + account1 memo key +} diff --git a/tests/common/database_fixture.cpp b/tests/common/database_fixture.cpp index 67e4dd0d20..65f7981330 100644 --- a/tests/common/database_fixture.cpp +++ b/tests/common/database_fixture.cpp @@ -26,25 +26,23 @@ #include #include +#include +#include +#include -#include - -#include -#include +#include #include #include #include #include #include +#include #include #include -#include -#include #include -#include #include "database_fixture.hpp" @@ -57,6 +55,13 @@ namespace graphene { namespace chain { using std::cout; using std::cerr; +void clearable_block::clear() +{ + _calculated_merkle_root = checksum_type(); + _signee = fc::ecc::public_key(); + _block_id = block_id_type(); +} + database_fixture::database_fixture() : app(), db( *app.chain_database() ) { @@ -71,8 +76,8 @@ database_fixture::database_fixture() if( arg == "--show-test-names" ) std::cout << "running test " << boost::unit_test::framework::current_test_case().p_name << std::endl; } - auto ahplugin = app.register_plugin(); auto mhplugin = app.register_plugin(); + auto goplugin = app.register_plugin(); init_account_pub_key = 
init_account_priv_key.get_public_key(); boost::program_options::variables_map options; @@ -80,7 +85,7 @@ database_fixture::database_fixture() genesis_state.initial_timestamp = time_point_sec( GRAPHENE_TESTING_GENESIS_TIMESTAMP ); genesis_state.initial_active_witnesses = 10; - for( int i = 0; i < genesis_state.initial_active_witnesses; ++i ) + for( unsigned int i = 0; i < genesis_state.initial_active_witnesses; ++i ) { auto name = "init"+fc::to_string(i); genesis_state.initial_accounts.emplace_back(name, @@ -91,19 +96,153 @@ database_fixture::database_fixture() genesis_state.initial_witness_candidates.push_back({name, init_account_priv_key.get_public_key()}); } genesis_state.initial_parameters.current_fees->zero_all_fees(); + + genesis_state_type::initial_asset_type init_mpa1; + init_mpa1.symbol = "INITMPA"; + init_mpa1.issuer_name = "committee-account"; + init_mpa1.description = "Initial MPA"; + init_mpa1.precision = 4; + init_mpa1.max_supply = GRAPHENE_MAX_SHARE_SUPPLY; + init_mpa1.accumulated_fees = 0; + init_mpa1.is_bitasset = true; + // TODO add initial UIA's; add initial short positions; test non-zero accumulated_fees + genesis_state.initial_assets.push_back( init_mpa1 ); + open_database(); - // app.initialize(); - ahplugin->plugin_set_app(&app); - ahplugin->plugin_initialize(options); + /** + * Test specific settings + */ + auto current_test_name = boost::unit_test::framework::current_test_case().p_name.value; + auto current_test_suite_id = boost::unit_test::framework::current_test_case().p_parent_id; + if (current_test_name == "get_account_history_operations") + { + options.insert(std::make_pair("max-ops-per-account", boost::program_options::variable_value((uint64_t)75, false))); + } + if (current_test_name == "api_limit_get_account_history_operations") + { + options.insert(std::make_pair("max-ops-per-account", boost::program_options::variable_value((uint64_t)125, false))); + options.insert(std::make_pair("api-limit-get-account-history-operations", boost::program_options::variable_value((uint64_t)300, false))); + options.insert(std::make_pair("plugins", boost::program_options::variable_value(string("account_history"), false))); + } + if(current_test_name =="api_limit_get_account_history") + { + options.insert(std::make_pair("max-ops-per-account", boost::program_options::variable_value((uint64_t)125, false))); + options.insert(std::make_pair("api-limit-get-account-history", boost::program_options::variable_value((uint64_t)250, false))); + options.insert(std::make_pair("plugins", boost::program_options::variable_value(string("account_history"), false))); + } + if(current_test_name =="api_limit_get_grouped_limit_orders") + { + options.insert(std::make_pair("api-limit-get-grouped-limit-orders", boost::program_options::variable_value((uint64_t)250, false))); + options.insert(std::make_pair("plugins", boost::program_options::variable_value(string("grouped_orders"), false))); + } + if(current_test_name =="api_limit_get_relative_account_history") + { + options.insert(std::make_pair("max-ops-per-account", boost::program_options::variable_value((uint64_t)125, false))); + options.insert(std::make_pair("api-limit-get-relative-account-history", boost::program_options::variable_value((uint64_t)250, false))); + options.insert(std::make_pair("plugins", boost::program_options::variable_value(string("account_history"), false))); + } + if(current_test_name =="api_limit_get_account_history_by_operations") + { + options.insert(std::make_pair("api-limit-get-account-history-by-operations", 
boost::program_options::variable_value((uint64_t)250, false))); + options.insert(std::make_pair("api-limit-get-relative-account-history", boost::program_options::variable_value((uint64_t)250, false))); + options.insert(std::make_pair("plugins", boost::program_options::variable_value(string("account_history"), false))); + } + if(current_test_name =="api_limit_get_asset_holders") + { + options.insert(std::make_pair("api-limit-get-asset-holders", boost::program_options::variable_value((uint64_t)250, false))); + options.insert(std::make_pair("plugins", boost::program_options::variable_value(string("account_history"), false))); + } + if(current_test_name =="api_limit_get_key_references") + { + options.insert(std::make_pair("api-limit-get-key-references", boost::program_options::variable_value((uint64_t)200, false))); + options.insert(std::make_pair("plugins", boost::program_options::variable_value(string("account_history"), false))); + } + // add account tracking for ahplugin for special test case with track-account enabled + if( !options.count("track-account") && current_test_name == "track_account") { + std::vector track_account; + std::string track = "\"1.2.17\""; + track_account.push_back(track); + options.insert(std::make_pair("track-account", boost::program_options::variable_value(track_account, false))); + options.insert(std::make_pair("partial-operations", boost::program_options::variable_value(true, false))); + } + // account tracking 2 accounts + if( !options.count("track-account") && current_test_name == "track_account2") { + std::vector track_account; + std::string track = "\"1.2.0\""; + track_account.push_back(track); + track = "\"1.2.16\""; + track_account.push_back(track); + options.insert(std::make_pair("track-account", boost::program_options::variable_value(track_account, false))); + } + // standby votes tracking + if( boost::unit_test::framework::current_test_case().p_name.value == "track_votes_witnesses_disabled" || + boost::unit_test::framework::current_test_case().p_name.value == "track_votes_committee_disabled") { + app.chain_database()->enable_standby_votes_tracking( false ); + } + if(current_test_name == "elasticsearch_account_history" || current_test_name == "elasticsearch_suite") { + auto esplugin = app.register_plugin(); + esplugin->plugin_set_app(&app); + + options.insert(std::make_pair("elasticsearch-node-url", boost::program_options::variable_value(string("http://localhost:9200/"), false))); + options.insert(std::make_pair("elasticsearch-bulk-replay", boost::program_options::variable_value(uint32_t(2), false))); + options.insert(std::make_pair("elasticsearch-bulk-sync", boost::program_options::variable_value(uint32_t(2), false))); + options.insert(std::make_pair("elasticsearch-visitor", boost::program_options::variable_value(true, false))); + //options.insert(std::make_pair("elasticsearch-basic-auth", boost::program_options::variable_value(string("elastic:changeme"), false))); + + esplugin->plugin_initialize(options); + esplugin->plugin_startup(); + } + else if( boost::unit_test::framework::get(current_test_suite_id).p_name.value != "performance_tests" ) + { + auto ahplugin = app.register_plugin(); + ahplugin->plugin_set_app(&app); + ahplugin->plugin_initialize(options); + ahplugin->plugin_startup(); + if (current_test_name == "api_limit_get_account_history_operations" || current_test_name == "api_limit_get_account_history" + || current_test_name == "api_limit_get_grouped_limit_orders" || current_test_name == "api_limit_get_relative_account_history" + || 
current_test_name == "api_limit_get_account_history_by_operations" || current_test_name =="api_limit_get_asset_holders" + || current_test_name =="api_limit_get_key_references") + { + app.initialize(graphene::utilities::temp_directory_path(), options); + app.set_api_limit(); + } + } + + if(current_test_name == "elasticsearch_objects" || current_test_name == "elasticsearch_suite") { + auto esobjects_plugin = app.register_plugin(); + esobjects_plugin->plugin_set_app(&app); + + options.insert(std::make_pair("es-objects-elasticsearch-url", boost::program_options::variable_value(string("http://localhost:9200/"), false))); + options.insert(std::make_pair("es-objects-bulk-replay", boost::program_options::variable_value(uint32_t(2), false))); + options.insert(std::make_pair("es-objects-bulk-sync", boost::program_options::variable_value(uint32_t(2), false))); + options.insert(std::make_pair("es-objects-proposals", boost::program_options::variable_value(true, false))); + options.insert(std::make_pair("es-objects-accounts", boost::program_options::variable_value(true, false))); + options.insert(std::make_pair("es-objects-assets", boost::program_options::variable_value(true, false))); + options.insert(std::make_pair("es-objects-balances", boost::program_options::variable_value(true, false))); + options.insert(std::make_pair("es-objects-limit-orders", boost::program_options::variable_value(true, false))); + options.insert(std::make_pair("es-objects-asset-bitasset", boost::program_options::variable_value(true, false))); + + esobjects_plugin->plugin_initialize(options); + esobjects_plugin->plugin_startup(); + } + + options.insert(std::make_pair("bucket-size", boost::program_options::variable_value(string("[15]"),false))); mhplugin->plugin_set_app(&app); mhplugin->plugin_initialize(options); - ahplugin->plugin_startup(); + goplugin->plugin_set_app(&app); + goplugin->plugin_initialize(options); + mhplugin->plugin_startup(); + goplugin->plugin_startup(); generate_block(); + asset_id_type mpa1_id(1); + BOOST_REQUIRE( mpa1_id(db).is_market_issued() ); + BOOST_CHECK( mpa1_id(db).bitasset_data(db).asset_id == mpa1_id ); + set_expiration( db, trx ); } catch ( const fc::exception& e ) { @@ -115,20 +254,24 @@ database_fixture::database_fixture() } database_fixture::~database_fixture() -{ try { - // If we're unwinding due to an exception, don't do any more checks. - // This way, boost test's last checkpoint tells us approximately where the error was. - if( !std::uncaught_exception() ) - { - verify_asset_supplies(db); - verify_account_history_plugin_index(); - BOOST_CHECK( db.get_node_properties().skip_flags == database::skip_nothing ); +{ + try { + // If we're unwinding due to an exception, don't do any more checks. + // This way, boost test's last checkpoint tells us approximately where the error was. + if( !std::uncaught_exception() ) + { + verify_asset_supplies(db); + BOOST_CHECK( db.get_node_properties().skip_flags == database::skip_nothing ); + } + return; + } catch (fc::exception& ex) { + BOOST_FAIL( std::string("fc::exception in ~database_fixture: ") + ex.to_detail_string() ); + } catch (std::exception& e) { + BOOST_FAIL( std::string("std::exception in ~database_fixture:") + e.what() ); + } catch (...) 
{ + BOOST_FAIL( "Uncaught exception in ~database_fixture" ); } - - if( data_dir ) - db.close(); - return; -} FC_CAPTURE_AND_RETHROW() } +} fc::ecc::private_key database_fixture::generate_private_key(string seed) { @@ -151,18 +294,21 @@ void database_fixture::verify_asset_supplies( const database& db ) const asset_dynamic_data_object& core_asset_data = db.get_core_asset().dynamic_asset_data_id(db); BOOST_CHECK(core_asset_data.fee_pool == 0); - const simple_index& statistics_index = db.get_index_type>(); - const auto& balance_index = db.get_index_type().indices(); + const auto& statistics_index = db.get_index_type().indices(); + const auto& acct_balance_index = db.get_index_type().indices(); const auto& settle_index = db.get_index_type().indices(); + const auto& bids = db.get_index_type().indices(); map total_balances; map total_debts; share_type core_in_orders; share_type reported_core_in_orders; - for( const account_balance_object& b : balance_index ) + for( const account_balance_object& b : acct_balance_index ) total_balances[b.asset_type] += b.balance; for( const force_settlement_object& s : settle_index ) total_balances[s.balance.asset_id] += s.balance.amount; + for( const collateral_bid_object& b : bids ) + total_balances[b.inv_swan_price.base.asset_id] += b.inv_swan_price.base.amount; for( const account_statistics_object& a : statistics_index ) { reported_core_in_orders += a.total_core_in_orders; @@ -174,6 +320,7 @@ void database_fixture::verify_asset_supplies( const database& db ) if( for_sale.asset_id == asset_id_type() ) core_in_orders += for_sale.amount; total_balances[for_sale.asset_id] += for_sale.amount; total_balances[asset_id_type()] += o.deferred_fee; + total_balances[o.deferred_paid_fee.asset_id] += o.deferred_paid_fee.amount; } for( const call_order_object& o : db.get_index_type().indices() ) { @@ -184,20 +331,22 @@ void database_fixture::verify_asset_supplies( const database& db ) } for( const asset_object& asset_obj : db.get_index_type().indices() ) { - total_balances[asset_obj.id] += asset_obj.dynamic_asset_data_id(db).accumulated_fees; - if( asset_obj.id != asset_id_type() ) - BOOST_CHECK_EQUAL(total_balances[asset_obj.id].value, asset_obj.dynamic_asset_data_id(db).current_supply.value); - total_balances[asset_id_type()] += asset_obj.dynamic_asset_data_id(db).fee_pool; + const auto& dasset_obj = asset_obj.dynamic_asset_data_id(db); + total_balances[asset_obj.id] += dasset_obj.accumulated_fees; + total_balances[asset_id_type()] += dasset_obj.fee_pool; if( asset_obj.is_market_issued() ) { const auto& bad = asset_obj.bitasset_data(db); total_balances[bad.options.short_backing_asset] += bad.settlement_fund; } + total_balances[asset_obj.id] += dasset_obj.confidential_supply.value; } for( const vesting_balance_object& vbo : db.get_index_type< vesting_balance_index >().indices() ) total_balances[ vbo.balance.asset_id ] += vbo.balance.amount; for( const fba_accumulator_object& fba : db.get_index_type< simple_index< fba_accumulator_object > >() ) total_balances[ asset_id_type() ] += fba.accumulated_fba_fees; + for( const balance_object& bo : db.get_index_type< balance_index >().indices() ) + total_balances[ bo.balance.asset_id ] += bo.balance.amount; total_balances[asset_id_type()] += db.get_dynamic_global_properties().witness_budget; @@ -206,100 +355,20 @@ void database_fixture::verify_asset_supplies( const database& db ) BOOST_CHECK_EQUAL(item.first(db).dynamic_asset_data_id(db).current_supply.value, item.second.value); } - BOOST_CHECK_EQUAL( core_in_orders.value , 
reported_core_in_orders.value ); - BOOST_CHECK_EQUAL( total_balances[asset_id_type()].value , core_asset_data.current_supply.value - core_asset_data.confidential_supply.value); -// wlog("*** End asset supply verification ***"); -} - -void database_fixture::verify_account_history_plugin_index( )const -{ - return; - if( skip_key_index_test ) - return; - - const std::shared_ptr pin = - app.get_plugin("account_history"); - if( pin->tracked_accounts().size() == 0 ) + for( const asset_object& asset_obj : db.get_index_type().indices() ) { - /* - vector< pair< account_id_type, address > > tuples_from_db; - const auto& primary_account_idx = db.get_index_type().indices().get(); - flat_set< public_key_type > acct_addresses; - acct_addresses.reserve( 2 * GRAPHENE_DEFAULT_MAX_AUTHORITY_MEMBERSHIP + 2 ); - - for( const account_object& acct : primary_account_idx ) - { - account_id_type account_id = acct.id; - acct_addresses.clear(); - for( const pair< account_id_type, weight_type >& auth : acct.owner.account_auths ) - { - if( auth.first.type() == key_object_type ) - acct_addresses.insert( auth.first ); - } - for( const pair< object_id_type, weight_type >& auth : acct.active.auths ) - { - if( auth.first.type() == key_object_type ) - acct_addresses.insert( auth.first ); - } - acct_addresses.insert( acct.options.get_memo_key()(db).key_address() ); - for( const address& addr : acct_addresses ) - tuples_from_db.emplace_back( account_id, addr ); - } - - vector< pair< account_id_type, address > > tuples_from_index; - tuples_from_index.reserve( tuples_from_db.size() ); - const auto& key_account_idx = - db.get_index_type() - .indices().get(); - - for( const graphene::account_history::key_account_object& key_account : key_account_idx ) - { - address addr = key_account.key; - for( const account_id_type& account_id : key_account.account_ids ) - tuples_from_index.emplace_back( account_id, addr ); - } - - // TODO: use function for common functionality - { - // due to hashed index, account_id's may not be in sorted order... 
- std::sort( tuples_from_db.begin(), tuples_from_db.end() ); - size_t size_before_uniq = tuples_from_db.size(); - auto last = std::unique( tuples_from_db.begin(), tuples_from_db.end() ); - tuples_from_db.erase( last, tuples_from_db.end() ); - // but they should be unique (multiple instances of the same - // address within an account should have been de-duplicated - // by the flat_set above) - BOOST_CHECK( tuples_from_db.size() == size_before_uniq ); - } - - { - // (address, account) should be de-duplicated by flat_set<> - // in key_account_object - std::sort( tuples_from_index.begin(), tuples_from_index.end() ); - auto last = std::unique( tuples_from_index.begin(), tuples_from_index.end() ); - size_t size_before_uniq = tuples_from_db.size(); - tuples_from_index.erase( last, tuples_from_index.end() ); - BOOST_CHECK( tuples_from_index.size() == size_before_uniq ); - } - - //BOOST_CHECK_EQUAL( tuples_from_db, tuples_from_index ); - bool is_equal = true; - is_equal &= (tuples_from_db.size() == tuples_from_index.size()); - for( size_t i=0,n=tuples_from_db.size(); ipath(), [this]{return genesis_state;}); + db.open(data_dir->path(), [this]{return genesis_state;}, "test"); } } @@ -320,20 +389,25 @@ void database_fixture::generate_blocks( uint32_t block_count ) generate_block(); } -void database_fixture::generate_blocks(fc::time_point_sec timestamp, bool miss_intermediate_blocks, uint32_t skip) +uint32_t database_fixture::generate_blocks(fc::time_point_sec timestamp, bool miss_intermediate_blocks, uint32_t skip) { if( miss_intermediate_blocks ) { generate_block(skip); auto slots_to_miss = db.get_slot_at_time(timestamp); if( slots_to_miss <= 1 ) - return; + return 1; --slots_to_miss; generate_block(skip, init_account_priv_key, slots_to_miss); - return; + return 2; } + uint32_t blocks = 0; while( db.head_block_time() < timestamp ) + { generate_block(skip); + ++blocks; + } + return blocks; } account_create_operation database_fixture::make_account( @@ -428,7 +502,9 @@ const asset_object& database_fixture::create_bitasset( const string& name, account_id_type issuer /* = GRAPHENE_WITNESS_ACCOUNT */, uint16_t market_fee_percent /* = 100 */ /* 1% */, - uint16_t flags /* = charge_market_fee */ + uint16_t flags /* = charge_market_fee */, + uint16_t precision /* = GRAPHENE_BLOCKCHAIN_PRECISION_DIGITS */, + asset_id_type backing_asset /* = CORE */ ) { try { asset_create_operation creator; @@ -436,17 +512,18 @@ const asset_object& database_fixture::create_bitasset( creator.fee = asset(); creator.symbol = name; creator.common_options.max_supply = GRAPHENE_MAX_SHARE_SUPPLY; - creator.precision = 2; + creator.precision = precision; creator.common_options.market_fee_percent = market_fee_percent; if( issuer == GRAPHENE_WITNESS_ACCOUNT ) flags |= witness_fed_asset; creator.common_options.issuer_permissions = flags; creator.common_options.flags = flags & ~global_settle; - creator.common_options.core_exchange_rate = price({asset(1,asset_id_type(1)),asset(1)}); + creator.common_options.core_exchange_rate = price(asset(1,asset_id_type(1)),asset(1)); creator.bitasset_opts = bitasset_options(); + creator.bitasset_opts->short_backing_asset = backing_asset; trx.operations.push_back(std::move(creator)); trx.validate(); - processed_transaction ptx = db.push_transaction(trx, ~0); + processed_transaction ptx = PUSH_TX(db, trx, ~0); trx.operations.clear(); return db.get(ptx.operation_results[0].get()); } FC_CAPTURE_AND_RETHROW( (name)(flags) ) } @@ -455,7 +532,9 @@ const asset_object& database_fixture::create_prediction_market( 
const string& name, account_id_type issuer /* = GRAPHENE_WITNESS_ACCOUNT */, uint16_t market_fee_percent /* = 100 */ /* 1% */, - uint16_t flags /* = charge_market_fee */ + uint16_t flags /* = charge_market_fee */, + uint16_t precision /* = 2, which seems arbitrary, but historically chosen */, + asset_id_type backing_asset /* = CORE */ ) { try { asset_create_operation creator; @@ -463,22 +542,24 @@ const asset_object& database_fixture::create_prediction_market( creator.fee = asset(); creator.symbol = name; creator.common_options.max_supply = GRAPHENE_MAX_SHARE_SUPPLY; - creator.precision = GRAPHENE_BLOCKCHAIN_PRECISION_DIGITS; + creator.precision = precision; creator.common_options.market_fee_percent = market_fee_percent; creator.common_options.issuer_permissions = flags | global_settle; creator.common_options.flags = flags & ~global_settle; if( issuer == GRAPHENE_WITNESS_ACCOUNT ) creator.common_options.flags |= witness_fed_asset; - creator.common_options.core_exchange_rate = price({asset(1,asset_id_type(1)),asset(1)}); + creator.common_options.core_exchange_rate = price(asset(1,asset_id_type(1)),asset(1)); creator.bitasset_opts = bitasset_options(); + creator.bitasset_opts->short_backing_asset = backing_asset; creator.is_prediction_market = true; trx.operations.push_back(std::move(creator)); trx.validate(); - processed_transaction ptx = db.push_transaction(trx, ~0); + processed_transaction ptx = PUSH_TX(db, trx, ~0); trx.operations.clear(); return db.get(ptx.operation_results[0].get()); } FC_CAPTURE_AND_RETHROW( (name)(flags) ) } + const asset_object& database_fixture::create_user_issued_asset( const string& name ) { asset_create_operation creator; @@ -487,26 +568,27 @@ const asset_object& database_fixture::create_user_issued_asset( const string& na creator.symbol = name; creator.common_options.max_supply = 0; creator.precision = 2; - creator.common_options.core_exchange_rate = price({asset(1,asset_id_type(1)),asset(1)}); + creator.common_options.core_exchange_rate = price(asset(1,asset_id_type(1)),asset(1)); creator.common_options.max_supply = GRAPHENE_MAX_SHARE_SUPPLY; creator.common_options.flags = charge_market_fee; creator.common_options.issuer_permissions = charge_market_fee; trx.operations.push_back(std::move(creator)); trx.validate(); - processed_transaction ptx = db.push_transaction(trx, ~0); + processed_transaction ptx = PUSH_TX(db, trx, ~0); trx.operations.clear(); return db.get(ptx.operation_results[0].get()); } -const asset_object& database_fixture::create_user_issued_asset( const string& name, const account_object& issuer, uint16_t flags ) +const asset_object& database_fixture::create_user_issued_asset( const string& name, const account_object& issuer, uint16_t flags, + const price& core_exchange_rate, uint16_t precision) { asset_create_operation creator; creator.issuer = issuer.id; creator.fee = asset(); creator.symbol = name; creator.common_options.max_supply = 0; - creator.precision = 2; - creator.common_options.core_exchange_rate = price({asset(1,asset_id_type(1)),asset(1)}); + creator.precision = precision; + creator.common_options.core_exchange_rate = core_exchange_rate; creator.common_options.max_supply = GRAPHENE_MAX_SHARE_SUPPLY; creator.common_options.flags = flags; creator.common_options.issuer_permissions = flags; @@ -514,7 +596,7 @@ const asset_object& database_fixture::create_user_issued_asset( const string& na trx.operations.push_back(std::move(creator)); set_expiration( db, trx ); trx.validate(); - processed_transaction ptx = db.push_transaction(trx, ~0); + 
processed_transaction ptx = PUSH_TX(db, trx, ~0); trx.operations.clear(); return db.get(ptx.operation_results[0].get()); } @@ -527,7 +609,7 @@ void database_fixture::issue_uia( const account_object& recipient, asset amount op.asset_to_issue = amount; op.issue_to_account = recipient.id; trx.operations.push_back(op); - db.push_transaction( trx, ~0 ); + PUSH_TX( db, trx, ~0 ); trx.operations.clear(); } @@ -559,7 +641,7 @@ void database_fixture::change_fees( new_fees.scale = new_scale; chain_parameters new_chain_params = current_chain_params; - new_chain_params.current_fees = new_fees; + new_chain_params.current_fees = std::make_shared(new_fees); db.modify(db.get_global_properties(), [&](global_property_object& p) { p.parameters = new_chain_params; @@ -573,7 +655,7 @@ const account_object& database_fixture::create_account( { trx.operations.push_back(make_account(name, key)); trx.validate(); - processed_transaction ptx = db.push_transaction(trx, ~0); + processed_transaction ptx = PUSH_TX(db, trx, ~0); auto& result = db.get(ptx.operation_results[0].get()); trx.operations.clear(); return result; @@ -592,7 +674,7 @@ const account_object& database_fixture::create_account( trx.operations.resize(1); trx.operations.back() = (make_account(name, registrar, referrer, referrer_percent, key)); trx.validate(); - auto r = db.push_transaction(trx, ~0); + auto r = PUSH_TX(db, trx, ~0); const auto& result = db.get(r.operation_results[0].get()); trx.operations.clear(); return result; @@ -624,8 +706,7 @@ const account_object& database_fixture::create_account( trx.validate(); - processed_transaction ptx = db.push_transaction(trx, ~0); - //wdump( (ptx) ); + processed_transaction ptx = PUSH_TX(db, trx, ~0); const account_object& result = db.get(ptx.operation_results[0].get()); trx.operations.clear(); return result; @@ -639,29 +720,47 @@ const committee_member_object& database_fixture::create_committee_member( const op.committee_member_account = owner.id; trx.operations.push_back(op); trx.validate(); - processed_transaction ptx = db.push_transaction(trx, ~0); + processed_transaction ptx = PUSH_TX(db, trx, ~0); trx.operations.clear(); return db.get(ptx.operation_results[0].get()); } -const witness_object&database_fixture::create_witness(account_id_type owner, const fc::ecc::private_key& signing_private_key) +const witness_object&database_fixture::create_witness(account_id_type owner, + const fc::ecc::private_key& signing_private_key, + uint32_t skip_flags ) { - return create_witness(owner(db), signing_private_key); + return create_witness(owner(db), signing_private_key, skip_flags ); } const witness_object& database_fixture::create_witness( const account_object& owner, - const fc::ecc::private_key& signing_private_key ) + const fc::ecc::private_key& signing_private_key, + uint32_t skip_flags ) { try { witness_create_operation op; op.witness_account = owner.id; op.block_signing_key = signing_private_key.get_public_key(); trx.operations.push_back(op); trx.validate(); - processed_transaction ptx = db.push_transaction(trx, ~0); + processed_transaction ptx = PUSH_TX(db, trx, skip_flags ); trx.clear(); return db.get(ptx.operation_results[0].get()); } FC_CAPTURE_AND_RETHROW() } +const worker_object& database_fixture::create_worker( const account_id_type owner, const share_type daily_pay, const fc::microseconds& duration ) +{ try { + worker_create_operation op; + op.owner = owner; + op.daily_pay = daily_pay; + op.initializer = burn_worker_initializer(); + op.work_begin_date = db.head_block_time(); + op.work_end_date = 
op.work_begin_date + duration; + trx.operations.push_back(op); + trx.validate(); + processed_transaction ptx = PUSH_TX(db, trx, ~0); + trx.clear(); + return db.get(ptx.operation_results[0].get()); +} FC_CAPTURE_AND_RETHROW() } + uint64_t database_fixture::fund( const account_object& account, const asset& amount /* = asset(500000) */ @@ -681,27 +780,30 @@ digest_type database_fixture::digest( const transaction& tx ) return tx.digest(); } -const limit_order_object*database_fixture::create_sell_order(account_id_type user, const asset& amount, const asset& recv) +const limit_order_object*database_fixture::create_sell_order(account_id_type user, const asset& amount, const asset& recv, + const time_point_sec order_expiration, + const price& fee_core_exchange_rate ) { - auto r = create_sell_order(user(db), amount, recv); + auto r = create_sell_order( user(db), amount, recv, order_expiration, fee_core_exchange_rate ); verify_asset_supplies(db); return r; } -const limit_order_object* database_fixture::create_sell_order( const account_object& user, const asset& amount, const asset& recv ) +const limit_order_object* database_fixture::create_sell_order( const account_object& user, const asset& amount, const asset& recv, + const time_point_sec order_expiration, + const price& fee_core_exchange_rate ) { - //wdump((amount)(recv)); limit_order_create_operation buy_order; buy_order.seller = user.id; buy_order.amount_to_sell = amount; buy_order.min_to_receive = recv; + buy_order.expiration = order_expiration; trx.operations.push_back(buy_order); - for( auto& op : trx.operations ) db.current_fee_schedule().set_fee(op); + for( auto& op : trx.operations ) db.current_fee_schedule().set_fee(op, fee_core_exchange_rate); trx.validate(); - auto processed = db.push_transaction(trx, ~0); + auto processed = PUSH_TX(db, trx, ~0); trx.operations.clear(); verify_asset_supplies(db); - //wdump((processed)); return db.find( processed.operation_results[0].get() ); } @@ -713,7 +815,7 @@ asset database_fixture::cancel_limit_order( const limit_order_object& order ) trx.operations.push_back(cancel_order); for( auto& op : trx.operations ) db.current_fee_schedule().set_fee(op); trx.validate(); - auto processed = db.push_transaction(trx, ~0); + auto processed = PUSH_TX(db, trx, ~0); trx.operations.clear(); verify_asset_supplies(db); return processed.operation_results[0].get(); @@ -749,7 +851,7 @@ void database_fixture::transfer( for( auto& op : trx.operations ) db.current_fee_schedule().set_fee(op); } trx.validate(); - db.push_transaction(trx, ~0); + PUSH_TX(db, trx, ~0); verify_asset_supplies(db); trx.operations.clear(); } FC_CAPTURE_AND_RETHROW( (from.id)(to.id)(amount)(fee) ) @@ -767,7 +869,7 @@ void database_fixture::update_feed_producers( const asset_object& mia, flat_set< for( auto& op : trx.operations ) db.current_fee_schedule().set_fee(op); trx.validate(); - db.push_transaction(trx, ~0); + PUSH_TX(db, trx, ~0); trx.operations.clear(); verify_asset_supplies(db); } FC_CAPTURE_AND_RETHROW( (mia)(producers) ) } @@ -787,11 +889,43 @@ void database_fixture::publish_feed( const asset_object& mia, const account_obje for( auto& op : trx.operations ) db.current_fee_schedule().set_fee(op); trx.validate(); - db.push_transaction(trx, ~0); + PUSH_TX(db, trx, ~0); trx.operations.clear(); verify_asset_supplies(db); } +/*** + * @brief helper method to add a price feed + * + * Adds a price feed for asset2, pushes the transaction, and generates the block + * + * @param fixture the database_fixture + * @param publisher who is publishing the 
feed + * @param asset1 the base asset + * @param amount1 the amount of the base asset + * @param asset2 the quote asset + * @param amount2 the amount of the quote asset + * @param core_id id of core (helps with core_exchange_rate) + */ +void database_fixture::publish_feed(const account_id_type& publisher, + const asset_id_type& asset1, int64_t amount1, + const asset_id_type& asset2, int64_t amount2, + const asset_id_type& core_id) +{ + const asset_object& a1 = asset1(db); + const asset_object& a2 = asset2(db); + const asset_object& core = core_id(db); + asset_publish_feed_operation op; + op.publisher = publisher; + op.asset_id = asset2; + op.feed.settlement_price = ~price(a1.amount(amount1),a2.amount(amount2)); + op.feed.core_exchange_rate = ~price(core.amount(amount1), a2.amount(amount2)); + trx.operations.push_back(std::move(op)); + PUSH_TX( db, trx, ~0); + generate_block(); + trx.clear(); +} + void database_fixture::force_global_settle( const asset_object& what, const price& p ) { try { set_expiration( db, trx ); @@ -803,7 +937,7 @@ void database_fixture::force_global_settle( const asset_object& what, const pric trx.operations.push_back(sop); for( auto& op : trx.operations ) db.current_fee_schedule().set_fee(op); trx.validate(); - db.push_transaction(trx, ~0); + PUSH_TX(db, trx, ~0); trx.operations.clear(); verify_asset_supplies(db); } FC_CAPTURE_AND_RETHROW( (what)(p) ) } @@ -818,25 +952,27 @@ operation_result database_fixture::force_settle( const account_object& who, asse trx.operations.push_back(sop); for( auto& op : trx.operations ) db.current_fee_schedule().set_fee(op); trx.validate(); - processed_transaction ptx = db.push_transaction(trx, ~0); + processed_transaction ptx = PUSH_TX(db, trx, ~0); const operation_result& op_result = ptx.operation_results.front(); trx.operations.clear(); verify_asset_supplies(db); return op_result; } FC_CAPTURE_AND_RETHROW( (who)(what) ) } -const call_order_object* database_fixture::borrow(const account_object& who, asset what, asset collateral) +const call_order_object* database_fixture::borrow( const account_object& who, asset what, asset collateral, + optional target_cr ) { try { set_expiration( db, trx ); trx.operations.clear(); - call_order_update_operation update; + call_order_update_operation update = {}; update.funding_account = who.id; update.delta_collateral = collateral; update.delta_debt = what; + update.extensions.value.target_collateral_ratio = target_cr; trx.operations.push_back(update); for( auto& op : trx.operations ) db.current_fee_schedule().set_fee(op); trx.validate(); - db.push_transaction(trx, ~0); + PUSH_TX(db, trx, ~0); trx.operations.clear(); verify_asset_supplies(db); @@ -847,23 +983,40 @@ const call_order_object* database_fixture::borrow(const account_object& who, ass if( itr != call_idx.end() ) call_obj = &*itr; return call_obj; -} FC_CAPTURE_AND_RETHROW( (who.name)(what)(collateral) ) } +} FC_CAPTURE_AND_RETHROW( (who.name)(what)(collateral)(target_cr) ) } -void database_fixture::cover(const account_object& who, asset what, asset collateral) +void database_fixture::cover(const account_object& who, asset what, asset collateral, optional target_cr) { try { set_expiration( db, trx ); trx.operations.clear(); - call_order_update_operation update; + call_order_update_operation update = {}; update.funding_account = who.id; update.delta_collateral = -collateral; update.delta_debt = -what; + update.extensions.value.target_collateral_ratio = target_cr; trx.operations.push_back(update); for( auto& op : trx.operations ) 
db.current_fee_schedule().set_fee(op); trx.validate(); - db.push_transaction(trx, ~0); + PUSH_TX(db, trx, ~0); trx.operations.clear(); verify_asset_supplies(db); -} FC_CAPTURE_AND_RETHROW( (who.name)(what)(collateral) ) } +} FC_CAPTURE_AND_RETHROW( (who.name)(what)(collateral)(target_cr) ) } + +void database_fixture::bid_collateral(const account_object& who, const asset& to_bid, const asset& to_cover) +{ try { + set_expiration( db, trx ); + trx.operations.clear(); + bid_collateral_operation bid; + bid.bidder = who.id; + bid.additional_collateral = to_bid; + bid.debt_covered = to_cover; + trx.operations.push_back(bid); + for( auto& op : trx.operations ) db.current_fee_schedule().set_fee(op); + trx.validate(); + PUSH_TX(db, trx, ~0); + trx.operations.clear(); + verify_asset_supplies(db); +} FC_CAPTURE_AND_RETHROW( (who.name)(to_bid)(to_cover) ) } void database_fixture::fund_fee_pool( const account_object& from, const asset_object& asset_to_fund, const share_type amount ) { @@ -875,7 +1028,8 @@ void database_fixture::fund_fee_pool( const account_object& from, const asset_ob for( auto& op : trx.operations ) db.current_fee_schedule().set_fee(op); trx.validate(); - db.push_transaction(trx, ~0); + set_expiration( db, trx ); + PUSH_TX(db, trx, ~0); trx.operations.clear(); verify_asset_supplies(db); } @@ -884,7 +1038,7 @@ void database_fixture::enable_fees() { db.modify(global_property_id_type()(db), [](global_property_object& gpo) { - gpo.parameters.current_fees = fee_schedule::get_default(); + gpo.parameters.current_fees = std::make_shared(fee_schedule::get_default()); }); } @@ -902,7 +1056,7 @@ void database_fixture::upgrade_to_lifetime_member( const account_object& account op.upgrade_to_lifetime_member = true; op.fee = db.get_global_properties().parameters.current_fees->calculate_fee(op); trx.operations = {op}; - db.push_transaction(trx, ~0); + PUSH_TX(db, trx, ~0); FC_ASSERT( op.account_to_upgrade(db).is_lifetime_member() ); trx.clear(); verify_asset_supplies(db); @@ -922,7 +1076,7 @@ void database_fixture::upgrade_to_annual_member(const account_object& account) op.account_to_upgrade = account.get_id(); op.fee = db.get_global_properties().parameters.current_fees->calculate_fee(op); trx.operations = {op}; - db.push_transaction(trx, ~0); + PUSH_TX(db, trx, ~0); FC_ASSERT( op.account_to_upgrade(db).is_member(db.head_block_time()) ); trx.clear(); verify_asset_supplies(db); @@ -1054,6 +1208,24 @@ vector< operation_history_object > database_fixture::get_operation_history( acco return result; } +vector< graphene::market_history::order_history_object > database_fixture::get_market_order_history( asset_id_type a, asset_id_type b )const +{ + const auto& history_idx = db.get_index_type().indices().get(); + graphene::market_history::history_key hkey; + if( a > b ) std::swap(a,b); + hkey.base = a; + hkey.quote = b; + hkey.sequence = std::numeric_limits::min(); + auto itr = history_idx.lower_bound( hkey ); + vector result; + while( itr != history_idx.end()) + { + result.push_back( *itr ); + ++itr; + } + return result; +} + namespace test { void set_expiration( const database& db, transaction& tx ) @@ -1071,7 +1243,7 @@ bool _push_block( database& db, const signed_block& b, uint32_t skip_flags /* = processed_transaction _push_transaction( database& db, const signed_transaction& tx, uint32_t skip_flags /* = 0 */ ) { try { - auto pt = db.push_transaction( tx, skip_flags ); + auto pt = db.push_transaction( precomputable_transaction(tx), skip_flags ); database_fixture::verify_asset_supplies(db); return pt; } 
FC_CAPTURE_AND_RETHROW((tx)) } diff --git a/tests/common/database_fixture.hpp b/tests/common/database_fixture.hpp index ec5e9bd7d7..dac219d69e 100644 --- a/tests/common/database_fixture.hpp +++ b/tests/common/database_fixture.hpp @@ -25,10 +25,11 @@ #include #include +#include #include -#include #include +#include #include @@ -105,6 +106,29 @@ extern uint32_t GRAPHENE_TESTING_GENESIS_TIMESTAMP; #define REQUIRE_OP_VALIDATION_FAILURE( op, field, value ) \ REQUIRE_OP_VALIDATION_FAILURE_2( op, field, value, fc::exception ) +#define REQUIRE_EXCEPTION_WITH_TEXT(op, exc_text) \ +{ \ + try \ + { \ + op; \ + BOOST_FAIL(std::string("Expected an exception with \"") + \ + std::string(exc_text) + \ + std::string("\" but none thrown")); \ + } \ + catch (fc::exception& ex) \ + { \ + std::string what = ex.to_string( \ + fc::log_level(fc::log_level::all)); \ + if (what.find(exc_text) == std::string::npos) \ + { \ + BOOST_FAIL( std::string("Expected \"") + \ + std::string(exc_text) + \ + std::string("\" but got \"") + \ + std::string(what) ); \ + } \ + } \ +} \ + #define REQUIRE_THROW_WITH_VALUE_2(op, field, value, exc_type) \ { \ auto bak = op.field; \ @@ -126,17 +150,18 @@ extern uint32_t GRAPHENE_TESTING_GENESIS_TIMESTAMP; #define PREP_ACTOR(name) \ fc::ecc::private_key name ## _private_key = generate_private_key(BOOST_PP_STRINGIZE(name)); \ - public_key_type name ## _public_key = name ## _private_key.get_public_key(); + graphene::chain::public_key_type name ## _public_key = name ## _private_key.get_public_key(); \ + BOOST_CHECK( name ## _public_key != public_key_type() ); #define ACTOR(name) \ PREP_ACTOR(name) \ const auto& name = create_account(BOOST_PP_STRINGIZE(name), name ## _public_key); \ - account_id_type name ## _id = name.id; (void)name ## _id; + graphene::chain::account_id_type name ## _id = name.id; (void)name ## _id; #define GET_ACTOR(name) \ fc::ecc::private_key name ## _private_key = generate_private_key(BOOST_PP_STRINGIZE(name)); \ const account_object& name = get_account(BOOST_PP_STRINGIZE(name)); \ - account_id_type name ## _id = name.id; \ + graphene::chain::account_id_type name ## _id = name.id; \ (void)name ##_id #define ACTORS_IMPL(r, data, elem) ACTOR(elem) @@ -144,6 +169,12 @@ extern uint32_t GRAPHENE_TESTING_GENESIS_TIMESTAMP; namespace graphene { namespace chain { +class clearable_block : public signed_block { +public: + /** @brief Clears internal cached values like ID, signing key, Merkle root etc. 
*/ + void clear(); +}; + struct database_fixture { // the reason we use an app is to exercise the indexes of built-in // plugins @@ -167,7 +198,6 @@ struct database_fixture { static fc::ecc::private_key generate_private_key(string seed); string generate_anon_acct_name(); static void verify_asset_supplies( const database& db ); - void verify_account_history_plugin_index( )const; void open_database(); signed_block generate_block(uint32_t skip = ~0, const fc::ecc::private_key& key = generate_private_key("null_key"), @@ -182,8 +212,9 @@ struct database_fixture { /** * @brief Generates blocks until the head block time matches or exceeds timestamp * @param timestamp target time to generate blocks until + * @return number of blocks generated */ - void generate_blocks(fc::time_point_sec timestamp, bool miss_intermediate_blocks = true, uint32_t skip = ~0); + uint32_t generate_blocks(fc::time_point_sec timestamp, bool miss_intermediate_blocks = true, uint32_t skip = ~0); account_create_operation make_account( const std::string& name = "nathan", @@ -207,28 +238,58 @@ struct database_fixture { void update_feed_producers(const asset_object& mia, flat_set producers); void publish_feed(asset_id_type mia, account_id_type by, const price_feed& f) { publish_feed(mia(db), by(db), f); } + + /*** + * @brief helper method to add a price feed + * + * Adds a price feed for asset2, pushes the transaction, and generates the block + * + * @param publisher who is publishing the feed + * @param asset1 the base asset + * @param amount1 the amount of the base asset + * @param asset2 the quote asset + * @param amount2 the amount of the quote asset + * @param core_id id of core (helps with core_exchange_rate) + */ + void publish_feed(const account_id_type& publisher, + const asset_id_type& asset1, int64_t amount1, + const asset_id_type& asset2, int64_t amount2, + const asset_id_type& core_id); + void publish_feed(const asset_object& mia, const account_object& by, const price_feed& f); - const call_order_object* borrow(account_id_type who, asset what, asset collateral) - { return borrow(who(db), what, collateral); } - const call_order_object* borrow(const account_object& who, asset what, asset collateral); - void cover(account_id_type who, asset what, asset collateral_freed) - { cover(who(db), what, collateral_freed); } - void cover(const account_object& who, asset what, asset collateral_freed); + + const call_order_object* borrow( account_id_type who, asset what, asset collateral, + optional target_cr = {} ) + { return borrow(who(db), what, collateral, target_cr); } + const call_order_object* borrow( const account_object& who, asset what, asset collateral, + optional target_cr = {} ); + void cover(account_id_type who, asset what, asset collateral_freed, + optional target_cr = {} ) + { cover(who(db), what, collateral_freed, target_cr); } + void cover(const account_object& who, asset what, asset collateral_freed, + optional target_cr = {} ); + void bid_collateral(const account_object& who, const asset& to_bid, const asset& to_cover); const asset_object& get_asset( const string& symbol )const; const account_object& get_account( const string& name )const; const asset_object& create_bitasset(const string& name, account_id_type issuer = GRAPHENE_WITNESS_ACCOUNT, uint16_t market_fee_percent = 100 /*1%*/, - uint16_t flags = charge_market_fee); + uint16_t flags = charge_market_fee, + uint16_t precision = 2, + asset_id_type backing_asset = {}); const asset_object& create_prediction_market(const string& name, account_id_type issuer 
= GRAPHENE_WITNESS_ACCOUNT, uint16_t market_fee_percent = 100 /*1%*/, - uint16_t flags = charge_market_fee); + uint16_t flags = charge_market_fee, + uint16_t precision = GRAPHENE_BLOCKCHAIN_PRECISION_DIGITS, + asset_id_type backing_asset = {}); const asset_object& create_user_issued_asset( const string& name ); const asset_object& create_user_issued_asset( const string& name, const account_object& issuer, - uint16_t flags ); + uint16_t flags, + const price& core_exchange_rate = price(asset(1, asset_id_type(1)), asset(1)), + uint16_t precision = 2 /* traditional precision for tests */); void issue_uia( const account_object& recipient, asset amount ); void issue_uia( account_id_type recipient_id, asset amount ); @@ -255,14 +316,21 @@ struct database_fixture { const committee_member_object& create_committee_member( const account_object& owner ); const witness_object& create_witness(account_id_type owner, - const fc::ecc::private_key& signing_private_key = generate_private_key("null_key")); + const fc::ecc::private_key& signing_private_key = generate_private_key("null_key"), + uint32_t skip_flags = ~0); const witness_object& create_witness(const account_object& owner, - const fc::ecc::private_key& signing_private_key = generate_private_key("null_key")); + const fc::ecc::private_key& signing_private_key = generate_private_key("null_key"), + uint32_t skip_flags = ~0); + const worker_object& create_worker(account_id_type owner, const share_type daily_pay = 1000, const fc::microseconds& duration = fc::days(2)); uint64_t fund( const account_object& account, const asset& amount = asset(500000) ); digest_type digest( const transaction& tx ); void sign( signed_transaction& trx, const fc::ecc::private_key& key ); - const limit_order_object* create_sell_order( account_id_type user, const asset& amount, const asset& recv ); - const limit_order_object* create_sell_order( const account_object& user, const asset& amount, const asset& recv ); + const limit_order_object* create_sell_order( account_id_type user, const asset& amount, const asset& recv, + const time_point_sec order_expiration = time_point_sec::maximum(), + const price& fee_core_exchange_rate = price::unit_price() ); + const limit_order_object* create_sell_order( const account_object& user, const asset& amount, const asset& recv, + const time_point_sec order_expiration = time_point_sec::maximum(), + const price& fee_core_exchange_rate = price::unit_price() ); asset cancel_limit_order( const limit_order_object& order ); void transfer( account_id_type from, account_id_type to, const asset& amount, const asset& fee = asset() ); void transfer( const account_object& from, const account_object& to, const asset& amount, const asset& fee = asset() ); @@ -281,6 +349,7 @@ struct database_fixture { int64_t get_balance( account_id_type account, asset_id_type a )const; int64_t get_balance( const account_object& account, const asset_object& a )const; vector< operation_history_object > get_operation_history( account_id_type account_id )const; + vector< graphene::market_history::order_history_object > get_market_order_history( asset_id_type a, asset_id_type b )const; }; namespace test { diff --git a/tests/common/genesis_file_util.hpp b/tests/common/genesis_file_util.hpp new file mode 100644 index 0000000000..a87d9585af --- /dev/null +++ b/tests/common/genesis_file_util.hpp @@ -0,0 +1,46 @@ +#pragma once + +///////// +/// @brief forward declaration, using as a hack to generate a genesis.json file +/// for testing +///////// +namespace graphene { namespace app { 
namespace detail { + graphene::chain::genesis_state_type create_example_genesis(); +} } } // graphene::app::detail + +///////// +/// @brief create a genesis_json file +/// @param directory the directory to place the file "genesis.json" +/// @returns the full path to the file +//////// +boost::filesystem::path create_genesis_file(fc::temp_directory& directory) { + boost::filesystem::path genesis_path = boost::filesystem::path{directory.path().generic_string()} / "genesis.json"; + fc::path genesis_out = genesis_path; + graphene::chain::genesis_state_type genesis_state = graphene::app::detail::create_example_genesis(); + + /* Work In Progress: Place some accounts in the Genesis file so as to pre-make some accounts to play with + std::string test_prefix = "test"; + // helper lambda + auto get_test_key = [&]( std::string prefix, uint32_t i ) -> public_key_type + { + return fc::ecc::private_key::regenerate( fc::sha256::hash( test_prefix + prefix + std::to_string(i) ) ).get_public_key(); + }; + + // create 2 accounts to use + for (int i = 1; i <= 2; ++i ) + { + genesis_state_type::initial_account_type dev_account( + test_prefix + std::to_string(i), + get_test_key("owner-", i), + get_test_key("active-", i), + false); + + genesis_state.initial_accounts.push_back(dev_account); + // give her some coin + + } + */ + + fc::json::save_to_file(genesis_state, genesis_out); + return genesis_path; +} diff --git a/tests/elasticsearch/main.cpp b/tests/elasticsearch/main.cpp new file mode 100644 index 0000000000..33f8b11db9 --- /dev/null +++ b/tests/elasticsearch/main.cpp @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2018 oxarbitrage and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include +#include +#include + +#include + +#include "../common/database_fixture.hpp" + +#define BOOST_TEST_MODULE Elastic Search Database Tests +#include + +using namespace graphene::chain; +using namespace graphene::chain::test; +using namespace graphene::app; + +BOOST_FIXTURE_TEST_SUITE( elasticsearch_tests, database_fixture ) + +BOOST_AUTO_TEST_CASE(elasticsearch_account_history) { + try { + + CURL *curl; // curl handler + curl = curl_easy_init(); + + graphene::utilities::ES es; + es.curl = curl; + es.elasticsearch_url = "http://localhost:9200/"; + es.index_prefix = "bitshares-"; + //es.auth = "elastic:changeme"; + + // delete all first + auto delete_account_history = graphene::utilities::deleteAll(es); + fc::usleep(fc::milliseconds(1000)); // this is because index.refresh_interval, nothing to worry + + if(delete_account_history) { // all records deleted + + //account_id_type() do 3 ops + create_bitasset("USD", account_id_type()); + auto dan = create_account("dan"); + auto bob = create_account("bob"); + + generate_block(); + fc::usleep(fc::milliseconds(1000)); + + // for later use + //int asset_create_op_id = operation::tag::value; + //int account_create_op_id = operation::tag::value; + + string query = "{ \"query\" : { \"bool\" : { \"must\" : [{\"match_all\": {}}] } } }"; + es.endpoint = es.index_prefix + "*/data/_count"; + es.query = query; + + auto res = graphene::utilities::simpleQuery(es); + variant j = fc::json::from_string(res); + auto total = j["count"].as_string(); + BOOST_CHECK_EQUAL(total, "5"); + + es.endpoint = es.index_prefix + "*/data/_search"; + res = graphene::utilities::simpleQuery(es); + j = fc::json::from_string(res); + auto first_id = j["hits"]["hits"][size_t(0)]["_id"].as_string(); + BOOST_CHECK_EQUAL(first_id, "2.9.1"); // this should be 0? are they inserted in the right order? 
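// Expected-count arithmetic for the checks in this test (a hedged reading, assuming the
// plugin stores roughly one ElasticSearch document per involved account per operation):
//   5  = 1 (asset create, committee account only) + 2 (create dan: registrar + new account) + 2 (create bob)
//   7  = 5 + 2 for creating willie below
//   13 = 7 + 6, since each of the three transfers below touches two accounts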
+ + generate_block(); + auto willie = create_account("willie"); + generate_block(); + + fc::usleep(fc::milliseconds(1000)); // index.refresh_interval + + es.endpoint = es.index_prefix + "*/data/_count"; + res = graphene::utilities::simpleQuery(es); + j = fc::json::from_string(res); + + total = j["count"].as_string(); + BOOST_CHECK_EQUAL(total, "7"); + + // do some transfers in 1 block + transfer(account_id_type()(db), bob, asset(100)); + transfer(account_id_type()(db), bob, asset(200)); + transfer(account_id_type()(db), bob, asset(300)); + + generate_block(); + fc::usleep(fc::milliseconds(1000)); // index.refresh_interval + + res = graphene::utilities::simpleQuery(es); + j = fc::json::from_string(res); + + total = j["count"].as_string(); + BOOST_CHECK_EQUAL(total, "13"); + + // check the visitor data + auto block_date = db.head_block_time(); + std::string index_name = graphene::utilities::generateIndexName(block_date, "bitshares-"); + + es.endpoint = index_name + "/data/2.9.12"; // we know last op is a transfer of amount 300 + res = graphene::utilities::getEndPoint(es); + j = fc::json::from_string(res); + auto last_transfer_amount = j["_source"]["additional_data"]["transfer_data"]["amount"].as_string(); + BOOST_CHECK_EQUAL(last_transfer_amount, "300"); + } + } + catch (fc::exception &e) { + edump((e.to_detail_string())); + throw; + } +} + +BOOST_AUTO_TEST_CASE(elasticsearch_objects) { + try { + + CURL *curl; // curl handler + curl = curl_easy_init(); + + graphene::utilities::ES es; + es.curl = curl; + es.elasticsearch_url = "http://localhost:9200/"; + es.index_prefix = "objects-"; + //es.auth = "elastic:changeme"; + + // delete all first + auto delete_objects = graphene::utilities::deleteAll(es); + + generate_block(); + fc::usleep(fc::milliseconds(1000)); + + if(delete_objects) { // all records deleted + + // asset and bitasset + create_bitasset("USD", account_id_type()); + generate_block(); + fc::usleep(fc::milliseconds(1000)); + + string query = "{ \"query\" : { \"bool\" : { \"must\" : [{\"match_all\": {}}] } } }"; + es.endpoint = es.index_prefix + "*/data/_count"; + es.query = query; + + auto res = graphene::utilities::simpleQuery(es); + variant j = fc::json::from_string(res); + auto total = j["count"].as_string(); + BOOST_CHECK_EQUAL(total, "2"); + + es.endpoint = es.index_prefix + "asset/data/_search"; + res = graphene::utilities::simpleQuery(es); + j = fc::json::from_string(res); + auto first_id = j["hits"]["hits"][size_t(0)]["_source"]["symbol"].as_string(); + BOOST_CHECK_EQUAL(first_id, "USD"); + + auto bitasset_data_id = j["hits"]["hits"][size_t(0)]["_source"]["bitasset_data_id"].as_string(); + es.endpoint = es.index_prefix + "bitasset/data/_search"; + es.query = "{ \"query\" : { \"bool\": { \"must\" : [{ \"term\": { \"object_id\": \""+bitasset_data_id+"\"}}] } } }"; + res = graphene::utilities::simpleQuery(es); + j = fc::json::from_string(res); + auto bitasset_object_id = j["hits"]["hits"][size_t(0)]["_source"]["object_id"].as_string(); + BOOST_CHECK_EQUAL(bitasset_object_id, bitasset_data_id); + } + } + catch (fc::exception &e) { + edump((e.to_detail_string())); + throw; + } +} + +BOOST_AUTO_TEST_CASE(elasticsearch_suite) { + try { + + CURL *curl; // curl handler + curl = curl_easy_init(); + + graphene::utilities::ES es; + es.curl = curl; + es.elasticsearch_url = "http://localhost:9200/"; + es.index_prefix = "bitshares-"; + auto delete_account_history = graphene::utilities::deleteAll(es); + fc::usleep(fc::milliseconds(1000)); + es.index_prefix = "objects-"; + auto delete_objects 
= graphene::utilities::deleteAll(es); + fc::usleep(fc::milliseconds(1000)); + + if(delete_account_history && delete_objects) { // all records deleted + + + } + } + catch (fc::exception &e) { + edump((e.to_detail_string())); + throw; + } +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/generate_empty_blocks/main.cpp b/tests/generate_empty_blocks/main.cpp index 1b45340d1a..721747eef2 100644 --- a/tests/generate_empty_blocks/main.cpp +++ b/tests/generate_empty_blocks/main.cpp @@ -30,10 +30,8 @@ #include #include #include -#include #include -#include #include #include @@ -102,7 +100,7 @@ int main( int argc, char** argv ) std::cerr << "embed_genesis: Reading genesis from file " << genesis_json_filename.preferred_string() << "\n"; std::string genesis_json; read_file_contents( genesis_json_filename, genesis_json ); - genesis = fc::json::from_string( genesis_json ).as< genesis_state_type >(); + genesis = fc::json::from_string( genesis_json ).as< genesis_state_type >(20); } else genesis = graphene::app::detail::create_example_genesis(); @@ -119,12 +117,11 @@ int main( int argc, char** argv ) uint32_t num_blocks = options["num-blocks"].as(); uint32_t miss_rate = options["miss-rate"].as(); - fc::ecc::private_key init_account_priv_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("null_key")) ); fc::ecc::private_key nathan_priv_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("nathan"))); database db; fc::path db_path = data_dir / "db"; - db.open(db_path, [&]() { return genesis; } ); + db.open(db_path, [&]() { return genesis; }, "TEST" ); uint32_t slot = 1; uint32_t missed = 0; diff --git a/tests/intense/api_stress.py b/tests/intense/api_stress.py old mode 100755 new mode 100644 index 3aa1f2d3c8..d454f16cbf --- a/tests/intense/api_stress.py +++ b/tests/intense/api_stress.py @@ -72,4 +72,4 @@ def peek_random_account(): time.sleep(2) for pid in child_procs: - os.kill(pid, signal.SIGKILL) +os.kill(pid, signal.SIGKILL) diff --git a/tests/intense/block_tests.cpp b/tests/intense/block_tests.cpp deleted file mode 100644 index aaabf4e635..0000000000 --- a/tests/intense/block_tests.cpp +++ /dev/null @@ -1,412 +0,0 @@ -/* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ - -#include -#include - -#include - -#include -#include -#include -#include -#include - -#include - -#include "../common/database_fixture.hpp" - -using namespace graphene::chain; - -BOOST_AUTO_TEST_SUITE(block_tests) - -BOOST_FIXTURE_TEST_CASE( update_account_keys, database_fixture ) -{ - try - { - const asset_object& core = asset_id_type()(db); - uint32_t skip_flags = - database::skip_transaction_dupe_check - | database::skip_witness_signature - | database::skip_transaction_signatures - | database::skip_authority_check - ; - - // Sam is the creator of accounts - private_key_type committee_key = init_account_priv_key; - private_key_type sam_key = generate_private_key("sam"); - - // - // A = old key set - // B = new key set - // - // we measure how many times we test following four cases: - // - // A-B B-A - // alice case_count[0] A == B empty empty - // bob case_count[1] A < B empty nonempty - // charlie case_count[2] B < A nonempty empty - // dan case_count[3] A nc B nonempty nonempty - // - // and assert that all four cases were tested at least once - // - account_object sam_account_object = create_account( "sam", sam_key ); - - //Get a sane head block time - generate_block( skip_flags ); - - db.modify(db.get_global_properties(), [](global_property_object& p) { - p.parameters.committee_proposal_review_period = fc::hours(1).to_seconds(); - }); - - transaction tx; - processed_transaction ptx; - - account_object committee_account_object = committee_account(db); - // transfer from committee account to Sam account - transfer(committee_account_object, sam_account_object, core.amount(100000)); - - const int num_keys = 5; - vector< private_key_type > numbered_private_keys; - vector< vector< public_key_type > > numbered_key_id; - numbered_private_keys.reserve( num_keys ); - numbered_key_id.push_back( vector() ); - numbered_key_id.push_back( vector() ); - - for( int i=0; i > possible_key_sched; - const int num_key_sched = (1 << num_keys)-1; - possible_key_sched.reserve( num_key_sched ); - - for( int s=1; s<=num_key_sched; s++ ) - { - vector< int > v; - int i = 0; - v.reserve( num_keys ); - while( v.size() < num_keys ) - { - if( s & (1 << i) ) - v.push_back( i ); - i++; - if( i >= num_keys ) - i = 0; - } - possible_key_sched.push_back( v ); - } - - // we can only undo in blocks - generate_block( skip_flags ); - - std::cout << "update_account_keys: this test will take a few minutes...\n"; - for( int use_addresses=0; use_addresses<2; use_addresses++ ) - { - vector< public_key_type > key_ids = numbered_key_id[ use_addresses ]; - for( int num_owner_keys=1; num_owner_keys<=2; num_owner_keys++ ) - { - for( int num_active_keys=1; num_active_keys<=2; num_active_keys++ ) - { - std::cout << use_addresses << num_owner_keys << num_active_keys << "\n"; - for( const vector< int >& key_sched_before : possible_key_sched ) - { - auto it = key_sched_before.begin(); - vector< const private_key_type* > owner_privkey; - vector< const public_key_type* > owner_keyid; - owner_privkey.reserve( num_owner_keys ); - - trx.clear(); - account_create_operation create_op; - create_op.name = "alice"; - - for( int owner_index=0; owner_index(); - - generate_block( skip_flags ); - for( const vector< int >& key_sched_after : possible_key_sched ) - { - auto it = key_sched_after.begin(); - - trx.clear(); - account_update_operation update_op; - update_op.account = alice_account_id; - update_op.owner = authority(); - update_op.active = authority(); - update_op.new_options = create_op.options; - - for( int owner_index=0; 
owner_indexkey_auths[ key_ids[ *(it++) ] ] = 1; - // size() < num_owner_keys is possible when some keys are duplicates - update_op.owner->weight_threshold = update_op.owner->key_auths.size(); - for( int active_index=0; active_indexkey_auths[ key_ids[ *(it++) ] ] = 1; - // size() < num_active_keys is possible when some keys are duplicates - update_op.active->weight_threshold = update_op.active->key_auths.size(); - FC_ASSERT( update_op.new_options.valid() ); - update_op.new_options->memo_key = key_ids[ *(it++) ] ; - - trx.operations.push_back( update_op ); - for( int i=0; i> 1; - - vector< witness_id_type > cur_round; - vector< witness_id_type > full_schedule; - // if we make the maximum witness count testable, - // we'll need to enlarge this. - std::bitset< 0x40 > witness_seen; - size_t total_blocks = 1000000; - - cur_round.reserve( num_witnesses ); - full_schedule.reserve( total_blocks ); - cur_round.push_back( db.get_dynamic_global_properties().current_witness ); - - // we assert so the test doesn't continue, which would - // corrupt memory - assert( num_witnesses <= witness_seen.size() ); - - while( full_schedule.size() < total_blocks ) - { - if( (db.head_block_num() & 0x3FFF) == 0 ) - { - wdump( (db.head_block_num()) ); - } - witness_id_type wid = db.get_scheduled_witness( 1 ); - full_schedule.push_back( wid ); - cur_round.push_back( wid ); - if( cur_round.size() == num_witnesses ) - { - // check that the current round contains exactly 1 copy - // of each witness - witness_seen.reset(); - for( const witness_id_type& w : cur_round ) - { - uint64_t inst = w.instance.value; - BOOST_CHECK( !witness_seen.test( inst ) ); - assert( !witness_seen.test( inst ) ); - witness_seen.set( inst ); - } - cur_round.clear(); - } - generate_block(); - } - - for( size_t i=0,m=full_schedule.size(); i`` + + +100k TX/s +--------- + +``tests/performance_test -t performance_tests/one_hundred_k_benchmark`` + +This test will create 200,000 accounts, make two transfers from each account, +then create an asset and issue tokens to each account, for a total of one +million operations. + +Different operation types have different execution times, but on fairly modern +off-the-shelf hardware an average of 100,000 transactions per second should be +achieved. + +Signature verification +---------------------- + +``tests/performance_test -t performance_tests/sigcheck_benchmark`` + +This suite pre-creates 100,000 signatures and then measures how long it takes +to verify them. Results vary depending on CPU type and clockspeed, but should be +somewhere between 5,000 and 20,000 per second. 
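For reference, the one-million-operation total above breaks down as 200,000 account
creations, 400,000 transfers, 200,000 asset creations and 200,000 issuances. The
signature benchmark is also easy to reproduce outside the test harness; the sketch
below (a standalone approximation, with fc header paths assumed rather than taken
from this patch) times the same public-key recovery loop and prints the rate::

    #include <fc/crypto/elliptic.hpp>
    #include <fc/crypto/sha256.hpp>
    #include <fc/time.hpp>
    #include <iostream>

    int main()
    {
       const auto key    = fc::ecc::private_key::generate();
       const auto digest = fc::sha256::hash( "hello" );
       const auto sig    = key.sign_compact( digest );

       const uint64_t cycles = 100000;
       const auto start = fc::time_point::now();
       for( uint64_t i = 0; i < cycles; ++i )
          fc::ecc::public_key( sig, digest );              // recovering the key verifies the signature
       const auto elapsed = fc::time_point::now() - start; // fc::microseconds

       std::cout << ( cycles * 1000000 ) / elapsed.count() << " signatures/s" << std::endl;
       return 0;
    }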
diff --git a/tests/performance/performance_tests.cpp b/tests/performance/performance_tests.cpp index e7d0c98b1f..ae8a94fd77 100644 --- a/tests/performance/performance_tests.cpp +++ b/tests/performance/performance_tests.cpp @@ -24,7 +24,6 @@ #include #include -#include #include #include @@ -33,11 +32,14 @@ #include #include + #include "../common/database_fixture.hpp" +#include +#include using namespace graphene::chain; -//BOOST_FIXTURE_TEST_SUITE( performance_tests, database_fixture ) +BOOST_FIXTURE_TEST_SUITE( performance_tests, database_fixture ) BOOST_AUTO_TEST_CASE( sigcheck_benchmark ) { @@ -45,35 +47,168 @@ BOOST_AUTO_TEST_CASE( sigcheck_benchmark ) auto digest = fc::sha256::hash("hello"); auto sig = nathan_key.sign_compact( digest ); auto start = fc::time_point::now(); - for( uint32_t i = 0; i < 100000; ++i ) - auto pub = fc::ecc::public_key( sig, digest ); + const uint64_t cycles = 100000; + for( uint32_t i = 0; i < cycles; ++i ) + fc::ecc::public_key( sig, digest ); auto end = fc::time_point::now(); auto elapsed = end-start; - wdump( ((100000.0*1000000.0) / elapsed.count()) ); + wlog( "Benchmark: verify ${sps} signatures/s", ("sps",(cycles*1000000)/elapsed.count()) ); } -/* -BOOST_AUTO_TEST_CASE( transfer_benchmark ) -{ - fc::ecc::private_key nathan_key = fc::ecc::private_key::generate(); - const key_object& key = register_key(nathan_key.get_public_key()); + +// See https://bitshares.org/blog/2015/06/08/measuring-performance/ +// (note this is not the original test mentioned in the above post, but was +// recreated later according to the description) +BOOST_AUTO_TEST_CASE( one_hundred_k_benchmark ) +{ try { + ACTORS( (alice) ); + fund( alice, asset(10000000) ); + db._undo_db.disable(); // Blog post mentions replay, this implies no undo + + const fc::ecc::private_key nathan_key = fc::ecc::private_key::generate(); + const fc::ecc::public_key nathan_pub = nathan_key.get_public_key();; const auto& committee_account = account_id_type()(db); - auto start = fc::time_point::now(); - for( uint32_t i = 0; i < 1000*1000; ++i ) + + const uint64_t cycles = 200000; + uint64_t total_time = 0; + uint64_t total_count = 0; + std::vector accounts; + accounts.reserve( cycles+1 ); + std::vector assets; + assets.reserve( cycles ); + + std::vector transactions; + transactions.reserve( cycles ); + { - const auto& a = create_account("a"+fc::to_string(i), key.id); - transfer( committee_account, a, asset(1000) ); + account_create_operation aco; + aco.name = "a1"; + aco.registrar = committee_account.id; + aco.owner = authority( 1, public_key_type(nathan_pub), 1 ); + aco.active = authority( 1, public_key_type(nathan_pub), 1 ); + aco.options.memo_key = nathan_pub; + aco.options.voting_account = GRAPHENE_PROXY_TO_SELF_ACCOUNT; + aco.options.num_committee = 0; + aco.options.num_witness = 0; + aco.fee = db.current_fee_schedule().calculate_fee( aco ); + trx.clear(); + test::set_expiration( db, trx ); + for( uint32_t i = 0; i < cycles; ++i ) + { + aco.name = "a" + fc::to_string(i); + trx.operations.push_back( aco ); + transactions.push_back( trx ); + trx.operations.clear(); + ++total_count; + } + + auto start = fc::time_point::now(); + for( uint32_t i = 0; i < cycles; ++i ) + { + auto result = db.apply_transaction( transactions[i], ~0 ); + accounts[i] = result.operation_results[0].get(); + } + auto end = fc::time_point::now(); + auto elapsed = end - start; + total_time += elapsed.count(); + wlog( "Create ${aps} accounts/s over ${total}ms", + ("aps",(cycles*1000000)/elapsed.count())("total",elapsed.count()/1000) ); } 
- auto end = fc::time_point::now(); - auto elapsed = end - start; - wdump( (elapsed) ); -} -*/ -//BOOST_AUTO_TEST_SUITE_END() + { + accounts[cycles] = accounts[0]; + transfer_operation to1; + to1.from = committee_account.id; + to1.amount = asset( 1000000 ); + to1.fee = asset( 10 ); + transfer_operation to2; + to2.amount = asset( 100 ); + to2.fee = asset( 10 ); + for( uint32_t i = 0; i < cycles; ++i ) + { + to1.to = accounts[i]; + to2.from = accounts[i]; + to2.to = accounts[i+1]; + trx.operations.push_back( to1 ); + ++total_count; + trx.operations.push_back( to2 ); + ++total_count; + transactions[i] = trx; + trx.operations.clear(); + } + + auto start = fc::time_point::now(); + for( uint32_t i = 0; i < cycles; ++i ) + db.apply_transaction( transactions[i], ~0 ); + auto end = fc::time_point::now(); + auto elapsed = end - start; + total_time += elapsed.count(); + wlog( "${aps} transfers/s over ${total}ms", + ("aps",(2*cycles*1000000)/elapsed.count())("total",elapsed.count()/1000) ); + trx.clear(); + } + + { + asset_create_operation aco; + aco.fee = asset( 100000 ); + aco.common_options.core_exchange_rate = asset( 1 ) / asset( 1, asset_id_type(1) ); + for( uint32_t i = 0; i < cycles; ++i ) + { + aco.issuer = accounts[i]; + aco.symbol = "ASSET" + fc::to_string( i ); + trx.operations.push_back( aco ); + ++total_count; + transactions[i] = trx; + trx.operations.clear(); + } + + auto start = fc::time_point::now(); + for( uint32_t i = 0; i < cycles; ++i ) + { + auto result = db.apply_transaction( transactions[i], ~0 ); + assets[i] = result.operation_results[0].get(); + } + auto end = fc::time_point::now(); + auto elapsed = end - start; + total_time += elapsed.count(); + wlog( "${aps} asset create/s over ${total}ms", + ("aps",(cycles*1000000)/elapsed.count())("total",elapsed.count()/1000) ); + trx.clear(); + } + + { + asset_issue_operation aio; + aio.fee = asset( 10 ); + for( uint32_t i = 0; i < cycles; ++i ) + { + aio.issuer = accounts[i]; + aio.issue_to_account = accounts[i+1]; + aio.asset_to_issue = asset( 10, assets[i] ); + trx.operations.push_back( aio ); + ++total_count; + transactions[i] = trx; + trx.operations.clear(); + } + + auto start = fc::time_point::now(); + for( uint32_t i = 0; i < cycles; ++i ) + db.apply_transaction( transactions[i], ~0 ); + auto end = fc::time_point::now(); + auto elapsed = end - start; + total_time += elapsed.count(); + wlog( "${aps} issuances/s over ${total}ms", + ("aps",(cycles*1000000)/elapsed.count())("total",elapsed.count()/1000) ); + trx.clear(); + } + + wlog( "${total} operations in ${total_time}ms => ${avg} ops/s on average", + ("total",total_count)("total_time",total_time/1000) + ("avg",(total_count*1000000)/total_time) ); + + db._undo_db.enable(); +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_SUITE_END() -//#define BOOST_TEST_MODULE "C++ Unit Tests for Graphene Blockchain Database" -#include -#include #include boost::unit_test::test_suite* init_unit_test_suite(int argc, char* argv[]) { diff --git a/tests/slow_tests/call_order_tests.cpp b/tests/slow_tests/call_order_tests.cpp new file mode 100644 index 0000000000..2095de0ac1 --- /dev/null +++ b/tests/slow_tests/call_order_tests.cpp @@ -0,0 +1,348 @@ +/* + * Copyright (c) 2018 Abit More, and contributors. 
+ * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include +#include +#include + +#include + +#include "../common/database_fixture.hpp" + +using namespace graphene::chain; +using namespace graphene::chain::test; + +BOOST_FIXTURE_TEST_SUITE( call_order_tests, database_fixture ) + +BOOST_AUTO_TEST_CASE( call_order_object_test ) +{ try { + // assume GRAPHENE_COLLATERAL_RATIO_DENOM is 1000 in this test case + BOOST_REQUIRE_EQUAL( 1000, GRAPHENE_COLLATERAL_RATIO_DENOM ); + + // function to create a new call_order_object + auto new_call_obj = []( const share_type c, const share_type d, int16_t mcr, optional tcr = {} ) { + call_order_object o; + o.collateral = c; + o.debt = d; + o.call_price = price::call_price( asset( d, asset_id_type(1)), asset(c) , mcr ); + o.target_collateral_ratio = tcr; + return o; + }; + + // function to validate result of call_order_object::get_max_debt_to_cover(...) 
+ auto validate_result = []( const call_order_object& o, const price& match_price, const price& feed_price, + int16_t mcr, const share_type result, bool print_log = true ) { + if( result == 0 ) + return 1; + + BOOST_REQUIRE_GT( result.value, 0 ); + BOOST_REQUIRE_LE( result.value, o.debt.value ); + + BOOST_REQUIRE( match_price.base.asset_id == o.collateral_type() ); + BOOST_REQUIRE( match_price.quote.asset_id == o.debt_type() ); + BOOST_REQUIRE( feed_price.base.asset_id == o.collateral_type() ); + BOOST_REQUIRE( feed_price.quote.asset_id == o.debt_type() ); + + // should be in margin call territory + price call_price = price::call_price( o.get_debt(), o.get_collateral(), mcr ); + BOOST_CHECK( call_price <= feed_price ); + + if( !o.target_collateral_ratio.valid() ) + { + BOOST_CHECK_EQUAL( result.value, o.debt.value ); + return 2; + } + + auto tcr = *o.target_collateral_ratio; + if( tcr == 0 ) + tcr = 1; + + asset to_cover( result, o.debt_type() ); + asset to_pay = o.get_collateral(); + if( result < o.debt ) + { + to_pay = to_cover.multiply_and_round_up( match_price ); + BOOST_CHECK_LT( to_pay.amount.value, o.collateral.value ); // should cover more on black swan event + BOOST_CHECK_EQUAL( result.value, (to_pay * match_price).amount.value ); // should not change after rounded down debt to cover + + // should have target_cr set + // after sold some collateral, the collateral ratio will be higher than expected + price new_tcr_call_price = price::call_price( o.get_debt() - to_cover, o.get_collateral() - to_pay, tcr ); + price new_mcr_call_price = price::call_price( o.get_debt() - to_cover, o.get_collateral() - to_pay, mcr ); + BOOST_CHECK( new_tcr_call_price > feed_price ); + BOOST_CHECK( new_mcr_call_price > feed_price ); + } + + // if sell less than calculated, the collateral ratio will not be higher than expected + int j = 3; + for( int i = 100000; i >= 10; i /= 10, ++j ) + { + int total_passes = 3; + for( int k = 1; k <= total_passes; ++k ) + { + bool last_check = (k == total_passes); + asset sell_less = to_pay; + asset cover_less; + for( int m = 0; m < k; ++m ) + { + if( i == 100000 ) + sell_less.amount -= 1; + else + sell_less.amount -= ( ( sell_less.amount + i - 1 ) / i ); + cover_less = sell_less * match_price; // round down debt to cover + if( cover_less >= to_cover ) + { + cover_less.amount = to_cover.amount - 1; + sell_less = cover_less * match_price; // round down collateral + cover_less = sell_less * match_price; // round down debt to cover + } + sell_less = cover_less.multiply_and_round_up( match_price ); // round up to get collateral to sell + if( sell_less.amount <= 0 || cover_less.amount <= 0 ) // unable to sell or cover less, we return + { + if( to_pay.amount == o.collateral ) + return j; + return (j + 10); + } + } + BOOST_REQUIRE_LT( cover_less.amount.value, o.debt.value ); + BOOST_REQUIRE_LT( sell_less.amount.value, o.collateral.value ); + price tmp_tcr_call_price = price::call_price( o.get_debt() - cover_less, o.get_collateral() - sell_less, tcr ); + price tmp_mcr_call_price = price::call_price( o.get_debt() - cover_less, o.get_collateral() - sell_less, mcr ); + bool cover_less_is_enough = ( tmp_tcr_call_price > feed_price && tmp_mcr_call_price > feed_price ); + if( !cover_less_is_enough ) + { + if( !last_check ) + continue; + if( to_pay.amount == o.collateral ) + return j; + return (j + 10); + } + if( print_log ) + { + print_log = false; + wlog( "Impefect result >= 1 / ${i}", ("i",i) ); + wdump( 
(o)(match_price)(feed_price)(mcr)(result)(sell_less)(cover_less)(tmp_mcr_call_price)(tmp_tcr_call_price) ); + } + break; + } + } + if( to_pay.amount == o.collateral ) + return j; + return (j + 10); + }; + + // init + int16_t mcr = 1750; + price mp, fp; + call_order_object obj; + int64_t expected; + share_type result; + + mp = price( asset(1100), asset(1000, asset_id_type(1)) ); // match_price + fp = price( asset(1000), asset(1000, asset_id_type(1)) ); // feed_price + + // fixed tests + obj = new_call_obj( 1751, 1000, mcr ); // order is not in margin call territory + expected = 0; + result = obj.get_max_debt_to_cover( mp, fp, mcr ); + BOOST_CHECK_EQUAL( result.value, expected ); + validate_result( obj, mp, fp, mcr, result ); + + obj = new_call_obj( 1751, 1000, mcr, 10000 ); // order is not in margin call territory + expected = 0; + result = obj.get_max_debt_to_cover( mp, fp, mcr ); + BOOST_CHECK_EQUAL( result.value, expected ); + validate_result( obj, mp, fp, mcr, result ); + + obj = new_call_obj( 160, 100, mcr ); // target_cr is not set + expected = 100; + result = obj.get_max_debt_to_cover( mp, fp, mcr ); + BOOST_CHECK_EQUAL( result.value, expected ); + validate_result( obj, mp, fp, mcr, result ); + + obj = new_call_obj( 1009, 1000, mcr, 200 ); // target_cr set, but order is in black swan territory + expected = 1000; + result = obj.get_max_debt_to_cover( mp, fp, mcr ); + BOOST_CHECK_EQUAL( result.value, expected ); + validate_result( obj, mp, fp, mcr, result ); + + obj = new_call_obj( 1499, 999, mcr, 1600 ); // target_cr is 160%, less than 175%, so use 175% + expected = 385; + result = obj.get_max_debt_to_cover( mp, fp, mcr ); + BOOST_CHECK_EQUAL( result.value, expected ); + validate_result( obj, mp, fp, mcr, result ); + + obj = new_call_obj( 1500, 1000, mcr, 1800 ); // target_cr is 180% + expected = 429; + result = obj.get_max_debt_to_cover( mp, fp, mcr ); + BOOST_CHECK_EQUAL( result.value, expected ); + validate_result( obj, mp, fp, mcr, result ); + + obj = new_call_obj( 1501, 1001, mcr, 2000 ); // target_cr is 200% + expected = 558; + result = obj.get_max_debt_to_cover( mp, fp, mcr ); + BOOST_CHECK_EQUAL( result.value, expected ); + validate_result( obj, mp, fp, mcr, result ); + + obj = new_call_obj( 1502, 1002, mcr, 3000 ); // target_cr is 300% + expected = 793; + result = obj.get_max_debt_to_cover( mp, fp, mcr ); + BOOST_CHECK_EQUAL( result.value, expected ); + validate_result( obj, mp, fp, mcr, result ); + + mcr = 1750; + mp = price( asset(40009), asset(79070, asset_id_type(1)) ); // match_price + fp = price( asset(40009), asset(86977, asset_id_type(1)) ); // feed_price + + obj = new_call_obj( 557197, 701502, mcr, 1700 ); // target_cr is less than mcr + result = obj.get_max_debt_to_cover( mp, fp, mcr ); + validate_result( obj, mp, fp, mcr, result ); + + mcr = 1455; + mp = price( asset(1150171), asset(985450, asset_id_type(1)) ); // match_price + fp = price( asset(418244), asset(394180, asset_id_type(1)) ); // feed_price + + obj = new_call_obj( 423536, 302688, mcr, 200 ); // target_cr is less than mcr + result = obj.get_max_debt_to_cover( mp, fp, mcr ); + validate_result( obj, mp, fp, mcr, result ); + + // random tests + std::mt19937_64 gen( time(NULL) ); + std::uniform_int_distribution amt_uid(1, GRAPHENE_MAX_SHARE_SUPPLY); + std::uniform_int_distribution amt_uid2(1, 1000*1000*1000); + std::uniform_int_distribution amt_uid3(1, 1000*1000); + std::uniform_int_distribution amt_uid4(1, 300); + std::uniform_int_distribution mp_num_uid(800, 1100); + std::uniform_int_distribution 
mcr_uid(1001, 32767); + std::uniform_int_distribution mcr_uid2(1001, 3000); + std::uniform_int_distribution tcr_uid(0, 65535); + std::uniform_int_distribution tcr_uid2(0, 3000); + + vector count(20,0); + int total = 500*1000; + for( int i = total; i > 0; --i ) + { + if( i % 9 == 0 ) + mcr = 1002; + else if( i % 3 == 0 ) + mcr = 1750; + else if( i % 3 == 1 ) + mcr = mcr_uid(gen); + else // if( i % 3 == 2 ) + mcr = mcr_uid2(gen); + + // call_object + if( i % 17 <= 0 ) + obj = new_call_obj( amt_uid(gen), amt_uid(gen), mcr, tcr_uid(gen) ); + else if( i % 17 <= 2 ) + obj = new_call_obj( amt_uid2(gen), amt_uid2(gen), mcr, tcr_uid(gen) ); + else if( i % 17 <= 3 ) + obj = new_call_obj( amt_uid3(gen), amt_uid3(gen), mcr, tcr_uid(gen) ); + else if( i % 17 <= 4 ) + obj = new_call_obj( amt_uid4(gen), amt_uid4(gen), mcr, tcr_uid(gen) ); + else if( i % 17 <= 5 ) + obj = new_call_obj( amt_uid(gen), amt_uid(gen), mcr, tcr_uid2(gen) ); + else if( i % 17 <= 7 ) + obj = new_call_obj( amt_uid2(gen), amt_uid2(gen), mcr, tcr_uid2(gen) ); + else if( i % 17 <= 8 ) + obj = new_call_obj( amt_uid3(gen), amt_uid3(gen), mcr, tcr_uid2(gen) ); + else if( i % 17 <= 9 ) + obj = new_call_obj( amt_uid4(gen), amt_uid4(gen), mcr, tcr_uid2(gen) ); + else if( i % 17 <= 11 ) + obj = new_call_obj( amt_uid3(gen), amt_uid2(gen), mcr, tcr_uid2(gen) ); + else if( i % 17 <= 12 ) + obj = new_call_obj( amt_uid2(gen), amt_uid3(gen), mcr, tcr_uid2(gen) ); + else if( i % 17 <= 13 ) + obj = new_call_obj( amt_uid4(gen), amt_uid2(gen), mcr, tcr_uid2(gen) ); + else if( i % 17 <= 14 ) + obj = new_call_obj( amt_uid2(gen), amt_uid4(gen), mcr, tcr_uid2(gen) ); + else if( i % 17 <= 15 ) + obj = new_call_obj( amt_uid3(gen), amt_uid4(gen), mcr, tcr_uid2(gen) ); + else // if( i % 17 <= 16 ) + obj = new_call_obj( amt_uid4(gen), amt_uid3(gen), mcr, tcr_uid2(gen) ); + + // call_price + price cp = price::call_price( obj.get_debt(), obj.get_collateral(), mcr ); + + // get feed_price, and make sure we have sufficient good samples + int retry = 20; + do { + if( i % 5 == 0 ) + fp = price( asset(amt_uid(gen)), asset(amt_uid(gen), asset_id_type(1)) ); + else if( i % 5 == 1 ) + fp = price( asset(amt_uid2(gen)), asset(amt_uid2(gen), asset_id_type(1)) ); + else if( i % 5 == 2 ) + fp = price( asset(amt_uid3(gen)), asset(amt_uid3(gen), asset_id_type(1)) ); + else if( i % 25 <= 18 ) + fp = price( asset(amt_uid4(gen)), asset(amt_uid4(gen), asset_id_type(1)) ); + else if( i % 25 == 19 ) + fp = price( asset(amt_uid2(gen)), asset(amt_uid3(gen), asset_id_type(1)) ); + else if( i % 25 == 20 ) + fp = price( asset(amt_uid3(gen)), asset(amt_uid2(gen), asset_id_type(1)) ); + else if( i % 25 == 21 ) + fp = price( asset(amt_uid3(gen)), asset(amt_uid4(gen), asset_id_type(1)) ); + else if( i % 25 == 22 ) + fp = price( asset(amt_uid4(gen)), asset(amt_uid3(gen), asset_id_type(1)) ); + else if( i % 25 == 23 ) + fp = price( asset(amt_uid4(gen)), asset(amt_uid2(gen), asset_id_type(1)) ); + else // if( i % 25 == 24 ) + fp = price( asset(amt_uid2(gen)), asset(amt_uid4(gen), asset_id_type(1)) ); + --retry; + } while( retry > 0 && ( cp > fp || cp < ( fp / ratio_type( mcr, 1000 ) ) ) ); + + // match_price + if( i % 16 == 0 ) + mp = fp * ratio_type( 1001, 1000 ); + else if( i % 4 == 0 ) + mp = fp * ratio_type( 1100, 1000 ); + else if( i % 4 == 1 ) + mp = fp * ratio_type( mp_num_uid(gen) , 1000 ); + else if( i % 8 == 4 ) + mp = price( asset(amt_uid2(gen)), asset(amt_uid3(gen), asset_id_type(1)) ); + else if( i % 8 == 5 ) + mp = price( asset(amt_uid3(gen)), asset(amt_uid2(gen), 
asset_id_type(1)) ); + else if( i % 8 == 6 ) + mp = price( asset(amt_uid2(gen)), asset(amt_uid2(gen), asset_id_type(1)) ); + else // if( i % 8 == 7 ) + mp = price( asset(amt_uid(gen)), asset(amt_uid(gen), asset_id_type(1)) ); + + try { + result = obj.get_max_debt_to_cover( mp, fp, mcr ); + auto vr = validate_result( obj, mp, fp, mcr, result, false ); + ++count[vr]; + } + catch( fc::assert_exception& e ) + { + BOOST_CHECK( e.to_detail_string().find( "result <= GRAPHENE_MAX_SHARE_SUPPLY" ) != string::npos ); + ++count[0]; + } + } + ilog( "count: [bad_input,sell zero,not set," + " sell full (perfect), sell full (<0.01%), sell full (<0.1%),sell full (<1%), sell full (other), ...," + " sell some (perfect), sell some (<0.01%), sell some (<0.1%),sell some (<1%), sell some (other), ... ]" ); + idump( (total)(count) ); + +} FC_CAPTURE_LOG_AND_RETHROW( (0) ) } + +BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/intense/main.cpp b/tests/slow_tests/main.cpp similarity index 79% rename from tests/intense/main.cpp rename to tests/slow_tests/main.cpp index c337407fbe..405e7c1059 100644 --- a/tests/intense/main.cpp +++ b/tests/slow_tests/main.cpp @@ -25,8 +25,16 @@ #include #include +extern uint32_t GRAPHENE_TESTING_GENESIS_TIMESTAMP; + boost::unit_test::test_suite* init_unit_test_suite(int argc, char* argv[]) { std::srand(time(NULL)); std::cout << "Random number generator seeded to " << time(NULL) << std::endl; + const char* genesis_timestamp_str = getenv("GRAPHENE_TESTING_GENESIS_TIMESTAMP"); + if( genesis_timestamp_str != nullptr ) + { + GRAPHENE_TESTING_GENESIS_TIMESTAMP = std::stoul( genesis_timestamp_str ); + } + std::cout << "GRAPHENE_TESTING_GENESIS_TIMESTAMP is " << GRAPHENE_TESTING_GENESIS_TIMESTAMP << std::endl; return nullptr; } diff --git a/tests/tests/app_util_tests.cpp b/tests/tests/app_util_tests.cpp new file mode 100644 index 0000000000..b85c9a3e8d --- /dev/null +++ b/tests/tests/app_util_tests.cpp @@ -0,0 +1,280 @@ +/* + * Copyright (c) 2018 Abit More, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include + +#include + +#include "../common/database_fixture.hpp" + +using namespace graphene::chain; +using namespace graphene::chain::test; +using namespace graphene::app; + +BOOST_AUTO_TEST_SUITE(app_util_tests) + +BOOST_AUTO_TEST_CASE(uint128_amount_to_string_test) { + + fc::uint128 max_u64( std::numeric_limits::max() ); + fc::uint128 min_gt_u64 = max_u64 + 1; + fc::uint128 one_u128 = max_u64 * 10; + fc::uint128 max_u128 = fc::uint128::max_value(); + //idump( ( uint128_amount_to_string( fc::uint128::max_value(), 0) ) ); + + BOOST_CHECK_EQUAL( uint128_amount_to_string( 0, 0), "0" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1, 0), "1" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 100, 0), "100" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1024000, 0), "1024000" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1234567890, 0), "1234567890" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( max_u64, 0), "18446744073709551615" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( min_gt_u64, 0), "18446744073709551616" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( one_u128, 0), "184467440737095516150" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( max_u128, 0), "340282366920938463463374607431768211455" ); + + BOOST_CHECK_EQUAL( uint128_amount_to_string( 0, 1), "0" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1, 1), "0.1" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 100, 1), "10" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1024000, 1), "102400" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1234567890, 1), "123456789" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( max_u64, 1), "1844674407370955161.5" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( min_gt_u64, 1), "1844674407370955161.6" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( one_u128, 1), "18446744073709551615" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( max_u128, 1), "34028236692093846346337460743176821145.5" ); + + BOOST_CHECK_EQUAL( uint128_amount_to_string( 0, 2), "0" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1, 2), "0.01" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 100, 2), "1" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1024000, 2), "10240" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1234567890, 2), "12345678.9" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( max_u64, 2), "184467440737095516.15" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( min_gt_u64, 2), "184467440737095516.16" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( one_u128, 2), "1844674407370955161.5" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( max_u128, 2), "3402823669209384634633746074317682114.55" ); + + BOOST_CHECK_EQUAL( uint128_amount_to_string( 0, 3), "0" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1, 3), "0.001" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 100, 3), "0.1" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1024000, 3), "1024" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1234567890, 3), "1234567.89" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( max_u64, 3), "18446744073709551.615" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( min_gt_u64, 3), "18446744073709551.616" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( one_u128, 3), "184467440737095516.15" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( max_u128, 3), "340282366920938463463374607431768211.455" ); + + BOOST_CHECK_EQUAL( uint128_amount_to_string( 0, 4), "0" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1, 4), "0.0001" ); + BOOST_CHECK_EQUAL( 
uint128_amount_to_string( 100, 4), "0.01" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1024000, 4), "102.4" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1234567890, 4), "123456.789" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( max_u64, 4), "1844674407370955.1615" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( min_gt_u64, 4), "1844674407370955.1616" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( one_u128, 4), "18446744073709551.615" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( max_u128, 4), "34028236692093846346337460743176821.1455" ); + + BOOST_CHECK_EQUAL( uint128_amount_to_string( 0, 9), "0" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1, 9), "0.000000001" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 100, 9), "0.0000001" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1024000, 9), "0.001024" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1234567890, 9), "1.23456789" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( max_u64, 9), "18446744073.709551615" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( min_gt_u64, 9), "18446744073.709551616" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( one_u128, 9), "184467440737.09551615" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( max_u128, 9), "340282366920938463463374607431.768211455" ); + + BOOST_CHECK_EQUAL( uint128_amount_to_string( 0, 10), "0" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1, 10), "0.0000000001" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 100, 10), "0.00000001" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1024000, 10), "0.0001024" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1234567890, 10), "0.123456789" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( max_u64, 10), "1844674407.3709551615" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( min_gt_u64, 10), "1844674407.3709551616" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( one_u128, 10), "18446744073.709551615" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( max_u128, 10), "34028236692093846346337460743.1768211455" ); + + BOOST_CHECK_EQUAL( uint128_amount_to_string( 0, 19), "0" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1, 19), "0.0000000000000000001" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 100, 19), "0.00000000000000001" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1024000, 19), "0.0000000000001024" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1234567890, 19), "0.000000000123456789" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( max_u64, 19), "1.8446744073709551615" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( min_gt_u64, 19), "1.8446744073709551616" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( one_u128, 19), "18.446744073709551615" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( max_u128, 19), "34028236692093846346.3374607431768211455" ); + + BOOST_CHECK_EQUAL( uint128_amount_to_string( 0, 20), "0" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1, 20), "0.00000000000000000001" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 100, 20), "0.000000000000000001" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1024000, 20), "0.00000000000001024" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1234567890, 20), "0.0000000000123456789" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( max_u64, 20), "0.18446744073709551615" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( min_gt_u64, 20), "0.18446744073709551616" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( one_u128, 20), "1.8446744073709551615" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( max_u128, 
20), "3402823669209384634.63374607431768211455" ); + + BOOST_CHECK_EQUAL( uint128_amount_to_string( 0, 21), "0" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1, 21), "0.000000000000000000001" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 100, 21), "0.0000000000000000001" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1024000, 21), "0.000000000000001024" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1234567890, 21), "0.00000000000123456789" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( max_u64, 21), "0.018446744073709551615" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( min_gt_u64, 21), "0.018446744073709551616" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( one_u128, 21), "0.18446744073709551615" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( max_u128, 21), "340282366920938463.463374607431768211455" ); + + BOOST_CHECK_EQUAL( uint128_amount_to_string( 0, 38), "0" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1, 38), "0.00000000000000000000000000000000000001" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 100, 38), "0.000000000000000000000000000000000001" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1024000, 38), "0.00000000000000000000000000000001024" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1234567890, 38), "0.0000000000000000000000000000123456789" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( max_u64, 38), "0.00000000000000000018446744073709551615" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( min_gt_u64, 38), "0.00000000000000000018446744073709551616" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( one_u128, 38), "0.0000000000000000018446744073709551615" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( max_u128, 38), "3.40282366920938463463374607431768211455" ); + + BOOST_CHECK_EQUAL( uint128_amount_to_string( 0, 39), "0" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1, 39), "0.000000000000000000000000000000000000001" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 100, 39), "0.0000000000000000000000000000000000001" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1024000, 39), "0.000000000000000000000000000000001024" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1234567890, 39), "0.00000000000000000000000000000123456789" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( max_u64, 39), "0.000000000000000000018446744073709551615" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( min_gt_u64, 39), "0.000000000000000000018446744073709551616" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( one_u128, 39), "0.00000000000000000018446744073709551615" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( max_u128, 39), "0.340282366920938463463374607431768211455" ); + + BOOST_CHECK_EQUAL( uint128_amount_to_string( 0, 40), "0" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1, 40), "0.0000000000000000000000000000000000000001" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 100, 40), "0.00000000000000000000000000000000000001" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1024000, 40), "0.0000000000000000000000000000000001024" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1234567890, 40), "0.000000000000000000000000000000123456789" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( max_u64, 40), "0.0000000000000000000018446744073709551615" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( min_gt_u64, 40), "0.0000000000000000000018446744073709551616" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( one_u128, 40), "0.000000000000000000018446744073709551615" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( max_u128, 40), 
"0.0340282366920938463463374607431768211455" ); + + BOOST_CHECK_EQUAL( uint128_amount_to_string( 0, 127), "0" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 1, 127), "0.0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( 100, 127), "0.00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001" ); + BOOST_CHECK_EQUAL( uint128_amount_to_string( max_u128, 127), "0.0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000340282366920938463463374607431768211455" ); + +} + +BOOST_AUTO_TEST_CASE(price_to_string_throws) { + + int64_t m = std::numeric_limits::max(); + int64_t n = -1; + int64_t x = m / 10000; + int64_t y = m / 2; + int64_t z = m - 1; + + int64_t a[11] = {n,0,1,2,3,10,200,x,y,z,m}; + price p[11][11]; + for( int i = 0; i < 11; ++i ) + for( int j = 0; j < 11; ++j ) + p[i][j] = price( asset( a[i] ), asset( a[j] ) ); + + for( int i = 0; i < 11; ++i ) + { + for( int j = 0; j < 11; ++j ) + { + price pr = p[i][j]; + if( i == 0 ) + { + GRAPHENE_REQUIRE_THROW( price_to_string( p[i][j], 0, 0 ), fc::exception ); + } + else if( i == 1 ) + { + BOOST_CHECK_EQUAL( price_to_string( p[i][j], 0, 0 ), "0" ); + BOOST_CHECK_EQUAL( price_to_string( p[i][j], 0, 19 ), "0" ); + BOOST_CHECK_EQUAL( price_to_string( p[i][j], 19, 0 ), "0" ); + BOOST_CHECK_EQUAL( price_to_string( p[i][j], 19, 19 ), "0" ); + BOOST_CHECK_EQUAL( price_to_string( p[i][j], 20, 20 ), "0" ); + } + else + { + GRAPHENE_REQUIRE_THROW( price_to_string( p[i][j], 20, 0 ), fc::exception ); + GRAPHENE_REQUIRE_THROW( price_to_string( p[i][j], 0, 20 ), fc::exception ); + } + try { + if ( pr.base.amount == 0 || (pr.base.amount > 0 && pr.quote.amount >= 0 ) ) + { + // idump( (i) (j) (pr) ); // for debugging + // These should not throw + // TODO: Verify results + BOOST_CHECK( !price_to_string( pr ,0,0).empty() ); + BOOST_CHECK( !price_to_string( pr ,0,1).empty() ); + BOOST_CHECK( !price_to_string( pr ,0,2).empty() ); + BOOST_CHECK( !price_to_string( pr ,0,8).empty() ); + BOOST_CHECK( !price_to_string( pr ,0,19).empty() ); + BOOST_CHECK( !price_to_string( pr ,1,0).empty() ); + BOOST_CHECK( !price_to_string( pr ,1,15).empty() ); + BOOST_CHECK( !price_to_string( pr ,2,6).empty() ); + BOOST_CHECK( !price_to_string( pr ,2,10).empty() ); + BOOST_CHECK( !price_to_string( pr ,5,0).empty() ); + BOOST_CHECK( !price_to_string( pr ,9,1).empty() ); + BOOST_CHECK( !price_to_string( pr ,9,9).empty() ); + BOOST_CHECK( !price_to_string( pr ,9,19).empty() ); + BOOST_CHECK( !price_to_string( pr ,18,10).empty() ); + BOOST_CHECK( !price_to_string( pr ,18,13).empty() ); + BOOST_CHECK( !price_to_string( pr ,18,19).empty() ); + BOOST_CHECK( !price_to_string( pr ,19,0).empty() ); + BOOST_CHECK( !price_to_string( pr ,19,7).empty() ); + BOOST_CHECK( !price_to_string( pr ,19,19).empty() ); + price new_price = p[j][i]; + if (pr.quote.amount >= 0) + BOOST_CHECK( !price_diff_percent_string( pr, new_price ).empty() ); + else + GRAPHENE_REQUIRE_THROW( price_diff_percent_string( pr, new_price ), fc::exception ); + } else { + GRAPHENE_REQUIRE_THROW( price_to_string( pr, 0, 0 ), fc::exception ); + } + } catch(fc::exception& fcx) { + BOOST_FAIL( "FC Exception logging price_to_string: " + fcx.to_detail_string() ); + } catch(...) { + BOOST_FAIL( "Uncaught exception in price_to_string. 
i=" + std::to_string(i) + " j=" + std::to_string(j)); + } + } + } +} + +/** + * Verify that price_to_string comes back with the correct results. Put edge cases here. + */ +BOOST_AUTO_TEST_CASE(price_to_string_verify) +{ + try + { + BOOST_CHECK_EQUAL( price_to_string( price{ asset(1), asset(1) }, 0, 0 ), "1" ); + BOOST_CHECK_EQUAL( price_to_string( price{ asset(10), asset(10) }, 0, 0), "1" ); + int64_t mx = std::numeric_limits::max(); + BOOST_CHECK_EQUAL( price_to_string( price{ asset(mx), asset(mx) }, 0, 0), "1" ); + BOOST_CHECK_EQUAL( price_to_string( price{ asset(1), asset(mx) }, 0, 0), "0.0000000000000000001" ); + BOOST_CHECK_EQUAL( price_to_string( price{ asset(mx), asset(1) }, 0, 0), "9223372036854775807" ); + } + catch (fc::exception& fx) + { + BOOST_FAIL( "FC Exception: " + fx.to_detail_string() ); + } +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/asset_api_tests.cpp b/tests/tests/asset_api_tests.cpp new file mode 100644 index 0000000000..7536b3529a --- /dev/null +++ b/tests/tests/asset_api_tests.cpp @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2018 oxarbitrage, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include + +#include +#include + +#include "../common/database_fixture.hpp" + +using namespace graphene::chain; +using namespace graphene::chain::test; +using namespace graphene::app; + +BOOST_FIXTURE_TEST_SUITE(asset_api_tests, database_fixture) + +BOOST_AUTO_TEST_CASE( asset_holders ) +{ + graphene::app::asset_api asset_api(app); + + // create an asset and some accounts + create_bitasset("USD", account_id_type()); + auto dan = create_account("dan"); + auto bob = create_account("bob"); + auto alice = create_account("alice"); + + // send them some bts + transfer(account_id_type()(db), dan, asset(100)); + transfer(account_id_type()(db), alice, asset(200)); + transfer(account_id_type()(db), bob, asset(300)); + + // make call + vector holders = asset_api.get_asset_holders( std::string( static_cast(asset_id_type())), 0, 100); + BOOST_CHECK_EQUAL(holders.size(), 4u); + + // by now we can guarantee the order + BOOST_CHECK(holders[0].name == "committee-account"); + BOOST_CHECK(holders[1].name == "bob"); + BOOST_CHECK(holders[2].name == "alice"); + BOOST_CHECK(holders[3].name == "dan"); +} +BOOST_AUTO_TEST_CASE( api_limit_get_asset_holders ) +{ + graphene::app::asset_api asset_api(app); + + // create an asset and some accounts + create_bitasset("USD", account_id_type()); + auto dan = create_account("dan"); + auto bob = create_account("bob"); + auto alice = create_account("alice"); + + // send them some bts + transfer(account_id_type()(db), dan, asset(100)); + transfer(account_id_type()(db), alice, asset(200)); + transfer(account_id_type()(db), bob, asset(300)); + + // make call + GRAPHENE_CHECK_THROW(asset_api.get_asset_holders(std::string( static_cast(asset_id_type())), 0, 260), fc::exception); + vector holders = asset_api.get_asset_holders(std::string( static_cast(asset_id_type())), 0, 210); + BOOST_REQUIRE_EQUAL( holders.size(), 4u ); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/asset_tests.cpp b/tests/tests/asset_tests.cpp new file mode 100644 index 0000000000..69f681b782 --- /dev/null +++ b/tests/tests/asset_tests.cpp @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2018 Bitshares Foundation, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include +#include +#include +#include + +BOOST_AUTO_TEST_SUITE(asset_tests) + +BOOST_AUTO_TEST_CASE( asset_to_from_string ) +{ + std::string positive_results[19]; + positive_results[0] = "12345"; + positive_results[1] = "1234.5"; + positive_results[2] = "123.45"; + positive_results[3] = "12.345"; + positive_results[4] = "1.2345"; + positive_results[5] = "0.12345"; + positive_results[6] = "0.012345"; + positive_results[7] = "0.0012345"; + positive_results[8] = "0.00012345"; + positive_results[9] = "0.000012345"; + positive_results[10] = "0.0000012345"; + positive_results[11] = "0.00000012345"; + positive_results[12] = "0.000000012345"; + positive_results[13] = "0.0000000012345"; + positive_results[14] = "0.00000000012345"; + positive_results[15] = "0.000000000012345"; + positive_results[16] = "0.0000000000012345"; + positive_results[17] = "0.00000000000012345"; + positive_results[18] = "0.000000000000012345"; + std::string negative_results[19]; + for(int i = 0; i < 19; ++i) + { + negative_results[i] = "-" + positive_results[i]; + } + graphene::chain::asset_object test_obj; + graphene::chain::share_type amt12345 = 12345; + BOOST_TEST_MESSAGE( "Testing positive numbers" ); + for (int i = 0; i < 19; i++) + { + test_obj.precision = i; + BOOST_CHECK_EQUAL(positive_results[i], test_obj.amount_to_string(amt12345)); + } + BOOST_TEST_MESSAGE( "Testing negative numbers" ); + for (int i = 0; i < 19; i++) + { + test_obj.precision = i; + BOOST_CHECK_EQUAL(negative_results[i], test_obj.amount_to_string(amt12345 * -1)); + } +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/authority_tests.cpp b/tests/tests/authority_tests.cpp index c46e698fe2..7b571fe889 100644 --- a/tests/tests/authority_tests.cpp +++ b/tests/tests/authority_tests.cpp @@ -25,13 +25,13 @@ #include #include -#include #include #include #include #include #include +#include #include @@ -59,7 +59,7 @@ BOOST_AUTO_TEST_CASE( simple_single_signature ) sign(trx, nathan_key); PUSH_TX( db, trx, database::skip_transaction_dupe_check ); - BOOST_CHECK_EQUAL(get_balance(nathan, core), old_balance - 500); + BOOST_CHECK_EQUAL(get_balance(nathan, core), static_cast(old_balance - 500)); } catch (fc::exception& e) { edump((e.to_detail_string())); throw; @@ -84,8 +84,7 @@ BOOST_AUTO_TEST_CASE( any_two_of_three ) trx.operations.push_back(op); sign(trx, nathan_key1); PUSH_TX( db, trx, database::skip_transaction_dupe_check ); - trx.operations.clear(); - trx.signatures.clear(); + trx.clear(); } FC_CAPTURE_AND_RETHROW ((nathan.active)) transfer_operation op; @@ -97,25 +96,25 @@ BOOST_AUTO_TEST_CASE( any_two_of_three ) GRAPHENE_CHECK_THROW(PUSH_TX( db, trx, database::skip_transaction_dupe_check ), fc::exception); sign(trx, nathan_key2); PUSH_TX( db, trx, database::skip_transaction_dupe_check ); - BOOST_CHECK_EQUAL(get_balance(nathan, core), old_balance - 500); + BOOST_CHECK_EQUAL(get_balance(nathan, core), static_cast(old_balance - 500)); - trx.signatures.clear(); + trx.clear_signatures(); sign(trx, nathan_key2); sign(trx, nathan_key3); PUSH_TX( db, trx, database::skip_transaction_dupe_check ); - BOOST_CHECK_EQUAL(get_balance(nathan, core), old_balance - 1000); + BOOST_CHECK_EQUAL(get_balance(nathan, core), static_cast(old_balance - 1000)); - trx.signatures.clear(); + trx.clear_signatures(); sign(trx, nathan_key1); sign(trx, nathan_key3); PUSH_TX( db, trx, database::skip_transaction_dupe_check ); - BOOST_CHECK_EQUAL(get_balance(nathan, core), old_balance - 1500); + BOOST_CHECK_EQUAL(get_balance(nathan, core), static_cast(old_balance - 1500)); - 
trx.signatures.clear(); + trx.clear_signatures(); //sign(trx, fc::ecc::private_key::generate()); sign(trx,nathan_key3); GRAPHENE_CHECK_THROW(PUSH_TX( db, trx, database::skip_transaction_dupe_check ), fc::exception); - BOOST_CHECK_EQUAL(get_balance(nathan, core), old_balance - 1500); + BOOST_CHECK_EQUAL(get_balance(nathan, core), static_cast(old_balance - 1500)); } catch (fc::exception& e) { edump((e.to_detail_string())); throw; @@ -156,7 +155,7 @@ BOOST_AUTO_TEST_CASE( recursive_accounts ) BOOST_TEST_MESSAGE( "Attempting to transfer with parent1 signature, should fail" ); sign(trx,parent1_key); GRAPHENE_CHECK_THROW(PUSH_TX( db, trx, database::skip_transaction_dupe_check ), fc::exception); - trx.signatures.clear(); + trx.clear_signatures(); BOOST_TEST_MESSAGE( "Attempting to transfer with parent2 signature, should fail" ); sign(trx,parent2_key); @@ -165,9 +164,8 @@ BOOST_AUTO_TEST_CASE( recursive_accounts ) BOOST_TEST_MESSAGE( "Attempting to transfer with parent1 and parent2 signature, should succeed" ); sign(trx,parent1_key); PUSH_TX( db, trx, database::skip_transaction_dupe_check ); - BOOST_CHECK_EQUAL(get_balance(child, core), old_balance - 500); - trx.operations.clear(); - trx.signatures.clear(); + BOOST_CHECK_EQUAL(get_balance(child, core), static_cast(old_balance - 500)); + trx.clear(); BOOST_TEST_MESSAGE( "Adding a key for the child that can override parents" ); fc::ecc::private_key child_key = fc::ecc::private_key::generate(); @@ -180,9 +178,8 @@ BOOST_AUTO_TEST_CASE( recursive_accounts ) sign(trx,parent1_key); sign(trx,parent2_key); PUSH_TX( db, trx, database::skip_transaction_dupe_check ); - BOOST_REQUIRE_EQUAL(child.active.num_auths(), 3); - trx.operations.clear(); - trx.signatures.clear(); + BOOST_REQUIRE_EQUAL(child.active.num_auths(), 3u); + trx.clear(); } op.from = child.id; @@ -195,7 +192,7 @@ BOOST_AUTO_TEST_CASE( recursive_accounts ) BOOST_TEST_MESSAGE( "Attempting transfer just parent1, should fail" ); sign(trx, parent1_key); GRAPHENE_CHECK_THROW(PUSH_TX( db, trx, database::skip_transaction_dupe_check ), fc::exception); - trx.signatures.clear(); + trx.clear_signatures(); BOOST_TEST_MESSAGE( "Attempting transfer just parent2, should fail" ); sign(trx, parent2_key); GRAPHENE_CHECK_THROW(PUSH_TX( db, trx, database::skip_transaction_dupe_check ), fc::exception); @@ -203,15 +200,14 @@ BOOST_AUTO_TEST_CASE( recursive_accounts ) BOOST_TEST_MESSAGE( "Attempting transfer both parents, should succeed" ); sign(trx, parent1_key); PUSH_TX( db, trx, database::skip_transaction_dupe_check ); - BOOST_CHECK_EQUAL(get_balance(child, core), old_balance - 1000); - trx.signatures.clear(); + BOOST_CHECK_EQUAL(get_balance(child, core), static_cast(old_balance - 1000)); + trx.clear_signatures(); BOOST_TEST_MESSAGE( "Attempting transfer with just child key, should succeed" ); sign(trx, child_key); PUSH_TX( db, trx, database::skip_transaction_dupe_check ); - BOOST_CHECK_EQUAL(get_balance(child, core), old_balance - 1500); - trx.operations.clear(); - trx.signatures.clear(); + BOOST_CHECK_EQUAL(get_balance(child, core), static_cast(old_balance - 1500)); + trx.clear(); BOOST_TEST_MESSAGE( "Creating grandparent account, parent1 now requires authority of grandparent" ); auto grandparent = create_account("grandparent"); @@ -227,8 +223,7 @@ BOOST_AUTO_TEST_CASE( recursive_accounts ) op.owner = *op.active; trx.operations.push_back(op); PUSH_TX( db, trx, ~0 ); - trx.operations.clear(); - trx.signatures.clear(); + trx.clear(); } BOOST_TEST_MESSAGE( "Attempt to transfer using old parent keys, should fail" ); 
@@ -236,13 +231,13 @@ BOOST_AUTO_TEST_CASE( recursive_accounts ) sign(trx, parent1_key); sign(trx, parent2_key); GRAPHENE_CHECK_THROW(PUSH_TX( db, trx, database::skip_transaction_dupe_check ), fc::exception); - trx.signatures.clear(); + trx.clear_signatures(); sign( trx, parent2_key ); sign( trx, grandparent_key ); BOOST_TEST_MESSAGE( "Attempt to transfer using parent2_key and grandparent_key" ); PUSH_TX( db, trx, database::skip_transaction_dupe_check ); - BOOST_CHECK_EQUAL(get_balance(child, core), old_balance - 2000); + BOOST_CHECK_EQUAL(get_balance(child, core), static_cast(old_balance - 2000)); trx.clear(); BOOST_TEST_MESSAGE( "Update grandparent account authority to be committee account" ); @@ -253,8 +248,7 @@ BOOST_AUTO_TEST_CASE( recursive_accounts ) op.owner = *op.active; trx.operations.push_back(op); PUSH_TX( db, trx, ~0 ); - trx.operations.clear(); - trx.signatures.clear(); + trx.clear(); } BOOST_TEST_MESSAGE( "Create recursion depth failure" ); @@ -265,12 +259,11 @@ BOOST_AUTO_TEST_CASE( recursive_accounts ) //Fails due to recursion depth. GRAPHENE_CHECK_THROW(PUSH_TX( db, trx, database::skip_transaction_dupe_check ), fc::exception); BOOST_TEST_MESSAGE( "verify child key can override recursion checks" ); - trx.signatures.clear(); + trx.clear_signatures(); sign(trx, child_key); PUSH_TX( db, trx, database::skip_transaction_dupe_check ); - BOOST_CHECK_EQUAL(get_balance(child, core), old_balance - 2500); - trx.operations.clear(); - trx.signatures.clear(); + BOOST_CHECK_EQUAL(get_balance(child, core), static_cast(old_balance - 2500)); + trx.clear(); BOOST_TEST_MESSAGE( "Verify a cycle fails" ); { @@ -280,8 +273,7 @@ BOOST_AUTO_TEST_CASE( recursive_accounts ) op.owner = *op.active; trx.operations.push_back(op); PUSH_TX( db, trx, ~0 ); - trx.operations.clear(); - trx.signatures.clear(); + trx.clear(); } trx.operations.push_back(op); @@ -329,17 +321,17 @@ BOOST_AUTO_TEST_CASE( proposed_single_account ) vector other; flat_set active_set, owner_set; operation_get_required_authorities(op,active_set,owner_set,other); - BOOST_CHECK_EQUAL(active_set.size(), 1); - BOOST_CHECK_EQUAL(owner_set.size(), 0); - BOOST_CHECK_EQUAL(other.size(), 0); + BOOST_CHECK_EQUAL(active_set.size(), 1lu); + BOOST_CHECK_EQUAL(owner_set.size(), 0lu); + BOOST_CHECK_EQUAL(other.size(), 0lu); BOOST_CHECK(*active_set.begin() == moneyman.get_id()); active_set.clear(); other.clear(); operation_get_required_authorities(op.proposed_ops.front().op,active_set,owner_set,other); - BOOST_CHECK_EQUAL(active_set.size(), 1); - BOOST_CHECK_EQUAL(owner_set.size(), 0); - BOOST_CHECK_EQUAL(other.size(), 0); + BOOST_CHECK_EQUAL(active_set.size(), 1lu); + BOOST_CHECK_EQUAL(owner_set.size(), 0lu); + BOOST_CHECK_EQUAL(other.size(), 0lu); BOOST_CHECK(*active_set.begin() == nathan.id); } @@ -349,10 +341,10 @@ BOOST_AUTO_TEST_CASE( proposed_single_account ) sign( trx, init_account_priv_key ); const proposal_object& proposal = db.get(PUSH_TX( db, trx ).operation_results.front().get()); - BOOST_CHECK_EQUAL(proposal.required_active_approvals.size(), 1); - BOOST_CHECK_EQUAL(proposal.available_active_approvals.size(), 0); - BOOST_CHECK_EQUAL(proposal.required_owner_approvals.size(), 0); - BOOST_CHECK_EQUAL(proposal.available_owner_approvals.size(), 0); + BOOST_CHECK_EQUAL(proposal.required_active_approvals.size(), 1lu); + BOOST_CHECK_EQUAL(proposal.available_active_approvals.size(), 0lu); + BOOST_CHECK_EQUAL(proposal.required_owner_approvals.size(), 0lu); + BOOST_CHECK_EQUAL(proposal.available_owner_approvals.size(), 0lu); 
BOOST_CHECK(*proposal.required_active_approvals.begin() == nathan.id); proposal_update_operation pup; @@ -372,7 +364,7 @@ BOOST_AUTO_TEST_CASE( proposed_single_account ) //committee has no stake in the transaction. GRAPHENE_CHECK_THROW(PUSH_TX( db, trx ), fc::exception); - trx.signatures.clear(); + trx.clear_signatures(); pup.active_approvals_to_add.clear(); pup.active_approvals_to_add.insert(nathan.id); @@ -389,6 +381,49 @@ BOOST_AUTO_TEST_CASE( proposed_single_account ) } } +BOOST_AUTO_TEST_CASE( proposal_failure ) +{ + try + { + ACTORS( (bob) (alice) ); + + fund( bob, asset(1000000) ); + fund( alice, asset(1000000) ); + + // create proposal that will eventually fail due to lack of funds + transfer_operation top; + top.to = alice_id; + top.from = bob_id; + top.amount = asset(2000000); + proposal_create_operation pop; + pop.proposed_ops.push_back( { top } ); + pop.expiration_time = db.head_block_time() + fc::days(1); + pop.fee_paying_account = bob_id; + trx.operations.push_back( pop ); + trx.clear_signatures(); + sign( trx, bob_private_key ); + processed_transaction processed = PUSH_TX( db, trx ); + proposal_object prop = db.get(processed.operation_results.front().get()); + trx.clear(); + generate_block(); + // add signature + proposal_update_operation up_op; + up_op.proposal = prop.id; + up_op.fee_paying_account = bob_id; + up_op.active_approvals_to_add.emplace( bob_id ); + trx.operations.push_back( up_op ); + sign( trx, bob_private_key ); + PUSH_TX( db, trx ); + trx.clear(); + + // check fail reason + const proposal_object& result = db.get(prop.id); + BOOST_CHECK(!result.fail_reason.empty()); + BOOST_CHECK_EQUAL( result.fail_reason.substr(0, 16), "Assert Exception"); + } + FC_LOG_AND_RETHROW() +} + /// Verify that committee authority cannot be invoked in a normal transaction BOOST_AUTO_TEST_CASE( committee_authority ) { try { @@ -413,7 +448,7 @@ BOOST_AUTO_TEST_CASE( committee_authority ) sign(trx, committee_key); GRAPHENE_CHECK_THROW(PUSH_TX( db, trx ), graphene::chain::invalid_committee_approval ); - auto _sign = [&] { trx.signatures.clear(); sign( trx, nathan_key ); }; + auto _sign = [&] { trx.clear_signatures(); sign( trx, nathan_key ); }; proposal_create_operation pop; pop.proposed_ops.push_back({trx.operations.front()}); @@ -447,8 +482,7 @@ BOOST_AUTO_TEST_CASE( committee_authority ) BOOST_TEST_MESSAGE( "Checking that the proposal is not authorized to execute" ); BOOST_REQUIRE(!db.get(prop.id).is_authorized_to_execute(db)); - trx.operations.clear(); - trx.signatures.clear(); + trx.clear(); proposal_update_operation uop; uop.fee_paying_account = GRAPHENE_TEMP_ACCOUNT; uop.proposal = prop.id; @@ -464,11 +498,11 @@ BOOST_AUTO_TEST_CASE( committee_authority ) */ trx.operations.push_back(uop); sign( trx, committee_key ); - db.push_transaction(trx); + PUSH_TX(db, trx); BOOST_CHECK_EQUAL(get_balance(nathan, asset_id_type()(db)), 0); BOOST_CHECK(db.get(prop.id).is_authorized_to_execute(db)); - trx.signatures.clear(); + trx.clear_signatures(); generate_blocks(*prop.review_period_time); uop.key_approvals_to_add.clear(); uop.key_approvals_to_add.insert(committee_key.get_public_key()); // was 7 @@ -479,6 +513,8 @@ BOOST_AUTO_TEST_CASE( committee_authority ) generate_blocks(prop.expiration_time); BOOST_CHECK_EQUAL(get_balance(nathan, asset_id_type()(db)), 100000); + // proposal deleted + BOOST_CHECK_THROW( db.get(prop.id), fc::exception ); } FC_LOG_AND_RETHROW() } BOOST_FIXTURE_TEST_CASE( fired_committee_members, database_fixture ) @@ -693,7 +729,7 @@ BOOST_FIXTURE_TEST_CASE( proposal_delete, 
database_fixture ) PUSH_TX( db, trx ); trx.clear(); BOOST_CHECK(!prop.is_authorized_to_execute(db)); - BOOST_CHECK_EQUAL(prop.available_active_approvals.size(), 1); + BOOST_CHECK_EQUAL(prop.available_active_approvals.size(), 1lu); std::swap(uop.active_approvals_to_add, uop.active_approvals_to_remove); trx.operations.push_back(uop); @@ -701,7 +737,7 @@ BOOST_FIXTURE_TEST_CASE( proposal_delete, database_fixture ) PUSH_TX( db, trx ); trx.clear(); BOOST_CHECK(!prop.is_authorized_to_execute(db)); - BOOST_CHECK_EQUAL(prop.available_active_approvals.size(), 0); + BOOST_CHECK_EQUAL(prop.available_active_approvals.size(), 0lu); } { @@ -755,8 +791,8 @@ BOOST_FIXTURE_TEST_CASE( proposal_owner_authority_delete, database_fixture ) } const proposal_object& prop = *db.get_index_type().indices().begin(); - BOOST_CHECK_EQUAL(prop.required_active_approvals.size(), 1); - BOOST_CHECK_EQUAL(prop.required_owner_approvals.size(), 1); + BOOST_CHECK_EQUAL(prop.required_active_approvals.size(), 1lu); + BOOST_CHECK_EQUAL(prop.required_owner_approvals.size(), 1lu); BOOST_CHECK(!prop.is_authorized_to_execute(db)); { @@ -769,7 +805,7 @@ BOOST_FIXTURE_TEST_CASE( proposal_owner_authority_delete, database_fixture ) PUSH_TX( db, trx ); trx.clear(); BOOST_CHECK(!prop.is_authorized_to_execute(db)); - BOOST_CHECK_EQUAL(prop.available_owner_approvals.size(), 1); + BOOST_CHECK_EQUAL(prop.available_owner_approvals.size(), 1lu); std::swap(uop.owner_approvals_to_add, uop.owner_approvals_to_remove); trx.operations.push_back(uop); @@ -777,7 +813,7 @@ BOOST_FIXTURE_TEST_CASE( proposal_owner_authority_delete, database_fixture ) PUSH_TX( db, trx ); trx.clear(); BOOST_CHECK(!prop.is_authorized_to_execute(db)); - BOOST_CHECK_EQUAL(prop.available_owner_approvals.size(), 0); + BOOST_CHECK_EQUAL(prop.available_owner_approvals.size(), 0lu); } { @@ -832,8 +868,8 @@ BOOST_FIXTURE_TEST_CASE( proposal_owner_authority_complete, database_fixture ) } const proposal_object& prop = *db.get_index_type().indices().begin(); - BOOST_CHECK_EQUAL(prop.required_active_approvals.size(), 1); - BOOST_CHECK_EQUAL(prop.required_owner_approvals.size(), 1); + BOOST_CHECK_EQUAL(prop.required_active_approvals.size(), 1lu); + BOOST_CHECK_EQUAL(prop.required_owner_approvals.size(), 1lu); BOOST_CHECK(!prop.is_authorized_to_execute(db)); { @@ -849,7 +885,7 @@ BOOST_FIXTURE_TEST_CASE( proposal_owner_authority_complete, database_fixture ) PUSH_TX( db, trx ); trx.clear(); BOOST_CHECK(!prop.is_authorized_to_execute(db)); - BOOST_CHECK_EQUAL(prop.available_key_approvals.size(), 1); + BOOST_CHECK_EQUAL(prop.available_key_approvals.size(), 1lu); std::swap(uop.key_approvals_to_add, uop.key_approvals_to_remove); trx.operations.push_back(uop); @@ -859,7 +895,7 @@ BOOST_FIXTURE_TEST_CASE( proposal_owner_authority_complete, database_fixture ) PUSH_TX( db, trx ); trx.clear(); BOOST_CHECK(!prop.is_authorized_to_execute(db)); - BOOST_CHECK_EQUAL(prop.available_key_approvals.size(), 0); + BOOST_CHECK_EQUAL(prop.available_key_approvals.size(), 0lu); std::swap(uop.key_approvals_to_add, uop.key_approvals_to_remove); trx.operations.push_back(uop); @@ -869,7 +905,7 @@ BOOST_FIXTURE_TEST_CASE( proposal_owner_authority_complete, database_fixture ) PUSH_TX( db, trx ); trx.clear(); BOOST_CHECK(!prop.is_authorized_to_execute(db)); - BOOST_CHECK_EQUAL(prop.available_key_approvals.size(), 1); + BOOST_CHECK_EQUAL(prop.available_key_approvals.size(), 1lu); uop.key_approvals_to_add.clear(); uop.owner_approvals_to_add.insert(nathan.get_id()); @@ -894,7 +930,6 @@ BOOST_FIXTURE_TEST_CASE( 
max_authority_membership, database_fixture ) }); transaction tx; - processed_transaction ptx; private_key_type committee_key = init_account_priv_key; // Sam is the creator of accounts @@ -1027,11 +1062,11 @@ BOOST_FIXTURE_TEST_CASE( bogus_signature, database_fixture ) GRAPHENE_REQUIRE_THROW( PUSH_TX( db, trx, skip ), fc::exception ); // Re-sign, now OK (sig is replaced) BOOST_TEST_MESSAGE( "Resign with Alice's Signature" ); - trx.signatures.clear(); + trx.clear_signatures(); sign( trx, alice_key ); PUSH_TX( db, trx, skip ); - trx.signatures.clear(); + trx.clear_signatures(); trx.operations.pop_back(); sign( trx, alice_key ); sign( trx, charlie_key ); @@ -1080,7 +1115,7 @@ BOOST_FIXTURE_TEST_CASE( voting_account, database_fixture ) GRAPHENE_CHECK_THROW(PUSH_TX( db, trx ), fc::exception); op.new_options->num_committee = 3; trx.operations = {op}; - trx.signatures.clear(); + trx.clear_signatures(); sign( trx, vikram_private_key ); PUSH_TX( db, trx ); trx.clear(); @@ -1133,7 +1168,7 @@ BOOST_FIXTURE_TEST_CASE( get_required_signatures_test, database_fixture ) op.owner = auth; tx.operations.push_back( op ); set_expiration( db, tx ); - PUSH_TX( db, tx, database::skip_transaction_signatures | database::skip_authority_check ); + PUSH_TX( db, tx, database::skip_transaction_signatures ); } ; auto get_active = [&]( @@ -1247,7 +1282,7 @@ BOOST_FIXTURE_TEST_CASE( nonminimal_sig_test, database_fixture ) op.owner = auth; tx.operations.push_back( op ); set_expiration( db, tx ); - PUSH_TX( db, tx, database::skip_transaction_signatures | database::skip_authority_check ); + PUSH_TX( db, tx, database::skip_transaction_signatures ); } ; auto get_active = [&]( @@ -1313,4 +1348,473 @@ BOOST_FIXTURE_TEST_CASE( nonminimal_sig_test, database_fixture ) } } +BOOST_FIXTURE_TEST_CASE( parent_owner_test, database_fixture ) +{ + try + { + ACTORS( + (alice)(bob) + ); + + auto set_auth2 = [&]( + account_id_type aid, + const authority& active, + const authority& owner + ) + { + signed_transaction tx; + account_update_operation op; + op.account = aid; + op.active = active; + op.owner = owner; + tx.operations.push_back( op ); + set_expiration( db, tx ); + PUSH_TX( db, tx, database::skip_transaction_signatures ); + } ; + + auto set_auth = [&]( + account_id_type aid, + const authority& auth + ) + { + set_auth2( aid, auth, auth ); + } ; + + auto get_active = [&]( + account_id_type aid + ) -> const authority* + { + return &(aid(db).active); + } ; + + auto get_owner = [&]( + account_id_type aid + ) -> const authority* + { + return &(aid(db).owner); + } ; + + auto chk = [&]( + const signed_transaction& tx, + flat_set available_keys, + set ref_set + ) -> bool + { + //wdump( (tx)(available_keys) ); + set result_set = tx.get_required_signatures( db.get_chain_id(), available_keys, get_active, get_owner ); + //wdump( (result_set)(ref_set) ); + return result_set == ref_set; + } ; + + fc::ecc::private_key alice_active_key = fc::ecc::private_key::regenerate(fc::digest("alice_active")); + fc::ecc::private_key alice_owner_key = fc::ecc::private_key::regenerate(fc::digest("alice_owner")); + public_key_type alice_active_pub( alice_active_key.get_public_key() ); + public_key_type alice_owner_pub( alice_owner_key.get_public_key() ); + set_auth2( alice_id, authority( 1, alice_active_pub, 1 ), authority( 1, alice_owner_pub, 1 ) ); + set_auth( bob_id, authority( 1, alice_id, 1 ) ); + + signed_transaction tx; + transfer_operation op; + op.from = bob_id; + op.to = alice_id; + op.amount = asset(1); + tx.operations.push_back( op ); + + // 
https://github.com/bitshares/bitshares-core/issues/584 + BOOST_CHECK( chk( tx, { alice_owner_pub }, { } ) ); + BOOST_CHECK( chk( tx, { alice_active_pub, alice_owner_pub }, { alice_active_pub } ) ); + sign( tx, alice_owner_key ); + GRAPHENE_REQUIRE_THROW( tx.verify_authority( db.get_chain_id(), get_active, get_owner ), fc::exception ); + + tx.clear_signatures(); + sign( tx, alice_active_key ); + tx.verify_authority( db.get_chain_id(), get_active, get_owner ); + + } + catch(fc::exception& e) + { + edump((e.to_detail_string())); + throw; + } +} + +BOOST_FIXTURE_TEST_CASE( owner_delegation_test, database_fixture ) +{ try { + ACTORS( (alice)(bob) ); + + fc::ecc::private_key bob_active_key = fc::ecc::private_key::regenerate(fc::digest("bob_active")); + fc::ecc::private_key bob_owner_key = fc::ecc::private_key::regenerate(fc::digest("bob_owner")); + + trx.clear(); + + // Make sure Bob has different keys + account_update_operation auo; + auo.account = bob_id; + auo.active = authority( 1, public_key_type(bob_active_key.get_public_key()), 1 ); + auo.owner = authority( 1, public_key_type(bob_owner_key.get_public_key()), 1 ); + trx.operations.push_back( auo ); + sign( trx, bob_private_key ); + PUSH_TX( db, trx ); + trx.clear(); + + // Delegate Alice's owner auth to herself and active auth to Bob + auo.account = alice_id; + auo.active = authority( 1, bob_id, 1 ); + auo.owner = authority( 1, alice_id, 1 ); + trx.operations.push_back( auo ); + sign( trx, alice_private_key ); + PUSH_TX( db, trx ); + trx.clear(); + + // Now Bob has full control over Alice's account + auo.account = alice_id; + auo.active.reset(); + auo.owner = authority( 1, bob_id, 1 ); + trx.operations.push_back( auo ); + sign( trx, bob_active_key ); + PUSH_TX( db, trx ); + trx.clear(); +} FC_LOG_AND_RETHROW() } + +/// This test case reproduces https://github.com/bitshares/bitshares-core/issues/944 +/// and https://github.com/bitshares/bitshares-core/issues/580 +BOOST_FIXTURE_TEST_CASE( missing_owner_auth_test, database_fixture ) +{ + try + { + ACTORS( + (alice) + ); + + auto set_auth = [&]( + account_id_type aid, + const authority& active, + const authority& owner + ) + { + signed_transaction tx; + account_update_operation op; + op.account = aid; + op.active = active; + op.owner = owner; + tx.operations.push_back( op ); + set_expiration( db, tx ); + PUSH_TX( db, tx, database::skip_transaction_signatures ); + } ; + + auto get_active = [&]( + account_id_type aid + ) -> const authority* + { + return &(aid(db).active); + } ; + + auto get_owner = [&]( + account_id_type aid + ) -> const authority* + { + return &(aid(db).owner); + } ; + + fc::ecc::private_key alice_active_key = fc::ecc::private_key::regenerate(fc::digest("alice_active")); + fc::ecc::private_key alice_owner_key = fc::ecc::private_key::regenerate(fc::digest("alice_owner")); + public_key_type alice_active_pub( alice_active_key.get_public_key() ); + public_key_type alice_owner_pub( alice_owner_key.get_public_key() ); + set_auth( alice_id, authority( 1, alice_active_pub, 1 ), authority( 1, alice_owner_pub, 1 ) ); + + // creating a transaction that needs owner permission + signed_transaction tx; + account_update_operation op; + op.account = alice_id; + op.owner = authority( 1, alice_active_pub, 1 ); + tx.operations.push_back( op ); + + // not signed, should throw tx_missing_owner_auth + GRAPHENE_REQUIRE_THROW( tx.verify_authority( db.get_chain_id(), get_active, get_owner ), + graphene::chain::tx_missing_owner_auth ); + + // signed with alice's active key, should throw 
tx_missing_owner_auth + sign( tx, alice_active_key ); + GRAPHENE_REQUIRE_THROW( tx.verify_authority( db.get_chain_id(), get_active, get_owner ), + graphene::chain::tx_missing_owner_auth ); + + // signed with alice's owner key, should not throw + tx.clear_signatures(); + sign( tx, alice_owner_key ); + tx.verify_authority( db.get_chain_id(), get_active, get_owner ); + + // signed with both alice's owner key and active key, + // it does not throw due to https://github.com/bitshares/bitshares-core/issues/580 + sign( tx, alice_active_key ); + tx.verify_authority( db.get_chain_id(), get_active, get_owner ); + + // creating a transaction that needs active permission + tx.clear(); + op.owner.reset(); + op.active = authority( 1, alice_owner_pub, 1 ); + tx.operations.push_back( op ); + + // not signed, should throw tx_missing_active_auth + GRAPHENE_REQUIRE_THROW( tx.verify_authority( db.get_chain_id(), get_active, get_owner ), + graphene::chain::tx_missing_active_auth ); + + // signed with alice's active key, should not throw + sign( tx, alice_active_key ); + tx.verify_authority( db.get_chain_id(), get_active, get_owner ); + + // signed with alice's owner key, should not throw + tx.clear_signatures(); + sign( tx, alice_owner_key ); + tx.verify_authority( db.get_chain_id(), get_active, get_owner ); + + // signed with both alice's owner key and active key, should throw tx_irrelevant_sig + sign( tx, alice_active_key ); + GRAPHENE_REQUIRE_THROW( tx.verify_authority( db.get_chain_id(), get_active, get_owner ), + graphene::chain::tx_irrelevant_sig ); + + } + catch(fc::exception& e) + { + edump((e.to_detail_string())); + throw; + } +} + +BOOST_AUTO_TEST_CASE( nested_execution ) +{ try { + ACTORS( (alice)(bob) ); + fund( alice ); + + generate_blocks( HARDFORK_CORE_214_TIME + fc::hours(1) ); + set_expiration( db, trx ); + + const auto& gpo = db.get_global_properties(); + + proposal_create_operation pco; + pco.expiration_time = db.head_block_time() + fc::minutes(1); + pco.fee_paying_account = alice_id; + proposal_id_type inner; + { + transfer_operation top; + top.from = alice_id; + top.to = bob_id; + top.amount = asset( 10 ); + pco.proposed_ops.emplace_back( top ); + trx.operations.push_back( pco ); + inner = PUSH_TX( db, trx, ~0 ).operation_results.front().get(); + trx.clear(); + pco.proposed_ops.clear(); + } + + std::vector nested; + nested.push_back( inner ); + for( size_t i = 0; i < gpo.active_witnesses.size() * 2; i++ ) + { + proposal_update_operation pup; + pup.fee_paying_account = alice_id; + pup.proposal = nested.back(); + pup.active_approvals_to_add.insert( alice_id ); + pco.proposed_ops.emplace_back( pup ); + trx.operations.push_back( pco ); + nested.push_back( PUSH_TX( db, trx, ~0 ).operation_results.front().get() ); + trx.clear(); + pco.proposed_ops.clear(); + } + + proposal_update_operation pup; + pup.fee_paying_account = alice_id; + pup.proposal = nested.back(); + pup.active_approvals_to_add.insert( alice_id ); + trx.operations.push_back( pup ); + PUSH_TX( db, trx, ~0 ); + + for( size_t i = 1; i < nested.size(); i++ ) + BOOST_CHECK_THROW( db.get( nested[i] ), fc::assert_exception ); // executed successfully -> object removed + db.get( inner ); // wasn't executed -> object exists, doesn't throw +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_CASE( issue_214 ) +{ try { + ACTORS( (alice)(bob) ); + fund( alice ); + + generate_blocks( HARDFORK_CORE_214_TIME - fc::hours(1) ); + set_expiration( db, trx ); + + // Bob proposes that Alice transfer 500 CORE to himself + transfer_operation top; + top.from = 
alice_id; + top.to = bob_id; + top.amount = asset( 500 ); + proposal_create_operation pop; + pop.proposed_ops.emplace_back(top); + pop.fee_paying_account = bob_id; + pop.expiration_time = db.head_block_time() + fc::days(1); + trx.operations.push_back(pop); + sign( trx, bob_private_key ); + const proposal_id_type pid1 = PUSH_TX( db, trx ).operation_results[0].get(); + trx.clear(); + + // Bob wants to propose that Alice confirm the first proposal + proposal_update_operation pup; + pup.fee_paying_account = alice_id; + pup.proposal = pid1; + pup.active_approvals_to_add.insert( alice_id ); + pop.proposed_ops.clear(); + pop.proposed_ops.emplace_back( pup ); + trx.operations.push_back(pop); + sign( trx, bob_private_key ); + // before HF_CORE_214, Bob can't do that + BOOST_REQUIRE_THROW( PUSH_TX( db, trx ), fc::assert_exception ); + trx.clear_signatures(); + + { // Bob can create a proposal nesting the one containing the proposal_update + proposal_create_operation npop; + npop.proposed_ops.emplace_back(pop); + npop.fee_paying_account = bob_id; + npop.expiration_time = db.head_block_time() + fc::days(2); + signed_transaction ntx; + set_expiration( db, ntx ); + ntx.operations.push_back(npop); + sign( ntx, bob_private_key ); + const proposal_id_type pid1a = PUSH_TX( db, ntx ).operation_results[0].get(); + ntx.clear(); + + // But execution after confirming it fails + proposal_update_operation npup; + npup.fee_paying_account = bob_id; + npup.proposal = pid1a; + npup.active_approvals_to_add.insert( bob_id ); + ntx.operations.push_back(npup); + sign( ntx, bob_private_key ); + PUSH_TX( db, ntx ); + ntx.clear(); + + db.get( pid1a ); // still exists + } + + generate_blocks( HARDFORK_CORE_214_TIME + fc::hours(1) ); + set_expiration( db, trx ); + sign( trx, bob_private_key ); + // after the HF the previously failed tx works too + const proposal_id_type pid2 = PUSH_TX( db, trx ).operation_results[0].get(); + trx.clear(); + + // For completeness, Alice confirms Bob's second proposal + pup.proposal = pid2; + trx.operations.push_back(pup); + sign( trx, alice_private_key ); + PUSH_TX( db, trx ); + trx.clear(); + + // Execution of the second proposal should have confirmed the first, + // which should have been executed by now. 
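// Chain of effects verified below: Alice's approval allows pid2 to execute; pid2's
// proposal_update_operation adds Alice's approval to pid1; pid1 then executes its
// transfer of 500 CORE from Alice to Bob. Both proposal objects are removed, so the
// lookups below throw, and Bob's balance has grown by top.amount.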
+ BOOST_CHECK_THROW( db.get(pid1), fc::assert_exception ); + BOOST_CHECK_THROW( db.get(pid2), fc::assert_exception ); + BOOST_CHECK_EQUAL( top.amount.amount.value, get_balance( bob_id, top.amount.asset_id ) ); +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_CASE( irrelevant_signatures ) +{ try { + ACTORS( (alice)(bob) ); + fund( alice ); + + // PK: BTS4vsFgTXJcGQMKCFayF2hrNRfYcKjNZ6Mzk8aw9M4zuWfscPhzE, A: BTSGfxPKKLj6tdTUB7i3mHsd2m7QvPLPy2YA + const fc::ecc::private_key test2 = fc::ecc::private_key::regenerate( fc::sha256::hash( std::string( "test-2" ) ) ); + const public_key_type test2_pub( test2.get_public_key() ); + + // PK: BTS7FXC7S9UH7HEH8QiuJ8Xv1NRJJZd1GomALLm9ffjtH95Tb2ZQB, A: BTSBajRqmdrXqmDpZhJ8sgkGagdeXneHFVeM + const fc::ecc::private_key test3 = fc::ecc::private_key::regenerate( fc::sha256::hash( std::string( "test-3" ) ) ); + const public_key_type test3_pub( test3.get_public_key() ); + + BOOST_REQUIRE( test2_pub.key_data < test3_pub.key_data ); + BOOST_REQUIRE( address( test3_pub ) < address( test2_pub ) ); + + account_update_operation auo; + auo.account = alice_id; + auo.active = authority( 2, test2_pub, 2, test3_pub, 1 ); + + trx.clear(); + set_expiration( db, trx ); + trx.operations.push_back( auo ); + sign( trx, alice_private_key ); + PUSH_TX( db, trx ); + trx.clear(); + + transfer_operation to; + to.amount = asset( 1 ); + to.from = alice_id; + to.to = bob_id; + trx.operations.push_back( to ); + sign( trx, test2 ); + sign( trx, test3 ); + PUSH_TX( db, trx ); +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_CASE( self_approving_proposal ) +{ try { + ACTORS( (alice) ); + fund( alice ); + + generate_blocks( HARDFORK_CORE_1479_TIME ); + trx.clear(); + set_expiration( db, trx ); + + proposal_update_operation pup; + pup.fee_paying_account = alice_id; + pup.proposal = proposal_id_type(0); + pup.active_approvals_to_add.insert( alice_id ); + + proposal_create_operation pop; + pop.proposed_ops.emplace_back(pup); + pop.fee_paying_account = alice_id; + pop.expiration_time = db.head_block_time() + fc::days(1); + trx.operations.push_back(pop); + const proposal_id_type pid1 = PUSH_TX( db, trx, ~0 ).operation_results[0].get(); + trx.clear(); + BOOST_REQUIRE_EQUAL( 0u, pid1.instance.value ); + db.get(pid1); + + trx.operations.push_back(pup); + PUSH_TX( db, trx, ~0 ); + + // Proposal failed and still exists + db.get(pid1); +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_CASE( self_deleting_proposal ) +{ try { + ACTORS( (alice) ); + fund( alice ); + + generate_blocks( HARDFORK_CORE_1479_TIME ); + trx.clear(); + set_expiration( db, trx ); + + proposal_delete_operation pdo; + pdo.fee_paying_account = alice_id; + pdo.proposal = proposal_id_type(0); + pdo.using_owner_authority = false; + + proposal_create_operation pop; + pop.proposed_ops.emplace_back( pdo ); + pop.fee_paying_account = alice_id; + pop.expiration_time = db.head_block_time() + fc::days(1); + trx.operations.push_back( pop ); + const proposal_id_type pid1 = PUSH_TX( db, trx, ~0 ).operation_results[0].get(); + trx.clear(); + BOOST_REQUIRE_EQUAL( 0u, pid1.instance.value ); + db.get(pid1); + + proposal_update_operation pup; + pup.fee_paying_account = alice_id; + pup.proposal = proposal_id_type(0); + pup.active_approvals_to_add.insert( alice_id ); + trx.operations.push_back(pup); + PUSH_TX( db, trx, ~0 ); + + // Proposal failed and still exists + db.get(pid1); +} FC_LOG_AND_RETHROW() } + BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/basic_tests.cpp b/tests/tests/basic_tests.cpp index 2390a7c65f..97d03ed41b 100644 --- 
a/tests/tests/basic_tests.cpp +++ b/tests/tests/basic_tests.cpp @@ -25,7 +25,6 @@ #include #include -#include #include #include @@ -51,22 +50,22 @@ BOOST_FIXTURE_TEST_SUITE( basic_tests, database_fixture ) */ BOOST_AUTO_TEST_CASE( valid_name_test ) { - BOOST_CHECK( !is_valid_name( "a" ) ); + BOOST_CHECK( is_valid_name( "a" ) ); BOOST_CHECK( !is_valid_name( "A" ) ); BOOST_CHECK( !is_valid_name( "0" ) ); BOOST_CHECK( !is_valid_name( "." ) ); BOOST_CHECK( !is_valid_name( "-" ) ); - BOOST_CHECK( !is_valid_name( "aa" ) ); + BOOST_CHECK( is_valid_name( "aa" ) ); BOOST_CHECK( !is_valid_name( "aA" ) ); - BOOST_CHECK( !is_valid_name( "a0" ) ); + BOOST_CHECK( is_valid_name( "a0" ) ); BOOST_CHECK( !is_valid_name( "a." ) ); BOOST_CHECK( !is_valid_name( "a-" ) ); BOOST_CHECK( is_valid_name( "aaa" ) ); BOOST_CHECK( !is_valid_name( "aAa" ) ); BOOST_CHECK( is_valid_name( "a0a" ) ); - BOOST_CHECK( !is_valid_name( "a.a" ) ); + BOOST_CHECK( is_valid_name( "a.a" ) ); BOOST_CHECK( is_valid_name( "a-a" ) ); BOOST_CHECK( is_valid_name( "aa0" ) ); @@ -97,9 +96,9 @@ BOOST_AUTO_TEST_CASE( valid_name_test ) BOOST_CHECK( is_valid_name( "aaa.bbb.ccc" ) ); BOOST_CHECK( is_valid_name( "aaa--bbb--ccc" ) ); - BOOST_CHECK( !is_valid_name( "xn--sandmnnchen-p8a.de" ) ); + BOOST_CHECK( is_valid_name( "xn--sandmnnchen-p8a.de" ) ); BOOST_CHECK( is_valid_name( "xn--sandmnnchen-p8a.dex" ) ); - BOOST_CHECK( !is_valid_name( "xn-sandmnnchen-p8a.de" ) ); + BOOST_CHECK( is_valid_name( "xn-sandmnnchen-p8a.de" ) ); BOOST_CHECK( is_valid_name( "xn-sandmnnchen-p8a.dex" ) ); BOOST_CHECK( is_valid_name( "this-label-has-less-than-64-char.acters-63-to-be-really-precise" ) ); @@ -170,6 +169,123 @@ BOOST_AUTO_TEST_CASE( price_test ) BOOST_CHECK(a == c); BOOST_CHECK(!(b == c)); + GRAPHENE_REQUIRE_THROW( price(asset(1), asset(1)) * ratio_type(1,1), fc::exception ); + GRAPHENE_REQUIRE_THROW( price(asset(0), asset(1, asset_id_type(1))) * ratio_type(1,1), fc::exception ); + GRAPHENE_REQUIRE_THROW( price(asset(-1), asset(1, asset_id_type(1))) * ratio_type(1,1), fc::exception ); + GRAPHENE_REQUIRE_THROW( price(asset(1), asset(0, asset_id_type(1))) * ratio_type(1,1), fc::exception ); + GRAPHENE_REQUIRE_THROW( price(asset(1), asset(-1, asset_id_type(1))) * ratio_type(1,1), fc::exception ); + GRAPHENE_REQUIRE_THROW( price(asset(1), asset(1, asset_id_type(1))) * ratio_type(0,1), fc::exception ); + GRAPHENE_REQUIRE_THROW( price(asset(1), asset(1, asset_id_type(1))) * ratio_type(-1,1), fc::exception ); + GRAPHENE_REQUIRE_THROW( price(asset(1), asset(1, asset_id_type(1))) * ratio_type(1,0), std::domain_error ); // zero denominator + GRAPHENE_REQUIRE_THROW( price(asset(1), asset(1, asset_id_type(1))) * ratio_type(1,-1), fc::exception ); + + GRAPHENE_REQUIRE_THROW( price(asset(0), asset(1, asset_id_type(1))) / ratio_type(1,1), fc::exception ); + GRAPHENE_REQUIRE_THROW( price(asset(-1), asset(1, asset_id_type(1))) / ratio_type(1,1), fc::exception ); + GRAPHENE_REQUIRE_THROW( price(asset(1), asset(0, asset_id_type(1))) / ratio_type(1,1), fc::exception ); + GRAPHENE_REQUIRE_THROW( price(asset(1), asset(-1, asset_id_type(1))) / ratio_type(1,1), fc::exception ); + GRAPHENE_REQUIRE_THROW( price(asset(1), asset(1, asset_id_type(1))) / ratio_type(0,1), fc::exception ); + GRAPHENE_REQUIRE_THROW( price(asset(1), asset(1, asset_id_type(1))) / ratio_type(-1,1), fc::exception ); + GRAPHENE_REQUIRE_THROW( price(asset(1), asset(1, asset_id_type(1))) / ratio_type(1,0), std::domain_error ); // zero denominator + GRAPHENE_REQUIRE_THROW( price(asset(1), asset(1, 
asset_id_type(1))) / ratio_type(1,-1), fc::exception ); + + BOOST_CHECK( price(asset(1), asset(1, asset_id_type(1))) * ratio_type(1,1) == price(asset(1), asset(1, asset_id_type(1))) ); + BOOST_CHECK( price(asset(3), asset(2, asset_id_type(1))) * ratio_type(80,100) == price(asset(12), asset(10, asset_id_type(1))) ); + BOOST_CHECK( price(asset(3), asset(2, asset_id_type(1))) * ratio_type(120,100) == price(asset(9), asset(5, asset_id_type(1))) ); + + BOOST_CHECK( price(asset(1), asset(1, asset_id_type(1))) / ratio_type(1,1) == price(asset(1), asset(1, asset_id_type(1))) ); + BOOST_CHECK( price(asset(3), asset(2, asset_id_type(1))) / ratio_type(80,100) == price(asset(15), asset(8, asset_id_type(1))) ); + BOOST_CHECK( price(asset(3), asset(2, asset_id_type(1))) / ratio_type(120,100) == price(asset(30), asset(24, asset_id_type(1))) ); + + BOOST_CHECK( price_max(0,1) * ratio_type(2,1) == price_max(0,1) ); + BOOST_CHECK( price_max(0,1) * ratio_type(125317293,125317292) == price_max(0,1) ); + BOOST_CHECK( price_max(0,1) * ratio_type(125317293,105317292) == price_max(0,1) ); + BOOST_CHECK( price_max(0,1) * ratio_type(125317293,25317292) == price_max(0,1) ); + BOOST_CHECK( price_min(0,1) * ratio_type(1,2) == price_min(0,1) ); + BOOST_CHECK( price_min(0,1) * ratio_type(98752395,98752396) == price_min(0,1) ); + BOOST_CHECK( price_min(0,1) * ratio_type(70000000,99999999) == price_min(0,1) ); + BOOST_CHECK( price_min(0,1) * ratio_type(30000000,99999999) == price_min(0,1) ); + + price more_than_max = price_max(0,1); + more_than_max.base.amount *= 5; + more_than_max.quote.amount *= 3; + BOOST_CHECK( more_than_max * ratio_type(125317293,125317292) == more_than_max ); + BOOST_CHECK( more_than_max * ratio_type(125317293,125317293) == more_than_max ); + BOOST_CHECK( more_than_max * ratio_type(125317293,125317294) == price_max(0,1) ); + + price less_than_min = price_min(0,1); + less_than_min.base.amount *= 19; + less_than_min.quote.amount *= 47; + BOOST_CHECK( less_than_min * ratio_type(125317293,125317292) == price_min(0,1) ); + BOOST_CHECK( less_than_min * ratio_type(125317293,125317293) == less_than_min ); + BOOST_CHECK( less_than_min * ratio_type(125317293,125317294) == less_than_min ); + + price less_than_max = price_max(0,1); + less_than_max.quote.amount = 11; + BOOST_CHECK( less_than_max * ratio_type(7,1) == price(asset(less_than_max.base.amount*7/11),asset(1,asset_id_type(1))) ); + less_than_max.quote.amount = 92131419; + BOOST_CHECK( less_than_max * ratio_type(7,1) == price(asset(less_than_max.base.amount*7/92131419),asset(1,asset_id_type(1))) ); + less_than_max.quote.amount = 192131419; + BOOST_CHECK( less_than_max * ratio_type(7,1) == price(asset(less_than_max.base.amount.value*7>>3),asset(192131419>>3,asset_id_type(1))) ); + + price more_than_min = price_min(0,1); + more_than_min.base.amount = 11; + BOOST_CHECK( more_than_min * ratio_type(1,7) == price(asset(1),asset(more_than_min.quote.amount*7/11,asset_id_type(1))) ); + more_than_min.base.amount = 64823; + BOOST_CHECK( more_than_min * ratio_type(31672,102472047) == price(asset(1),asset((fc::uint128(more_than_min.quote.amount.value)*102472047/(64823*31672)).to_uint64(),asset_id_type(1))) ); + more_than_min.base.amount = 13; + BOOST_CHECK( more_than_min * ratio_type(202472059,3) == price(asset((int64_t(13)*202472059)>>1),asset((more_than_min.quote.amount.value*3)>>1,asset_id_type(1))) ); // after >>1, quote = max*1.5, but gcd = 3, so quote/=3 = max/2, less than max + + price less_than_max2 = price_max(0,1); + less_than_max2.base.amount *= 2; + 
less_than_max2.quote.amount *= 7; + BOOST_CHECK( less_than_max2 * ratio_type(1,1) == less_than_max2 ); + BOOST_CHECK( less_than_max2 * ratio_type(5,2) == price(asset(less_than_max2.base.amount*5/2/7),asset(1,asset_id_type(1))) ); + + BOOST_CHECK( ( asset(1) * price( asset(1), asset(1, asset_id_type(1)) ) ) == asset(1, asset_id_type(1)) ); + BOOST_CHECK( ( asset(1) * price( asset(1, asset_id_type(1)), asset(1) ) ) == asset(1, asset_id_type(1)) ); + BOOST_CHECK( ( asset(1, asset_id_type(1)) * price( asset(1), asset(1, asset_id_type(1)) ) ) == asset(1) ); + BOOST_CHECK( ( asset(1, asset_id_type(1)) * price( asset(1, asset_id_type(1)), asset(1) ) ) == asset(1) ); + + BOOST_CHECK( ( asset(3) * price( asset(3), asset(5, asset_id_type(1)) ) ) == asset(5, asset_id_type(1)) ); // round_down(3*5/3) + BOOST_CHECK( ( asset(5) * price( asset(2, asset_id_type(1)), asset(7) ) ) == asset(1, asset_id_type(1)) ); // round_down(5*2/7) + BOOST_CHECK( ( asset(7, asset_id_type(1)) * price( asset(2), asset(3, asset_id_type(1)) ) ) == asset(4) ); // round_down(7*2/3) + BOOST_CHECK( ( asset(9, asset_id_type(1)) * price( asset(8, asset_id_type(1)), asset(7) ) ) == asset(7) ); // round_down(9*7/8) + + // asset and price doesn't match + BOOST_CHECK_THROW( asset(1) * price( asset(1, asset_id_type(2)), asset(1, asset_id_type(1)) ), fc::assert_exception ); + // divide by zero + BOOST_CHECK_THROW( asset(1) * price( asset(0), asset(1, asset_id_type(1)) ), fc::assert_exception ); + BOOST_CHECK_THROW( asset(1) * price( asset(1, asset_id_type(1)), asset(0) ), fc::assert_exception ); + // overflow + BOOST_CHECK_THROW( asset(GRAPHENE_MAX_SHARE_SUPPLY/2+1) * price( asset(1), asset(2, asset_id_type(1)) ), fc::assert_exception ); + BOOST_CHECK_THROW( asset(2) * price( asset(GRAPHENE_MAX_SHARE_SUPPLY/2+1, asset_id_type(1)), asset(1) ), fc::assert_exception ); + + BOOST_CHECK( asset(1).multiply_and_round_up( price( asset(1), asset(1, asset_id_type(1)) ) ) == asset(1, asset_id_type(1)) ); + BOOST_CHECK( asset(1).multiply_and_round_up( price( asset(1, asset_id_type(1)), asset(1) ) ) == asset(1, asset_id_type(1)) ); + BOOST_CHECK( asset(1, asset_id_type(1)).multiply_and_round_up( price( asset(1), asset(1, asset_id_type(1)) ) ) == asset(1) ); + BOOST_CHECK( asset(1, asset_id_type(1)).multiply_and_round_up( price( asset(1, asset_id_type(1)), asset(1) ) ) == asset(1) ); + + // round_up(3*5/3) + BOOST_CHECK( asset(3).multiply_and_round_up( price( asset(3), asset(5, asset_id_type(1)) ) ) == asset(5, asset_id_type(1)) ); + // round_up(5*2/7) + BOOST_CHECK( asset(5).multiply_and_round_up( price( asset(2, asset_id_type(1)), asset(7) ) ) == asset(2, asset_id_type(1)) ); + // round_up(7*2/3) + BOOST_CHECK( asset(7, asset_id_type(1)).multiply_and_round_up( price( asset(2), asset(3, asset_id_type(1)) ) ) == asset(5) ); + // round_up(9*7/8) + BOOST_CHECK( asset(9, asset_id_type(1)).multiply_and_round_up( price( asset(8, asset_id_type(1)), asset(7) ) ) == asset(8) ); + + // asset and price doesn't match + BOOST_CHECK_THROW( asset(1, asset_id_type(3)).multiply_and_round_up( price( asset(1, asset_id_type(2)), asset(1) ) ), + fc::assert_exception ); + // divide by zero + BOOST_CHECK_THROW( asset(1).multiply_and_round_up( price( asset(0), asset(1, asset_id_type(1)) ) ), fc::assert_exception ); + BOOST_CHECK_THROW( asset(1).multiply_and_round_up( price( asset(1, asset_id_type(1)), asset(0) ) ), fc::assert_exception ); + // overflow + BOOST_CHECK_THROW( asset(GRAPHENE_MAX_SHARE_SUPPLY/2+1).multiply_and_round_up( price( asset(1), asset(2, 
asset_id_type(1)) ) ), + fc::assert_exception ); + BOOST_CHECK_THROW( asset(2).multiply_and_round_up( price( asset(GRAPHENE_MAX_SHARE_SUPPLY/2+1, asset_id_type(1)), asset(1) ) ), + fc::assert_exception ); + price_feed dummy; dummy.maintenance_collateral_ratio = 1002; dummy.maximum_short_squeeze_ratio = 1234; @@ -178,6 +294,63 @@ BOOST_AUTO_TEST_CASE( price_test ) BOOST_CHECK(dummy == dummy2); } +BOOST_AUTO_TEST_CASE( price_multiplication_test ) +{ try { + // random test + std::mt19937_64 gen( time(NULL) ); + std::uniform_int_distribution amt_uid(1, GRAPHENE_MAX_SHARE_SUPPLY); + std::uniform_int_distribution amt_uid2(1, 1000*1000*1000); + std::uniform_int_distribution amt_uid3(1, 1000*1000); + std::uniform_int_distribution amt_uid4(1, 1000); + asset a; + price p; + for( int i = 1*1000*1000; i > 0; --i ) + { + if( i <= 30 ) + a = asset( 0 ); + else if( i % 4 == 0 ) + a = asset( amt_uid(gen) ); + else if( i % 4 == 1 ) + a = asset( amt_uid2(gen) ); + else if( i % 4 == 2 ) + a = asset( amt_uid3(gen) ); + else // if( i % 4 == 3 ) + a = asset( amt_uid4(gen) ); + + if( i % 7 == 0 ) + p = price( asset(amt_uid(gen)), asset(amt_uid(gen), asset_id_type(1)) ); + else if( i % 7 == 1 ) + p = price( asset(amt_uid2(gen)), asset(amt_uid2(gen), asset_id_type(1)) ); + else if( i % 7 == 2 ) + p = price( asset(amt_uid3(gen)), asset(amt_uid3(gen), asset_id_type(1)) ); + else if( i % 7 == 3 ) + p = price( asset(amt_uid4(gen)), asset(amt_uid4(gen), asset_id_type(1)) ); + else if( i % 7 == 4 ) + p = price( asset(amt_uid(gen)), asset(amt_uid(gen), asset_id_type(1)) ); + else if( i % 7 == 5 ) + p = price( asset(amt_uid4(gen)), asset(amt_uid2(gen), asset_id_type(1)) ); + else // if( i % 7 == 6 ) + p = price( asset(amt_uid2(gen)), asset(amt_uid4(gen), asset_id_type(1)) ); + + try + { + asset b = a * p; + asset a1 = b.multiply_and_round_up( p ); + BOOST_CHECK( a1 <= a ); + BOOST_CHECK( (a1 * p) == b ); + + b = a.multiply_and_round_up( p ); + a1 = b * p; + BOOST_CHECK( a1 >= a ); + BOOST_CHECK( a1.multiply_and_round_up( p ) == b ); + } + catch( fc::assert_exception& e ) + { + BOOST_CHECK( e.to_detail_string().find( "result <= GRAPHENE_MAX_SHARE_SUPPLY" ) != string::npos ); + } + } +} FC_LOG_AND_RETHROW() } + BOOST_AUTO_TEST_CASE( memo_test ) { try { memo_data m; @@ -235,7 +408,7 @@ BOOST_AUTO_TEST_CASE( scaled_precision ) BOOST_AUTO_TEST_CASE( merkle_root ) { - signed_block block; + clearable_block block; vector tx; vector t; const uint32_t num_tx = 10; @@ -271,6 +444,7 @@ BOOST_AUTO_TEST_CASE( merkle_root ) dA = d(t[0], t[1]); block.transactions.push_back( tx[1] ); + block.clear(); BOOST_CHECK( block.calculate_merkle_root() == c(dA) ); /* @@ -285,6 +459,7 @@ BOOST_AUTO_TEST_CASE( merkle_root ) dI = d(dA, dB); block.transactions.push_back( tx[2] ); + block.clear(); BOOST_CHECK( block.calculate_merkle_root() == c(dI) ); /* @@ -299,6 +474,7 @@ BOOST_AUTO_TEST_CASE( merkle_root ) dI = d(dA, dB); block.transactions.push_back( tx[3] ); + block.clear(); BOOST_CHECK( block.calculate_merkle_root() == c(dI) ); /* @@ -316,6 +492,7 @@ BOOST_AUTO_TEST_CASE( merkle_root ) dM = d(dI, dJ); block.transactions.push_back( tx[4] ); + block.clear(); BOOST_CHECK( block.calculate_merkle_root() == c(dM) ); /* @@ -333,6 +510,7 @@ BOOST_AUTO_TEST_CASE( merkle_root ) dM = d(dI, dJ); block.transactions.push_back( tx[5] ); + block.clear(); BOOST_CHECK( block.calculate_merkle_root() == c(dM) ); /* @@ -350,6 +528,7 @@ BOOST_AUTO_TEST_CASE( merkle_root ) dM = d(dI, dJ); block.transactions.push_back( tx[6] ); + block.clear(); BOOST_CHECK( 
block.calculate_merkle_root() == c(dM) ); /* @@ -367,6 +546,7 @@ BOOST_AUTO_TEST_CASE( merkle_root ) dM = d(dI, dJ); block.transactions.push_back( tx[7] ); + block.clear(); BOOST_CHECK( block.calculate_merkle_root() == c(dM) ); /* @@ -387,6 +567,7 @@ BOOST_AUTO_TEST_CASE( merkle_root ) dO = d(dM, dN); block.transactions.push_back( tx[8] ); + block.clear(); BOOST_CHECK( block.calculate_merkle_root() == c(dO) ); /* @@ -407,7 +588,23 @@ BOOST_AUTO_TEST_CASE( merkle_root ) dO = d(dM, dN); block.transactions.push_back( tx[9] ); + block.clear(); BOOST_CHECK( block.calculate_merkle_root() == c(dO) ); } +/** + * Reproduces https://github.com/bitshares/bitshares-core/issues/888 and tests fix for it. + */ +BOOST_AUTO_TEST_CASE( bitasset_feed_expiration_test ) +{ + time_point_sec now = fc::time_point::now(); + + asset_bitasset_data_object o; + + o.current_feed_publication_time = now - fc::hours(1); + o.options.feed_lifetime_sec = std::numeric_limits::max() - 1; + + BOOST_CHECK( !o.feed_is_expired( now ) ); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/bitasset_tests.cpp b/tests/tests/bitasset_tests.cpp new file mode 100644 index 0000000000..4fc6097650 --- /dev/null +++ b/tests/tests/bitasset_tests.cpp @@ -0,0 +1,1379 @@ +/* + * Copyright (c) 2018 Bitshares Foundation, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include +#include + + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include "../common/database_fixture.hpp" + +using namespace graphene::chain; +using namespace graphene::chain::test; + +BOOST_FIXTURE_TEST_SUITE( bitasset_tests, database_fixture ) + +/***** + * @brief helper method to change a backing asset to a new one + * @param fixture the database_fixture + * @param signing_key the signer + * @param asset_id_to_update asset to update + * @param new_backing_asset_id the new backing asset + */ +void change_backing_asset(database_fixture& fixture, const fc::ecc::private_key& signing_key, + asset_id_type asset_id_to_update, asset_id_type new_backing_asset_id) +{ + try + { + asset_update_bitasset_operation ba_op; + const asset_object& asset_to_update = asset_id_to_update(fixture.db); + ba_op.asset_to_update = asset_id_to_update; + ba_op.issuer = asset_to_update.issuer; + ba_op.new_options.short_backing_asset = new_backing_asset_id; + fixture.trx.operations.push_back(ba_op); + fixture.sign(fixture.trx, signing_key); + PUSH_TX(fixture.db, fixture.trx, ~0); + fixture.generate_block(); + fixture.trx.clear(); + } + catch (fc::exception& ex) + { + BOOST_FAIL( "Exception thrown in change_backing_asset. Exception was: " + + ex.to_string(fc::log_level(fc::log_level::all)) ); + } +} + +/****** + * @brief helper method to turn witness_fed_asset on and off + * @param fixture the database_fixture + * @param new_issuer optionally change the issuer + * @param signing_key signer + * @param asset_id asset we want to change + * @param witness_fed true if you want this to be a witness fed asset + */ +void change_asset_options(database_fixture& fixture, const optional& new_issuer, + const fc::ecc::private_key& signing_key, + asset_id_type asset_id, bool witness_fed) +{ + asset_update_operation op; + const asset_object& obj = asset_id(fixture.db); + op.asset_to_update = asset_id; + op.issuer = obj.issuer; + if (new_issuer) + op.new_issuer = new_issuer; + op.new_options = obj.options; + if (witness_fed) + { + op.new_options.flags |= witness_fed_asset; + op.new_options.flags &= ~committee_fed_asset; + } + else + { + op.new_options.flags &= ~witness_fed_asset; // we don't care about the committee flag here + } + fixture.trx.operations.push_back(op); + fixture.sign( fixture.trx, signing_key ); + PUSH_TX( fixture.db, fixture.trx, ~0 ); + fixture.generate_block(); + fixture.trx.clear(); + +} + +/********* + * @brief helper method to create a coin backed by a bitasset + * @param fixture the database_fixture + * @param index added to name of the coin + * @param backing the backing asset + * @param signing_key the signing key + */ +const graphene::chain::asset_object& create_bitasset_backed(graphene::chain::database_fixture& fixture, + int index, graphene::chain::asset_id_type backing, const fc::ecc::private_key& signing_key) +{ + // create the coin + std::string name = "COIN" + std::to_string(index + 1) + "TEST"; + const graphene::chain::asset_object& obj = fixture.create_bitasset(name); + asset_id_type asset_id = obj.get_id(); + // adjust the backing asset + change_backing_asset(fixture, signing_key, asset_id, backing); + fixture.trx.set_expiration(fixture.db.get_dynamic_global_properties().next_maintenance_time); + return obj; +} + + +/********* + * @brief make sure feeds still work after changing backing asset on a witness-fed asset + */ +BOOST_AUTO_TEST_CASE( reset_backing_asset_on_witness_asset ) +{ + 
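// Outline of the steps that follow: advance to just before HARDFORK_CORE_868_890,
// create USDBIT and a witness-fed JMJBIT backed by USDBIT, publish three witness
// feeds, switch JMJBIT's backing asset to CORE and verify the published feeds are
// kept, republish feeds, cross the hardfork (one stale feed is erased at the next
// maintenance), switch the backing asset again and verify the feeds are now reset,
// then republish.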
ACTORS((nathan)); + + /* + // do a maintenance block + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + // generate blocks until close to hard fork + generate_blocks( HARDFORK_CORE_868_890_TIME - fc::hours(1) ); + */ + + BOOST_TEST_MESSAGE("Advance to near hard fork"); + auto maint_interval = db.get_global_properties().parameters.maintenance_interval; + generate_blocks( HARDFORK_CORE_868_890_TIME - maint_interval); + trx.set_expiration(HARDFORK_CORE_868_890_TIME - fc::seconds(1)); + + BOOST_TEST_MESSAGE("Create USDBIT"); + asset_id_type bit_usd_id = create_bitasset("USDBIT").id; + asset_id_type core_id = bit_usd_id(db).bitasset_data(db).options.short_backing_asset; + + { + BOOST_TEST_MESSAGE("Update the USDBIT asset options"); + change_asset_options(*this, nathan_id, nathan_private_key, bit_usd_id, false ); + } + + BOOST_TEST_MESSAGE("Create JMJBIT based on USDBIT."); + asset_id_type bit_jmj_id = create_bitasset("JMJBIT").id; + { + BOOST_TEST_MESSAGE("Update the JMJBIT asset options"); + change_asset_options(*this, nathan_id, nathan_private_key, bit_jmj_id, true ); + } + + { + BOOST_TEST_MESSAGE("Update the JMJBIT bitasset options"); + asset_update_bitasset_operation ba_op; + const asset_object& obj = bit_jmj_id(db); + ba_op.asset_to_update = obj.get_id(); + ba_op.issuer = obj.issuer; + ba_op.new_options.short_backing_asset = bit_usd_id; + ba_op.new_options.minimum_feeds = 1; + trx.operations.push_back(ba_op); + sign(trx, nathan_private_key); + PUSH_TX(db, trx, ~0); + generate_block(); + trx.clear(); + } + + BOOST_TEST_MESSAGE("Grab active witnesses"); + auto& global_props = db.get_global_properties(); + std::vector active_witnesses; + for(const witness_id_type& wit_id : global_props.active_witnesses) + active_witnesses.push_back(wit_id(db).witness_account); + BOOST_REQUIRE_EQUAL(active_witnesses.size(), 10lu); + + { + BOOST_TEST_MESSAGE("Adding price feed 1"); + publish_feed(active_witnesses[0], bit_usd_id, 1, bit_jmj_id, 300, core_id); + + const asset_bitasset_data_object& bitasset = bit_jmj_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset.current_feed.settlement_price.to_real(), 300.0); + BOOST_CHECK(bitasset.current_feed.maintenance_collateral_ratio == GRAPHENE_DEFAULT_MAINTENANCE_COLLATERAL_RATIO); + } + { + BOOST_TEST_MESSAGE("Adding price feed 2"); + publish_feed(active_witnesses[1], bit_usd_id, 1, bit_jmj_id, 100, core_id); + + const asset_bitasset_data_object& bitasset = bit_jmj_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset.current_feed.settlement_price.to_real(), 300.0); + BOOST_CHECK(bitasset.current_feed.maintenance_collateral_ratio == GRAPHENE_DEFAULT_MAINTENANCE_COLLATERAL_RATIO); + } + { + BOOST_TEST_MESSAGE("Adding price feed 3"); + publish_feed(active_witnesses[2], bit_usd_id, 1, bit_jmj_id, 1, core_id); + + const asset_bitasset_data_object& bitasset = bit_jmj_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset.current_feed.settlement_price.to_real(), 100.0); + BOOST_CHECK(bitasset.current_feed.maintenance_collateral_ratio == GRAPHENE_DEFAULT_MAINTENANCE_COLLATERAL_RATIO); + } + { + BOOST_TEST_MESSAGE("Change underlying asset of bit_jmj from bit_usd to core"); + change_backing_asset(*this, nathan_private_key, bit_jmj_id, core_id); + + BOOST_TEST_MESSAGE("Verify feed producers have not been reset"); + const asset_bitasset_data_object& jmj_obj = bit_jmj_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(jmj_obj.feeds.size(), 3ul); + } + { + BOOST_TEST_MESSAGE("With underlying bitasset changed from one to another, price feeds should 
still be publish-able"); + BOOST_TEST_MESSAGE("Re-Adding Witness 1 price feed"); + publish_feed(active_witnesses[0], core_id, 1, bit_jmj_id, 30, core_id); + + const asset_bitasset_data_object& bitasset = bit_jmj_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset.current_feed.settlement_price.to_real(), 1); + BOOST_CHECK(bitasset.current_feed.maintenance_collateral_ratio == GRAPHENE_DEFAULT_MAINTENANCE_COLLATERAL_RATIO); + + BOOST_CHECK(bitasset.current_feed.core_exchange_rate.base.asset_id != bitasset.current_feed.core_exchange_rate.quote.asset_id); + } + { + BOOST_TEST_MESSAGE("Re-Adding Witness 2 price feed"); + publish_feed(active_witnesses[1], core_id, 1, bit_jmj_id, 100, core_id); + + const asset_bitasset_data_object& bitasset = bit_jmj_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset.current_feed.settlement_price.to_real(), 100); + BOOST_CHECK(bitasset.current_feed.maintenance_collateral_ratio == GRAPHENE_DEFAULT_MAINTENANCE_COLLATERAL_RATIO); + } + { + BOOST_TEST_MESSAGE("Advance to after hard fork"); + generate_blocks( HARDFORK_CORE_868_890_TIME + fc::seconds(1)); + trx.set_expiration(HARDFORK_CORE_868_890_TIME + fc::hours(2)); + + BOOST_TEST_MESSAGE("After hardfork, 1 feed should have been erased"); + const asset_bitasset_data_object& jmj_obj = bit_jmj_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(jmj_obj.feeds.size(), 2ul); + } + { + BOOST_TEST_MESSAGE("After hardfork, change underlying asset of bit_jmj from core to bit_usd"); + change_backing_asset(*this, nathan_private_key, bit_jmj_id, bit_usd_id); + + BOOST_TEST_MESSAGE("Verify feed producers have been reset"); + const asset_bitasset_data_object& jmj_obj = bit_jmj_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(jmj_obj.feeds.size(), 0ul); + } + { + BOOST_TEST_MESSAGE("With underlying bitasset changed from one to another, price feeds should still be publish-able"); + BOOST_TEST_MESSAGE("Re-Adding Witness 1 price feed"); + publish_feed(active_witnesses[0], bit_usd_id, 1, bit_jmj_id, 30, core_id); + + const asset_bitasset_data_object& bitasset = bit_jmj_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset.current_feed.settlement_price.to_real(), 30); + BOOST_CHECK(bitasset.current_feed.maintenance_collateral_ratio == GRAPHENE_DEFAULT_MAINTENANCE_COLLATERAL_RATIO); + + BOOST_CHECK(bitasset.current_feed.core_exchange_rate.base.asset_id != bitasset.current_feed.core_exchange_rate.quote.asset_id); + } +} + +/**** + * @brief make sure feeds work correctly after changing the backing asset on a non-witness-fed asset + */ +BOOST_AUTO_TEST_CASE( reset_backing_asset_on_non_witness_asset ) +{ + ACTORS((nathan)(dan)(ben)(vikram)); + + BOOST_TEST_MESSAGE("Advance to near hard fork"); + auto maint_interval = db.get_global_properties().parameters.maintenance_interval; + generate_blocks( HARDFORK_CORE_868_890_TIME - maint_interval); + trx.set_expiration(HARDFORK_CORE_868_890_TIME - fc::seconds(1)); + + + BOOST_TEST_MESSAGE("Create USDBIT"); + asset_id_type bit_usd_id = create_bitasset("USDBIT").id; + asset_id_type core_id = bit_usd_id(db).bitasset_data(db).options.short_backing_asset; + + { + BOOST_TEST_MESSAGE("Update the USDBIT asset options"); + change_asset_options(*this, nathan_id, nathan_private_key, bit_usd_id, false ); + } + + BOOST_TEST_MESSAGE("Create JMJBIT based on USDBIT."); + asset_id_type bit_jmj_id = create_bitasset("JMJBIT").id; + { + BOOST_TEST_MESSAGE("Update the JMJBIT asset options"); + change_asset_options(*this, nathan_id, nathan_private_key, bit_jmj_id, false ); + } + { + BOOST_TEST_MESSAGE("Update the JMJBIT 
bitasset options"); + asset_update_bitasset_operation ba_op; + const asset_object& obj = bit_jmj_id(db); + ba_op.asset_to_update = obj.get_id(); + ba_op.issuer = obj.issuer; + ba_op.new_options.short_backing_asset = bit_usd_id; + ba_op.new_options.minimum_feeds = 1; + trx.operations.push_back(ba_op); + sign(trx, nathan_private_key); + PUSH_TX(db, trx, ~0); + generate_block(); + trx.clear(); + } + { + BOOST_TEST_MESSAGE("Set feed producers for JMJBIT"); + asset_update_feed_producers_operation op; + op.asset_to_update = bit_jmj_id; + op.issuer = nathan_id; + op.new_feed_producers = {dan_id, ben_id, vikram_id}; + trx.operations.push_back(op); + sign( trx, nathan_private_key ); + PUSH_TX( db, trx, ~0 ); + generate_block(); + trx.clear(); + } + + { + BOOST_TEST_MESSAGE("Verify feed producers are registered for JMJBIT"); + const asset_bitasset_data_object& obj = bit_jmj_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(obj.feeds.size(), 3ul); + BOOST_CHECK(obj.current_feed == price_feed()); + + BOOST_CHECK( bit_usd_id == obj.options.short_backing_asset ); + } + { + BOOST_TEST_MESSAGE("Adding Vikram's price feed"); + publish_feed(vikram_id, bit_usd_id, 1, bit_jmj_id, 300, core_id); + + const asset_bitasset_data_object& bitasset = bit_jmj_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset.current_feed.settlement_price.to_real(), 300.0); + BOOST_CHECK(bitasset.current_feed.maintenance_collateral_ratio == GRAPHENE_DEFAULT_MAINTENANCE_COLLATERAL_RATIO); + } + { + BOOST_TEST_MESSAGE("Adding Ben's pricing to JMJBIT"); + publish_feed(ben_id, bit_usd_id, 1, bit_jmj_id, 100, core_id); + + const asset_bitasset_data_object& bitasset = bit_jmj_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset.current_feed.settlement_price.to_real(), 300); + BOOST_CHECK(bitasset.current_feed.maintenance_collateral_ratio == GRAPHENE_DEFAULT_MAINTENANCE_COLLATERAL_RATIO); + } + { + BOOST_TEST_MESSAGE("Adding Dan's pricing to JMJBIT"); + publish_feed(dan_id, bit_usd_id, 1, bit_jmj_id, 1, core_id); + + const asset_bitasset_data_object& bitasset = bit_jmj_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset.current_feed.settlement_price.to_real(), 100); + BOOST_CHECK(bitasset.current_feed.maintenance_collateral_ratio == GRAPHENE_DEFAULT_MAINTENANCE_COLLATERAL_RATIO); + generate_block(); + trx.clear(); + + BOOST_CHECK(bitasset.current_feed.core_exchange_rate.base.asset_id != bitasset.current_feed.core_exchange_rate.quote.asset_id); + } + { + BOOST_TEST_MESSAGE("Change underlying asset of bit_jmj from bit_usd to core"); + change_backing_asset(*this, nathan_private_key, bit_jmj_id, core_id); + + BOOST_TEST_MESSAGE("Verify feed producers have not been reset"); + const asset_bitasset_data_object& jmj_obj = bit_jmj_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(jmj_obj.feeds.size(), 3ul); + for(const auto& feed : jmj_obj.feeds) { + BOOST_CHECK(!feed.second.second.settlement_price.is_null()); + } + } + { + BOOST_TEST_MESSAGE("Add a new (and correct) feed price for 1 feed producer"); + publish_feed(vikram_id, core_id, 1, bit_jmj_id, 300, core_id); + } + { + BOOST_TEST_MESSAGE("Advance to past hard fork"); + generate_blocks( HARDFORK_CORE_868_890_TIME + maint_interval); + trx.set_expiration(HARDFORK_CORE_868_890_TIME + fc::hours(48)); + + BOOST_TEST_MESSAGE("Verify that the incorrect feeds have been corrected"); + const asset_bitasset_data_object& jmj_obj = bit_jmj_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(jmj_obj.feeds.size(), 3ul); + int nan_count = 0; + for(const auto& feed : jmj_obj.feeds) + { + if 
(feed.second.second.settlement_price.is_null()) + nan_count++; + } + BOOST_CHECK_EQUAL(nan_count, 2); + // the settlement price will be NaN until 50% of price feeds are valid + //BOOST_CHECK_EQUAL(jmj_obj.current_feed.settlement_price.to_real(), 300); + } + { + BOOST_TEST_MESSAGE("After hardfork, change underlying asset of bit_jmj from core to bit_usd"); + change_backing_asset(*this, nathan_private_key, bit_jmj_id, bit_usd_id); + + BOOST_TEST_MESSAGE("Verify feed producers have been reset"); + const asset_bitasset_data_object& jmj_obj = bit_jmj_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(jmj_obj.feeds.size(), 3ul); + for(const auto& feed : jmj_obj.feeds) + { + BOOST_CHECK(feed.second.second.settlement_price.is_null()); + } + } + { + BOOST_TEST_MESSAGE("With underlying bitasset changed from one to another, price feeds should still be publish-able"); + BOOST_TEST_MESSAGE("Adding Vikram's price feed"); + publish_feed(vikram_id, bit_usd_id, 1, bit_jmj_id, 30, core_id); + + const asset_bitasset_data_object& bitasset = bit_jmj_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset.current_feed.settlement_price.to_real(), 30); + BOOST_CHECK(bitasset.current_feed.maintenance_collateral_ratio == GRAPHENE_DEFAULT_MAINTENANCE_COLLATERAL_RATIO); + + BOOST_TEST_MESSAGE("Adding Ben's pricing to JMJBIT"); + publish_feed(ben_id, bit_usd_id, 1, bit_jmj_id, 25, core_id); + + BOOST_CHECK_EQUAL(bitasset.current_feed.settlement_price.to_real(), 30); + BOOST_CHECK(bitasset.current_feed.maintenance_collateral_ratio == GRAPHENE_DEFAULT_MAINTENANCE_COLLATERAL_RATIO); + + BOOST_TEST_MESSAGE("Adding Dan's pricing to JMJBIT"); + publish_feed(dan_id, bit_usd_id, 1, bit_jmj_id, 10, core_id); + + BOOST_CHECK_EQUAL(bitasset.current_feed.settlement_price.to_real(), 25); + BOOST_CHECK(bitasset.current_feed.maintenance_collateral_ratio == GRAPHENE_DEFAULT_MAINTENANCE_COLLATERAL_RATIO); + generate_block(); + trx.clear(); + + BOOST_CHECK(bitasset.current_feed.core_exchange_rate.base.asset_id != bitasset.current_feed.core_exchange_rate.quote.asset_id); + } +} + +/********* + * @brief Update median feeds after feed_lifetime_sec changed + */ +BOOST_AUTO_TEST_CASE( hf_890_test ) +{ + uint32_t skip = database::skip_witness_signature + | database::skip_transaction_signatures + | database::skip_transaction_dupe_check + | database::skip_block_size_check + | database::skip_tapos_check + | database::skip_merkle_check + ; + generate_blocks(HARDFORK_615_TIME, true, skip); // get around Graphene issue #615 feed expiration bug + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time, true, skip); + + for( int i=0; i<2; ++i ) + { + int blocks = 0; + auto mi = db.get_global_properties().parameters.maintenance_interval; + + if( i == 1 ) // go beyond hard fork + { + blocks += generate_blocks(HARDFORK_CORE_868_890_TIME - mi, true, skip); + blocks += generate_blocks(db.get_dynamic_global_properties().next_maintenance_time, true, skip); + } + set_expiration( db, trx ); + + ACTORS((buyer)(seller)(borrower)(feedproducer)); + + int64_t init_balance(1000000); + + transfer(committee_account, buyer_id, asset(init_balance)); + transfer(committee_account, borrower_id, asset(init_balance)); + + const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); + asset_id_type usd_id = bitusd.id; + + { + // change feed lifetime + const asset_object& asset_to_update = usd_id(db); + asset_update_bitasset_operation ba_op; + ba_op.asset_to_update = usd_id; + ba_op.issuer = asset_to_update.issuer; + ba_op.new_options = 
asset_to_update.bitasset_data(db).options; + ba_op.new_options.feed_lifetime_sec = 600; + trx.operations.push_back(ba_op); + PUSH_TX(db, trx, ~0); + trx.clear(); + } + + // prepare feed data + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + + // set price feed + update_feed_producers( usd_id(db), {feedproducer_id} ); + current_feed.settlement_price = asset(100, usd_id) / asset(5); + publish_feed( usd_id, feedproducer_id, current_feed ); + + // Place some collateralized orders + // start out with 300% collateral, call price is 15/175 CORE/USD = 60/700 + borrow( borrower_id, asset(100, usd_id), asset(15) ); + + transfer( borrower_id, seller_id, asset(100, usd_id) ); + + // Adjust price feed to get call order into margin call territory + current_feed.settlement_price = asset(100, usd_id) / asset(10); + publish_feed( usd_id, feedproducer_id, current_feed ); + // settlement price = 100 USD / 10 CORE, mssp = 100/11 USD/CORE + + // let the feed expire + blocks += generate_blocks( db.head_block_time() + 1200, true, skip ); + set_expiration( db, trx ); + + // check: median feed should be null + BOOST_CHECK( usd_id(db).bitasset_data(db).current_feed.settlement_price.is_null() ); + + // place a sell order, it won't be matched with the call order + limit_order_id_type sell_id = create_sell_order(seller_id, asset(10, usd_id), asset(1))->id; + + { + // change feed lifetime to longer + const asset_object& asset_to_update = usd_id(db); + asset_update_bitasset_operation ba_op; + ba_op.asset_to_update = usd_id; + ba_op.issuer = asset_to_update.issuer; + ba_op.new_options = asset_to_update.bitasset_data(db).options; + ba_op.new_options.feed_lifetime_sec = HARDFORK_CORE_868_890_TIME.sec_since_epoch() + - db.head_block_time().sec_since_epoch() + + mi + + 1800; + trx.operations.push_back(ba_op); + PUSH_TX(db, trx, ~0); + trx.clear(); + } + + // check + if( i == 0 ) // before hard fork, median feed is still null, and limit order is still there + { + BOOST_CHECK( usd_id(db).bitasset_data(db).current_feed.settlement_price.is_null() ); + BOOST_CHECK( db.find( sell_id ) ); + + // go beyond hard fork + blocks += generate_blocks(HARDFORK_CORE_868_890_TIME - mi, true, skip); + blocks += generate_blocks(db.get_dynamic_global_properties().next_maintenance_time, true, skip); + } + + // after hard fork, median feed should become valid, and the limit order should be filled + { + BOOST_CHECK( usd_id(db).bitasset_data(db).current_feed.settlement_price == current_feed.settlement_price ); + BOOST_CHECK( !db.find( sell_id ) ); + } + + // undo above tx's and reset + generate_block( skip ); + ++blocks; + while( blocks > 0 ) + { + db.pop_block(); + --blocks; + } + } +} + +class bitasset_evaluator_wrapper : public asset_update_bitasset_evaluator +{ +public: + void set_db(database& db) + { + this->trx_state = new transaction_evaluation_state(&db); + } +}; + +struct assets_922_931 +{ + asset_id_type bit_usd; + asset_id_type bit_usdbacked; + asset_id_type bit_usdbacked2; + asset_id_type bit_child_bitasset; + asset_id_type bit_parent; + asset_id_type user_issued; + asset_id_type six_precision; + asset_id_type prediction; +}; + +assets_922_931 create_assets_922_931(database_fixture* fixture) +{ + assets_922_931 asset_objs; + BOOST_TEST_MESSAGE( "Create USDBIT" ); + asset_objs.bit_usd = fixture->create_bitasset( "USDBIT", GRAPHENE_COMMITTEE_ACCOUNT ).get_id(); + + BOOST_TEST_MESSAGE( "Create USDBACKED" ); + asset_objs.bit_usdbacked = 
fixture->create_bitasset( "USDBACKED", GRAPHENE_COMMITTEE_ACCOUNT, + 100, charge_market_fee, 2, asset_objs.bit_usd ).get_id(); + + BOOST_TEST_MESSAGE( "Create USDBACKEDII" ); + asset_objs.bit_usdbacked2 = fixture->create_bitasset( "USDBACKEDII", GRAPHENE_WITNESS_ACCOUNT, + 100, charge_market_fee, 2, asset_objs.bit_usd ).get_id(); + + BOOST_TEST_MESSAGE( "Create PARENT" ); + asset_objs.bit_parent = fixture->create_bitasset( "PARENT", GRAPHENE_WITNESS_ACCOUNT).get_id(); + + BOOST_TEST_MESSAGE( "Create CHILDUSER" ); + asset_objs.bit_child_bitasset = fixture->create_bitasset( "CHILDUSER", GRAPHENE_WITNESS_ACCOUNT, + 100, charge_market_fee, 2, asset_objs.bit_parent ).get_id(); + + BOOST_TEST_MESSAGE( "Create user issued USERISSUED" ); + asset_objs.user_issued = fixture->create_user_issued_asset( "USERISSUED", + GRAPHENE_WITNESS_ACCOUNT(fixture->db), charge_market_fee ).get_id(); + + BOOST_TEST_MESSAGE( "Create a user-issued asset with a precision of 6" ); + asset_objs.six_precision = fixture->create_user_issued_asset( "SIXPRECISION", GRAPHENE_WITNESS_ACCOUNT(fixture->db), + charge_market_fee, price(asset(1, asset_id_type(1)), asset(1)), 6 ).get_id(); + + BOOST_TEST_MESSAGE( "Create Prediction market with precision of 6, backed by SIXPRECISION" ); + asset_objs.prediction = fixture->create_prediction_market( "PREDICTION", GRAPHENE_WITNESS_ACCOUNT, + 100, charge_market_fee, 6, asset_objs.six_precision ).get_id(); + + return asset_objs; +} +/****** + * @brief Test various bitasset asserts within the asset_evaluator before the HF 922 / 931 + */ +BOOST_AUTO_TEST_CASE( bitasset_evaluator_test_before_922_931 ) +{ + BOOST_TEST_MESSAGE("Advance to near hard fork 922 / 931"); + auto global_params = db.get_global_properties().parameters; + generate_blocks( HARDFORK_CORE_922_931_TIME - global_params.maintenance_interval ); + trx.set_expiration( HARDFORK_CORE_922_931_TIME - global_params.maintenance_interval + global_params.maximum_time_until_expiration ); + + ACTORS( (nathan) (john) ); + + assets_922_931 asset_objs = create_assets_922_931( this ); + const asset_id_type bit_usd_id = asset_objs.bit_usd; + + // make a generic operation + bitasset_evaluator_wrapper evaluator; + evaluator.set_db(db); + asset_update_bitasset_operation op; + op.asset_to_update = bit_usd_id; + op.issuer = asset_objs.bit_usd(db).issuer; + op.new_options = asset_objs.bit_usd(db).bitasset_data(db).options; + + // this should pass + BOOST_TEST_MESSAGE( "Evaluating a good operation" ); + BOOST_CHECK( evaluator.evaluate(op) == void_result() ); + + // test with a market issued asset + BOOST_TEST_MESSAGE( "Sending a non-bitasset." ); + op.asset_to_update = asset_objs.user_issued; + REQUIRE_EXCEPTION_WITH_TEXT( evaluator.evaluate(op), "on a non-BitAsset." ); + op.asset_to_update = bit_usd_id; + + // test changing issuer + BOOST_TEST_MESSAGE( "Test changing issuer." ); + account_id_type original_issuer = op.issuer; + op.issuer = john_id; + REQUIRE_EXCEPTION_WITH_TEXT( evaluator.evaluate(op), "Only asset issuer can update" ); + op.issuer = original_issuer; + + // bad backing_asset + BOOST_TEST_MESSAGE( "Non-existent backing asset." 
); + asset_id_type correct_asset_id = op.new_options.short_backing_asset; + op.new_options.short_backing_asset = asset_id_type(); + op.new_options.short_backing_asset.instance = 123; + REQUIRE_EXCEPTION_WITH_TEXT( evaluator.evaluate(op), "Unable to find Object" ); + op.new_options.short_backing_asset = correct_asset_id; + + // now check the things that are wrong, but still pass before HF 922 / 931 + BOOST_TEST_MESSAGE( "Now check the things that are wrong, but still pass before HF 922 / 931" ); + + // back by self + BOOST_TEST_MESSAGE( "Message should contain: op.new_options.short_backing_asset == asset_obj.get_id()" ); + op.new_options.short_backing_asset = bit_usd_id; + BOOST_CHECK( evaluator.evaluate(op) == void_result() ); + op.new_options.short_backing_asset = correct_asset_id; + + // prediction market with different precision + BOOST_TEST_MESSAGE( "Message should contain: for a PM, asset_obj.precision != new_backing_asset.precision" ); + op.asset_to_update = asset_objs.prediction; + op.issuer = asset_objs.prediction(db).issuer; + BOOST_CHECK( evaluator.evaluate(op) == void_result() ); + op.asset_to_update = bit_usd_id; + op.issuer = asset_objs.bit_usd(db).issuer; + + // checking old backing asset instead of new backing asset + BOOST_TEST_MESSAGE( "Message should contain: to be backed by an asset which is not market issued asset nor CORE" ); + op.new_options.short_backing_asset = asset_objs.six_precision; + BOOST_CHECK( evaluator.evaluate(op) == void_result() ); + BOOST_TEST_MESSAGE( "Message should contain: modified a blockchain-controlled market asset to be backed by an asset " + "which is not backed by CORE" ); + op.new_options.short_backing_asset = asset_objs.prediction; + BOOST_CHECK( evaluator.evaluate(op) == void_result() ); + op.new_options.short_backing_asset = correct_asset_id; + + // CHILDUSER is a non-committee asset backed by PARENT which is backed by CORE + // Cannot change PARENT's backing asset from CORE to something that is not [CORE | UIA] + // because that will make CHILD be backed by an asset that is not itself backed by CORE or a UIA. + BOOST_TEST_MESSAGE( "Message should contain: but this asset is a backing asset for another MPA, which would cause MPA " + "backed by MPA backed by MPA." ); + op.asset_to_update = asset_objs.bit_parent; + op.issuer = asset_objs.bit_parent(db).issuer; + op.new_options.short_backing_asset = asset_objs.bit_usdbacked; + // this should generate a warning in the log, but not fail. + BOOST_CHECK( evaluator.evaluate(op) == void_result() ); + // changing the backing asset to a UIA should work + BOOST_TEST_MESSAGE( "Switching to a backing asset that is a UIA should work. No warning should be produced." ); + op.new_options.short_backing_asset = asset_objs.user_issued; + BOOST_CHECK( evaluator.evaluate(op) == void_result() ); + // A -> B -> C, change B to be backed by A (circular backing) + BOOST_TEST_MESSAGE( "Message should contain: A cannot be backed by B which is backed by A." ); + op.new_options.short_backing_asset = asset_objs.bit_child_bitasset; + BOOST_CHECK( evaluator.evaluate(op) == void_result() ); + op.new_options.short_backing_asset = asset_objs.user_issued; + BOOST_TEST_MESSAGE( "Message should contain: but this asset is a backing asset for a committee-issued asset." 
); + // CHILDCOMMITTEE is a committee asset backed by PARENT which is backed by CORE + // Cannot change PARENT's backing asset from CORE to something else because that will make CHILD be backed by + // an asset that is not itself backed by CORE + create_bitasset( "CHILDCOMMITTEE", GRAPHENE_COMMITTEE_ACCOUNT, 100, charge_market_fee, 2, + asset_objs.bit_parent ); + // it should again work, generating 2 warnings in the log. 1 for the above, and 1 new one. + BOOST_CHECK( evaluator.evaluate(op) == void_result() ); + op.asset_to_update = asset_objs.bit_usd; + op.issuer = asset_objs.bit_usd(db).issuer; + op.new_options.short_backing_asset = correct_asset_id; + + // USDBACKED is backed by USDBIT (which is backed by CORE) + // USDBACKEDII is backed by USDBIT + // We should not be able to make USDBACKEDII be backed by USDBACKED + // because that would be a MPA backed by MPA backed by MPA. + BOOST_TEST_MESSAGE( "Message should contain: a BitAsset cannot be backed by a BitAsset that " + "itself is backed by a BitAsset." ); + op.asset_to_update = asset_objs.bit_usdbacked2; + op.issuer = asset_objs.bit_usdbacked2(db).issuer; + op.new_options.short_backing_asset = asset_objs.bit_usdbacked; + BOOST_CHECK( evaluator.evaluate(op) == void_result() ); + // set everything to a more normal state + op.asset_to_update = asset_objs.bit_usdbacked; + op.issuer = asset_objs.bit_usd(db).issuer; + op.new_options.short_backing_asset = asset_id_type(); + + // Feed lifetime must exceed block interval + BOOST_TEST_MESSAGE( "Message should contain: op.new_options.feed_lifetime_sec <= chain_parameters.block_interval" ); + const auto good_feed_lifetime = op.new_options.feed_lifetime_sec; + op.new_options.feed_lifetime_sec = db.get_global_properties().parameters.block_interval; + BOOST_CHECK( evaluator.evaluate(op) == void_result() ); + BOOST_TEST_MESSAGE( "Message should contain: op.new_options.feed_lifetime_sec <= chain_parameters.block_interval" ); + op.new_options.feed_lifetime_sec = db.get_global_properties().parameters.block_interval - 1; // default interval > 1 + BOOST_CHECK( evaluator.evaluate(op) == void_result() ); + op.new_options.feed_lifetime_sec = good_feed_lifetime; + + // Force settlement delay must exceed block interval. + BOOST_TEST_MESSAGE( "Message should contain: op.new_options.force_settlement_delay_sec <= chain_parameters.block_interval" ); + const auto good_force_settlement_delay_sec = op.new_options.force_settlement_delay_sec; + op.new_options.force_settlement_delay_sec = db.get_global_properties().parameters.block_interval; + BOOST_CHECK( evaluator.evaluate(op) == void_result() ); + BOOST_TEST_MESSAGE( "Message should contain: op.new_options.force_settlement_delay_sec <= chain_parameters.block_interval" ); + op.new_options.force_settlement_delay_sec = db.get_global_properties().parameters.block_interval - 1; // default interval > 1 + BOOST_CHECK( evaluator.evaluate(op) == void_result() ); + op.new_options.force_settlement_delay_sec = good_force_settlement_delay_sec; + + // this should pass + BOOST_TEST_MESSAGE( "We should be all good again." 
); + BOOST_CHECK( evaluator.evaluate(op) == void_result() ); +} + +/****** + * @brief Test various bitasset asserts within the asset_evaluator before the HF 922 / 931 + */ +BOOST_AUTO_TEST_CASE( bitasset_evaluator_test_after_922_931 ) +{ + BOOST_TEST_MESSAGE("Advance to after hard fork 922 / 931"); + auto global_params = db.get_global_properties().parameters; + generate_blocks( HARDFORK_CORE_922_931_TIME + global_params.maintenance_interval ); + trx.set_expiration( HARDFORK_CORE_922_931_TIME + global_params.maintenance_interval + global_params.maximum_time_until_expiration ); + + ACTORS( (nathan) (john) ); + + assets_922_931 asset_objs = create_assets_922_931( this ); + const asset_id_type& bit_usd_id = asset_objs.bit_usd; + + // make a generic operation + bitasset_evaluator_wrapper evaluator; + evaluator.set_db( db ); + asset_update_bitasset_operation op; + op.asset_to_update = bit_usd_id; + op.issuer = asset_objs.bit_usd(db).issuer; + op.new_options = asset_objs.bit_usd(db).bitasset_data(db).options; + + // this should pass + BOOST_TEST_MESSAGE( "Evaluating a good operation" ); + BOOST_CHECK( evaluator.evaluate(op) == void_result() ); + + // test with a market issued asset + BOOST_TEST_MESSAGE( "Sending a non-bitasset." ); + op.asset_to_update = asset_objs.user_issued; + REQUIRE_EXCEPTION_WITH_TEXT( evaluator.evaluate(op), "Cannot update BitAsset-specific settings on a non-BitAsset" ); + op.asset_to_update = bit_usd_id; + + // test changing issuer + BOOST_TEST_MESSAGE( "Test changing issuer." ); + account_id_type original_issuer = op.issuer; + op.issuer = john_id; + REQUIRE_EXCEPTION_WITH_TEXT( evaluator.evaluate(op), "Only asset issuer can update" ); + op.issuer = original_issuer; + + // bad backing_asset + BOOST_TEST_MESSAGE( "Non-existent backing asset." ); + asset_id_type correct_asset_id = op.new_options.short_backing_asset; + op.new_options.short_backing_asset = asset_id_type(); + op.new_options.short_backing_asset.instance = 123; + REQUIRE_EXCEPTION_WITH_TEXT( evaluator.evaluate(op), "Unable to find" ); + op.new_options.short_backing_asset = correct_asset_id; + + // now check the things that are wrong and won't pass after HF 922 / 931 + BOOST_TEST_MESSAGE( "Now check the things that are wrong and won't pass after HF 922 / 931" ); + + // back by self + BOOST_TEST_MESSAGE( "Back by itself" ); + op.new_options.short_backing_asset = bit_usd_id; + REQUIRE_EXCEPTION_WITH_TEXT( evaluator.evaluate(op), "Cannot update an asset to be backed by itself" ); + op.new_options.short_backing_asset = correct_asset_id; + + // prediction market with different precision + BOOST_TEST_MESSAGE( "Prediction market with different precision" ); + op.asset_to_update = asset_objs.prediction; + op.issuer = asset_objs.prediction(db).issuer; + REQUIRE_EXCEPTION_WITH_TEXT( evaluator.evaluate(op), "The precision of the asset and backing asset must" ); + op.asset_to_update = bit_usd_id; + op.issuer = asset_objs.bit_usd(db).issuer; + + // checking old backing asset instead of new backing asset + BOOST_TEST_MESSAGE( "Correctly checking new backing asset rather than old backing asset" ); + op.new_options.short_backing_asset = asset_objs.six_precision; + REQUIRE_EXCEPTION_WITH_TEXT( evaluator.evaluate(op), "which is not market issued asset nor CORE." 
); + op.new_options.short_backing_asset = asset_objs.prediction; + REQUIRE_EXCEPTION_WITH_TEXT( evaluator.evaluate(op), "which is not backed by CORE" ); + op.new_options.short_backing_asset = correct_asset_id; + + // CHILD is a non-committee asset backed by PARENT which is backed by CORE + // Cannot change PARENT's backing asset from CORE to something that is not [CORE | UIA] + // because that will make CHILD be backed by an asset that is not itself backed by CORE or a UIA. + BOOST_TEST_MESSAGE( "Attempting to change PARENT to be backed by a non-core and non-user-issued asset" ); + op.asset_to_update = asset_objs.bit_parent; + op.issuer = asset_objs.bit_parent(db).issuer; + op.new_options.short_backing_asset = asset_objs.bit_usdbacked; + REQUIRE_EXCEPTION_WITH_TEXT( evaluator.evaluate(op), "A non-blockchain controlled BitAsset would be invalidated" ); + // changing the backing asset to a UIA should work + BOOST_TEST_MESSAGE( "Switching to a backing asset that is a UIA should work." ); + op.new_options.short_backing_asset = asset_objs.user_issued; + BOOST_CHECK( evaluator.evaluate(op) == void_result() ); + // A -> B -> C, change B to be backed by A (circular backing) + BOOST_TEST_MESSAGE( "Check for circular backing. This should generate an exception" ); + op.new_options.short_backing_asset = asset_objs.bit_child_bitasset; + REQUIRE_EXCEPTION_WITH_TEXT( evaluator.evaluate(op), "'A' backed by 'B' backed by 'A'" ); + op.new_options.short_backing_asset = asset_objs.user_issued; + BOOST_CHECK( evaluator.evaluate(op) == void_result() ); + BOOST_TEST_MESSAGE( "Creating CHILDCOMMITTEE" ); + // CHILDCOMMITTEE is a committee asset backed by PARENT which is backed by CORE + // Cannot change PARENT's backing asset from CORE to something else because that will make CHILDCOMMITTEE + // be backed by an asset that is not itself backed by CORE + create_bitasset( "CHILDCOMMITTEE", GRAPHENE_COMMITTEE_ACCOUNT, 100, charge_market_fee, 2, + asset_objs.bit_parent ); + // it should again not work + REQUIRE_EXCEPTION_WITH_TEXT( evaluator.evaluate(op), "A blockchain-controlled market asset would be invalidated" ); + op.asset_to_update = asset_objs.bit_usd; + op.issuer = asset_objs.bit_usd(db).issuer; + op.new_options.short_backing_asset = correct_asset_id; + + // USDBACKED is backed by USDBIT (which is backed by CORE) + // USDBACKEDII is backed by USDBIT + // We should not be able to make USDBACKEDII be backed by USDBACKED + // because that would be a MPA backed by MPA backed by MPA. 
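// Backing chains at this point: CORE <- USDBIT <- USDBACKED, and USDBIT also backs
// USDBACKEDII. Re-pointing USDBACKEDII at USDBACKED would produce
// CORE <- USDBIT <- USDBACKED <- USDBACKEDII, i.e. an MPA backed by an MPA that is
// itself backed by an MPA, which the post-hardfork check below rejects.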
+ BOOST_TEST_MESSAGE( "MPA -> MPA -> MPA not allowed" ); + op.asset_to_update = asset_objs.bit_usdbacked2; + op.issuer = asset_objs.bit_usdbacked2(db).issuer; + op.new_options.short_backing_asset = asset_objs.bit_usdbacked; + REQUIRE_EXCEPTION_WITH_TEXT( evaluator.evaluate(op), + "A BitAsset cannot be backed by a BitAsset that itself is backed by a BitAsset" ); + // set everything to a more normal state + op.asset_to_update = asset_objs.bit_usdbacked; + op.issuer = asset_objs.bit_usd(db).issuer; + op.new_options.short_backing_asset = asset_id_type(); + + // Feed lifetime must exceed block interval + BOOST_TEST_MESSAGE( "Feed lifetime less than or equal to block interval" ); + const auto good_feed_lifetime = op.new_options.feed_lifetime_sec; + op.new_options.feed_lifetime_sec = db.get_global_properties().parameters.block_interval; + REQUIRE_EXCEPTION_WITH_TEXT( evaluator.evaluate(op), "Feed lifetime must exceed block" ); + op.new_options.feed_lifetime_sec = db.get_global_properties().parameters.block_interval - 1; // default interval > 1 + REQUIRE_EXCEPTION_WITH_TEXT( evaluator.evaluate(op), "Feed lifetime must exceed block" ); + op.new_options.feed_lifetime_sec = good_feed_lifetime; + + // Force settlement delay must exceed block interval. + BOOST_TEST_MESSAGE( "Force settlement delay less than or equal to block interval" ); + const auto good_force_settlement_delay_sec = op.new_options.force_settlement_delay_sec; + op.new_options.force_settlement_delay_sec = db.get_global_properties().parameters.block_interval; + REQUIRE_EXCEPTION_WITH_TEXT( evaluator.evaluate(op), "Force settlement delay must" ); + op.new_options.force_settlement_delay_sec = db.get_global_properties().parameters.block_interval - 1; // default interval > 1 + REQUIRE_EXCEPTION_WITH_TEXT( evaluator.evaluate(op), "Force settlement delay must" ); + op.new_options.force_settlement_delay_sec = good_force_settlement_delay_sec; + + // this should pass + BOOST_TEST_MESSAGE( "We should be all good again." ); + BOOST_CHECK( evaluator.evaluate(op) == void_result() ); + +} + +/********* + * @brief Call check_call_orders after current_feed changed but not only settlement_price changed. 
+ */ +BOOST_AUTO_TEST_CASE( hf_935_test ) +{ try { + uint32_t skip = database::skip_witness_signature + | database::skip_transaction_signatures + | database::skip_transaction_dupe_check + | database::skip_block_size_check + | database::skip_tapos_check + | database::skip_merkle_check + ; + generate_blocks( HARDFORK_615_TIME, true, skip ); // get around Graphene issue #615 feed expiration bug + generate_blocks( db.get_dynamic_global_properties().next_maintenance_time, true, skip ); + generate_block( skip ); + + for( int i = 0; i < 6; ++i ) + { + idump( (i) ); + int blocks = 0; + auto mi = db.get_global_properties().parameters.maintenance_interval; + + if( i == 2 ) // go beyond hard fork 890 + { + generate_blocks( HARDFORK_CORE_868_890_TIME - mi, true, skip ); + generate_blocks( db.get_dynamic_global_properties().next_maintenance_time, true, skip ); + } + else if( i == 4 ) // go beyond hard fork 935 + { + generate_blocks( HARDFORK_CORE_935_TIME - mi, true, skip ); + generate_blocks( db.get_dynamic_global_properties().next_maintenance_time, true, skip ); + } + set_expiration( db, trx ); + + ACTORS( (seller)(borrower)(feedproducer)(feedproducer2)(feedproducer3) ); + + int64_t init_balance( 1000000 ); + + transfer( committee_account, borrower_id, asset(init_balance) ); + + const auto& bitusd = create_bitasset( "USDBIT", feedproducer_id ); + asset_id_type usd_id = bitusd.id; + + { + // set a short feed lifetime + const asset_object& asset_to_update = usd_id(db); + asset_update_bitasset_operation ba_op; + ba_op.asset_to_update = usd_id; + ba_op.issuer = asset_to_update.issuer; + ba_op.new_options = asset_to_update.bitasset_data(db).options; + ba_op.new_options.feed_lifetime_sec = 300; + trx.operations.push_back(ba_op); + PUSH_TX(db, trx, ~0); + trx.clear(); + } + + // set feed producers + flat_set producers; + producers.insert( feedproducer_id ); + producers.insert( feedproducer2_id ); + producers.insert( feedproducer3_id ); + update_feed_producers( usd_id(db), producers ); + + // prepare feed data + price_feed current_feed; + if( i % 2 == 0 ) // MCR test + { + current_feed.maintenance_collateral_ratio = 3500; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = asset(100, usd_id) / asset(5); + } + else // MSSR test + { + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1250; + current_feed.settlement_price = asset(100, usd_id) / asset(10); + // mssp = 1000/125 + } + + // set 2 price feeds which should call some later + publish_feed( usd_id, feedproducer_id, current_feed ); + publish_feed( usd_id, feedproducer2_id, current_feed ); + + // check median + BOOST_CHECK( usd_id(db).bitasset_data(db).current_feed.settlement_price == current_feed.settlement_price ); + if( i % 2 == 0 ) // MCR test, MCR should be 350% + BOOST_CHECK_EQUAL( usd_id(db).bitasset_data(db).current_feed.maintenance_collateral_ratio, 3500 ); + else // MSSR test, MSSR should be 125% + BOOST_CHECK_EQUAL( usd_id(db).bitasset_data(db).current_feed.maximum_short_squeeze_ratio, 1250 ); + + // generate some blocks, let the feeds expire + blocks += generate_blocks( db.head_block_time() + 360, true, skip ); + set_expiration( db, trx ); + + // check median, should be null + BOOST_CHECK( usd_id(db).bitasset_data(db).current_feed.settlement_price.is_null() ); + + // publish a new feed with 175% MCR and 110% MSSR + current_feed.settlement_price = asset(100, usd_id) / asset(5); + current_feed.maintenance_collateral_ratio = 1750; + 
current_feed.maximum_short_squeeze_ratio = 1100; + publish_feed( usd_id, feedproducer3_id, current_feed ); + + // check median, MCR would be 175%, MSSR would be 110% + BOOST_CHECK( usd_id(db).bitasset_data(db).current_feed.settlement_price == current_feed.settlement_price ); + BOOST_CHECK_EQUAL( usd_id(db).bitasset_data(db).current_feed.maintenance_collateral_ratio, 1750 ); + BOOST_CHECK_EQUAL( usd_id(db).bitasset_data(db).current_feed.maximum_short_squeeze_ratio, 1100 ); + + // Place some collateralized orders + // start out with 300% collateral, call price is 15/175 CORE/USD = 60/700 + borrow( borrower_id, asset(100, usd_id), asset(15) ); + + transfer( borrower_id, seller_id, asset(100, usd_id) ); + + if( i % 2 == 1) // MSSR test + { + // publish a new feed to put the call order into margin call territory + current_feed.settlement_price = asset(100, usd_id) / asset(10); + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + publish_feed( usd_id, feedproducer3_id, current_feed ); + // mssp = 100/11 + } + + // place a sell order, it won't be matched with the call order now. + // For MCR test, the sell order is at feed price (100/5), + // when median MCR changed to 350%, the call order with 300% collateral will be in margin call territory, + // then this limit order should be filled + // For MSSR test, the sell order is above 110% of feed price (100/10) but below 125% of feed price, + // when median MSSR changed to 125%, the call order will be matched, + // then this limit order should be filled + limit_order_id_type sell_id = ( i % 2 == 0 ) ? + create_sell_order( seller_id, asset(20, usd_id), asset(1) )->id : // for MCR test + create_sell_order( seller_id, asset(8, usd_id), asset(1) )->id; // for MSSR test + + { + // change feed lifetime to longer, let all 3 feeds be valid + const asset_object& asset_to_update = usd_id(db); + asset_update_bitasset_operation ba_op; + ba_op.asset_to_update = usd_id; + ba_op.issuer = asset_to_update.issuer; + ba_op.new_options = asset_to_update.bitasset_data(db).options; + ba_op.new_options.feed_lifetime_sec = HARDFORK_CORE_935_TIME.sec_since_epoch() + + mi * 3 + 86400 * 2 + - db.head_block_time().sec_since_epoch(); + trx.operations.push_back(ba_op); + PUSH_TX(db, trx, ~0); + trx.clear(); + } + + bool affected_by_hf_343 = false; + + // check + if( i / 2 == 0 ) // before hard fork 890 + { + // median feed won't change (issue 890) + BOOST_CHECK( usd_id(db).bitasset_data(db).current_feed.settlement_price == current_feed.settlement_price ); + BOOST_CHECK_EQUAL( usd_id(db).bitasset_data(db).current_feed.maintenance_collateral_ratio, 1750 ); + BOOST_CHECK_EQUAL( usd_id(db).bitasset_data(db).current_feed.maximum_short_squeeze_ratio, 1100 ); + // limit order is still there + BOOST_CHECK( db.find( sell_id ) ); + + // go beyond hard fork 890 + blocks += generate_blocks( HARDFORK_CORE_868_890_TIME - mi, true, skip ); + bool was_before_hf_343 = ( db.get_dynamic_global_properties().next_maintenance_time <= HARDFORK_CORE_343_TIME ); + + blocks += generate_blocks( db.get_dynamic_global_properties().next_maintenance_time, true, skip ); + bool now_after_hf_343 = ( db.get_dynamic_global_properties().next_maintenance_time > HARDFORK_CORE_343_TIME ); + + if( was_before_hf_343 && now_after_hf_343 ) // if hf 343 executed at same maintenance interval, actually after hf 890 + affected_by_hf_343 = true; + } + + // after hard fork 890, if it's before hard fork 935 + if( db.get_dynamic_global_properties().next_maintenance_time <= 
HARDFORK_CORE_935_TIME ) + { + // median should have changed + BOOST_CHECK( usd_id(db).bitasset_data(db).current_feed.settlement_price == current_feed.settlement_price ); + if( i % 2 == 0 ) // MCR test, MCR should be 350% + BOOST_CHECK_EQUAL( usd_id(db).bitasset_data(db).current_feed.maintenance_collateral_ratio, 3500 ); + else // MSSR test, MSSR should be 125% + BOOST_CHECK_EQUAL( usd_id(db).bitasset_data(db).current_feed.maximum_short_squeeze_ratio, 1250 ); + + if( affected_by_hf_343 ) // if updated bitasset before hf 890, and hf 343 executed after hf 890 + // the limit order should have been filled + BOOST_CHECK( !db.find( sell_id ) ); + else // if not affected by hf 343 + // the limit order should be still there, because `check_call_order` was incorrectly skipped + BOOST_CHECK( db.find( sell_id ) ); + + // go beyond hard fork 935 + blocks += generate_blocks(HARDFORK_CORE_935_TIME - mi, true, skip); + blocks += generate_blocks(db.get_dynamic_global_properties().next_maintenance_time, true, skip); + } + + // after hard fork 935, the limit order should be filled + { + // check median + BOOST_CHECK( usd_id(db).bitasset_data(db).current_feed.settlement_price == current_feed.settlement_price ); + if( i % 2 == 0 ) // MCR test, median MCR should be 350% + BOOST_CHECK_EQUAL( usd_id(db).bitasset_data(db).current_feed.maintenance_collateral_ratio, 3500 ); + else // MSSR test, MSSR should be 125% + BOOST_CHECK_EQUAL( usd_id(db).bitasset_data(db).current_feed.maximum_short_squeeze_ratio, 1250 ); + // the limit order should have been filled + // TODO FIXME this test case is failing for MCR test, + // because call_order's call_price didn't get updated after MCR changed + // BOOST_CHECK( !db.find( sell_id ) ); + if( i % 2 == 1 ) // MSSR test + BOOST_CHECK( !db.find( sell_id ) ); + } + + + // undo above tx's and reset + generate_block( skip ); + ++blocks; + while( blocks > 0 ) + { + db.pop_block(); + --blocks; + } + } +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_CASE( bitasset_secondary_index ) +{ + ACTORS( (nathan) ); + + graphene::chain::asset_id_type core_id; + BOOST_TEST_MESSAGE( "Running test bitasset_secondary_index" ); + BOOST_TEST_MESSAGE( "Core asset id: " + fc::json::to_pretty_string( core_id ) ); + BOOST_TEST_MESSAGE("Create coins"); + try + { + // make 5 coins (backed by core) + for(int i = 0; i < 5; i++) + { + create_bitasset_backed(*this, i, core_id, nathan_private_key); + } + // make the next 5 (10-14) be backed by COIN1 + graphene::chain::asset_id_type coin1_id = get_asset("COIN1TEST").get_id(); + for(int i = 5; i < 10; i++) + { + create_bitasset_backed(*this, i, coin1_id, nathan_private_key); + } + // make the next 5 (15-19) be backed by COIN2 + graphene::chain::asset_id_type coin2_id = get_asset("COIN2TEST").get_id(); + for(int i = 10; i < 15; i++) + { + create_bitasset_backed(*this, i, coin2_id, nathan_private_key); + } + // make the last 5 be backed by core + for(int i = 15; i < 20; i++) + { + create_bitasset_backed(*this, i, core_id, nathan_private_key); + } + + BOOST_TEST_MESSAGE("Searching for all coins backed by CORE"); + const auto& idx = db.get_index_type().indices().get(); + auto core_itr = idx.equal_range( core_id ); + BOOST_TEST_MESSAGE("Searching for all coins backed by COIN1"); + auto coin1_itr = idx.equal_range( coin1_id ); + BOOST_TEST_MESSAGE("Searching for all coins backed by COIN2"); + auto coin2_itr = idx.equal_range( coin2_id ); + + int core_count = 0, coin1_count = 0, coin2_count = 0; + + BOOST_TEST_MESSAGE("Counting coins in each category"); + + for( auto itr = 
core_itr.first ; itr != core_itr.second; ++itr) + { + BOOST_CHECK(itr->options.short_backing_asset == core_id); + BOOST_TEST_MESSAGE( fc::json::to_pretty_string(itr->asset_id) + " is backed by CORE" ); + core_count++; + } + for( auto itr = coin1_itr.first ; itr != coin1_itr.second; ++itr ) + { + BOOST_CHECK(itr->options.short_backing_asset == coin1_id); + BOOST_TEST_MESSAGE( fc::json::to_pretty_string( itr->asset_id) + " is backed by COIN1TEST" ); + coin1_count++; + } + for( auto itr = coin2_itr.first; itr != coin2_itr.second; ++itr ) + { + BOOST_CHECK(itr->options.short_backing_asset == coin2_id); + BOOST_TEST_MESSAGE( fc::json::to_pretty_string( itr->asset_id) + " is backed by COIN2TEST" ); + coin2_count++; + } + + BOOST_CHECK( core_count >= 10 ); + BOOST_CHECK_EQUAL( coin1_count, 5 ); + BOOST_CHECK_EQUAL( coin2_count, 5 ); + } + catch (fc::exception& ex) + { + BOOST_FAIL(ex.to_string(fc::log_level(fc::log_level::all))); + } +} + + +/***** + * @brief make sure feeds work correctly after changing from non-witness-fed to witness-fed before the 868 fork + * NOTE: This test case is a different issue than what is currently being worked on, and fails. Hopefully it + * will help when the fix for that issue is being coded. + */ +/* +BOOST_AUTO_TEST_CASE( reset_backing_asset_switching_to_witness_fed ) +{ + ACTORS((nathan)(dan)(ben)(vikram)); + + BOOST_TEST_MESSAGE("Advance to near hard fork"); + auto maint_interval = db.get_global_properties().parameters.maintenance_interval; + generate_blocks( HARDFORK_CORE_868_890_TIME - maint_interval); + trx.set_expiration(HARDFORK_CORE_868_890_TIME - fc::seconds(1)); + + + BOOST_TEST_MESSAGE("Create USDBIT"); + asset_id_type bit_usd_id = create_bitasset("USDBIT").id; + asset_id_type core_id = bit_usd_id(db).bitasset_data(db).options.short_backing_asset; + + { + BOOST_TEST_MESSAGE("Update the USDBIT asset options"); + change_asset_options(*this, nathan_id, nathan_private_key, bit_usd_id, false ); + } + + BOOST_TEST_MESSAGE("Create JMJBIT based on USDBIT."); + asset_id_type bit_jmj_id = create_bitasset("JMJBIT").id; + { + BOOST_TEST_MESSAGE("Update the JMJBIT asset options"); + change_asset_options(*this, nathan_id, nathan_private_key, bit_jmj_id, false ); + } + { + BOOST_TEST_MESSAGE("Update the JMJBIT bitasset options"); + asset_update_bitasset_operation ba_op; + const asset_object& obj = bit_jmj_id(db); + ba_op.asset_to_update = obj.get_id(); + ba_op.issuer = obj.issuer; + ba_op.new_options.short_backing_asset = bit_usd_id; + ba_op.new_options.minimum_feeds = 1; + trx.operations.push_back(ba_op); + sign(trx, nathan_private_key); + PUSH_TX(db, trx, ~0); + generate_block(); + trx.clear(); + } + { + BOOST_TEST_MESSAGE("Set feed producers for JMJBIT"); + asset_update_feed_producers_operation op; + op.asset_to_update = bit_jmj_id; + op.issuer = nathan_id; + op.new_feed_producers = {dan_id, ben_id, vikram_id}; + trx.operations.push_back(op); + sign( trx, nathan_private_key ); + PUSH_TX( db, trx, ~0 ); + generate_block(); + trx.clear(); + } + { + BOOST_TEST_MESSAGE("Verify feed producers are registered for JMJBIT"); + const asset_bitasset_data_object& obj = bit_jmj_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(obj.feeds.size(), 3ul); + BOOST_CHECK(obj.current_feed == price_feed()); + + + BOOST_CHECK_EQUAL("1", std::to_string(obj.options.short_backing_asset.space_id)); + BOOST_CHECK_EQUAL("3", std::to_string(obj.options.short_backing_asset.type_id)); + BOOST_CHECK_EQUAL("1", std::to_string(obj.options.short_backing_asset.instance.value)); + + BOOST_CHECK_EQUAL("1", 
std::to_string(bit_jmj_id.space_id)); + BOOST_CHECK_EQUAL("3", std::to_string(bit_jmj_id.type_id)); + BOOST_CHECK_EQUAL("2", std::to_string(bit_jmj_id.instance.value)); + } + { + BOOST_TEST_MESSAGE("Adding Vikram's price feed"); + add_price_feed(*this, vikram_id, bit_usd_id, 1, bit_jmj_id, 300, core_id); + + const asset_bitasset_data_object& bitasset = bit_jmj_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset.current_feed.settlement_price.to_real(), 300.0); + BOOST_CHECK(bitasset.current_feed.maintenance_collateral_ratio == GRAPHENE_DEFAULT_MAINTENANCE_COLLATERAL_RATIO); + } + { + BOOST_TEST_MESSAGE("Change JMJBIT to be witness_fed"); + optional noone; + change_asset_options(*this, noone, nathan_private_key, bit_jmj_id, true ); + } + { + BOOST_TEST_MESSAGE("Change underlying asset of bit_jmj from bit_usd to core"); + change_backing_asset(*this, nathan_private_key, bit_jmj_id, core_id); + + BOOST_TEST_MESSAGE("Verify feed producers have not been reset"); + const asset_bitasset_data_object& jmj_obj = bit_jmj_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(jmj_obj.feeds.size(), 3ul); + int nan_count = 0; + for(const auto& feed : jmj_obj.feeds) { + if(feed.second.second.settlement_price.is_null()) + ++nan_count; + } + BOOST_CHECK_EQUAL(nan_count, 2); + } + { + BOOST_TEST_MESSAGE("Add a new (and correct) feed price from a witness"); + auto& global_props = db.get_global_properties(); + std::vector active_witnesses; + const witness_id_type& first_witness_id = (*global_props.active_witnesses.begin()); + const account_id_type witness_account_id = first_witness_id(db).witness_account; + add_price_feed(*this, witness_account_id, core_id, 1, bit_jmj_id, 300, core_id); + + // we should have 2 feeds nan, 1 old feed with wrong asset, and 1 witness feed + const asset_bitasset_data_object& jmj_obj = bit_jmj_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(jmj_obj.feeds.size(), 4ul); + int nan_count = 0; + for(const auto& feed : jmj_obj.feeds) { + if ( feed.second.second.settlement_price.is_null() ) + ++nan_count; + } + BOOST_CHECK_EQUAL(nan_count, 2); + } + { + BOOST_TEST_MESSAGE("Advance to past hard fork"); + generate_blocks( HARDFORK_CORE_868_890_TIME + maint_interval); + trx.set_expiration(HARDFORK_CORE_868_890_TIME + fc::hours(48)); + + BOOST_TEST_MESSAGE("Verify that the incorrect feeds have been removed"); + const asset_bitasset_data_object& jmj_obj = bit_jmj_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(jmj_obj.feeds.size(), 1ul); + BOOST_CHECK( ! 
(*jmj_obj.feeds.begin()).second.second.settlement_price.is_null() ); + // the settlement price will be NaN until 50% of price feeds are valid + //BOOST_CHECK_EQUAL(jmj_obj.current_feed.settlement_price.to_real(), 300); + } + { + BOOST_TEST_MESSAGE("After hardfork, change underlying asset of bit_jmj from core to bit_usd"); + change_backing_asset(*this, nathan_private_key, bit_jmj_id, bit_usd_id); + + BOOST_TEST_MESSAGE("Verify feed producers have been reset"); + const asset_bitasset_data_object& jmj_obj = bit_jmj_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(jmj_obj.feeds.size(), 0ul); + } + { + BOOST_TEST_MESSAGE("With underlying bitasset changed from one to another, price feeds should still be publish-able"); + auto& global_props = db.get_global_properties(); + std::vector active_witnesses; + for(const auto& witness_id : global_props.active_witnesses) + { + active_witnesses.push_back(witness_id(db).witness_account); + } + BOOST_TEST_MESSAGE("Adding Witness 0's price feed"); + add_price_feed(*this, active_witnesses[0], bit_usd_id, 1, bit_jmj_id, 30, core_id); + + const asset_bitasset_data_object& bitasset = bit_jmj_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset.current_feed.settlement_price.to_real(), 30); + BOOST_CHECK(bitasset.current_feed.maintenance_collateral_ratio == GRAPHENE_DEFAULT_MAINTENANCE_COLLATERAL_RATIO); + + BOOST_TEST_MESSAGE("Adding Witness 1's pricing to JMJBIT"); + add_price_feed(*this, active_witnesses[0], bit_usd_id, 1, bit_jmj_id, 25, core_id); + + BOOST_CHECK_EQUAL(bitasset.current_feed.settlement_price.to_real(), 30); + BOOST_CHECK(bitasset.current_feed.maintenance_collateral_ratio == GRAPHENE_DEFAULT_MAINTENANCE_COLLATERAL_RATIO); + + BOOST_TEST_MESSAGE("Adding Witness 2's pricing to JMJBIT"); + add_price_feed(*this, active_witnesses[2], bit_usd_id, 1, bit_jmj_id, 10, core_id); + + BOOST_CHECK_EQUAL(bitasset.current_feed.settlement_price.to_real(), 25); + BOOST_CHECK(bitasset.current_feed.maintenance_collateral_ratio == GRAPHENE_DEFAULT_MAINTENANCE_COLLATERAL_RATIO); + generate_block(); + trx.clear(); + + BOOST_CHECK(bitasset.current_feed.core_exchange_rate.base.asset_id != bitasset.current_feed.core_exchange_rate.quote.asset_id); + } +} +*/ + +BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/block_tests.cpp b/tests/tests/block_tests.cpp index 9e5e5ebcdd..2c7eec7d05 100644 --- a/tests/tests/block_tests.cpp +++ b/tests/tests/block_tests.cpp @@ -32,6 +32,9 @@ #include #include #include +#include +#include +#include #include @@ -49,7 +52,7 @@ genesis_state_type make_genesis() { auto init_account_priv_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("null_key"))); genesis_state.initial_active_witnesses = 10; - for( int i = 0; i < genesis_state.initial_active_witnesses; ++i ) + for( unsigned int i = 0; i < genesis_state.initial_active_witnesses; ++i ) { auto name = "init"+fc::to_string(i); genesis_state.initial_accounts.emplace_back(name, @@ -77,11 +80,12 @@ BOOST_AUTO_TEST_CASE( block_database_test ) FC_ASSERT( !bdb.is_open() ); bdb.open( data_dir.path() ); - signed_block b; + clearable_block b; for( uint32_t i = 0; i < 5; ++i ) { if( i > 0 ) b.previous = b.id(); b.witness = witness_id_type(i+1); + b.clear(); bdb.store( b.id(), b ); auto fetch = bdb.fetch_by_number( b.block_num() ); @@ -135,9 +139,10 @@ BOOST_AUTO_TEST_CASE( generate_empty_blocks ) // TODO: Don't generate this here auto init_account_priv_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("null_key")) ); signed_block cutoff_block; + uint32_t last_block; { database 
db; - db.open(data_dir.path(), make_genesis ); + db.open(data_dir.path(), make_genesis, "TEST" ); b = db.generate_block(db.get_slot_time(1), db.get_scheduled_witness(1), init_account_priv_key, database::skip_nothing); // TODO: Change this test when we correct #406 @@ -154,6 +159,7 @@ BOOST_AUTO_TEST_CASE( generate_empty_blocks ) if( cutoff_height >= 200 ) { cutoff_block = *(db.fetch_block_by_number( cutoff_height )); + last_block = db.head_block_num(); break; } } @@ -161,8 +167,10 @@ BOOST_AUTO_TEST_CASE( generate_empty_blocks ) } { database db; - db.open(data_dir.path(), []{return genesis_state_type();}); - BOOST_CHECK_EQUAL( db.head_block_num(), cutoff_block.block_num() ); + db.open(data_dir.path(), []{return genesis_state_type();}, "TEST"); + BOOST_CHECK_EQUAL( db.head_block_num(), last_block ); + while( db.head_block_num() > cutoff_block.block_num() ) + db.pop_block(); b = cutoff_block; for( uint32_t i = 0; i < 200; ++i ) { @@ -186,7 +194,7 @@ BOOST_AUTO_TEST_CASE( undo_block ) fc::temp_directory data_dir( graphene::utilities::temp_directory_path() ); { database db; - db.open(data_dir.path(), make_genesis); + db.open(data_dir.path(), make_genesis, "TEST"); fc::time_point_sec now( GRAPHENE_TESTING_GENESIS_TIMESTAMP ); std::vector< time_point_sec > time_stack; @@ -228,6 +236,123 @@ BOOST_AUTO_TEST_CASE( undo_block ) } } +BOOST_AUTO_TEST_CASE( change_signing_key_test ) +{ + try { + fc::temp_directory data_dir( graphene::utilities::temp_directory_path() ); + + auto init_account_priv_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("null_key")) ); + auto init_pub_key = init_account_priv_key.get_public_key(); + auto new_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("new_key")) ); + auto new_pub_key = new_key.get_public_key(); + + std::map< public_key_type, fc::ecc::private_key > key_map; + key_map[init_pub_key] = init_account_priv_key; + key_map[new_pub_key] = new_key; + + std::set< witness_id_type > witnesses; + for( uint32_t i = 0; i <= 11; ++i ) // 11 init witnesses and 0 is reserved + witnesses.insert( witness_id_type(i) ); + + auto change_signing_key = [&init_account_priv_key]( database& db, witness_id_type wit, public_key_type new_signing_key ) { + witness_update_operation wuop; + wuop.witness_account = wit(db).witness_account; + wuop.witness = wit; + wuop.new_signing_key = new_signing_key; + signed_transaction wu_trx; + wu_trx.operations.push_back( wuop ); + wu_trx.set_reference_block( db.head_block_id() ); + wu_trx.set_expiration( db.head_block_time() + + fc::seconds( 0x1000 * db.get_global_properties().parameters.block_interval ) ); + wu_trx.sign( init_account_priv_key, db.get_chain_id() ); + PUSH_TX( db, wu_trx, 0 ); + }; + + { + database db; + + // open database + db.open(data_dir.path(), make_genesis, "TEST"); + + // generate some empty blocks with init keys + for( uint32_t i = 0; i < 30; ++i ) + { + auto now = db.get_slot_time(1); + auto next_witness = db.get_scheduled_witness( 1 ); + db.generate_block( now, next_witness, init_account_priv_key, database::skip_nothing ); + } + + // generate some blocks and change keys in same block + for( uint32_t i = 0; i < 9; ++i ) + { + auto now = db.get_slot_time(1); + auto next_witness = db.get_scheduled_witness( 1 ); + public_key_type current_key = next_witness(db).signing_key; + change_signing_key( db, next_witness, new_key.get_public_key() ); + idump( (i)(now)(next_witness) ); + auto b = db.generate_block( now, next_witness, key_map[current_key], database::skip_nothing ); + idump( (b) ); + } + + // pop a few 
blocks and clear pending, some signing keys should be changed back + for( uint32_t i = 0; i < 4; ++i ) + { + db.pop_block(); + } + db._popped_tx.clear(); + db.clear_pending(); + + // generate a few blocks and change keys in same block + for( uint32_t i = 0; i < 2; ++i ) + { + auto now = db.get_slot_time(1); + auto next_witness = db.get_scheduled_witness( 1 ); + public_key_type current_key = next_witness(db).signing_key; + change_signing_key( db, next_witness, new_key.get_public_key() ); + idump( (i)(now)(next_witness) ); + auto b = db.generate_block( now, next_witness, key_map[current_key], database::skip_nothing ); + idump( (b) ); + } + + // generate some blocks but don't change a key + for( uint32_t i = 0; i < 25; ++i ) + { + auto now = db.get_slot_time(1); + auto next_witness = db.get_scheduled_witness( 1 ); + public_key_type current_key = next_witness(db).signing_key; + idump( (i)(now)(next_witness) ); + auto b = db.generate_block( now, next_witness, key_map[current_key], database::skip_nothing ); + idump( (b) ); + } + + // close the database, flush all data to disk + db.close(); + } + { + database db; + + // reopen database, all data should be unchanged + db.open(data_dir.path(), make_genesis, "TEST"); + + // generate more blocks and change keys in same block + for( uint32_t i = 0; i < 25; ++i ) + { + auto now = db.get_slot_time(1); + auto next_witness = db.get_scheduled_witness( 1 ); + public_key_type current_key = next_witness(db).signing_key; + change_signing_key( db, next_witness, new_key.get_public_key() ); + idump( (i)(now)(next_witness) ); + auto b = db.generate_block( now, next_witness, key_map[current_key], database::skip_nothing ); + idump( (b) ); + } + + } + } catch (fc::exception& e) { + edump((e.to_detail_string())); + throw; + } +} + BOOST_AUTO_TEST_CASE( fork_blocks ) { try { @@ -235,57 +360,112 @@ BOOST_AUTO_TEST_CASE( fork_blocks ) fc::temp_directory data_dir2( graphene::utilities::temp_directory_path() ); database db1; - db1.open(data_dir1.path(), make_genesis); + db1.open(data_dir1.path(), make_genesis, "TEST"); database db2; - db2.open(data_dir2.path(), make_genesis); + db2.open(data_dir2.path(), make_genesis, "TEST"); BOOST_CHECK( db1.get_chain_id() == db2.get_chain_id() ); auto init_account_priv_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("null_key")) ); - for( uint32_t i = 0; i < 10; ++i ) + + BOOST_TEST_MESSAGE( "Adding blocks 1 through 10" ); + for( uint32_t i = 1; i <= 10; ++i ) { auto b = db1.generate_block(db1.get_slot_time(1), db1.get_scheduled_witness(1), init_account_priv_key, database::skip_nothing); try { PUSH_BLOCK( db2, b ); } FC_CAPTURE_AND_RETHROW( ("db2") ); } - for( uint32_t i = 10; i < 13; ++i ) - { - auto b = db1.generate_block(db1.get_slot_time(1), db1.get_scheduled_witness(1), init_account_priv_key, database::skip_nothing); - } - string db1_tip = db1.head_block_id().str(); - uint32_t next_slot = 3; - for( uint32_t i = 13; i < 16; ++i ) + + for( uint32_t j = 0; j <= 4; j += 4 ) { - auto b = db2.generate_block(db2.get_slot_time(next_slot), db2.get_scheduled_witness(next_slot), init_account_priv_key, database::skip_nothing); - next_slot = 1; - // notify both databases of the new block. 
- // only db2 should switch to the new fork, db1 should not - PUSH_BLOCK( db1, b ); + // add blocks 11 through 13 to db1 only + BOOST_TEST_MESSAGE( "Adding 3 blocks to db1 only" ); + for( uint32_t i = 11 + j; i <= 13 + j; ++i ) + { + BOOST_TEST_MESSAGE( i ); + auto b = db1.generate_block(db1.get_slot_time(1), db1.get_scheduled_witness(1), init_account_priv_key, database::skip_nothing); + } + string db1_tip = db1.head_block_id().str(); + + // add different blocks 11 through 13 to db2 only + BOOST_TEST_MESSAGE( "Add 3 different blocks to db2 only" ); + uint32_t next_slot = 3; + for( uint32_t i = 11 + j; i <= 13 + j; ++i ) + { + BOOST_TEST_MESSAGE( i ); + auto b = db2.generate_block(db2.get_slot_time(next_slot), db2.get_scheduled_witness(next_slot), init_account_priv_key, database::skip_nothing); + next_slot = 1; + // notify both databases of the new block. + // only db2 should switch to the new fork, db1 should not + PUSH_BLOCK( db1, b ); + BOOST_CHECK_EQUAL(db1.head_block_id().str(), db1_tip); + BOOST_CHECK_EQUAL(db2.head_block_id().str(), b.id().str()); + } + + //The two databases are on distinct forks now, but at the same height. + BOOST_CHECK_EQUAL(db1.head_block_num(), 13u + j); + BOOST_CHECK_EQUAL(db2.head_block_num(), 13u + j); + BOOST_CHECK( db1.head_block_id() != db2.head_block_id() ); + + //Make a block on db2, make it invalid, then + //pass it to db1 and assert that db1 doesn't switch to the new fork. + signed_block good_block; + { + auto b = db2.generate_block(db2.get_slot_time(1), db2.get_scheduled_witness(1), init_account_priv_key, database::skip_nothing); + good_block = b; + b.transactions.emplace_back(signed_transaction()); + b.transactions.back().operations.emplace_back(transfer_operation()); + b.sign( init_account_priv_key ); + BOOST_CHECK_EQUAL(b.block_num(), 14u + j); + GRAPHENE_CHECK_THROW(PUSH_BLOCK( db1, b ), fc::exception); + + // At this point, `fetch_block_by_number` will fetch block from fork_db, + // so unable to reproduce the issue which is fixed in PR #938 + // https://github.com/bitshares/bitshares-core/pull/938 + fc::optional previous_block = db1.fetch_block_by_number(1); + BOOST_CHECK ( previous_block.valid() ); + uint32_t db1_blocks = db1.head_block_num(); + for( uint32_t curr_block_num = 2; curr_block_num <= db1_blocks; ++curr_block_num ) + { + fc::optional curr_block = db1.fetch_block_by_number( curr_block_num ); + BOOST_CHECK( curr_block.valid() ); + BOOST_CHECK_EQUAL( curr_block->previous.str(), previous_block->id().str() ); + previous_block = curr_block; + } + } + BOOST_CHECK_EQUAL(db1.head_block_num(), 13u + j); BOOST_CHECK_EQUAL(db1.head_block_id().str(), db1_tip); - BOOST_CHECK_EQUAL(db2.head_block_id().str(), b.id().str()); + + if( j == 0 ) + { + // assert that db1 switches to new fork with good block + BOOST_CHECK_EQUAL(db2.head_block_num(), 14u + j); + PUSH_BLOCK( db1, good_block ); + BOOST_CHECK_EQUAL(db1.head_block_id().str(), db2.head_block_id().str()); + } } - //The two databases are on distinct forks now, but at the same height. Make a block on db2, make it invalid, then - //pass it to db1 and assert that db1 doesn't switch to the new fork. 
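What these assertions rely on is a simple fork-choice rule: a node keeps its current head when offered a fork of equal length or a longer fork containing an invalid block, and reorganizes only onto a strictly longer fork whose blocks all validate. The sketch below is not graphene's fork_database (which pushes blocks one at a time and pops and reapplies them during a switch); toy_block, toy_node and consider_fork are invented names and "validity" is reduced to a flag, purely to show the decision the BOOST_CHECKs encode.

#include <cassert>
#include <vector>

struct toy_block { unsigned num; bool valid; };

struct toy_node {
    std::vector<toy_block> chain;   // current best chain; size() is the head height

    // Consider a competing fork; returns true if the node switched to it.
    bool consider_fork(const std::vector<toy_block>& fork) {
        if (fork.size() <= chain.size())
            return false;                  // not strictly longer: keep the current head
        for (const toy_block& b : fork)
            if (!b.valid)
                return false;              // one invalid block rejects the whole fork
        chain = fork;                      // longer and fully valid: reorganize
        return true;
    }
};

int main() {
    toy_node db1;
    for (unsigned i = 1; i <= 13; ++i) db1.chain.push_back({i, true});   // db1 at height 13

    std::vector<toy_block> db2_fork(db1.chain.begin(), db1.chain.begin() + 10); // shared history
    for (unsigned i = 11; i <= 14; ++i) db2_fork.push_back({i, true});   // db2's own blocks

    std::vector<toy_block> bad_fork = db2_fork;
    bad_fork.back().valid = false;         // corrupt the 14th block, as the test does

    assert( !db1.consider_fork(bad_fork) );        // db1 must not switch to the bad fork
    assert( db1.chain.size() == 13u );
    assert( db1.consider_fork(db2_fork) );         // the good 14th block triggers the switch
    assert( db1.chain.size() == 14u );
    return 0;
}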
- signed_block good_block; - BOOST_CHECK_EQUAL(db1.head_block_num(), 13); - BOOST_CHECK_EQUAL(db2.head_block_num(), 13); + // generate more blocks to push the forked blocks out of fork_db + BOOST_TEST_MESSAGE( "Adding more blocks to db1, push the forked blocks out of fork_db" ); + for( uint32_t i = 1; i <= 50; ++i ) { - auto b = db2.generate_block(db2.get_slot_time(1), db2.get_scheduled_witness(1), init_account_priv_key, database::skip_nothing); - good_block = b; - b.transactions.emplace_back(signed_transaction()); - b.transactions.back().operations.emplace_back(transfer_operation()); - b.sign( init_account_priv_key ); - BOOST_CHECK_EQUAL(b.block_num(), 14); - GRAPHENE_CHECK_THROW(PUSH_BLOCK( db1, b ), fc::exception); + db1.generate_block(db1.get_slot_time(1), db1.get_scheduled_witness(1), init_account_priv_key, database::skip_nothing); } - BOOST_CHECK_EQUAL(db1.head_block_num(), 13); - BOOST_CHECK_EQUAL(db1.head_block_id().str(), db1_tip); - // assert that db1 switches to new fork with good block - BOOST_CHECK_EQUAL(db2.head_block_num(), 14); - PUSH_BLOCK( db1, good_block ); - BOOST_CHECK_EQUAL(db1.head_block_id().str(), db2.head_block_id().str()); + { + // PR #938 make sure db is in a good state https://github.com/bitshares/bitshares-core/pull/938 + BOOST_TEST_MESSAGE( "Checking whether all blocks on disk are good" ); + fc::optional previous_block = db1.fetch_block_by_number(1); + BOOST_CHECK ( previous_block.valid() ); + uint32_t db1_blocks = db1.head_block_num(); + for( uint32_t curr_block_num = 2; curr_block_num <= db1_blocks; ++curr_block_num ) + { + fc::optional curr_block = db1.fetch_block_by_number( curr_block_num ); + BOOST_CHECK( curr_block.valid() ); + BOOST_CHECK_EQUAL( curr_block->previous.str(), previous_block->id().str() ); + previous_block = curr_block; + } + } } catch (fc::exception& e) { edump((e.to_detail_string())); throw; @@ -380,7 +560,7 @@ BOOST_AUTO_TEST_CASE( undo_pending ) fc::temp_directory data_dir( graphene::utilities::temp_directory_path() ); { database db; - db.open(data_dir.path(), make_genesis); + db.open(data_dir.path(), make_genesis, "TEST"); auto init_account_priv_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("null_key")) ); public_key_type init_account_pub_key = init_account_priv_key.get_public_key(); @@ -422,11 +602,11 @@ BOOST_AUTO_TEST_CASE( undo_pending ) t.to = nathan_id; t.amount = asset(5000); trx.operations.push_back(t); - db.push_transaction(trx, ~0); + PUSH_TX(db, trx, ~0); trx.clear(); set_expiration( db, trx ); trx.operations.push_back(t); - db.push_transaction(trx, ~0); + PUSH_TX(db, trx, ~0); BOOST_CHECK(db.get_balance(nathan_id, asset_id_type()).amount == 10000); db.clear_pending(); @@ -445,8 +625,8 @@ BOOST_AUTO_TEST_CASE( switch_forks_undo_create ) dir2( graphene::utilities::temp_directory_path() ); database db1, db2; - db1.open(dir1.path(), make_genesis); - db2.open(dir2.path(), make_genesis); + db1.open(dir1.path(), make_genesis, "TEST"); + db2.open(dir2.path(), make_genesis, "TEST"); BOOST_CHECK( db1.get_chain_id() == db2.get_chain_id() ); auto init_account_priv_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("null_key")) ); @@ -504,11 +684,11 @@ BOOST_AUTO_TEST_CASE( duplicate_transactions ) dir2( graphene::utilities::temp_directory_path() ); database db1, db2; - db1.open(dir1.path(), make_genesis); - db2.open(dir2.path(), make_genesis); + db1.open(dir1.path(), make_genesis, "TEST"); + db2.open(dir2.path(), make_genesis, "TEST"); BOOST_CHECK( db1.get_chain_id() == db2.get_chain_id() ); - auto 
skip_sigs = database::skip_transaction_signatures | database::skip_authority_check; + auto skip_sigs = database::skip_transaction_signatures; auto init_account_priv_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("null_key")) ); public_key_type init_account_pub_key = init_account_priv_key.get_public_key(); @@ -554,7 +734,7 @@ BOOST_AUTO_TEST_CASE( tapos ) try { fc::temp_directory dir1( graphene::utilities::temp_directory_path() ); database db1; - db1.open(dir1.path(), make_genesis); + db1.open(dir1.path(), make_genesis, "TEST"); const account_object& init1 = *db1.get_index_type().indices().get().find("init1"); @@ -577,7 +757,7 @@ BOOST_AUTO_TEST_CASE( tapos ) cop.active = cop.owner; trx.operations.push_back(cop); trx.sign( init_account_priv_key, db1.get_chain_id() ); - db1.push_transaction(trx); + PUSH_TX(db1, trx); b = db1.generate_block(db1.get_slot_time(1), db1.get_scheduled_witness(1), init_account_priv_key, database::skip_nothing); trx.clear(); @@ -587,11 +767,11 @@ BOOST_AUTO_TEST_CASE( tapos ) trx.operations.push_back(t); trx.sign( init_account_priv_key, db1.get_chain_id() ); //relative_expiration is 1, but ref block is 2 blocks old, so this should fail. - GRAPHENE_REQUIRE_THROW(PUSH_TX( db1, trx, database::skip_transaction_signatures | database::skip_authority_check ), fc::exception); + GRAPHENE_REQUIRE_THROW(PUSH_TX( db1, trx, database::skip_transaction_signatures ), fc::exception); set_expiration( db1, trx ); - trx.signatures.clear(); + trx.clear_signatures(); trx.sign( init_account_priv_key, db1.get_chain_id() ); - db1.push_transaction(trx, database::skip_transaction_signatures | database::skip_authority_check); + PUSH_TX( db1, trx, database::skip_transaction_signatures ); } catch (fc::exception& e) { edump((e.to_detail_string())); throw; @@ -621,14 +801,14 @@ BOOST_FIXTURE_TEST_CASE( optional_tapos, database_fixture ) tx.ref_block_num = 0; tx.ref_block_prefix = 0; - tx.signatures.clear(); + tx.clear_signatures(); sign( tx, alice_private_key ); PUSH_TX( db, tx ); BOOST_TEST_MESSAGE( "proper ref_block_num, ref_block_prefix" ); set_expiration( db, tx ); - tx.signatures.clear(); + tx.clear_signatures(); sign( tx, alice_private_key ); PUSH_TX( db, tx ); @@ -636,7 +816,7 @@ BOOST_FIXTURE_TEST_CASE( optional_tapos, database_fixture ) tx.ref_block_num = 0; tx.ref_block_prefix = 0x12345678; - tx.signatures.clear(); + tx.clear_signatures(); sign( tx, alice_private_key ); GRAPHENE_REQUIRE_THROW( PUSH_TX( db, tx ), fc::exception ); @@ -644,7 +824,7 @@ BOOST_FIXTURE_TEST_CASE( optional_tapos, database_fixture ) tx.ref_block_num = 1; tx.ref_block_prefix = 0x12345678; - tx.signatures.clear(); + tx.clear_signatures(); sign( tx, alice_private_key ); GRAPHENE_REQUIRE_THROW( PUSH_TX( db, tx ), fc::exception ); @@ -652,7 +832,7 @@ BOOST_FIXTURE_TEST_CASE( optional_tapos, database_fixture ) tx.ref_block_num = 9999; tx.ref_block_prefix = 0x12345678; - tx.signatures.clear(); + tx.clear_signatures(); sign( tx, alice_private_key ); GRAPHENE_REQUIRE_THROW( PUSH_TX( db, tx ), fc::exception ); } @@ -667,7 +847,7 @@ BOOST_FIXTURE_TEST_CASE( maintenance_interval, database_fixture ) { try { generate_block(); - BOOST_CHECK_EQUAL(db.head_block_num(), 2); + BOOST_CHECK_EQUAL(db.head_block_num(), 2u); fc::time_point_sec maintenence_time = db.get_dynamic_global_properties().next_maintenance_time; BOOST_CHECK_GT(maintenence_time.sec_since_epoch(), db.head_block_time().sec_since_epoch()); @@ -718,7 +898,7 @@ BOOST_FIXTURE_TEST_CASE( limit_order_expiration, database_fixture ) //Get a sane head block 
time generate_block(); - auto* test = &create_bitasset("TEST"); + auto* test = &create_bitasset("MIATEST"); auto* core = &asset_id_type()(db); auto* nathan = &create_account("nathan"); auto* committee = &account_id_type()(db); @@ -747,7 +927,7 @@ BOOST_FIXTURE_TEST_CASE( limit_order_expiration, database_fixture ) auto id = limit_itr->id; generate_blocks(op.expiration, false); - test = &get_asset("TEST"); + test = &get_asset("MIATEST"); core = &asset_id_type()(db); nathan = &get_account("nathan"); committee = &account_id_type()(db); @@ -772,7 +952,7 @@ BOOST_FIXTURE_TEST_CASE( double_sign_check, database_fixture ) for( auto& op : trx.operations ) db.current_fee_schedule().set_fee(op); trx.validate(); - db.push_transaction(trx, ~0); + PUSH_TX(db, trx, ~0); trx.operations.clear(); t.from = bob.id; @@ -783,22 +963,21 @@ BOOST_FIXTURE_TEST_CASE( double_sign_check, database_fixture ) trx.validate(); BOOST_TEST_MESSAGE( "Verify that not-signing causes an exception" ); - GRAPHENE_REQUIRE_THROW( db.push_transaction(trx, 0), fc::exception ); + GRAPHENE_REQUIRE_THROW( PUSH_TX(db, trx, 0), fc::exception ); BOOST_TEST_MESSAGE( "Verify that double-signing causes an exception" ); sign( trx, bob_private_key ); sign( trx, bob_private_key ); - GRAPHENE_REQUIRE_THROW( db.push_transaction(trx, 0), tx_duplicate_sig ); + GRAPHENE_REQUIRE_THROW( PUSH_TX(db, trx, 0), tx_duplicate_sig ); BOOST_TEST_MESSAGE( "Verify that signing with an extra, unused key fails" ); trx.signatures.pop_back(); sign( trx, generate_private_key("bogus" )); - GRAPHENE_REQUIRE_THROW( db.push_transaction(trx, 0), tx_irrelevant_sig ); + GRAPHENE_REQUIRE_THROW( PUSH_TX(db, trx, 0), tx_irrelevant_sig ); BOOST_TEST_MESSAGE( "Verify that signing once with the proper key passes" ); trx.signatures.pop_back(); - db.push_transaction(trx, 0); - sign( trx, bob_private_key ); + PUSH_TX(db, trx, 0); } FC_LOG_AND_RETHROW() } @@ -819,7 +998,7 @@ BOOST_FIXTURE_TEST_CASE( change_block_interval, database_fixture ) uop.new_parameters.block_interval = 1; cop.proposed_ops.emplace_back(uop); trx.operations.push_back(cop); - db.push_transaction(trx); + PUSH_TX(db, trx); } BOOST_TEST_MESSAGE( "Updating proposal by signing with the committee_member private key" ); { @@ -840,22 +1019,22 @@ BOOST_FIXTURE_TEST_CASE( change_block_interval, database_fixture ) sign( trx, get_account("init6" ).active.get_keys().front(),init_account_priv_key); sign( trx, get_account("init7" ).active.get_keys().front(),init_account_priv_key); */ - db.push_transaction(trx); + PUSH_TX(db, trx); BOOST_CHECK(proposal_id_type()(db).is_authorized_to_execute(db)); } BOOST_TEST_MESSAGE( "Verifying that the interval didn't change immediately" ); - BOOST_CHECK_EQUAL(db.get_global_properties().parameters.block_interval, 5); + BOOST_CHECK_EQUAL(db.get_global_properties().parameters.block_interval, 5u); auto past_time = db.head_block_time().sec_since_epoch(); generate_block(); - BOOST_CHECK_EQUAL(db.head_block_time().sec_since_epoch() - past_time, 5); + BOOST_CHECK_EQUAL(db.head_block_time().sec_since_epoch() - past_time, 5u); generate_block(); - BOOST_CHECK_EQUAL(db.head_block_time().sec_since_epoch() - past_time, 10); + BOOST_CHECK_EQUAL(db.head_block_time().sec_since_epoch() - past_time, 10u); BOOST_TEST_MESSAGE( "Generating blocks until proposal expires" ); generate_blocks(proposal_id_type()(db).expiration_time + 5); BOOST_TEST_MESSAGE( "Verify that the block interval is still 5 seconds" ); - BOOST_CHECK_EQUAL(db.get_global_properties().parameters.block_interval, 5); + 
BOOST_CHECK_EQUAL(db.get_global_properties().parameters.block_interval, 5u); BOOST_TEST_MESSAGE( "Generating blocks until next maintenance interval" ); generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); @@ -865,9 +1044,9 @@ BOOST_FIXTURE_TEST_CASE( change_block_interval, database_fixture ) BOOST_CHECK_EQUAL(db.get_global_properties().parameters.block_interval, 1); past_time = db.head_block_time().sec_since_epoch(); generate_block(); - BOOST_CHECK_EQUAL(db.head_block_time().sec_since_epoch() - past_time, 1); + BOOST_CHECK_EQUAL(db.head_block_time().sec_since_epoch() - past_time, 1u); generate_block(); - BOOST_CHECK_EQUAL(db.head_block_time().sec_since_epoch() - past_time, 2); + BOOST_CHECK_EQUAL(db.head_block_time().sec_since_epoch() - past_time, 2u); } FC_LOG_AND_RETHROW() } BOOST_FIXTURE_TEST_CASE( pop_block_twice, database_fixture ) @@ -877,7 +1056,6 @@ BOOST_FIXTURE_TEST_CASE( pop_block_twice, database_fixture ) uint32_t skip_flags = ( database::skip_witness_signature | database::skip_transaction_signatures - | database::skip_authority_check ); const asset_object& core = asset_id_type()(db); @@ -944,7 +1122,7 @@ BOOST_FIXTURE_TEST_CASE( rsf_missed_blocks, database_fixture ) "1111111111111111111111111111111111111111111111111111111111111111" "1111111111111111111111111111111111111111111111111111111111111111" ); - BOOST_CHECK_EQUAL( db.witness_participation_rate(), GRAPHENE_100_PERCENT ); + BOOST_CHECK_EQUAL( db.witness_participation_rate(), (uint32_t)GRAPHENE_100_PERCENT ); generate_block( ~0, init_account_priv_key, 1 ); BOOST_CHECK_EQUAL( rsf(), @@ -1052,19 +1230,20 @@ BOOST_FIXTURE_TEST_CASE( transaction_invalidated_in_cache, database_fixture ) }; // tx's created by ACTORS() have bogus authority, so we need to - // skip_authority_check in the block where they're included - signed_block b1 = generate_block(db, database::skip_authority_check); + // skip_transaction_signatures in the block where they're included + signed_block b1 = generate_block(db, database::skip_transaction_signatures); fc::temp_directory data_dir2( graphene::utilities::temp_directory_path() ); database db2; - db2.open(data_dir2.path(), make_genesis); + db2.open(data_dir2.path(), make_genesis, "TEST"); BOOST_CHECK( db.get_chain_id() == db2.get_chain_id() ); while( db2.head_block_num() < db.head_block_num() ) { optional< signed_block > b = db.fetch_block_by_number( db2.head_block_num()+1 ); - db2.push_block(*b, database::skip_witness_signature); + db2.push_block(*b, database::skip_witness_signature + |database::skip_transaction_signatures ); } BOOST_CHECK( db2.get( alice_id ).name == "alice" ); BOOST_CHECK( db2.get( bob_id ).name == "bob" ); @@ -1073,7 +1252,7 @@ BOOST_FIXTURE_TEST_CASE( transaction_invalidated_in_cache, database_fixture ) transfer( account_id_type(), alice_id, asset( 1000 ) ); transfer( account_id_type(), bob_id, asset( 1000 ) ); // need to skip authority check here as well for same reason as above - db2.push_block(generate_block(db, database::skip_authority_check), database::skip_authority_check); + db2.push_block(generate_block(db, database::skip_transaction_signatures), database::skip_transaction_signatures); BOOST_CHECK_EQUAL(db.get_balance(alice_id, asset_id_type()).amount.value, 1000); BOOST_CHECK_EQUAL(db.get_balance( bob_id, asset_id_type()).amount.value, 1000); @@ -1108,7 +1287,7 @@ BOOST_FIXTURE_TEST_CASE( transaction_invalidated_in_cache, database_fixture ) signed_transaction tx = generate_xfer_tx( alice_id, bob_id, 1000, 2 ); tx.set_expiration( db.head_block_time() + 2 
* db.get_global_properties().parameters.block_interval ); - tx.signatures.clear(); + tx.clear_signatures(); sign( tx, alice_private_key ); // put the tx in db tx cache PUSH_TX( db, tx ); @@ -1218,7 +1397,7 @@ BOOST_AUTO_TEST_CASE( genesis_reserve_ids ) genesis_state.initial_assets.push_back( usd ); return genesis_state; - } ); + }, "TEST" ); const auto& acct_idx = db.get_index_type().indices().get(); auto acct_itr = acct_idx.find("init0"); @@ -1237,18 +1416,378 @@ BOOST_AUTO_TEST_CASE( genesis_reserve_ids ) } } +BOOST_FIXTURE_TEST_CASE( miss_some_blocks, database_fixture ) +{ try { + std::vector witnesses = witness_schedule_id_type()(db).current_shuffled_witnesses; + BOOST_CHECK_EQUAL( 10u, witnesses.size() ); + // database_fixture constructor calls generate_block once, signed by witnesses[0] + generate_block(); // witnesses[1] + generate_block(); // witnesses[2] + for( const auto& id : witnesses ) + BOOST_CHECK_EQUAL( 0, id(db).total_missed ); + // generate_blocks generates another block *now* (witnesses[3]) + // and one at now+10 blocks (witnesses[12%10]) + generate_blocks( db.head_block_time() + db.get_global_properties().parameters.block_interval * 10, true ); + // i. e. 8 blocks are missed in between by witness[4..11%10] + for( uint32_t i = 0; i < witnesses.size(); i++ ) + BOOST_CHECK_EQUAL( (i+7) % 10 < 2 ? 0 : 1, witnesses[i](db).total_missed ); +} FC_LOG_AND_RETHROW() } + BOOST_FIXTURE_TEST_CASE( miss_many_blocks, database_fixture ) { try { + auto get_misses = []( database& db ) { + std::map< witness_id_type, uint32_t > misses; + for( const auto& witness_id : witness_schedule_id_type()(db).current_shuffled_witnesses ) + misses[witness_id] = witness_id(db).total_missed; + return misses; + }; generate_block(); generate_block(); generate_block(); + auto missed_before = get_misses( db ); // miss 10 maintenance intervals generate_blocks( db.get_dynamic_global_properties().next_maintenance_time + db.get_global_properties().parameters.maintenance_interval * 10, true ); generate_block(); generate_block(); generate_block(); + auto missed_after = get_misses( db ); + BOOST_CHECK_EQUAL( missed_before.size(), missed_after.size() ); + for( const auto& miss : missed_before ) + { + const auto& after = missed_after.find( miss.first ); + BOOST_REQUIRE( after != missed_after.end() ); + BOOST_CHECK_EQUAL( miss.second, after->second ); + } + } + catch (fc::exception& e) + { + edump((e.to_detail_string())); + throw; + } +} + +BOOST_FIXTURE_TEST_CASE( update_account_keys, database_fixture ) +{ + try + { + const asset_object& core = asset_id_type()(db); + uint32_t skip_flags = + database::skip_transaction_dupe_check + | database::skip_witness_signature + | database::skip_transaction_signatures + ; + + // Sam is the creator of accounts + private_key_type committee_key = init_account_priv_key; + private_key_type sam_key = generate_private_key("sam"); + + // + // A = old key set + // B = new key set + // + // we measure how many times we test following four cases: + // + // A-B B-A + // alice case_count[0] A == B empty empty + // bob case_count[1] A < B empty nonempty + // charlie case_count[2] B < A nonempty empty + // dan case_count[3] A nc B nonempty nonempty + // + // and assert that all four cases were tested at least once + // + account_object sam_account_object = create_account( "sam", sam_key ); + + // upgrade sam to LTM + upgrade_to_lifetime_member(sam_account_object.id); + + //Get a sane head block time + generate_block( skip_flags ); + + db.modify(db.get_global_properties(), 
[](global_property_object& p) { + p.parameters.committee_proposal_review_period = fc::hours(1).to_seconds(); + }); + + transaction tx; + processed_transaction ptx; + + account_object committee_account_object = committee_account(db); + // transfer from committee account to Sam account + transfer(committee_account_object, sam_account_object, core.amount(100000)); + + const int num_keys = 5; + vector< private_key_type > numbered_private_keys; + vector< vector< public_key_type > > numbered_key_id; + numbered_private_keys.reserve( num_keys ); + numbered_key_id.push_back( vector() ); + numbered_key_id.push_back( vector() ); + + for( int i=0; i > possible_key_sched; + const int num_key_sched = (1 << num_keys)-1; + possible_key_sched.reserve( num_key_sched ); + + for( int s=1; s<=num_key_sched; s++ ) + { + vector< int > v; + int i = 0; + v.reserve( num_keys ); + while( v.size() < num_keys ) + { + if( s & (1 << i) ) + v.push_back( i ); + i++; + if( i >= num_keys ) + i = 0; + } + possible_key_sched.push_back( v ); + } + + // we can only undo in blocks + generate_block( skip_flags ); + + std::cout << "update_account_keys: this test will take a few minutes...\n"; + + // Originally we had a loop here to go from use_address=0 to 1 + // Live chain do not allow this so it had to be removed: https://github.com/bitshares/bitshares-core/issues/565 + vector< public_key_type > key_ids = numbered_key_id[ 0 ]; + for( int num_owner_keys=1; num_owner_keys<=2; num_owner_keys++ ) + { + for( int num_active_keys=1; num_active_keys<=2; num_active_keys++ ) + { + std::cout << 0 << num_owner_keys << num_active_keys << "\n"; + for( const vector< int >& key_sched_before : possible_key_sched ) + { + auto it = key_sched_before.begin(); + vector< const private_key_type* > owner_privkey; + vector< const public_key_type* > owner_keyid; + owner_privkey.reserve( num_owner_keys ); + + trx.clear(); + account_create_operation create_op; + create_op.name = "alice"; + + for( int owner_index=0; owner_index(); + + generate_block( skip_flags ); + for( const vector< int >& key_sched_after : possible_key_sched ) + { + auto it = key_sched_after.begin(); + + trx.clear(); + account_update_operation update_op; + update_op.account = alice_account_id; + update_op.owner = authority(); + update_op.active = authority(); + update_op.new_options = create_op.options; + + for( int owner_index=0; owner_indexkey_auths[ key_ids[ *(it++) ] ] = 1; + // size() < num_owner_keys is possible when some keys are duplicates + update_op.owner->weight_threshold = update_op.owner->key_auths.size(); + for( int active_index=0; active_indexkey_auths[ key_ids[ *(it++) ] ] = 1; + // size() < num_active_keys is possible when some keys are duplicates + update_op.active->weight_threshold = update_op.active->key_auths.size(); + FC_ASSERT( update_op.new_options.valid() ); + update_op.new_options->memo_key = key_ids[ *(it++) ] ; + + trx.operations.push_back( update_op ); + for( int i=0; i> 1; + + vector< witness_id_type > cur_round; + vector< witness_id_type > full_schedule; + // if we make the maximum witness count testable, + // we'll need to enlarge this. 
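Back in update_account_keys above, the possible_key_sched loop deserves a note: every non-zero bitmask over the numbered keys becomes a schedule of exactly num_keys indices by cycling through the bit positions and appending each set index, so masks with few bits set produce schedules with repeated keys, which is why the owner and active authorities later take their weight_threshold from key_auths.size() rather than the nominal key count. The fragment below reproduces just that enumeration outside the fixture; make_schedule is an invented helper, not part of the test suite.

#include <cstdio>
#include <vector>

// Mirrors the possible_key_sched construction: cycle through key indices
// 0..num_keys-1 and append every index whose bit is set in `mask` until the
// schedule holds num_keys entries. As in the original loop, `mask` must be
// non-zero or this would never terminate.
std::vector<int> make_schedule(int mask, int num_keys)
{
    std::vector<int> v;
    int i = 0;
    while ((int)v.size() < num_keys)
    {
        if (mask & (1 << i))
            v.push_back(i);
        if (++i >= num_keys)
            i = 0;
    }
    return v;
}

int main()
{
    const int num_keys = 5;
    // mask 0x05 selects keys 0 and 2, so the schedule repeats them: 0 2 0 2 0
    for (int idx : make_schedule(0x05, num_keys))
        std::printf("%d ", idx);
    std::printf("\n");
    // the full mask (all five bits set) yields each key exactly once: 0 1 2 3 4
    for (int idx : make_schedule((1 << num_keys) - 1, num_keys))
        std::printf("%d ", idx);
    std::printf("\n");
    return 0;
}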
+ std::bitset< 0x40 > witness_seen; + size_t total_blocks = 1000000; + + cur_round.reserve( num_witnesses ); + full_schedule.reserve( total_blocks ); + cur_round.push_back( db.get_dynamic_global_properties().current_witness ); + + // we assert so the test doesn't continue, which would + // corrupt memory + assert( num_witnesses <= witness_seen.size() ); + + while( full_schedule.size() < total_blocks ) + { + if( (db.head_block_num() & 0x3FFF) == 0 ) + { + wdump( (db.head_block_num()) ); + } + witness_id_type wid = db.get_scheduled_witness( 1 ); + full_schedule.push_back( wid ); + cur_round.push_back( wid ); + if( cur_round.size() == num_witnesses ) + { + // check that the current round contains exactly 1 copy + // of each witness + witness_seen.reset(); + for( const witness_id_type& w : cur_round ) + { + uint64_t inst = w.instance.value; + BOOST_CHECK( !witness_seen.test( inst ) ); + assert( !witness_seen.test( inst ) ); + witness_seen.set( inst ); + } + cur_round.clear(); + } + generate_block(); + } + + for( size_t i=0,m=full_schedule.size(); i 0 ); +} FC_LOG_AND_RETHROW() } + +/// +/// This test case tries to +/// * generate blocks when there are too many pending transactions, +/// * push blocks that are too large. +/// If we add some logging in signed_transaction::get_signature_keys(), we can see if the code will extract public key(s) +/// from signature(s) of same transactions multiple times. +/// See https://github.com/bitshares/bitshares-core/pull/1251 +/// +BOOST_FIXTURE_TEST_CASE( block_size_test, database_fixture ) +{ + try + { + ACTORS((alice)(bob)); + + const fc::ecc::private_key& key = generate_private_key("null_key"); + BOOST_TEST_MESSAGE( "Give Alice some money" ); + transfer(committee_account, alice_id, asset(10000000)); + generate_block(); + + const size_t default_block_header_size = fc::raw::pack_size( signed_block_header() ); + const auto& gpo = db.get_global_properties(); + const auto block_interval = gpo.parameters.block_interval; + idump( (db.head_block_num())(default_block_header_size)(gpo.parameters.maximum_block_size) ); + + BOOST_TEST_MESSAGE( "Start" ); + // Note: a signed transaction with a transfer operation inside is at least 102 bytes; + // after processed, it become 103 bytes; + // an empty block is 112 bytes; + // a block with a transfer is 215 bytes; + // a block with 2 transfers is 318 bytes. 
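The byte counts in the comment above are what the loop below leans on: treating an empty block as roughly 112 bytes and each processed transfer transaction as roughly 103 bytes, a one-transfer block comes to about 215 bytes and a two-transfer block to about 318, so maximum_block_size values below a block's serialized size must make db.push_block throw. The sketch below is only that bookkeeping, with assumed constants lifted from the comment (kEmptyBlockBytes, kProcessedTxBytes and a stand-in header size); it does not call fc::raw::pack_size, and the real sizes can drift as the serialization changes.

#include <cassert>
#include <cstdint>

// Assumed sizes taken from the comment above, not measured here.
constexpr uint64_t kEmptyBlockBytes  = 112;   // empty signed block
constexpr uint64_t kProcessedTxBytes = 103;   // one transfer tx after processing

// Rough serialized size of a block holding `tx_count` processed transfers.
uint64_t approx_block_size(uint64_t tx_count)
{
    return kEmptyBlockBytes + tx_count * kProcessedTxBytes;
}

// Mirrors the check in the loop: the hand-built block is expected to be rejected
// exactly when its serialized size exceeds the chain's maximum_block_size.
bool block_exceeds_limit(uint64_t block_size, uint64_t maximum_block_size)
{
    return block_size > maximum_block_size;
}

int main()
{
    assert( approx_block_size(1) == 215 );   // "a block with a transfer is 215 bytes"
    assert( approx_block_size(2) == 318 );   // "a block with 2 transfers is 318 bytes"

    // Stand-in for pack_size(signed_block_header()); close to, but not necessarily
    // equal to, the empty-block size above.
    const uint64_t header = 112;
    assert(  block_exceeds_limit(approx_block_size(1), header + 90) );   // i == 90: must be rejected
    assert( !block_exceeds_limit(approx_block_size(1), header + 120) );  // i == 120: fits
    return 0;
}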
+ uint32_t large_block_count = 0; + for( uint64_t i = 90; i <= 230; ++i ) + { + if( i > 120 && i < 200 ) // skip some + i = 200; + + // Temporarily disable undo db and change max block size + db._undo_db.disable(); + db.modify( gpo, [i,&default_block_header_size](global_property_object& p) { + p.parameters.maximum_block_size = default_block_header_size + i; + }); + db._undo_db.enable(); + idump( (i)(gpo.parameters.maximum_block_size) ); + + // push a transaction + signed_transaction xfer_tx; + transfer_operation xfer_op; + xfer_op.from = alice_id; + xfer_op.to = bob_id; + xfer_op.amount = asset(i); + xfer_tx.operations.push_back( xfer_op ); + xfer_tx.set_expiration( db.head_block_time() + fc::seconds( 0x1000 * block_interval ) ); + xfer_tx.set_reference_block( db.head_block_id() ); + sign( xfer_tx, alice_private_key ); + auto processed_tx = PUSH_TX( db, xfer_tx, database::skip_nothing ); + + // sign a temporary block + signed_block maybe_large_block; + maybe_large_block.transactions.push_back(processed_tx); + maybe_large_block.previous = db.head_block_id(); + maybe_large_block.timestamp = db.get_slot_time(1); + maybe_large_block.transaction_merkle_root = maybe_large_block.calculate_merkle_root(); + maybe_large_block.witness = db.get_scheduled_witness(1); + maybe_large_block.sign(key); + auto maybe_large_block_size = fc::raw::pack_size(maybe_large_block); + idump( (maybe_large_block_size) ); + + // should fail to push if it's too large + if( maybe_large_block_size > gpo.parameters.maximum_block_size ) + { + ++large_block_count; + BOOST_CHECK_THROW( db.push_block(maybe_large_block), fc::exception ); + } + + // generate a block normally + auto good_block = db.generate_block( db.get_slot_time(1), db.get_scheduled_witness(1), key, database::skip_nothing ); + idump( (fc::raw::pack_size(good_block)) ); + } + // make sure we have tested at least once pushing a large block + BOOST_CHECK_GT( large_block_count, 0u ); + } + catch( fc::exception& e ) + { + edump((e.to_detail_string())); + throw; + } +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/confidential_tests.cpp b/tests/tests/confidential_tests.cpp index 3f47b698ee..b5046dcdfe 100644 --- a/tests/tests/confidential_tests.cpp +++ b/tests/tests/confidential_tests.cpp @@ -25,7 +25,6 @@ #include #include -#include #include #include @@ -64,7 +63,6 @@ BOOST_AUTO_TEST_CASE( confidential_test ) auto InB1 = fc::sha256::hash("InB1"); auto InB2 = fc::sha256::hash("InB2"); - auto OutB = fc::sha256::hash("InB2"); auto nonce1 = fc::sha256::hash("nonce"); auto nonce2 = fc::sha256::hash("nonce2"); @@ -79,8 +77,8 @@ BOOST_AUTO_TEST_CASE( confidential_test ) trx.operations = {to_blind}; sign( trx, dan_private_key ); - db.push_transaction(trx); - trx.signatures.clear(); + PUSH_TX(db, trx); + trx.clear_signatures(); BOOST_TEST_MESSAGE( "Transfering from blind to blind with change address" ); auto Out3B = fc::sha256::hash("Out3B"); @@ -99,7 +97,7 @@ BOOST_AUTO_TEST_CASE( confidential_test ) blind_tr.validate(); trx.operations = {blind_tr}; sign( trx, owner2_key ); - db.push_transaction(trx); + PUSH_TX(db, trx); BOOST_TEST_MESSAGE( "Attempting to double spend the same commitments" ); blind_tr.fee = core.amount(11); @@ -110,7 +108,7 @@ BOOST_AUTO_TEST_CASE( confidential_test ) out4.range_proof = fc::ecc::range_proof_sign( 0, out3.commitment, InB1, nonce1, 0, 0, 750-300-11 ); blind_tr.outputs = {out4,out3}; trx.operations = {blind_tr}; - BOOST_REQUIRE_THROW( db.push_transaction(trx, ~0), graphene::chain::blind_transfer_unknown_commitment ); + 
BOOST_REQUIRE_THROW( PUSH_TX(db, trx, ~0), graphene::chain::blind_transfer_unknown_commitment ); BOOST_TEST_MESSAGE( "Transfering from blind to nathan public" ); @@ -123,8 +121,8 @@ BOOST_AUTO_TEST_CASE( confidential_test ) from_blind.blinding_factor = Out4B; from_blind.inputs.push_back( {out4.commitment, out4.owner} ); trx.operations = {from_blind}; - trx.signatures.clear(); - db.push_transaction(trx); + trx.clear_signatures(); + PUSH_TX(db, trx); BOOST_REQUIRE_EQUAL( get_balance( nathan, core ), 750-300-10-10 ); diff --git a/tests/tests/database_api_tests.cpp b/tests/tests/database_api_tests.cpp new file mode 100644 index 0000000000..f261bc7f59 --- /dev/null +++ b/tests/tests/database_api_tests.cpp @@ -0,0 +1,974 @@ +/* + * Copyright (c) 2017 Cryptonomex, Inc., and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include + +#include + +#include + +#include +#include "../common/database_fixture.hpp" + +using namespace graphene::chain; +using namespace graphene::chain::test; + +BOOST_FIXTURE_TEST_SUITE(database_api_tests, database_fixture) + +BOOST_AUTO_TEST_CASE(is_registered) { + try { + /*** + * Arrange + */ + auto nathan_private_key = generate_private_key("nathan"); + public_key_type nathan_public = nathan_private_key.get_public_key(); + + auto dan_private_key = generate_private_key("dan"); + public_key_type dan_public = dan_private_key.get_public_key(); + + auto unregistered_private_key = generate_private_key("unregistered"); + public_key_type unregistered_public = unregistered_private_key.get_public_key(); + + + /*** + * Act + */ + create_account("dan", dan_private_key.get_public_key()); + create_account("nathan", nathan_private_key.get_public_key()); + // Unregistered key will not be registered with any account + + + /*** + * Assert + */ + graphene::app::database_api db_api(db); + + BOOST_CHECK(db_api.is_public_key_registered((string) nathan_public)); + BOOST_CHECK(db_api.is_public_key_registered((string) dan_public)); + BOOST_CHECK(!db_api.is_public_key_registered((string) unregistered_public)); + + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE( get_potential_signatures_owner_and_active ) { + try { + fc::ecc::private_key nathan_key1 = fc::ecc::private_key::regenerate(fc::digest("key1")); + fc::ecc::private_key nathan_key2 = fc::ecc::private_key::regenerate(fc::digest("key2")); + public_key_type pub_key_active( nathan_key1.get_public_key() ); + public_key_type pub_key_owner( nathan_key2.get_public_key() ); + const account_object& nathan = create_account("nathan", nathan_key1.get_public_key() ); + + try { + account_update_operation op; + op.account = nathan.id; + op.active = authority(1, pub_key_active, 1); + op.owner = authority(1, pub_key_owner, 1); + trx.operations.push_back(op); + sign(trx, nathan_key1); + PUSH_TX( db, trx, database::skip_transaction_dupe_check ); + trx.clear(); + } FC_CAPTURE_AND_RETHROW ((nathan.active)) + + // this op requires active + transfer_operation op; + op.from = nathan.id; + op.to = account_id_type(); + trx.operations.push_back(op); + + graphene::app::database_api db_api(db); + set pub_keys = db_api.get_potential_signatures( trx ); + + BOOST_CHECK( pub_keys.find( pub_key_active ) != pub_keys.end() ); + BOOST_CHECK( pub_keys.find( pub_key_owner ) != pub_keys.end() ); + + trx.operations.clear(); + + // this op requires owner + account_update_operation auop; + auop.account = nathan.id; + auop.owner = authority(1, pub_key_owner, 1); + trx.operations.push_back(auop); + + pub_keys = db_api.get_potential_signatures( trx ); + + BOOST_CHECK( pub_keys.find( pub_key_active ) == pub_keys.end() ); // active key doesn't help in this case + BOOST_CHECK( pub_keys.find( pub_key_owner ) != pub_keys.end() ); + + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE( get_potential_signatures_other ) { + try { + fc::ecc::private_key priv_key1 = fc::ecc::private_key::regenerate(fc::digest("key1")); + public_key_type pub_key1( priv_key1.get_public_key() ); + + const account_object& nathan = create_account( "nathan" ); + + balance_claim_operation op; + op.deposit_to_account = nathan.id; + op.balance_owner_key = pub_key1; + trx.operations.push_back(op); + + graphene::app::database_api db_api(db); + set pub_keys = db_api.get_potential_signatures( trx ); + + BOOST_CHECK( pub_keys.find( pub_key1 ) != pub_keys.end() ); + + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE( 
get_required_signatures_owner_or_active ) { + try { + fc::ecc::private_key nathan_key1 = fc::ecc::private_key::regenerate(fc::digest("key1")); + fc::ecc::private_key nathan_key2 = fc::ecc::private_key::regenerate(fc::digest("key2")); + public_key_type pub_key_active( nathan_key1.get_public_key() ); + public_key_type pub_key_owner( nathan_key2.get_public_key() ); + const account_object& nathan = create_account("nathan", nathan_key1.get_public_key() ); + + try { + account_update_operation op; + op.account = nathan.id; + op.active = authority(1, pub_key_active, 1); + op.owner = authority(1, pub_key_owner, 1); + trx.operations.push_back(op); + sign(trx, nathan_key1); + PUSH_TX( db, trx, database::skip_transaction_dupe_check ); + trx.clear(); + } FC_CAPTURE_AND_RETHROW ((nathan.active)) + + graphene::app::database_api db_api(db); + + // prepare available keys sets + flat_set avail_keys1, avail_keys2, avail_keys3; + avail_keys1.insert( pub_key_active ); + avail_keys2.insert( pub_key_owner ); + avail_keys3.insert( pub_key_active ); + avail_keys3.insert( pub_key_owner ); + + set pub_keys; + + // this op requires active + transfer_operation op; + op.from = nathan.id; + op.to = account_id_type(); + trx.operations.push_back(op); + + // provides active, should be ok + pub_keys = db_api.get_required_signatures( trx, avail_keys1 ); + BOOST_CHECK( pub_keys.find( pub_key_active ) != pub_keys.end() ); + + // provides owner, should be ok + pub_keys = db_api.get_required_signatures( trx, avail_keys2 ); + BOOST_CHECK( pub_keys.find( pub_key_owner ) != pub_keys.end() ); + + // provides both active and owner, should return one of them + pub_keys = db_api.get_required_signatures( trx, avail_keys3 ); + BOOST_CHECK( pub_keys.size() == 1 ); + BOOST_CHECK( pub_keys.find( pub_key_active ) != pub_keys.end() || pub_keys.find( pub_key_owner ) != pub_keys.end() ); + + trx.operations.clear(); + + // this op requires owner + account_update_operation auop; + auop.account = nathan.id; + auop.owner = authority(1, pub_key_owner, 1); + trx.operations.push_back(auop); + + // provides active, should return an empty set + pub_keys = db_api.get_required_signatures( trx, avail_keys1 ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides owner, should return it + pub_keys = db_api.get_required_signatures( trx, avail_keys2 ); + BOOST_CHECK( pub_keys.find( pub_key_owner ) != pub_keys.end() ); + + // provides both active and owner, should return owner only + pub_keys = db_api.get_required_signatures( trx, avail_keys3 ); + BOOST_CHECK( pub_keys.find( pub_key_active ) == pub_keys.end() ); + BOOST_CHECK( pub_keys.find( pub_key_owner ) != pub_keys.end() ); + + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE( get_required_signatures_partially_signed_or_not ) { + try { + fc::ecc::private_key morgan_key = fc::ecc::private_key::regenerate(fc::digest("morgan_key")); + fc::ecc::private_key nathan_key = fc::ecc::private_key::regenerate(fc::digest("nathan_key")); + fc::ecc::private_key oliver_key = fc::ecc::private_key::regenerate(fc::digest("oliver_key")); + public_key_type pub_key_morgan( morgan_key.get_public_key() ); + public_key_type pub_key_nathan( nathan_key.get_public_key() ); + public_key_type pub_key_oliver( oliver_key.get_public_key() ); + const account_object& morgan = create_account("morgan", morgan_key.get_public_key() ); + const account_object& nathan = create_account("nathan", nathan_key.get_public_key() ); + const account_object& oliver = create_account("oliver", oliver_key.get_public_key() ); + + graphene::app::database_api 
db_api(db); + + // prepare available keys sets + flat_set avail_keys_empty, avail_keys_m, avail_keys_n, avail_keys_o; + flat_set avail_keys_mn, avail_keys_mo, avail_keys_no, avail_keys_mno; + avail_keys_m.insert( pub_key_morgan ); + avail_keys_mn.insert( pub_key_morgan ); + avail_keys_mo.insert( pub_key_morgan ); + avail_keys_mno.insert( pub_key_morgan ); + avail_keys_n.insert( pub_key_nathan ); + avail_keys_mn.insert( pub_key_nathan ); + avail_keys_no.insert( pub_key_nathan ); + avail_keys_mno.insert( pub_key_nathan ); + avail_keys_o.insert( pub_key_oliver ); + avail_keys_mo.insert( pub_key_oliver ); + avail_keys_no.insert( pub_key_oliver ); + avail_keys_mno.insert( pub_key_oliver ); + + // result set + set pub_keys; + + // make a transaction that require 1 signature (m) + transfer_operation op; + op.from = morgan.id; + op.to = oliver.id; + trx.operations.push_back(op); + + // provides [], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_empty ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [m], should return [m] + pub_keys = db_api.get_required_signatures( trx, avail_keys_m ); + BOOST_CHECK( pub_keys.size() == 1 ); + BOOST_CHECK( pub_keys.find( pub_key_morgan ) != pub_keys.end() ); + + // provides [n], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_n ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [m,n], should return [m] + pub_keys = db_api.get_required_signatures( trx, avail_keys_mn ); + BOOST_CHECK( pub_keys.size() == 1 ); + BOOST_CHECK( pub_keys.find( pub_key_morgan ) != pub_keys.end() ); + + // sign with n, but actually need m + sign(trx, nathan_key); + + // provides [], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_empty ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [m], should return [m] + pub_keys = db_api.get_required_signatures( trx, avail_keys_m ); + BOOST_CHECK( pub_keys.size() == 1 ); + BOOST_CHECK( pub_keys.find( pub_key_morgan ) != pub_keys.end() ); + + // provides [n], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_n ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [o], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_o ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [m,n], should return [m] + pub_keys = db_api.get_required_signatures( trx, avail_keys_mn ); + BOOST_CHECK( pub_keys.size() == 1 ); + BOOST_CHECK( pub_keys.find( pub_key_morgan ) != pub_keys.end() ); + + // provides [m,o], should return [m] + pub_keys = db_api.get_required_signatures( trx, avail_keys_mo ); + BOOST_CHECK( pub_keys.size() == 1 ); + BOOST_CHECK( pub_keys.find( pub_key_morgan ) != pub_keys.end() ); + + // provides [n,o], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_no ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [m,n,o], should return [m] + pub_keys = db_api.get_required_signatures( trx, avail_keys_mno ); + BOOST_CHECK( pub_keys.size() == 1 ); + BOOST_CHECK( pub_keys.find( pub_key_morgan ) != pub_keys.end() ); + + // sign with m, should be enough + trx.clear_signatures(); + sign(trx, morgan_key); + + // provides [], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_empty ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [m], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_m ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [n], should return [] + pub_keys = 
db_api.get_required_signatures( trx, avail_keys_n ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [m,n], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_mn ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // sign with m+n, although m only should be enough, this API won't complain + sign(trx, nathan_key); + + // provides [], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_empty ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [m], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_m ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [n], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_n ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [o], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_o ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [m,n], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_mn ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [m,o], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_mo ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [n,o], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_no ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [m,n,o], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_mno ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // make a transaction that require 2 signatures (m+n) + trx.clear_signatures(); + op.from = nathan.id; + trx.operations.push_back(op); + + // provides [], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_empty ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [m], should return [m] + pub_keys = db_api.get_required_signatures( trx, avail_keys_m ); + BOOST_CHECK( pub_keys.size() == 1 ); + BOOST_CHECK( pub_keys.find( pub_key_morgan ) != pub_keys.end() ); + + // provides [n], should return [n] + pub_keys = db_api.get_required_signatures( trx, avail_keys_n ); + BOOST_CHECK( pub_keys.size() == 1 ); + BOOST_CHECK( pub_keys.find( pub_key_nathan ) != pub_keys.end() ); + + // provides [o], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_o ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [m,n], should return [m,n] + pub_keys = db_api.get_required_signatures( trx, avail_keys_mn ); + BOOST_CHECK( pub_keys.size() == 2 ); + BOOST_CHECK( pub_keys.find( pub_key_morgan ) != pub_keys.end() ); + BOOST_CHECK( pub_keys.find( pub_key_nathan ) != pub_keys.end() ); + + // provides [m,o], should return [m] + pub_keys = db_api.get_required_signatures( trx, avail_keys_mo ); + BOOST_CHECK( pub_keys.size() == 1 ); + BOOST_CHECK( pub_keys.find( pub_key_morgan ) != pub_keys.end() ); + + // provides [n,o], should return [n] + pub_keys = db_api.get_required_signatures( trx, avail_keys_no ); + BOOST_CHECK( pub_keys.size() == 1 ); + BOOST_CHECK( pub_keys.find( pub_key_nathan ) != pub_keys.end() ); + + // provides [m,n,o], should return [m,n] + pub_keys = db_api.get_required_signatures( trx, avail_keys_mno ); + BOOST_CHECK( pub_keys.size() == 2 ); + BOOST_CHECK( pub_keys.find( pub_key_morgan ) != pub_keys.end() ); + BOOST_CHECK( pub_keys.find( pub_key_nathan ) != pub_keys.end() ); + + // sign with o, but actually need m+n + sign(trx, oliver_key); + + // provides [], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_empty ); + BOOST_CHECK( 
pub_keys.size() == 0 ); + + // provides [m], should return [m] + pub_keys = db_api.get_required_signatures( trx, avail_keys_m ); + BOOST_CHECK( pub_keys.size() == 1 ); + BOOST_CHECK( pub_keys.find( pub_key_morgan ) != pub_keys.end() ); + + // provides [n], should return [n] + pub_keys = db_api.get_required_signatures( trx, avail_keys_n ); + BOOST_CHECK( pub_keys.size() == 1 ); + BOOST_CHECK( pub_keys.find( pub_key_nathan ) != pub_keys.end() ); + + // provides [o], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_o ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [m,n], should return [m,n] + pub_keys = db_api.get_required_signatures( trx, avail_keys_mn ); + BOOST_CHECK( pub_keys.size() == 2 ); + BOOST_CHECK( pub_keys.find( pub_key_morgan ) != pub_keys.end() ); + BOOST_CHECK( pub_keys.find( pub_key_nathan ) != pub_keys.end() ); + + // provides [m,o], should return [m] + pub_keys = db_api.get_required_signatures( trx, avail_keys_mo ); + BOOST_CHECK( pub_keys.size() == 1 ); + BOOST_CHECK( pub_keys.find( pub_key_morgan ) != pub_keys.end() ); + + // provides [n,o], should return [n] + pub_keys = db_api.get_required_signatures( trx, avail_keys_no ); + BOOST_CHECK( pub_keys.size() == 1 ); + BOOST_CHECK( pub_keys.find( pub_key_nathan ) != pub_keys.end() ); + + // provides [m,n,o], should return [m,n] + pub_keys = db_api.get_required_signatures( trx, avail_keys_mno ); + BOOST_CHECK( pub_keys.size() == 2 ); + BOOST_CHECK( pub_keys.find( pub_key_morgan ) != pub_keys.end() ); + BOOST_CHECK( pub_keys.find( pub_key_nathan ) != pub_keys.end() ); + + // sign with m+o, but actually need m+n + sign(trx, morgan_key); + + // provides [], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_empty ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [m], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_m ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [n], should return [n] + pub_keys = db_api.get_required_signatures( trx, avail_keys_n ); + BOOST_CHECK( pub_keys.size() == 1 ); + BOOST_CHECK( pub_keys.find( pub_key_nathan ) != pub_keys.end() ); + + // provides [o], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_o ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [m,n], should return [n] + pub_keys = db_api.get_required_signatures( trx, avail_keys_mn ); + BOOST_CHECK( pub_keys.size() == 1 ); + BOOST_CHECK( pub_keys.find( pub_key_nathan ) != pub_keys.end() ); + + // provides [m,o], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_mo ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [n,o], should return [n] + pub_keys = db_api.get_required_signatures( trx, avail_keys_no ); + BOOST_CHECK( pub_keys.size() == 1 ); + BOOST_CHECK( pub_keys.find( pub_key_nathan ) != pub_keys.end() ); + + // provides [m,n,o], should return [n] + pub_keys = db_api.get_required_signatures( trx, avail_keys_mno ); + BOOST_CHECK( pub_keys.size() == 1 ); + BOOST_CHECK( pub_keys.find( pub_key_nathan ) != pub_keys.end() ); + + // sign with m, but actually need m+n + trx.clear_signatures(); + sign(trx, morgan_key); + + // provides [], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_empty ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [m], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_m ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [n], should return [n] + pub_keys = 
db_api.get_required_signatures( trx, avail_keys_n ); + BOOST_CHECK( pub_keys.size() == 1 ); + BOOST_CHECK( pub_keys.find( pub_key_nathan ) != pub_keys.end() ); + + // provides [o], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_o ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [m,n], should return [n] + pub_keys = db_api.get_required_signatures( trx, avail_keys_mn ); + BOOST_CHECK( pub_keys.size() == 1 ); + BOOST_CHECK( pub_keys.find( pub_key_nathan ) != pub_keys.end() ); + + // provides [m,o], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_mo ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [n,o], should return [n] + pub_keys = db_api.get_required_signatures( trx, avail_keys_no ); + BOOST_CHECK( pub_keys.size() == 1 ); + BOOST_CHECK( pub_keys.find( pub_key_nathan ) != pub_keys.end() ); + + // provides [m,n,o], should return [n] + pub_keys = db_api.get_required_signatures( trx, avail_keys_mno ); + BOOST_CHECK( pub_keys.size() == 1 ); + BOOST_CHECK( pub_keys.find( pub_key_nathan ) != pub_keys.end() ); + + // sign with m+n, should be enough + sign(trx, nathan_key); + + // provides [], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_empty ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [m], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_m ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [n], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_n ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [o], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_o ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [m,n], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_mn ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [m,o], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_mo ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [n,o], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_no ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [m,n,o], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_mno ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // sign with m+n+o, should be enough as well + sign(trx, oliver_key); + + // provides [], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_empty ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [m], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_m ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [n], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_n ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [o], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_o ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [m,n], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_mn ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [m,o], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_mo ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [n,o], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_no ); + BOOST_CHECK( pub_keys.size() == 0 ); + + // provides [m,n,o], should return [] + pub_keys = db_api.get_required_signatures( trx, avail_keys_mno ); + BOOST_CHECK( pub_keys.size() == 
0 ); + + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE( set_subscribe_callback_disable_notify_all_test ) { + try { + ACTORS( (alice) ); + + uint32_t objects_changed1 = 0; + uint32_t objects_changed2 = 0; + uint32_t objects_changed3 = 0; + auto callback1 = [&]( const variant& v ) + { + ++objects_changed1; + }; + auto callback2 = [&]( const variant& v ) + { + ++objects_changed2; + }; + auto callback3 = [&]( const variant& v ) + { + ++objects_changed3; + }; + + uint32_t expected_objects_changed1 = 0; + uint32_t expected_objects_changed2 = 0; + uint32_t expected_objects_changed3 = 0; + + graphene::app::database_api db_api1(db); + + // subscribing to all should fail + BOOST_CHECK_THROW( db_api1.set_subscribe_callback( callback1, true ), fc::exception ); + + db_api1.set_subscribe_callback( callback1, false ); + + graphene::app::application_options opt; + opt.enable_subscribe_to_all = true; + + graphene::app::database_api db_api2( db, &opt ); + db_api2.set_subscribe_callback( callback2, true ); + + graphene::app::database_api db_api3( db, &opt ); + db_api3.set_subscribe_callback( callback3, false ); + + vector ids; + ids.push_back( alice_id ); + + db_api1.get_objects( ids ); // db_api1 subscribe to Alice + db_api2.get_objects( ids ); // db_api2 subscribe to Alice + + generate_block(); + ++expected_objects_changed2; // subscribed to all, notify block changes + + transfer( account_id_type(), alice_id, asset(1) ); + generate_block(); + ++expected_objects_changed1; // subscribed to Alice, notify Alice balance change + ++expected_objects_changed2; // subscribed to all, notify block changes + + fc::usleep(fc::milliseconds(200)); // sleep a while to execute callback in another thread + + BOOST_CHECK_EQUAL( expected_objects_changed1, objects_changed1 ); + BOOST_CHECK_EQUAL( expected_objects_changed2, objects_changed2 ); + BOOST_CHECK_EQUAL( expected_objects_changed3, objects_changed3 ); + + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE( lookup_vote_ids ) +{ try { + ACTORS( (connie)(whitney)(wolverine) ); + + fund(connie); + upgrade_to_lifetime_member(connie); + fund(whitney); + upgrade_to_lifetime_member(whitney); + fund(wolverine); + upgrade_to_lifetime_member(wolverine); + + const auto& committee = create_committee_member( connie ); + const auto& witness = create_witness( whitney ); + const auto& worker = create_worker( wolverine_id ); + + graphene::app::database_api db_api(db); + + std::vector votes; + votes.push_back( committee.vote_id ); + votes.push_back( witness.vote_id ); + votes.push_back( worker.vote_for ); + + const auto results = db_api.lookup_vote_ids( votes ); + +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_CASE(get_account_limit_orders) +{ try { + + ACTORS((seller)); + + const auto& bitcny = create_bitasset("CNY"); + const auto& core = asset_id_type()(db); + + int64_t init_balance(10000000); + transfer(committee_account, seller_id, asset(init_balance)); + BOOST_CHECK_EQUAL( 10000000, get_balance(seller, core) ); + + /// Create 250 versatile orders + for (size_t i = 0 ; i < 50 ; ++i) + { + BOOST_CHECK(create_sell_order(seller, core.amount(100), bitcny.amount(250))); + } + + for (size_t i = 1 ; i < 101 ; ++i) + { + BOOST_CHECK(create_sell_order(seller, core.amount(100), bitcny.amount(250 + i))); + BOOST_CHECK(create_sell_order(seller, core.amount(100), bitcny.amount(250 - i))); + } + + graphene::app::database_api db_api(db); + std::vector results; + limit_order_object o; + + // query with no constraint, expected: + // 1. up to 101 orders returned + // 2. 
orders are sorted by price in descending order
+ results = db_api.get_account_limit_orders(seller.name, GRAPHENE_SYMBOL, "CNY");
+ BOOST_CHECK(results.size() == 101);
+ for (size_t i = 0 ; i < results.size() - 1 ; ++i)
+ {
+ BOOST_CHECK(results[i].sell_price >= results[i+1].sell_price);
+ }
+ BOOST_CHECK(results.front().sell_price == price(core.amount(100), bitcny.amount(150)));
+ BOOST_CHECK(results.back().sell_price == price(core.amount(100), bitcny.amount(250)));
+ results.clear();
+
+ // query with specified limit, expected:
+ // 1. up to the specified number of orders returned
+ // 2. orders are sorted by price in descending order
+ results = db_api.get_account_limit_orders(seller.name, GRAPHENE_SYMBOL, "CNY", 50);
+ BOOST_CHECK(results.size() == 50);
+ for (size_t i = 0 ; i < results.size() - 1 ; ++i)
+ {
+ BOOST_CHECK(results[i].sell_price >= results[i+1].sell_price);
+ }
+ BOOST_CHECK(results.front().sell_price == price(core.amount(100), bitcny.amount(150)));
+ BOOST_CHECK(results.back().sell_price == price(core.amount(100), bitcny.amount(199)));
+
+ o = results.back();
+ results.clear();
+
+ // query with specified order id and limit, expected:
+ // same as before, but also the first order's id equals the specified one
+ results = db_api.get_account_limit_orders(seller.name, GRAPHENE_SYMBOL, "CNY", 80,
+ limit_order_id_type(o.id));
+ BOOST_CHECK(results.size() == 80);
+ BOOST_CHECK(results.front().id == o.id);
+ for (size_t i = 0 ; i < results.size() - 1 ; ++i)
+ {
+ BOOST_CHECK(results[i].sell_price >= results[i+1].sell_price);
+ }
+ BOOST_CHECK(results.front().sell_price == price(core.amount(100), bitcny.amount(199)));
+ BOOST_CHECK(results.back().sell_price == price(core.amount(100), bitcny.amount(250)));
+
+ o = results.back();
+ results.clear();
+
+ // query with a specified price and a nonexistent order id, expected:
+ // 1. the canceled order should not exist in the returned orders and the first order's
+ // id should be greater than the specified one
+ // 2. returned orders are sorted by price in descending order
+ // 3.
the first order's sell price equal to specified + cancel_limit_order(o); // NOTE 1: this canceled order was in scope of the + // first created 50 orders, so with price 2.5 BTS/CNY + results = db_api.get_account_limit_orders(seller.name, GRAPHENE_SYMBOL, "CNY", 50, + limit_order_id_type(o.id), o.sell_price); + BOOST_CHECK(results.size() == 50); + BOOST_CHECK(results.front().id > o.id); + // NOTE 2: because of NOTE 1, here should be equal + BOOST_CHECK(results.front().sell_price == o.sell_price); + for (size_t i = 0 ; i < results.size() - 1 ; ++i) + { + BOOST_CHECK(results[i].sell_price >= results[i+1].sell_price); + } + BOOST_CHECK(results.front().sell_price == price(core.amount(100), bitcny.amount(250))); + BOOST_CHECK(results.back().sell_price == price(core.amount(100), bitcny.amount(279))); + + o = results.back(); + results.clear(); + + cancel_limit_order(o); // NOTE 3: this time the canceled order was in scope + // of the lowest price 150 orders + results = db_api.get_account_limit_orders(seller.name, GRAPHENE_SYMBOL, "CNY", 101, + limit_order_id_type(o.id), o.sell_price); + BOOST_CHECK(results.size() == 71); + BOOST_CHECK(results.front().id > o.id); + // NOTE 3: because of NOTE 1, here should be little than + BOOST_CHECK(results.front().sell_price < o.sell_price); + for (size_t i = 0 ; i < results.size() - 1 ; ++i) + { + BOOST_CHECK(results[i].sell_price >= results[i+1].sell_price); + } + BOOST_CHECK(results.front().sell_price == price(core.amount(100), bitcny.amount(280))); + BOOST_CHECK(results.back().sell_price == price(core.amount(100), bitcny.amount(350))); + + BOOST_CHECK_THROW(db_api.get_account_limit_orders(seller.name, GRAPHENE_SYMBOL, "CNY", 101, + limit_order_id_type(o.id)), fc::exception); + +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_CASE( get_transaction_hex ) +{ try { + graphene::app::database_api db_api(db); + auto test_private_key = generate_private_key("testaccount"); + public_key_type test_public = test_private_key.get_public_key(); + + trx.operations.push_back(make_account("testaccount", test_public)); + trx.validate(); + + // case1: not signed, get hex + std::string hex_str = fc::to_hex( fc::raw::pack( trx ) ); + + BOOST_CHECK( db_api.get_transaction_hex( trx ) == hex_str ); + BOOST_CHECK( db_api.get_transaction_hex_without_sig( trx ) + "00" == hex_str ); + + // case2: signed, get hex + sign( trx, test_private_key ); + hex_str = fc::to_hex( fc::raw::pack( trx ) ); + + BOOST_CHECK( db_api.get_transaction_hex( trx ) == hex_str ); + BOOST_CHECK( db_api.get_transaction_hex_without_sig( trx ) + + fc::to_hex( fc::raw::pack( trx.signatures ) ) == hex_str ); + +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_CASE(verify_account_authority) +{ + try { + + ACTORS( (nathan) ); + graphene::app::database_api db_api(db); + + // good keys + flat_set public_keys; + public_keys.emplace(nathan_public_key); + BOOST_CHECK(db_api.verify_account_authority( "nathan", public_keys)); + + // bad keys + flat_set bad_public_keys; + bad_public_keys.emplace(public_key_type(GRAPHENE_ADDRESS_PREFIX "6MkMxwBjFWmcDjXRoJ4mW9Hd4LCSPwtv9tKG1qYW5Kgu4AhoZy")); + BOOST_CHECK(!db_api.verify_account_authority( "nathan", bad_public_keys)); + + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE( any_two_of_three ) +{ + try { + fc::ecc::private_key nathan_key1 = fc::ecc::private_key::regenerate(fc::digest("key1")); + fc::ecc::private_key nathan_key2 = fc::ecc::private_key::regenerate(fc::digest("key2")); + fc::ecc::private_key nathan_key3 = fc::ecc::private_key::regenerate(fc::digest("key3")); + const 
account_object& nathan = create_account("nathan", nathan_key1.get_public_key() ); + fund(nathan); + graphene::app::database_api db_api(db); + + try { + account_update_operation op; + op.account = nathan.id; + op.active = authority(2, public_key_type(nathan_key1.get_public_key()), 1, + public_key_type(nathan_key2.get_public_key()), 1, public_key_type(nathan_key3.get_public_key()), 1); + op.owner = *op.active; + trx.operations.push_back(op); + sign(trx, nathan_key1); + PUSH_TX( db, trx, database::skip_transaction_dupe_check ); + trx.clear(); + } FC_CAPTURE_AND_RETHROW ((nathan.active)) + + // two keys should work + { + flat_set public_keys; + public_keys.emplace(nathan_key1.get_public_key()); + public_keys.emplace(nathan_key2.get_public_key()); + BOOST_CHECK(db_api.verify_account_authority("nathan", public_keys)); + } + + // the other two keys should work + { + flat_set public_keys; + public_keys.emplace(nathan_key2.get_public_key()); + public_keys.emplace(nathan_key3.get_public_key()); + BOOST_CHECK(db_api.verify_account_authority("nathan", public_keys)); + } + + // just one key should not work + { + flat_set public_keys; + public_keys.emplace(nathan_key1.get_public_key()); + BOOST_CHECK(!db_api.verify_account_authority("nathan", public_keys)); + } + } catch (fc::exception& e) { + edump((e.to_detail_string())); + throw; + } +} + +BOOST_AUTO_TEST_CASE( verify_authority_multiple_accounts ) +{ + try { + ACTORS( (nathan) (alice) (bob) ); + + graphene::app::database_api db_api(db); + + try { + account_update_operation op; + op.account = nathan.id; + op.active = authority(3, nathan_public_key, 1, alice.id, 1, bob.id, 1); + op.owner = *op.active; + trx.operations.push_back(op); + sign(trx, nathan_private_key); + PUSH_TX( db, trx, database::skip_transaction_dupe_check ); + trx.clear(); + } FC_CAPTURE_AND_RETHROW ((nathan.active)) + + // requires 3 signatures + { + flat_set public_keys; + public_keys.emplace(nathan_public_key); + public_keys.emplace(alice_public_key); + public_keys.emplace(bob_public_key); + BOOST_CHECK(db_api.verify_account_authority("nathan", public_keys)); + } + + // only 2 signatures given + { + flat_set public_keys; + public_keys.emplace(nathan_public_key); + public_keys.emplace(bob_public_key); + BOOST_CHECK(!db_api.verify_account_authority("nathan", public_keys)); + } + } catch (fc::exception& e) { + edump((e.to_detail_string())); + throw; + } +} +BOOST_AUTO_TEST_CASE( api_limit_get_key_references ){ + try{ + const int num_keys = 210; + const int num_keys1 = 2; + vector< private_key_type > numbered_private_keys; + vector< public_key_type > numbered_key_id; + numbered_private_keys.reserve( num_keys ); + graphene::app::database_api db_api( db, &( app.get_options() )); + for( int i=0; i > final_result=db_api.get_key_references(numbered_key_id); + BOOST_REQUIRE_EQUAL( final_result.size(), 2u ); + numbered_private_keys.reserve( num_keys ); + for( int i=num_keys1; i([](account_balance_object& obj) { + obj.owner = account_id_type(123); + }); + account_balance_id_type obj_id = obj.id; + BOOST_CHECK_EQUAL(obj.owner.instance.value, 123u); + + // Modify dummy object, check that changes stick + db.modify(obj, [](account_balance_object& obj) { + obj.owner = account_id_type(234); + }); + BOOST_CHECK_EQUAL(obj_id(db).owner.instance.value, 234u); + + // Throw exception when modifying object, check that object still exists after + BOOST_CHECK_THROW(db.modify(obj, [](account_balance_object& obj) { + throw 5; + }), int); + BOOST_CHECK_NE((long)db.find_object(obj_id), (long)nullptr); +} 
FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_CASE( flat_index_test ) +{ try { + ACTORS((sam)); + const auto& bitusd = create_bitasset("USDBIT", sam.id); + const asset_id_type bitusd_id = bitusd.id; + update_feed_producers(bitusd, {sam.id}); + price_feed current_feed; + current_feed.settlement_price = bitusd.amount(100) / asset(100); + publish_feed(bitusd, sam, current_feed); + BOOST_CHECK_EQUAL( (int)bitusd.bitasset_data_id->instance, 1 ); + BOOST_CHECK( !(*bitusd.bitasset_data_id)(db).current_feed.settlement_price.is_null() ); + try { + auto ses = db._undo_db.start_undo_session(); + const auto& obj1 = db.create( [&]( asset_bitasset_data_object& obj ){ + obj.settlement_fund = 17; + }); + BOOST_REQUIRE_EQUAL( obj1.settlement_fund.value, 17 ); + throw std::string("Expected"); + // With flat_index, obj1 will not really be removed from the index + } catch ( const std::string& e ) + { // ignore + } + + // force maintenance + const auto& dynamic_global_props = db.get(dynamic_global_property_id_type()); + generate_blocks(dynamic_global_props.next_maintenance_time, true); + + BOOST_CHECK( !(*bitusd_id(db).bitasset_data_id)(db).current_feed.settlement_price.is_null() ); +} FC_CAPTURE_AND_RETHROW() } + +BOOST_AUTO_TEST_CASE( merge_test ) +{ + try { + database db; + auto ses = db._undo_db.start_undo_session(); + db.create( [&]( account_balance_object& obj ){ + obj.balance = 42; + }); + ses.merge(); + + auto balance = db.get_balance( account_id_type(), asset_id_type() ); + BOOST_CHECK_EQUAL( 42, balance.amount.value ); + } catch ( const fc::exception& e ) + { + edump( (e.to_detail_string()) ); + throw; + } +} + +BOOST_AUTO_TEST_CASE( direct_index_test ) +{ try { + try { + const graphene::db::primary_index< account_index, 6 > small_chunkbits( db ); + BOOST_FAIL( "Expected assertion failure!" 
); + } catch( const fc::assert_exception& expected ) {} + + graphene::db::primary_index< account_index, 8 > my_accounts( db ); + const auto& direct = my_accounts.get_secondary_index>(); + BOOST_CHECK_EQUAL( 0u, my_accounts.indices().size() ); + BOOST_CHECK( nullptr == direct.find( account_id_type( 1 ) ) ); + // BOOST_CHECK_THROW( direct.find( asset_id_type( 1 ) ), fc::assert_exception ); // compile-time error + BOOST_CHECK_THROW( direct.find( object_id_type( asset_id_type( 1 ) ) ), fc::assert_exception ); + BOOST_CHECK_THROW( direct.get( account_id_type( 1 ) ), fc::assert_exception ); + + account_object test_account; + test_account.id = account_id_type(1); + test_account.name = "account1"; + + my_accounts.load( fc::raw::pack( test_account ) ); + + BOOST_CHECK_EQUAL( 1u, my_accounts.indices().size() ); + BOOST_CHECK( nullptr == direct.find( account_id_type( 0 ) ) ); + BOOST_CHECK( nullptr == direct.find( account_id_type( 2 ) ) ); + BOOST_CHECK( nullptr != direct.find( account_id_type( 1 ) ) ); + BOOST_CHECK_EQUAL( test_account.name, direct.get( test_account.id ).name ); + + // The following assumes that MAX_HOLE = 100 + test_account.id = account_id_type(102); + test_account.name = "account102"; + // highest insert was 1, direct.next is 2 => 102 is highest allowed instance + my_accounts.load( fc::raw::pack( test_account ) ); + BOOST_CHECK_EQUAL( test_account.name, direct.get( test_account.id ).name ); + + // direct.next is now 103, but index sequence counter is 0 + my_accounts.create( [] ( object& o ) { + account_object& acct = dynamic_cast< account_object& >( o ); + BOOST_CHECK_EQUAL( 0u, acct.id.instance() ); + acct.name = "account0"; + } ); + + test_account.id = account_id_type(50); + test_account.name = "account50"; + my_accounts.load( fc::raw::pack( test_account ) ); + + // can handle nested modification + my_accounts.modify( direct.get( account_id_type(0) ), [&direct,&my_accounts] ( object& outer ) { + account_object& _outer = dynamic_cast< account_object& >( outer ); + my_accounts.modify( direct.get( account_id_type(50) ), [] ( object& inner ) { + account_object& _inner = dynamic_cast< account_object& >( inner ); + _inner.referrer = account_id_type(102); + }); + _outer.options.voting_account = GRAPHENE_PROXY_TO_SELF_ACCOUNT; + }); + + // direct.next is still 103, so 204 is not allowed + test_account.id = account_id_type(204); + test_account.name = "account204"; + GRAPHENE_REQUIRE_THROW( my_accounts.load( fc::raw::pack( test_account ) ), fc::assert_exception ); + // This is actually undefined behaviour. The object has been inserted into + // the primary index, but the secondary has refused to insert it! + BOOST_CHECK_EQUAL( 5u, my_accounts.indices().size() ); + + uint32_t count = 0; + for( uint32_t i = 0; i < 250; i++ ) + { + const account_object* aptr = dynamic_cast< const account_object* >( my_accounts.find( account_id_type( i ) ) ); + if( aptr ) + { + count++; + BOOST_CHECK( aptr->id.instance() == 0 || aptr->id.instance() == 1 + || aptr->id.instance() == 50 || aptr->id.instance() == 102 ); + BOOST_CHECK_EQUAL( i, aptr->id.instance() ); + BOOST_CHECK_EQUAL( "account" + std::to_string( i ), aptr->name ); + } + } + BOOST_CHECK_EQUAL( count, my_accounts.indices().size() - 1 ); + + GRAPHENE_REQUIRE_THROW( my_accounts.modify( direct.get( account_id_type( 1 ) ), [] ( object& acct ) { + acct.id = account_id_type(2); + }), fc::assert_exception ); + // This is actually undefined behaviour. 
The object has been modified, but + // but the secondary has not updated its representation +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/fee_tests.cpp b/tests/tests/fee_tests.cpp index d6f261709f..5d4d12ca05 100644 --- a/tests/tests/fee_tests.cpp +++ b/tests/tests/fee_tests.cpp @@ -22,7 +22,6 @@ * THE SOFTWARE. */ -#include #include #include @@ -31,6 +30,7 @@ #include #include +#include #include #include @@ -155,7 +155,7 @@ BOOST_AUTO_TEST_CASE(asset_claim_fees_test) fc::ecc::private_key your_pk = (issuer == izzy_id) ? jill_private_key : izzy_private_key; sign( tx, your_pk ); GRAPHENE_REQUIRE_THROW( PUSH_TX( db, tx ), fc::exception ); - tx.signatures.clear(); + tx.clear_signatures(); sign( tx, my_pk ); PUSH_TX( db, tx ); }; @@ -212,6 +212,132 @@ BOOST_AUTO_TEST_CASE(asset_claim_fees_test) FC_LOG_AND_RETHROW() } +BOOST_AUTO_TEST_CASE(asset_claim_pool_test) +{ + try + { + ACTORS((alice)(bob)); + // Alice and Bob create some user issued assets + // Alice deposits BTS to the fee pool + // Alice claimes fee pool of her asset and can't claim pool of Bob's asset + + const share_type core_prec = asset::scaled_precision( asset_id_type()(db).precision ); + + // return number of core shares (times precision) + auto _core = [&core_prec]( int64_t x ) -> asset + { return asset( x*core_prec ); }; + + const asset_object& alicecoin = create_user_issued_asset( "ALICECOIN", alice, 0 ); + const asset_object& aliceusd = create_user_issued_asset( "ALICEUSD", alice, 0 ); + + asset_id_type alicecoin_id = alicecoin.id; + asset_id_type aliceusd_id = aliceusd.id; + asset_id_type bobcoin_id = create_user_issued_asset( "BOBCOIN", bob, 0).id; + + // prepare users' balance + issue_uia( alice, aliceusd.amount( 20000000 ) ); + issue_uia( alice, alicecoin.amount( 10000000 ) ); + + transfer( committee_account, alice_id, _core(1000) ); + transfer( committee_account, bob_id, _core(1000) ); + + enable_fees(); + + auto claim_pool = [&]( const account_id_type issuer, const asset_id_type asset_to_claim, + const asset& amount_to_fund, const asset_object& fee_asset ) + { + asset_claim_pool_operation claim_op; + claim_op.issuer = issuer; + claim_op.asset_id = asset_to_claim; + claim_op.amount_to_claim = amount_to_fund; + + signed_transaction tx; + tx.operations.push_back( claim_op ); + db.current_fee_schedule().set_fee( tx.operations.back(), fee_asset.options.core_exchange_rate ); + set_expiration( db, tx ); + sign( tx, alice_private_key ); + PUSH_TX( db, tx ); + + }; + + auto claim_pool_proposal = [&]( const account_id_type issuer, const asset_id_type asset_to_claim, + const asset& amount_to_fund, const asset_object& fee_asset ) + { + asset_claim_pool_operation claim_op; + claim_op.issuer = issuer; + claim_op.asset_id = asset_to_claim; + claim_op.amount_to_claim = amount_to_fund; + + const auto& curfees = *db.get_global_properties().parameters.current_fees; + const auto& proposal_create_fees = curfees.get(); + proposal_create_operation prop; + prop.fee_paying_account = alice_id; + prop.proposed_ops.emplace_back( claim_op ); + prop.expiration_time = db.head_block_time() + fc::days(1); + prop.fee = asset( proposal_create_fees.fee + proposal_create_fees.price_per_kbyte ); + + signed_transaction tx; + tx.operations.push_back( prop ); + db.current_fee_schedule().set_fee( tx.operations.back(), fee_asset.options.core_exchange_rate ); + set_expiration( db, tx ); + sign( tx, alice_private_key ); + PUSH_TX( db, tx ); + + }; + + const asset_object& core_asset = asset_id_type()(db); + + // deposit 100 BTS 
to the fee pool of ALICEUSD asset + fund_fee_pool( alice_id(db), aliceusd_id(db), _core(100).amount ); + + // Unable to claim pool before the hardfork + GRAPHENE_REQUIRE_THROW( claim_pool( alice_id, aliceusd_id, _core(1), core_asset), fc::exception ); + GRAPHENE_REQUIRE_THROW( claim_pool_proposal( alice_id, aliceusd_id, _core(1), core_asset), fc::exception ); + + // Fast forward to hard fork date + generate_blocks( HARDFORK_CORE_188_TIME ); + + // New reference for core_asset after having produced blocks + const asset_object& core_asset_hf = asset_id_type()(db); + + // can't claim pool because it is empty + GRAPHENE_REQUIRE_THROW( claim_pool( alice_id, alicecoin_id, _core(1), core_asset_hf), fc::exception ); + + // deposit 300 BTS to the fee pool of ALICECOIN asset + fund_fee_pool( alice_id(db), alicecoin_id(db), _core(300).amount ); + + // Test amount of CORE in fee pools + BOOST_CHECK( alicecoin_id(db).dynamic_asset_data_id(db).fee_pool == _core(300).amount ); + BOOST_CHECK( aliceusd_id(db).dynamic_asset_data_id(db).fee_pool == _core(100).amount ); + + // can't claim pool of an asset that doesn't belong to you + GRAPHENE_REQUIRE_THROW( claim_pool( alice_id, bobcoin_id, _core(200), core_asset_hf), fc::exception ); + + // can't claim more than is available in the fee pool + GRAPHENE_REQUIRE_THROW( claim_pool( alice_id, alicecoin_id, _core(400), core_asset_hf ), fc::exception ); + + // can't pay fee in the same asset whose pool is being drained + GRAPHENE_REQUIRE_THROW( claim_pool( alice_id, alicecoin_id, _core(200), alicecoin_id(db) ), fc::exception ); + + // can claim BTS back from the fee pool + claim_pool( alice_id, alicecoin_id, _core(200), core_asset_hf ); + BOOST_CHECK( alicecoin_id(db).dynamic_asset_data_id(db).fee_pool == _core(100).amount ); + + // can pay fee in the asset other than the one whose pool is being drained + share_type balance_before_claim = get_balance( alice_id, asset_id_type() ); + claim_pool( alice_id, alicecoin_id, _core(100), aliceusd_id(db) ); + BOOST_CHECK( alicecoin_id(db).dynamic_asset_data_id(db).fee_pool == _core(0).amount ); + + //check balance after claiming pool + share_type current_balance = get_balance( alice_id, asset_id_type() ); + BOOST_CHECK( balance_before_claim + _core(100).amount == current_balance ); + + // can create a proposal to claim claim pool after hard fork + claim_pool_proposal( alice_id, aliceusd_id, _core(1), core_asset_hf); + } + FC_LOG_AND_RETHROW() +} + /////////////////////////////////////////////////////////////// // cashback_test infrastructure // /////////////////////////////////////////////////////////////// @@ -309,6 +435,12 @@ BOOST_AUTO_TEST_CASE( cashback_test ) PREP_ACTOR(dumy); PREP_ACTOR(stud); PREP_ACTOR(pleb); + // use ##_public_key vars to silence unused variable warning + BOOST_CHECK_GT(ann_public_key.key_data.size(), 0u); + BOOST_CHECK_GT(scud_public_key.key_data.size(), 0u); + BOOST_CHECK_GT(dumy_public_key.key_data.size(), 0u); + BOOST_CHECK_GT(stud_public_key.key_data.size(), 0u); + BOOST_CHECK_GT(pleb_public_key.key_data.size(), 0u); account_id_type ann_id, scud_id, dumy_id, stud_id, pleb_id; actor_audit alife, arog, aann, ascud, adumy, astud, apleb; @@ -330,7 +462,7 @@ BOOST_AUTO_TEST_CASE( cashback_test ) upgrade_to_lifetime_member(rog_id); BOOST_TEST_MESSAGE("Enable fees"); - const auto& fees = db.get_global_properties().parameters.current_fees; + const auto& fees = *db.get_global_properties().parameters.current_fees; #define CustomRegisterActor(actor_name, registrar_name, referrer_name, referrer_rate) \ { 
\ @@ -342,7 +474,7 @@ BOOST_AUTO_TEST_CASE( cashback_test ) op.options.memo_key = actor_name ## _private_key.get_public_key(); \ op.active = authority(1, public_key_type(actor_name ## _private_key.get_public_key()), 1); \ op.owner = op.active; \ - op.fee = fees->calculate_fee(op); \ + op.fee = fees.calculate_fee(op); \ trx.operations = {op}; \ sign( trx, registrar_name ## _private_key ); \ actor_name ## _id = PUSH_TX( db, trx ).operation_results.front().get(); \ @@ -368,10 +500,10 @@ BOOST_AUTO_TEST_CASE( cashback_test ) CustomAuditActor( pleb ); \ } - int64_t reg_fee = fees->get< account_create_operation >().premium_fee; - int64_t xfer_fee = fees->get< transfer_operation >().fee; - int64_t upg_an_fee = fees->get< account_upgrade_operation >().membership_annual_fee; - int64_t upg_lt_fee = fees->get< account_upgrade_operation >().membership_lifetime_fee; + int64_t reg_fee = fees.get< account_create_operation >().premium_fee; + int64_t xfer_fee = fees.get< transfer_operation >().fee; + int64_t upg_an_fee = fees.get< account_upgrade_operation >().membership_annual_fee; + int64_t upg_lt_fee = fees.get< account_upgrade_operation >().membership_lifetime_fee; // all percentages here are cut from whole pie! uint64_t network_pct = 20 * P1; uint64_t lt_pct = 375 * P100 / 1000; @@ -577,29 +709,29 @@ BOOST_AUTO_TEST_CASE( account_create_fee_scaling ) auto accounts_per_scale = db.get_global_properties().parameters.accounts_per_fee_scale; db.modify(global_property_id_type()(db), [](global_property_object& gpo) { - gpo.parameters.current_fees = fee_schedule::get_default(); + gpo.parameters.current_fees = std::make_shared(fee_schedule::get_default()); gpo.parameters.current_fees->get().basic_fee = 1; }); for( int i = db.get_dynamic_global_properties().accounts_registered_this_interval; i < accounts_per_scale; ++i ) { - BOOST_CHECK_EQUAL(db.get_global_properties().parameters.current_fees->get().basic_fee, 1); + BOOST_CHECK_EQUAL(db.get_global_properties().parameters.current_fees->get().basic_fee, 1u); create_account("shill" + fc::to_string(i)); } for( int i = 0; i < accounts_per_scale; ++i ) { - BOOST_CHECK_EQUAL(db.get_global_properties().parameters.current_fees->get().basic_fee, 16); + BOOST_CHECK_EQUAL(db.get_global_properties().parameters.current_fees->get().basic_fee, 16u); create_account("moreshills" + fc::to_string(i)); } for( int i = 0; i < accounts_per_scale; ++i ) { - BOOST_CHECK_EQUAL(db.get_global_properties().parameters.current_fees->get().basic_fee, 256); + BOOST_CHECK_EQUAL(db.get_global_properties().parameters.current_fees->get().basic_fee, 256u); create_account("moarshills" + fc::to_string(i)); } - BOOST_CHECK_EQUAL(db.get_global_properties().parameters.current_fees->get().basic_fee, 4096); + BOOST_CHECK_EQUAL(db.get_global_properties().parameters.current_fees->get().basic_fee, 4096u); generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); - BOOST_CHECK_EQUAL(db.get_global_properties().parameters.current_fees->get().basic_fee, 1); + BOOST_CHECK_EQUAL(db.get_global_properties().parameters.current_fees->get().basic_fee, 1u); } FC_LOG_AND_RETHROW() } BOOST_AUTO_TEST_CASE( fee_refund_test ) @@ -626,7 +758,6 @@ BOOST_AUTO_TEST_CASE( fee_refund_test ) | database::skip_transaction_dupe_check | database::skip_block_size_check | database::skip_tapos_check - | database::skip_authority_check | database::skip_merkle_check ; @@ -686,7 +817,7 @@ BOOST_AUTO_TEST_CASE( fee_refund_test ) cancel_limit_order( bo1_id(db) ); int64_t cancel_net_fee; - if( db.head_block_time() >= 
HARDFORK_445_TIME ) + if( db.head_block_time() > HARDFORK_445_TIME ) cancel_net_fee = order_cancel_fee; else cancel_net_fee = order_create_fee + order_cancel_fee; @@ -736,6 +867,2575 @@ BOOST_AUTO_TEST_CASE( fee_refund_test ) FC_LOG_AND_RETHROW() } +BOOST_AUTO_TEST_CASE( non_core_fee_refund_test ) +{ + try + { + ACTORS((alice)(bob)(izzy)); + + int64_t alice_b0 = 1000000, bob_b0 = 1000000; + int64_t pool_0 = 100000, accum_0 = 0; + + transfer( account_id_type(), alice_id, asset(alice_b0) ); + transfer( account_id_type(), bob_id, asset(bob_b0) ); + + asset_id_type core_id = asset_id_type(); + const auto& usd_obj = create_user_issued_asset( "IZZYUSD", izzy_id(db), charge_market_fee ); + asset_id_type usd_id = usd_obj.id; + issue_uia( alice_id, asset( alice_b0, usd_id ) ); + issue_uia( bob_id, asset( bob_b0, usd_id ) ); + + fund_fee_pool( committee_account( db ), usd_obj, pool_0 ); + + int64_t order_create_fee = 537; + int64_t order_cancel_fee = 129; + + uint32_t skip = database::skip_witness_signature + | database::skip_transaction_signatures + | database::skip_transaction_dupe_check + | database::skip_block_size_check + | database::skip_tapos_check + | database::skip_merkle_check + ; + + generate_block( skip ); + + flat_set< fee_parameters > new_fees; + { + limit_order_create_operation::fee_parameters_type create_fee_params; + create_fee_params.fee = order_create_fee; + new_fees.insert( create_fee_params ); + } + { + limit_order_cancel_operation::fee_parameters_type cancel_fee_params; + cancel_fee_params.fee = order_cancel_fee; + new_fees.insert( cancel_fee_params ); + } + { + transfer_operation::fee_parameters_type transfer_fee_params; + transfer_fee_params.fee = 0; + transfer_fee_params.price_per_kbyte = 0; + new_fees.insert( transfer_fee_params ); + } + + for( int i=0; i<4; i++ ) + { + bool expire_order = ( i % 2 != 0 ); + bool before_hardfork_445 = ( i < 2 ); + if( i == 2 ) + { + generate_blocks( HARDFORK_445_TIME, true, skip ); + generate_block( skip ); + } + + // enable_fees() and change_fees() modifies DB directly, and results will be overwritten by block generation + // so we have to do it every time we stop generating/popping blocks and start doing tx's + enable_fees(); + change_fees( new_fees ); + + // AAAAGGHH create_sell_order reads trx.expiration #469 + set_expiration( db, trx ); + + // prepare params + uint32_t blocks_generated = 0; + time_point_sec max_exp = time_point_sec::maximum(); + time_point_sec exp = db.head_block_time(); // order will be accepted when pushing trx then expired at current block + price cer( asset(1), asset(1, usd_id) ); + const auto* usd_stat = &usd_id( db ).dynamic_asset_data_id( db ); + + // balance data + int64_t alice_bc = alice_b0, bob_bc = bob_b0; // core balance + int64_t alice_bu = alice_b0, bob_bu = bob_b0; // usd balance + int64_t pool_b = pool_0, accum_b = accum_0; + + // refund data + int64_t core_fee_refund_core; + int64_t core_fee_refund_usd; + int64_t usd_fee_refund_core; + int64_t usd_fee_refund_usd; + if( db.head_block_time() > HARDFORK_445_TIME ) + { + core_fee_refund_core = order_create_fee; + core_fee_refund_usd = 0; + usd_fee_refund_core = order_create_fee; + usd_fee_refund_usd = 0; + } + else + { + core_fee_refund_core = 0; + core_fee_refund_usd = 0; + usd_fee_refund_core = 0; + usd_fee_refund_usd = 0; + } + + // Check non-overlapping + // Alice creates order + // Bob creates order which doesn't match + limit_order_id_type ao1_id = create_sell_order( alice_id, asset(1000), asset(1000, usd_id) )->id; + limit_order_id_type bo1_id = 
create_sell_order( bob_id, asset(500, usd_id), asset(1000), exp, cer )->id; + + alice_bc -= order_create_fee; + alice_bc -= 1000; + bob_bu -= order_create_fee; + bob_bu -= 500; + pool_b -= order_create_fee; + accum_b += order_create_fee; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // Bob cancels order + if( !expire_order ) + cancel_limit_order( bo1_id(db) ); + else + { + // empty accounts before generate block, to test if it will fail when charging order cancel fee + transfer( alice_id, account_id_type(), asset(alice_bc, core_id) ); + transfer( alice_id, account_id_type(), asset(alice_bu, usd_id) ); + transfer( bob_id, account_id_type(), asset( bob_bc, core_id) ); + transfer( bob_id, account_id_type(), asset( bob_bu, usd_id) ); + // generate a new block so one or more order will expire + generate_block( skip ); + ++blocks_generated; + enable_fees(); + change_fees( new_fees ); + set_expiration( db, trx ); + exp = db.head_block_time(); + usd_stat = &usd_id( db ).dynamic_asset_data_id( db ); + // restore account balances + transfer( account_id_type(), alice_id, asset(alice_bc, core_id) ); + transfer( account_id_type(), alice_id, asset(alice_bu, usd_id) ); + transfer( account_id_type(), bob_id, asset( bob_bc, core_id) ); + transfer( account_id_type(), bob_id, asset( bob_bu, usd_id) ); + } + + if( !expire_order || !before_hardfork_445 ) + bob_bc -= order_cancel_fee; + // else do nothing: before hard fork 445, no fee on expired order + bob_bc += usd_fee_refund_core; + bob_bu += 500; + bob_bu += usd_fee_refund_usd; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + + // Alice cancels order + cancel_limit_order( ao1_id(db) ); + + alice_bc -= order_cancel_fee; + alice_bc += 1000; + alice_bc += core_fee_refund_core; + alice_bu += core_fee_refund_usd; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // Check partial fill + const limit_order_object* ao2 = create_sell_order( alice_id, asset(1000), asset(200, usd_id), exp, cer ); + const limit_order_id_type ao2id = ao2->id; + const limit_order_object* bo2 = create_sell_order( bob_id, asset(100, usd_id), asset(500) ); + + BOOST_CHECK( db.find( ao2id ) != nullptr ); + BOOST_CHECK( bo2 == nullptr ); + + // data after order created + alice_bc -= 1000; + alice_bu -= order_create_fee; + pool_b -= order_create_fee; + accum_b += order_create_fee; + bob_bc -= order_create_fee; + bob_bu -= 100; + + // data after order filled + alice_bu += 100; + bob_bc += 500; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( 
alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // cancel Alice order, show that entire deferred_fee was consumed by partial match + if( !expire_order ) + cancel_limit_order( *ao2 ); + else + { + // empty accounts before generate block, to test if it will fail when charging order cancel fee + transfer( alice_id, account_id_type(), asset(alice_bc, core_id) ); + transfer( alice_id, account_id_type(), asset(alice_bu, usd_id) ); + transfer( bob_id, account_id_type(), asset( bob_bc, core_id) ); + transfer( bob_id, account_id_type(), asset( bob_bu, usd_id) ); + // generate a new block so one or more order will expire + generate_block( skip ); + ++blocks_generated; + enable_fees(); + change_fees( new_fees ); + set_expiration( db, trx ); + exp = db.head_block_time(); + usd_stat = &usd_id( db ).dynamic_asset_data_id( db ); + // restore account balances + transfer( account_id_type(), alice_id, asset(alice_bc, core_id) ); + transfer( account_id_type(), alice_id, asset(alice_bu, usd_id) ); + transfer( account_id_type(), bob_id, asset( bob_bc, core_id) ); + transfer( account_id_type(), bob_id, asset( bob_bu, usd_id) ); + } + + + if( !expire_order ) + alice_bc -= order_cancel_fee; + // else do nothing: + // before hard fork 445, no fee when order is expired; + // after hard fork 445, when partially filled order expired, order cancel fee is capped at 0 + alice_bc += 500; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // Check multiple fill + // Alice creating multiple orders + const limit_order_object* ao31 = create_sell_order( alice_id, asset(1000), asset(200, usd_id), max_exp, cer ); + const limit_order_object* ao32 = create_sell_order( alice_id, asset(1000), asset(2000, usd_id), max_exp, cer ); + const limit_order_object* ao33 = create_sell_order( alice_id, asset(1000), asset(200, usd_id), max_exp, cer ); + const limit_order_object* ao34 = create_sell_order( alice_id, asset(1000), asset(200, usd_id), max_exp, cer ); + const limit_order_object* ao35 = create_sell_order( alice_id, asset(1000), asset(200, usd_id), max_exp, cer ); + + const limit_order_id_type ao31id = ao31->id; + const limit_order_id_type ao32id = ao32->id; + const limit_order_id_type ao33id = ao33->id; + const limit_order_id_type ao34id = ao34->id; + const limit_order_id_type ao35id = ao35->id; + + alice_bc -= 1000 * 5; + alice_bu -= order_create_fee * 5; + pool_b -= order_create_fee * 5; + accum_b += order_create_fee * 5; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // Bob creating an order matching multiple Alice's orders + const limit_order_object* bo31 = create_sell_order( bob_id, asset(500, usd_id), asset(2500), exp ); + + 
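// Note (derived from the checks and balance bookkeeping below): bo31 offers 500 USD for at least 2500 CORE, the same 1 USD : 5 CORE price as ao31/ao33/ao34/ao35, while ao32 asks 2 USD per CORE and cannot match + // so bo31 completely fills ao31 and ao33 (200 USD each), spends its remaining 100 USD to take 500 CORE from ao34, and is itself fully filled, never resting on the book; Alice receives 500 USD and Bob receives 2500 CORE before fees + 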
BOOST_CHECK( db.find( ao31id ) == nullptr ); + BOOST_CHECK( db.find( ao32id ) != nullptr ); + BOOST_CHECK( db.find( ao33id ) == nullptr ); + BOOST_CHECK( db.find( ao34id ) != nullptr ); + BOOST_CHECK( db.find( ao35id ) != nullptr ); + BOOST_CHECK( bo31 == nullptr ); + + // data after order created + bob_bc -= order_create_fee; + bob_bu -= 500; + + // data after order filled + alice_bu += 500; + bob_bc += 2500; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // Bob creating an order matching multiple Alice's orders + const limit_order_object* bo32 = create_sell_order( bob_id, asset(500, usd_id), asset(2500), exp ); + + BOOST_CHECK( db.find( ao31id ) == nullptr ); + BOOST_CHECK( db.find( ao32id ) != nullptr ); + BOOST_CHECK( db.find( ao33id ) == nullptr ); + BOOST_CHECK( db.find( ao34id ) == nullptr ); + BOOST_CHECK( db.find( ao35id ) == nullptr ); + BOOST_CHECK( bo32 != nullptr ); + + // data after order created + bob_bc -= order_create_fee; + bob_bu -= 500; + + // data after order filled + alice_bu += 300; + bob_bc += 1500; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // cancel Bob order, show that entire deferred_fee was consumed by partial match + if( !expire_order ) + cancel_limit_order( *bo32 ); + else + { + // empty accounts before generate block, to test if it will fail when charging order cancel fee + transfer( alice_id, account_id_type(), asset(alice_bc, core_id) ); + transfer( alice_id, account_id_type(), asset(alice_bu, usd_id) ); + transfer( bob_id, account_id_type(), asset( bob_bc, core_id) ); + transfer( bob_id, account_id_type(), asset( bob_bu, usd_id) ); + // generate a new block so one or more order will expire + generate_block( skip ); + ++blocks_generated; + enable_fees(); + change_fees( new_fees ); + set_expiration( db, trx ); + exp = db.head_block_time(); + usd_stat = &usd_id( db ).dynamic_asset_data_id( db ); + // restore account balances + transfer( account_id_type(), alice_id, asset(alice_bc, core_id) ); + transfer( account_id_type(), alice_id, asset(alice_bu, usd_id) ); + transfer( account_id_type(), bob_id, asset( bob_bc, core_id) ); + transfer( account_id_type(), bob_id, asset( bob_bu, usd_id) ); + } + + if( !expire_order ) + bob_bc -= order_cancel_fee; + // else do nothing: + // before hard fork 445, no fee when order is expired; + // after hard fork 445, when partially filled order expired, order cancel fee is capped at 0 + bob_bu += 200; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // cancel Alice order, will refund after hard fork 445 + cancel_limit_order( 
ao32id( db ) ); + + alice_bc -= order_cancel_fee; + alice_bc += 1000; + alice_bc += usd_fee_refund_core; + alice_bu += usd_fee_refund_usd; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // undo above tx's and reset + generate_block( skip ); + ++blocks_generated; + while( blocks_generated > 0 ) + { + db.pop_block(); + --blocks_generated; + } + } + } + FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE( hf445_fee_refund_cross_test ) +{ // create orders before hard fork, cancel them after hard fork + try + { + ACTORS((alice)(bob)(izzy)); + + int64_t alice_b0 = 1000000, bob_b0 = 1000000; + int64_t pool_0 = 100000, accum_0 = 0; + + transfer( account_id_type(), alice_id, asset(alice_b0) ); + transfer( account_id_type(), bob_id, asset(bob_b0) ); + + asset_id_type core_id = asset_id_type(); + const auto& usd_obj = create_user_issued_asset( "IZZYUSD", izzy_id(db), charge_market_fee ); + asset_id_type usd_id = usd_obj.id; + issue_uia( alice_id, asset( alice_b0, usd_id ) ); + issue_uia( bob_id, asset( bob_b0, usd_id ) ); + + fund_fee_pool( committee_account( db ), usd_obj, pool_0 ); + + int64_t order_create_fee = 537; + int64_t order_cancel_fee = 129; + + uint32_t skip = database::skip_witness_signature + | database::skip_transaction_signatures + | database::skip_transaction_dupe_check + | database::skip_block_size_check + | database::skip_tapos_check + | database::skip_merkle_check + ; + + generate_block( skip ); + + flat_set< fee_parameters > new_fees; + { + limit_order_create_operation::fee_parameters_type create_fee_params; + create_fee_params.fee = order_create_fee; + new_fees.insert( create_fee_params ); + } + { + limit_order_cancel_operation::fee_parameters_type cancel_fee_params; + cancel_fee_params.fee = order_cancel_fee; + new_fees.insert( cancel_fee_params ); + } + { + transfer_operation::fee_parameters_type transfer_fee_params; + transfer_fee_params.fee = 0; + transfer_fee_params.price_per_kbyte = 0; + new_fees.insert( transfer_fee_params ); + } + + // enable_fees() and change_fees() modifies DB directly, and results will be overwritten by block generation + // so we have to do it every time we stop generating/popping blocks and start doing tx's + enable_fees(); + change_fees( new_fees ); + + // AAAAGGHH create_sell_order reads trx.expiration #469 + set_expiration( db, trx ); + + // prepare params + const chain_parameters& params = db.get_global_properties().parameters; + time_point_sec max_exp = time_point_sec::maximum(); + time_point_sec exp = HARDFORK_445_TIME + fc::seconds( params.block_interval * (params.maintenance_skip_slots + 1) * 3 ); + time_point_sec exp2 = HARDFORK_445_TIME + fc::seconds( params.block_interval * (params.maintenance_skip_slots + 1) * 13 ); + price cer( asset(1), asset(1, usd_id) ); + const auto* usd_stat = &usd_id( db ).dynamic_asset_data_id( db ); + + // balance data + int64_t alice_bc = alice_b0, bob_bc = bob_b0; // core balance + int64_t alice_bu = alice_b0, bob_bu = bob_b0; // usd balance + int64_t pool_b = pool_0, accum_b = accum_0; + + // prepare orders + BOOST_TEST_MESSAGE( "Creating orders those will never match: ao1, ao2, bo1, bo2 .." 
); + // ao1: won't expire, won't match, fee in core + limit_order_id_type ao1_id = create_sell_order( alice_id, asset(1000), asset(100000, usd_id) )->id; + BOOST_CHECK( db.find( ao1_id ) != nullptr ); + // ao2: will expire, won't match, fee in core + limit_order_id_type ao2_id = create_sell_order( alice_id, asset(800), asset(100000, usd_id), exp )->id; + BOOST_CHECK( db.find( ao2_id ) != nullptr ); + // bo1: won't expire, won't match, fee in usd + limit_order_id_type bo1_id = create_sell_order( bob_id, asset(1000, usd_id), asset(100000), max_exp, cer )->id; + BOOST_CHECK( db.find( bo1_id ) != nullptr ); + // bo2: will expire, won't match, fee in usd + limit_order_id_type bo2_id = create_sell_order( bob_id, asset(800, usd_id), asset(100000), exp, cer )->id; + BOOST_CHECK( db.find( bo2_id ) != nullptr ); + + alice_bc -= order_create_fee * 2; + alice_bc -= 1000; + alice_bc -= 800; + bob_bu -= order_create_fee * 2; + bob_bu -= 1000; + bob_bu -= 800; + pool_b -= order_create_fee * 2; + accum_b += order_create_fee * 2; + int64_t ao1_remain = 1000; + int64_t ao2_remain = 800; + int64_t bo1_remain = 1000; + int64_t bo2_remain = 800; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // ao3: won't expire, partially match before hard fork 445, fee in core + BOOST_TEST_MESSAGE( "Creating order ao3 .." ); + limit_order_id_type ao3_id = create_sell_order( alice_id, asset(900), asset(2700, usd_id) )->id; + BOOST_CHECK( db.find( ao3_id ) != nullptr ); + create_sell_order( bob_id, asset(600, usd_id), asset(200) ); + + alice_bc -= order_create_fee; + alice_bc -= 900; + alice_bu += 600; + bob_bc -= order_create_fee; + bob_bu -= 600; + bob_bc += 200; + int64_t ao3_remain = 900 - 200; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // ao4: will expire, will partially match before hard fork 445, fee in core + BOOST_TEST_MESSAGE( "Creating order ao4 .." ); + limit_order_id_type ao4_id = create_sell_order( alice_id, asset(700), asset(1400, usd_id), exp )->id; + BOOST_CHECK( db.find( ao4_id ) != nullptr ); + create_sell_order( bob_id, asset(200, usd_id), asset(100) ); + + alice_bc -= order_create_fee; + alice_bc -= 700; + alice_bu += 200; + bob_bc -= order_create_fee; + bob_bu -= 200; + bob_bc += 100; + int64_t ao4_remain = 700 - 100; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // bo3: won't expire, will partially match before hard fork 445, fee in usd + BOOST_TEST_MESSAGE( "Creating order bo3 .." 
); + limit_order_id_type bo3_id = create_sell_order( bob_id, asset(500, usd_id), asset(1500), max_exp, cer )->id; + BOOST_CHECK( db.find( bo3_id ) != nullptr ); + create_sell_order( alice_id, asset(450), asset(150, usd_id) ); + + alice_bc -= order_create_fee; + alice_bc -= 450; + alice_bu += 150; + bob_bu -= order_create_fee; + bob_bu -= 500; + bob_bc += 450; + pool_b -= order_create_fee; + accum_b += order_create_fee; + int64_t bo3_remain = 500 - 150; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // bo4: will expire, will partially match before hard fork 445, fee in usd + BOOST_TEST_MESSAGE( "Creating order bo4 .." ); + limit_order_id_type bo4_id = create_sell_order( bob_id, asset(300, usd_id), asset(600), exp, cer )->id; + BOOST_CHECK( db.find( bo4_id ) != nullptr ); + create_sell_order( alice_id, asset(140), asset(70, usd_id) ); + + alice_bc -= order_create_fee; + alice_bc -= 140; + alice_bu += 70; + bob_bu -= order_create_fee; + bob_bu -= 300; + bob_bc += 140; + pool_b -= order_create_fee; + accum_b += order_create_fee; + int64_t bo4_remain = 300 - 70; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // ao5: won't expire, partially match after hard fork 445, fee in core + BOOST_TEST_MESSAGE( "Creating order ao5 .." ); + limit_order_id_type ao5_id = create_sell_order( alice_id, asset(606), asset(909, usd_id) )->id; + BOOST_CHECK( db.find( ao5_id ) != nullptr ); + + alice_bc -= order_create_fee; + alice_bc -= 606; + int64_t ao5_remain = 606; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // ao6: will expire, partially match after hard fork 445, fee in core + BOOST_TEST_MESSAGE( "Creating order ao6 .." ); + limit_order_id_type ao6_id = create_sell_order( alice_id, asset(333), asset(444, usd_id), exp2 )->id; + BOOST_CHECK( db.find( ao6_id ) != nullptr ); + + alice_bc -= order_create_fee; + alice_bc -= 333; + int64_t ao6_remain = 333; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // bo5: won't expire, partially match after hard fork 445, fee in usd + BOOST_TEST_MESSAGE( "Creating order bo5 .." 
); + limit_order_id_type bo5_id = create_sell_order( bob_id, asset(255, usd_id), asset(408), max_exp, cer )->id; + BOOST_CHECK( db.find( bo5_id ) != nullptr ); + + bob_bu -= order_create_fee; + bob_bu -= 255; + pool_b -= order_create_fee; + accum_b += order_create_fee; + int64_t bo5_remain = 255; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // bo6: will expire, partially match after hard fork 445, fee in usd + BOOST_TEST_MESSAGE( "Creating order bo6 .." ); + limit_order_id_type bo6_id = create_sell_order( bob_id, asset(127, usd_id), asset(127), exp2, cer )->id; + BOOST_CHECK( db.find( bo6_id ) != nullptr ); + + bob_bu -= order_create_fee; + bob_bu -= 127; + pool_b -= order_create_fee; + accum_b += order_create_fee; + int64_t bo6_remain = 127; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // generate block so the orders will be in db before hard fork + BOOST_TEST_MESSAGE( "Generating blocks ..." ); + generate_block( skip ); + + // generate blocks util hard fork 445 + generate_blocks( HARDFORK_445_TIME, true, skip ); + generate_block( skip ); + + // nothing will change + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // generate more blocks, so some orders will expire + generate_blocks( exp, true, skip ); + + // no fee refund for orders created before hard fork 445, but remaining funds will be refunded + BOOST_TEST_MESSAGE( "Checking expired orders: ao2, ao4, bo2, bo4 .." ); + alice_bc += ao2_remain; + alice_bc += ao4_remain; + bob_bu += bo2_remain; + bob_bu += bo4_remain; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // prepare for new transactions + enable_fees(); + change_fees( new_fees ); + usd_stat = &usd_id( db ).dynamic_asset_data_id( db ); + set_expiration( db, trx ); + + // cancel ao1 + BOOST_TEST_MESSAGE( "Cancel order ao1 .." 
); + cancel_limit_order( ao1_id(db) ); + + alice_bc += ao1_remain; + alice_bc -= order_cancel_fee; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // cancel ao3 + BOOST_TEST_MESSAGE( "Cancel order ao3 .." ); + cancel_limit_order( ao3_id(db) ); + + alice_bc += ao3_remain; + alice_bc -= order_cancel_fee; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // cancel bo1 + BOOST_TEST_MESSAGE( "Cancel order bo1 .." ); + cancel_limit_order( bo1_id(db) ); + + bob_bu += bo1_remain; + bob_bc -= order_cancel_fee; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // cancel bo3 + BOOST_TEST_MESSAGE( "Cancel order bo3 .." ); + cancel_limit_order( bo3_id(db) ); + + bob_bu += bo3_remain; + bob_bc -= order_cancel_fee; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // partially fill ao6 + BOOST_TEST_MESSAGE( "Partially fill ao6 .." ); + create_sell_order( bob_id, asset(88, usd_id), asset(66) ); + + alice_bu += 88; + bob_bc -= order_create_fee; + bob_bu -= 88; + bob_bc += 66; + ao6_remain -= 66; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // partially fill bo6 + BOOST_TEST_MESSAGE( "Partially fill bo6 .." ); + create_sell_order( alice_id, asset(59), asset(59, usd_id) ); + + alice_bc -= order_create_fee; + alice_bc -= 59; + alice_bu += 59; + bob_bc += 59; + bo6_remain -= 59; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // generate block to save the changes + BOOST_TEST_MESSAGE( "Generating blocks ..." 
); + generate_block( skip ); + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // generate blocks util exp2, so some orders will expire + generate_blocks( exp2, true, skip ); + + // no fee refund for orders created before hard fork 445, but remaining funds will be refunded + BOOST_TEST_MESSAGE( "Checking expired orders: ao6, bo6 .." ); + alice_bc += ao6_remain; + bob_bu += bo6_remain; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // prepare for new transactions + enable_fees(); + change_fees( new_fees ); + usd_stat = &usd_id( db ).dynamic_asset_data_id( db ); + set_expiration( db, trx ); + + // partially fill ao5 + BOOST_TEST_MESSAGE( "Partially fill ao5 .." ); + create_sell_order( bob_id, asset(93, usd_id), asset(62) ); + + alice_bu += 93; + bob_bc -= order_create_fee; + bob_bu -= 93; + bob_bc += 62; + ao5_remain -= 62; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // partially fill bo5 + BOOST_TEST_MESSAGE( "Partially fill bo5 .." ); + create_sell_order( alice_id, asset(24), asset(15, usd_id) ); + + alice_bc -= order_create_fee; + alice_bc -= 24; + alice_bu += 15; + bob_bc += 24; + bo5_remain -= 15; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // cancel ao5 + BOOST_TEST_MESSAGE( "Cancel order ao5 .." ); + cancel_limit_order( ao5_id(db) ); + + alice_bc += ao5_remain; + alice_bc -= order_cancel_fee; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // cancel bo5 + BOOST_TEST_MESSAGE( "Cancel order bo5 .." 
); + cancel_limit_order( bo5_id(db) ); + + bob_bu += bo5_remain; + bob_bc -= order_cancel_fee; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // generate block to save the changes + BOOST_TEST_MESSAGE( "Generating blocks ..." ); + generate_block( skip ); + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + } + FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE( bsip26_fee_refund_test ) +{ + try + { + ACTORS((alice)(bob)(izzy)); + + int64_t alice_b0 = 1000000, bob_b0 = 1000000; + int64_t pool_0 = 1000000, accum_0 = 0; + + transfer( account_id_type(), alice_id, asset(alice_b0) ); + transfer( account_id_type(), bob_id, asset(bob_b0) ); + + asset_id_type core_id = asset_id_type(); + int64_t cer_core_amount = 1801; + int64_t cer_usd_amount = 3; + price tmp_cer( asset( cer_core_amount ), asset( cer_usd_amount, asset_id_type(1) ) ); + const auto& usd_obj = create_user_issued_asset( "IZZYUSD", izzy_id(db), charge_market_fee, tmp_cer ); + asset_id_type usd_id = usd_obj.id; + issue_uia( alice_id, asset( alice_b0, usd_id ) ); + issue_uia( bob_id, asset( bob_b0, usd_id ) ); + + fund_fee_pool( committee_account( db ), usd_obj, pool_0 ); + + int64_t order_create_fee = 547; + int64_t order_cancel_fee; + int64_t order_cancel_fee1 = 139; + int64_t order_cancel_fee2 = 829; + + uint32_t skip = database::skip_witness_signature + | database::skip_transaction_signatures + | database::skip_transaction_dupe_check + | database::skip_block_size_check + | database::skip_tapos_check + | database::skip_merkle_check + ; + + generate_block( skip ); + + flat_set< fee_parameters > new_fees; + flat_set< fee_parameters > new_fees1; + flat_set< fee_parameters > new_fees2; + { + limit_order_create_operation::fee_parameters_type create_fee_params; + create_fee_params.fee = order_create_fee; + new_fees1.insert( create_fee_params ); + new_fees2.insert( create_fee_params ); + } + { + limit_order_cancel_operation::fee_parameters_type cancel_fee_params; + cancel_fee_params.fee = order_cancel_fee1; + new_fees1.insert( cancel_fee_params ); + } + { + limit_order_cancel_operation::fee_parameters_type cancel_fee_params; + cancel_fee_params.fee = order_cancel_fee2; + new_fees2.insert( cancel_fee_params ); + } + { + transfer_operation::fee_parameters_type transfer_fee_params; + transfer_fee_params.fee = 0; + transfer_fee_params.price_per_kbyte = 0; + new_fees1.insert( transfer_fee_params ); + new_fees2.insert( transfer_fee_params ); + } + + for( int i=0; i<12; i++ ) + { + bool expire_order = ( i % 2 != 0 ); + bool high_cancel_fee = ( i % 4 >= 2 ); + bool before_hardfork_445 = ( i < 4 ); + bool after_bsip26 = ( i >= 8 ); + idump( (before_hardfork_445)(after_bsip26)(expire_order)(high_cancel_fee) ); + if( i == 4 ) + { + BOOST_TEST_MESSAGE( "Hard fork 445" ); + generate_blocks( HARDFORK_445_TIME, true, skip ); + generate_block( skip ); + } + else if( i == 8 ) + { + 
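// from this iteration on the chain is past hard fork core-604, so after_bsip26 is true and the BSIP 26 cancellation/refund expectations below apply + 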
BOOST_TEST_MESSAGE( "Hard fork core-604 (bsip26)" ); + generate_blocks( HARDFORK_CORE_604_TIME, true, skip ); + generate_block( skip ); + } + + if( high_cancel_fee ) + { + new_fees = new_fees2; + order_cancel_fee = order_cancel_fee2; + } + else + { + new_fees = new_fees1; + order_cancel_fee = order_cancel_fee1; + } + + int64_t usd_create_fee = order_create_fee * cer_usd_amount / cer_core_amount; + if( usd_create_fee * cer_core_amount != order_create_fee * cer_usd_amount ) usd_create_fee += 1; + int64_t usd_cancel_fee = order_cancel_fee * cer_usd_amount / cer_core_amount; + if( usd_cancel_fee * cer_core_amount != order_cancel_fee * cer_usd_amount ) usd_cancel_fee += 1; + int64_t core_create_fee = usd_create_fee * cer_core_amount / cer_usd_amount; + int64_t core_cancel_fee = usd_cancel_fee * cer_core_amount / cer_usd_amount; + BOOST_CHECK( core_cancel_fee >= order_cancel_fee ); + + BOOST_TEST_MESSAGE( "Start" ); + + // enable_fees() and change_fees() modifies DB directly, and results will be overwritten by block generation + // so we have to do it every time we stop generating/popping blocks and start doing tx's + enable_fees(); + change_fees( new_fees ); + + // AAAAGGHH create_sell_order reads trx.expiration #469 + set_expiration( db, trx ); + + // prepare params + uint32_t blocks_generated = 0; + time_point_sec max_exp = time_point_sec::maximum(); + time_point_sec exp = db.head_block_time(); // order will be accepted when pushing trx then expired at current block + price cer = usd_id( db ).options.core_exchange_rate; + const auto* usd_stat = &usd_id( db ).dynamic_asset_data_id( db ); + + // balance data + int64_t alice_bc = alice_b0, bob_bc = bob_b0; // core balance + int64_t alice_bu = alice_b0, bob_bu = bob_b0; // usd balance + int64_t pool_b = pool_0, accum_b = accum_0; + + // refund data + int64_t core_fee_refund_core; + int64_t core_fee_refund_usd; + int64_t usd_fee_refund_core; + int64_t usd_fee_refund_usd; + int64_t accum_on_new; + int64_t accum_on_fill; + int64_t pool_refund; + if( db.head_block_time() > HARDFORK_CORE_604_TIME ) + { + core_fee_refund_core = order_create_fee; + core_fee_refund_usd = 0; + usd_fee_refund_core = 0; + usd_fee_refund_usd = usd_create_fee; + accum_on_new = 0; + accum_on_fill = usd_create_fee; + pool_refund = core_create_fee; + } + else if( db.head_block_time() > HARDFORK_445_TIME ) + { + core_fee_refund_core = order_create_fee; + core_fee_refund_usd = 0; + usd_fee_refund_core = core_create_fee; + usd_fee_refund_usd = 0; + accum_on_new = usd_create_fee; + accum_on_fill = 0; + pool_refund = 0; + } + else + { + core_fee_refund_core = 0; + core_fee_refund_usd = 0; + usd_fee_refund_core = 0; + usd_fee_refund_usd = 0; + accum_on_new = usd_create_fee; + accum_on_fill = 0; + pool_refund = 0; + } + + // Check non-overlapping + // Alice creates order + // Bob creates order which doesn't match + BOOST_TEST_MESSAGE( "Creating non-overlapping orders" ); + BOOST_TEST_MESSAGE( "Creating ao1" ); + limit_order_id_type ao1_id = create_sell_order( alice_id, asset(1000), asset(1000, usd_id), exp )->id; + + alice_bc -= order_create_fee; + alice_bc -= 1000; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // Alice cancels order + if( 
!expire_order ) + { + BOOST_TEST_MESSAGE( "Cancel order ao1" ); + cancel_limit_order( ao1_id(db) ); + } + else + { + BOOST_TEST_MESSAGE( "Order ao1 expired" ); + // empty accounts before generate block, to test if it will fail when charging order cancel fee + transfer( alice_id, account_id_type(), asset(alice_bc, core_id) ); + transfer( alice_id, account_id_type(), asset(alice_bu, usd_id) ); + transfer( bob_id, account_id_type(), asset( bob_bc, core_id) ); + transfer( bob_id, account_id_type(), asset( bob_bu, usd_id) ); + // generate a new block so one or more order will expire + generate_block( skip ); + ++blocks_generated; + enable_fees(); + change_fees( new_fees ); + set_expiration( db, trx ); + exp = db.head_block_time(); + usd_stat = &usd_id( db ).dynamic_asset_data_id( db ); + // restore account balances + transfer( account_id_type(), alice_id, asset(alice_bc, core_id) ); + transfer( account_id_type(), alice_id, asset(alice_bu, usd_id) ); + transfer( account_id_type(), bob_id, asset( bob_bc, core_id) ); + transfer( account_id_type(), bob_id, asset( bob_bu, usd_id) ); + } + + + if( !expire_order ) + alice_bc -= order_cancel_fee; // manual cancellation always needs a fee + else if( before_hardfork_445 ) + { // do nothing: before hard fork 445, no fee on expired order + } + else if( !after_bsip26 ) + { + // charge a cancellation fee in core, capped by deferred_fee which is order_create_fee + alice_bc -= std::min( order_cancel_fee, order_create_fee ); + } + else // bsip26 + { + // charge a cancellation fee in core, capped by deferred_fee which is order_create_fee + alice_bc -= std::min( order_cancel_fee, order_create_fee ); + } + alice_bc += 1000; + alice_bc += core_fee_refund_core; + alice_bu += core_fee_refund_usd; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + BOOST_TEST_MESSAGE( "Creating bo1" ); + limit_order_id_type bo1_id = create_sell_order( bob_id, asset(500, usd_id), asset(1000), exp, cer )->id; + + bob_bu -= usd_create_fee; + bob_bu -= 500; + pool_b -= core_create_fee; + accum_b += accum_on_new; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // Bob cancels order + if( !expire_order ) + { + BOOST_TEST_MESSAGE( "Cancel order bo1" ); + cancel_limit_order( bo1_id(db) ); + } + else + { + BOOST_TEST_MESSAGE( "Order bo1 expired" ); + // empty accounts before generate block, to test if it will fail when charging order cancel fee + transfer( alice_id, account_id_type(), asset(alice_bc, core_id) ); + transfer( alice_id, account_id_type(), asset(alice_bu, usd_id) ); + transfer( bob_id, account_id_type(), asset( bob_bc, core_id) ); + transfer( bob_id, account_id_type(), asset( bob_bu, usd_id) ); + // generate a new block so one or more order will expire + generate_block( skip ); + ++blocks_generated; + enable_fees(); + change_fees( new_fees ); + set_expiration( db, trx ); + exp = db.head_block_time(); + usd_stat = &usd_id( 
db ).dynamic_asset_data_id( db ); + // restore account balances + transfer( account_id_type(), alice_id, asset(alice_bc, core_id) ); + transfer( account_id_type(), alice_id, asset(alice_bu, usd_id) ); + transfer( account_id_type(), bob_id, asset( bob_bc, core_id) ); + transfer( account_id_type(), bob_id, asset( bob_bu, usd_id) ); + } + + if( !expire_order ) + bob_bc -= order_cancel_fee; // manual cancellation always needs a fee + else if( before_hardfork_445 ) + { // do nothing: before hard fork 445, no fee on expired order + } + else if( !after_bsip26 ) + { + // charge a cancellation fee in core, capped by deferred_fee which is core_create_fee + bob_bc -= std::min( order_cancel_fee, core_create_fee ); + } + else // bsip26 + { + // when expired, should have core_create_fee in deferred, usd_create_fee in deferred_paid + + // charge a cancellation fee in core from fee_pool, capped by deferred + int64_t capped_core_cancel_fee = std::min( order_cancel_fee, core_create_fee ); + pool_b -= capped_core_cancel_fee; + + // charge a corresponding cancellation fee in usd from deferred_paid, round up, capped + int64_t capped_usd_cancel_fee = capped_core_cancel_fee * usd_create_fee / core_create_fee; + if( capped_usd_cancel_fee * core_create_fee != capped_core_cancel_fee * usd_create_fee ) + capped_usd_cancel_fee += 1; + if( capped_usd_cancel_fee > usd_create_fee ) + capped_usd_cancel_fee = usd_create_fee; + bob_bu -= capped_usd_cancel_fee; + + // cancellation fee goes to accumulated fees + accum_b += capped_usd_cancel_fee; + } + bob_bc += usd_fee_refund_core; + bob_bu += 500; + bob_bu += usd_fee_refund_usd; + pool_b += pool_refund; // bo1 + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + + // Check partial fill + BOOST_TEST_MESSAGE( "Creating ao2, to be partially filled by bo2" ); + const limit_order_object* ao2 = create_sell_order( alice_id, asset(1000), asset(200, usd_id), exp, cer ); + const limit_order_id_type ao2id = ao2->id; + const limit_order_object* bo2 = create_sell_order( bob_id, asset(100, usd_id), asset(500) ); + + BOOST_CHECK( db.find( ao2id ) != nullptr ); + BOOST_CHECK( bo2 == nullptr ); + + // data after order created + alice_bc -= 1000; + alice_bu -= usd_create_fee; + pool_b -= core_create_fee; + accum_b += accum_on_new; + bob_bc -= order_create_fee; + bob_bu -= 100; + + // data after order filled + alice_bu += 100; + bob_bc += 500; + accum_b += accum_on_fill; // ao2 + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // cancel Alice order, show that entire deferred_fee was consumed by partial match + if( !expire_order ) + { + BOOST_TEST_MESSAGE( "Cancel order ao2" ); + cancel_limit_order( *ao2 ); + } + else + { + BOOST_TEST_MESSAGE( "Order ao2 expired" ); + // empty accounts before generate block, to test if it will fail when charging order cancel fee + transfer( alice_id, account_id_type(), asset(alice_bc, core_id) ); + 
transfer( alice_id, account_id_type(), asset(alice_bu, usd_id) ); + transfer( bob_id, account_id_type(), asset( bob_bc, core_id) ); + transfer( bob_id, account_id_type(), asset( bob_bu, usd_id) ); + // generate a new block so one or more order will expire + generate_block( skip ); + ++blocks_generated; + enable_fees(); + change_fees( new_fees ); + set_expiration( db, trx ); + exp = db.head_block_time(); + usd_stat = &usd_id( db ).dynamic_asset_data_id( db ); + // restore account balances + transfer( account_id_type(), alice_id, asset(alice_bc, core_id) ); + transfer( account_id_type(), alice_id, asset(alice_bu, usd_id) ); + transfer( account_id_type(), bob_id, asset( bob_bc, core_id) ); + transfer( account_id_type(), bob_id, asset( bob_bu, usd_id) ); + } + + + if( !expire_order ) + alice_bc -= order_cancel_fee; + // else do nothing: + // before hard fork 445, no fee when order is expired; + // after hard fork 445, when partially filled order expired, order cancel fee is capped at 0 + alice_bc += 500; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // Check multiple fill + // Alice creating multiple orders + BOOST_TEST_MESSAGE( "Creating ao31-ao35" ); + const limit_order_object* ao31 = create_sell_order( alice_id, asset(1000), asset(200, usd_id), max_exp, cer ); + const limit_order_object* ao32 = create_sell_order( alice_id, asset(1000), asset(2000, usd_id), max_exp, cer ); + const limit_order_object* ao33 = create_sell_order( alice_id, asset(1000), asset(200, usd_id), max_exp, cer ); + const limit_order_object* ao34 = create_sell_order( alice_id, asset(1000), asset(200, usd_id), max_exp, cer ); + const limit_order_object* ao35 = create_sell_order( alice_id, asset(1000), asset(200, usd_id), max_exp, cer ); + + const limit_order_id_type ao31id = ao31->id; + const limit_order_id_type ao32id = ao32->id; + const limit_order_id_type ao33id = ao33->id; + const limit_order_id_type ao34id = ao34->id; + const limit_order_id_type ao35id = ao35->id; + + alice_bc -= 1000 * 5; + alice_bu -= usd_create_fee * 5; + pool_b -= core_create_fee * 5; + accum_b += accum_on_new * 5; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // Bob creating an order matching multiple Alice's orders + BOOST_TEST_MESSAGE( "Creating bo31, completely fill ao31 and ao33, partially fill ao34" ); + const limit_order_object* bo31 = create_sell_order( bob_id, asset(500, usd_id), asset(2500), exp ); + + BOOST_CHECK( db.find( ao31id ) == nullptr ); + BOOST_CHECK( db.find( ao32id ) != nullptr ); + BOOST_CHECK( db.find( ao33id ) == nullptr ); + BOOST_CHECK( db.find( ao34id ) != nullptr ); + BOOST_CHECK( db.find( ao35id ) != nullptr ); + BOOST_CHECK( bo31 == nullptr ); + + // data after order created + bob_bc -= order_create_fee; + bob_bu -= 500; + + // data after order filled + alice_bu += 500; + bob_bc += 2500; + accum_b += accum_on_fill * 3; // ao31, ao33, ao34 + + 
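// accum_on_fill is counted three times because bo31 touched three of Alice's maker orders (ao31, ao33, ao34); per the bookkeeping above, after hard fork core-604 the deferred usd_create_fee is added to accumulated_fees only when an order is matched, while in earlier iterations accum_on_fill is zero and the fee was accumulated at creation via accum_on_new + // for reference, with order_create_fee = 547 and the 1801 CORE : 3 USD core exchange rate used here, usd_create_fee rounds up to 1 and core_create_fee is 600 + 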
BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // Bob creating an order matching multiple Alice's orders + BOOST_TEST_MESSAGE( "Creating bo32, completely fill partially filled ao34 and new ao35, leave on market" ); + const limit_order_object* bo32 = create_sell_order( bob_id, asset(500, usd_id), asset(2500), exp ); + + BOOST_CHECK( db.find( ao31id ) == nullptr ); + BOOST_CHECK( db.find( ao32id ) != nullptr ); + BOOST_CHECK( db.find( ao33id ) == nullptr ); + BOOST_CHECK( db.find( ao34id ) == nullptr ); + BOOST_CHECK( db.find( ao35id ) == nullptr ); + BOOST_CHECK( bo32 != nullptr ); + + // data after order created + bob_bc -= order_create_fee; + bob_bu -= 500; + + // data after order filled + alice_bu += 300; + bob_bc += 1500; + accum_b += accum_on_fill; // ao35 + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // cancel Bob order, show that entire deferred_fee was consumed by partial match + if( !expire_order ) + { + BOOST_TEST_MESSAGE( "Cancel order bo32" ); + cancel_limit_order( *bo32 ); + } + else + { + BOOST_TEST_MESSAGE( "Order bo32 expired" ); + // empty accounts before generate block, to test if it will fail when charging order cancel fee + transfer( alice_id, account_id_type(), asset(alice_bc, core_id) ); + transfer( alice_id, account_id_type(), asset(alice_bu, usd_id) ); + transfer( bob_id, account_id_type(), asset( bob_bc, core_id) ); + transfer( bob_id, account_id_type(), asset( bob_bu, usd_id) ); + // generate a new block so one or more order will expire + generate_block( skip ); + ++blocks_generated; + enable_fees(); + change_fees( new_fees ); + set_expiration( db, trx ); + exp = db.head_block_time(); + usd_stat = &usd_id( db ).dynamic_asset_data_id( db ); + // restore account balances + transfer( account_id_type(), alice_id, asset(alice_bc, core_id) ); + transfer( account_id_type(), alice_id, asset(alice_bu, usd_id) ); + transfer( account_id_type(), bob_id, asset( bob_bc, core_id) ); + transfer( account_id_type(), bob_id, asset( bob_bu, usd_id) ); + } + + if( !expire_order ) + bob_bc -= order_cancel_fee; + // else do nothing: + // before hard fork 445, no fee when order is expired; + // after hard fork 445, when partially filled order expired, order cancel fee is capped at 0 + bob_bu += 200; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // cancel Alice order, will refund after hard fork 445 + BOOST_TEST_MESSAGE( "Cancel order ao32" ); + cancel_limit_order( ao32id( db ) ); + + alice_bc -= order_cancel_fee; + alice_bc += 1000; + alice_bc += usd_fee_refund_core; + alice_bu += 
usd_fee_refund_usd; + pool_b += pool_refund; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // undo above tx's and reset + BOOST_TEST_MESSAGE( "Clean up" ); + generate_block( skip ); + ++blocks_generated; + while( blocks_generated > 0 ) + { + db.pop_block(); + --blocks_generated; + } + } + } + FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE( bsip26_fee_refund_cross_test ) +{ // create orders before hard fork, cancel them after hard fork + try + { + ACTORS((alice)(bob)(izzy)); + + int64_t alice_b0 = 1000000, bob_b0 = 1000000; + int64_t pool_0 = 1000000, accum_0 = 0; + + transfer( account_id_type(), alice_id, asset(alice_b0) ); + transfer( account_id_type(), bob_id, asset(bob_b0) ); + + asset_id_type core_id = asset_id_type(); + int64_t cer_core_amount = 1801; + int64_t cer_usd_amount = 3; + price tmp_cer( asset( cer_core_amount ), asset( cer_usd_amount, asset_id_type(1) ) ); + const auto& usd_obj = create_user_issued_asset( "IZZYUSD", izzy_id(db), charge_market_fee, tmp_cer ); + asset_id_type usd_id = usd_obj.id; + issue_uia( alice_id, asset( alice_b0, usd_id ) ); + issue_uia( bob_id, asset( bob_b0, usd_id ) ); + + fund_fee_pool( committee_account( db ), usd_obj, pool_0 ); + + int64_t order_create_fee = 547; + int64_t order_cancel_fee = 139; + int64_t usd_create_fee = order_create_fee * cer_usd_amount / cer_core_amount; + if( usd_create_fee * cer_core_amount != order_create_fee * cer_usd_amount ) usd_create_fee += 1; + int64_t usd_cancel_fee = order_cancel_fee * cer_usd_amount / cer_core_amount; + if( usd_cancel_fee * cer_core_amount != order_cancel_fee * cer_usd_amount ) usd_cancel_fee += 1; + int64_t core_create_fee = usd_create_fee * cer_core_amount / cer_usd_amount; + int64_t core_cancel_fee = usd_cancel_fee * cer_core_amount / cer_usd_amount; + BOOST_CHECK( core_cancel_fee >= order_cancel_fee ); + + uint32_t skip = database::skip_witness_signature + | database::skip_transaction_signatures + | database::skip_transaction_dupe_check + | database::skip_block_size_check + | database::skip_tapos_check + | database::skip_merkle_check + ; + + generate_block( skip ); + + flat_set< fee_parameters > new_fees; + { + limit_order_create_operation::fee_parameters_type create_fee_params; + create_fee_params.fee = order_create_fee; + new_fees.insert( create_fee_params ); + } + { + limit_order_cancel_operation::fee_parameters_type cancel_fee_params; + cancel_fee_params.fee = order_cancel_fee; + new_fees.insert( cancel_fee_params ); + } + { + transfer_operation::fee_parameters_type transfer_fee_params; + transfer_fee_params.fee = 0; + transfer_fee_params.price_per_kbyte = 0; + new_fees.insert( transfer_fee_params ); + } + + // enable_fees() and change_fees() modifies DB directly, and results will be overwritten by block generation + // so we have to do it every time we stop generating/popping blocks and start doing tx's + enable_fees(); + change_fees( new_fees ); + + // AAAAGGHH create_sell_order reads trx.expiration #469 + set_expiration( db, trx ); + + // prepare params + const chain_parameters& params = db.get_global_properties().parameters; + time_point_sec max_exp = time_point_sec::maximum(); + time_point_sec exp = HARDFORK_CORE_604_TIME + fc::seconds( 
params.block_interval * (params.maintenance_skip_slots + 1) * 3 ); + time_point_sec exp1 = HARDFORK_CORE_604_TIME + fc::seconds( params.block_interval * (params.maintenance_skip_slots + 1) * 13 ); + time_point_sec exp2 = HARDFORK_CORE_604_TIME + fc::seconds( params.block_interval * (params.maintenance_skip_slots + 1) * 23 ); + price cer = usd_id( db ).options.core_exchange_rate; + const auto* usd_stat = &usd_id( db ).dynamic_asset_data_id( db ); + + // balance data + int64_t alice_bc = alice_b0, bob_bc = bob_b0; // core balance + int64_t alice_bu = alice_b0, bob_bu = bob_b0; // usd balance + int64_t pool_b = pool_0, accum_b = accum_0; + + // prepare orders + BOOST_TEST_MESSAGE( "Creating orders those will never match: ao1, ao2, bo1, bo2 .." ); + // ao1: won't expire, won't match, fee in core + limit_order_id_type ao1_id = create_sell_order( alice_id, asset(1000), asset(100000, usd_id) )->id; + BOOST_CHECK( db.find( ao1_id ) != nullptr ); + // ao2: will expire, won't match, fee in core + limit_order_id_type ao2_id = create_sell_order( alice_id, asset(800), asset(100000, usd_id), exp )->id; + BOOST_CHECK( db.find( ao2_id ) != nullptr ); + // bo1: won't expire, won't match, fee in usd + limit_order_id_type bo1_id = create_sell_order( bob_id, asset(1000, usd_id), asset(100000), max_exp, cer )->id; + BOOST_CHECK( db.find( bo1_id ) != nullptr ); + // bo2: will expire, won't match, fee in usd + limit_order_id_type bo2_id = create_sell_order( bob_id, asset(800, usd_id), asset(100000), exp, cer )->id; + BOOST_CHECK( db.find( bo2_id ) != nullptr ); + + alice_bc -= order_create_fee * 2; + alice_bc -= 1000; + alice_bc -= 800; + bob_bu -= usd_create_fee * 2; + bob_bu -= 1000; + bob_bu -= 800; + pool_b -= core_create_fee * 2; + accum_b += usd_create_fee * 2; + int64_t ao1_remain = 1000; + int64_t ao2_remain = 800; + int64_t bo1_remain = 1000; + int64_t bo2_remain = 800; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // ao3: won't expire, partially match before hard fork 445, fee in core + BOOST_TEST_MESSAGE( "Creating order ao3 .." ); // 1:30 + limit_order_id_type ao3_id = create_sell_order( alice_id, asset(900), asset(27000, usd_id) )->id; + BOOST_CHECK( db.find( ao3_id ) != nullptr ); + create_sell_order( bob_id, asset(6000, usd_id), asset(200) ); + + alice_bc -= order_create_fee; + alice_bc -= 900; + alice_bu += 6000; + bob_bc -= order_create_fee; + bob_bu -= 6000; + bob_bc += 200; + int64_t ao3_remain = 900 - 200; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // ao4: will expire, will partially match before hard fork 445, fee in core + BOOST_TEST_MESSAGE( "Creating order ao4 .." 
); // 1:20 + limit_order_id_type ao4_id = create_sell_order( alice_id, asset(700), asset(14000, usd_id), exp )->id; + BOOST_CHECK( db.find( ao4_id ) != nullptr ); + create_sell_order( bob_id, asset(2000, usd_id), asset(100) ); + + alice_bc -= order_create_fee; + alice_bc -= 700; + alice_bu += 2000; + bob_bc -= order_create_fee; + bob_bu -= 2000; + bob_bc += 100; + int64_t ao4_remain = 700 - 100; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // bo3: won't expire, will partially match before hard fork 445, fee in usd + BOOST_TEST_MESSAGE( "Creating order bo3 .." ); // 1:30 + limit_order_id_type bo3_id = create_sell_order( bob_id, asset(500, usd_id), asset(15000), max_exp, cer )->id; + BOOST_CHECK( db.find( bo3_id ) != nullptr ); + create_sell_order( alice_id, asset(4500), asset(150, usd_id) ); + + alice_bc -= order_create_fee; + alice_bc -= 4500; + alice_bu += 150; + bob_bu -= usd_create_fee; + bob_bu -= 500; + bob_bc += 4500; + pool_b -= core_create_fee; + accum_b += usd_create_fee; + int64_t bo3_remain = 500 - 150; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // bo4: will expire, will partially match before hard fork 445, fee in usd + BOOST_TEST_MESSAGE( "Creating order bo4 .." ); // 1:20 + limit_order_id_type bo4_id = create_sell_order( bob_id, asset(300, usd_id), asset(6000), exp, cer )->id; + BOOST_CHECK( db.find( bo4_id ) != nullptr ); + create_sell_order( alice_id, asset(1400), asset(70, usd_id) ); + + alice_bc -= order_create_fee; + alice_bc -= 1400; + alice_bu += 70; + bob_bu -= usd_create_fee; + bob_bu -= 300; + bob_bc += 1400; + pool_b -= core_create_fee; + accum_b += usd_create_fee; + int64_t bo4_remain = 300 - 70; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + + // ao11: won't expire, partially match after hard fork core-604, fee in core + BOOST_TEST_MESSAGE( "Creating order ao11 .." 
); // 1:18 + limit_order_id_type ao11_id = create_sell_order( alice_id, asset(510), asset(9180, usd_id) )->id; + BOOST_CHECK( db.find( ao11_id ) != nullptr ); + + alice_bc -= order_create_fee; + alice_bc -= 510; + int64_t ao11_remain = 510; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // ao12: will expire, partially match after hard fork core-604, fee in core + BOOST_TEST_MESSAGE( "Creating order ao12 .." ); // 1:16 + limit_order_id_type ao12_id = create_sell_order( alice_id, asset(256), asset(4096, usd_id), exp2 )->id; + BOOST_CHECK( db.find( ao12_id ) != nullptr ); + + alice_bc -= order_create_fee; + alice_bc -= 256; + int64_t ao12_remain = 256; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // bo11: won't expire, partially match after hard fork core-604, fee in usd + BOOST_TEST_MESSAGE( "Creating order bo11 .." ); // 1:18 + limit_order_id_type bo11_id = create_sell_order( bob_id, asset(388, usd_id), asset(6984), max_exp, cer )->id; + BOOST_CHECK( db.find( bo11_id ) != nullptr ); + + bob_bu -= usd_create_fee; + bob_bu -= 388; + pool_b -= core_create_fee; + accum_b += usd_create_fee; + int64_t bo11_remain = 388; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // bo12: will expire, partially match after hard fork core-604, fee in usd + BOOST_TEST_MESSAGE( "Creating order bo12 .." ); // 1:17 + limit_order_id_type bo12_id = create_sell_order( bob_id, asset(213, usd_id), asset(3621), exp2, cer )->id; + BOOST_CHECK( db.find( bo12_id ) != nullptr ); + + bob_bu -= usd_create_fee; + bob_bu -= 213; + pool_b -= core_create_fee; + accum_b += usd_create_fee; + int64_t bo12_remain = 213; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // ao5: won't expire, partially match after hard fork 445, fee in core + BOOST_TEST_MESSAGE( "Creating order ao5 .." 
); // 1:15 + limit_order_id_type ao5_id = create_sell_order( alice_id, asset(606), asset(9090, usd_id) )->id; + BOOST_CHECK( db.find( ao5_id ) != nullptr ); + + alice_bc -= order_create_fee; + alice_bc -= 606; + int64_t ao5_remain = 606; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // ao6: will expire, partially match after hard fork 445, fee in core + if( false ) { // only can have either ao5 or ao6, can't have both + BOOST_TEST_MESSAGE( "Creating order ao6 .." ); // 3:40 = 1:13.33333 + limit_order_id_type ao6_id = create_sell_order( alice_id, asset(333), asset(4440, usd_id), exp )->id; + BOOST_CHECK( db.find( ao6_id ) != nullptr ); + + alice_bc -= order_create_fee; + alice_bc -= 333; + // int64_t ao6_remain = 333; // only can have either ao5 or ao6, can't have both + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + } + + // bo5: won't expire, partially match after hard fork 445, fee in usd + if( false ) { // only can have either bo5 or bo6, can't have both + BOOST_TEST_MESSAGE( "Creating order bo5 .." ); // 1:16 + limit_order_id_type bo5_id = create_sell_order( bob_id, asset(255, usd_id), asset(4080), max_exp, cer )->id; + BOOST_CHECK( db.find( bo5_id ) != nullptr ); + + bob_bu -= usd_create_fee; + bob_bu -= 255; + pool_b -= core_create_fee; + accum_b += usd_create_fee; + //int64_t bo5_remain = 255; // only can have either bo5 or bo6, can't have both + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + } + + // bo6: will expire, partially match after hard fork 445, fee in usd + BOOST_TEST_MESSAGE( "Creating order bo6 .." ); // 1:10 + limit_order_id_type bo6_id = create_sell_order( bob_id, asset(127, usd_id), asset(1270), exp, cer )->id; + BOOST_CHECK( db.find( bo6_id ) != nullptr ); + + bob_bu -= usd_create_fee; + bob_bu -= 127; + pool_b -= core_create_fee; + accum_b += usd_create_fee; + int64_t bo6_remain = 127; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // generate block so the orders will be in db before hard fork 445 + BOOST_TEST_MESSAGE( "Generating blocks, passing hard fork 445 ..."
); + generate_block( skip ); + + // generate blocks util hard fork 445 + generate_blocks( HARDFORK_445_TIME, true, skip ); + generate_block( skip ); + + // nothing will change + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // prepare for new transactions + enable_fees(); + change_fees( new_fees ); + usd_stat = &usd_id( db ).dynamic_asset_data_id( db ); + set_expiration( db, trx ); + + // partially fill ao6 + if( false ) { // only can have either ao5 or ao6, can't have both + BOOST_TEST_MESSAGE( "Partially fill ao6 .." ); // 3:40 + create_sell_order( bob_id, asset(880, usd_id), asset(66) ); + + alice_bu += 880; + bob_bc -= order_create_fee; + bob_bu -= 880; + bob_bc += 66; + //ao6_remain -= 66; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + } + + // partially fill bo6 + BOOST_TEST_MESSAGE( "Partially fill bo6 .." ); // 1:10 + create_sell_order( alice_id, asset(590), asset(59, usd_id) ); + + alice_bc -= order_create_fee; + alice_bc -= 590; + alice_bu += 59; + bob_bc += 590; + bo6_remain -= 59; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // partially fill ao5 + BOOST_TEST_MESSAGE( "Partially fill ao5 .." ); // 1:15 + create_sell_order( bob_id, asset(930, usd_id), asset(62) ); + + alice_bu += 930; + bob_bc -= order_create_fee; + bob_bu -= 930; + bob_bc += 62; + ao5_remain -= 62; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // partially fill bo5 + if( false ) { // only can have either bo5 or bo6, can't have both + BOOST_TEST_MESSAGE( "Partially fill bo5 .." ); // 1:16 + create_sell_order( alice_id, asset(240), asset(15, usd_id) ); + + alice_bc -= order_create_fee; + alice_bc -= 240; + alice_bu += 15; + bob_bc += 240; + //bo5_remain -= 15; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + } + + // prepare more orders + BOOST_TEST_MESSAGE( "Creating more orders those will never match: ao7, ao8, bo7, bo8 .." 
); // ~ 1:100 + // ao7: won't expire, won't match, fee in core + limit_order_id_type ao7_id = create_sell_order( alice_id, asset(1003), asset(100000, usd_id) )->id; + BOOST_CHECK( db.find( ao7_id ) != nullptr ); + // ao8: will expire, won't match, fee in core + limit_order_id_type ao8_id = create_sell_order( alice_id, asset(803), asset(100000, usd_id), exp1 )->id; + BOOST_CHECK( db.find( ao8_id ) != nullptr ); + // bo7: won't expire, won't match, fee in usd + limit_order_id_type bo7_id = create_sell_order( bob_id, asset(1003, usd_id), asset(100000), max_exp, cer )->id; + BOOST_CHECK( db.find( bo7_id ) != nullptr ); + // bo8: will expire, won't match, fee in usd + limit_order_id_type bo8_id = create_sell_order( bob_id, asset(803, usd_id), asset(100000), exp1, cer )->id; + BOOST_CHECK( db.find( bo8_id ) != nullptr ); + + alice_bc -= order_create_fee * 2; + alice_bc -= 1003; + alice_bc -= 803; + bob_bu -= usd_create_fee * 2; + bob_bu -= 1003; + bob_bu -= 803; + pool_b -= core_create_fee * 2; + accum_b += usd_create_fee * 2; + int64_t ao7_remain = 1003; + int64_t ao8_remain = 803; + int64_t bo7_remain = 1003; + int64_t bo8_remain = 803; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // ao9: won't expire, partially match before hard fork core-604, fee in core + BOOST_TEST_MESSAGE( "Creating order ao9 .." ); // 1:3 + limit_order_id_type ao9_id = create_sell_order( alice_id, asset(909), asset(2727, usd_id) )->id; + BOOST_CHECK( db.find( ao9_id ) != nullptr ); + create_sell_order( bob_id, asset(606, usd_id), asset(202) ); + + alice_bc -= order_create_fee; + alice_bc -= 909; + alice_bu += 606; + bob_bc -= order_create_fee; + bob_bu -= 606; + bob_bc += 202; + int64_t ao9_remain = 909 - 202; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // ao10: will expire, will partially match before hard fork core-604, fee in core + BOOST_TEST_MESSAGE( "Creating order ao10 .." ); // 1:2 + limit_order_id_type ao10_id = create_sell_order( alice_id, asset(707), asset(1414, usd_id), exp )->id; + BOOST_CHECK( db.find( ao10_id ) != nullptr ); + create_sell_order( bob_id, asset(202, usd_id), asset(101) ); + + alice_bc -= order_create_fee; + alice_bc -= 707; + alice_bu += 202; + bob_bc -= order_create_fee; + bob_bu -= 202; + bob_bc += 101; + int64_t ao10_remain = 707 - 101; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // bo9: won't expire, will partially match before hard fork core-604, fee in usd + BOOST_TEST_MESSAGE( "Creating order bo9 .." 
); // 1:3 + limit_order_id_type bo9_id = create_sell_order( bob_id, asset(505, usd_id), asset(1515), max_exp, cer )->id; + BOOST_CHECK( db.find( bo9_id ) != nullptr ); + create_sell_order( alice_id, asset(453), asset(151, usd_id) ); + + alice_bc -= order_create_fee; + alice_bc -= 453; + alice_bu += 151; + bob_bu -= usd_create_fee; + bob_bu -= 505; + bob_bc += 453; + pool_b -= core_create_fee; + accum_b += usd_create_fee; + int64_t bo9_remain = 505 - 151; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // bo10: will expire, will partially match before hard fork core-604, fee in usd + BOOST_TEST_MESSAGE( "Creating order bo10 .." ); // 1:2 + limit_order_id_type bo10_id = create_sell_order( bob_id, asset(302, usd_id), asset(604), exp, cer )->id; + BOOST_CHECK( db.find( bo10_id ) != nullptr ); + create_sell_order( alice_id, asset(142), asset(71, usd_id) ); + + alice_bc -= order_create_fee; + alice_bc -= 142; + alice_bu += 71; + bob_bu -= usd_create_fee; + bob_bu -= 302; + bob_bc += 142; + pool_b -= core_create_fee; + accum_b += usd_create_fee; + int64_t bo10_remain = 302 - 71; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // ao13: won't expire, partially match after hard fork core-604, fee in core + BOOST_TEST_MESSAGE( "Creating order ao13 .." ); // 1:1.5 + limit_order_id_type ao13_id = create_sell_order( alice_id, asset(424), asset(636, usd_id) )->id; + BOOST_CHECK( db.find( ao13_id ) != nullptr ); + + alice_bc -= order_create_fee; + alice_bc -= 424; + int64_t ao13_remain = 424; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // ao14: will expire, partially match after hard fork core-604, fee in core + BOOST_TEST_MESSAGE( "Creating order ao14 .." ); // 1:1.2 + limit_order_id_type ao14_id = create_sell_order( alice_id, asset(525), asset(630, usd_id), exp )->id; + BOOST_CHECK( db.find( ao14_id ) != nullptr ); + + alice_bc -= order_create_fee; + alice_bc -= 525; + int64_t ao14_remain = 525; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // bo13: won't expire, partially match after hard fork core-604, fee in usd + BOOST_TEST_MESSAGE( "Creating order bo13 .." 
); // 1:1.5 + limit_order_id_type bo13_id = create_sell_order( bob_id, asset(364, usd_id), asset(546), max_exp, cer )->id; + BOOST_CHECK( db.find( bo13_id ) != nullptr ); + + bob_bu -= usd_create_fee; + bob_bu -= 364; + pool_b -= core_create_fee; + accum_b += usd_create_fee; + int64_t bo13_remain = 364; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // bo14: will expire, partially match after hard fork core-604, fee in usd + BOOST_TEST_MESSAGE( "Creating order bo14 .." ); // 1:1.2 + limit_order_id_type bo14_id = create_sell_order( bob_id, asset(365, usd_id), asset(438), exp, cer )->id; + BOOST_CHECK( db.find( bo14_id ) != nullptr ); + + bob_bu -= usd_create_fee; + bob_bu -= 365; + pool_b -= core_create_fee; + accum_b += usd_create_fee; + int64_t bo14_remain = 365; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // generate block so the orders will be in db before hard fork core-604 + BOOST_TEST_MESSAGE( "Generating blocks, passing hard fork core-604 ..." ); + generate_block( skip ); + + // generate blocks util hard fork core-604 + generate_blocks( HARDFORK_CORE_604_TIME, true, skip ); + generate_block( skip ); + + // nothing will change + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // prepare for new transactions + enable_fees(); + change_fees( new_fees ); + usd_stat = &usd_id( db ).dynamic_asset_data_id( db ); + set_expiration( db, trx ); + + // partially fill ao14 + BOOST_TEST_MESSAGE( "Partially fill ao14 .." ); // 1:1.2 + create_sell_order( bob_id, asset(72, usd_id), asset(60) ); + + alice_bu += 72; + bob_bc -= order_create_fee; + bob_bu -= 72; + bob_bc += 60; + ao14_remain -= 60; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // partially fill bo14 + BOOST_TEST_MESSAGE( "Partially fill bo14 .." 
); // 1:1.2 + create_sell_order( alice_id, asset(66), asset(55, usd_id) ); + + alice_bc -= order_create_fee; + alice_bc -= 66; + alice_bu += 55; + bob_bc += 66; + bo14_remain -= 55; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // generate block to save the changes + BOOST_TEST_MESSAGE( "Generating blocks ..." ); + generate_block( skip ); + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // generate more blocks, so some orders will expire + generate_blocks( exp, true, skip ); + + // don't refund fee, only refund remaining funds, for: + // * orders created before hard fork 445 : ao2, ao4, ao6, bo2, bo4, bo6 + // * partially filled orders (cancellation fee capped at 0) : ao10, ao14, bo10, bo14 + BOOST_TEST_MESSAGE( "Checking expired orders: ao2, ao4, ao6, ao10, ao14, bo2, bo4, bo6, bo10, bo14 .." ); + alice_bc += ao2_remain; + alice_bc += ao4_remain; + //alice_bc += ao6_remain; // can only have ao5 or ao6 but not both + alice_bc += ao10_remain; + alice_bc += ao14_remain; + bob_bu += bo2_remain; + bob_bu += bo4_remain; + bob_bu += bo6_remain; // can only have bo5 or bo6 but not both + bob_bu += bo10_remain; + bob_bu += bo14_remain; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // prepare for new transactions + enable_fees(); + change_fees( new_fees ); + usd_stat = &usd_id( db ).dynamic_asset_data_id( db ); + set_expiration( db, trx ); + + // partially fill ao13 + BOOST_TEST_MESSAGE( "Partially fill ao13 .." ); // 1:1.5 + create_sell_order( bob_id, asset(78, usd_id), asset(52) ); + + alice_bu += 78; + bob_bc -= order_create_fee; + bob_bu -= 78; + bob_bc += 52; + ao13_remain -= 52; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // partially fill bo13 + BOOST_TEST_MESSAGE( "Partially fill bo13 .." 
); // 1:1.5 + create_sell_order( alice_id, asset(63), asset(42, usd_id) ); + + alice_bc -= order_create_fee; + alice_bc -= 63; + alice_bu += 42; + bob_bc += 63; + bo13_remain -= 42; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // don't refund fee, only refund remaining funds, for manually cancellations with an explicit fee: + // * orders created before hard fork 445 : ao1, ao3, ao5, bo1, bo3, bo5 + // * partially filled orders (cancellation fee capped at 0) : ao9, ao13, bo9, bo13 + + // cancel ao1 + BOOST_TEST_MESSAGE( "Cancel order ao1 .." ); + cancel_limit_order( ao1_id(db) ); + + alice_bc += ao1_remain; + alice_bc -= order_cancel_fee; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // cancel bo1 + BOOST_TEST_MESSAGE( "Cancel order bo1 .." ); + cancel_limit_order( bo1_id(db) ); + + bob_bu += bo1_remain; + bob_bc -= order_cancel_fee; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // cancel ao3 + BOOST_TEST_MESSAGE( "Cancel order ao3 .." ); + cancel_limit_order( ao3_id(db) ); + + alice_bc += ao3_remain; + alice_bc -= order_cancel_fee; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // cancel bo3 + BOOST_TEST_MESSAGE( "Cancel order bo3 .." ); + cancel_limit_order( bo3_id(db) ); + + bob_bu += bo3_remain; + bob_bc -= order_cancel_fee; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // cancel ao5 + BOOST_TEST_MESSAGE( "Cancel order ao5 .." 
); + cancel_limit_order( ao5_id(db) ); + + alice_bc += ao5_remain; + alice_bc -= order_cancel_fee; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // cancel bo5 + if( false ) { // can only have bo5 or bo6 but not both + BOOST_TEST_MESSAGE( "Cancel order bo5 .." ); + //cancel_limit_order( bo5_id(db) ); + + //bob_bu += bo5_remain; + bob_bc -= order_cancel_fee; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + } + + // cancel ao9 + BOOST_TEST_MESSAGE( "Cancel order ao9 .." ); + cancel_limit_order( ao9_id(db) ); + + alice_bc += ao9_remain; + alice_bc -= order_cancel_fee; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // cancel bo9 + BOOST_TEST_MESSAGE( "Cancel order bo9 .." ); + cancel_limit_order( bo9_id(db) ); + + bob_bu += bo9_remain; + bob_bc -= order_cancel_fee; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // cancel ao13 + BOOST_TEST_MESSAGE( "Cancel order ao13 .." ); + cancel_limit_order( ao13_id(db) ); + + alice_bc += ao13_remain; + alice_bc -= order_cancel_fee; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // cancel bo13 + BOOST_TEST_MESSAGE( "Cancel order bo13 .." ); + cancel_limit_order( bo13_id(db) ); + + bob_bu += bo13_remain; + bob_bc -= order_cancel_fee; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + + // generate block to save the changes + BOOST_TEST_MESSAGE( "Generating blocks ..." 
); + generate_block( skip ); + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // generate blocks util exp1, so some orders will expire + BOOST_TEST_MESSAGE( "Generating blocks ..." ); + generate_block( skip ); + generate_blocks( exp1, true, skip ); + + // orders created after hard fork 445 but before core-604, no partially filled, + // will refund remaining funds, and will refund create fee in core (minus cancel fee, capped) + BOOST_TEST_MESSAGE( "Checking expired orders: ao8, bo8 .." ); + alice_bc += ao8_remain; + alice_bc += std::max(order_create_fee - order_cancel_fee, int64_t(0)); + bob_bu += bo8_remain; + bob_bc += std::max(core_create_fee - order_cancel_fee, int64_t(0)); + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // prepare for new transactions + enable_fees(); + change_fees( new_fees ); + usd_stat = &usd_id( db ).dynamic_asset_data_id( db ); + set_expiration( db, trx ); + + // orders created after hard fork 445 but before core-604, no partially filled, + // when manually cancelling (with an explicit fee), + // will refund remaining funds, and will refund create fee in core + + // cancel ao7 + BOOST_TEST_MESSAGE( "Cancel order ao7 .." ); + cancel_limit_order( ao7_id(db) ); + + alice_bc += ao7_remain; + alice_bc -= order_cancel_fee; + alice_bc += order_create_fee; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // cancel bo7 + BOOST_TEST_MESSAGE( "Cancel order bo7 .." ); + cancel_limit_order( bo7_id(db) ); + + bob_bu += bo7_remain; + bob_bc -= order_cancel_fee; + bob_bc += core_create_fee; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // partially fill ao12 + BOOST_TEST_MESSAGE( "Partially fill ao12 .." 
); // 1:16 + create_sell_order( bob_id, asset(688, usd_id), asset(43) ); + + alice_bu += 688; + bob_bc -= order_create_fee; + bob_bu -= 688; + bob_bc += 43; + ao12_remain -= 43; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // partially fill bo12 + BOOST_TEST_MESSAGE( "Partially fill bo12 .." ); // 1:17 + create_sell_order( alice_id, asset(629), asset(37, usd_id) ); + + alice_bc -= order_create_fee; + alice_bc -= 629; + alice_bu += 37; + bob_bc += 629; + bo12_remain -= 37; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // generate block to save the changes + BOOST_TEST_MESSAGE( "Generating blocks ..." ); + generate_block( skip ); + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // generate blocks util exp2, so some orders will expire + generate_blocks( exp2, true, skip ); + + // no fee refund for orders created before hard fork 445, cancellation fee capped at 0 + // remaining funds will be refunded + BOOST_TEST_MESSAGE( "Checking expired orders: ao12, bo12 .." ); + alice_bc += ao12_remain; + bob_bu += bo12_remain; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // prepare for new transactions + enable_fees(); + change_fees( new_fees ); + usd_stat = &usd_id( db ).dynamic_asset_data_id( db ); + set_expiration( db, trx ); + + // partially fill ao11 + BOOST_TEST_MESSAGE( "Partially fill ao11 .." ); // 1:18 + create_sell_order( bob_id, asset(1422, usd_id), asset(79) ); + + alice_bu += 1422; + bob_bc -= order_create_fee; + bob_bu -= 1422; + bob_bc += 79; + ao11_remain -= 79; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // partially fill bo11 + BOOST_TEST_MESSAGE( "Partially fill bo11 .." 
); // 1:18 + create_sell_order( alice_id, asset(1494), asset(83, usd_id) ); + + alice_bc -= order_create_fee; + alice_bc -= 1494; + alice_bu += 83; + bob_bc += 1494; + bo11_remain -= 83; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // no fee refund for orders created before hard fork 445, if manually cancelled with an explicit fee. + // remaining funds will be refunded + + // cancel ao11 + BOOST_TEST_MESSAGE( "Cancel order ao11 .." ); + cancel_limit_order( ao11_id(db) ); + + alice_bc += ao11_remain; + alice_bc -= order_cancel_fee; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // cancel bo11 + BOOST_TEST_MESSAGE( "Cancel order bo11 .." ); + cancel_limit_order( bo11_id(db) ); + + bob_bu += bo11_remain; + bob_bc -= order_cancel_fee; + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + // generate block to save the changes + BOOST_TEST_MESSAGE( "Generating blocks ..." 
); + generate_block( skip ); + + BOOST_CHECK_EQUAL( get_balance( alice_id, core_id ), alice_bc ); + BOOST_CHECK_EQUAL( get_balance( alice_id, usd_id ), alice_bu ); + BOOST_CHECK_EQUAL( get_balance( bob_id, core_id ), bob_bc ); + BOOST_CHECK_EQUAL( get_balance( bob_id, usd_id ), bob_bu ); + BOOST_CHECK_EQUAL( usd_stat->fee_pool.value, pool_b ); + BOOST_CHECK_EQUAL( usd_stat->accumulated_fees.value, accum_b ); + + } + FC_LOG_AND_RETHROW() +} + BOOST_AUTO_TEST_CASE( stealth_fba_test ) { try @@ -943,4 +3643,219 @@ BOOST_AUTO_TEST_CASE( stealth_fba_test ) } } +BOOST_AUTO_TEST_CASE( defaults_test ) +{ try { + fee_schedule schedule; + const limit_order_create_operation::fee_parameters_type default_order_fee {}; + + // no fees set yet -> default + asset fee = schedule.calculate_fee( limit_order_create_operation() ); + BOOST_CHECK_EQUAL( (int64_t)default_order_fee.fee, fee.amount.value ); + + limit_order_create_operation::fee_parameters_type new_order_fee; new_order_fee.fee = 123; + // set fee + check + schedule.parameters.insert( new_order_fee ); + fee = schedule.calculate_fee( limit_order_create_operation() ); + BOOST_CHECK_EQUAL( (int64_t)new_order_fee.fee, fee.amount.value ); + + // bid_collateral fee defaults to call_order_update fee + // call_order_update fee is unset -> default + const call_order_update_operation::fee_parameters_type default_short_fee {}; + call_order_update_operation::fee_parameters_type new_short_fee; new_short_fee.fee = 123; + fee = schedule.calculate_fee( bid_collateral_operation() ); + BOOST_CHECK_EQUAL( (int64_t)default_short_fee.fee, fee.amount.value ); + + // set call_order_update fee + check bid_collateral fee + schedule.parameters.insert( new_short_fee ); + fee = schedule.calculate_fee( bid_collateral_operation() ); + BOOST_CHECK_EQUAL( (int64_t)new_short_fee.fee, fee.amount.value ); + + // set bid_collateral fee + check + bid_collateral_operation::fee_parameters_type new_bid_fee; new_bid_fee.fee = 124; + schedule.parameters.insert( new_bid_fee ); + fee = schedule.calculate_fee( bid_collateral_operation() ); + BOOST_CHECK_EQUAL( (int64_t)new_bid_fee.fee, fee.amount.value ); + } + catch( const fc::exception& e ) + { + elog( "caught exception ${e}", ("e", e.to_detail_string()) ); + throw; + } +} + +BOOST_AUTO_TEST_CASE( issue_429_test ) +{ + try + { + ACTORS((alice)); + + transfer( committee_account, alice_id, asset( 1000000 * asset::scaled_precision( asset_id_type()(db).precision ) ) ); + + // make sure the database requires our fee to be nonzero + enable_fees(); + + const auto& fees = *db.get_global_properties().parameters.current_fees; + auto fees_to_pay = fees.get(); + + { + signed_transaction tx; + asset_create_operation op; + op.issuer = alice_id; + op.symbol = "ALICE"; + op.common_options.core_exchange_rate = asset( 1 ) / asset( 1, asset_id_type( 1 ) ); + op.fee = asset( (fees_to_pay.long_symbol + fees_to_pay.price_per_kbyte) & (~1) ); + tx.operations.push_back( op ); + set_expiration( db, tx ); + sign( tx, alice_private_key ); + PUSH_TX( db, tx ); + } + + verify_asset_supplies( db ); + + { + signed_transaction tx; + asset_create_operation op; + op.issuer = alice_id; + op.symbol = "ALICE.ODD"; + op.common_options.core_exchange_rate = asset( 1 ) / asset( 1, asset_id_type( 1 ) ); + op.fee = asset((fees_to_pay.long_symbol + fees_to_pay.price_per_kbyte) | 1); + tx.operations.push_back( op ); + set_expiration( db, tx ); + sign( tx, alice_private_key ); + PUSH_TX( db, tx ); + } + + verify_asset_supplies( db ); + + generate_blocks( HARDFORK_CORE_429_TIME + 10 ); + + 
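+ // descriptive note (not in the original patch): after the core-429 hard fork has passed, the block below creates one more asset ("ALICE.ODDER") paying an odd fee amount, and the transaction is expected to be accepted just like the pre-fork creations above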
{ + signed_transaction tx; + asset_create_operation op; + op.issuer = alice_id; + op.symbol = "ALICE.ODDER"; + op.common_options.core_exchange_rate = asset( 1 ) / asset( 1, asset_id_type( 1 ) ); + op.fee = asset((fees_to_pay.long_symbol + fees_to_pay.price_per_kbyte) | 1); + tx.operations.push_back( op ); + set_expiration( db, tx ); + sign( tx, alice_private_key ); + PUSH_TX( db, tx ); + } + + verify_asset_supplies( db ); + } + catch( const fc::exception& e ) + { + edump((e.to_detail_string())); + throw; + } +} + +BOOST_AUTO_TEST_CASE( issue_433_test ) +{ + try + { + ACTORS((alice)); + + auto& core = asset_id_type()(db); + + transfer( committee_account, alice_id, asset( 1000000 * asset::scaled_precision( core.precision ) ) ); + + const auto& myusd = create_user_issued_asset( "MYUSD", alice, 0 ); + issue_uia( alice, myusd.amount( 2000000000 ) ); + + // make sure the database requires our fee to be nonzero + enable_fees(); + + const auto& fees = *db.get_global_properties().parameters.current_fees; + const auto asset_create_fees = fees.get(); + + fund_fee_pool( alice, myusd, 5*asset_create_fees.long_symbol ); + + asset_create_operation op; + op.issuer = alice_id; + op.symbol = "ALICE"; + op.common_options.core_exchange_rate = asset( 1 ) / asset( 1, asset_id_type( 1 ) ); + op.fee = myusd.amount( ((asset_create_fees.long_symbol + asset_create_fees.price_per_kbyte) & (~1)) ); + signed_transaction tx; + tx.operations.push_back( op ); + set_expiration( db, tx ); + sign( tx, alice_private_key ); + PUSH_TX( db, tx ); + + verify_asset_supplies( db ); + } + catch( const fc::exception& e ) + { + edump((e.to_detail_string())); + throw; + } +} + +BOOST_AUTO_TEST_CASE( issue_433_indirect_test ) +{ + try + { + ACTORS((alice)); + + auto& core = asset_id_type()(db); + + transfer( committee_account, alice_id, asset( 1000000 * asset::scaled_precision( core.precision ) ) ); + + const auto& myusd = create_user_issued_asset( "MYUSD", alice, 0 ); + issue_uia( alice, myusd.amount( 2000000000 ) ); + + // make sure the database requires our fee to be nonzero + enable_fees(); + + const auto& fees = *db.get_global_properties().parameters.current_fees; + const auto asset_create_fees = fees.get(); + + fund_fee_pool( alice, myusd, 5*asset_create_fees.long_symbol ); + + asset_create_operation op; + op.issuer = alice_id; + op.symbol = "ALICE"; + op.common_options.core_exchange_rate = asset( 1 ) / asset( 1, asset_id_type( 1 ) ); + op.fee = myusd.amount( ((asset_create_fees.long_symbol + asset_create_fees.price_per_kbyte) & (~1)) ); + + const auto proposal_create_fees = fees.get(); + proposal_create_operation prop; + prop.fee_paying_account = alice_id; + prop.proposed_ops.emplace_back( op ); + prop.expiration_time = db.head_block_time() + fc::days(1); + prop.fee = asset( proposal_create_fees.fee + proposal_create_fees.price_per_kbyte ); + object_id_type proposal_id; + { + signed_transaction tx; + tx.operations.push_back( prop ); + set_expiration( db, tx ); + sign( tx, alice_private_key ); + proposal_id = PUSH_TX( db, tx ).operation_results.front().get(); + } + const proposal_object& proposal = db.get( proposal_id ); + + const auto proposal_update_fees = fees.get(); + proposal_update_operation pup; + pup.proposal = proposal.id; + pup.fee_paying_account = alice_id; + pup.active_approvals_to_add.insert(alice_id); + pup.fee = asset( proposal_update_fees.fee + proposal_update_fees.price_per_kbyte ); + { + signed_transaction tx; + tx.operations.push_back( pup ); + set_expiration( db, tx ); + sign( tx, alice_private_key ); + 
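+ // descriptive note (not in the original patch): pushing Alice's approval is expected to satisfy the proposal's required authority, so the proposed asset_create should execute with its fee paid in MYUSD via the previously funded fee pool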
PUSH_TX( db, tx ); + } + + verify_asset_supplies( db ); + } + catch( const fc::exception& e ) + { + edump((e.to_detail_string())); + throw; + } +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/grouped_orders_api_tests.cpp b/tests/tests/grouped_orders_api_tests.cpp new file mode 100644 index 0000000000..e08c7d5d0e --- /dev/null +++ b/tests/tests/grouped_orders_api_tests.cpp @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2018 manikey123, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include + +#include +#include + +#include "../common/database_fixture.hpp" + +using namespace graphene::chain; +using namespace graphene::chain::test; +using namespace graphene::app; + +BOOST_FIXTURE_TEST_SUITE(grouped_orders_api_tests, database_fixture) +BOOST_AUTO_TEST_CASE(api_limit_get_grouped_limit_orders) { + try + { + app.enable_plugin("grouped_orders"); + graphene::app::orders_api orders_api(app); + optional< api_access_info > acc; + optional start; + + //account_id_type() do 3 ops + create_bitasset("USD", account_id_type()); + create_account("dan"); + create_account("bob"); + asset_id_type bit_jmj_id = create_bitasset("JMJBIT").id; + generate_block(); + fc::usleep(fc::milliseconds(100)); + GRAPHENE_CHECK_THROW(orders_api.get_grouped_limit_orders(std::string( static_cast(asset_id_type())), std::string( static_cast(asset_id_type())),10, start,260), fc::exception); + vector< limit_order_group > orders =orders_api.get_grouped_limit_orders(std::string( static_cast(asset_id_type())), std::string( static_cast(bit_jmj_id)), 10,start,240); + BOOST_REQUIRE_EQUAL( orders.size(), 0u); + }catch (fc::exception &e) + { + edump((e.to_detail_string())); + throw; + } +} +BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/history_api_tests.cpp b/tests/tests/history_api_tests.cpp new file mode 100644 index 0000000000..53e426ca6c --- /dev/null +++ b/tests/tests/history_api_tests.cpp @@ -0,0 +1,768 @@ +/* + * Copyright (c) 2015 Cryptonomex, Inc., and contributors. 
+ * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include + +#include + +#include + +#include + +#include "../common/database_fixture.hpp" + +using namespace graphene::chain; +using namespace graphene::chain::test; +using namespace graphene::app; +BOOST_FIXTURE_TEST_SUITE( history_api_tests, database_fixture ) + +BOOST_AUTO_TEST_CASE(get_account_history) { + try { + graphene::app::history_api hist_api(app); + + //account_id_type() do 3 ops + create_bitasset("USD", account_id_type()); + create_account("dan"); + create_account("bob"); + + + generate_block(); + fc::usleep(fc::milliseconds(2000)); + + int asset_create_op_id = operation::tag::value; + int account_create_op_id = operation::tag::value; + + //account_id_type() did 3 ops and includes id0 + vector histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 100, operation_history_id_type()); + + BOOST_CHECK_EQUAL(histories.size(), 3u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 0u); + BOOST_CHECK_EQUAL(histories[2].op.which(), asset_create_op_id); + + // 1 account_create op larger than id1 + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 100, operation_history_id_type()); + BOOST_CHECK_EQUAL(histories.size(), 1u); + BOOST_CHECK(histories[0].id.instance() != 0); + BOOST_CHECK_EQUAL(histories[0].op.which(), account_create_op_id); + + + // Limit 2 returns 2 result + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 2, operation_history_id_type()); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK(histories[1].id.instance() != 0); + BOOST_CHECK_EQUAL(histories[1].op.which(), account_create_op_id); + // bob has 1 op + histories = hist_api.get_account_history("bob", operation_history_id_type(), 100, operation_history_id_type()); + BOOST_CHECK_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].op.which(), account_create_op_id); + + + } catch (fc::exception &e) { + edump((e.to_detail_string())); + throw; + } +} +BOOST_AUTO_TEST_CASE(get_account_history_additional) { + try { + graphene::app::history_api hist_api(app); + + // A = account_id_type() with records { 5, 3, 1, 0 }, and + // B = dan with records { 6, 4, 2, 1 } + // account_id_type() and dan share operation id 1(account create) - share can be also in id 0 + + // no history at all in the chain + vector histories = hist_api.get_account_history("1.2.0", operation_history_id_type(0), 4, operation_history_id_type(0)); + 
BOOST_CHECK_EQUAL(histories.size(), 0u); + + create_bitasset("USD", account_id_type()); // create op 0 + generate_block(); + // what if the account only has one history entry and it is 0? + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 4, operation_history_id_type()); + BOOST_CHECK_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 0u); + + const account_object& dan = create_account("dan"); // create op 1 + + create_bitasset("CNY", dan.id); // create op 2 + create_bitasset("BTC", account_id_type()); // create op 3 + create_bitasset("XMR", dan.id); // create op 4 + create_bitasset("EUR", account_id_type()); // create op 5 + create_bitasset("OIL", dan.id); // create op 6 + + generate_block(); + + // f(A, 0, 4, 9) = { 5, 3, 1, 0 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 4, operation_history_id_type(9)); + BOOST_CHECK_EQUAL(histories.size(), 4u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u); + BOOST_CHECK_EQUAL(histories[3].id.instance(), 0u); + + // f(A, 0, 4, 6) = { 5, 3, 1, 0 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 4, operation_history_id_type(6)); + BOOST_CHECK_EQUAL(histories.size(), 4u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u); + BOOST_CHECK_EQUAL(histories[3].id.instance(), 0u); + + // f(A, 0, 4, 5) = { 5, 3, 1, 0 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 4, operation_history_id_type(5)); + BOOST_CHECK_EQUAL(histories.size(), 4u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u); + BOOST_CHECK_EQUAL(histories[3].id.instance(), 0u); + + // f(A, 0, 4, 4) = { 3, 1, 0 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 4, operation_history_id_type(4)); + BOOST_CHECK_EQUAL(histories.size(), 3u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 1u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 0u); + + // f(A, 0, 4, 3) = { 3, 1, 0 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 4, operation_history_id_type(3)); + BOOST_CHECK_EQUAL(histories.size(), 3u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 1u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 0u); + + // f(A, 0, 4, 2) = { 1, 0 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 4, operation_history_id_type(2)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 1u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 0u); + + // f(A, 0, 4, 1) = { 1, 0 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 4, operation_history_id_type(1)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 1u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 0u); + + // f(A, 0, 4, 0) = { 5, 3, 1, 0 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 4, operation_history_id_type()); + BOOST_CHECK_EQUAL(histories.size(), 4u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + 
BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u); + BOOST_CHECK_EQUAL(histories[3].id.instance(), 0u); + + // f(A, 1, 5, 9) = { 5, 3 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, operation_history_id_type(9)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + + // f(A, 1, 5, 6) = { 5, 3 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, operation_history_id_type(6)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + + // f(A, 1, 5, 5) = { 5, 3 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, operation_history_id_type(5)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + + // f(A, 1, 5, 4) = { 3 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, operation_history_id_type(4)); + BOOST_CHECK_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 3u); + + // f(A, 1, 5, 3) = { 3 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, operation_history_id_type(3)); + BOOST_CHECK_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 3u); + + // f(A, 1, 5, 2) = { } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, operation_history_id_type(2)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + // f(A, 1, 5, 1) = { } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, operation_history_id_type(1)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + // f(A, 1, 5, 0) = { 5, 3 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 5, operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + + // f(A, 0, 3, 9) = { 5, 3, 1 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 3, operation_history_id_type(9)); + BOOST_CHECK_EQUAL(histories.size(), 3u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u); + + // f(A, 0, 3, 6) = { 5, 3, 1 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 3, operation_history_id_type(6)); + BOOST_CHECK_EQUAL(histories.size(), 3u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u); + + // f(A, 0, 3, 5) = { 5, 3, 1 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 3, operation_history_id_type(5)); + BOOST_CHECK_EQUAL(histories.size(), 3u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u); + + // f(A, 0, 3, 4) = { 3, 1, 0 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 3, operation_history_id_type(4)); + BOOST_CHECK_EQUAL(histories.size(), 3u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 1u); 
+ BOOST_CHECK_EQUAL(histories[2].id.instance(), 0u); + + // f(A, 0, 3, 3) = { 3, 1, 0 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 3, operation_history_id_type(3)); + BOOST_CHECK_EQUAL(histories.size(), 3u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 1u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 0u); + + // f(A, 0, 3, 2) = { 1, 0 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 3, operation_history_id_type(2)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 1u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 0u); + + // f(A, 0, 3, 1) = { 1, 0 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 3, operation_history_id_type(1)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 1u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 0u); + + // f(A, 0, 3, 0) = { 5, 3, 1 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 3, operation_history_id_type()); + BOOST_CHECK_EQUAL(histories.size(), 3u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u); + + // f(B, 0, 4, 9) = { 6, 4, 2, 1 } + histories = hist_api.get_account_history("dan", operation_history_id_type(), 4, operation_history_id_type(9)); + BOOST_CHECK_EQUAL(histories.size(), 4u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 6u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 4u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 2u); + BOOST_CHECK_EQUAL(histories[3].id.instance(), 1u); + + // f(B, 0, 4, 6) = { 6, 4, 2, 1 } + histories = hist_api.get_account_history("dan", operation_history_id_type(), 4, operation_history_id_type(6)); + BOOST_CHECK_EQUAL(histories.size(), 4u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 6u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 4u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 2u); + BOOST_CHECK_EQUAL(histories[3].id.instance(), 1u); + + // f(B, 0, 4, 5) = { 4, 2, 1 } + histories = hist_api.get_account_history("dan", operation_history_id_type(), 4, operation_history_id_type(5)); + BOOST_CHECK_EQUAL(histories.size(), 3u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 4u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 2u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u); + + // f(B, 0, 4, 4) = { 4, 2, 1 } + histories = hist_api.get_account_history("dan", operation_history_id_type(), 4, operation_history_id_type(4)); + BOOST_CHECK_EQUAL(histories.size(), 3u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 4u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 2u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u); + + // f(B, 0, 4, 3) = { 2, 1 } + histories = hist_api.get_account_history("dan", operation_history_id_type(), 4, operation_history_id_type(3)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 2u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 1u); + + // f(B, 0, 4, 2) = { 2, 1 } + histories = hist_api.get_account_history("dan", operation_history_id_type(), 4, operation_history_id_type(2)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 2u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 1u); + + // f(B, 0, 4, 1) = { 1 } + histories = hist_api.get_account_history("dan", 
operation_history_id_type(), 4, operation_history_id_type(1)); + BOOST_CHECK_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 1u); + + // f(B, 0, 4, 0) = { 6, 4, 2, 1 } + histories = hist_api.get_account_history("dan", operation_history_id_type(), 4, operation_history_id_type()); + BOOST_CHECK_EQUAL(histories.size(), 4u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 6u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 4u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 2u); + BOOST_CHECK_EQUAL(histories[3].id.instance(), 1u); + + // f(B, 2, 4, 9) = { 6, 4 } + histories = hist_api.get_account_history("dan", operation_history_id_type(2), 4, operation_history_id_type(9)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 6u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 4u); + + // f(B, 2, 4, 6) = { 6, 4 } + histories = hist_api.get_account_history("dan", operation_history_id_type(2), 4, operation_history_id_type(6)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 6u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 4u); + + // f(B, 2, 4, 5) = { 4 } + histories = hist_api.get_account_history("dan", operation_history_id_type(2), 4, operation_history_id_type(5)); + BOOST_CHECK_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 4u); + + // f(B, 2, 4, 4) = { 4 } + histories = hist_api.get_account_history("dan", operation_history_id_type(2), 4, operation_history_id_type(4)); + BOOST_CHECK_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 4u); + + // f(B, 2, 4, 3) = { } + histories = hist_api.get_account_history("dan", operation_history_id_type(2), 4, operation_history_id_type(3)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + // f(B, 2, 4, 2) = { } + histories = hist_api.get_account_history("dan", operation_history_id_type(2), 4, operation_history_id_type(2)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + // f(B, 2, 4, 1) = { } + histories = hist_api.get_account_history("dan", operation_history_id_type(2), 4, operation_history_id_type(1)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + // f(B, 2, 4, 0) = { 6, 4 } + histories = hist_api.get_account_history("dan", operation_history_id_type(2), 4, operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 6u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 4u); + + // 0 limits + histories = hist_api.get_account_history("dan", operation_history_id_type(0), 0, operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(3), 0, operation_history_id_type(9)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + // non existent account + histories = hist_api.get_account_history("1.2.18", operation_history_id_type(0), 4, operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + // create a new account C = alice { 7 } + create_account("alice"); + + generate_block(); + + // f(C, 0, 4, 10) = { 7 } + histories = hist_api.get_account_history("alice", operation_history_id_type(0), 4, operation_history_id_type(10)); + BOOST_CHECK_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 7u); + + // f(C, 8, 4, 10) = { } + histories = hist_api.get_account_history("alice", operation_history_id_type(8), 4, operation_history_id_type(10)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + // f(A, 0, 10, 0) = { 7, 5, 
3, 1, 0 } + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(0), 10, operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 5u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 7u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 5u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[3].id.instance(), 1u); + BOOST_CHECK_EQUAL(histories[4].id.instance(), 0u); + + } + catch (fc::exception &e) { + edump((e.to_detail_string())); + throw; + } +} + +BOOST_AUTO_TEST_CASE(track_account) { + try { + graphene::app::history_api hist_api(app); + + // account_id_type() is not tracked + + // account_id_type() creates alice(not tracked account) + create_account("alice"); + + //account_id_type() creates some ops + create_bitasset("CNY", account_id_type()); + create_bitasset("USD", account_id_type()); + + // account_id_type() creates dan(account tracked) + const account_object& dan = create_account("dan"); + auto dan_id = dan.id; + + // dan makes 1 op + create_bitasset("EUR", dan_id); + + generate_block(); + + // anything against account_id_type() should be {} + vector<operation_history_object> histories = hist_api.get_account_history("1.2.0", operation_history_id_type(0), 10, operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 10, operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 1, operation_history_id_type(2)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + // anything against alice should be {} + histories = hist_api.get_account_history("alice", operation_history_id_type(0), 10, operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + histories = hist_api.get_account_history("alice", operation_history_id_type(1), 10, operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + histories = hist_api.get_account_history("alice", operation_history_id_type(1), 1, operation_history_id_type(2)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + // dan should have history + histories = hist_api.get_account_history("dan", operation_history_id_type(0), 10, operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 4u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u); + + // create more ops, starting with an untracked account + create_bitasset( "BTC", account_id_type() ); + create_bitasset( "GBP", dan_id ); + + generate_block(); + + histories = hist_api.get_account_history("dan", operation_history_id_type(0), 10, operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 3u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 6u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 4u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 3u); + + db.pop_block(); + + // Try again, should result in same object IDs + create_bitasset( "BTC", account_id_type() ); + create_bitasset( "GBP", dan_id ); + + generate_block(); + + histories = hist_api.get_account_history("dan", operation_history_id_type(0), 10, operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 3u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 6u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 4u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 3u); + } catch (fc::exception &e) { + edump((e.to_detail_string())); + throw; + } +} +BOOST_AUTO_TEST_CASE(track_account2) { + try { + 
graphene::app::history_api hist_api(app); + + // account_id_type() is tracked + + // account_id_type() creates alice(tracked account) + const account_object& alice = create_account("alice"); + auto alice_id = alice.id; + + //account_id_type() creates some ops + create_bitasset("CNY", account_id_type()); + create_bitasset("USD", account_id_type()); + + // alice makes 1 op + create_bitasset("EUR", alice_id); + + // account_id_type() creates dan(account not tracked) + create_account("dan"); + + generate_block(); + + // all account_id_type() should have 4 ops {4,2,1,0} + vector<operation_history_object> histories = hist_api.get_account_history("committee-account", operation_history_id_type(0), 10, operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 4u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 4u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 2u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u); + BOOST_CHECK_EQUAL(histories[3].id.instance(), 0u); + + // all alice account should have 2 ops {3, 0} + histories = hist_api.get_account_history("alice", operation_history_id_type(0), 10, operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 3u); + BOOST_CHECK_EQUAL(histories[1].id.instance(), 0u); + + // alice first op should be {0} + histories = hist_api.get_account_history("alice", operation_history_id_type(0), 1, operation_history_id_type(1)); + BOOST_CHECK_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 0u); + + // alice second op should be {3} + histories = hist_api.get_account_history("alice", operation_history_id_type(1), 1, operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 3u); + + // anything against dan should be {} + histories = hist_api.get_account_history("dan", operation_history_id_type(0), 10, operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + histories = hist_api.get_account_history("dan", operation_history_id_type(1), 10, operation_history_id_type(0)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + histories = hist_api.get_account_history("dan", operation_history_id_type(1), 1, operation_history_id_type(2)); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + } catch (fc::exception &e) { + edump((e.to_detail_string())); + throw; + } +} + +BOOST_AUTO_TEST_CASE(get_account_history_operations) { + try { + graphene::app::history_api hist_api(app); + + //account_id_type() do 3 ops + create_bitasset("CNY", account_id_type()); + create_account("sam"); + create_account("alice"); + + generate_block(); + fc::usleep(fc::milliseconds(2000)); + + int asset_create_op_id = operation::tag<asset_create_operation>::value; + int account_create_op_id = operation::tag<account_create_operation>::value; + + //account_id_type() did 1 asset_create op + vector<operation_history_object> histories = hist_api.get_account_history_operations( + "committee-account", asset_create_op_id, operation_history_id_type(), operation_history_id_type(), 100); + BOOST_CHECK_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 0u); + BOOST_CHECK_EQUAL(histories[0].op.which(), asset_create_op_id); + + //account_id_type() did 2 account_create ops + histories = hist_api.get_account_history_operations( + "committee-account", account_create_op_id, operation_history_id_type(), operation_history_id_type(), 100); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].op.which(), account_create_op_id); + + // No asset_create op larger than id1 + histories = 
hist_api.get_account_history_operations( + "committee-account", asset_create_op_id, operation_history_id_type(), operation_history_id_type(1), 100); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + // Limit 1 returns 1 result + histories = hist_api.get_account_history_operations( + "committee-account", account_create_op_id, operation_history_id_type(),operation_history_id_type(), 1); + BOOST_CHECK_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].op.which(), account_create_op_id); + + // alice has 1 op + histories = hist_api.get_account_history_operations( + "alice", account_create_op_id, operation_history_id_type(),operation_history_id_type(), 100); + BOOST_CHECK_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].op.which(), account_create_op_id); + + // create a bunch of accounts + for(int i = 0; i < 80; ++i) + { + std::string acct_name = "mytempacct" + std::to_string(i); + create_account(acct_name); + } + generate_block(); + + // history is set to limit transactions to 75 (see database_fixture.hpp) + // so asking for more should only return 75 (and not throw exception, + // see https://github.com/bitshares/bitshares-core/issues/1490 + histories = hist_api.get_account_history_operations( + "committee-account", account_create_op_id, operation_history_id_type(), operation_history_id_type(), 100); + BOOST_CHECK_EQUAL(histories.size(), 75u); + if (histories.size() > 0) + BOOST_CHECK_EQUAL(histories[0].op.which(), account_create_op_id); + + + } catch (fc::exception &e) { + edump((e.to_detail_string())); + throw; + } +} +//new test case for increasing the limit based on the config file +BOOST_AUTO_TEST_CASE(api_limit_get_account_history_operations) { + try { + graphene::app::history_api hist_api(app); + //account_id_type() do 3 ops + create_bitasset("CNY", account_id_type()); + create_account("sam"); + create_account("alice"); + + generate_block(); + fc::usleep(fc::milliseconds(100)); + + int asset_create_op_id = operation::tag<asset_create_operation>::value; + int account_create_op_id = operation::tag<account_create_operation>::value; + + //account_id_type() did 1 asset_create op + vector<operation_history_object> histories = hist_api.get_account_history_operations( + "committee-account", asset_create_op_id, operation_history_id_type(), operation_history_id_type(), 200); + BOOST_CHECK_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].id.instance(), 0u); + BOOST_CHECK_EQUAL(histories[0].op.which(), asset_create_op_id); + + //account_id_type() did 2 account_create ops + histories = hist_api.get_account_history_operations( + "committee-account", account_create_op_id, operation_history_id_type(), operation_history_id_type(), 200); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK_EQUAL(histories[0].op.which(), account_create_op_id); + + // No asset_create op larger than id1 + histories = hist_api.get_account_history_operations( + "committee-account", asset_create_op_id, operation_history_id_type(), operation_history_id_type(1), 200); + BOOST_CHECK_EQUAL(histories.size(), 0u); + + // Limit 1 returns 1 result + histories = hist_api.get_account_history_operations( + "committee-account", account_create_op_id, operation_history_id_type(),operation_history_id_type(), 1); + BOOST_CHECK_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].op.which(), account_create_op_id); + + // alice has 1 op + histories = hist_api.get_account_history_operations( + "alice", account_create_op_id, operation_history_id_type(),operation_history_id_type(), 200); + BOOST_CHECK_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].op.which(), 
account_create_op_id); + + // create a bunch of accounts + for(int i = 0; i < 126; ++i) + { + std::string acct_name = "mytempacct" + std::to_string(i); + create_account(acct_name); + } + generate_block(); + + // history is set to limit transactions to 125 (see database_fixture.hpp) + // so asking for more should only return 125 (and not throw exception, + // see https://github.com/bitshares/bitshares-core/issues/1490 + GRAPHENE_CHECK_THROW(hist_api.get_account_history_operations("commitee-account", account_create_op_id, operation_history_id_type(),operation_history_id_type(), 301), fc::exception); + histories = hist_api.get_account_history_operations("committee-account", account_create_op_id, operation_history_id_type(), operation_history_id_type(), 200); + BOOST_REQUIRE_EQUAL( histories.size(), 125u ); + } + catch (fc::exception &e) + { + edump((e.to_detail_string())); + throw; + } +} + +BOOST_AUTO_TEST_CASE(api_limit_get_account_history) { + try{ + graphene::app::history_api hist_api(app); + //account_id_type() do 3 ops + create_bitasset("USD", account_id_type()); + create_account("dan"); + create_account("bob"); + + generate_block(); + fc::usleep(fc::milliseconds(100)); + + int asset_create_op_id = operation::tag<asset_create_operation>::value; + int account_create_op_id = operation::tag<account_create_operation>::value; + //account_id_type() did 3 ops and includes id0 + vector<operation_history_object> histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 210, operation_history_id_type()); + + BOOST_CHECK_EQUAL(histories.size(), 3u); + BOOST_CHECK_EQUAL(histories[2].id.instance(), 0u); + BOOST_CHECK_EQUAL(histories[2].op.which(), asset_create_op_id); + + // 1 account_create op larger than id1 + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(1), 210, operation_history_id_type()); + BOOST_CHECK_EQUAL(histories.size(), 1u); + BOOST_CHECK(histories[0].id.instance() != 0u); + BOOST_CHECK_EQUAL(histories[0].op.which(), account_create_op_id); + + + // Limit 2 returns 2 result + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 2, operation_history_id_type()); + BOOST_CHECK_EQUAL(histories.size(), 2u); + BOOST_CHECK(histories[1].id.instance() != 0u); + BOOST_CHECK_EQUAL(histories[1].op.which(), account_create_op_id); + // bob has 1 op + histories = hist_api.get_account_history("bob", operation_history_id_type(), 210, operation_history_id_type()); + BOOST_CHECK_EQUAL(histories.size(), 1u); + BOOST_CHECK_EQUAL(histories[0].op.which(), account_create_op_id); + + // create a bunch of accounts + for(int i = 0; i < 126; ++i) + { + std::string acct_name = "mytempacct" + std::to_string(i); + create_account(acct_name); + } + generate_block(); + + GRAPHENE_CHECK_THROW(hist_api.get_account_history("1.2.0", operation_history_id_type(), 260, operation_history_id_type()), fc::exception); + histories = hist_api.get_account_history("1.2.0", operation_history_id_type(), 210, operation_history_id_type()); + BOOST_REQUIRE_EQUAL( histories.size(), 125u ); + } catch (fc::exception &e) { + edump((e.to_detail_string())); + throw; + } +} +BOOST_AUTO_TEST_CASE(api_limit_get_relative_account_history) { + try{ + graphene::app::history_api hist_api(app); + //account_id_type() do 3 ops + create_bitasset("USD", account_id_type()); + create_account("dan"); + create_account("bob"); + + generate_block(); + fc::usleep(fc::milliseconds(100)); + + GRAPHENE_CHECK_THROW(hist_api.get_relative_account_history("1.2.0", 126, 260, 0), fc::exception); + vector<operation_history_object> histories = hist_api.get_relative_account_history("1.2.0", 126, 210, 
0); + BOOST_REQUIRE_EQUAL( histories.size(), 0u ); + + } catch (fc::exception &e) { + edump((e.to_detail_string())); + throw; + } +} + +BOOST_AUTO_TEST_CASE(api_limit_get_account_history_by_operations) { + try { + graphene::app::history_api hist_api(app); + vector<uint16_t> operation_types; + //account_id_type() do 3 ops + create_bitasset("USD", account_id_type()); + create_account("dan"); + create_account("bob"); + generate_block(); + fc::usleep(fc::milliseconds(100)); + GRAPHENE_CHECK_THROW(hist_api.get_account_history_by_operations("1.2.0", operation_types, 0, 260), fc::exception); + history_operation_detail histories = hist_api.get_account_history_by_operations("1.2.0", operation_types, 0, 210); + BOOST_REQUIRE_EQUAL( histories.total_count, 3u ); + } + catch (fc::exception &e) { + edump((e.to_detail_string())); + throw; + } +} + + +BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/market_rounding_tests.cpp b/tests/tests/market_rounding_tests.cpp new file mode 100644 index 0000000000..b71e1cd0ff --- /dev/null +++ b/tests/tests/market_rounding_tests.cpp @@ -0,0 +1,1508 @@ +/* + * Copyright (c) 2018 Abit More, and other contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include <graphene/chain/hardfork.hpp> + +#include <graphene/chain/market_object.hpp> + +#include <boost/test/unit_test.hpp> + +#include "../common/database_fixture.hpp" + +using namespace graphene::chain; +using namespace graphene::chain::test; + +BOOST_FIXTURE_TEST_SUITE(market_rounding_tests, database_fixture) + +/** + * Create an order such that when the trade executes at the + * requested price the resulting payout to one party is 0 + * ( Reproduces https://github.com/bitshares/bitshares-core/issues/184 ) + */ +BOOST_AUTO_TEST_CASE( trade_amount_equals_zero ) +{ + try { + generate_blocks( HARDFORK_555_TIME ); + generate_block(); + set_expiration( db, trx ); + + const asset_object& test = create_user_issued_asset( "UIATEST" ); + const asset_id_type test_id = test.id; + const asset_object& core = get_asset( GRAPHENE_SYMBOL ); + const asset_id_type core_id = core.id; + const account_object& core_seller = create_account( "seller1" ); + const account_object& core_buyer = create_account("buyer1"); + + transfer( committee_account(db), core_seller, asset( 100000000 ) ); + + issue_uia( core_buyer, asset( 10000000, test_id ) ); + + BOOST_CHECK_EQUAL(get_balance(core_buyer, core), 0); + BOOST_CHECK_EQUAL(get_balance(core_buyer, test), 10000000); + BOOST_CHECK_EQUAL(get_balance(core_seller, test), 0); + BOOST_CHECK_EQUAL(get_balance(core_seller, core), 100000000); + + create_sell_order(core_seller, core.amount(1), test.amount(2)); + create_sell_order(core_seller, core.amount(1), test.amount(2)); + create_sell_order(core_buyer, test.amount(3), core.amount(1)); + + BOOST_CHECK_EQUAL(get_balance(core_buyer, core), 1); + BOOST_CHECK_EQUAL(get_balance(core_buyer, test), 9999997); + BOOST_CHECK_EQUAL(get_balance(core_seller, core), 99999998); + BOOST_CHECK_EQUAL(get_balance(core_seller, test), 3); + + generate_block(); + fc::usleep(fc::milliseconds(200)); // sleep a while to execute callback in another thread + + auto result = get_market_order_history(core_id, test_id); + BOOST_CHECK_EQUAL(result.size(), 4u); + BOOST_CHECK(result[0].op.pays == core_id(db).amount(0)); + BOOST_CHECK(result[0].op.receives == test_id(db).amount(1)); + BOOST_CHECK(result[1].op.pays == test_id(db).amount(1)); + BOOST_CHECK(result[1].op.receives == core_id(db).amount(0)); + BOOST_CHECK(result[2].op.pays == core_id(db).amount(1)); + BOOST_CHECK(result[2].op.receives == test_id(db).amount(2)); + BOOST_CHECK(result[3].op.pays == test_id(db).amount(2)); + BOOST_CHECK(result[3].op.receives == core_id(db).amount(1)); + } catch( const fc::exception& e) { + edump((e.to_detail_string())); + throw; + } +} + +/** + * The something-for-nothing bug should be fixed https://github.com/bitshares/bitshares-core/issues/184 + */ +BOOST_AUTO_TEST_CASE( trade_amount_equals_zero_after_hf_184 ) +{ + try { + auto mi = db.get_global_properties().parameters.maintenance_interval; + generate_blocks(HARDFORK_CORE_184_TIME - mi); + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + + set_expiration( db, trx ); + + const asset_object& test = create_user_issued_asset( "UIATEST" ); + const asset_id_type test_id = test.id; + const asset_object& core = get_asset( GRAPHENE_SYMBOL ); + const asset_id_type core_id = core.id; + const account_object& core_seller = create_account( "seller1" ); + const account_object& core_buyer = create_account("buyer1"); + + transfer( committee_account(db), core_seller, asset( 100000000 ) ); + + issue_uia( core_buyer, asset( 10000000, test_id ) ); + + BOOST_CHECK_EQUAL(get_balance(core_buyer, core), 0); + BOOST_CHECK_EQUAL(get_balance(core_buyer, test), 10000000); + 
BOOST_CHECK_EQUAL(get_balance(core_seller, test), 0); + BOOST_CHECK_EQUAL(get_balance(core_seller, core), 100000000); + + create_sell_order(core_seller, core.amount(1), test.amount(2)); + create_sell_order(core_seller, core.amount(1), test.amount(2)); + create_sell_order(core_buyer, test.amount(3), core.amount(1)); + + BOOST_CHECK_EQUAL(get_balance(core_buyer, core), 1); + BOOST_CHECK_EQUAL(get_balance(core_buyer, test), 9999998); + BOOST_CHECK_EQUAL(get_balance(core_seller, core), 99999998); + BOOST_CHECK_EQUAL(get_balance(core_seller, test), 2); + + generate_block(); + fc::usleep(fc::milliseconds(200)); // sleep a while to execute callback in another thread + + auto result = get_market_order_history(core_id, test_id); + BOOST_CHECK_EQUAL(result.size(), 2u); + BOOST_CHECK(result[0].op.pays == core_id(db).amount(1)); + BOOST_CHECK(result[0].op.receives == test_id(db).amount(2)); + BOOST_CHECK(result[1].op.pays == test_id(db).amount(2)); + BOOST_CHECK(result[1].op.receives == core_id(db).amount(1)); + } catch( const fc::exception& e) { + edump((e.to_detail_string())); + throw; + } +} + +/*** + * This test case reproduces one of the scenarios described in bitshares-core issue #342: + * when matching a limit order with another limit order, a small taker order will pay more than minimum required. + */ +BOOST_AUTO_TEST_CASE( limit_limit_rounding_test1 ) +{ + try { + generate_blocks( HARDFORK_555_TIME ); + generate_block(); + set_expiration( db, trx ); + + ACTORS( (seller)(buyer) ); + + const asset_object& test = create_user_issued_asset( "UIATEST" ); + const asset_id_type test_id = test.id; + const asset_object& core = get_asset( GRAPHENE_SYMBOL ); + const asset_id_type core_id = core.id; + + transfer( committee_account(db), seller, asset( 100000000 ) ); + + issue_uia( buyer, asset( 10000000, test_id ) ); + + BOOST_CHECK_EQUAL(get_balance(buyer, core), 0); + BOOST_CHECK_EQUAL(get_balance(buyer, test), 10000000); + BOOST_CHECK_EQUAL(get_balance(seller, test), 0); + BOOST_CHECK_EQUAL(get_balance(seller, core), 100000000); + + // seller sells 3 core for 31 test, price 10.33 test per core + limit_order_id_type sell_id = create_sell_order( seller, core.amount(3), test.amount(31) )->id; + + // buyer buys 2 core with 25 test, price 12.5 test per core + // the order is filled immediately + BOOST_CHECK( !create_sell_order( buyer, test.amount(25), core.amount(2) ) ); + + BOOST_CHECK_EQUAL( sell_id(db).for_sale.value, 1 ); // 2 core sold, 1 remaining + + BOOST_CHECK_EQUAL(get_balance(seller, core), 99999997); + BOOST_CHECK_EQUAL(get_balance(seller, test), 25); // seller got 25 test + BOOST_CHECK_EQUAL(get_balance(buyer, core), 2); // buyer got 2 core + BOOST_CHECK_EQUAL(get_balance(buyer, test), 9999975); // buyer paid 25 test, + // effective price is 25/2 which is much higher than 31/3 + + generate_block(); + + // buyer buys 2 core with 25 test, price 12.5 test per core + limit_order_id_type buy_id = create_sell_order( buyer_id, asset(25,test_id), asset(2,core_id) )->id; + + generate_block(); + + BOOST_CHECK( !db.find_object( sell_id ) ); // sell order is filled + BOOST_CHECK_EQUAL( buy_id(db).for_sale.value, 15 ); // 10 test sold, 15 remaining + + BOOST_CHECK_EQUAL(get_balance(seller_id, core_id), 99999997); + BOOST_CHECK_EQUAL(get_balance(seller_id, test_id), 35); // seller got 10 more test + BOOST_CHECK_EQUAL(get_balance(buyer_id, core_id), 3); // buyer got 1 more core + BOOST_CHECK_EQUAL(get_balance(buyer_id, test_id), 9999950); + + } catch( const fc::exception& e) { + edump((e.to_detail_string())); 
+ throw; + } +} + +/*** + * This test case tests one of the scenarios described in bitshares-core issue #342 after hard fork: + * when matching a limit order with another limit order, + * a small taker order will only pay minimum required amount, and the rest will be returned. + */ +BOOST_AUTO_TEST_CASE( limit_limit_rounding_test1_after_hf_342 ) +{ + try { + auto mi = db.get_global_properties().parameters.maintenance_interval; + generate_blocks(HARDFORK_CORE_342_TIME - mi); + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + + set_expiration( db, trx ); + + ACTORS( (seller)(buyer) ); + + const asset_object& test = create_user_issued_asset( "UIATEST" ); + const asset_id_type test_id = test.id; + const asset_object& core = get_asset( GRAPHENE_SYMBOL ); + const asset_id_type core_id = core.id; + + transfer( committee_account(db), seller, asset( 100000000 ) ); + + issue_uia( buyer, asset( 10000000, test_id ) ); + + BOOST_CHECK_EQUAL(get_balance(buyer, core), 0); + BOOST_CHECK_EQUAL(get_balance(buyer, test), 10000000); + BOOST_CHECK_EQUAL(get_balance(seller, test), 0); + BOOST_CHECK_EQUAL(get_balance(seller, core), 100000000); + + // seller sells 3 core for 31 test, price 10.33 test per core + limit_order_id_type sell_id = create_sell_order( seller, core.amount(3), test.amount(31) )->id; + + // buyer buys 2 core with 25 test, price 12.5 test per core + // the order is filled immediately + BOOST_CHECK( !create_sell_order( buyer, test.amount(25), core.amount(2) ) ); + + BOOST_CHECK_EQUAL( sell_id(db).for_sale.value, 1 ); // 2 core sold, 1 remaining + + BOOST_CHECK_EQUAL(get_balance(buyer, core), 2); // buyer got 2 core + BOOST_CHECK_EQUAL(get_balance(buyer, test), 9999979); // buyer actually paid 21 test according to price 10.33 + BOOST_CHECK_EQUAL(get_balance(seller, core), 99999997); + BOOST_CHECK_EQUAL(get_balance(seller, test), 21); // seller got 21 test + + generate_block(); + set_expiration( db, trx ); + + // buyer buys 2 core with 25 test, price 12.5 test per core + limit_order_id_type buy_id = create_sell_order( buyer_id, asset(25,test_id), asset(2,core_id) )->id; + + generate_block(); + + BOOST_CHECK( !db.find_object( sell_id ) ); // sell order is filled + BOOST_CHECK_EQUAL( buy_id(db).for_sale.value, 15 ); // 10 test sold according to price 10.33, and 15 remaining + + BOOST_CHECK_EQUAL(get_balance(buyer_id, core_id), 3); // buyer got 1 more core + BOOST_CHECK_EQUAL(get_balance(buyer_id, test_id), 9999954); + BOOST_CHECK_EQUAL(get_balance(seller_id, core_id), 99999997); + BOOST_CHECK_EQUAL(get_balance(seller_id, test_id), 31); // seller got 10 more test, in total 31 as expected + + } catch( const fc::exception& e) { + edump((e.to_detail_string())); + throw; + } +} + +/*** + * This test case reproduces one of the scenarios described in bitshares-core issue #342: + * when matching a limit order with another limit order, a small maker order will pay more than minimum required. 
+ */ +BOOST_AUTO_TEST_CASE( limit_limit_rounding_test2 ) +{ + try { + generate_blocks( HARDFORK_555_TIME ); + generate_block(); + set_expiration( db, trx ); + + ACTORS( (seller)(buyer) ); + + const asset_object& test = create_user_issued_asset( "UIATEST" ); + const asset_id_type test_id = test.id; + const asset_object& core = get_asset( GRAPHENE_SYMBOL ); + const asset_id_type core_id = core.id; + + transfer( committee_account(db), seller, asset( 100000000 ) ); + + issue_uia( buyer, asset( 10000000, test_id ) ); + + BOOST_CHECK_EQUAL(get_balance(buyer, core), 0); + BOOST_CHECK_EQUAL(get_balance(buyer, test), 10000000); + BOOST_CHECK_EQUAL(get_balance(seller, test), 0); + BOOST_CHECK_EQUAL(get_balance(seller, core), 100000000); + + // buyer buys 17 core with 3 test, price 3/17 = 0.176 test per core + limit_order_id_type tmp_buy_id = create_sell_order( buyer, test.amount(3), core.amount(17) )->id; + // seller sells 33 core for 5 test, price 5/33 = 0.1515 test per core + limit_order_id_type sell_id = create_sell_order( seller, core.amount(33), test.amount(5) )->id; + + BOOST_CHECK( !db.find_object( tmp_buy_id ) ); // buy order is filled + BOOST_CHECK_EQUAL( sell_id(db).for_sale.value, 16 ); // 17 core sold, 16 remaining + + BOOST_CHECK_EQUAL(get_balance(seller, core), 99999967); + BOOST_CHECK_EQUAL(get_balance(seller, test), 3); // seller got 3 test + BOOST_CHECK_EQUAL(get_balance(buyer, core), 17); // buyer got 17 core + BOOST_CHECK_EQUAL(get_balance(buyer, test), 9999997); // buyer paid 3 test + + generate_block(); + set_expiration( db, trx ); + + // buyer buys 15 core with 3 test, price 3/15 = 0.2 test per core + // even 15 < 16, since it's taker, we'll check with maker's price, then turns out the buy order is bigger + limit_order_id_type buy_id = create_sell_order( buyer_id, asset(3,test_id), asset(15,core_id) )->id; + + generate_block(); + + BOOST_CHECK( !db.find_object( sell_id ) ); // sell order is filled + BOOST_CHECK_EQUAL( buy_id(db).for_sale.value, 1 ); // 2 test sold, 1 remaining + + BOOST_CHECK_EQUAL(get_balance(seller_id, core_id), 99999967); // seller paid the 16 core which was remaining in the order + BOOST_CHECK_EQUAL(get_balance(seller_id, test_id), 5); // seller got 2 more test + // effective price 16/2 which is much higher than 33/5 + BOOST_CHECK_EQUAL(get_balance(buyer_id, core_id), 33); // buyer got 16 more core + BOOST_CHECK_EQUAL(get_balance(buyer_id, test_id), 9999994); + + } catch( const fc::exception& e) { + edump((e.to_detail_string())); + throw; + } +} + +/*** + * This test case tests one of the scenarios described in bitshares-core issue #342 after hard fork: + * when matching a limit order with another limit order, + * a small maker order will only pay minimum required amount, and the rest will be returned. 
+ */ +BOOST_AUTO_TEST_CASE( limit_limit_rounding_test2_after_hf_342 ) +{ + try { + auto mi = db.get_global_properties().parameters.maintenance_interval; + generate_blocks(HARDFORK_CORE_342_TIME - mi); + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + + set_expiration( db, trx ); + + ACTORS( (seller)(buyer) ); + + const asset_object& test = create_user_issued_asset( "UIATEST" ); + const asset_id_type test_id = test.id; + const asset_object& core = get_asset( GRAPHENE_SYMBOL ); + const asset_id_type core_id = core.id; + + transfer( committee_account(db), seller, asset( 100000000 ) ); + + issue_uia( buyer, asset( 10000000, test_id ) ); + + BOOST_CHECK_EQUAL(get_balance(buyer, core), 0); + BOOST_CHECK_EQUAL(get_balance(buyer, test), 10000000); + BOOST_CHECK_EQUAL(get_balance(seller, test), 0); + BOOST_CHECK_EQUAL(get_balance(seller, core), 100000000); + + // buyer buys 17 core with 3 test, price 3/17 = 0.176 test per core + limit_order_id_type tmp_buy_id = create_sell_order( buyer, test.amount(3), core.amount(17) )->id; + // seller sells 33 core for 5 test, price 5/33 = 0.1515 test per core + limit_order_id_type sell_id = create_sell_order( seller, core.amount(33), test.amount(5) )->id; + + BOOST_CHECK( !db.find_object( tmp_buy_id ) ); // buy order is filled + BOOST_CHECK_EQUAL( sell_id(db).for_sale.value, 16 ); // 17 core sold, 16 remaining + + BOOST_CHECK_EQUAL(get_balance(seller, core), 99999967); + BOOST_CHECK_EQUAL(get_balance(seller, test), 3); // seller got 3 test + BOOST_CHECK_EQUAL(get_balance(buyer, core), 17); // buyer got 17 core + BOOST_CHECK_EQUAL(get_balance(buyer, test), 9999997); // buyer paid 3 test + + generate_block(); + set_expiration( db, trx ); + + // buyer buys 15 core with 3 test, price 3/15 = 0.2 test per core + // even 15 < 16, since it's taker, we'll check with maker's price, then turns out the buy order is bigger + limit_order_id_type buy_id = create_sell_order( buyer_id, asset(3,test_id), asset(15,core_id) )->id; + + generate_block(); + + BOOST_CHECK( !db.find_object( sell_id ) ); // sell order is filled + BOOST_CHECK_EQUAL( buy_id(db).for_sale.value, 1 ); // 2 test sold, 1 remaining + + BOOST_CHECK_EQUAL(get_balance(buyer_id, core_id), 31); // buyer got 14 more core according to price 0.1515 + BOOST_CHECK_EQUAL(get_balance(buyer_id, test_id), 9999994); + BOOST_CHECK_EQUAL(get_balance(seller_id, core_id), 99999967+16-14); // seller got refunded 2 core + BOOST_CHECK_EQUAL(get_balance(seller_id, test_id), 5); // seller got 2 more test, effective price 14/2 which is close to 33/5 + + } catch( const fc::exception& e) { + edump((e.to_detail_string())); + throw; + } +} + +/*** + * Reproduces bitshares-core issue #132: something for nothing when matching a limit order with a call order. + * Also detects the cull_small issue in check_call_orders. 
+ */ +BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test1 ) +{ try { // matching a limit order with call order + generate_blocks( HARDFORK_555_TIME ); + generate_block(); + + set_expiration( db, trx ); + + ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(borrower4)(feedproducer)); + + const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); + const auto& core = asset_id_type()(db); + + int64_t init_balance(1000000); + + transfer(committee_account, buyer_id, asset(init_balance)); + transfer(committee_account, borrower_id, asset(init_balance)); + transfer(committee_account, borrower2_id, asset(init_balance)); + transfer(committee_account, borrower3_id, asset(init_balance)); + transfer(committee_account, borrower4_id, asset(init_balance)); + update_feed_producers( bitusd, {feedproducer.id} ); + + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd.amount( 100 ) / core.amount( 5 ); + publish_feed( bitusd, feedproducer, current_feed ); + // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 + const call_order_object& call = *borrow( borrower, bitusd.amount(10), asset(1)); + call_order_id_type call_id = call.id; + // create another position with 310% collateral, call price is 15.5/175 CORE/USD = 62/700 + const call_order_object& call2 = *borrow( borrower2, bitusd.amount(100000), asset(15500)); + // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 + const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(17500)); + transfer(borrower, seller, bitusd.amount(10)); + transfer(borrower2, seller, bitusd.amount(100000)); + transfer(borrower3, seller, bitusd.amount(100000)); + + BOOST_CHECK_EQUAL( 10, call.debt.value ); + BOOST_CHECK_EQUAL( 1, call.collateral.value ); + BOOST_CHECK_EQUAL( 100000, call2.debt.value ); + BOOST_CHECK_EQUAL( 15500, call2.collateral.value ); + BOOST_CHECK_EQUAL( 100000, call3.debt.value ); + BOOST_CHECK_EQUAL( 17500, call3.collateral.value ); + + BOOST_CHECK_EQUAL( 200010, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + BOOST_CHECK_EQUAL( init_balance-1, get_balance(borrower, core) ); + + // adjust price feed to get call_order into margin call territory + current_feed.settlement_price = bitusd.amount( 120 ) / core.amount(10); + publish_feed( bitusd, feedproducer, current_feed ); + // settlement price = 120 USD / 10 CORE, mssp = 120/11 USD/CORE + + // This would match with call at price 11 USD / 1 CORE, but call only owes 10 USD, + // so the seller will pay 10 USD but get nothing. + // The remaining 1 USD is too little to get any CORE, so the limit order will be cancelled + BOOST_CHECK( !create_sell_order(seller, bitusd.amount(11), core.amount(1)) ); + BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled + BOOST_CHECK_EQUAL( 200000, get_balance(seller, bitusd) ); // the seller paid 10 USD + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); // the seller got nothing + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + BOOST_CHECK_EQUAL( init_balance, get_balance(borrower, core) ); + + generate_block(); + +} FC_LOG_AND_RETHROW() } + +/*** + * Another test case + * reproduces bitshares-core issue #132: something for nothing when matching a limit order with a call order. + * Also detects the cull_small issue in check_call_orders. 
+ * + * In this test case, the limit order is taker. + */ +BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test2 ) +{ try { + generate_blocks( HARDFORK_555_TIME ); + generate_block(); + + set_expiration( db, trx ); + + ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(borrower4)(feedproducer)); + + const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); + const auto& core = asset_id_type()(db); + + int64_t init_balance(1000000); + + transfer(committee_account, buyer_id, asset(init_balance)); + transfer(committee_account, borrower_id, asset(init_balance)); + transfer(committee_account, borrower2_id, asset(init_balance)); + transfer(committee_account, borrower3_id, asset(init_balance)); + transfer(committee_account, borrower4_id, asset(init_balance)); + update_feed_producers( bitusd, {feedproducer.id} ); + + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd.amount( 100 ) / core.amount( 5 ); + publish_feed( bitusd, feedproducer, current_feed ); + // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 + const call_order_object& call = *borrow( borrower, bitusd.amount(10), asset(1)); + call_order_id_type call_id = call.id; + // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 + const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(17500)); + transfer(borrower, seller, bitusd.amount(10)); + transfer(borrower3, seller, bitusd.amount(100000)); + + BOOST_CHECK_EQUAL( 10, call.debt.value ); + BOOST_CHECK_EQUAL( 1, call.collateral.value ); + BOOST_CHECK_EQUAL( 100000, call3.debt.value ); + BOOST_CHECK_EQUAL( 17500, call3.collateral.value ); + + BOOST_CHECK_EQUAL( 100010, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + BOOST_CHECK_EQUAL( init_balance-1, get_balance(borrower, core) ); + + // adjust price feed to get call_order into margin call territory + current_feed.settlement_price = bitusd.amount( 120 ) / core.amount(10); + publish_feed( bitusd, feedproducer, current_feed ); + // settlement price = 120 USD / 10 CORE, mssp = 120/11 USD/CORE + + // This would match with call at price 33 USD / 3 CORE, but call only owes 10 USD, + // so the seller will pay 10 USD but get nothing. + // The remaining USD will be left in the order on the market + limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(33), core.amount(3))->id; + BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled + BOOST_CHECK_EQUAL( 100010-33, get_balance(seller, bitusd) ); // the seller paid 33 USD + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); // the seller got nothing + BOOST_CHECK_EQUAL( 33-10, sell_id(db).for_sale.value ); // the sell order has some USD left + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + BOOST_CHECK_EQUAL( init_balance, get_balance(borrower, core) ); + + generate_block(); + +} FC_LOG_AND_RETHROW() } + +/*** + * Yet another test case + * reproduces bitshares-core issue #132: something for nothing when matching a limit order with a call order. + * Also detects the cull_small issue in check_call_orders. + * + * In this test case, the limit order is maker. 
+ */ +BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test3 ) +{ try { + generate_blocks( HARDFORK_555_TIME ); + generate_block(); + + set_expiration( db, trx ); + + ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(borrower4)(feedproducer)); + + const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); + const auto& core = asset_id_type()(db); + const asset_id_type bitusd_id = bitusd.id; + const asset_id_type core_id = core.id; + + int64_t init_balance(1000000); + + transfer(committee_account, buyer_id, asset(init_balance)); + transfer(committee_account, borrower_id, asset(init_balance)); + transfer(committee_account, borrower2_id, asset(init_balance)); + transfer(committee_account, borrower3_id, asset(init_balance)); + transfer(committee_account, borrower4_id, asset(init_balance)); + update_feed_producers( bitusd, {feedproducer.id} ); + + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd.amount( 100 ) / core.amount( 5 ); + publish_feed( bitusd, feedproducer, current_feed ); + // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 + const call_order_object& call = *borrow( borrower, bitusd.amount(10), asset(1)); + call_order_id_type call_id = call.id; + // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 + const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(17500)); + transfer(borrower, seller, bitusd.amount(10)); + transfer(borrower3, seller, bitusd.amount(100000)); + + BOOST_CHECK_EQUAL( 10, call.debt.value ); + BOOST_CHECK_EQUAL( 1, call.collateral.value ); + BOOST_CHECK_EQUAL( 100000, call3.debt.value ); + BOOST_CHECK_EQUAL( 17500, call3.collateral.value ); + + BOOST_CHECK_EQUAL( 100010, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + BOOST_CHECK_EQUAL( init_balance-1, get_balance(borrower, core) ); + + // create a limit order which will be matched later + limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(33), core.amount(3))->id; + BOOST_CHECK_EQUAL( 33, sell_id(db).for_sale.value ); + BOOST_CHECK_EQUAL( 100010-33, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + + generate_block(); + + // adjust price feed to get call_order into margin call territory + current_feed.settlement_price = bitusd_id(db).amount( 120 ) / core_id(db).amount(10); + publish_feed( bitusd_id(db), feedproducer_id(db), current_feed ); + // settlement price = 120 USD / 10 CORE, mssp = 120/11 USD/CORE + + // the limit order will match with call at price 33 USD / 3 CORE, but call only owes 10 USD, + // so the seller will pay 10 USD but get nothing. + // The remaining USD will be in the order on the market + BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled + BOOST_CHECK_EQUAL( 100010-33, get_balance(seller_id, bitusd_id) ); // the seller paid 33 USD + BOOST_CHECK_EQUAL( 0, get_balance(seller_id, core_id) ); // the seller got nothing + BOOST_CHECK_EQUAL( 33-10, sell_id(db).for_sale.value ); // the sell order has some USD left + BOOST_CHECK_EQUAL( 0, get_balance(borrower_id, bitusd_id) ); + BOOST_CHECK_EQUAL( init_balance, get_balance(borrower_id, core_id) ); + + generate_block(); + +} FC_LOG_AND_RETHROW() } + +/*** + * Fixed bitshares-core issue #132: something for nothing when matching a limit order with a call order. 
+ */ +BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test1_after_hardfork ) +{ try { + auto mi = db.get_global_properties().parameters.maintenance_interval; + generate_blocks(HARDFORK_CORE_184_TIME - mi); + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + + set_expiration( db, trx ); + + ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(borrower4)(feedproducer)); + + const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); + const auto& core = asset_id_type()(db); + + int64_t init_balance(1000000); + + transfer(committee_account, buyer_id, asset(init_balance)); + transfer(committee_account, borrower_id, asset(init_balance)); + transfer(committee_account, borrower2_id, asset(init_balance)); + transfer(committee_account, borrower3_id, asset(init_balance)); + transfer(committee_account, borrower4_id, asset(init_balance)); + update_feed_producers( bitusd, {feedproducer.id} ); + + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd.amount( 100 ) / core.amount( 5 ); + publish_feed( bitusd, feedproducer, current_feed ); + // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 + const call_order_object& call = *borrow( borrower, bitusd.amount(10), asset(1)); + call_order_id_type call_id = call.id; + // create another position with 310% collateral, call price is 15.5/175 CORE/USD = 62/700 + const call_order_object& call2 = *borrow( borrower2, bitusd.amount(100000), asset(15500)); + // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 + const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(17500)); + transfer(borrower, seller, bitusd.amount(10)); + transfer(borrower2, seller, bitusd.amount(100000)); + transfer(borrower3, seller, bitusd.amount(100000)); + + BOOST_CHECK_EQUAL( 10, call.debt.value ); + BOOST_CHECK_EQUAL( 1, call.collateral.value ); + BOOST_CHECK_EQUAL( 100000, call2.debt.value ); + BOOST_CHECK_EQUAL( 15500, call2.collateral.value ); + BOOST_CHECK_EQUAL( 100000, call3.debt.value ); + BOOST_CHECK_EQUAL( 17500, call3.collateral.value ); + + BOOST_CHECK_EQUAL( 200010, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + BOOST_CHECK_EQUAL( init_balance-1, get_balance(borrower, core) ); + + // adjust price feed to get call_order into margin call territory + current_feed.settlement_price = bitusd.amount( 120 ) / core.amount(10); + publish_feed( bitusd, feedproducer, current_feed ); + // settlement price = 120 USD / 10 CORE, mssp = 120/11 USD/CORE + + // This would match with call at price 120 USD / 11 CORE (assume hard fork core-342 and hard fork core-338 occur at same time), + // but call only owes 10 USD, + // Since the call would pay off all debt, let it pay 1 CORE from collateral + // The remaining 1 USD is too little to get any CORE, so the limit order will be cancelled + BOOST_CHECK( !create_sell_order(seller, bitusd.amount(11), core.amount(1)) ); + BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled + BOOST_CHECK_EQUAL( 200000, get_balance(seller, bitusd) ); // the seller paid 10 USD + BOOST_CHECK_EQUAL( 1, get_balance(seller, core) ); // the seller got 1 CORE + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + BOOST_CHECK_EQUAL( init_balance-1, get_balance(borrower, core) ); + + generate_block(); + +} FC_LOG_AND_RETHROW() } + 
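The pre- and post-hardfork expectations in the issue-#132 cases above boil down to how an integer amount is converted through a price ratio. The following standalone C++ sketch is illustrative only (the helper names multiply_round_down/multiply_round_up are hypothetical, not the chain's matching code): rounding the proceeds down lets a 10 USD fill at 11 USD per CORE pay out 0 CORE (the something-for-nothing bug), while rounding up the collateral paid by a completely covered call yields the 1 CORE the post-hardfork tests expect.

#include <cassert>
#include <cstdint>

// Hypothetical helper: amount converted through the price num/den, rounded down
// (the pre-hardfork behaviour exercised above).
int64_t multiply_round_down( int64_t amount, int64_t num, int64_t den )
{
   return amount * num / den;               // integer division truncates toward zero
}

// Hypothetical helper: the same conversion rounded up (the post-hardfork behaviour
// when an order or call is filled completely, so the receiving side never gets nothing).
int64_t multiply_round_up( int64_t amount, int64_t num, int64_t den )
{
   return ( amount * num + den - 1 ) / den;
}

int main()
{
   // The call above owes 10 USD and is matched at 11 USD / 1 CORE.
   assert( multiply_round_down( 10, 1, 11 ) == 0 ); // 10*1/11 truncates to 0: something for nothing
   assert( multiply_round_up( 10, 1, 11 ) == 1 );   // rounded up, the fully covered call pays 1 CORE
   return 0;
}
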
+/*** + * Another test case + * for fixed bitshares-core issue #132: something for nothing when matching a limit order with a call order. + * + * In this test case, the limit order is taker. + */ +BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test2_after_hardfork ) +{ try { + auto mi = db.get_global_properties().parameters.maintenance_interval; + generate_blocks(HARDFORK_CORE_184_TIME - mi); + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + + set_expiration( db, trx ); + + ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(borrower4)(feedproducer)); + + const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); + const auto& core = asset_id_type()(db); + + int64_t init_balance(1000000); + + transfer(committee_account, buyer_id, asset(init_balance)); + transfer(committee_account, borrower_id, asset(init_balance)); + transfer(committee_account, borrower2_id, asset(init_balance)); + transfer(committee_account, borrower3_id, asset(init_balance)); + transfer(committee_account, borrower4_id, asset(init_balance)); + update_feed_producers( bitusd, {feedproducer.id} ); + + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd.amount( 100 ) / core.amount( 5 ); + publish_feed( bitusd, feedproducer, current_feed ); + // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 + const call_order_object& call = *borrow( borrower, bitusd.amount(10), asset(1)); + call_order_id_type call_id = call.id; + // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 + const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(17500)); + transfer(borrower, seller, bitusd.amount(10)); + transfer(borrower3, seller, bitusd.amount(100000)); + + BOOST_CHECK_EQUAL( 10, call.debt.value ); + BOOST_CHECK_EQUAL( 1, call.collateral.value ); + BOOST_CHECK_EQUAL( 100000, call3.debt.value ); + BOOST_CHECK_EQUAL( 17500, call3.collateral.value ); + + BOOST_CHECK_EQUAL( 100010, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + BOOST_CHECK_EQUAL( init_balance-1, get_balance(borrower, core) ); + + // adjust price feed to get call_order into margin call territory + current_feed.settlement_price = bitusd.amount( 120 ) / core.amount(10); + publish_feed( bitusd, feedproducer, current_feed ); + // settlement price = 120 USD / 10 CORE, mssp = 120/11 USD/CORE + + // This would match with call at price 120 USD / 11 CORE (assume hard fork core-342 and hard fork core-338 occur at same time), + // but call only owes 10 USD, + // Since the call would pay off all debt, let it pay 1 CORE from collateral + // The remaining USD will be left in the order on the market + limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(33), core.amount(3))->id; + BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled + BOOST_CHECK_EQUAL( 100010-33, get_balance(seller, bitusd) ); // the seller paid 33 USD + BOOST_CHECK_EQUAL( 1, get_balance(seller, core) ); // the seller got 1 CORE + BOOST_CHECK_EQUAL( 33-10, sell_id(db).for_sale.value ); // the sell order has some USD left + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + BOOST_CHECK_EQUAL( init_balance-1, get_balance(borrower, core) ); + + generate_block(); + +} FC_LOG_AND_RETHROW() } + +/*** + * Yet another test case + * for fixed bitshares-core issue 
#132: something for nothing when matching a limit order with a call order. + * Also detects the cull_small issue in check_call_orders. + * + * In this test case, the limit order is maker. + */ +BOOST_AUTO_TEST_CASE( issue_132_limit_and_call_test3_after_hardfork ) +{ try { + auto mi = db.get_global_properties().parameters.maintenance_interval; + generate_blocks(HARDFORK_CORE_184_TIME - mi); + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + + set_expiration( db, trx ); + + ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(borrower4)(feedproducer)); + + const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); + const auto& core = asset_id_type()(db); + const asset_id_type bitusd_id = bitusd.id; + const asset_id_type core_id = core.id; + + int64_t init_balance(1000000); + + transfer(committee_account, buyer_id, asset(init_balance)); + transfer(committee_account, borrower_id, asset(init_balance)); + transfer(committee_account, borrower2_id, asset(init_balance)); + transfer(committee_account, borrower3_id, asset(init_balance)); + transfer(committee_account, borrower4_id, asset(init_balance)); + update_feed_producers( bitusd, {feedproducer.id} ); + + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd.amount( 100 ) / core.amount( 5 ); + publish_feed( bitusd, feedproducer, current_feed ); + // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 + const call_order_object& call = *borrow( borrower, bitusd.amount(10), asset(1)); + call_order_id_type call_id = call.id; + // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 + const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(17500)); + transfer(borrower, seller, bitusd.amount(10)); + transfer(borrower3, seller, bitusd.amount(100000)); + + BOOST_CHECK_EQUAL( 10, call.debt.value ); + BOOST_CHECK_EQUAL( 1, call.collateral.value ); + BOOST_CHECK_EQUAL( 100000, call3.debt.value ); + BOOST_CHECK_EQUAL( 17500, call3.collateral.value ); + + BOOST_CHECK_EQUAL( 100010, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + BOOST_CHECK_EQUAL( init_balance-1, get_balance(borrower, core) ); + + // create a limit order which will be matched later + limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(33), core.amount(3))->id; + BOOST_CHECK_EQUAL( 33, sell_id(db).for_sale.value ); + BOOST_CHECK_EQUAL( 100010-33, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + + generate_block(); + + // adjust price feed to get call_order into margin call territory + current_feed.settlement_price = bitusd_id(db).amount( 120 ) / core_id(db).amount(10); + publish_feed( bitusd_id(db), feedproducer_id(db), current_feed ); + // settlement price = 120 USD / 10 CORE, mssp = 120/11 USD/CORE + + // the limit order will match with call at price 33 USD / 3 CORE, but call only owes 10 USD, + // Since the call would pay off all debt, let it pay 1 CORE from collateral + // The remaining USD will be in the order on the market + BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled + BOOST_CHECK_EQUAL( 100010-33, get_balance(seller_id, bitusd_id) ); // the seller paid 33 USD + BOOST_CHECK_EQUAL( 1, get_balance(seller_id, core_id) ); // the seller got 1 CORE + BOOST_CHECK_EQUAL( 33-10, 
sell_id(db).for_sale.value ); // the sell order has some USD left + BOOST_CHECK_EQUAL( 0, get_balance(borrower_id, bitusd_id) ); + BOOST_CHECK_EQUAL( init_balance-1, get_balance(borrower_id, core_id) ); + + generate_block(); + +} FC_LOG_AND_RETHROW() } + +/*** + * This test case reproduces one of the scenarios described in bitshares-core issue #342: + * when matching a big taker limit order with a small maker call order, + * rounding was in favor of the small call order. + */ +BOOST_AUTO_TEST_CASE( limit_call_rounding_test1 ) +{ try { + generate_blocks( HARDFORK_555_TIME ); + generate_block(); + + set_expiration( db, trx ); + + ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(borrower4)(feedproducer)); + + const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); + const auto& core = asset_id_type()(db); + + int64_t init_balance(1000000); + + transfer(committee_account, buyer_id, asset(init_balance)); + transfer(committee_account, borrower_id, asset(init_balance)); + transfer(committee_account, borrower2_id, asset(init_balance)); + transfer(committee_account, borrower3_id, asset(init_balance)); + transfer(committee_account, borrower4_id, asset(init_balance)); + update_feed_producers( bitusd, {feedproducer.id} ); + + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd.amount( 100 ) / core.amount( 5 ); + publish_feed( bitusd, feedproducer, current_feed ); + // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 + const call_order_object& call = *borrow( borrower, bitusd.amount(20), asset(2)); + call_order_id_type call_id = call.id; + // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 + const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(17500)); + transfer(borrower, seller, bitusd.amount(20)); + transfer(borrower3, seller, bitusd.amount(100000)); + + BOOST_CHECK_EQUAL( 20, call.debt.value ); + BOOST_CHECK_EQUAL( 2, call.collateral.value ); + BOOST_CHECK_EQUAL( 100000, call3.debt.value ); + BOOST_CHECK_EQUAL( 17500, call3.collateral.value ); + + BOOST_CHECK_EQUAL( 100020, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + BOOST_CHECK_EQUAL( init_balance-2, get_balance(borrower, core) ); + + // adjust price feed to get call_order into margin call territory + current_feed.settlement_price = bitusd.amount( 120 ) / core.amount(10); + publish_feed( bitusd, feedproducer, current_feed ); + // settlement price = 120 USD / 10 CORE, mssp = 120/11 USD/CORE + + // This would match with call at limit order's price 33 USD / 3 CORE, but call only owes 20 USD, + // so the seller will pay the whole 20 USD and get 1 CORE, since 20 USD doesn't worth 2 CORE according to price 33/3, + // effective price is 20/1 which is worse than the limit order's desired 33/3. 
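   // (Worked numbers for the rounding described above, added for illustration: at the limit
   //  order's price of 33 USD : 3 CORE, the call's 20 USD of debt corresponds to 20 * 3 / 33,
   //  i.e. less than 2 CORE, and before hardfork core-342 the result is truncated to 1 CORE,
   //  so the round-down benefits the small call order.)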
+ // The remaining USD will be left in the order on the market + limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(33), core.amount(3))->id; + BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled + BOOST_CHECK_EQUAL( 100020-33, get_balance(seller, bitusd) ); // the seller paid 33 USD + BOOST_CHECK_EQUAL( 1, get_balance(seller, core) ); // the seller got 1 CORE + BOOST_CHECK_EQUAL( 33-20, sell_id(db).for_sale.value ); // the sell order has some USD left + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + BOOST_CHECK_EQUAL( init_balance-1, get_balance(borrower, core) ); + + generate_block(); + +} FC_LOG_AND_RETHROW() } + +/*** + * This test case tests one of the scenarios described in bitshares-core issue #342 after hard fork: + * when matching a big taker limit order with a small maker call order, + * rounding in favor of the big limit order. + */ +BOOST_AUTO_TEST_CASE( limit_call_rounding_test1_after_hf_342 ) +{ try { + auto mi = db.get_global_properties().parameters.maintenance_interval; + generate_blocks(HARDFORK_CORE_342_TIME - mi); + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + + set_expiration( db, trx ); + + ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(borrower4)(feedproducer)); + + const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); + const auto& core = asset_id_type()(db); + + int64_t init_balance(1000000); + + transfer(committee_account, buyer_id, asset(init_balance)); + transfer(committee_account, borrower_id, asset(init_balance)); + transfer(committee_account, borrower2_id, asset(init_balance)); + transfer(committee_account, borrower3_id, asset(init_balance)); + transfer(committee_account, borrower4_id, asset(init_balance)); + update_feed_producers( bitusd, {feedproducer.id} ); + + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd.amount( 100 ) / core.amount( 5 ); + publish_feed( bitusd, feedproducer, current_feed ); + // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 + const call_order_object& call = *borrow( borrower, bitusd.amount(20), asset(2)); + call_order_id_type call_id = call.id; + // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 + const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(17500)); + transfer(borrower, seller, bitusd.amount(20)); + transfer(borrower3, seller, bitusd.amount(100000)); + + BOOST_CHECK_EQUAL( 20, call.debt.value ); + BOOST_CHECK_EQUAL( 2, call.collateral.value ); + BOOST_CHECK_EQUAL( 100000, call3.debt.value ); + BOOST_CHECK_EQUAL( 17500, call3.collateral.value ); + + BOOST_CHECK_EQUAL( 100020, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + BOOST_CHECK_EQUAL( init_balance-2, get_balance(borrower, core) ); + + // adjust price feed to get call_order into margin call territory + current_feed.settlement_price = bitusd.amount( 120 ) / core.amount(10); + publish_feed( bitusd, feedproducer, current_feed ); + // settlement price = 120 USD / 10 CORE, mssp = 120/11 USD/CORE + + // This would match with call at price 120 USD / 11 CORE (assume hard fork core-342 and hard fork core-338 occur at same time), + // but call only owes 20 USD, + // so the seller will pay 20 USD and get 2 CORE, since 20 USD worths a little more than 1 CORE according to price 
120/11, + // effective price is 20/2 which is not worse than the limit order's desired 33/3. + // The remaining USD will be left in the order on the market + limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(33), core.amount(3))->id; + BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled + BOOST_CHECK_EQUAL( 100020-33, get_balance(seller, bitusd) ); // the seller paid 33 USD + BOOST_CHECK_EQUAL( 2, get_balance(seller, core) ); // the seller got 2 CORE + BOOST_CHECK_EQUAL( 33-20, sell_id(db).for_sale.value ); // the sell order has some USD left + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + BOOST_CHECK_EQUAL( init_balance-2, get_balance(borrower, core) ); + + generate_block(); + +} FC_LOG_AND_RETHROW() } + +/*** + * Due to #338, when matching a smaller taker limit order with a big maker call order, + * the small order will be filled at its own price. + * So unable or no need to reproduce one of the scenarios described in bitshares-core issue #342: + * when matching a small taker limit order with a big maker call order, + * the small limit order would be paying too much. + * But we'll just write the test case for #338 here. + */ +BOOST_AUTO_TEST_CASE( limit_call_rounding_test2 ) +{ try { + generate_blocks( HARDFORK_555_TIME ); + generate_block(); + + set_expiration( db, trx ); + + ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(borrower4)(feedproducer)); + + const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); + const auto& core = asset_id_type()(db); + + int64_t init_balance(1000000); + + transfer(committee_account, buyer_id, asset(init_balance)); + transfer(committee_account, borrower_id, asset(init_balance)); + transfer(committee_account, borrower2_id, asset(init_balance)); + transfer(committee_account, borrower3_id, asset(init_balance)); + transfer(committee_account, borrower4_id, asset(init_balance)); + update_feed_producers( bitusd, {feedproducer.id} ); + + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd.amount( 100 ) / core.amount( 5 ); + publish_feed( bitusd, feedproducer, current_feed ); + // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 + const call_order_object& call = *borrow( borrower, bitusd.amount(20), asset(2)); + call_order_id_type call_id = call.id; + // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 + const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(17500)); + transfer(borrower, seller, bitusd.amount(20)); + transfer(borrower3, seller, bitusd.amount(100000)); + + BOOST_CHECK_EQUAL( 20, call.debt.value ); + BOOST_CHECK_EQUAL( 2, call.collateral.value ); + BOOST_CHECK_EQUAL( 100000, call3.debt.value ); + BOOST_CHECK_EQUAL( 17500, call3.collateral.value ); + + BOOST_CHECK_EQUAL( 100020, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + BOOST_CHECK_EQUAL( init_balance-2, get_balance(borrower, core) ); + + // adjust price feed to get call_order into margin call territory + current_feed.settlement_price = bitusd.amount( 120 ) / core.amount(10); + publish_feed( bitusd, feedproducer, current_feed ); + // settlement price = 120 USD / 10 CORE, mssp = 120/11 USD/CORE + + // This would match with call at limit order's price 15 USD / 1 CORE, + // so the seller will pay 15 USD and get 1 CORE, + // effective 
price is 15/1. + BOOST_CHECK( !create_sell_order(seller, bitusd.amount(15), core.amount(1)) ); // the sell order is filled + BOOST_CHECK( db.find( call_id ) != nullptr ); // the first call order did not get filled + BOOST_CHECK_EQUAL( 20-15, call.debt.value ); // call paid 15 USD + BOOST_CHECK_EQUAL( 2-1, call.collateral.value ); // call got 1 CORE + BOOST_CHECK_EQUAL( 100020-15, get_balance(seller, bitusd) ); // the seller paid 15 USD + BOOST_CHECK_EQUAL( 1, get_balance(seller, core) ); // the seller got 1 CORE + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + BOOST_CHECK_EQUAL( init_balance-2, get_balance(borrower, core) ); + + generate_block(); + +} FC_LOG_AND_RETHROW() } + + +/*** + * This test case tests one of the scenarios described in bitshares-core issue #342 after hard fork: + * when matching a small taker limit order with a big maker call order, + * the small limit order would be paying minimum required. + */ +BOOST_AUTO_TEST_CASE( limit_call_rounding_test2_after_hf_342 ) +{ try { + auto mi = db.get_global_properties().parameters.maintenance_interval; + generate_blocks(HARDFORK_CORE_342_TIME - mi); + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + + set_expiration( db, trx ); + + ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(borrower4)(feedproducer)); + + const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); + const auto& core = asset_id_type()(db); + + int64_t init_balance(1000000); + + transfer(committee_account, buyer_id, asset(init_balance)); + transfer(committee_account, borrower_id, asset(init_balance)); + transfer(committee_account, borrower2_id, asset(init_balance)); + transfer(committee_account, borrower3_id, asset(init_balance)); + transfer(committee_account, borrower4_id, asset(init_balance)); + update_feed_producers( bitusd, {feedproducer.id} ); + + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd.amount( 100 ) / core.amount( 5 ); + publish_feed( bitusd, feedproducer, current_feed ); + // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 + const call_order_object& call = *borrow( borrower, bitusd.amount(20), asset(2)); + call_order_id_type call_id = call.id; + // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 + const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(17500)); + transfer(borrower, seller, bitusd.amount(20)); + transfer(borrower3, seller, bitusd.amount(100000)); + + BOOST_CHECK_EQUAL( 20, call.debt.value ); + BOOST_CHECK_EQUAL( 2, call.collateral.value ); + BOOST_CHECK_EQUAL( 100000, call3.debt.value ); + BOOST_CHECK_EQUAL( 17500, call3.collateral.value ); + + BOOST_CHECK_EQUAL( 100020, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + BOOST_CHECK_EQUAL( init_balance-2, get_balance(borrower, core) ); + + // adjust price feed to get call_order into margin call territory + current_feed.settlement_price = bitusd.amount( 120 ) / core.amount(10); + publish_feed( bitusd, feedproducer, current_feed ); + // settlement price = 120 USD / 10 CORE, mssp = 120/11 USD/CORE + + // This would match with call at price 120 USD / 11 CORE (assume hard fork core-342 and hard fork core-338 occur at same time), + // so the seller will get 1 CORE, and pay 11 USD since 1 CORE worths a little more than 10 USD according to 
price 120/11, + // and the extra 4 USD will be returned but not overpaid, + // effective price is 11/1 which is close to 120/11. + BOOST_CHECK( !create_sell_order(seller, bitusd.amount(15), core.amount(1)) ); // the sell order is filled + BOOST_CHECK( db.find( call_id ) != nullptr ); // the first call order did not get filled + BOOST_CHECK_EQUAL( 20-11, call.debt.value ); // call paid 11 USD + BOOST_CHECK_EQUAL( 2-1, call.collateral.value ); // call got 1 CORE + BOOST_CHECK_EQUAL( 100020-11, get_balance(seller, bitusd) ); // the seller paid 11 USD + BOOST_CHECK_EQUAL( 1, get_balance(seller, core) ); // the seller got 1 CORE + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + BOOST_CHECK_EQUAL( init_balance-2, get_balance(borrower, core) ); + + generate_block(); + +} FC_LOG_AND_RETHROW() } + +/*** + * This test case reproduces one of the scenarios described in bitshares-core issue #342: + * when matching a small taker call order with a big maker limit order, + * rounding was in favor of the small call order. + */ +BOOST_AUTO_TEST_CASE( call_limit_rounding_test1 ) +{ try { + generate_blocks( HARDFORK_555_TIME ); + generate_block(); + + set_expiration( db, trx ); + + ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(borrower4)(feedproducer)); + + const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); + const auto& core = asset_id_type()(db); + const asset_id_type bitusd_id = bitusd.id; + const asset_id_type core_id = core.id; + + int64_t init_balance(1000000); + + transfer(committee_account, buyer_id, asset(init_balance)); + transfer(committee_account, borrower_id, asset(init_balance)); + transfer(committee_account, borrower2_id, asset(init_balance)); + transfer(committee_account, borrower3_id, asset(init_balance)); + transfer(committee_account, borrower4_id, asset(init_balance)); + update_feed_producers( bitusd, {feedproducer.id} ); + + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd.amount( 100 ) / core.amount( 5 ); + publish_feed( bitusd, feedproducer, current_feed ); + // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 + const call_order_object& call = *borrow( borrower, bitusd.amount(20), asset(2)); + call_order_id_type call_id = call.id; + // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 + const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(17500)); + transfer(borrower, seller, bitusd.amount(20)); + transfer(borrower3, seller, bitusd.amount(100000)); + + BOOST_CHECK_EQUAL( 20, call.debt.value ); + BOOST_CHECK_EQUAL( 2, call.collateral.value ); + BOOST_CHECK_EQUAL( 100000, call3.debt.value ); + BOOST_CHECK_EQUAL( 17500, call3.collateral.value ); + + BOOST_CHECK_EQUAL( 100020, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + BOOST_CHECK_EQUAL( init_balance-2, get_balance(borrower, core) ); + + // create a limit order which will be matched later + limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(33), core.amount(3))->id; + BOOST_CHECK_EQUAL( 33, sell_id(db).for_sale.value ); + BOOST_CHECK_EQUAL( 100020-33, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + + generate_block(); + + // adjust price feed to get call_order into margin call territory + current_feed.settlement_price = bitusd_id(db).amount( 120 ) 
/ core_id(db).amount(10); + publish_feed( bitusd_id(db), feedproducer_id(db), current_feed ); + // settlement price = 120 USD / 10 CORE, mssp = 120/11 USD/CORE + + // The limit would match with call at limit order's price 33 USD / 3 CORE, but call only owes 20 USD, + // so the seller will pay the whole 20 USD and get 1 CORE, since 20 USD doesn't worth 2 CORE according to price 33/3, + // effective price is 20/1 which is worse than the limit order's desired 33/3. + // The remaining USD will be left in the order on the market + BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled + BOOST_CHECK_EQUAL( 100020-33, get_balance(seller_id, bitusd_id) ); // the seller paid 33 USD + BOOST_CHECK_EQUAL( 1, get_balance(seller_id, core_id) ); // the seller got 1 CORE + BOOST_CHECK_EQUAL( 33-20, sell_id(db).for_sale.value ); // the sell order has some USD left + BOOST_CHECK_EQUAL( 0, get_balance(borrower_id, bitusd_id) ); + BOOST_CHECK_EQUAL( init_balance-1, get_balance(borrower_id, core_id) ); + + generate_block(); + +} FC_LOG_AND_RETHROW() } + +/*** + * This test case tests one of the scenarios described in bitshares-core issue #342 after hard fork: + * when matching a small taker call order with a big maker limit order, + * rounding in favor of the big limit order. + */ +BOOST_AUTO_TEST_CASE( call_limit_rounding_test1_after_hf_342 ) +{ try { + auto mi = db.get_global_properties().parameters.maintenance_interval; + generate_blocks(HARDFORK_CORE_342_TIME - mi); + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + + set_expiration( db, trx ); + + ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(borrower4)(feedproducer)); + + const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); + const auto& core = asset_id_type()(db); + const asset_id_type bitusd_id = bitusd.id; + const asset_id_type core_id = core.id; + + int64_t init_balance(1000000); + + transfer(committee_account, buyer_id, asset(init_balance)); + transfer(committee_account, borrower_id, asset(init_balance)); + transfer(committee_account, borrower2_id, asset(init_balance)); + transfer(committee_account, borrower3_id, asset(init_balance)); + transfer(committee_account, borrower4_id, asset(init_balance)); + update_feed_producers( bitusd, {feedproducer.id} ); + + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd.amount( 100 ) / core.amount( 5 ); + publish_feed( bitusd, feedproducer, current_feed ); + // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 + const call_order_object& call = *borrow( borrower, bitusd.amount(20), asset(2)); + call_order_id_type call_id = call.id; + // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 + const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(17500)); + transfer(borrower, seller, bitusd.amount(20)); + transfer(borrower3, seller, bitusd.amount(100000)); + + BOOST_CHECK_EQUAL( 20, call.debt.value ); + BOOST_CHECK_EQUAL( 2, call.collateral.value ); + BOOST_CHECK_EQUAL( 100000, call3.debt.value ); + BOOST_CHECK_EQUAL( 17500, call3.collateral.value ); + + BOOST_CHECK_EQUAL( 100020, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + BOOST_CHECK_EQUAL( init_balance-2, get_balance(borrower, core) ); + + // create a limit order which will be matched later + 
limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(33), core.amount(3))->id; + BOOST_CHECK_EQUAL( 33, sell_id(db).for_sale.value ); + BOOST_CHECK_EQUAL( 100020-33, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + + generate_block(); + + // adjust price feed to get call_order into margin call territory + current_feed.settlement_price = bitusd_id(db).amount( 120 ) / core_id(db).amount(10); + publish_feed( bitusd_id(db), feedproducer_id(db), current_feed ); + // settlement price = 120 USD / 10 CORE, mssp = 120/11 USD/CORE + + // The limit would match with call at limit order's price 33 USD / 3 CORE, but call only owes 20 USD, + // so the seller will pay 20 USD and get 2 CORE, since 20 USD worths a little more than 1 CORE according to price 33/3, + // effective price is 20/2 which is not worse than the limit order's desired 33/3. + // The remaining USD will be left in the order on the market + BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled + BOOST_CHECK_EQUAL( 100020-33, get_balance(seller_id, bitusd_id) ); // the seller paid 33 USD + BOOST_CHECK_EQUAL( 2, get_balance(seller_id, core_id) ); // the seller got 2 CORE + BOOST_CHECK_EQUAL( 33-20, sell_id(db).for_sale.value ); // the sell order has some USD left + BOOST_CHECK_EQUAL( 0, get_balance(borrower_id, bitusd_id) ); + BOOST_CHECK_EQUAL( init_balance-2, get_balance(borrower_id, core_id) ); + + generate_block(); + +} FC_LOG_AND_RETHROW() } + +/*** + * This test case reproduces one of the scenarios described in bitshares-core issue #342: + * when matching a big taker call order with a small maker limit order, + * the small limit order would be paying too much. + */ +BOOST_AUTO_TEST_CASE( call_limit_rounding_test2 ) +{ try { + generate_blocks( HARDFORK_555_TIME ); + generate_block(); + + set_expiration( db, trx ); + + ACTORS((buyer)(seller)(seller2)(borrower)(borrower2)(borrower3)(borrower4)(feedproducer)); + + const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); + const auto& core = asset_id_type()(db); + const asset_id_type bitusd_id = bitusd.id; + const asset_id_type core_id = core.id; + + int64_t init_balance(1000000); + + transfer(committee_account, buyer_id, asset(init_balance)); + transfer(committee_account, borrower_id, asset(init_balance)); + transfer(committee_account, borrower2_id, asset(init_balance)); + transfer(committee_account, borrower3_id, asset(init_balance)); + transfer(committee_account, borrower4_id, asset(init_balance)); + update_feed_producers( bitusd, {feedproducer.id} ); + + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd.amount( 100 ) / core.amount( 5 ); + publish_feed( bitusd, feedproducer, current_feed ); + // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 + const call_order_object& call = *borrow( borrower, bitusd.amount(50), asset(5)); + call_order_id_type call_id = call.id; + // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 + const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(17500)); + transfer(borrower, seller, bitusd.amount(50)); + transfer(borrower3, seller2, bitusd.amount(100000)); + + BOOST_CHECK_EQUAL( 50, call.debt.value ); + BOOST_CHECK_EQUAL( 5, call.collateral.value ); + BOOST_CHECK_EQUAL( 100000, call3.debt.value ); + BOOST_CHECK_EQUAL( 17500, call3.collateral.value ); + + BOOST_CHECK_EQUAL( 
50, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 100000, get_balance(seller2, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + BOOST_CHECK_EQUAL( init_balance-5, get_balance(borrower, core) ); + + // create a buy order which will be matched + limit_order_id_type buy_id = create_sell_order(buyer, core.amount(1), bitusd.amount(10))->id; + BOOST_CHECK_EQUAL( 1, buy_id(db).for_sale.value ); + BOOST_CHECK_EQUAL( 1000000-1, get_balance(buyer, core) ); + BOOST_CHECK_EQUAL( 0, get_balance(buyer, bitusd) ); + + // create a limit order to fill the buy order, and remaining amounts will be matched later + limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(31), core.amount(2))->id; + BOOST_CHECK( !db.find( buy_id ) ); // the buy order is filled + BOOST_CHECK_EQUAL( 1000000-1, get_balance(buyer, core) ); + BOOST_CHECK_EQUAL( 10, get_balance(buyer, bitusd) ); // buyer got 10 usd + BOOST_CHECK_EQUAL( 21, sell_id(db).for_sale.value ); // remaining amount of sell order is 21 + BOOST_CHECK_EQUAL( 50-31, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 1, get_balance(seller, core) ); // seller got 1 core + + // create another limit order which will be matched later + limit_order_id_type sell_id2 = create_sell_order(seller2, bitusd.amount(14), core.amount(1))->id; + BOOST_CHECK_EQUAL( 14, sell_id2(db).for_sale.value ); + BOOST_CHECK_EQUAL( 100000-14, get_balance(seller2, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller2, core) ); + + generate_block(); + + // adjust price feed to get call_order into margin call territory + current_feed.settlement_price = bitusd_id(db).amount( 120 ) / core_id(db).amount(10); + publish_feed( bitusd_id(db), feedproducer_id(db), current_feed ); + // settlement price = 120 USD / 10 CORE, mssp = 120/11 USD/CORE + + // call will match with the limit orders at limit orders' prices, + // firstly, call will match with sell_id, which has 21 USD remaining, with price 31 USD / 2 CORE, + // so the seller will pay 21 USD, get 1 CORE since 21 USD doesn't worth 2 CORE according to price 31/2, + // effective price is 21/1 which is much bigger than 31/2; + // then, call will match with sell_id2, which has 14 USD remaining, with price 14 USD / 1 CORE, + // so the seller will pay 14 USD, get 1 CORE since 14 USD worths just 1 CORE according to price 14/1 + BOOST_CHECK( !db.find( sell_id ) ); // the sell order is filled + BOOST_CHECK( !db.find( sell_id2 ) ); // the other sell order is filled + BOOST_CHECK( db.find( call_id ) != nullptr ); // the first call order did not get filled + BOOST_CHECK_EQUAL( 50-14-21, call_id(db).debt.value ); // call paid 14 USD and 21 USD + BOOST_CHECK_EQUAL( 5-1-1, call_id(db).collateral.value ); // call got 1 CORE and 1 CORE + BOOST_CHECK_EQUAL( 50-31, get_balance(seller_id, bitusd_id) ); // seller paid 31 USD in total + BOOST_CHECK_EQUAL( 1+1, get_balance(seller_id, core_id) ); // seller got 1 more CORE + BOOST_CHECK_EQUAL( 100000-14, get_balance(seller2_id, bitusd_id) ); // seller2 paid 14 USD + BOOST_CHECK_EQUAL( 1, get_balance(seller2_id, core_id) ); // seller2 got 1 CORE + BOOST_CHECK_EQUAL( 0, get_balance(borrower_id, bitusd_id) ); + BOOST_CHECK_EQUAL( init_balance-5, get_balance(borrower_id, core_id) ); + + generate_block(); + +} FC_LOG_AND_RETHROW() } + +/*** + * This test case tests one of the scenarios described in bitshares-core issue #342 after hard fork: + * when matching a big taker call order with a small maker limit order, + * the small limit order 
would be paying minimum required. + */ +BOOST_AUTO_TEST_CASE( call_limit_rounding_test2_after_hf_342 ) +{ try { + auto mi = db.get_global_properties().parameters.maintenance_interval; + generate_blocks(HARDFORK_CORE_342_TIME - mi); + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + + set_expiration( db, trx ); + + ACTORS((buyer)(seller)(seller2)(borrower)(borrower2)(borrower3)(borrower4)(feedproducer)); + + const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); + const auto& core = asset_id_type()(db); + const asset_id_type bitusd_id = bitusd.id; + const asset_id_type core_id = core.id; + + int64_t init_balance(1000000); + + transfer(committee_account, buyer_id, asset(init_balance)); + transfer(committee_account, borrower_id, asset(init_balance)); + transfer(committee_account, borrower2_id, asset(init_balance)); + transfer(committee_account, borrower3_id, asset(init_balance)); + transfer(committee_account, borrower4_id, asset(init_balance)); + update_feed_producers( bitusd, {feedproducer.id} ); + + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd.amount( 100 ) / core.amount( 5 ); + publish_feed( bitusd, feedproducer, current_feed ); + // start out with 200% collateral, call price is 10/175 CORE/USD = 40/700 + const call_order_object& call = *borrow( borrower, bitusd.amount(50), asset(5)); + call_order_id_type call_id = call.id; + // create yet another position with 350% collateral, call price is 17.5/175 CORE/USD = 77/700 + const call_order_object& call3 = *borrow( borrower3, bitusd.amount(100000), asset(17500)); + transfer(borrower, seller, bitusd.amount(50)); + transfer(borrower3, seller2, bitusd.amount(100000)); + + BOOST_CHECK_EQUAL( 50, call.debt.value ); + BOOST_CHECK_EQUAL( 5, call.collateral.value ); + BOOST_CHECK_EQUAL( 100000, call3.debt.value ); + BOOST_CHECK_EQUAL( 17500, call3.collateral.value ); + + BOOST_CHECK_EQUAL( 50, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 100000, get_balance(seller2, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + BOOST_CHECK_EQUAL( init_balance-5, get_balance(borrower, core) ); + + // create a buy order which will be matched + limit_order_id_type buy_id = create_sell_order(buyer, core.amount(1), bitusd.amount(10))->id; + BOOST_CHECK_EQUAL( 1, buy_id(db).for_sale.value ); + BOOST_CHECK_EQUAL( 1000000-1, get_balance(buyer, core) ); + BOOST_CHECK_EQUAL( 0, get_balance(buyer, bitusd) ); + + // create a limit order to fill the buy order, and remaining amounts will be matched later + limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(31), core.amount(2))->id; + BOOST_CHECK( !db.find( buy_id ) ); // the buy order is filled + BOOST_CHECK_EQUAL( 1000000-1, get_balance(buyer, core) ); + BOOST_CHECK_EQUAL( 10, get_balance(buyer, bitusd) ); // buyer got 10 usd + BOOST_CHECK_EQUAL( 21, sell_id(db).for_sale.value ); // remaining amount of sell order is 21 + BOOST_CHECK_EQUAL( 50-31, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 1, get_balance(seller, core) ); // seller got 1 core + + // create another limit order which will be matched later + limit_order_id_type sell_id2 = create_sell_order(seller2, bitusd.amount(14), core.amount(1))->id; + BOOST_CHECK_EQUAL( 14, sell_id2(db).for_sale.value ); + BOOST_CHECK_EQUAL( 100000-14, get_balance(seller2, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller2, 
core) ); + + generate_block(); + + // adjust price feed to get call_order into margin call territory + current_feed.settlement_price = bitusd_id(db).amount( 120 ) / core_id(db).amount(10); + publish_feed( bitusd_id(db), feedproducer_id(db), current_feed ); + // settlement price = 120 USD / 10 CORE, mssp = 120/11 USD/CORE + + // call will match with the limit orders at limit orders' prices, + // firstly, call will match with sell_id, which has 21 USD remaining, with price 31 USD / 2 CORE, + // so the seller will get 1 CORE since 21 USD doesn't work 2 CORE according to price 31/2, + // and the seller will pay 16 USD since 1 CORE worths a little more than 15 USD according to price 31/2, + // and the extra 5 USD will be returned to seller since it doesn't worth 1 CORE, + // effective price is 16/1 which is close to 31/2; + // secondly, call will match with sell_id2, which has 14 USD remaining, with price 14 USD / 1 CORE, + // so the seller will get 1 CORE and pay 14 USD since 14 USD just worths 1 CORE according to price 14/1 + BOOST_CHECK( !db.find( sell_id ) ); // the sell order is filled + BOOST_CHECK( !db.find( sell_id2 ) ); // the other sell order is filled + BOOST_CHECK( db.find( call_id ) != nullptr ); // the first call order did not get filled + BOOST_CHECK_EQUAL( 50-14-16, call_id(db).debt.value ); // call paid 14 USD and 16 USD + BOOST_CHECK_EQUAL( 5-1-1, call_id(db).collateral.value ); // call got 1 CORE and 1 CORE + BOOST_CHECK_EQUAL( 50-31+(21-16), get_balance(seller_id, bitusd_id) ); // seller paid 31 USD then get refunded 5 USD + BOOST_CHECK_EQUAL( 1+1, get_balance(seller_id, core_id) ); // seller got 1 more CORE + BOOST_CHECK_EQUAL( 100000-14, get_balance(seller2_id, bitusd_id) ); // seller2 paid 14 USD + BOOST_CHECK_EQUAL( 1, get_balance(seller2_id, core_id) ); // seller2 got 1 CORE + BOOST_CHECK_EQUAL( 0, get_balance(borrower_id, bitusd_id) ); + BOOST_CHECK_EQUAL( init_balance-5, get_balance(borrower_id, core_id) ); + + generate_block(); + +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/market_tests.cpp b/tests/tests/market_tests.cpp new file mode 100644 index 0000000000..1f29f0c843 --- /dev/null +++ b/tests/tests/market_tests.cpp @@ -0,0 +1,1510 @@ +/* + * Copyright (c) 2017 Peter Conrad, and other contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include + +#include + +#include + +#include "../common/database_fixture.hpp" + +using namespace graphene::chain; +using namespace graphene::chain::test; + +BOOST_FIXTURE_TEST_SUITE(market_tests, database_fixture) + +/*** + * Reproduce bitshares-core issue #338 #343 #453 #606 #625 #649 + */ +BOOST_AUTO_TEST_CASE(issue_338_etc) +{ try { + generate_blocks(HARDFORK_615_TIME); // get around Graphene issue #615 feed expiration bug + generate_block(); + + set_expiration( db, trx ); + + ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(feedproducer)); + + const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); + const auto& core = asset_id_type()(db); + asset_id_type usd_id = bitusd.id; + asset_id_type core_id = core.id; + + int64_t init_balance(1000000); + + transfer(committee_account, buyer_id, asset(init_balance)); + transfer(committee_account, borrower_id, asset(init_balance)); + transfer(committee_account, borrower2_id, asset(init_balance)); + transfer(committee_account, borrower3_id, asset(init_balance)); + update_feed_producers( bitusd, {feedproducer.id} ); + + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(5); + publish_feed( bitusd, feedproducer, current_feed ); + // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7 + const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000)); + call_order_id_type call_id = call.id; + // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7 + const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500)); + call_order_id_type call2_id = call2.id; + // create yet another position with 320% collateral, call price is 16/1.75 CORE/USD = 64/7 + const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(16000)); + call_order_id_type call3_id = call3.id; + transfer(borrower, seller, bitusd.amount(1000)); + + BOOST_CHECK_EQUAL( 1000, call.debt.value ); + BOOST_CHECK_EQUAL( 15000, call.collateral.value ); + BOOST_CHECK_EQUAL( 1000, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + + // adjust price feed to get call_order into margin call territory + current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(10); + publish_feed( bitusd, feedproducer, current_feed ); + // settlement price = 1/10, mssp = 1/11 + + // This order slightly below the call price will not be matched #606 + limit_order_id_type sell_low = create_sell_order(seller, bitusd.amount(7), core.amount(59))->id; + // This order above the MSSP will not be matched + limit_order_id_type sell_high = create_sell_order(seller, bitusd.amount(7), core.amount(78))->id; + // This would match but is blocked by sell_low?! 
#606 + limit_order_id_type sell_med = create_sell_order(seller, bitusd.amount(7), core.amount(60))->id; + + cancel_limit_order( sell_med(db) ); + cancel_limit_order( sell_high(db) ); + cancel_limit_order( sell_low(db) ); + + // current implementation: an incoming limit order will be filled at the + // requested price #338 + BOOST_CHECK( !create_sell_order(seller, bitusd.amount(7), core.amount(60)) ); + BOOST_CHECK_EQUAL( 993, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 60, get_balance(seller, core) ); + BOOST_CHECK_EQUAL( 993, call.debt.value ); + BOOST_CHECK_EQUAL( 14940, call.collateral.value ); + + limit_order_id_type buy_low = create_sell_order(buyer, asset(90), bitusd.amount(10))->id; + // margin call takes precedence + BOOST_CHECK( !create_sell_order(seller, bitusd.amount(7), core.amount(60)) ); + BOOST_CHECK_EQUAL( 986, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 120, get_balance(seller, core) ); + BOOST_CHECK_EQUAL( 986, call.debt.value ); + BOOST_CHECK_EQUAL( 14880, call.collateral.value ); + + limit_order_id_type buy_med = create_sell_order(buyer, asset(105), bitusd.amount(10))->id; + // margin call takes precedence + BOOST_CHECK( !create_sell_order(seller, bitusd.amount(7), core.amount(70)) ); + BOOST_CHECK_EQUAL( 979, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 190, get_balance(seller, core) ); + BOOST_CHECK_EQUAL( 979, call.debt.value ); + BOOST_CHECK_EQUAL( 14810, call.collateral.value ); + + limit_order_id_type buy_high = create_sell_order(buyer, asset(115), bitusd.amount(10))->id; + // margin call still has precedence (!) #625 + BOOST_CHECK( !create_sell_order(seller, bitusd.amount(7), core.amount(77)) ); + BOOST_CHECK_EQUAL( 972, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 267, get_balance(seller, core) ); + BOOST_CHECK_EQUAL( 972, call.debt.value ); + BOOST_CHECK_EQUAL( 14733, call.collateral.value ); + + cancel_limit_order( buy_high(db) ); + cancel_limit_order( buy_med(db) ); + cancel_limit_order( buy_low(db) ); + + // call with more usd + BOOST_CHECK( !create_sell_order(seller, bitusd.amount(700), core.amount(7700)) ); + BOOST_CHECK_EQUAL( 272, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 7967, get_balance(seller, core) ); + BOOST_CHECK_EQUAL( 272, call.debt.value ); + BOOST_CHECK_EQUAL( 7033, call.collateral.value ); + + // at this moment, collateralization of call is 7033 / 272 = 25.8 + // collateralization of call2 is 15500 / 1000 = 15.5 + // collateralization of call3 is 16000 / 1000 = 16 + + // call more, still matches with the first call order #343 + BOOST_CHECK( !create_sell_order(seller, bitusd.amount(10), core.amount(110)) ); + BOOST_CHECK_EQUAL( 262, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 8077, get_balance(seller, core) ); + BOOST_CHECK_EQUAL( 262, call.debt.value ); + BOOST_CHECK_EQUAL( 6923, call.collateral.value ); + + // at this moment, collateralization of call is 6923 / 262 = 26.4 + // collateralization of call2 is 15500 / 1000 = 15.5 + // collateralization of call3 is 16000 / 1000 = 16 + + // force settle + force_settle( seller, bitusd.amount(10) ); + BOOST_CHECK_EQUAL( 252, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 8077, get_balance(seller, core) ); + BOOST_CHECK_EQUAL( 262, call.debt.value ); + BOOST_CHECK_EQUAL( 6923, call.collateral.value ); + + // generate blocks to let the settle order execute (price feed will expire after it) + generate_blocks( HARDFORK_615_TIME + fc::hours(25) ); + // call2 get settled #343 + BOOST_CHECK_EQUAL( 252, get_balance(seller_id, usd_id) ); + 
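   // (Worked numbers, added for illustration: the 10 USD force-settle executes at the
   //  1 USD : 10 CORE settlement price, so the seller receives 10 * 10 = 100 CORE on top of the
   //  8077 CORE held before, and call2's debt/collateral drop from 1000/15500 to 990/15400,
   //  as the checks below verify.)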
BOOST_CHECK_EQUAL( 8177, get_balance(seller_id, core_id) ); + BOOST_CHECK_EQUAL( 262, call_id(db).debt.value ); + BOOST_CHECK_EQUAL( 6923, call_id(db).collateral.value ); + BOOST_CHECK_EQUAL( 990, call2_id(db).debt.value ); + BOOST_CHECK_EQUAL( 15400, call2_id(db).collateral.value ); + + set_expiration( db, trx ); + update_feed_producers( usd_id(db), {feedproducer_id} ); + + // at this moment, collateralization of call is 8177 / 252 = 32.4 + // collateralization of call2 is 15400 / 990 = 15.5 + // collateralization of call3 is 16000 / 1000 = 16 + + // adjust price feed to get call2 into black swan territory, but not the first call order + current_feed.settlement_price = asset(1, usd_id) / asset(20, core_id); + publish_feed( usd_id(db), feedproducer_id(db), current_feed ); + // settlement price = 1/20, mssp = 1/22 + + // black swan event doesn't occur #649 + BOOST_CHECK( !usd_id(db).bitasset_data(db).has_settlement() ); + + // generate a block + generate_block(); + + set_expiration( db, trx ); + update_feed_producers( usd_id(db), {feedproducer_id} ); + + // adjust price feed back + current_feed.settlement_price = asset(1, usd_id) / asset(10, core_id); + publish_feed( usd_id(db), feedproducer_id(db), current_feed ); + // settlement price = 1/10, mssp = 1/11 + + transfer(borrower2_id, seller_id, asset(1000, usd_id)); + transfer(borrower3_id, seller_id, asset(1000, usd_id)); + + // Re-create sell_low, slightly below the call price, will not be matched, will expire soon + sell_low = create_sell_order(seller_id(db), asset(7, usd_id), asset(59), db.head_block_time()+fc::seconds(300) )->id; + // This would match but is blocked by sell_low, it has an amount same as call's debt which will be full filled later + sell_med = create_sell_order(seller_id(db), asset(262, usd_id), asset(2620))->id; // 1/10 + // Another big order above sell_med, blocked + limit_order_id_type sell_med2 = create_sell_order(seller_id(db), asset(1200, usd_id), asset(12120))->id; // 1/10.1 + // Another small order above sell_med2, blocked + limit_order_id_type sell_med3 = create_sell_order(seller_id(db), asset(120, usd_id), asset(1224))->id; // 1/10.2 + + // generate a block, sell_low will expire + BOOST_TEST_MESSAGE( "Expire sell_low" ); + generate_blocks( HARDFORK_615_TIME + fc::hours(26) ); + BOOST_CHECK( db.find( sell_low ) == nullptr ); + + // #453 multiple order matching issue occurs + BOOST_CHECK( db.find( sell_med ) == nullptr ); // sell_med get filled + BOOST_CHECK( db.find( sell_med2 ) != nullptr ); // sell_med2 is still there + BOOST_CHECK( db.find( sell_med3 ) == nullptr ); // sell_med3 get filled + BOOST_CHECK( db.find( call_id ) == nullptr ); // the first call order get filled + BOOST_CHECK( db.find( call2_id ) == nullptr ); // the second call order get filled + BOOST_CHECK( db.find( call3_id ) != nullptr ); // the third call order is still there + + +} FC_LOG_AND_RETHROW() } + +/*** + * Fixed bitshares-core issue #338 #343 #606 #625 #649 + */ +BOOST_AUTO_TEST_CASE(hardfork_core_338_test) +{ try { + auto mi = db.get_global_properties().parameters.maintenance_interval; + generate_blocks(HARDFORK_CORE_343_TIME - mi); // assume all hard forks occur at same time + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + + set_expiration( db, trx ); + + ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(feedproducer)); + + const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); + const auto& core = asset_id_type()(db); + asset_id_type usd_id = bitusd.id; + asset_id_type core_id = 
core.id; + + int64_t init_balance(1000000); + + transfer(committee_account, buyer_id, asset(init_balance)); + transfer(committee_account, borrower_id, asset(init_balance)); + transfer(committee_account, borrower2_id, asset(init_balance)); + transfer(committee_account, borrower3_id, asset(init_balance)); + update_feed_producers( bitusd, {feedproducer.id} ); + + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(5); + publish_feed( bitusd, feedproducer, current_feed ); + // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7 + const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000)); + call_order_id_type call_id = call.id; + // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7 + const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500)); + call_order_id_type call2_id = call2.id; + // create yet another position with 320% collateral, call price is 16/1.75 CORE/USD = 64/7 + const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(16000)); + call_order_id_type call3_id = call3.id; + transfer(borrower, seller, bitusd.amount(1000)); + transfer(borrower2, seller, bitusd.amount(1000)); + transfer(borrower3, seller, bitusd.amount(1000)); + + BOOST_CHECK_EQUAL( 1000, call.debt.value ); + BOOST_CHECK_EQUAL( 15000, call.collateral.value ); + BOOST_CHECK_EQUAL( 1000, call2.debt.value ); + BOOST_CHECK_EQUAL( 15500, call2.collateral.value ); + BOOST_CHECK_EQUAL( 1000, call3.debt.value ); + BOOST_CHECK_EQUAL( 16000, call3.collateral.value ); + BOOST_CHECK_EQUAL( 3000, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + + // adjust price feed to get call_order into margin call territory + current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(10); + publish_feed( bitusd, feedproducer, current_feed ); + // settlement price = 1/10, mssp = 1/11 + + // This sell order above MSSP will not be matched with a call + create_sell_order(seller, bitusd.amount(7), core.amount(78))->id; + + BOOST_CHECK_EQUAL( 2993, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + + // This buy order is too low will not be matched with a sell order + limit_order_id_type buy_low = create_sell_order(buyer, asset(90), bitusd.amount(10))->id; + // This buy order at MSSP will be matched only if no margin call (margin call takes precedence) + limit_order_id_type buy_med = create_sell_order(buyer, asset(110), bitusd.amount(10))->id; + // This buy order above MSSP will be matched with a sell order (limit order with better price takes precedence) + limit_order_id_type buy_high = create_sell_order(buyer, asset(111), bitusd.amount(10))->id; + + BOOST_CHECK_EQUAL( 0, get_balance(buyer, bitusd) ); + BOOST_CHECK_EQUAL( init_balance - 90 - 110 - 111, get_balance(buyer, core) ); + + // This order slightly below the call price will be matched: #606 fixed + BOOST_CHECK( !create_sell_order(seller, bitusd.amount(700), core.amount(5900) ) ); + + // firstly it will match with buy_high, at buy_high's price: #625 fixed + BOOST_CHECK( !db.find( buy_high ) ); + BOOST_CHECK_EQUAL( db.find( buy_med )->for_sale.value, 110 ); + BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 90 ); + + // buy_high pays 111 CORE, receives 10 USD goes to buyer's balance + BOOST_CHECK_EQUAL( 10, get_balance(buyer, bitusd) ); + 
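   // A worked sketch of the fill verified by the checks below (illustrative, not part of the
   // original patch): after filling buy_high the taker order has 700 - 10 = 690 USD left; a
   // margin call pays at the MSSP of 1 USD : 11 CORE, so matching the call releases
   // 690 * 11 = 7590 CORE, leaving the call with 1000 - 690 = 310 debt and
   // 15000 - 7590 = 7410 collateral.
   static_assert( 690 * 11 == 7590, "collateral paid out by the margin call at MSSP" );
   static_assert( 111 + 7590 == 7701, "seller's resulting CORE balance" );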
BOOST_CHECK_EQUAL( init_balance - 90 - 110 - 111, get_balance(buyer, core) ); + // sell order pays 10 USD, receives 111 CORE, remaining 690 USD for sale, still at price 7/59 + + // then it will match with call, at mssp: 1/11 = 690/7590 : #338 fixed + BOOST_CHECK_EQUAL( 2293, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 7701, get_balance(seller, core) ); + BOOST_CHECK_EQUAL( 310, call.debt.value ); + BOOST_CHECK_EQUAL( 7410, call.collateral.value ); + BOOST_CHECK_EQUAL( 1000, call2.debt.value ); + BOOST_CHECK_EQUAL( 15500, call2.collateral.value ); + BOOST_CHECK_EQUAL( 1000, call3.debt.value ); + BOOST_CHECK_EQUAL( 16000, call3.collateral.value ); + + // call's call_price will be updated after the match, to 741/31/1.75 CORE/USD = 2964/217 + // it's above settlement price (10/1) so won't be margin called again + BOOST_CHECK( price(asset(2964),asset(217,usd_id)) == call.call_price ); + + // This would match with call before, but would match with call2 after #343 fixed + BOOST_CHECK( !create_sell_order(seller, bitusd.amount(700), core.amount(6000) ) ); + BOOST_CHECK_EQUAL( db.find( buy_med )->for_sale.value, 110 ); + BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 90 ); + + // fill price would be mssp: 1/11 = 700/7700 : #338 fixed + BOOST_CHECK_EQUAL( 1593, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 15401, get_balance(seller, core) ); + BOOST_CHECK_EQUAL( 310, call.debt.value ); + BOOST_CHECK_EQUAL( 7410, call.collateral.value ); + BOOST_CHECK_EQUAL( 300, call2.debt.value ); + BOOST_CHECK_EQUAL( 7800, call2.collateral.value ); + BOOST_CHECK_EQUAL( 1000, call3.debt.value ); + BOOST_CHECK_EQUAL( 16000, call3.collateral.value ); + // call2's call_price will be updated after the match, to 78/3/1.75 CORE/USD = 312/21 + BOOST_CHECK( price(asset(312),asset(21,usd_id)) == call2.call_price ); + // it's above settlement price (10/1) so won't be margin called + + // at this moment, collateralization of call is 7410 / 310 = 23.9 + // collateralization of call2 is 7800 / 300 = 26 + // collateralization of call3 is 16000 / 1000 = 16 + + // force settle + force_settle( seller, bitusd.amount(10) ); + + BOOST_CHECK_EQUAL( 1583, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 15401, get_balance(seller, core) ); + BOOST_CHECK_EQUAL( 310, call.debt.value ); + BOOST_CHECK_EQUAL( 7410, call.collateral.value ); + BOOST_CHECK_EQUAL( 300, call2.debt.value ); + BOOST_CHECK_EQUAL( 7800, call2.collateral.value ); + BOOST_CHECK_EQUAL( 1000, call3.debt.value ); + BOOST_CHECK_EQUAL( 16000, call3.collateral.value ); + + // generate blocks to let the settle order execute (price feed will expire after it) + generate_block(); + generate_blocks( db.head_block_time() + fc::hours(24) ); + + // call3 get settled, at settlement price 1/10: #343 fixed + BOOST_CHECK_EQUAL( 1583, get_balance(seller_id, usd_id) ); + BOOST_CHECK_EQUAL( 15501, get_balance(seller_id, core_id) ); + BOOST_CHECK_EQUAL( 310, call_id(db).debt.value ); + BOOST_CHECK_EQUAL( 7410, call_id(db).collateral.value ); + BOOST_CHECK_EQUAL( 300, call2_id(db).debt.value ); + BOOST_CHECK_EQUAL( 7800, call2_id(db).collateral.value ); + BOOST_CHECK_EQUAL( 990, call3_id(db).debt.value ); + BOOST_CHECK_EQUAL( 15900, call3_id(db).collateral.value ); + + set_expiration( db, trx ); + update_feed_producers( usd_id(db), {feedproducer_id} ); + + // at this moment, collateralization of call is 7410 / 310 = 23.9 + // collateralization of call2 is 7800 / 300 = 26 + // collateralization of call3 is 15900 / 990 = 16.06 + + // adjust price feed to get call3 into 
black swan territory, but not the other call orders + // Note: after hard fork, black swan should occur when callateralization < mssp, but not at < feed + current_feed.settlement_price = asset(1, usd_id) / asset(16, core_id); + publish_feed( usd_id(db), feedproducer_id(db), current_feed ); + // settlement price = 1/16, mssp = 10/176 + + // black swan event will occur: #649 fixed + BOOST_CHECK( usd_id(db).bitasset_data(db).has_settlement() ); + // short positions will be closed + BOOST_CHECK( !db.find( call_id ) ); + BOOST_CHECK( !db.find( call2_id ) ); + BOOST_CHECK( !db.find( call3_id ) ); + + // generate a block + generate_block(); + + +} FC_LOG_AND_RETHROW() } + +/*** + * Fixed bitshares-core issue #453: multiple limit order filling issue + */ +BOOST_AUTO_TEST_CASE(hardfork_core_453_test) +{ try { + auto mi = db.get_global_properties().parameters.maintenance_interval; + generate_blocks(HARDFORK_CORE_453_TIME - mi); // assume all hard forks occur at same time + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + + set_expiration( db, trx ); + + ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(feedproducer)); + + const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); + const auto& core = asset_id_type()(db); + asset_id_type usd_id = bitusd.id; + + int64_t init_balance(1000000); + + transfer(committee_account, buyer_id, asset(init_balance)); + transfer(committee_account, borrower_id, asset(init_balance)); + transfer(committee_account, borrower2_id, asset(init_balance)); + transfer(committee_account, borrower3_id, asset(init_balance)); + update_feed_producers( bitusd, {feedproducer.id} ); + + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(5); + publish_feed( bitusd, feedproducer, current_feed ); + // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7 + const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000)); + call_order_id_type call_id = call.id; + // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7 + const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500)); + call_order_id_type call2_id = call2.id; + // create yet another position with 320% collateral, call price is 16/1.75 CORE/USD = 64/7 + const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(16000)); + call_order_id_type call3_id = call3.id; + transfer(borrower, seller, bitusd.amount(1000)); + transfer(borrower2, seller, bitusd.amount(1000)); + transfer(borrower3, seller, bitusd.amount(1000)); + + BOOST_CHECK_EQUAL( 1000, call.debt.value ); + BOOST_CHECK_EQUAL( 15000, call.collateral.value ); + BOOST_CHECK_EQUAL( 1000, call2.debt.value ); + BOOST_CHECK_EQUAL( 15500, call2.collateral.value ); + BOOST_CHECK_EQUAL( 1000, call3.debt.value ); + BOOST_CHECK_EQUAL( 16000, call3.collateral.value ); + BOOST_CHECK_EQUAL( 3000, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + + // no margin call so far + + // This order would match call when it's margin called, it has an amount same as call's debt which will be full filled later + limit_order_id_type sell_med = create_sell_order(seller_id(db), asset(1000, usd_id), asset(10000))->id; // 1/10 + // Another big order above sell_med, amount bigger than call2's debt + limit_order_id_type sell_med2 = create_sell_order(seller_id(db), 
asset(1200, usd_id), asset(12120))->id; // 1/10.1 + // Another small order above sell_med2 + limit_order_id_type sell_med3 = create_sell_order(seller_id(db), asset(120, usd_id), asset(1224))->id; // 1/10.2 + + // adjust price feed to get the call orders into margin call territory + current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(10); + publish_feed( bitusd, feedproducer, current_feed ); + // settlement price = 1/10, mssp = 1/11 + + // Fixed #453 multiple order matching issue occurs + BOOST_CHECK( !db.find( sell_med ) ); // sell_med get filled + BOOST_CHECK( !db.find( sell_med2 ) ); // sell_med2 get filled + BOOST_CHECK( !db.find( sell_med3 ) ); // sell_med3 get filled + BOOST_CHECK( !db.find( call_id ) ); // the first call order get filled + BOOST_CHECK( !db.find( call2_id ) ); // the second call order get filled + BOOST_CHECK( db.find( call3_id ) ); // the third call order is still there + + // generate a block + generate_block(); + + +} FC_LOG_AND_RETHROW() } + +/*** + * Tests (big) limit order matching logic after #625 got fixed + */ +BOOST_AUTO_TEST_CASE(hardfork_core_625_big_limit_order_test) +{ try { + auto mi = db.get_global_properties().parameters.maintenance_interval; + generate_blocks(HARDFORK_CORE_625_TIME - mi); // assume all hard forks occur at same time + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + + set_expiration( db, trx ); + + ACTORS((buyer)(buyer2)(buyer3)(seller)(borrower)(borrower2)(borrower3)(feedproducer)); + + const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); + const auto& core = asset_id_type()(db); + + int64_t init_balance(1000000); + + transfer(committee_account, buyer_id, asset(init_balance)); + transfer(committee_account, buyer2_id, asset(init_balance)); + transfer(committee_account, buyer3_id, asset(init_balance)); + transfer(committee_account, borrower_id, asset(init_balance)); + transfer(committee_account, borrower2_id, asset(init_balance)); + transfer(committee_account, borrower3_id, asset(init_balance)); + update_feed_producers( bitusd, {feedproducer.id} ); + + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(5); + publish_feed( bitusd, feedproducer, current_feed ); + // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7 + const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000)); + call_order_id_type call_id = call.id; + // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7 + const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500)); + call_order_id_type call2_id = call2.id; + // create yet another position with 500% collateral, call price is 25/1.75 CORE/USD = 100/7 + const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(25000)); + transfer(borrower, seller, bitusd.amount(1000)); + transfer(borrower2, seller, bitusd.amount(1000)); + transfer(borrower3, seller, bitusd.amount(1000)); + + BOOST_CHECK_EQUAL( 1000, call.debt.value ); + BOOST_CHECK_EQUAL( 15000, call.collateral.value ); + BOOST_CHECK_EQUAL( 1000, call2.debt.value ); + BOOST_CHECK_EQUAL( 15500, call2.collateral.value ); + BOOST_CHECK_EQUAL( 1000, call3.debt.value ); + BOOST_CHECK_EQUAL( 25000, call3.collateral.value ); + BOOST_CHECK_EQUAL( 3000, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + BOOST_CHECK_EQUAL( 
3000, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( init_balance - 15000, get_balance(borrower, core) ); + BOOST_CHECK_EQUAL( init_balance - 15500, get_balance(borrower2, core) ); + BOOST_CHECK_EQUAL( init_balance - 25000, get_balance(borrower3, core) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower2, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower3, bitusd) ); + + // adjust price feed to get call and call2 (but not call3) into margin call territory + current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(10); + publish_feed( bitusd, feedproducer, current_feed ); + // settlement price = 1/10, mssp = 1/11 + + // This sell order above MSSP will not be matched with a call + limit_order_id_type sell_high = create_sell_order(seller, bitusd.amount(7), core.amount(78))->id; + BOOST_CHECK_EQUAL( db.find( sell_high )->for_sale.value, 7 ); + + BOOST_CHECK_EQUAL( 2993, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + + // This buy order is too low will not be matched with a sell order + limit_order_id_type buy_low = create_sell_order(buyer, asset(80), bitusd.amount(10))->id; + // This buy order at MSSP will be matched only if no margin call (margin call takes precedence) + limit_order_id_type buy_med = create_sell_order(buyer2, asset(11000), bitusd.amount(1000))->id; + // This buy order above MSSP will be matched with a sell order (limit order with better price takes precedence) + limit_order_id_type buy_high = create_sell_order(buyer3, asset(111), bitusd.amount(10))->id; + + BOOST_CHECK_EQUAL( 0, get_balance(buyer, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(buyer2, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(buyer3, bitusd) ); + BOOST_CHECK_EQUAL( init_balance - 80, get_balance(buyer, core) ); + BOOST_CHECK_EQUAL( init_balance - 11000, get_balance(buyer2, core) ); + BOOST_CHECK_EQUAL( init_balance - 111, get_balance(buyer3, core) ); + + // Create a big sell order slightly below the call price, will be matched with several orders + BOOST_CHECK( !create_sell_order(seller, bitusd.amount(700*4), core.amount(5900*4) ) ); + + // firstly it will match with buy_high, at buy_high's price + BOOST_CHECK( !db.find( buy_high ) ); + // buy_high pays 111 CORE, receives 10 USD goes to buyer3's balance + BOOST_CHECK_EQUAL( 10, get_balance(buyer3, bitusd) ); + BOOST_CHECK_EQUAL( init_balance - 111, get_balance(buyer3, core) ); + + // then it will match with call, at mssp: 1/11 = 1000/11000 + BOOST_CHECK( !db.find( call_id ) ); + // call pays 11000 CORE, receives 1000 USD to cover borrower's position, remaining CORE goes to borrower's balance + BOOST_CHECK_EQUAL( init_balance - 11000, get_balance(borrower, core) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + + // then it will match with call2, at mssp: 1/11 = 1000/11000 + BOOST_CHECK( !db.find( call2_id ) ); + // call2 pays 11000 CORE, receives 1000 USD to cover borrower2's position, remaining CORE goes to borrower2's balance + BOOST_CHECK_EQUAL( init_balance - 11000, get_balance(borrower2, core) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower2, bitusd) ); + + // then it will match with buy_med, at buy_med's price. Since buy_med is too big, it's partially filled. 
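+ // A brief recap of this fill with the fixture's numbers (the 1% USDBIT market fee is noted in the comments below): + // the taker sell has 700*4 - 10 - 1000 - 1000 = 790 USD left, buy_med is the maker so it fills at its own 1/11 price, + // and since 790 < 1000 it stays on the book partially filled; the exact amounts are asserted below.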
+ // buy_med receives the remaining USD of sell order, minus market fees, goes to buyer2's balance + BOOST_CHECK_EQUAL( 783, get_balance(buyer2, bitusd) ); // 700*4-10-1000-1000=790, minus 1% market fee 790*100/10000=7 + BOOST_CHECK_EQUAL( init_balance - 11000, get_balance(buyer2, core) ); + // buy_med pays at 1/11 = 790/8690 + BOOST_CHECK_EQUAL( db.find( buy_med )->for_sale.value, 11000-8690 ); + + // call3 is not in margin call territory so won't be matched + BOOST_CHECK_EQUAL( 1000, call3.debt.value ); + BOOST_CHECK_EQUAL( 25000, call3.collateral.value ); + + // buy_low's price is too low that won't be matched + BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 ); + + // check seller balance + BOOST_CHECK_EQUAL( 193, get_balance(seller, bitusd) ); // 3000 - 7 - 700*4 + BOOST_CHECK_EQUAL( 30801, get_balance(seller, core) ); // 111 + 11000 + 11000 + 8690 + + // Cancel buy_med + cancel_limit_order( buy_med(db) ); + BOOST_CHECK( !db.find( buy_med ) ); + BOOST_CHECK_EQUAL( 783, get_balance(buyer2, bitusd) ); + BOOST_CHECK_EQUAL( init_balance - 8690, get_balance(buyer2, core) ); + + // Create another sell order slightly below the call price, won't fill + limit_order_id_type sell_med = create_sell_order( seller, bitusd.amount(7), core.amount(59) )->id; + BOOST_CHECK_EQUAL( db.find( sell_med )->for_sale.value, 7 ); + // check seller balance + BOOST_CHECK_EQUAL( 193-7, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 30801, get_balance(seller, core) ); + + // call3 is not in margin call territory so won't be matched + BOOST_CHECK_EQUAL( 1000, call3.debt.value ); + BOOST_CHECK_EQUAL( 25000, call3.collateral.value ); + + // buy_low's price is too low that won't be matched + BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 ); + + // generate a block + generate_block(); + +} FC_LOG_AND_RETHROW() } + +/*** + * Fixed bitshares-core issue #453 #606: multiple order matching without black swan, multiple bitassets + */ +BOOST_AUTO_TEST_CASE(hard_fork_453_cross_test) +{ try { // create orders before hard fork, which will be matched on hard fork + auto mi = db.get_global_properties().parameters.maintenance_interval; + generate_blocks(HARDFORK_CORE_453_TIME - mi); // assume all hard forks occur at same time + generate_block(); + + set_expiration( db, trx ); + + ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(feedproducer)); + + const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); + const auto& biteur = create_bitasset("EURBIT", feedproducer_id); + const auto& bitcny = create_bitasset("CNYBIT", feedproducer_id); + const auto& core = asset_id_type()(db); + asset_id_type usd_id = bitusd.id; + asset_id_type eur_id = biteur.id; + asset_id_type cny_id = bitcny.id; + asset_id_type core_id = core.id; + + int64_t init_balance(1000000); + + transfer(committee_account, buyer_id, asset(init_balance)); + transfer(committee_account, borrower_id, asset(init_balance)); + transfer(committee_account, borrower2_id, asset(init_balance)); + transfer(committee_account, borrower3_id, asset(init_balance)); + update_feed_producers( bitusd, {feedproducer.id} ); + update_feed_producers( biteur, {feedproducer.id} ); + update_feed_producers( bitcny, {feedproducer.id} ); + + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(5); + publish_feed( bitusd, feedproducer, current_feed ); + current_feed.settlement_price = biteur.amount( 1 ) / core.amount(5); + 
publish_feed( biteur, feedproducer, current_feed ); + current_feed.settlement_price = bitcny.amount( 1 ) / core.amount(5); + publish_feed( bitcny, feedproducer, current_feed ); + // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7 + const call_order_object& call_usd = *borrow( borrower, bitusd.amount(1000), asset(15000)); + call_order_id_type call_usd_id = call_usd.id; + const call_order_object& call_eur = *borrow( borrower, biteur.amount(1000), asset(15000)); + call_order_id_type call_eur_id = call_eur.id; + const call_order_object& call_cny = *borrow( borrower, bitcny.amount(1000), asset(15000)); + call_order_id_type call_cny_id = call_cny.id; + // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7 + const call_order_object& call_usd2 = *borrow( borrower2, bitusd.amount(1000), asset(15500)); + call_order_id_type call_usd2_id = call_usd2.id; + const call_order_object& call_eur2 = *borrow( borrower2, biteur.amount(1000), asset(15500)); + call_order_id_type call_eur2_id = call_eur2.id; + const call_order_object& call_cny2 = *borrow( borrower2, bitcny.amount(1000), asset(15500)); + call_order_id_type call_cny2_id = call_cny2.id; + // create yet another position with 320% collateral, call price is 16/1.75 CORE/USD = 64/7 + const call_order_object& call_usd3 = *borrow( borrower3, bitusd.amount(1000), asset(16000)); + call_order_id_type call_usd3_id = call_usd3.id; + const call_order_object& call_eur3 = *borrow( borrower3, biteur.amount(1000), asset(16000)); + call_order_id_type call_eur3_id = call_eur3.id; + const call_order_object& call_cny3 = *borrow( borrower3, bitcny.amount(1000), asset(16000)); + call_order_id_type call_cny3_id = call_cny3.id; + transfer(borrower, seller, bitusd.amount(1000)); + transfer(borrower2, seller, bitusd.amount(1000)); + transfer(borrower3, seller, bitusd.amount(1000)); + transfer(borrower, seller, biteur.amount(1000)); + transfer(borrower2, seller, biteur.amount(1000)); + transfer(borrower3, seller, biteur.amount(1000)); + transfer(borrower, seller, bitcny.amount(1000)); + transfer(borrower2, seller, bitcny.amount(1000)); + transfer(borrower3, seller, bitcny.amount(1000)); + + BOOST_CHECK_EQUAL( 1000, call_usd.debt.value ); + BOOST_CHECK_EQUAL( 15000, call_usd.collateral.value ); + BOOST_CHECK_EQUAL( 1000, call_usd2.debt.value ); + BOOST_CHECK_EQUAL( 15500, call_usd2.collateral.value ); + BOOST_CHECK_EQUAL( 1000, call_usd3.debt.value ); + BOOST_CHECK_EQUAL( 16000, call_usd3.collateral.value ); + BOOST_CHECK_EQUAL( 3000, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 1000, call_eur.debt.value ); + BOOST_CHECK_EQUAL( 15000, call_eur.collateral.value ); + BOOST_CHECK_EQUAL( 1000, call_eur2.debt.value ); + BOOST_CHECK_EQUAL( 15500, call_eur2.collateral.value ); + BOOST_CHECK_EQUAL( 1000, call_eur3.debt.value ); + BOOST_CHECK_EQUAL( 16000, call_eur3.collateral.value ); + BOOST_CHECK_EQUAL( 3000, get_balance(seller, biteur) ); + BOOST_CHECK_EQUAL( 1000, call_cny.debt.value ); + BOOST_CHECK_EQUAL( 15000, call_cny.collateral.value ); + BOOST_CHECK_EQUAL( 1000, call_cny2.debt.value ); + BOOST_CHECK_EQUAL( 15500, call_cny2.collateral.value ); + BOOST_CHECK_EQUAL( 1000, call_cny3.debt.value ); + BOOST_CHECK_EQUAL( 16000, call_cny3.collateral.value ); + BOOST_CHECK_EQUAL( 3000, get_balance(seller, bitcny) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + + // adjust price feed to get call_order into margin call territory + current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(10); + 
publish_feed( bitusd, feedproducer, current_feed ); + current_feed.settlement_price = biteur.amount( 1 ) / core.amount(10); + publish_feed( biteur, feedproducer, current_feed ); + current_feed.settlement_price = bitcny.amount( 1 ) / core.amount(10); + publish_feed( bitcny, feedproducer, current_feed ); + // settlement price = 1/10, mssp = 1/11 + + // This order below the call price will not be matched before hard fork: 1/8 #606 + limit_order_id_type sell_usd_low = create_sell_order(seller, bitusd.amount(1000), core.amount(7000))->id; + // This is a big order, price below the call price will not be matched before hard fork: 1007/9056 = 1/8 #606 + limit_order_id_type sell_usd_low2 = create_sell_order(seller, bitusd.amount(1007), core.amount(8056))->id; + // This order above the MSSP will not be matched before hard fork + limit_order_id_type sell_usd_high = create_sell_order(seller, bitusd.amount(7), core.amount(78))->id; + // This would match but is blocked by sell_low?! #606 + limit_order_id_type sell_usd_med = create_sell_order(seller, bitusd.amount(700), core.amount(6400))->id; + // This would match but is blocked by sell_low?! #606 + limit_order_id_type sell_usd_med2 = create_sell_order(seller, bitusd.amount(7), core.amount(65))->id; + + // This order below the call price will not be matched before hard fork: 1/8 #606 + limit_order_id_type sell_eur_low = create_sell_order(seller, biteur.amount(1000), core.amount(7000))->id; + // This is a big order, price below the call price will not be matched before hard fork: 1007/9056 = 1/8 #606 + limit_order_id_type sell_eur_low2 = create_sell_order(seller, biteur.amount(1007), core.amount(8056))->id; + // This order above the MSSP will not be matched before hard fork + limit_order_id_type sell_eur_high = create_sell_order(seller, biteur.amount(7), core.amount(78))->id; + // This would match but is blocked by sell_low?! #606 + limit_order_id_type sell_eur_med = create_sell_order(seller, biteur.amount(700), core.amount(6400))->id; + // This would match but is blocked by sell_low?! #606 + limit_order_id_type sell_eur_med2 = create_sell_order(seller, biteur.amount(7), core.amount(65))->id; + + // This order below the call price will not be matched before hard fork: 1/8 #606 + limit_order_id_type sell_cny_low = create_sell_order(seller, bitcny.amount(1000), core.amount(7000))->id; + // This is a big order, price below the call price will not be matched before hard fork: 1007/9056 = 1/8 #606 + limit_order_id_type sell_cny_low2 = create_sell_order(seller, bitcny.amount(1007), core.amount(8056))->id; + // This order above the MSSP will not be matched before hard fork + limit_order_id_type sell_cny_high = create_sell_order(seller, bitcny.amount(7), core.amount(78))->id; + // This would match but is blocked by sell_low?! #606 + limit_order_id_type sell_cny_med = create_sell_order(seller, bitcny.amount(700), core.amount(6400))->id; + // This would match but is blocked by sell_low?! 
#606 + limit_order_id_type sell_cny_med2 = create_sell_order(seller, bitcny.amount(7), core.amount(65))->id; + + BOOST_CHECK_EQUAL( 3000-1000-1007-7-700-7, get_balance(seller_id, usd_id) ); + BOOST_CHECK_EQUAL( 3000-1000-1007-7-700-7, get_balance(seller_id, eur_id) ); + BOOST_CHECK_EQUAL( 3000-1000-1007-7-700-7, get_balance(seller_id, cny_id) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + + // generate a block to include operations above + generate_block(); + // go over the hard fork, make sure feed doesn't expire + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + + // sell_low and call should get matched first + BOOST_CHECK( !db.find( sell_usd_low ) ); + BOOST_CHECK( !db.find( call_usd_id ) ); + // sell_low2 and call2 should get matched + BOOST_CHECK( !db.find( call_usd2_id ) ); + // sell_low2 and call3 should get matched: fixed #453 + BOOST_CHECK( !db.find( sell_usd_low2 ) ); + // sell_med and call3 should get matched + BOOST_CHECK( !db.find( sell_usd_med ) ); + // call3 now is not at margin call state, so sell_med2 won't get matched + BOOST_CHECK_EQUAL( db.find( sell_usd_med2 )->for_sale.value, 7 ); + // sell_high should still be there, didn't match anything + BOOST_CHECK_EQUAL( db.find( sell_usd_high )->for_sale.value, 7 ); + + // sell_low and call should get matched first + BOOST_CHECK( !db.find( sell_eur_low ) ); + BOOST_CHECK( !db.find( call_eur_id ) ); + // sell_low2 and call2 should get matched + BOOST_CHECK( !db.find( call_eur2_id ) ); + // sell_low2 and call3 should get matched: fixed #453 + BOOST_CHECK( !db.find( sell_eur_low2 ) ); + // sell_med and call3 should get matched + BOOST_CHECK( !db.find( sell_eur_med ) ); + // call3 now is not at margin call state, so sell_med2 won't get matched + BOOST_CHECK_EQUAL( db.find( sell_eur_med2 )->for_sale.value, 7 ); + // sell_high should still be there, didn't match anything + BOOST_CHECK_EQUAL( db.find( sell_eur_high )->for_sale.value, 7 ); + + // sell_low and call should get matched first + BOOST_CHECK( !db.find( sell_cny_low ) ); + BOOST_CHECK( !db.find( call_cny_id ) ); + // sell_low2 and call2 should get matched + BOOST_CHECK( !db.find( call_cny2_id ) ); + // sell_low2 and call3 should get matched: fixed #453 + BOOST_CHECK( !db.find( sell_cny_low2 ) ); + // sell_med and call3 should get matched + BOOST_CHECK( !db.find( sell_cny_med ) ); + // call3 now is not at margin call state, so sell_med2 won't get matched + BOOST_CHECK_EQUAL( db.find( sell_cny_med2 )->for_sale.value, 7 ); + // sell_high should still be there, didn't match anything + BOOST_CHECK_EQUAL( db.find( sell_cny_high )->for_sale.value, 7 ); + + // all match price would be limit order price + BOOST_CHECK_EQUAL( 3000-1000-1007-7-700-7, get_balance(seller_id, usd_id) ); + BOOST_CHECK_EQUAL( 3000-1000-1007-7-700-7, get_balance(seller_id, eur_id) ); + BOOST_CHECK_EQUAL( 3000-1000-1007-7-700-7, get_balance(seller_id, cny_id) ); + BOOST_CHECK_EQUAL( (7000+8056+6400)*3, get_balance(seller_id, core_id) ); + BOOST_CHECK_EQUAL( 1000-7-700, call_usd3_id(db).debt.value ); + BOOST_CHECK_EQUAL( 16000-56-6400, call_usd3_id(db).collateral.value ); + BOOST_CHECK_EQUAL( 1000-7-700, call_eur3_id(db).debt.value ); + BOOST_CHECK_EQUAL( 16000-56-6400, call_eur3_id(db).collateral.value ); + BOOST_CHECK_EQUAL( 1000-7-700, call_cny3_id(db).debt.value ); + BOOST_CHECK_EQUAL( 16000-56-6400, call_cny3_id(db).collateral.value ); + // call3's call_price should be updated: 9544/293/1.75 = 9544*4 / 293*7 = 38176/2051 CORE/USD + BOOST_CHECK( 
price(asset(38176),asset(2051,usd_id)) == call_usd3_id(db).call_price ); + BOOST_CHECK( price(asset(38176),asset(2051,eur_id)) == call_eur3_id(db).call_price ); + BOOST_CHECK( price(asset(38176),asset(2051,cny_id)) == call_cny3_id(db).call_price ); + + generate_block(); + +} FC_LOG_AND_RETHROW() } + +/*** + * Fixed bitshares-core issue #338 #453 #606: multiple order matching with black swan + */ +BOOST_AUTO_TEST_CASE(hard_fork_338_cross_test) +{ try { // create orders before hard fork, which will be matched on hard fork + auto mi = db.get_global_properties().parameters.maintenance_interval; + generate_blocks(HARDFORK_CORE_338_TIME - mi); // assume all hard forks occur at same time + generate_block(); + + set_expiration( db, trx ); + + ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(borrower4)(feedproducer)); + + const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); + const auto& core = asset_id_type()(db); + asset_id_type usd_id = bitusd.id; + asset_id_type core_id = core.id; + + int64_t init_balance(1000000); + + transfer(committee_account, buyer_id, asset(init_balance)); + transfer(committee_account, borrower_id, asset(init_balance)); + transfer(committee_account, borrower2_id, asset(init_balance)); + transfer(committee_account, borrower3_id, asset(init_balance)); + transfer(committee_account, borrower4_id, asset(init_balance)); + update_feed_producers( bitusd, {feedproducer.id} ); + + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(5); + publish_feed( bitusd, feedproducer, current_feed ); + // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7 + const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000)); + call_order_id_type call_id = call.id; + // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7 + const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500)); + call_order_id_type call2_id = call2.id; + // create yet another position with 320% collateral, call price is 16/1.75 CORE/USD = 64/7 + const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(16000)); + call_order_id_type call3_id = call3.id; + // create yet another position with 400% collateral, call price is 20/1.75 CORE/USD = 80/7 + const call_order_object& call4 = *borrow( borrower4, bitusd.amount(1000), asset(20000)); + call_order_id_type call4_id = call4.id; + transfer(borrower, seller, bitusd.amount(1000)); + transfer(borrower2, seller, bitusd.amount(1000)); + transfer(borrower3, seller, bitusd.amount(1000)); + + BOOST_CHECK_EQUAL( 1000, call.debt.value ); + BOOST_CHECK_EQUAL( 15000, call.collateral.value ); + BOOST_CHECK_EQUAL( 1000, call2.debt.value ); + BOOST_CHECK_EQUAL( 15500, call2.collateral.value ); + BOOST_CHECK_EQUAL( 1000, call3.debt.value ); + BOOST_CHECK_EQUAL( 16000, call3.collateral.value ); + BOOST_CHECK_EQUAL( 3000, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + + // adjust price feed to get call_order into margin call territory + current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(10); + publish_feed( bitusd, feedproducer, current_feed ); + // settlement price = 1/10, mssp = 1/11 + + // This order below the call price will not be matched before hard fork: 1/8 #606 + limit_order_id_type sell_low = create_sell_order(seller, bitusd.amount(1000), 
core.amount(7000))->id; + // This is a big order, price below the call price will not be matched before hard fork: 1007/9056 = 1/8 #606 + limit_order_id_type sell_low2 = create_sell_order(seller, bitusd.amount(1007), core.amount(8056))->id; + // This would match but is blocked by sell_low?! #606 + limit_order_id_type sell_med = create_sell_order(seller, bitusd.amount(7), core.amount(64))->id; + + // adjust price feed to get call_order into black swan territory + current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(16); + publish_feed( bitusd, feedproducer, current_feed ); + // settlement price = 1/16, mssp = 10/176 + + // due to sell_low, black swan won't occur + BOOST_CHECK( !usd_id(db).bitasset_data(db).has_settlement() ); + + BOOST_CHECK_EQUAL( 3000-1000-1007-7, get_balance(seller_id, usd_id) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + + // generate a block to include operations above + generate_block(); + // go over the hard fork, make sure feed doesn't expire + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + + // sell_low and call should get matched first + BOOST_CHECK( !db.find( sell_low ) ); + BOOST_CHECK( !db.find( call_id ) ); + // sell_low2 and call2 should get matched + BOOST_CHECK( !db.find( call2_id ) ); + // sell_low2 and call3 should get matched: fixed #453 + BOOST_CHECK( !db.find( sell_low2 ) ); + // sell_med and call3 should get matched + BOOST_CHECK( !db.find( sell_med ) ); + + // at this moment, + // collateralization of call3 is (16000-56-64) / (1000-7-7) = 15880/986 = 16.1, it's > 16 but < 17.6 + // although there is no sell order, it should trigger a black swan event right away, + // because after hard fork new limit order won't trigger black swan event + BOOST_CHECK( usd_id(db).bitasset_data(db).has_settlement() ); + BOOST_CHECK( !db.find( call3_id ) ); + BOOST_CHECK( !db.find( call4_id ) ); + + // since 16.1 > 16, global settlement should at feed price 16/1 + // so settlement fund should be 986*16 + 1000*16 + BOOST_CHECK_EQUAL( 1986*16, usd_id(db).bitasset_data(db).settlement_fund.value ); + // global settlement price should be 16/1, since no rounding here + BOOST_CHECK( price(asset(1,usd_id),asset(16) ) == usd_id(db).bitasset_data(db).settlement_price ); + + BOOST_CHECK_EQUAL( 3000-1000-1007-7, get_balance(seller_id, usd_id) ); + BOOST_CHECK_EQUAL( 7000+8056+64, get_balance(seller_id, core_id) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower3_id, usd_id) ); + BOOST_CHECK_EQUAL( init_balance-16000+15880-986*16, get_balance(borrower3_id, core_id) ); + BOOST_CHECK_EQUAL( 1000, get_balance(borrower4_id, usd_id) ); + BOOST_CHECK_EQUAL( init_balance-1000*16, get_balance(borrower4_id, core_id) ); + + generate_block(); + +} FC_LOG_AND_RETHROW() } + +/*** + * Fixed bitshares-core issue #649: Black swan detection fetch call order by call_price but not collateral ratio + */ +BOOST_AUTO_TEST_CASE(hard_fork_649_cross_test) +{ try { // create orders before hard fork, which will be matched on hard fork + auto mi = db.get_global_properties().parameters.maintenance_interval; + generate_blocks(HARDFORK_CORE_343_TIME - mi); // assume all hard forks occur at same time + generate_block(); + + set_expiration( db, trx ); + + ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(borrower4)(feedproducer)); + + const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); + const auto& core = asset_id_type()(db); + asset_id_type usd_id = bitusd.id; + asset_id_type core_id = core.id; + + int64_t init_balance(1000000); + + 
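// Outline of this test, matching the checks below: three positions are opened and the first is partially covered by a matched limit order, + // so its collateral ratio (8536/293, about 29) ends up far above call2 (15.5) and call3 (16); the feed is then pushed to 1/20. + // Before the hard fork the black swan is missed because detection went through call_price (issue #649); + // after the maintenance interval passes it is detected via collateral ratio and all three positions are settled. +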
transfer(committee_account, buyer_id, asset(init_balance)); + transfer(committee_account, borrower_id, asset(init_balance)); + transfer(committee_account, borrower2_id, asset(init_balance)); + transfer(committee_account, borrower3_id, asset(init_balance)); + transfer(committee_account, borrower4_id, asset(init_balance)); + update_feed_producers( bitusd, {feedproducer.id} ); + + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(5); + publish_feed( bitusd, feedproducer, current_feed ); + // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7 + const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000)); + call_order_id_type call_id = call.id; + // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7 + const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500)); + call_order_id_type call2_id = call2.id; + // create yet another position with 320% collateral, call price is 16/1.75 CORE/USD = 64/7 + const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(16000)); + call_order_id_type call3_id = call3.id; + transfer(borrower, seller, bitusd.amount(1000)); + transfer(borrower2, seller, bitusd.amount(1000)); + transfer(borrower3, seller, bitusd.amount(1000)); + + BOOST_CHECK_EQUAL( 1000, call.debt.value ); + BOOST_CHECK_EQUAL( 15000, call.collateral.value ); + BOOST_CHECK_EQUAL( 1000, call2.debt.value ); + BOOST_CHECK_EQUAL( 15500, call2.collateral.value ); + BOOST_CHECK_EQUAL( 1000, call3.debt.value ); + BOOST_CHECK_EQUAL( 16000, call3.collateral.value ); + BOOST_CHECK_EQUAL( 3000, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + + // adjust price feed to get call_order into margin call territory + current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(10); + publish_feed( bitusd, feedproducer, current_feed ); + // settlement price = 1/10, mssp = 1/11 + + // This would match with call at price 707/6464 + BOOST_CHECK( !create_sell_order(seller, bitusd.amount(707), core.amount(6464)) ); + BOOST_CHECK_EQUAL( 3000-707, get_balance(seller_id, usd_id) ); + BOOST_CHECK_EQUAL( 6464, get_balance(seller_id, core_id) ); + BOOST_CHECK_EQUAL( 293, call.debt.value ); + BOOST_CHECK_EQUAL( 8536, call.collateral.value ); + + // at this moment, + // collateralization of call is 8536 / 293 = 29.1 + // collateralization of call2 is 15500 / 1000 = 15.5 + // collateralization of call3 is 16000 / 1000 = 16 + + generate_block(); + set_expiration( db, trx ); + update_feed_producers( usd_id(db), {feedproducer_id} ); + + // adjust price feed to get call_order into black swan territory + current_feed.settlement_price = price(asset(1,usd_id) / asset(20)); + publish_feed( usd_id(db), feedproducer_id(db), current_feed ); + // settlement price = 1/20, mssp = 1/22 + + // due to #649, black swan won't occur + BOOST_CHECK( !usd_id(db).bitasset_data(db).has_settlement() ); + + // generate a block to include operations above + generate_block(); + BOOST_CHECK( !usd_id(db).bitasset_data(db).has_settlement() ); + // go over the hard fork, make sure feed doesn't expire + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + + // a black swan event should occur + BOOST_CHECK( usd_id(db).bitasset_data(db).has_settlement() ); + BOOST_CHECK( !db.find( call_id ) ); + BOOST_CHECK( !db.find( call2_id ) ); + 
BOOST_CHECK( !db.find( call3_id ) ); + + // since least collateral ratio 15.5 < 20, global settlement should execute at price = least collateral ratio 15.5/1 + // so settlement fund should be 15500 + 15500 + round_up(15.5 * 293) + BOOST_CHECK_EQUAL( 15500*2 + (293 * 155 + 9) / 10, usd_id(db).bitasset_data(db).settlement_fund.value ); + // global settlement price should be settlement_fund/(2000+293), but not 15.5/1 due to rounding + BOOST_CHECK( price(asset(2293,usd_id),asset(15500*2+(293*155+9)/10) ) == usd_id(db).bitasset_data(db).settlement_price ); + + BOOST_CHECK_EQUAL( 3000-707, get_balance(seller_id, usd_id) ); + BOOST_CHECK_EQUAL( 6464, get_balance(seller_id, core_id) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower_id, usd_id) ); + BOOST_CHECK_EQUAL( init_balance-6464-(293*155+9)/10, get_balance(borrower_id, core_id) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower2_id, usd_id) ); + BOOST_CHECK_EQUAL( init_balance-15500, get_balance(borrower2_id, core_id) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower3_id, usd_id) ); + BOOST_CHECK_EQUAL( init_balance-15500, get_balance(borrower3_id, core_id) ); + + generate_block(); + +} FC_LOG_AND_RETHROW() } + +/*** + * Fixed bitshares-core issue #343: change sorting of call orders when matching against limit order + */ +BOOST_AUTO_TEST_CASE(hard_fork_343_cross_test) +{ try { // create orders before hard fork, which will be matched on hard fork + auto mi = db.get_global_properties().parameters.maintenance_interval; + generate_blocks(HARDFORK_CORE_343_TIME - mi); // assume all hard forks occur at same time + generate_block(); + + set_expiration( db, trx ); + + ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(borrower4)(feedproducer)); + + const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); + const auto& core = asset_id_type()(db); + asset_id_type usd_id = bitusd.id; + asset_id_type core_id = core.id; + + int64_t init_balance(1000000); + + transfer(committee_account, buyer_id, asset(init_balance)); + transfer(committee_account, borrower_id, asset(init_balance)); + transfer(committee_account, borrower2_id, asset(init_balance)); + transfer(committee_account, borrower3_id, asset(init_balance)); + transfer(committee_account, borrower4_id, asset(init_balance)); + update_feed_producers( bitusd, {feedproducer.id} ); + + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(5); + publish_feed( bitusd, feedproducer, current_feed ); + // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7 + const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000)); + call_order_id_type call_id = call.id; + // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7 + const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500)); + call_order_id_type call2_id = call2.id; + // create yet another position with 350% collateral, call price is 17.5/1.75 CORE/USD = 77/7 + const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(17500)); + call_order_id_type call3_id = call3.id; + transfer(borrower, seller, bitusd.amount(1000)); + transfer(borrower2, seller, bitusd.amount(1000)); + transfer(borrower3, seller, bitusd.amount(1000)); + + BOOST_CHECK_EQUAL( 1000, call.debt.value ); + BOOST_CHECK_EQUAL( 15000, call.collateral.value ); + BOOST_CHECK_EQUAL( 1000, call2.debt.value ); + BOOST_CHECK_EQUAL( 15500, 
call2.collateral.value ); + BOOST_CHECK_EQUAL( 1000, call3.debt.value ); + BOOST_CHECK_EQUAL( 17500, call3.collateral.value ); + BOOST_CHECK_EQUAL( 3000, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + + // adjust price feed to get call_order into margin call territory + current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(10); + publish_feed( bitusd, feedproducer, current_feed ); + // settlement price = 1/10, mssp = 1/11 + + // This would match with call at price 700/6400 + BOOST_CHECK( !create_sell_order(seller, bitusd.amount(700), core.amount(6400)) ); + BOOST_CHECK_EQUAL( 3000-700, get_balance(seller_id, usd_id) ); + BOOST_CHECK_EQUAL( 6400, get_balance(seller_id, core_id) ); + BOOST_CHECK_EQUAL( 300, call.debt.value ); + BOOST_CHECK_EQUAL( 8600, call.collateral.value ); + + // at this moment, + // collateralization of call is 8600 / 300 = 28.67 + // collateralization of call2 is 15500 / 1000 = 15.5 + // collateralization of call3 is 17500 / 1000 = 17.5 + + // generate a block to include operations above + generate_block(); + // go over the hard fork, make sure feed doesn't expire + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + + set_expiration( db, trx ); + + // This will match with call2 at price 7/77 (#343 fixed) + BOOST_CHECK( !create_sell_order(seller_id(db), asset(7*50,usd_id), asset(65*50)) ); + BOOST_CHECK_EQUAL( 3000-700-7*50, get_balance(seller_id, usd_id) ); + BOOST_CHECK_EQUAL( 6400+77*50, get_balance(seller_id, core_id) ); + BOOST_CHECK_EQUAL( 300, call_id(db).debt.value ); + BOOST_CHECK_EQUAL( 8600, call_id(db).collateral.value ); + BOOST_CHECK_EQUAL( 1000-7*50, call2_id(db).debt.value ); + BOOST_CHECK_EQUAL( 15500-77*50, call2_id(db).collateral.value ); + BOOST_CHECK_EQUAL( 1000, call3_id(db).debt.value ); + BOOST_CHECK_EQUAL( 17500, call3_id(db).collateral.value ); + + // at this moment, + // collateralization of call is 8600 / 300 = 28.67 + // collateralization of call2 is 11650 / 650 = 17.9 + // collateralization of call3 is 17500 / 1000 = 17.5 + + // This will match with call3 at price 7/77 (#343 fixed) + BOOST_CHECK( !create_sell_order(seller_id(db), asset(7,usd_id), asset(65)) ); + BOOST_CHECK_EQUAL( 3000-700-7*50-7, get_balance(seller_id, usd_id) ); + BOOST_CHECK_EQUAL( 6400+77*50+77, get_balance(seller_id, core_id) ); + BOOST_CHECK_EQUAL( 300, call_id(db).debt.value ); + BOOST_CHECK_EQUAL( 8600, call_id(db).collateral.value ); + BOOST_CHECK_EQUAL( 1000-7*50, call2_id(db).debt.value ); + BOOST_CHECK_EQUAL( 15500-77*50, call2_id(db).collateral.value ); + BOOST_CHECK_EQUAL( 1000-7, call3_id(db).debt.value ); + BOOST_CHECK_EQUAL( 17500-77, call3_id(db).collateral.value ); + + // at this moment, + // collateralization of call is 8600 / 300 = 28.67 + // collateralization of call2 is 11650 / 650 = 17.9 + // collateralization of call3 is 17423 / 993 = 17.55 + + // no more margin call now + BOOST_CHECK( create_sell_order(seller_id(db), asset(7,usd_id), asset(65)) ); + + generate_block(); + +} FC_LOG_AND_RETHROW() } + +/*** + * BSIP38 "target_collateral_ratio" test: matching a taker limit order with multiple maker call orders + */ +BOOST_AUTO_TEST_CASE(target_cr_test_limit_call) +{ try { + auto mi = db.get_global_properties().parameters.maintenance_interval; + generate_blocks(HARDFORK_CORE_834_TIME - mi); + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + + set_expiration( db, trx ); + + 
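// BSIP38 behaviour exercised below: when a position with target_collateral_ratio set is margin called, it only sells enough + // collateral to bring its ratio back above the target (see call_order_object::get_max_debt_to_cover as used in this test) + // instead of covering its whole debt; call3, which sets no TCR, stays out of margin call territory throughout. + // The later checks assert the resulting ratio exceeds both MCR and TCR, e.g. call.debt.value * 10 * 1750 < call.collateral.value * 1000. +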
ACTORS((buyer)(buyer2)(buyer3)(seller)(borrower)(borrower2)(borrower3)(feedproducer)); + + const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); + const auto& core = asset_id_type()(db); + + int64_t init_balance(1000000); + + transfer(committee_account, buyer_id, asset(init_balance)); + transfer(committee_account, buyer2_id, asset(init_balance)); + transfer(committee_account, buyer3_id, asset(init_balance)); + transfer(committee_account, borrower_id, asset(init_balance)); + transfer(committee_account, borrower2_id, asset(init_balance)); + transfer(committee_account, borrower3_id, asset(init_balance)); + update_feed_producers( bitusd, {feedproducer.id} ); + + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(5); + publish_feed( bitusd, feedproducer, current_feed ); + // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7, tcr 170% is lower than 175% + const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000), 1700); + call_order_id_type call_id = call.id; + // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7, tcr 200% is higher than 175% + const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500), 2000); + call_order_id_type call2_id = call2.id; + // create yet another position with 500% collateral, call price is 25/1.75 CORE/USD = 100/7, no tcr + const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(25000)); + transfer(borrower, seller, bitusd.amount(1000)); + transfer(borrower2, seller, bitusd.amount(1000)); + transfer(borrower3, seller, bitusd.amount(1000)); + + BOOST_CHECK_EQUAL( 1000, call.debt.value ); + BOOST_CHECK_EQUAL( 15000, call.collateral.value ); + BOOST_CHECK_EQUAL( 1000, call2.debt.value ); + BOOST_CHECK_EQUAL( 15500, call2.collateral.value ); + BOOST_CHECK_EQUAL( 1000, call3.debt.value ); + BOOST_CHECK_EQUAL( 25000, call3.collateral.value ); + BOOST_CHECK_EQUAL( 3000, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + BOOST_CHECK_EQUAL( 3000, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( init_balance - 15000, get_balance(borrower, core) ); + BOOST_CHECK_EQUAL( init_balance - 15500, get_balance(borrower2, core) ); + BOOST_CHECK_EQUAL( init_balance - 25000, get_balance(borrower3, core) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower2, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower3, bitusd) ); + + // adjust price feed to get call and call2 (but not call3) into margin call territory + current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(10); + publish_feed( bitusd, feedproducer, current_feed ); + // settlement price = 1/10, mssp = 1/11 + + // This sell order above MSSP will not be matched with a call + limit_order_id_type sell_high = create_sell_order(seller, bitusd.amount(7), core.amount(78))->id; + BOOST_CHECK_EQUAL( db.find( sell_high )->for_sale.value, 7 ); + + BOOST_CHECK_EQUAL( 2993, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + + // This buy order is too low will not be matched with a sell order + limit_order_id_type buy_low = create_sell_order(buyer, asset(80), bitusd.amount(10))->id; + // This buy order at MSSP will be matched only if no margin call (margin call takes precedence) + limit_order_id_type buy_med = 
create_sell_order(buyer2, asset(33000), bitusd.amount(3000))->id; + // This buy order above MSSP will be matched with a sell order (limit order with better price takes precedence) + limit_order_id_type buy_high = create_sell_order(buyer3, asset(111), bitusd.amount(10))->id; + + BOOST_CHECK_EQUAL( 0, get_balance(buyer, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(buyer2, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(buyer3, bitusd) ); + BOOST_CHECK_EQUAL( init_balance - 80, get_balance(buyer, core) ); + BOOST_CHECK_EQUAL( init_balance - 33000, get_balance(buyer2, core) ); + BOOST_CHECK_EQUAL( init_balance - 111, get_balance(buyer3, core) ); + + // call and call2's CR is quite high, and debt amount is quite a lot, assume neither of them will be completely filled + price match_price( bitusd.amount(1) / core.amount(11) ); + share_type call_to_cover = call_id(db).get_max_debt_to_cover(match_price,current_feed.settlement_price,1750); + share_type call2_to_cover = call2_id(db).get_max_debt_to_cover(match_price,current_feed.settlement_price,1750); + BOOST_CHECK_LT( call_to_cover.value, call_id(db).debt.value ); + BOOST_CHECK_LT( call2_to_cover.value, call2_id(db).debt.value ); + // even though call2 has a higher CR, since call's TCR is less than call2's TCR, so we expect call will cover less when called + BOOST_CHECK_LT( call_to_cover.value, call2_to_cover.value ); + + // Create a big sell order slightly below the call price, will be matched with several orders + BOOST_CHECK( !create_sell_order(seller, bitusd.amount(700*4), core.amount(5900*4) ) ); + + // firstly it will match with buy_high, at buy_high's price + BOOST_CHECK( !db.find( buy_high ) ); + // buy_high pays 111 CORE, receives 10 USD goes to buyer3's balance + BOOST_CHECK_EQUAL( 10, get_balance(buyer3, bitusd) ); + BOOST_CHECK_EQUAL( init_balance - 111, get_balance(buyer3, core) ); + + // then it will match with call, at mssp: 1/11 = 1000/11000 + const call_order_object* tmp_call = db.find( call_id ); + BOOST_CHECK( tmp_call != nullptr ); + + // call will receive call_to_cover, pay 11*call_to_cover + share_type call_to_pay = call_to_cover * 11; + BOOST_CHECK_EQUAL( 1000 - call_to_cover.value, call.debt.value ); + BOOST_CHECK_EQUAL( 15000 - call_to_pay.value, call.collateral.value ); + // new collateral ratio should be higher than mcr as well as tcr + BOOST_CHECK( call.debt.value * 10 * 1750 < call.collateral.value * 1000 ); + idump( (call) ); + // borrower's balance doesn't change + BOOST_CHECK_EQUAL( init_balance - 15000, get_balance(borrower, core) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + + // the limit order then will match with call2, at mssp: 1/11 = 1000/11000 + const call_order_object* tmp_call2 = db.find( call2_id ); + BOOST_CHECK( tmp_call2 != nullptr ); + + // call2 will receive call2_to_cover, pay 11*call2_to_cover + share_type call2_to_pay = call2_to_cover * 11; + BOOST_CHECK_EQUAL( 1000 - call2_to_cover.value, call2.debt.value ); + BOOST_CHECK_EQUAL( 15500 - call2_to_pay.value, call2.collateral.value ); + // new collateral ratio should be higher than mcr as well as tcr + BOOST_CHECK( call2.debt.value * 10 * 2000 < call2.collateral.value * 1000 ); + idump( (call2) ); + // borrower2's balance doesn't change + BOOST_CHECK_EQUAL( init_balance - 15500, get_balance(borrower2, core) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower2, bitusd) ); + + // then it will match with buy_med, at buy_med's price. Since buy_med is too big, it's partially filled. 
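+ // Here the split between call, call2 and buy_med depends on get_max_debt_to_cover, so the expected balances below are + // computed symbolically from call_to_cover / call2_to_cover rather than hard-coded; the seller's total proceeds are still + // 111 + (700*4-10)*11 = 30801 CORE, because every fill after buy_high happens at 1/11.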
+ // buy_med receives the remaining USD of sell order, minus market fees, goes to buyer2's balance + share_type buy_med_get = 700*4 - 10 - call_to_cover - call2_to_cover; + share_type buy_med_pay = buy_med_get * 11; // buy_med pays at 1/11 + buy_med_get -= (buy_med_get/100); // minus 1% market fee + BOOST_CHECK_EQUAL( buy_med_get.value, get_balance(buyer2, bitusd) ); + BOOST_CHECK_EQUAL( init_balance - 33000, get_balance(buyer2, core) ); + BOOST_CHECK_EQUAL( db.find( buy_med )->for_sale.value, 33000-buy_med_pay.value ); + + // call3 is not in margin call territory so won't be matched + BOOST_CHECK_EQUAL( 1000, call3.debt.value ); + BOOST_CHECK_EQUAL( 25000, call3.collateral.value ); + + // buy_low's price is too low that won't be matched + BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 ); + + // check seller balance + BOOST_CHECK_EQUAL( 193, get_balance(seller, bitusd) ); // 3000 - 7 - 700*4 + BOOST_CHECK_EQUAL( 30801, get_balance(seller, core) ); // 111 + (700*4-10)*11 + + // Cancel buy_med + cancel_limit_order( buy_med(db) ); + BOOST_CHECK( !db.find( buy_med ) ); + BOOST_CHECK_EQUAL( buy_med_get.value, get_balance(buyer2, bitusd) ); + BOOST_CHECK_EQUAL( init_balance - buy_med_pay.value, get_balance(buyer2, core) ); + + // Create another sell order slightly below the call price, won't fill + limit_order_id_type sell_med = create_sell_order( seller, bitusd.amount(7), core.amount(59) )->id; + BOOST_CHECK_EQUAL( db.find( sell_med )->for_sale.value, 7 ); + // check seller balance + BOOST_CHECK_EQUAL( 193-7, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 30801, get_balance(seller, core) ); + + // call3 is not in margin call territory so won't be matched + BOOST_CHECK_EQUAL( 1000, call3.debt.value ); + BOOST_CHECK_EQUAL( 25000, call3.collateral.value ); + + // buy_low's price is too low that won't be matched + BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 ); + + // generate a block + generate_block(); + +} FC_LOG_AND_RETHROW() } + +/*** + * BSIP38 "target_collateral_ratio" test: matching a maker limit order with multiple taker call orders + */ +BOOST_AUTO_TEST_CASE(target_cr_test_call_limit) +{ try { + auto mi = db.get_global_properties().parameters.maintenance_interval; + generate_blocks(HARDFORK_CORE_834_TIME - mi); + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + + set_expiration( db, trx ); + + ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(feedproducer)); + + const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); + const auto& core = asset_id_type()(db); + + int64_t init_balance(1000000); + + transfer(committee_account, buyer_id, asset(init_balance)); + transfer(committee_account, borrower_id, asset(init_balance)); + transfer(committee_account, borrower2_id, asset(init_balance)); + transfer(committee_account, borrower3_id, asset(init_balance)); + update_feed_producers( bitusd, {feedproducer.id} ); + + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(5); + publish_feed( bitusd, feedproducer, current_feed ); + // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7, tcr 170% is lower than 175% + const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000), 1700); + call_order_id_type call_id = call.id; + // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7, tcr 200% is higher than 175% + const 
call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500), 2000); + call_order_id_type call2_id = call2.id; + // create yet another position with 500% collateral, call price is 25/1.75 CORE/USD = 100/7, no tcr + const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(25000)); + transfer(borrower, seller, bitusd.amount(1000)); + transfer(borrower2, seller, bitusd.amount(1000)); + transfer(borrower3, seller, bitusd.amount(1000)); + + BOOST_CHECK_EQUAL( 1000, call.debt.value ); + BOOST_CHECK_EQUAL( 15000, call.collateral.value ); + BOOST_CHECK_EQUAL( 1000, call2.debt.value ); + BOOST_CHECK_EQUAL( 15500, call2.collateral.value ); + BOOST_CHECK_EQUAL( 1000, call3.debt.value ); + BOOST_CHECK_EQUAL( 25000, call3.collateral.value ); + BOOST_CHECK_EQUAL( 3000, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + BOOST_CHECK_EQUAL( 3000, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( init_balance - 15000, get_balance(borrower, core) ); + BOOST_CHECK_EQUAL( init_balance - 15500, get_balance(borrower2, core) ); + BOOST_CHECK_EQUAL( init_balance - 25000, get_balance(borrower3, core) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower2, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower3, bitusd) ); + + // This sell order above MSSP will not be matched with a call + limit_order_id_type sell_high = create_sell_order(seller, bitusd.amount(7), core.amount(78))->id; + BOOST_CHECK_EQUAL( db.find( sell_high )->for_sale.value, 7 ); + + BOOST_CHECK_EQUAL( 2993, get_balance(seller, bitusd) ); + BOOST_CHECK_EQUAL( 0, get_balance(seller, core) ); + + // This buy order is too low will not be matched with a sell order + limit_order_id_type buy_low = create_sell_order(buyer, asset(80), bitusd.amount(10))->id; + + BOOST_CHECK_EQUAL( 0, get_balance(buyer, bitusd) ); + BOOST_CHECK_EQUAL( init_balance - 80, get_balance(buyer, core) ); + + // Create a sell order which will be matched with several call orders later, price 1/9 + limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(500), core.amount(4500) )->id; + BOOST_CHECK_EQUAL( db.find( sell_id )->for_sale.value, 500 ); + + // prepare price feed to get call and call2 (but not call3) into margin call territory + current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(10); + + // call and call2's CR is quite high, and debt amount is quite a lot, assume neither of them will be completely filled + price match_price = sell_id(db).sell_price; + share_type call_to_cover = call_id(db).get_max_debt_to_cover(match_price,current_feed.settlement_price,1750); + share_type call2_to_cover = call2_id(db).get_max_debt_to_cover(match_price,current_feed.settlement_price,1750); + BOOST_CHECK_LT( call_to_cover.value, call_id(db).debt.value ); + BOOST_CHECK_LT( call2_to_cover.value, call2_id(db).debt.value ); + // even though call2 has a higher CR, since call's TCR is less than call2's TCR, so we expect call will cover less when called + BOOST_CHECK_LT( call_to_cover.value, call2_to_cover.value ); + + // adjust price feed to get call and call2 (but not call3) into margin call territory + publish_feed( bitusd, feedproducer, current_feed ); + // settlement price = 1/10, mssp = 1/11 + + // firstly the limit order will match with call, at limit order's price: 1/9 + const call_order_object* tmp_call = db.find( call_id ); + BOOST_CHECK( tmp_call != nullptr ); + + // call will receive call_to_cover, pay 9*call_to_cover + share_type 
call_to_pay = call_to_cover * 9; + BOOST_CHECK_EQUAL( 1000 - call_to_cover.value, call.debt.value ); + BOOST_CHECK_EQUAL( 15000 - call_to_pay.value, call.collateral.value ); + // new collateral ratio should be higher than mcr as well as tcr + BOOST_CHECK( call.debt.value * 10 * 1750 < call.collateral.value * 1000 ); + idump( (call) ); + // borrower's balance doesn't change + BOOST_CHECK_EQUAL( init_balance - 15000, get_balance(borrower, core) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) ); + + // the limit order then will match with call2, at limit order's price: 1/9 + const call_order_object* tmp_call2 = db.find( call2_id ); + BOOST_CHECK( tmp_call2 != nullptr ); + + // if the limit is big enough, call2 will receive call2_to_cover, pay 11*call2_to_cover + // however it's not the case, so call2 will receive less + call2_to_cover = 500 - call_to_cover; + share_type call2_to_pay = call2_to_cover * 9; + BOOST_CHECK_EQUAL( 1000 - call2_to_cover.value, call2.debt.value ); + BOOST_CHECK_EQUAL( 15500 - call2_to_pay.value, call2.collateral.value ); + idump( (call2) ); + // borrower2's balance doesn't change + BOOST_CHECK_EQUAL( init_balance - 15500, get_balance(borrower2, core) ); + BOOST_CHECK_EQUAL( 0, get_balance(borrower2, bitusd) ); + + // call3 is not in margin call territory so won't be matched + BOOST_CHECK_EQUAL( 1000, call3.debt.value ); + BOOST_CHECK_EQUAL( 25000, call3.collateral.value ); + + // sell_id is completely filled + BOOST_CHECK( !db.find( sell_id ) ); + + // check seller balance + BOOST_CHECK_EQUAL( 2493, get_balance(seller, bitusd) ); // 3000 - 7 - 500 + BOOST_CHECK_EQUAL( 4500, get_balance(seller, core) ); // 500*9 + + // buy_low's price is too low that won't be matched + BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 ); + + // generate a block + generate_block(); + +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/network_broadcast_api_tests.cpp b/tests/tests/network_broadcast_api_tests.cpp new file mode 100644 index 0000000000..a566750b67 --- /dev/null +++ b/tests/tests/network_broadcast_api_tests.cpp @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2018 Abit More, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include + +#include + +#include + +#include "../common/database_fixture.hpp" + +using namespace graphene::chain; +using namespace graphene::chain::test; + +BOOST_FIXTURE_TEST_SUITE(network_broadcast_api_tests, database_fixture) + +BOOST_AUTO_TEST_CASE( broadcast_transaction_with_callback_test ) { + try { + + uint32_t called = 0; + auto callback = [&]( const variant& v ) + { + ++called; + }; + + fc::ecc::private_key cid_key = fc::ecc::private_key::regenerate( fc::digest("key") ); + const account_id_type cid_id = create_account( "cid", cid_key.get_public_key() ).id; + fund( cid_id(db) ); + + auto nb_api = std::make_shared< graphene::app::network_broadcast_api >( app ); + + set_expiration( db, trx ); + transfer_operation trans; + trans.from = cid_id; + trans.to = account_id_type(); + trans.amount = asset(1); + trx.operations.push_back( trans ); + sign( trx, cid_key ); + + nb_api->broadcast_transaction_with_callback( callback, trx ); + + trx.clear(); + + generate_block(); + + fc::usleep(fc::milliseconds(200)); // sleep a while to execute callback in another thread + + BOOST_CHECK_EQUAL( called, 1u ); + + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/operation_tests.cpp b/tests/tests/operation_tests.cpp index 7b3867d7e5..132f99120e 100644 --- a/tests/tests/operation_tests.cpp +++ b/tests/tests/operation_tests.cpp @@ -23,6 +23,7 @@ */ #include +#include #include #include @@ -36,6 +37,7 @@ #include #include +#include #include #include "../common/database_fixture.hpp" @@ -43,6 +45,8 @@ using namespace graphene::chain; using namespace graphene::chain::test; +#define UIA_TEST_SYMBOL "UIATEST" + BOOST_FIXTURE_TEST_SUITE( operation_tests, database_fixture ) BOOST_AUTO_TEST_CASE( feed_limit_logic_test ) @@ -75,17 +79,126 @@ BOOST_AUTO_TEST_CASE( feed_limit_logic_test ) throw; } } + BOOST_AUTO_TEST_CASE( call_order_update_test ) { try { + + ACTORS((dan)(sam)); + const auto& bitusd = create_bitasset("USDBIT", sam.id); + const auto& core = asset_id_type()(db); + + transfer(committee_account, dan_id, asset(10000000)); + transfer(committee_account, sam_id, asset(10000000)); + update_feed_producers( bitusd, {sam.id} ); + + price_feed current_feed; current_feed.settlement_price = bitusd.amount( 100 ) / core.amount(100); + current_feed.maintenance_collateral_ratio = 1750; // need to set this explicitly, testnet has a different default + publish_feed( bitusd, sam, current_feed ); + + FC_ASSERT( bitusd.bitasset_data(db).current_feed.settlement_price == current_feed.settlement_price ); + + BOOST_TEST_MESSAGE( "attempting to borrow using 2x collateral at 1:1 price now that there is a valid order" ); + borrow( dan, bitusd.amount(5000), asset(10000)); + BOOST_REQUIRE_EQUAL( get_balance( dan, bitusd ), 5000 ); + BOOST_REQUIRE_EQUAL( get_balance( dan, core ), 10000000 - 10000 ); + + BOOST_TEST_MESSAGE( "covering 2500 usd and freeing 5000 core..." 
); + cover( dan, bitusd.amount(2500), asset(5000)); + BOOST_REQUIRE_EQUAL( get_balance( dan, bitusd ), 2500 ); + BOOST_REQUIRE_EQUAL( get_balance( dan, core ), 10000000 - 10000 + 5000 ); + + BOOST_TEST_MESSAGE( "verifying that attempting to cover the full amount without claiming the collateral fails" ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(2500), core.amount(0) ), fc::exception ); + + cover( dan, bitusd.amount(2500), core.amount(5000)); + + BOOST_REQUIRE_EQUAL( get_balance( dan, bitusd ), 0 ); + BOOST_REQUIRE_EQUAL( get_balance( dan, core ), 10000000 ); + + borrow( dan, bitusd.amount(5000), asset(10000)); + BOOST_REQUIRE_EQUAL( get_balance( dan, bitusd ), 5000 ); + BOOST_REQUIRE_EQUAL( get_balance( dan, core ), 10000000 - 10000 ); + + + // test just increasing collateral + BOOST_TEST_MESSAGE( "increasing collateral" ); + borrow( dan, bitusd.amount(0), asset(10000)); + + BOOST_REQUIRE_EQUAL( get_balance( dan, bitusd ), 5000 ); + BOOST_REQUIRE_EQUAL( get_balance( dan, core ), 10000000 - 20000 ); + + // test just decreasing debt + BOOST_TEST_MESSAGE( "decreasing debt" ); + cover( dan, bitusd.amount(1000), asset(0)); + + BOOST_REQUIRE_EQUAL( get_balance( dan, bitusd ), 4000 ); + BOOST_REQUIRE_EQUAL( get_balance( dan, core ), 10000000 - 20000 ); + + BOOST_TEST_MESSAGE( "increasing debt without increasing collateral" ); + borrow( dan, bitusd.amount(1000), asset(0)); + + BOOST_REQUIRE_EQUAL( get_balance( dan, bitusd ), 5000 ); + BOOST_REQUIRE_EQUAL( get_balance( dan, core ), 10000000 - 20000 ); + + BOOST_TEST_MESSAGE( "increasing debt a lot without increasing collateral, fails due to black swan" ); + GRAPHENE_REQUIRE_THROW( borrow( dan, bitusd.amount(80000), asset(0)), fc::exception ); + BOOST_TEST_MESSAGE( "attempting to claim most of collateral without paying off debt, fails due to black swan" ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(0), asset(20000-1)), fc::exception ); + BOOST_TEST_MESSAGE( "attempting to claim all collateral without paying off debt" ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(0), asset(20000)), fc::exception ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(0), asset(20000-1)), fc::exception ); + + borrow( sam, bitusd.amount(1000), asset(10000)); + transfer( sam, dan, bitusd.amount(1000) ); + + BOOST_TEST_MESSAGE( "attempting to claim more collateral than available" ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(4000), asset(20001)), fc::exception ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(4000), asset(20100)), fc::exception ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(4000), asset(30000)), fc::exception ); + + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(5000), asset(20001)), fc::exception ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(5000), asset(20100)), fc::exception ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(5000), asset(30000)), fc::exception ); + + BOOST_TEST_MESSAGE( "attempting to pay more debt than required" ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(6000), asset(15000)), fc::exception ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(6000), asset(20000)), fc::exception ); + + BOOST_TEST_MESSAGE( "attempting to pay more debt than required, and claim more collateral than available" ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(6000), asset(20001)), fc::exception ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(6000), asset(40000)), fc::exception ); + + BOOST_TEST_MESSAGE( "attempting reduce collateral without paying off any debt" ); + 
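// Quick sanity check for this step, assuming the sequence above left dan with 5000 USD of debt against 20000 CORE of collateral: + // giving back 1000 CORE leaves 19000/5000 = 380% collateral at the 1:1 feed, still above the 1750 (175%) maintenance ratio, + // so this cover succeeds, unlike the earlier attempts to claim most or all of the collateral. +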
cover( dan, bitusd.amount(0), asset(1000)); + + BOOST_TEST_MESSAGE( "attempting change call price to be below minimum for debt/collateral ratio" ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(0), asset(0)), fc::exception ); + + } catch (fc::exception& e) { + edump((e.to_detail_string())); + throw; + } +} + +BOOST_AUTO_TEST_CASE( old_call_order_update_test_after_hardfork_583 ) +{ + try { + + generate_blocks( HARDFORK_CORE_583_TIME ); + generate_block(); + set_expiration( db, trx ); + ACTORS((dan)(sam)); const auto& bitusd = create_bitasset("USDBIT", sam.id); const auto& core = asset_id_type()(db); transfer(committee_account, dan_id, asset(10000000)); + transfer(committee_account, sam_id, asset(10000000)); update_feed_producers( bitusd, {sam.id} ); price_feed current_feed; current_feed.settlement_price = bitusd.amount( 100 ) / core.amount(100); + current_feed.maintenance_collateral_ratio = 1750; // need to set this explicitly, testnet has a different default publish_feed( bitusd, sam, current_feed ); FC_ASSERT( bitusd.bitasset_data(db).current_feed.settlement_price == current_feed.settlement_price ); @@ -133,10 +246,33 @@ BOOST_AUTO_TEST_CASE( call_order_update_test ) BOOST_REQUIRE_EQUAL( get_balance( dan, bitusd ), 5000 ); BOOST_REQUIRE_EQUAL( get_balance( dan, core ), 10000000 - 20000 ); - BOOST_TEST_MESSAGE( "increasing debt without increasing collateral again" ); + BOOST_TEST_MESSAGE( "increasing debt a lot without increasing collateral, fails due to black swan" ); GRAPHENE_REQUIRE_THROW( borrow( dan, bitusd.amount(80000), asset(0)), fc::exception ); + BOOST_TEST_MESSAGE( "attempting to claim most of collateral without paying off debt, fails due to black swan" ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(0), asset(20000-1)), fc::exception ); BOOST_TEST_MESSAGE( "attempting to claim all collateral without paying off debt" ); GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(0), asset(20000)), fc::exception ); + + borrow( sam, bitusd.amount(1000), asset(10000)); + transfer( sam, dan, bitusd.amount(1000) ); + + BOOST_TEST_MESSAGE( "attempting to claim more collateral than available" ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(4000), asset(20001)), fc::exception ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(4000), asset(20100)), fc::exception ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(4000), asset(30000)), fc::exception ); + + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(5000), asset(20001)), fc::exception ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(5000), asset(20100)), fc::exception ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(5000), asset(30000)), fc::exception ); + + BOOST_TEST_MESSAGE( "attempting to pay more debt than required" ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(6000), asset(15000)), fc::exception ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(6000), asset(20000)), fc::exception ); + + BOOST_TEST_MESSAGE( "attempting to pay more debt than required, and claim more collateral than available" ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(6000), asset(20001)), fc::exception ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(6000), asset(40000)), fc::exception ); + BOOST_TEST_MESSAGE( "attempting reduce collateral without paying off any debt" ); cover( dan, bitusd.amount(0), asset(1000)); @@ -149,6 +285,403 @@ BOOST_AUTO_TEST_CASE( call_order_update_test ) } } +BOOST_AUTO_TEST_CASE( asset_settle_cancel_operation_test_after_hf588 ) +{ + // fast jump to hardfork time + 
generate_blocks( HARDFORK_CORE_588_TIME ); + // one more block to pass hardfork time + generate_block(); + set_expiration( db, trx ); + + BOOST_TEST_MESSAGE( "Creating a proposal containing a asset_settle_cancel_operation" ); + { + proposal_create_operation pcop = proposal_create_operation::committee_proposal( + db.get_global_properties().parameters, db.head_block_time()); + pcop.fee_paying_account = GRAPHENE_TEMP_ACCOUNT; + pcop.expiration_time = db.head_block_time() + *pcop.review_period_seconds + 10; + asset_settle_cancel_operation ascop; + ascop.amount.amount = 1; + pcop.proposed_ops.emplace_back(ascop); + trx.operations.push_back(pcop); + + BOOST_CHECK_EXCEPTION(PUSH_TX(db, trx), fc::assert_exception, + [](fc::assert_exception const &e) -> bool { + std::cout << e.to_string() << std::endl; + if (e.to_string().find("Virtual operation") != std::string::npos) + return true; + + return false; + }); + } + + BOOST_TEST_MESSAGE( "Creating a recursive proposal containing asset_settle_cancel_operation" ); + { + proposal_create_operation pcop = proposal_create_operation::committee_proposal( + db.get_global_properties().parameters, db.head_block_time()); + + pcop.fee_paying_account = GRAPHENE_TEMP_ACCOUNT; + pcop.expiration_time = db.head_block_time() + *pcop.review_period_seconds + 10; + proposal_create_operation inner_pcop = proposal_create_operation::committee_proposal( + db.get_global_properties().parameters, db.head_block_time()); + + inner_pcop.fee_paying_account = GRAPHENE_TEMP_ACCOUNT; + inner_pcop.expiration_time = db.head_block_time() + *inner_pcop.review_period_seconds + 10; + + asset_settle_cancel_operation ascop; + ascop.amount.amount = 1; + inner_pcop.proposed_ops.emplace_back(ascop); + pcop.proposed_ops.emplace_back(inner_pcop); + + trx.operations.push_back(pcop); + + BOOST_CHECK_EXCEPTION(PUSH_TX(db, trx), fc::assert_exception, + [](fc::assert_exception const &e) -> bool { + std::cout << e.to_string() << std::endl; + if (e.to_string().find("Virtual operation") != std::string::npos) + return true; + + return false; + }); + } +} + +BOOST_AUTO_TEST_CASE( more_call_order_update_test ) +{ + try { + + ACTORS((dan)(sam)(alice)(bob)); + const auto& bitusd = create_bitasset("USDBIT", sam.id); + const auto& core = asset_id_type()(db); + + transfer(committee_account, dan_id, asset(10000000)); + transfer(committee_account, sam_id, asset(10000000)); + transfer(committee_account, alice_id, asset(10000000)); + transfer(committee_account, bob_id, asset(10000000)); + update_feed_producers( bitusd, {sam.id} ); + + price_feed current_feed; current_feed.settlement_price = bitusd.amount( 100 ) / core.amount(100); + current_feed.maintenance_collateral_ratio = 1750; // need to set this explicitly, testnet has a different default + current_feed.maximum_short_squeeze_ratio = 1100; // need to set this explicitly, testnet has a different default + publish_feed( bitusd, sam, current_feed ); + + FC_ASSERT( bitusd.bitasset_data(db).current_feed.settlement_price == current_feed.settlement_price ); + + BOOST_TEST_MESSAGE( "attempting to borrow using 1.75x collateral at 1:1 price should not be allowed" ); + GRAPHENE_REQUIRE_THROW( borrow( bob, bitusd.amount(10000), core.amount(17500) ), fc::exception ); + + BOOST_TEST_MESSAGE( "alice borrow using 4x collateral at 1:1 price" ); + borrow( alice, bitusd.amount(100000), core.amount(400000) )->id; + BOOST_REQUIRE_EQUAL( get_balance( alice, bitusd ), 100000 ); + BOOST_REQUIRE_EQUAL( get_balance( alice, core ), 10000000 - 400000 ); + + BOOST_TEST_MESSAGE( "alice place 
an order to sell usd at 1.05" ); + const limit_order_id_type alice_sell_id = create_sell_order( alice, bitusd.amount(1000), core.amount(1050) )->id; + BOOST_REQUIRE_EQUAL( get_balance( alice, bitusd ), 100000 - 1000 ); + BOOST_REQUIRE_EQUAL( get_balance( alice, core ), 10000000 - 400000 ); + + BOOST_TEST_MESSAGE( "bob attempting to borrow too much using 1.75x collateral at 1:1 price should not be allowed" ); + GRAPHENE_REQUIRE_THROW( borrow( bob, bitusd.amount(10000), core.amount(17500) ), fc::exception ); + + BOOST_TEST_MESSAGE( "bob attempting to borrow less using 1.75x collateral at 1:1 price should be allowed and margin called" ); + BOOST_CHECK( !borrow( bob, bitusd.amount(100), core.amount(175) ) ); + BOOST_REQUIRE_EQUAL( get_balance( bob, bitusd ), 100 ); + BOOST_REQUIRE_EQUAL( get_balance( bob, core ), 10000000 - 105 ); + BOOST_REQUIRE_EQUAL( get_balance( alice, bitusd ), 100000 - 1000 ); + BOOST_REQUIRE_EQUAL( get_balance( alice, core ), 10000000 - 400000 + 105 ); + + BOOST_TEST_MESSAGE( "bob attempting to borrow using 2x collateral at 1:1 price now that there is a valid order" ); + const call_order_id_type bob_call_id = borrow( bob, bitusd.amount(100), asset(200))->id; + BOOST_REQUIRE_EQUAL( get_balance( bob, bitusd ), 100 + 100 ); + BOOST_REQUIRE_EQUAL( get_balance( bob, core ), 10000000 - 105 - 200 ); + + BOOST_TEST_MESSAGE( "bob attempting to borrow too much more using 1.75x collateral at 1:1 price should not be allowed" ); + GRAPHENE_REQUIRE_THROW( borrow( bob, bitusd.amount(10000-100), core.amount(17500-200) ), fc::exception ); + + BOOST_TEST_MESSAGE( "bob attempting to reduce collateral to 1.75x at 1:1 price should be allowed and margin called" ); + BOOST_CHECK( !borrow( bob, bitusd.amount(0), core.amount(175-200) ) ); + BOOST_REQUIRE_EQUAL( get_balance( bob, bitusd ), 100 + 100 ); + BOOST_REQUIRE_EQUAL( get_balance( bob, core ), 10000000 - 105 - 105 ); + BOOST_REQUIRE_EQUAL( get_balance( alice, bitusd ), 100000 - 1000 ); + BOOST_REQUIRE_EQUAL( get_balance( alice, core ), 10000000 - 400000 + 105 + 105 ); + BOOST_CHECK( !db.find( bob_call_id ) ); + + BOOST_TEST_MESSAGE( "alice cancel sell order" ); + cancel_limit_order( alice_sell_id(db) ); + + BOOST_TEST_MESSAGE( "dan attempting to borrow using 2x collateral at 1:1 price now that there is a valid order" ); + borrow( dan, bitusd.amount(5000), asset(10000)); + BOOST_REQUIRE_EQUAL( get_balance( dan, bitusd ), 5000 ); + BOOST_REQUIRE_EQUAL( get_balance( dan, core ), 10000000 - 10000 ); + + BOOST_TEST_MESSAGE( "sam update price feed so dan's position will enter margin call territory." ); + current_feed.settlement_price = bitusd.amount( 100 ) / core.amount(180); + publish_feed( bitusd, sam, current_feed ); + + BOOST_TEST_MESSAGE( "dan covering 2500 usd and freeing 5000 core should not be allowed..." ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(2500), core.amount(5000) ), fc::exception ); + + BOOST_TEST_MESSAGE( "dan covering 2500 usd and freeing 5001 core should not be allowed..." ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(2500), core.amount(5001) ), fc::exception ); + + BOOST_TEST_MESSAGE( "dan covering 2500 usd and freeing 4999 core should not be allowed..." ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(2500), core.amount(4999) ), fc::exception ); + + BOOST_TEST_MESSAGE( "dan borrow 2500 more usd with 5000 more core should not be allowed..."
); + GRAPHENE_REQUIRE_THROW( borrow( dan, bitusd.amount(2500), core.amount(5000) ), fc::exception ); + + BOOST_TEST_MESSAGE( "dan borrow 2500 more usd with 4999 more core should not be allowed..." ); + GRAPHENE_REQUIRE_THROW( borrow( dan, bitusd.amount(2500), core.amount(4999) ), fc::exception ); + + BOOST_TEST_MESSAGE( "dan borrow 2500 more usd with 5001 more core should not be allowed..." ); + GRAPHENE_REQUIRE_THROW( borrow( dan, bitusd.amount(2500), core.amount(5001) ), fc::exception ); + + BOOST_TEST_MESSAGE( "dan covering 0 usd and freeing 1 core should not be allowed..." ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(0), core.amount(1) ), fc::exception ); + + BOOST_TEST_MESSAGE( "dan adding 1 core as collateral should not be allowed..." ); + GRAPHENE_REQUIRE_THROW( borrow( dan, bitusd.amount(0), core.amount(1) ), fc::exception ); + + + } catch (fc::exception& e) { + edump((e.to_detail_string())); + throw; + } +} + +BOOST_AUTO_TEST_CASE( more_call_order_update_test_after_hardfork_583 ) +{ + try { + + generate_blocks( HARDFORK_CORE_583_TIME ); + generate_block(); + set_expiration( db, trx ); + + ACTORS((dan)(sam)(alice)(bob)); + const auto& bitusd = create_bitasset("USDBIT", sam.id); + const auto& core = asset_id_type()(db); + + transfer(committee_account, dan_id, asset(10000000)); + transfer(committee_account, sam_id, asset(10000000)); + transfer(committee_account, alice_id, asset(10000000)); + transfer(committee_account, bob_id, asset(10000000)); + update_feed_producers( bitusd, {sam.id} ); + + price_feed current_feed; current_feed.settlement_price = bitusd.amount( 100 ) / core.amount(100); + current_feed.maintenance_collateral_ratio = 1750; // need to set this explicitly, testnet has a different default + current_feed.maximum_short_squeeze_ratio = 1100; // need to set this explicitly, testnet has a different default + publish_feed( bitusd, sam, current_feed ); + + FC_ASSERT( bitusd.bitasset_data(db).current_feed.settlement_price == current_feed.settlement_price ); + + BOOST_TEST_MESSAGE( "attempting to borrow using 1.75x collateral at 1:1 price should not be allowed" ); + GRAPHENE_REQUIRE_THROW( borrow( bob, bitusd.amount(10000), core.amount(17500) ), fc::exception ); + + BOOST_TEST_MESSAGE( "alice borrow using 4x collateral at 1:1 price" ); + borrow( alice, bitusd.amount(100000), core.amount(400000) )->id; + BOOST_REQUIRE_EQUAL( get_balance( alice, bitusd ), 100000 ); + BOOST_REQUIRE_EQUAL( get_balance( alice, core ), 10000000 - 400000 ); + + BOOST_TEST_MESSAGE( "alice place an order to sell usd at 1.05" ); + const limit_order_id_type alice_sell_id = create_sell_order( alice, bitusd.amount(1000), core.amount(1050) )->id; + BOOST_REQUIRE_EQUAL( get_balance( alice, bitusd ), 100000 - 1000 ); + BOOST_REQUIRE_EQUAL( get_balance( alice, core ), 10000000 - 400000 ); + + BOOST_TEST_MESSAGE( "bob attempting to borrow too much using 1.75x collateral at 1:1 price should not be allowed" ); + GRAPHENE_REQUIRE_THROW( borrow( bob, bitusd.amount(10000), core.amount(17500) ), fc::exception ); + + BOOST_TEST_MESSAGE( "bob attempting to borrow less using 1.75x collateral at 1:1 price should be allowed and margin called" ); + BOOST_CHECK( !borrow( bob, bitusd.amount(100), core.amount(175) ) ); + BOOST_REQUIRE_EQUAL( get_balance( bob, bitusd ), 100 ); + BOOST_REQUIRE_EQUAL( get_balance( bob, core ), 10000000 - 105 ); + BOOST_REQUIRE_EQUAL( get_balance( alice, bitusd ), 100000 - 1000 ); + BOOST_REQUIRE_EQUAL( get_balance( alice, core ), 10000000 - 400000 + 105 ); + + BOOST_TEST_MESSAGE( "bob 
attempting to borrow using 2x collateral at 1:1 price now that there is a valid order" ); + const call_order_id_type bob_call_id = borrow( bob, bitusd.amount(100), asset(200))->id; + BOOST_REQUIRE_EQUAL( get_balance( bob, bitusd ), 100 + 100 ); + BOOST_REQUIRE_EQUAL( get_balance( bob, core ), 10000000 - 105 - 200 ); + + BOOST_TEST_MESSAGE( "bob attempting to borrow too much more using 1.75x collateral at 1:1 price should not be allowed" ); + GRAPHENE_REQUIRE_THROW( borrow( bob, bitusd.amount(10000-100), core.amount(17500-200) ), fc::exception ); + + BOOST_TEST_MESSAGE( "bob attempting to reduce collateral to 1.75x at 1:1 price should be allowed and margin called" ); + BOOST_CHECK( !borrow( bob, bitusd.amount(0), core.amount(175-200) ) ); + BOOST_REQUIRE_EQUAL( get_balance( bob, bitusd ), 100 + 100 ); + BOOST_REQUIRE_EQUAL( get_balance( bob, core ), 10000000 - 105 - 105 ); + BOOST_REQUIRE_EQUAL( get_balance( alice, bitusd ), 100000 - 1000 ); + BOOST_REQUIRE_EQUAL( get_balance( alice, core ), 10000000 - 400000 + 105 + 105 ); + BOOST_CHECK( !db.find( bob_call_id ) ); + + BOOST_TEST_MESSAGE( "alice cancel sell order" ); + cancel_limit_order( alice_sell_id(db) ); + + BOOST_TEST_MESSAGE( "dan attempting to borrow using 2x collateral at 1:1 price now that there is a valid order" ); + borrow( dan, bitusd.amount(5000), asset(10000)); + BOOST_REQUIRE_EQUAL( get_balance( dan, bitusd ), 5000 ); + BOOST_REQUIRE_EQUAL( get_balance( dan, core ), 10000000 - 10000 ); + + BOOST_TEST_MESSAGE( "sam update price feed so dan's position will enter margin call territory." ); + current_feed.settlement_price = bitusd.amount( 100 ) / core.amount(180); + publish_feed( bitusd, sam, current_feed ); + + BOOST_TEST_MESSAGE( "dan covering 2500 usd and freeing 5000 core should not be allowed..." ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(2500), core.amount(5000) ), fc::exception ); + + BOOST_TEST_MESSAGE( "dan covering 2500 usd and freeing 5001 core should not be allowed..." ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(2500), core.amount(5001) ), fc::exception ); + + BOOST_TEST_MESSAGE( "dan borrow 2500 more usd with 5000 more core should not be allowed..." ); + GRAPHENE_REQUIRE_THROW( borrow( dan, bitusd.amount(2500), core.amount(5000) ), fc::exception ); + + BOOST_TEST_MESSAGE( "dan borrow 2500 more usd with 4999 more core should not be allowed..." ); + GRAPHENE_REQUIRE_THROW( borrow( dan, bitusd.amount(2500), core.amount(4999) ), fc::exception ); + + BOOST_TEST_MESSAGE( "dan covering 2500 usd and freeing 4999 core should be allowed..." ); + cover( dan, bitusd.amount(2500), asset(4999)); + BOOST_REQUIRE_EQUAL( get_balance( dan, bitusd ), 2500 ); + BOOST_REQUIRE_EQUAL( get_balance( dan, core ), 10000000 - 10000 + 4999 ); + + BOOST_TEST_MESSAGE( "dan covering 0 usd and freeing 1 core should not be allowed..." ); + GRAPHENE_REQUIRE_THROW( cover( dan, bitusd.amount(0), core.amount(1) ), fc::exception ); + + BOOST_TEST_MESSAGE( "dan adding 1 core as collateral should be allowed..." ); + borrow( dan, bitusd.amount(0), asset(1)); + BOOST_REQUIRE_EQUAL( get_balance( dan, bitusd ), 2500 ); + BOOST_REQUIRE_EQUAL( get_balance( dan, core ), 10000000 - 10000 + 4999 - 1 ); + + BOOST_TEST_MESSAGE( "dan borrow 2500 more usd with 5002 more core should not be allowed..." ); + GRAPHENE_REQUIRE_THROW( borrow( dan, bitusd.amount(2500), core.amount(5002) ), fc::exception ); + + BOOST_TEST_MESSAGE( "dan borrow 2500 more usd with 5003 more core should not be allowed..." 
); + GRAPHENE_REQUIRE_THROW( borrow( dan, bitusd.amount(2500), asset(5003) ), fc::exception ); + + } catch (fc::exception& e) { + edump((e.to_detail_string())); + throw; + } +} + +BOOST_AUTO_TEST_CASE( call_order_update_validation_test ) +{ + call_order_update_operation op; + + // throw on default values + BOOST_CHECK_THROW( op.validate(), fc::assert_exception ); + + // minimum changes to make it valid + op.delta_debt = asset( 1, asset_id_type(1) ); + op.validate(); // won't throw if has a non-zero debt with different asset_id_type than collateral + + // throw on negative fee + op.fee = asset( -1 ); + BOOST_CHECK_THROW( op.validate(), fc::assert_exception ); + op.fee = asset( 0 ); + + // throw on identical debt and collateral asset id + op.delta_collateral = asset( 0, asset_id_type(1) ); + BOOST_CHECK_THROW( op.validate(), fc::assert_exception ); + + // throw on zero debt and collateral amount + op.delta_debt = asset( 0, asset_id_type(0) ); + BOOST_CHECK_THROW( op.validate(), fc::assert_exception ); + op.delta_debt = asset( -1, asset_id_type(0) ); + + op.validate(); // valid now + + op.extensions.value.target_collateral_ratio = 0; + op.validate(); // still valid + + op.extensions.value.target_collateral_ratio = 65535; + op.validate(); // still valid + +} + +// Tests that target_cr option can't be set before hard fork core-834 +// TODO: remove this test case after hard fork +BOOST_AUTO_TEST_CASE( call_order_update_target_cr_hardfork_time_test ) +{ + try { + auto mi = db.get_global_properties().parameters.maintenance_interval; + generate_blocks(HARDFORK_CORE_834_TIME - mi); + + set_expiration( db, trx ); + + ACTORS((sam)(alice)(bob)); + const auto& bitusd = create_bitasset("USDBIT", sam.id); + const auto& core = asset_id_type()(db); + asset_id_type bitusd_id = bitusd.id; + asset_id_type core_id = core.id; + + transfer(committee_account, sam_id, asset(10000000)); + transfer(committee_account, alice_id, asset(10000000)); + transfer(committee_account, bob_id, asset(10000000)); + update_feed_producers( bitusd, {sam.id} ); + + price_feed current_feed; current_feed.settlement_price = bitusd.amount( 100 ) / core.amount(100); + current_feed.maintenance_collateral_ratio = 1750; // need to set this explicitly, testnet has a different default + current_feed.maximum_short_squeeze_ratio = 1100; // need to set this explicitly, testnet has a different default + publish_feed( bitusd, sam, current_feed ); + + FC_ASSERT( bitusd.bitasset_data(db).current_feed.settlement_price == current_feed.settlement_price ); + + BOOST_TEST_MESSAGE( "alice tries to borrow using 4x collateral at 1:1 price with target_cr set, " + "will fail before hard fork time" ); + GRAPHENE_REQUIRE_THROW( borrow( alice, bitusd.amount(100000), core.amount(400000), 0 ), fc::assert_exception ); + GRAPHENE_REQUIRE_THROW( borrow( alice, bitusd.amount(100000), core.amount(400000), 1 ), fc::assert_exception ); + GRAPHENE_REQUIRE_THROW( borrow( alice, bitusd.amount(100000), core.amount(400000), 1749 ), fc::assert_exception ); + GRAPHENE_REQUIRE_THROW( borrow( alice, bitusd.amount(100000), core.amount(400000), 1750 ), fc::assert_exception ); + GRAPHENE_REQUIRE_THROW( borrow( alice, bitusd.amount(100000), core.amount(400000), 1751 ), fc::assert_exception ); + GRAPHENE_REQUIRE_THROW( borrow( alice, bitusd.amount(100000), core.amount(400000), 65535 ), fc::assert_exception ); + + auto call_update_proposal = [this]( const account_object& proposer, + const account_object& updater, + const asset& delta_collateral, + const asset& delta_debt, + const 
optional target_cr ) + { + call_order_update_operation op; + op.funding_account = updater.id; + op.delta_collateral = delta_collateral; + op.delta_debt = delta_debt; + op.extensions.value.target_collateral_ratio = target_cr; + + const auto& curfees = *db.get_global_properties().parameters.current_fees; + const auto& proposal_create_fees = curfees.get(); + proposal_create_operation prop; + prop.fee_paying_account = proposer.id; + prop.proposed_ops.emplace_back( op ); + prop.expiration_time = db.head_block_time() + fc::days(1); + prop.fee = asset( proposal_create_fees.fee + proposal_create_fees.price_per_kbyte ); + + signed_transaction tx; + tx.operations.push_back( prop ); + db.current_fee_schedule().set_fee( tx.operations.back() ); + set_expiration( db, tx ); + PUSH_TX( db, tx, ~0 ); + }; + + BOOST_TEST_MESSAGE( "bob tries to create a proposal with target_cr set, " + "will fail before hard fork time" ); + GRAPHENE_REQUIRE_THROW( call_update_proposal( bob, alice, bitusd.amount(10), core.amount(40), 0 ), fc::assert_exception ); + GRAPHENE_REQUIRE_THROW( call_update_proposal( bob, alice, bitusd.amount(10), core.amount(40), 1750 ), fc::assert_exception ); + GRAPHENE_REQUIRE_THROW( call_update_proposal( bob, alice, bitusd.amount(10), core.amount(40), 65535 ), fc::assert_exception ); + + generate_blocks( db.get_dynamic_global_properties().next_maintenance_time ); + set_expiration( db, trx ); + + BOOST_TEST_MESSAGE( "bob tries to create a proposal with target_cr set, " + "will succeed after hard fork time" ); + // now able to propose + call_update_proposal( bob_id(db), alice_id(db), bitusd_id(db).amount(10), core_id(db).amount(40), 65535 ); + + generate_block(); + + } catch (fc::exception& e) { + edump((e.to_detail_string())); + throw; + } +} + /** * This test sets up a situation where a margin call will be executed and ensures that * it is properly filled. @@ -179,6 +712,8 @@ BOOST_AUTO_TEST_CASE( margin_call_limit_test ) price_feed current_feed; current_feed.settlement_price = bitusd.amount( 100 ) / core.amount(100); + current_feed.maintenance_collateral_ratio = 1750; // need to set this explicitly, testnet has a different default + current_feed.maximum_short_squeeze_ratio = 1500; // need to set this explicitly, testnet has a different default // starting out with price 1:1 publish_feed( bitusd, feedproducer, current_feed ); @@ -229,178 +764,70 @@ BOOST_AUTO_TEST_CASE( margin_call_limit_test ) } } -/** - * This test sets up the minimum condition for a black swan to occur but does - not test the full range of cases that may be possible during a black swan. 
- */ -BOOST_AUTO_TEST_CASE( black_swan ) +BOOST_AUTO_TEST_CASE( prediction_market ) { try { - ACTORS((buyer)(seller)(borrower)(borrower2)(feedproducer)); + ACTORS((judge)(dan)(nathan)); - const auto& bitusd = create_bitasset("USDBIT", feedproducer_id); - const auto& core = asset_id_type()(db); + const auto& pmark = create_prediction_market("PMARK", judge_id); + const auto pmark_dd_id = pmark.dynamic_asset_data_id; + const auto& core = asset_id_type()(db); int64_t init_balance(1000000); + transfer(committee_account, judge_id, asset(init_balance)); + transfer(committee_account, dan_id, asset(init_balance)); + transfer(committee_account, nathan_id, asset(init_balance)); - transfer(committee_account, buyer_id, asset(init_balance)); - transfer(committee_account, borrower_id, asset(init_balance)); - transfer(committee_account, borrower2_id, asset(init_balance)); - update_feed_producers(bitusd, {feedproducer.id}); - - price_feed current_feed; - current_feed.settlement_price = bitusd.amount(100) / core.amount(100); - - // starting out with price 1:1 - publish_feed(bitusd, feedproducer, current_feed); - - // start out with 2:1 collateral - borrow(borrower, bitusd.amount(1000), asset(2000)); - borrow(borrower2, bitusd.amount(1000), asset(4000)); - - BOOST_REQUIRE_EQUAL( get_balance(borrower, bitusd), 1000 ); - BOOST_REQUIRE_EQUAL( get_balance(borrower2, bitusd), 1000 ); - BOOST_REQUIRE_EQUAL( get_balance(borrower , core), init_balance - 2000 ); - BOOST_REQUIRE_EQUAL( get_balance(borrower2, core), init_balance - 4000 ); - - current_feed.settlement_price = bitusd.amount( 100 ) / core.amount(200); - publish_feed( bitusd, feedproducer, current_feed ); - - /// this sell order is designed to trigger a black swan - create_sell_order( borrower2, bitusd.amount(1000), core.amount(3000) ); - - FC_ASSERT( bitusd.bitasset_data(db).has_settlement() ); - - force_settle(borrower, bitusd.amount(100)); - - BOOST_TEST_MESSAGE( "Verify that we cannot borrow after black swan" ); - GRAPHENE_REQUIRE_THROW( borrow(borrower, bitusd.amount(1000), asset(2000)), fc::exception ); - } catch( const fc::exception& e) { - edump((e.to_detail_string())); - throw; - } -} - -/** - * Black swan occurs when price feed falls, triggered by settlement - * order. - */ -BOOST_AUTO_TEST_CASE( black_swan_issue_346 ) -{ try { - ACTORS((buyer)(seller)(borrower)(borrower2)(settler)(feeder)); - - const asset_object& core = asset_id_type()(db); + update_feed_producers( pmark, { judge_id }); + price_feed feed; + feed.settlement_price = asset( 1, pmark.id ) / asset( 1 ); + publish_feed( pmark, judge, feed ); - int trial = 0; - const int64_t init_balance(1000000); + BOOST_TEST_MESSAGE( "Require throw for mismatch collateral amounts" ); + GRAPHENE_REQUIRE_THROW( borrow( dan, pmark.amount(1000), asset(2000) ), fc::exception ); - vector< const account_object* > actors{ &buyer, &seller, &borrower, &borrower2, &settler, &feeder }; + BOOST_TEST_MESSAGE( "Open position with equal collateral" ); + borrow( dan, pmark.amount(1000), asset(1000) ); - auto top_up = [&]() - { - for( const account_object* actor : actors ) - { - int64_t bal = get_balance( *actor, core ); - if( bal < init_balance ) - transfer( committee_account, actor->id, asset(init_balance - bal) ); - else if( bal > init_balance ) - transfer( actor->id, committee_account, asset(bal - init_balance) ); - } - }; + BOOST_TEST_MESSAGE( "Cover position with unequal asset should fail." 
); + GRAPHENE_REQUIRE_THROW( cover( dan, pmark.amount(500), asset(1000) ), fc::exception ); - auto setup_asset = [&]() -> const asset_object& - { - const asset_object& bitusd = create_bitasset("USDBIT"+fc::to_string(trial)+"X", feeder_id); - update_feed_producers( bitusd, {feeder.id} ); - BOOST_CHECK( !bitusd.bitasset_data(db).has_settlement() ); - trial++; - return bitusd; - }; + BOOST_TEST_MESSAGE( "Cover half of position with equal amounts" ); + cover( dan, pmark.amount(500), asset(500) ); - /* - * GRAPHENE_COLLATERAL_RATIO_DENOM - uint16_t maintenance_collateral_ratio = GRAPHENE_DEFAULT_MAINTENANCE_COLLATERAL_RATIO; - uint16_t maximum_short_squeeze_ratio = GRAPHENE_DEFAULT_MAX_SHORT_SQUEEZE_RATIO; - */ + BOOST_TEST_MESSAGE( "Verify that forced settlement fails before global settlement" ); + GRAPHENE_REQUIRE_THROW( force_settle( dan, pmark.amount(100) ), fc::exception ); - // situations to test: - // 1. minus short squeeze protection would be black swan, otherwise no - // 2. issue 346 (price feed drops followed by force settle, drop should trigger BS) - // 3. feed price < D/C of least collateralized short < call price < highest bid + BOOST_TEST_MESSAGE( "Shouldn't be allowed to force settle at more than 1 collateral per debt" ); + GRAPHENE_REQUIRE_THROW( force_global_settle( pmark, pmark.amount(100) / core.amount(105) ), fc::exception ); - auto set_price = [&]( - const asset_object& bitusd, - const price& settlement_price - ) - { - price_feed feed; - feed.settlement_price = settlement_price; - feed.core_exchange_rate = settlement_price; - wdump( (feed.max_short_squeeze_price()) ); - publish_feed( bitusd, feeder, feed ); - }; + BOOST_TEST_MESSAGE( "Globally settling" ); + force_global_settle( pmark, pmark.amount(100) / core.amount(95) ); - auto wait_for_settlement = [&]() - { - const auto& idx = db.get_index_type().indices().get(); - const auto& itr = idx.rbegin(); - if( itr == idx.rend() ) - return; - generate_blocks( itr->settlement_date ); - BOOST_CHECK( !idx.empty() ); - generate_block(); - BOOST_CHECK( idx.empty() ); - }; + BOOST_TEST_MESSAGE( "Cannot globally settle again" ); + GRAPHENE_REQUIRE_THROW( force_global_settle( pmark, pmark.amount(100) / core.amount(95) ), fc::exception ); - { - const asset_object& bitusd = setup_asset(); - top_up(); - set_price( bitusd, bitusd.amount(1) / core.amount(5) ); // $0.20 - borrow(borrower, bitusd.amount(100), asset(1000)); // 2x collat - transfer( borrower, settler, bitusd.amount(100) ); - - // drop to $0.02 and settle - BOOST_CHECK( !bitusd.bitasset_data(db).has_settlement() ); - set_price( bitusd, bitusd.amount(1) / core.amount(50) ); // $0.02 - BOOST_CHECK( bitusd.bitasset_data(db).has_settlement() ); - GRAPHENE_REQUIRE_THROW( borrow( borrower2, bitusd.amount(100), asset(10000) ), fc::exception ); - force_settle( settler, bitusd.amount(100) ); - - // wait for forced settlement to execute - // this would throw on Sep.18 testnet, see #346 - wait_for_settlement(); - } + BOOST_TEST_MESSAGE( "Verify that forced settlement succeeds after global settlement" ); + force_settle( dan, pmark.amount(100) ); - // issue 350 - { - // ok, new asset - const asset_object& bitusd = setup_asset(); - top_up(); - set_price( bitusd, bitusd.amount(40) / core.amount(1000) ); // $0.04 - borrow( borrower, bitusd.amount(100), asset(5000) ); // 2x collat - transfer( borrower, seller, bitusd.amount(100) ); - limit_order_id_type oid_019 = create_sell_order( seller, bitusd.amount(39), core.amount(2000) )->id; // this order is at $0.019, we should not be able to match 
against it - limit_order_id_type oid_020 = create_sell_order( seller, bitusd.amount(40), core.amount(2000) )->id; // this order is at $0.020, we should be able to match against it - set_price( bitusd, bitusd.amount(21) / core.amount(1000) ); // $0.021 - // - // We attempt to match against $0.019 order and black swan, - // and this is intended behavior. See discussion in ticket. - // - BOOST_CHECK( bitusd.bitasset_data(db).has_settlement() ); - BOOST_CHECK( db.find_object( oid_019 ) != nullptr ); - BOOST_CHECK( db.find_object( oid_020 ) == nullptr ); - } + // force settle the rest + force_settle( dan, pmark.amount(400) ); + BOOST_CHECK_EQUAL( 0, pmark_dd_id(db).current_supply.value ); + generate_block(~database::skip_transaction_dupe_check); + generate_blocks( db.get_dynamic_global_properties().next_maintenance_time ); + generate_block(); } catch( const fc::exception& e) { edump((e.to_detail_string())); throw; } } -BOOST_AUTO_TEST_CASE( prediction_market ) +BOOST_AUTO_TEST_CASE( prediction_market_resolves_to_0 ) { try { ACTORS((judge)(dan)(nathan)); const auto& pmark = create_prediction_market("PMARK", judge_id); + const auto pmark_dd_id = pmark.dynamic_asset_data_id; const auto& core = asset_id_type()(db); int64_t init_balance(1000000); @@ -408,39 +835,36 @@ BOOST_AUTO_TEST_CASE( prediction_market ) transfer(committee_account, dan_id, asset(init_balance)); transfer(committee_account, nathan_id, asset(init_balance)); - BOOST_TEST_MESSAGE( "Require throw for mismatch collateral amounts" ); - GRAPHENE_REQUIRE_THROW( borrow( dan, pmark.amount(1000), asset(2000) ), fc::exception ); + update_feed_producers( pmark, { judge_id }); + price_feed feed; + feed.settlement_price = asset( 1, pmark.id ) / asset( 1 ); + publish_feed( pmark, judge, feed ); - BOOST_TEST_MESSAGE( "Open position with equal collateral" ); borrow( dan, pmark.amount(1000), asset(1000) ); - - BOOST_TEST_MESSAGE( "Cover position with unequal asset should fail." 
); - GRAPHENE_REQUIRE_THROW( cover( dan, pmark.amount(500), asset(1000) ), fc::exception ); - - BOOST_TEST_MESSAGE( "Cover half of position with equal ammounts" ); - cover( dan, pmark.amount(500), asset(500) ); - - BOOST_TEST_MESSAGE( "Verify that forced settlment fails before global settlement" ); - GRAPHENE_REQUIRE_THROW( force_settle( dan, pmark.amount(100) ), fc::exception ); - - BOOST_TEST_MESSAGE( "Shouldn't be allowed to force settle at more than 1 collateral per debt" ); - GRAPHENE_REQUIRE_THROW( force_global_settle( pmark, pmark.amount(100) / core.amount(105) ), fc::exception ); - - force_global_settle( pmark, pmark.amount(100) / core.amount(95) ); + // force settle with 0 outcome + force_global_settle( pmark, pmark.amount(100) / core.amount(0) ); BOOST_TEST_MESSAGE( "Verify that forced settlment succeedes after global settlement" ); force_settle( dan, pmark.amount(100) ); - } catch( const fc::exception& e) { + // force settle the rest + force_settle( dan, pmark.amount(900) ); + BOOST_CHECK_EQUAL( 0, pmark_dd_id(db).current_supply.value ); + + generate_block(~database::skip_transaction_dupe_check); + generate_blocks( db.get_dynamic_global_properties().next_maintenance_time ); + generate_block(); +} catch( const fc::exception& e) { edump((e.to_detail_string())); throw; } } - BOOST_AUTO_TEST_CASE( create_account_test ) { try { + generate_blocks( HARDFORK_CORE_143_TIME ); + set_expiration( db, trx ); trx.operations.push_back(make_account()); account_create_operation op = trx.operations.back().get(); @@ -458,6 +882,23 @@ BOOST_AUTO_TEST_CASE( create_account_test ) REQUIRE_THROW_WITH_VALUE(op, name, ".aaaa"); REQUIRE_THROW_WITH_VALUE(op, options.voting_account, account_id_type(999999999)); + // Not allow voting for non-exist entities. + auto save_num_committee = op.options.num_committee; + auto save_num_witness = op.options.num_witness; + op.options.num_committee = 1; + op.options.num_witness = 0; + REQUIRE_THROW_WITH_VALUE(op, options.votes, boost::assign::list_of(vote_id_type("0:1")).convert_to_container>()); + op.options.num_witness = 1; + op.options.num_committee = 0; + REQUIRE_THROW_WITH_VALUE(op, options.votes, boost::assign::list_of(vote_id_type("1:19")).convert_to_container>()); + op.options.num_witness = 0; + REQUIRE_THROW_WITH_VALUE(op, options.votes, boost::assign::list_of(vote_id_type("2:19")).convert_to_container>()); + REQUIRE_THROW_WITH_VALUE(op, options.votes, boost::assign::list_of(vote_id_type("3:99")).convert_to_container>()); + GRAPHENE_REQUIRE_THROW( vote_id_type("2:a"), fc::exception ); + GRAPHENE_REQUIRE_THROW( vote_id_type(""), fc::exception ); + op.options.num_committee = save_num_committee; + op.options.num_witness = save_num_witness; + auto auth_bak = op.owner; op.owner.add_authority(account_id_type(9999999999), 10); trx.operations.back() = op; @@ -601,7 +1042,7 @@ BOOST_AUTO_TEST_CASE( create_committee_member ) REQUIRE_THROW_WITH_VALUE(op, fee, asset(-600)); trx.operations.back() = op; - committee_member_id_type committee_member_id = db.get_index_type>>().get_next_id(); + committee_member_id_type committee_member_id = db.get_index_type().get_next_id(); PUSH_TX( db, trx, ~0 ); const committee_member_object& d = committee_member_id(db); @@ -689,18 +1130,18 @@ BOOST_AUTO_TEST_CASE( create_uia ) asset_create_operation creator; creator.issuer = account_id_type(); creator.fee = asset(); - creator.symbol = "TEST"; + creator.symbol = UIA_TEST_SYMBOL; creator.common_options.max_supply = 100000000; creator.precision = 2; creator.common_options.market_fee_percent = 
GRAPHENE_MAX_MARKET_FEE_PERCENT/100; /*1%*/ creator.common_options.issuer_permissions = UIA_ASSET_ISSUER_PERMISSION_MASK; creator.common_options.flags = charge_market_fee; - creator.common_options.core_exchange_rate = price({asset(2),asset(1,asset_id_type(1))}); + creator.common_options.core_exchange_rate = price(asset(2),asset(1,asset_id_type(1))); trx.operations.push_back(std::move(creator)); PUSH_TX( db, trx, ~0 ); const asset_object& test_asset = test_asset_id(db); - BOOST_CHECK(test_asset.symbol == "TEST"); + BOOST_CHECK(test_asset.symbol == UIA_TEST_SYMBOL); BOOST_CHECK(asset(1, test_asset_id) * test_asset.options.core_exchange_rate == asset(2)); BOOST_CHECK((test_asset.options.flags & white_list) == 0); BOOST_CHECK(test_asset.options.max_supply == 100000000); @@ -725,8 +1166,8 @@ BOOST_AUTO_TEST_CASE( create_uia ) REQUIRE_THROW_WITH_VALUE(op, symbol, "AAA."); REQUIRE_THROW_WITH_VALUE(op, symbol, "AB CD"); REQUIRE_THROW_WITH_VALUE(op, symbol, "ABCDEFGHIJKLMNOPQRSTUVWXYZ"); - REQUIRE_THROW_WITH_VALUE(op, common_options.core_exchange_rate, price({asset(-100), asset(1)})); - REQUIRE_THROW_WITH_VALUE(op, common_options.core_exchange_rate, price({asset(100),asset(-1)})); + REQUIRE_THROW_WITH_VALUE(op, common_options.core_exchange_rate, price(asset(-100), asset(1))); + REQUIRE_THROW_WITH_VALUE(op, common_options.core_exchange_rate, price(asset(100),asset(-1))); } catch(fc::exception& e) { edump((e.to_detail_string())); throw; @@ -738,7 +1179,7 @@ BOOST_AUTO_TEST_CASE( update_uia ) using namespace graphene; try { INVOKE(create_uia); - const auto& test = get_asset("TEST"); + const auto& test = get_asset(UIA_TEST_SYMBOL); const auto& nathan = create_account("nathan"); asset_update_operation op; @@ -809,13 +1250,149 @@ BOOST_AUTO_TEST_CASE( update_uia ) } } +BOOST_AUTO_TEST_CASE( update_uia_issuer ) +{ + using namespace graphene; + using namespace graphene::chain; + using namespace graphene::chain::test; + try { + + // Lambda for creating accounts with 2 different keys + auto create_account_2_keys = [&]( const string name, + fc::ecc::private_key active, + fc::ecc::private_key owner ) { + + trx.operations.push_back(make_account()); + account_create_operation op = trx.operations.back().get(); + op.name = name; + op.active = authority(1, public_key_type(active.get_public_key()), 1); + op.owner = authority(1, public_key_type(owner.get_public_key()), 1); + signed_transaction trx; + trx.operations.push_back(op); + db.current_fee_schedule().set_fee( trx.operations.back() ); + set_expiration( db, trx ); + PUSH_TX( db, trx, ~0 ); + + return get_account(name); + }; + + auto update_asset_issuer = [&](const asset_object& current, + const account_object& new_issuer) { + asset_update_operation op; + op.issuer = current.issuer; + op.asset_to_update = current.id; + op.new_options = current.options; + op.new_issuer = new_issuer.id; + signed_transaction tx; + tx.operations.push_back( op ); + db.current_fee_schedule().set_fee( tx.operations.back() ); + set_expiration( db, tx ); + PUSH_TX( db, tx, ~0 ); + }; + + // Lambda for updating the issuer on chain using a particular key + auto update_issuer = [&](const asset_id_type asset_id, + const account_object& issuer, + const account_object& new_issuer, + const fc::ecc::private_key& key) + { + asset_update_issuer_operation op; + op.issuer = issuer.id; + op.new_issuer = new_issuer.id; + op.asset_to_update = asset_id; + signed_transaction tx; + tx.operations.push_back( op ); + db.current_fee_schedule().set_fee( tx.operations.back() ); + set_expiration( db, tx ); + 
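+         // the signing key is supplied by the caller so the checks further below can exercise owner-key
+         // versus active-key authority for asset_update_issuer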
sign(tx, key); + PUSH_TX( db, tx, database::skip_transaction_dupe_check ); + }; + + auto update_issuer_proposal = [&](const asset_id_type asset_id, + const account_object& issuer, + const account_object& new_issuer, + const fc::ecc::private_key& key) + { + asset_update_issuer_operation op; + op.issuer = issuer.id; + op.new_issuer = new_issuer.id; + op.asset_to_update = asset_id; + + const auto& curfees = *db.get_global_properties().parameters.current_fees; + const auto& proposal_create_fees = curfees.get(); + proposal_create_operation prop; + prop.fee_paying_account = issuer.id; + prop.proposed_ops.emplace_back( op ); + prop.expiration_time = db.head_block_time() + fc::days(1); + prop.fee = asset( proposal_create_fees.fee + proposal_create_fees.price_per_kbyte ); + + signed_transaction tx; + tx.operations.push_back( prop ); + db.current_fee_schedule().set_fee( tx.operations.back() ); + set_expiration( db, tx ); + sign( tx, key ); + PUSH_TX( db, tx ); + + }; + + // Create alice account + fc::ecc::private_key alice_owner = fc::ecc::private_key::regenerate(fc::digest("key1")); + fc::ecc::private_key alice_active = fc::ecc::private_key::regenerate(fc::digest("key2")); + fc::ecc::private_key bob_owner = fc::ecc::private_key::regenerate(fc::digest("key3")); + fc::ecc::private_key bob_active = fc::ecc::private_key::regenerate(fc::digest("key4")); + + // Create accounts + const auto& alice = create_account_2_keys("alice", alice_active, alice_owner); + const auto& bob = create_account_2_keys("bob", bob_active, bob_owner); + const account_id_type alice_id = alice.id; + const account_id_type bob_id = bob.id; + + // Create asset + const auto& test = create_user_issued_asset("UPDATEISSUER", alice_id(db), 0); + const asset_id_type test_id = test.id; + + BOOST_TEST_MESSAGE( "can't use this operation before the hardfork" ); + GRAPHENE_REQUIRE_THROW( update_issuer( test_id, alice_id(db), bob_id(db), alice_owner), fc::exception ); + + BOOST_TEST_MESSAGE( "can't use this operation before the hardfork (even if wrapped into a proposal)" ); + GRAPHENE_REQUIRE_THROW( update_issuer_proposal( test_id, alice_id(db), bob_id(db), alice_owner), fc::exception ); + + // Fast Forward to Hardfork time + generate_blocks( HARDFORK_CORE_199_TIME ); + + BOOST_TEST_MESSAGE( "After hardfork time, proposal goes through (but doesn't execute yet)" ); + update_issuer_proposal( test_id, alice_id(db), bob_id(db), alice_owner); + + BOOST_TEST_MESSAGE( "Can't change issuer if not my asset" ); + GRAPHENE_REQUIRE_THROW( update_issuer( test_id, bob_id(db), alice_id(db), bob_active ), fc::exception ); + GRAPHENE_REQUIRE_THROW( update_issuer( test_id, bob_id(db), alice_id(db), bob_owner ), fc::exception ); + + BOOST_TEST_MESSAGE( "Can't change issuer with alice's active key" ); + GRAPHENE_REQUIRE_THROW( update_issuer( test_id, alice_id(db), bob_id(db), alice_active ), fc::exception ); + + BOOST_TEST_MESSAGE( "Old method with asset_update needs to fail" ); + GRAPHENE_REQUIRE_THROW( update_asset_issuer( test_id(db), bob_id(db) ), fc::exception ); + + BOOST_TEST_MESSAGE( "Updating issuer to bob" ); + update_issuer( test_id, alice_id(db), bob_id(db), alice_owner ); + + BOOST_CHECK(test_id(db).issuer == bob_id); + + } + catch( const fc::exception& e ) + { + edump((e.to_detail_string())); + throw; + } +} + BOOST_AUTO_TEST_CASE( issue_uia ) { try { INVOKE(create_uia); INVOKE(create_account_test); - const asset_object& test_asset = *db.get_index_type().indices().get().find("TEST"); + const asset_object& test_asset = 
*db.get_index_type().indices().get().find(UIA_TEST_SYMBOL); const account_object& nathan_account = *db.get_index_type().indices().get().find("nathan"); asset_issue_operation op; @@ -854,7 +1431,7 @@ BOOST_AUTO_TEST_CASE( transfer_uia ) try { INVOKE(issue_uia); - const asset_object& uia = *db.get_index_type().indices().get().find("TEST"); + const asset_object& uia = *db.get_index_type().indices().get().find(UIA_TEST_SYMBOL); const account_object& nathan = *db.get_index_type().indices().get().find("nathan"); const account_object& committee = account_id_type()(db); @@ -882,7 +1459,7 @@ BOOST_AUTO_TEST_CASE( transfer_uia ) BOOST_AUTO_TEST_CASE( create_buy_uia_multiple_match_new ) { try { INVOKE( issue_uia ); - const asset_object& core_asset = get_asset( "TEST" ); + const asset_object& core_asset = get_asset( UIA_TEST_SYMBOL ); const asset_object& test_asset = get_asset( GRAPHENE_SYMBOL ); const account_object& nathan_account = get_account( "nathan" ); const account_object& buyer_account = create_account( "buyer" ); @@ -922,7 +1499,7 @@ BOOST_AUTO_TEST_CASE( create_buy_uia_multiple_match_new ) BOOST_AUTO_TEST_CASE( create_buy_exact_match_uia ) { try { INVOKE( issue_uia ); - const asset_object& test_asset = get_asset( "TEST" ); + const asset_object& test_asset = get_asset( UIA_TEST_SYMBOL ); const asset_object& core_asset = get_asset( GRAPHENE_SYMBOL ); const account_object& nathan_account = get_account( "nathan" ); const account_object& buyer_account = create_account( "buyer" ); @@ -963,7 +1540,7 @@ BOOST_AUTO_TEST_CASE( create_buy_exact_match_uia ) BOOST_AUTO_TEST_CASE( create_buy_uia_multiple_match_new_reverse ) { try { INVOKE( issue_uia ); - const asset_object& test_asset = get_asset( "TEST" ); + const asset_object& test_asset = get_asset( UIA_TEST_SYMBOL ); const asset_object& core_asset = get_asset( GRAPHENE_SYMBOL ); const account_object& nathan_account = get_account( "nathan" ); const account_object& buyer_account = create_account( "buyer" ); @@ -1003,7 +1580,7 @@ BOOST_AUTO_TEST_CASE( create_buy_uia_multiple_match_new_reverse ) BOOST_AUTO_TEST_CASE( create_buy_uia_multiple_match_new_reverse_fract ) { try { INVOKE( issue_uia ); - const asset_object& test_asset = get_asset( "TEST" ); + const asset_object& test_asset = get_asset( UIA_TEST_SYMBOL ); const asset_object& core_asset = get_asset( GRAPHENE_SYMBOL ); const account_object& nathan_account = get_account( "nathan" ); const account_object& buyer_account = create_account( "buyer" ); @@ -1051,7 +1628,7 @@ BOOST_AUTO_TEST_CASE( uia_fees ) enable_fees(); - const asset_object& test_asset = get_asset("TEST"); + const asset_object& test_asset = get_asset(UIA_TEST_SYMBOL); const asset_dynamic_data_object& asset_dynamic = test_asset.dynamic_asset_data_id(db); const account_object& nathan_account = get_account("nathan"); const account_object& committee_account = account_id_type()(db); @@ -1114,7 +1691,7 @@ BOOST_AUTO_TEST_CASE( uia_fees ) BOOST_AUTO_TEST_CASE( cancel_limit_order_test ) { try { INVOKE( issue_uia ); - const asset_object& test_asset = get_asset( "TEST" ); + const asset_object& test_asset = get_asset( UIA_TEST_SYMBOL ); const account_object& buyer_account = create_account( "buyer" ); transfer( committee_account(db), buyer_account, asset( 10000 ) ); @@ -1155,7 +1732,7 @@ BOOST_AUTO_TEST_CASE( witness_feeds ) vector active_witnesses; for( const witness_id_type& wit_id : global_props.active_witnesses ) active_witnesses.push_back( wit_id(db).witness_account ); - BOOST_REQUIRE_EQUAL(active_witnesses.size(), 10); + 
BOOST_REQUIRE_EQUAL(active_witnesses.size(), 10u); asset_publish_feed_operation op; op.publisher = active_witnesses[0]; @@ -1192,41 +1769,6 @@ BOOST_AUTO_TEST_CASE( witness_feeds ) } } - -/** - * Create an order such that when the trade executes at the - * requested price the resulting payout to one party is 0 - * - * I am unable to actually create such an order; I'm not sure it's possible. What I have done is create an order which - * broke an assert in the matching algorithm. - */ -BOOST_AUTO_TEST_CASE( trade_amount_equals_zero ) -{ - try { - INVOKE(issue_uia); - const asset_object& test = get_asset( "TEST" ); - const asset_object& core = get_asset( GRAPHENE_SYMBOL ); - const account_object& core_seller = create_account( "shorter1" ); - const account_object& core_buyer = get_account("nathan"); - - transfer( committee_account(db), core_seller, asset( 100000000 ) ); - - BOOST_CHECK_EQUAL(get_balance(core_buyer, core), 0); - BOOST_CHECK_EQUAL(get_balance(core_buyer, test), 10000000); - BOOST_CHECK_EQUAL(get_balance(core_seller, test), 0); - BOOST_CHECK_EQUAL(get_balance(core_seller, core), 100000000); - - //ilog( "=================================== START===================================\n\n"); - create_sell_order(core_seller, core.amount(1), test.amount(900000)); - //ilog( "=================================== STEP===================================\n\n"); - create_sell_order(core_buyer, test.amount(900001), core.amount(1)); - } catch( const fc::exception& e) { - edump((e.to_detail_string())); - throw; - } -} - - /** * Create an order that cannot be filled immediately and have the * transaction fail. @@ -1235,7 +1777,7 @@ BOOST_AUTO_TEST_CASE( limit_order_fill_or_kill ) { try { INVOKE(issue_uia); const account_object& nathan = get_account("nathan"); - const asset_object& test = get_asset("TEST"); + const asset_object& test = get_asset(UIA_TEST_SYMBOL); const asset_object& core = asset_id_type()(db); limit_order_create_operation op; @@ -1287,7 +1829,7 @@ BOOST_AUTO_TEST_CASE( witness_pay_test ) const asset_object* core = &asset_id_type()(db); const account_object* nathan = &get_account("nathan"); enable_fees(); - BOOST_CHECK_GT(db.current_fee_schedule().get().membership_lifetime_fee, 0); + BOOST_CHECK_GT(db.current_fee_schedule().get().membership_lifetime_fee, 0u); // Based on the size of the reserve fund later in the test, the witness budget will be set to this value const uint64_t ref_budget = ((uint64_t( db.current_fee_schedule().get().membership_lifetime_fee ) @@ -1297,10 +1839,10 @@ BOOST_AUTO_TEST_CASE( witness_pay_test ) ) >> GRAPHENE_CORE_ASSET_CYCLE_RATE_BITS ; // change this if ref_budget changes - BOOST_CHECK_EQUAL( ref_budget, 594 ); + BOOST_CHECK_EQUAL( ref_budget, 594u ); const uint64_t witness_ppb = ref_budget * 10 / 23 + 1; // change this if ref_budget changes - BOOST_CHECK_EQUAL( witness_ppb, 259 ); + BOOST_CHECK_EQUAL( witness_ppb, 259u ); // following two inequalities need to hold for maximal code coverage BOOST_CHECK_LT( witness_ppb * 2, ref_budget ); BOOST_CHECK_GT( witness_ppb * 3, ref_budget ); @@ -1346,28 +1888,28 @@ BOOST_AUTO_TEST_CASE( witness_pay_test ) generate_block(); BOOST_CHECK_EQUAL( last_witness_vbo_balance().value, 0 ); } - BOOST_CHECK_EQUAL( db.head_block_time().sec_since_epoch() - pay_fee_time, 24 * block_interval ); + BOOST_CHECK_EQUAL( db.head_block_time().sec_since_epoch() - pay_fee_time, 24u * block_interval ); schedule_maint(); // The 80% lifetime referral fee went to the committee account, which burned it. Check that it's here. 
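+   // With ref_budget = 594 and witness_ppb = 259 (594*10/23 + 1), the refreshed budget covers two full
+   // per-block payments (2*259 = 518) plus a final partial payment of 594 - 518 = 76; the witness pay
+   // checks below walk through exactly that sequence.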
BOOST_CHECK( core->reserved(db).value == 8000*prec ); generate_block(); BOOST_CHECK_EQUAL( core->reserved(db).value, 999999406 ); - BOOST_CHECK_EQUAL( db.get_dynamic_global_properties().witness_budget.value, ref_budget ); + BOOST_CHECK_EQUAL( db.get_dynamic_global_properties().witness_budget.value, (int64_t)ref_budget ); // first witness paid from old budget (so no pay) BOOST_CHECK_EQUAL( last_witness_vbo_balance().value, 0 ); // second witness finally gets paid! generate_block(); - BOOST_CHECK_EQUAL( last_witness_vbo_balance().value, witness_ppb ); - BOOST_CHECK_EQUAL( db.get_dynamic_global_properties().witness_budget.value, ref_budget - witness_ppb ); + BOOST_CHECK_EQUAL( last_witness_vbo_balance().value, (int64_t)witness_ppb ); + BOOST_CHECK_EQUAL( db.get_dynamic_global_properties().witness_budget.value, (int64_t)(ref_budget - witness_ppb) ); generate_block(); - BOOST_CHECK_EQUAL( last_witness_vbo_balance().value, witness_ppb ); - BOOST_CHECK_EQUAL( db.get_dynamic_global_properties().witness_budget.value, ref_budget - 2 * witness_ppb ); + BOOST_CHECK_EQUAL( last_witness_vbo_balance().value, (int64_t)witness_ppb ); + BOOST_CHECK_EQUAL( db.get_dynamic_global_properties().witness_budget.value, (int64_t)(ref_budget - 2 * witness_ppb) ); generate_block(); - BOOST_CHECK_LT( last_witness_vbo_balance().value, witness_ppb ); - BOOST_CHECK_EQUAL( last_witness_vbo_balance().value, ref_budget - 2 * witness_ppb ); + BOOST_CHECK_LT( last_witness_vbo_balance().value, (int64_t)witness_ppb ); + BOOST_CHECK_EQUAL( last_witness_vbo_balance().value, (int64_t)(ref_budget - 2 * witness_ppb) ); BOOST_CHECK_EQUAL( db.get_dynamic_global_properties().witness_budget.value, 0 ); generate_block(); @@ -1387,7 +1929,7 @@ BOOST_AUTO_TEST_CASE( reserve_asset_test ) { ACTORS((alice)(bob)(sam)(judge)); const auto& basset = create_bitasset("USDBIT", judge_id); - const auto& uasset = create_user_issued_asset("TEST"); + const auto& uasset = create_user_issued_asset(UIA_TEST_SYMBOL); const auto& passet = create_prediction_market("PMARK", judge_id); const auto& casset = asset_id_type()(db); @@ -1399,7 +1941,7 @@ BOOST_AUTO_TEST_CASE( reserve_asset_test ) transaction tx; tx.operations.push_back( op ); set_expiration( db, tx ); - db.push_transaction( tx, database::skip_authority_check | database::skip_tapos_check | database::skip_transaction_signatures ); + PUSH_TX( db, tx, database::skip_tapos_check | database::skip_transaction_signatures ); } ; auto _issue_uia = [&]( const account_object& recipient, asset amount ) @@ -1411,7 +1953,7 @@ BOOST_AUTO_TEST_CASE( reserve_asset_test ) transaction tx; tx.operations.push_back( op ); set_expiration( db, tx ); - db.push_transaction( tx, database::skip_authority_check | database::skip_tapos_check | database::skip_transaction_signatures ); + PUSH_TX( db, tx, database::skip_tapos_check | database::skip_transaction_signatures ); } ; int64_t init_balance = 10000; @@ -1432,6 +1974,7 @@ BOOST_AUTO_TEST_CASE( reserve_asset_test ) update_feed_producers( basset, {sam.id} ); price_feed current_feed; current_feed.settlement_price = basset.amount( 2 ) / casset.amount(100); + current_feed.maintenance_collateral_ratio = 1750; // need to set this explicitly, testnet has a different default publish_feed( basset, sam, current_feed ); borrow( alice_id, basset.amount( init_balance ), casset.amount( 100*init_balance ) ); BOOST_CHECK_EQUAL( get_balance( alice, basset ), init_balance ); @@ -1501,7 +2044,7 @@ BOOST_AUTO_TEST_CASE( cover_with_collateral_test ) transaction tx; tx.operations.push_back( op ); 
set_expiration( db, tx ); - db.push_transaction( tx, database::skip_authority_check | database::skip_tapos_check | database::skip_transaction_signatures ); + PUSH_TX( db, tx, database::skip_tapos_check | database::skip_transaction_signatures ); } ; // margin call requirement: 1.75x @@ -1550,7 +2093,7 @@ BOOST_AUTO_TEST_CASE( vesting_balance_create_test ) INVOKE( create_uia ); const asset_object& core = asset_id_type()(db); - const asset_object& test_asset = get_asset("TEST"); + const asset_object& test_asset = get_asset(UIA_TEST_SYMBOL); vesting_balance_create_operation op; op.fee = core.amount( 0 ); @@ -1601,7 +2144,7 @@ BOOST_AUTO_TEST_CASE( vesting_balance_withdraw_test ) generate_block(); const asset_object& core = asset_id_type()(db); - const asset_object& test_asset = get_asset( "TEST" ); + const asset_object& test_asset = get_asset( UIA_TEST_SYMBOL ); vesting_balance_withdraw_operation op; op.fee = core.amount( 0 ); diff --git a/tests/tests/operation_tests2.cpp b/tests/tests/operation_tests2.cpp index 8cefec4e24..a5c7b839c9 100644 --- a/tests/tests/operation_tests2.cpp +++ b/tests/tests/operation_tests2.cpp @@ -25,20 +25,18 @@ #include #include -#include #include -#include -#include #include #include #include #include -#include #include #include #include +#include + #include #include @@ -50,7 +48,67 @@ using namespace graphene::chain::test; BOOST_FIXTURE_TEST_SUITE( operation_tests, database_fixture ) -BOOST_AUTO_TEST_CASE( withdraw_permission_create ) +/*** + * A descriptor of a particular withdrawal period + */ +struct withdrawal_period_descriptor { + withdrawal_period_descriptor(const time_point_sec start, const time_point_sec end, const asset available, const asset claimed) + : period_start_time(start), period_end_time(end), available_this_period(available), claimed_this_period(claimed) {} + + // Start of period + time_point_sec period_start_time; + + // End of period + time_point_sec period_end_time; + + // Quantify how much is still available to be withdrawn during this period + asset available_this_period; + + // Quantify how much has already been claimed during this period + asset claimed_this_period; + + string const to_string() const { + string asset_id = fc::to_string(available_this_period.asset_id.space_id) + + "." + fc::to_string(available_this_period.asset_id.type_id) + + "." + fc::to_string(available_this_period.asset_id.instance.value); + string text = fc::to_string(available_this_period.amount.value) + + " " + asset_id + + " is available from " + period_start_time.to_iso_string() + + " to " + period_end_time.to_iso_string(); + return text; + } +}; + + +/*** + * Get a description of the current withdrawal period + * @param current_time Current time + * @return A description of the current period + */ +withdrawal_period_descriptor current_period(const withdraw_permission_object& permit, fc::time_point_sec current_time) { + // @todo [6] Is there a potential race condition where a call to available_this_period might become out of sync with this function's later use of period start time? 
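+   // Worked example (illustrative values only): if period_start_time is T, withdrawal_period_sec is 3600
+   // and current_time is T + 7500 seconds, the integer division below yields periods = 2, so the current
+   // period spans [T + 7200, T + 10800) seconds.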
+ asset available = permit.available_this_period(current_time); + asset claimed = asset(permit.withdrawal_limit.amount - available.amount, permit.withdrawal_limit.asset_id); + auto periods = (current_time - permit.period_start_time).to_seconds() / permit.withdrawal_period_sec; + time_point_sec current_period_start = permit.period_start_time + (periods * permit.withdrawal_period_sec); + time_point_sec current_period_end = current_period_start + permit.withdrawal_period_sec; + withdrawal_period_descriptor descriptor = withdrawal_period_descriptor(current_period_start, current_period_end, available, claimed); + + return descriptor; +} + +/** + * This auxiliary test is used for two purposes: + * (a) it checks the creation of withdrawal claims, + * (b) it is used as a precursor for tests that evaluate withdrawal claims. + * + * NOTE: This test verifies proper withdrawal claim behavior + * as it occurred before (for backward compatibility) + * Issue #23 was addressed. + * That issue is concerned with ensuring that the first claim + * can occur before the first withdrawal period. + */ +BOOST_AUTO_TEST_CASE( withdraw_permission_create_before_hardfork_23 ) { try { auto nathan_private_key = generate_private_key("nathan"); auto dan_private_key = generate_private_key("dan"); @@ -68,7 +126,7 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_create ) op.withdrawal_limit = asset(5); op.withdrawal_period_sec = fc::hours(1).to_seconds(); op.periods_until_expiration = 5; - op.period_start_time = db.head_block_time() + db.get_global_properties().parameters.block_interval*5; + op.period_start_time = db.head_block_time() + db.get_global_properties().parameters.block_interval*5; // 5 blocks after current blockchain time trx.operations.push_back(op); REQUIRE_OP_VALIDATION_FAILURE(op, withdrawal_limit, asset()); REQUIRE_OP_VALIDATION_FAILURE(op, periods_until_expiration, 0); @@ -81,13 +139,232 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_create ) trx.operations.back() = op; } sign( trx, nathan_private_key ); - db.push_transaction( trx ); + PUSH_TX( db, trx ); trx.clear(); } FC_LOG_AND_RETHROW() } -BOOST_AUTO_TEST_CASE( withdraw_permission_test ) +/** + * This auxiliary test is used for two purposes: + * (a) it checks the creation of withdrawal claims, + * (b) it is used as a precursor for tests that evaluate withdrawal claims. + * + * NOTE: This test verifies proper withdrawal claim behavior + * as it should occur after Issue #23 is addressed. + * That issue is concerned with ensuring that the first claim + * can occur before the first withdrawal period. 
+ */ +BOOST_AUTO_TEST_CASE( withdraw_permission_create_after_hardfork_23 ) { try { - INVOKE(withdraw_permission_create); + auto nathan_private_key = generate_private_key("nathan"); + auto dan_private_key = generate_private_key("dan"); + account_id_type nathan_id = create_account("nathan", nathan_private_key.get_public_key()).id; + account_id_type dan_id = create_account("dan", dan_private_key.get_public_key()).id; + + transfer(account_id_type(), nathan_id, asset(1000)); + generate_block(); + set_expiration( db, trx ); + + { + withdraw_permission_create_operation op; + op.authorized_account = dan_id; + op.withdraw_from_account = nathan_id; + op.withdrawal_limit = asset(5); + op.withdrawal_period_sec = fc::hours(1).to_seconds(); + op.periods_until_expiration = 5; + op.period_start_time = HARDFORK_23_TIME + db.get_global_properties().parameters.block_interval*5; // 5 blocks after fork time + trx.operations.push_back(op); + REQUIRE_OP_VALIDATION_FAILURE(op, withdrawal_limit, asset()); + REQUIRE_OP_VALIDATION_FAILURE(op, periods_until_expiration, 0); + REQUIRE_OP_VALIDATION_FAILURE(op, withdraw_from_account, dan_id); + REQUIRE_OP_VALIDATION_FAILURE(op, withdrawal_period_sec, 0); + REQUIRE_THROW_WITH_VALUE(op, withdrawal_limit, asset(10, asset_id_type(10))); + REQUIRE_THROW_WITH_VALUE(op, authorized_account, account_id_type(1000)); + REQUIRE_THROW_WITH_VALUE(op, period_start_time, fc::time_point_sec(10000)); + REQUIRE_THROW_WITH_VALUE(op, withdrawal_period_sec, 1); + trx.operations.back() = op; + } + sign( trx, nathan_private_key ); + PUSH_TX( db, trx ); + trx.clear(); +} FC_LOG_AND_RETHROW() } + +/** + * Test the claims of withdrawals both before and during + * authorized withdrawal periods. + * NOTE: The simulated elapse of blockchain time through the use of + * generate_blocks() must be carefully used in order to simulate + * this test. + * NOTE: This test verifies proper withdrawal claim behavior + * as it occurred before (for backward compatibility) + * Issue #23 was addressed. + * That issue is concerned with ensuring that the first claim + * can occur before the first withdrawal period. 
+ */ +BOOST_AUTO_TEST_CASE( withdraw_permission_test_before_hardfork_23 ) +{ try { + INVOKE(withdraw_permission_create_before_hardfork_23); + + auto nathan_private_key = generate_private_key("nathan"); + auto dan_private_key = generate_private_key("dan"); + account_id_type nathan_id = get_account("nathan").id; + account_id_type dan_id = get_account("dan").id; + withdraw_permission_id_type permit; + set_expiration( db, trx ); + + fc::time_point_sec first_start_time; + { + const withdraw_permission_object& permit_object = permit(db); + BOOST_CHECK(permit_object.authorized_account == dan_id); + BOOST_CHECK(permit_object.withdraw_from_account == nathan_id); + BOOST_CHECK(permit_object.period_start_time > db.head_block_time()); + first_start_time = permit_object.period_start_time; + BOOST_CHECK(permit_object.withdrawal_limit == asset(5)); + BOOST_CHECK(permit_object.withdrawal_period_sec == fc::hours(1).to_seconds()); + BOOST_CHECK(permit_object.expiration == first_start_time + permit_object.withdrawal_period_sec*5 ); + } + + generate_blocks(2); // Still before the first period, but BEFORE the real time during which "early" claims are checked + + { + withdraw_permission_claim_operation op; + op.withdraw_permission = permit; + op.withdraw_from_account = nathan_id; + op.withdraw_to_account = dan_id; + op.amount_to_withdraw = asset(1); + set_expiration( db, trx ); + + trx.operations.push_back(op); + sign( trx, dan_private_key ); // Transaction should be signed to be valid + + // This operation/transaction will be pushed early/before the first + // withdrawal period + // However, this will not cause an exception prior to HARDFORK_23_TIME + // because withdrawing before the first period was acceptable + // before the fix + PUSH_TX( db, trx ); // <-- Claim #1 + + + //Get to the actual withdrawal period + bool miss_intermediate_blocks = false; // Required to have generate_blocks() elapse flush to the time of interest + generate_blocks(first_start_time, miss_intermediate_blocks); + set_expiration( db, trx ); + + REQUIRE_THROW_WITH_VALUE(op, withdraw_permission, withdraw_permission_id_type(5)); + REQUIRE_THROW_WITH_VALUE(op, withdraw_from_account, dan_id); + REQUIRE_THROW_WITH_VALUE(op, withdraw_from_account, account_id_type()); + REQUIRE_THROW_WITH_VALUE(op, withdraw_to_account, nathan_id); + REQUIRE_THROW_WITH_VALUE(op, withdraw_to_account, account_id_type()); + REQUIRE_THROW_WITH_VALUE(op, amount_to_withdraw, asset(10)); + REQUIRE_THROW_WITH_VALUE(op, amount_to_withdraw, asset(6)); + set_expiration( db, trx ); + trx.clear(); + trx.operations.push_back(op); + sign( trx, dan_private_key ); + PUSH_TX( db, trx ); // <-- Claim #2 + + // would be legal on its own, but doesn't work because trx already withdrew + REQUIRE_THROW_WITH_VALUE(op, amount_to_withdraw, asset(5)); + + // Make sure we can withdraw again this period, as long as we're not exceeding the periodic limit + trx.clear(); + // withdraw 1 + trx.operations = {op}; + // make it different from previous trx so it's non-duplicate + trx.expiration += fc::seconds(1); + sign( trx, dan_private_key ); + PUSH_TX( db, trx ); // <-- Claim #3 + trx.clear(); + } + + // Account for three (3) claims of one (1) unit + BOOST_CHECK_EQUAL(get_balance(nathan_id, asset_id_type()), 997); + BOOST_CHECK_EQUAL(get_balance(dan_id, asset_id_type()), 3); + + { + const withdraw_permission_object& permit_object = permit(db); + BOOST_CHECK(permit_object.authorized_account == dan_id); + BOOST_CHECK(permit_object.withdraw_from_account == nathan_id); + 
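The three claims and the resulting 997/3 balances above come down to two pieces of bookkeeping: the active period is located from period_start_time by integer division (exactly what the current_period() helper earlier in this diff does), and each period has a budget of withdrawal_limit minus claimed_this_period. A self-contained sketch of that model with plain epoch seconds; the struct and function names are illustrative, not the chain's types.

#include <cassert>
#include <cstdint>

// Illustrative model of a withdraw permission: a budget of `limit` per period,
// with the stored period rolled forward lazily when a claim is evaluated
// (matching the "lazy update" comments in these tests).
struct permit_model {
    uint64_t period_start = 0;      // period_start_time (epoch seconds)
    uint64_t period_sec   = 3600;   // withdrawal_period_sec (1 hour, as in the tests)
    int64_t  limit        = 5;      // withdrawal_limit
    int64_t  claimed      = 0;      // claimed_this_period
};

// Same arithmetic as the current_period() helper: which period contains `now`?
uint64_t period_start_containing( const permit_model& p, uint64_t now )
{
    const uint64_t periods = ( now - p.period_start ) / p.period_sec;  // truncating division
    return p.period_start + periods * p.period_sec;
}

bool claim( permit_model& p, uint64_t now, int64_t amount )
{
    const uint64_t cur = period_start_containing( p, now );
    if( cur != p.period_start ) {   // a later period has begun: fresh budget
        p.period_start = cur;
        p.claimed = 0;
    }
    if( amount > p.limit - p.claimed ) return false;  // would exceed the periodic limit
    p.claimed += amount;
    return true;
}

int main()
{
    permit_model p;
    p.period_start = 1000;
    int64_t nathan = 1000, dan = 0;
    auto pay = [&]( uint64_t now, int64_t amt ) {
        bool ok = claim( p, now, amt );
        if( ok ) { nathan -= amt; dan += amt; }
        return ok;
    };

    assert(  pay( 1100, 1 ) );              // Claim #1
    assert(  pay( 1200, 1 ) );              // Claim #2, same period
    assert( !pay( 1300, 5 ) );              // five more would exceed the 5-per-period limit
    assert(  pay( 1400, 1 ) );              // Claim #3 still fits
    assert( nathan == 997 && dan == 3 && p.claimed == 3 );

    assert(  pay( 1000 + 3600 + 10, 5 ) );  // next period: the budget resets
    assert( p.claimed == 5 && p.period_start == 1000 + 3600 );
    return 0;
}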
BOOST_CHECK(permit_object.period_start_time == first_start_time); + BOOST_CHECK(permit_object.withdrawal_limit == asset(5)); + BOOST_CHECK(permit_object.withdrawal_period_sec == fc::hours(1).to_seconds()); + BOOST_CHECK_EQUAL(permit_object.claimed_this_period.value, 3 ); // <-- Account for three (3) claims of one (1) unit + BOOST_CHECK(permit_object.expiration == first_start_time + 5*permit_object.withdrawal_period_sec); + generate_blocks(first_start_time + permit_object.withdrawal_period_sec); + // lazy update: verify period_start_time isn't updated until new trx occurs + BOOST_CHECK(permit_object.period_start_time == first_start_time); + } + + { + // Leave Nathan with one unit + transfer(nathan_id, dan_id, asset(996)); + + // Attempt a withdrawal claim for units than available + withdraw_permission_claim_operation op; + op.withdraw_permission = permit; + op.withdraw_from_account = nathan_id; + op.withdraw_to_account = dan_id; + op.amount_to_withdraw = asset(5); + trx.operations.push_back(op); + set_expiration( db, trx ); + sign( trx, dan_private_key ); + //Throws because nathan doesn't have the money + GRAPHENE_CHECK_THROW(PUSH_TX( db, trx ), fc::exception); + + // Attempt a withdrawal claim for which nathan does have sufficient units + op.amount_to_withdraw = asset(1); + trx.clear(); + trx.operations = {op}; + set_expiration( db, trx ); + sign( trx, dan_private_key ); + PUSH_TX( db, trx ); + } + + BOOST_CHECK_EQUAL(get_balance(nathan_id, asset_id_type()), 0); + BOOST_CHECK_EQUAL(get_balance(dan_id, asset_id_type()), 1000); + trx.clear(); + transfer(dan_id, nathan_id, asset(1000)); + + { + const withdraw_permission_object& permit_object = permit(db); + BOOST_CHECK(permit_object.authorized_account == dan_id); + BOOST_CHECK(permit_object.withdraw_from_account == nathan_id); + BOOST_CHECK(permit_object.period_start_time == first_start_time + permit_object.withdrawal_period_sec); + BOOST_CHECK(permit_object.expiration == first_start_time + 5*permit_object.withdrawal_period_sec); + BOOST_CHECK(permit_object.withdrawal_limit == asset(5)); + BOOST_CHECK(permit_object.withdrawal_period_sec == fc::hours(1).to_seconds()); + generate_blocks(permit_object.expiration); + } + // Ensure the permit object has been garbage collected + BOOST_CHECK(db.find_object(permit) == nullptr); + + { + withdraw_permission_claim_operation op; + op.withdraw_permission = permit; + op.withdraw_from_account = nathan_id; + op.withdraw_to_account = dan_id; + op.amount_to_withdraw = asset(5); + trx.operations.push_back(op); + set_expiration( db, trx ); + sign( trx, dan_private_key ); + //Throws because the permission has expired + GRAPHENE_CHECK_THROW(PUSH_TX( db, trx ), fc::exception); + } + } FC_LOG_AND_RETHROW() } + +/** + * Test the claims of withdrawals both before and during + * authorized withdrawal periods. + * NOTE: The simulated elapse of blockchain time through the use of + * generate_blocks() must be carefully used in order to simulate + * this test. + * NOTE: This test verifies proper withdrawal claim behavior + * as it should occur after Issue #23 is addressed. + * That issue is concerned with ensuring that the first claim + * can occur before the first withdrawal period. 
+ */ +BOOST_AUTO_TEST_CASE( withdraw_permission_test_after_hardfork_23 ) +{ try { + INVOKE(withdraw_permission_create_after_hardfork_23); auto nathan_private_key = generate_private_key("nathan"); auto dan_private_key = generate_private_key("dan"); @@ -108,7 +385,7 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_test ) BOOST_CHECK(permit_object.expiration == first_start_time + permit_object.withdrawal_period_sec*5 ); } - generate_blocks(2); + generate_blocks(HARDFORK_23_TIME); // Still before the first period, but DURING the real time during which "early" claims are checked { withdraw_permission_claim_operation op; @@ -119,10 +396,13 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_test ) set_expiration( db, trx ); trx.operations.push_back(op); + sign( trx, dan_private_key ); // Transaction should be signed to be valid //Throws because we haven't entered the first withdrawal period yet. GRAPHENE_REQUIRE_THROW(PUSH_TX( db, trx ), fc::exception); //Get to the actual withdrawal period - generate_blocks(permit(db).period_start_time); + bool miss_intermediate_blocks = false; // Required to have generate_blocks() elapse flush to the time of interest + generate_blocks(first_start_time, miss_intermediate_blocks); + set_expiration( db, trx ); REQUIRE_THROW_WITH_VALUE(op, withdraw_permission, withdraw_permission_id_type(5)); REQUIRE_THROW_WITH_VALUE(op, withdraw_from_account, dan_id); @@ -135,7 +415,7 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_test ) trx.clear(); trx.operations.push_back(op); sign( trx, dan_private_key ); - PUSH_TX( db, trx ); + PUSH_TX( db, trx ); // <-- Claim #1 // would be legal on its own, but doesn't work because trx already withdrew REQUIRE_THROW_WITH_VALUE(op, amount_to_withdraw, asset(5)); @@ -147,10 +427,11 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_test ) // make it different from previous trx so it's non-duplicate trx.expiration += fc::seconds(1); sign( trx, dan_private_key ); - PUSH_TX( db, trx ); + PUSH_TX( db, trx ); // <-- Claim #2 trx.clear(); } + // Account for two (2) claims of one (1) unit BOOST_CHECK_EQUAL(get_balance(nathan_id, asset_id_type()), 998); BOOST_CHECK_EQUAL(get_balance(dan_id, asset_id_type()), 2); @@ -161,7 +442,7 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_test ) BOOST_CHECK(permit_object.period_start_time == first_start_time); BOOST_CHECK(permit_object.withdrawal_limit == asset(5)); BOOST_CHECK(permit_object.withdrawal_period_sec == fc::hours(1).to_seconds()); - BOOST_CHECK_EQUAL(permit_object.claimed_this_period.value, 2 ); + BOOST_CHECK_EQUAL(permit_object.claimed_this_period.value, 2 ); // <-- Account for two (2) claims of one (1) unit BOOST_CHECK(permit_object.expiration == first_start_time + 5*permit_object.withdrawal_period_sec); generate_blocks(first_start_time + permit_object.withdrawal_period_sec); // lazy update: verify period_start_time isn't updated until new trx occurs @@ -169,7 +450,10 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_test ) } { + // Leave Nathan with one unit transfer(nathan_id, dan_id, asset(997)); + + // Attempt a withdrawal claim for units than available withdraw_permission_claim_operation op; op.withdraw_permission = permit; op.withdraw_from_account = nathan_id; @@ -180,6 +464,8 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_test ) sign( trx, dan_private_key ); //Throws because nathan doesn't have the money GRAPHENE_CHECK_THROW(PUSH_TX( db, trx ), fc::exception); + + // Attempt a withdrawal claim for which nathan does have sufficient units op.amount_to_withdraw = asset(1); trx.clear(); trx.operations = {op}; @@ -222,7 
+508,7 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_test ) BOOST_AUTO_TEST_CASE( withdraw_permission_nominal_case ) { try { - INVOKE(withdraw_permission_create); + INVOKE(withdraw_permission_create_before_hardfork_23); auto nathan_private_key = generate_private_key("nathan"); auto dan_private_key = generate_private_key("dan"); @@ -230,6 +516,12 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_nominal_case ) account_id_type dan_id = get_account("dan").id; withdraw_permission_id_type permit; + // Wait until the permission period's start time + const withdraw_permission_object& first_permit_object = permit(db); + generate_blocks( + first_permit_object.period_start_time); + + // Loop through the withdrawal periods and claim a withdrawal while(true) { const withdraw_permission_object& permit_object = permit(db); @@ -247,6 +539,8 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_nominal_case ) // if no further withdrawals are possible BOOST_CHECK(db.find_object(permit) != nullptr); BOOST_CHECK( permit_object.claimed_this_period == 5 ); + BOOST_CHECK_EQUAL( permit_object.available_this_period(db.head_block_time()).amount.value, 0 ); + BOOST_CHECK_EQUAL( current_period(permit_object, db.head_block_time()).available_this_period.amount.value, 0 ); trx.clear(); generate_blocks( permit_object.period_start_time @@ -259,9 +553,338 @@ BOOST_AUTO_TEST_CASE( withdraw_permission_nominal_case ) BOOST_CHECK_EQUAL(get_balance(dan_id, asset_id_type()), 25); } FC_LOG_AND_RETHROW() } +/** + * Test asset whitelisting feature for withdrawals. + * Reproduces https://github.com/bitshares/bitshares-core/issues/942 and tests the fix for it. + */ +BOOST_AUTO_TEST_CASE( withdraw_permission_whitelist_asset_test ) +{ try { + + uint32_t skip = database::skip_witness_signature + | database::skip_transaction_signatures + | database::skip_transaction_dupe_check + | database::skip_block_size_check + | database::skip_tapos_check + | database::skip_merkle_check + ; + + generate_blocks( HARDFORK_415_TIME, true, skip ); // get over Graphene 415 asset whitelisting bug + generate_block( skip ); + + for( int i=0; i<2; i++ ) + { + if( i == 1 ) + { + generate_blocks( HARDFORK_CORE_942_TIME, true, skip ); + generate_block( skip ); + } + + int blocks = 0; + set_expiration( db, trx ); + + ACTORS( (nathan)(dan)(izzy) ); + + const asset_id_type uia_id = create_user_issued_asset( "ADVANCED", izzy_id(db), white_list ).id; + + issue_uia( nathan_id, asset(1000, uia_id) ); + + // Make a whitelist authority + { + BOOST_TEST_MESSAGE( "Changing the whitelist authority" ); + asset_update_operation uop; + uop.issuer = izzy_id; + uop.asset_to_update = uia_id; + uop.new_options = uia_id(db).options; + uop.new_options.whitelist_authorities.insert(izzy_id); + trx.operations.push_back(uop); + PUSH_TX( db, trx, ~0 ); + trx.operations.clear(); + } + + // Add dan to whitelist + { + upgrade_to_lifetime_member( izzy_id ); + + account_whitelist_operation wop; + wop.authorizing_account = izzy_id; + wop.account_to_list = dan_id; + wop.new_listing = account_whitelist_operation::white_listed; + trx.operations.push_back( wop ); + PUSH_TX( db, trx, ~0 ); + trx.operations.clear(); + } + + // create withdraw permission + { + withdraw_permission_create_operation op; + op.authorized_account = dan_id; + op.withdraw_from_account = nathan_id; + op.withdrawal_limit = asset(5, uia_id); + op.withdrawal_period_sec = fc::hours(1).to_seconds(); + op.periods_until_expiration = 5; + op.period_start_time = db.head_block_time() + 1; + trx.operations.push_back(op); + PUSH_TX( db, trx, ~0 ); + 
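This loop reproduces bitshares-core issue #942: before HARDFORK_CORE_942 a withdraw-permission claim ignored the asset's whitelist, while afterwards the claim must respect it, presumably failing here because nathan was never white-listed for ADVANCED. The sketch below is only a generic model of such a membership check, with invented names (asset_lists, is_authorized); it is not the chain library's API.

#include <cassert>
#include <set>
#include <string>

// Hypothetical model: an asset with whitelist authorities only authorizes accounts
// that some authority has white-listed and none has black-listed.
struct asset_lists {
    std::set<std::string> whitelisted;
    std::set<std::string> blacklisted;
    bool enforce_whitelist = true;
};

bool is_authorized( const asset_lists& a, const std::string& account )
{
    if( a.blacklisted.count( account ) ) return false;
    if( !a.enforce_whitelist ) return true;
    return a.whitelisted.count( account ) > 0;
}

int main()
{
    asset_lists advanced;                  // plays the role of the "ADVANCED" UIA above
    advanced.whitelisted.insert( "dan" );  // izzy white-lists dan, as in the test

    assert(  is_authorized( advanced, "dan" ) );
    assert( !is_authorized( advanced, "nathan" ) );  // nathan is not white-listed, so a
                                                     // post-942 claim involving him fails
    return 0;
}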
trx.operations.clear(); + } + + withdraw_permission_id_type first_permit_id; // first object must have id 0 + + generate_block( skip ); // get to the time point at which withdrawal is allowed + ++blocks; + set_expiration( db, trx ); + + // try to claim a withdrawal + { + withdraw_permission_claim_operation op; + op.withdraw_permission = first_permit_id; + op.withdraw_from_account = nathan_id; + op.withdraw_to_account = dan_id; + op.amount_to_withdraw = asset(5, uia_id); + trx.operations.push_back(op); + if( i == 0 ) // before hard fork, should pass + PUSH_TX( db, trx, ~0 ); + else // after hard fork, should throw + GRAPHENE_CHECK_THROW( PUSH_TX( db, trx, ~0 ), fc::assert_exception ); + trx.operations.clear(); + } + + // TODO add test cases for other white-listing features + + // undo above tx's and reset + generate_block( skip ); + ++blocks; + while( blocks > 0 ) + { + db.pop_block(); + --blocks; + } + } + +} FC_LOG_AND_RETHROW() } + + +/** + * This case checks to see whether the amount claimed within any particular withdrawal period + * is properly reflected within the permission object. + * The maximum withdrawal per period will be limited to 5 units. + * There are a total of 5 withdrawal periods that are permitted. + * The test will evaluate the following withdrawal pattern: + * (1) during Period 1, a withdrawal of 4 units, + * (2) during Period 2, a withdrawal of 1 unit, + * (3) during Period 3, a withdrawal of 0 units, + * (4) during Period 4, a withdrawal of 5 units, + * (5) during Period 5, a withdrawal of 3 units. + * + * Total withdrawal will be 13 units. + */ +BOOST_AUTO_TEST_CASE( withdraw_permission_incremental_case ) +{ try { + INVOKE(withdraw_permission_create_after_hardfork_23); + time_point_sec expected_first_period_start_time = HARDFORK_23_TIME + db.get_global_properties().parameters.block_interval*5; // Hard-coded to synchronize with withdraw_permission_create_after_hardfork_23() + uint64_t expected_period_duration_seconds = fc::hours(1).to_seconds(); // Hard-coded to synchronize with withdraw_permission_create_after_hardfork_23() + + auto nathan_private_key = generate_private_key("nathan"); + auto dan_private_key = generate_private_key("dan"); + account_id_type nathan_id = get_account("nathan").id; + account_id_type dan_id = get_account("dan").id; + withdraw_permission_id_type permit; + + // Wait until the permission period's start time + { + const withdraw_permission_object &before_first_permit_object = permit(db); + BOOST_CHECK_EQUAL(before_first_permit_object.period_start_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch()); + generate_blocks( + before_first_permit_object.period_start_time); + } + // Before withdrawing, check the period description + const withdraw_permission_object &first_permit_object = permit(db); + const withdrawal_period_descriptor first_period = current_period(first_permit_object, db.head_block_time()); + BOOST_CHECK_EQUAL(first_period.period_start_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch()); + BOOST_CHECK_EQUAL(first_period.period_end_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch() + expected_period_duration_seconds); + BOOST_CHECK_EQUAL(first_period.available_this_period.amount.value, 5); + + // Period 1: Withdraw 4 units + { + // Before claiming, check the period description + const withdraw_permission_object& permit_object = permit(db); + BOOST_CHECK(db.find_object(permit) != nullptr); + withdrawal_period_descriptor period_descriptor = current_period(permit_object, 
db.head_block_time()); + BOOST_CHECK_EQUAL(period_descriptor.available_this_period.amount.value, 5); + BOOST_CHECK_EQUAL(period_descriptor.period_start_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch() + (expected_period_duration_seconds * 0)); + BOOST_CHECK_EQUAL(period_descriptor.period_end_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch() + (expected_period_duration_seconds * 1)); + + // Claim + withdraw_permission_claim_operation op; + op.withdraw_permission = permit; + op.withdraw_from_account = nathan_id; + op.withdraw_to_account = dan_id; + op.amount_to_withdraw = asset(4); + trx.operations.push_back(op); + set_expiration( db, trx ); + sign( trx, dan_private_key ); + PUSH_TX( db, trx ); + + // After claiming, check the period description + BOOST_CHECK(db.find_object(permit) != nullptr); + BOOST_CHECK( permit_object.claimed_this_period == 4 ); + BOOST_CHECK_EQUAL( permit_object.claimed_this_period.value, 4 ); + period_descriptor = current_period(permit_object, db.head_block_time()); + BOOST_CHECK_EQUAL(period_descriptor.available_this_period.amount.value, 1); + BOOST_CHECK_EQUAL(period_descriptor.period_start_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch() + (expected_period_duration_seconds * 0)); + BOOST_CHECK_EQUAL(period_descriptor.period_end_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch() + (expected_period_duration_seconds * 1)); + + // Advance to next period + trx.clear(); + generate_blocks( + permit_object.period_start_time + + permit_object.withdrawal_period_sec ); + } + + // Period 2: Withdraw 1 units + { + // Before claiming, check the period description + const withdraw_permission_object& permit_object = permit(db); + BOOST_CHECK(db.find_object(permit) != nullptr); + withdrawal_period_descriptor period_descriptor = current_period(permit_object, db.head_block_time()); + BOOST_CHECK_EQUAL(period_descriptor.available_this_period.amount.value, 5); + BOOST_CHECK_EQUAL(period_descriptor.period_start_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch() + (expected_period_duration_seconds * 1)); + BOOST_CHECK_EQUAL(period_descriptor.period_end_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch() + (expected_period_duration_seconds * 2)); + + // Claim + withdraw_permission_claim_operation op; + op.withdraw_permission = permit; + op.withdraw_from_account = nathan_id; + op.withdraw_to_account = dan_id; + op.amount_to_withdraw = asset(1); + trx.operations.push_back(op); + set_expiration( db, trx ); + sign( trx, dan_private_key ); + PUSH_TX( db, trx ); + + // After claiming, check the period description + BOOST_CHECK(db.find_object(permit) != nullptr); + BOOST_CHECK( permit_object.claimed_this_period == 1 ); + BOOST_CHECK_EQUAL( permit_object.claimed_this_period.value, 1 ); + period_descriptor = current_period(permit_object, db.head_block_time()); + BOOST_CHECK_EQUAL(period_descriptor.available_this_period.amount.value, 4); + BOOST_CHECK_EQUAL(period_descriptor.period_start_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch() + (expected_period_duration_seconds * 1)); + BOOST_CHECK_EQUAL(period_descriptor.period_end_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch() + (expected_period_duration_seconds * 2)); + + // Advance to next period + trx.clear(); + generate_blocks( + permit_object.period_start_time + + permit_object.withdrawal_period_sec ); + } + + // Period 3: Withdraw 0 units + { + // 
Before claiming, check the period description + const withdraw_permission_object& permit_object = permit(db); + BOOST_CHECK(db.find_object(permit) != nullptr); + withdrawal_period_descriptor period_descriptor = current_period(permit_object, db.head_block_time()); + BOOST_CHECK_EQUAL(period_descriptor.available_this_period.amount.value, 5); + BOOST_CHECK_EQUAL(period_descriptor.period_start_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch() + (expected_period_duration_seconds * 2)); + BOOST_CHECK_EQUAL(period_descriptor.period_end_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch() + (expected_period_duration_seconds * 3)); + + // No claim + + // After doing nothing, check the period description + period_descriptor = current_period(permit_object, db.head_block_time()); + BOOST_CHECK_EQUAL(period_descriptor.available_this_period.amount.value, 5); + BOOST_CHECK_EQUAL(period_descriptor.period_start_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch() + (expected_period_duration_seconds * 2)); + BOOST_CHECK_EQUAL(period_descriptor.period_end_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch() + (expected_period_duration_seconds * 3)); + + // Advance to end of Period 3 + time_point_sec period_end_time = period_descriptor.period_end_time; + generate_blocks(period_end_time); + } + + // Period 4: Withdraw 5 units + { + // Before claiming, check the period description + const withdraw_permission_object& permit_object = permit(db); + BOOST_CHECK(db.find_object(permit) != nullptr); + withdrawal_period_descriptor period_descriptor = current_period(permit_object, db.head_block_time()); + BOOST_CHECK_EQUAL(period_descriptor.available_this_period.amount.value, 5); + BOOST_CHECK_EQUAL(period_descriptor.period_start_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch() + (expected_period_duration_seconds * 3)); + BOOST_CHECK_EQUAL(period_descriptor.period_end_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch() + (expected_period_duration_seconds * 4)); + + // Claim + withdraw_permission_claim_operation op; + op.withdraw_permission = permit; + op.withdraw_from_account = nathan_id; + op.withdraw_to_account = dan_id; + op.amount_to_withdraw = asset(5); + trx.operations.push_back(op); + set_expiration( db, trx ); + sign( trx, dan_private_key ); + PUSH_TX( db, trx ); + + // After claiming, check the period description + BOOST_CHECK(db.find_object(permit) != nullptr); + BOOST_CHECK( permit_object.claimed_this_period == 5 ); + BOOST_CHECK_EQUAL( permit_object.claimed_this_period.value, 5 ); + period_descriptor = current_period(permit_object, db.head_block_time()); + BOOST_CHECK_EQUAL(period_descriptor.available_this_period.amount.value, 0); + BOOST_CHECK_EQUAL(period_descriptor.period_start_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch() + (expected_period_duration_seconds * 3)); + BOOST_CHECK_EQUAL(period_descriptor.period_end_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch() + (expected_period_duration_seconds * 4)); + + // Advance to next period + trx.clear(); + generate_blocks( + permit_object.period_start_time + + permit_object.withdrawal_period_sec ); + } + + // Period 5: Withdraw 3 units + { + // Before claiming, check the period description + const withdraw_permission_object& permit_object = permit(db); + BOOST_CHECK(db.find_object(permit) != nullptr); + withdrawal_period_descriptor period_descriptor = 
current_period(permit_object, db.head_block_time()); + BOOST_CHECK_EQUAL(period_descriptor.available_this_period.amount.value, 5); + BOOST_CHECK_EQUAL(period_descriptor.period_start_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch() + (expected_period_duration_seconds * 4)); + BOOST_CHECK_EQUAL(period_descriptor.period_end_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch() + (expected_period_duration_seconds * 5)); + + // Claim + withdraw_permission_claim_operation op; + op.withdraw_permission = permit; + op.withdraw_from_account = nathan_id; + op.withdraw_to_account = dan_id; + op.amount_to_withdraw = asset(3); + trx.operations.push_back(op); + set_expiration( db, trx ); + sign( trx, dan_private_key ); + PUSH_TX( db, trx ); + + // After claiming, check the period description + BOOST_CHECK(db.find_object(permit) != nullptr); + BOOST_CHECK( permit_object.claimed_this_period == 3 ); + BOOST_CHECK_EQUAL( permit_object.claimed_this_period.value, 3 ); + period_descriptor = current_period(permit_object, db.head_block_time()); + BOOST_CHECK_EQUAL(period_descriptor.available_this_period.amount.value, 2); + BOOST_CHECK_EQUAL(period_descriptor.period_start_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch() + (expected_period_duration_seconds * 4)); + BOOST_CHECK_EQUAL(period_descriptor.period_end_time.sec_since_epoch(), expected_first_period_start_time.sec_since_epoch() + (expected_period_duration_seconds * 5)); + + // Advance to next period + trx.clear(); + generate_blocks( + permit_object.period_start_time + + permit_object.withdrawal_period_sec ); + } + + // Withdrawal periods completed + BOOST_CHECK(db.find_object(permit) == nullptr); + + BOOST_CHECK_EQUAL(get_balance(nathan_id, asset_id_type()), 987); + BOOST_CHECK_EQUAL(get_balance(dan_id, asset_id_type()), 13); +} FC_LOG_AND_RETHROW() } + BOOST_AUTO_TEST_CASE( withdraw_permission_update ) { try { - INVOKE(withdraw_permission_create); + INVOKE(withdraw_permission_create_before_hardfork_23); auto nathan_private_key = generate_private_key("nathan"); account_id_type nathan_id = get_account("nathan").id; @@ -345,7 +968,7 @@ BOOST_AUTO_TEST_CASE( mia_feeds ) } { const asset_bitasset_data_object& obj = bit_usd_id(db).bitasset_data(db); - BOOST_CHECK_EQUAL(obj.feeds.size(), 3); + BOOST_CHECK_EQUAL(obj.feeds.size(), 3u); BOOST_CHECK(obj.current_feed == price_feed()); } { @@ -404,7 +1027,7 @@ BOOST_AUTO_TEST_CASE( feed_limit_test ) op.issuer = bit_usd.issuer; trx.operations = {op}; sign( trx, nathan_private_key ); - db.push_transaction(trx); + PUSH_TX(db, trx); BOOST_TEST_MESSAGE("Checking current_feed is null"); BOOST_CHECK(bitasset.current_feed.settlement_price.is_null()); @@ -414,7 +1037,7 @@ BOOST_AUTO_TEST_CASE( feed_limit_test ) trx.clear(); trx.operations = {op}; sign( trx, nathan_private_key ); - db.push_transaction(trx); + PUSH_TX(db, trx); BOOST_TEST_MESSAGE("Checking current_feed is not null"); BOOST_CHECK(!bitasset.current_feed.settlement_price.is_null()); @@ -422,15 +1045,146 @@ BOOST_AUTO_TEST_CASE( feed_limit_test ) BOOST_AUTO_TEST_CASE( witness_create ) { try { + + uint32_t skip = database::skip_witness_signature + | database::skip_transaction_signatures + | database::skip_transaction_dupe_check + | database::skip_block_size_check + | database::skip_tapos_check + | database::skip_merkle_check + ; + generate_block(skip); + + auto wtplugin = app.register_plugin(); + wtplugin->plugin_set_app(&app); + boost::program_options::variables_map options; + + // init witness 
key cache + std::set< witness_id_type > caching_witnesses; + std::vector< std::string > witness_ids; + for( uint64_t i = 1; ; ++i ) + { + witness_id_type wid(i); + caching_witnesses.insert( wid ); + string wid_str = "\"" + std::string(object_id_type(wid)) + "\""; + witness_ids.push_back( wid_str ); + if( !db.find(wid) ) + break; + } + options.insert( std::make_pair( "witness-id", boost::program_options::variable_value( witness_ids, false ) ) ); + wtplugin->plugin_initialize(options); + wtplugin->plugin_startup(); + + const auto& wit_key_cache = wtplugin->get_witness_key_cache(); + + // setup test account ACTOR(nathan); upgrade_to_lifetime_member(nathan_id); trx.clear(); - witness_id_type nathan_witness_id = create_witness(nathan_id, nathan_private_key).id; + + // create witness + witness_id_type nathan_witness_id = create_witness(nathan_id, nathan_private_key, skip).id; + + // nathan should be in the cache + BOOST_CHECK_EQUAL( caching_witnesses.count(nathan_witness_id), 1u ); + + // nathan's key in the cache should still be null before a new block is generated + auto nathan_itr = wit_key_cache.find( nathan_witness_id ); + BOOST_CHECK( nathan_itr != wit_key_cache.end() && !nathan_itr->second.valid() ); + // Give nathan some voting stake transfer(committee_account, nathan_id, asset(10000000)); - generate_block(); + generate_block(skip); + + // nathan should be a witness now + BOOST_REQUIRE( db.find( nathan_witness_id ) ); + // nathan's key in the cache should have been stored now + nathan_itr = wit_key_cache.find( nathan_witness_id ); + BOOST_CHECK( nathan_itr != wit_key_cache.end() && nathan_itr->second.valid() + && *nathan_itr->second == nathan_private_key.get_public_key() ); + + // undo the block + db.pop_block(); + + // nathan should not be a witness now + BOOST_REQUIRE( !db.find( nathan_witness_id ) ); + // nathan's key in the cache should still be valid, since witness plugin doesn't get notified on popped block + nathan_itr = wit_key_cache.find( nathan_witness_id ); + BOOST_CHECK( nathan_itr != wit_key_cache.end() && nathan_itr->second.valid() + && *nathan_itr->second == nathan_private_key.get_public_key() ); + + // copy popped transactions + auto popped_tx = db._popped_tx; + + // generate another block + generate_block(skip); + + // nathan should not be a witness now + BOOST_REQUIRE( !db.find( nathan_witness_id ) ); + // nathan's key in the cache should be null now + BOOST_CHECK( nathan_itr != wit_key_cache.end() && !nathan_itr->second.valid() ); + + // push the popped tx + for( const auto& tx : popped_tx ) + { + PUSH_TX( db, tx, skip ); + } + // generate another block + generate_block(skip); set_expiration( db, trx ); + // nathan should be a witness now + BOOST_REQUIRE( db.find( nathan_witness_id ) ); + // nathan's key in the cache should have been stored now + nathan_itr = wit_key_cache.find( nathan_witness_id ); + BOOST_CHECK( nathan_itr != wit_key_cache.end() && nathan_itr->second.valid() + && *nathan_itr->second == nathan_private_key.get_public_key() ); + + // generate a new key + fc::ecc::private_key new_signing_key = fc::ecc::private_key::regenerate(fc::digest("nathan_new")); + + // update nathan's block signing key + { + witness_update_operation wuop; + wuop.witness_account = nathan_id; + wuop.witness = nathan_witness_id; + wuop.new_signing_key = new_signing_key.get_public_key(); + signed_transaction wu_trx; + wu_trx.operations.push_back( wuop ); + set_expiration( db, wu_trx ); + PUSH_TX( db, wu_trx, skip ); + } + + // nathan's key in the cache should still be old key + 
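The assertions in this test describe a signing-key cache kept by the witness plugin: tracked witness IDs start with an empty entry, entries are refreshed when a block is applied, and popping a block leaves them untouched because the plugin is not notified. A simplified stand-in using std::map and std::optional with invented names; the real cache type and its refresh hook live in the plugin, not in this diff.

#include <cassert>
#include <map>
#include <optional>
#include <string>

// Simplified stand-in: witness id -> cached signing key (empty until seen in a block).
using key_cache = std::map<int, std::optional<std::string>>;

// Called when a block is applied; `chain` is the post-block view of witness -> key.
void refresh( key_cache& cache, const std::map<int, std::string>& chain )
{
    for( auto& entry : cache )
    {
        auto it = chain.find( entry.first );
        if( it != chain.end() ) entry.second = it->second;
        else                    entry.second.reset();
    }
}

int main()
{
    key_cache cache;
    cache[7] = std::nullopt;            // witness 7 is tracked but does not exist yet

    std::map<int, std::string> chain;   // chain state visible after each applied block
    refresh( cache, chain );
    assert( !cache[7].has_value() );    // still null before the witness is created

    chain[7] = "nathan_key";
    refresh( cache, chain );            // block applied: the key is now cached
    assert( cache[7] == "nathan_key" );

    // Popping a block does not notify the plugin, so the stale key stays cached.
    chain.erase( 7 );                   // (deliberately no refresh() call here)
    assert( cache[7] == "nathan_key" );
    return 0;
}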
nathan_itr = wit_key_cache.find( nathan_witness_id ); + BOOST_CHECK( nathan_itr != wit_key_cache.end() && nathan_itr->second.valid() + && *nathan_itr->second == nathan_private_key.get_public_key() ); + + // generate another block + generate_block(skip); + + // nathan's key in the cache should have changed to new key + nathan_itr = wit_key_cache.find( nathan_witness_id ); + BOOST_CHECK( nathan_itr != wit_key_cache.end() && nathan_itr->second.valid() + && *nathan_itr->second == new_signing_key.get_public_key() ); + + // undo the block + db.pop_block(); + + // nathan's key in the cache should still be new key, since witness plugin doesn't get notified on popped block + nathan_itr = wit_key_cache.find( nathan_witness_id ); + BOOST_CHECK( nathan_itr != wit_key_cache.end() && nathan_itr->second.valid() + && *nathan_itr->second == new_signing_key.get_public_key() ); + + // generate another block + generate_block(skip); + + // nathan's key in the cache should be old key now + nathan_itr = wit_key_cache.find( nathan_witness_id ); + BOOST_CHECK( nathan_itr != wit_key_cache.end() && nathan_itr->second.valid() + && *nathan_itr->second == nathan_private_key.get_public_key() ); + + // voting { account_update_operation op; op.account = nathan_id; @@ -476,8 +1230,27 @@ BOOST_AUTO_TEST_CASE( witness_create ) * issuer and only if the global settle bit is set. */ BOOST_AUTO_TEST_CASE( global_settle_test ) -{ - try { +{ try { + uint32_t skip = database::skip_witness_signature + | database::skip_transaction_signatures + | database::skip_transaction_dupe_check + | database::skip_block_size_check + | database::skip_tapos_check + | database::skip_merkle_check + ; + + generate_block( skip ); + + for( int i=0; i<2; i++ ) + { + if( i == 1 ) + { + auto mi = db.get_global_properties().parameters.maintenance_interval; + generate_blocks(HARDFORK_CORE_342_TIME - mi, true, skip); + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time, true, skip); + } + set_expiration( db, trx ); + ACTORS((nathan)(ben)(valentine)(dan)); asset_id_type bit_usd_id = create_bitasset("USDBIT", nathan_id, 100, global_settle | charge_market_fee).get_id(); @@ -548,11 +1321,23 @@ BOOST_AUTO_TEST_CASE( global_settle_test ) BOOST_CHECK_EQUAL(get_balance(valentine_id, bit_usd_id), 0); BOOST_CHECK_EQUAL(get_balance(valentine_id, asset_id_type()), 10045); BOOST_CHECK_EQUAL(get_balance(ben_id, bit_usd_id), 0); - BOOST_CHECK_EQUAL(get_balance(ben_id, asset_id_type()), 10091); + if( i == 1 ) // BSIP35: better rounding + { + BOOST_CHECK_EQUAL(get_balance(ben_id, asset_id_type()), 10090); + BOOST_CHECK_EQUAL(get_balance(dan_id, asset_id_type()), 9850); + } + else + { + BOOST_CHECK_EQUAL(get_balance(ben_id, asset_id_type()), 10091); + BOOST_CHECK_EQUAL(get_balance(dan_id, asset_id_type()), 9849); + } BOOST_CHECK_EQUAL(get_balance(dan_id, bit_usd_id), 0); - BOOST_CHECK_EQUAL(get_balance(dan_id, asset_id_type()), 9849); -} FC_LOG_AND_RETHROW() -} + + // undo above tx's and reset + generate_block( skip ); + db.pop_block(); + } +} FC_LOG_AND_RETHROW() } BOOST_AUTO_TEST_CASE( worker_create_test ) { try { @@ -632,7 +1417,7 @@ BOOST_AUTO_TEST_CASE( worker_pay_test ) trx.operations.push_back(op); sign( trx, nathan_private_key ); PUSH_TX( db, trx ); - trx.signatures.clear(); + trx.clear_signatures(); REQUIRE_THROW_WITH_VALUE(op, amount, asset(1)); trx.clear(); } @@ -667,7 +1452,7 @@ BOOST_AUTO_TEST_CASE( worker_pay_test ) trx.operations.back() = op; sign( trx, nathan_private_key ); PUSH_TX( db, trx ); - trx.signatures.clear(); + 
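The i == 1 branch just above (and the matching branch in force_settle_test below) encodes BSIP35's "better rounding": payouts that used to be truncated are rounded up, which is why ben's expected balance drops by one unit and dan's rises by one. The force-settle branch spells out the idiom as (a*b + d - 1)/d; a quick check of those figures:

#include <cassert>
#include <cstdint>

// Ceiling of (a*b)/d in integer arithmetic, the idiom the BSIP35 payouts use.
int64_t mul_div_round_up( int64_t a, int64_t b, int64_t d )
{
    return ( a * b + d - 1 ) / d;
}

int main()
{
    assert( 2950 * 99 / 100 == 2920 );                          // truncation (pre-BSIP35 style)
    assert( mul_div_round_up( 2950, 99, 100 ) == 2921 );        // rounded up instead
    assert( 49 + mul_div_round_up( 2950, 99, 100 ) == 2970 );   // call3_payout below
    assert( mul_div_round_up( 4000, 99, 100 ) == 3960 );        // call4_payout: no remainder, unchanged
    assert( mul_div_round_up( 5000, 99, 100 ) == 4950 );        // call5_payout: likewise
    return 0;
}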
trx.clear_signatures(); trx.clear(); } @@ -823,6 +1608,27 @@ BOOST_AUTO_TEST_CASE( burn_worker_test ) BOOST_AUTO_TEST_CASE( force_settle_test ) { + uint32_t skip = database::skip_witness_signature + | database::skip_transaction_signatures + | database::skip_transaction_dupe_check + | database::skip_block_size_check + | database::skip_tapos_check + | database::skip_merkle_check + ; + + generate_block( skip ); + + for( int i=0; i<2; i++ ) + { + if( i == 1 ) + { + auto mi = db.get_global_properties().parameters.maintenance_interval; + generate_blocks(HARDFORK_CORE_342_TIME - mi, true, skip); + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time, true, skip); + } + set_expiration( db, trx ); + + int blocks = 0; try { ACTORS( (nathan)(shorter1)(shorter2)(shorter3)(shorter4)(shorter5) ); @@ -942,7 +1748,9 @@ BOOST_AUTO_TEST_CASE( force_settle_test ) BOOST_CHECK( settle_id(db).owner == nathan_id ); // Wait for settlement to take effect - generate_blocks(settle_id(db).settlement_date); + generate_blocks( settle_id(db).settlement_date, true, skip ); + blocks += 2; + BOOST_CHECK(db.find(settle_id) == nullptr); BOOST_CHECK_EQUAL( bitusd_id(db).bitasset_data(db).force_settled_volume.value, 50 ); BOOST_CHECK_EQUAL( get_balance(nathan_id, bitusd_id), 14950); @@ -972,7 +1780,8 @@ BOOST_AUTO_TEST_CASE( force_settle_test ) // c2 2000 : 3998 1.9990 550 settled // c1 1000 : 2000 2.0000 - generate_blocks( settle_id(db).settlement_date ); + generate_blocks( settle_id(db).settlement_date, true, skip ); + blocks += 2; int64_t call1_payout = 0; int64_t call2_payout = 550*99/100; @@ -980,6 +1789,13 @@ BOOST_AUTO_TEST_CASE( force_settle_test ) int64_t call4_payout = 4000*99/100; int64_t call5_payout = 5000*99/100; + if( i == 1 ) // BSIP35: better rounding + { + call3_payout = 49 + (2950*99+100-1)/100; // round up + call4_payout = (4000*99+100-1)/100; // round up + call5_payout = (5000*99+100-1)/100; // round up + } + BOOST_CHECK_EQUAL( get_balance(shorter1_id, core_id), initial_balance-2*1000 ); // full collat still tied up BOOST_CHECK_EQUAL( get_balance(shorter2_id, core_id), initial_balance-2*1999 ); // full collat still tied up BOOST_CHECK_EQUAL( get_balance(shorter3_id, core_id), initial_balance-call3_payout ); // initial balance minus transfer to Nathan (as BitUSD) @@ -1007,6 +1823,16 @@ BOOST_AUTO_TEST_CASE( force_settle_test ) edump((e.to_detail_string())); throw; } + + // undo above tx's and reset + generate_block( skip ); + ++blocks; + while( blocks > 0 ) + { + db.pop_block(); + --blocks; + } + } } BOOST_AUTO_TEST_CASE( assert_op_test ) @@ -1068,7 +1894,7 @@ BOOST_AUTO_TEST_CASE( balance_object_test ) auto _sign = [&]( signed_transaction& tx, const private_key_type& key ) { tx.sign( key, db.get_chain_id() ); }; - db.open(td.path(), [this]{return genesis_state;}); + db.open(td.path(), [this]{return genesis_state;}, "TEST"); const balance_object& balance = balance_id_type()(db); BOOST_CHECK_EQUAL(balance.balance.amount.value, 1); BOOST_CHECK_EQUAL(balance_id_type(1)(db).balance.amount.value, 1); @@ -1081,13 +1907,13 @@ BOOST_AUTO_TEST_CASE( balance_object_test ) trx.operations = {op}; _sign( trx, n_key ); // Fail because I'm claiming from an address which hasn't signed - GRAPHENE_CHECK_THROW(db.push_transaction(trx), tx_missing_other_auth); + GRAPHENE_CHECK_THROW(PUSH_TX(db, trx), tx_missing_other_auth); trx.clear(); op.balance_to_claim = balance_id_type(); op.balance_owner_key = n_key.get_public_key(); trx.operations = {op}; _sign( trx, n_key ); - db.push_transaction(trx); + PUSH_TX(db, 
trx); // Not using fixture's get_balance() here because it uses fixture's db, not my override BOOST_CHECK_EQUAL(db.get_balance(op.deposit_to_account, asset_id_type()).amount.value, 1); @@ -1115,36 +1941,36 @@ BOOST_AUTO_TEST_CASE( balance_object_test ) _sign( trx, n_key ); _sign( trx, v1_key ); // Attempting to claim 1 from a balance with 0 available - GRAPHENE_CHECK_THROW(db.push_transaction(trx), balance_claim_invalid_claim_amount); + GRAPHENE_CHECK_THROW(PUSH_TX(db, trx), balance_claim_invalid_claim_amount); op.balance_to_claim = vesting_balance_2.id; op.total_claimed.amount = 151; op.balance_owner_key = v2_key.get_public_key(); trx.operations = {op}; - trx.signatures.clear(); + trx.clear_signatures(); _sign( trx, n_key ); _sign( trx, v2_key ); // Attempting to claim 151 from a balance with 150 available - GRAPHENE_CHECK_THROW(db.push_transaction(trx), balance_claim_invalid_claim_amount); + GRAPHENE_CHECK_THROW(PUSH_TX(db, trx), balance_claim_invalid_claim_amount); op.balance_to_claim = vesting_balance_2.id; op.total_claimed.amount = 100; op.balance_owner_key = v2_key.get_public_key(); trx.operations = {op}; - trx.signatures.clear(); + trx.clear_signatures(); _sign( trx, n_key ); _sign( trx, v2_key ); - db.push_transaction(trx); + PUSH_TX(db, trx); BOOST_CHECK_EQUAL(db.get_balance(op.deposit_to_account, asset_id_type()).amount.value, 101); BOOST_CHECK_EQUAL(vesting_balance_2.balance.amount.value, 300); op.total_claimed.amount = 10; trx.operations = {op}; - trx.signatures.clear(); + trx.clear_signatures(); _sign( trx, n_key ); _sign( trx, v2_key ); // Attempting to claim twice within a day - GRAPHENE_CHECK_THROW(db.push_transaction(trx), balance_claim_claimed_too_often); + GRAPHENE_CHECK_THROW(PUSH_TX(db, trx), balance_claim_claimed_too_often); db.generate_block(db.get_slot_time(1), db.get_scheduled_witness(1), init_account_priv_key, skip_flags); slot = db.get_slot_at_time(vesting_balance_1.vesting_policy->begin_timestamp + 60); @@ -1155,10 +1981,10 @@ BOOST_AUTO_TEST_CASE( balance_object_test ) op.total_claimed.amount = 500; op.balance_owner_key = v1_key.get_public_key(); trx.operations = {op}; - trx.signatures.clear(); + trx.clear_signatures(); _sign( trx, n_key ); _sign( trx, v1_key ); - db.push_transaction(trx); + PUSH_TX(db, trx); BOOST_CHECK(db.find_object(op.balance_to_claim) == nullptr); BOOST_CHECK_EQUAL(db.get_balance(op.deposit_to_account, asset_id_type()).amount.value, 601); @@ -1166,11 +1992,11 @@ BOOST_AUTO_TEST_CASE( balance_object_test ) op.balance_owner_key = v2_key.get_public_key(); op.total_claimed.amount = 10; trx.operations = {op}; - trx.signatures.clear(); + trx.clear_signatures(); _sign( trx, n_key ); _sign( trx, v2_key ); // Attempting to claim twice within a day - GRAPHENE_CHECK_THROW(db.push_transaction(trx), balance_claim_claimed_too_often); + GRAPHENE_CHECK_THROW(PUSH_TX(db, trx), balance_claim_claimed_too_often); db.generate_block(db.get_slot_time(1), db.get_scheduled_witness(1), init_account_priv_key, skip_flags); slot = db.get_slot_at_time(db.head_block_time() + fc::days(1)); @@ -1179,10 +2005,10 @@ BOOST_AUTO_TEST_CASE( balance_object_test ) op.total_claimed = vesting_balance_2.balance; trx.operations = {op}; - trx.signatures.clear(); + trx.clear_signatures(); _sign( trx, n_key ); _sign( trx, v2_key ); - db.push_transaction(trx); + PUSH_TX(db, trx); BOOST_CHECK(db.find_object(op.balance_to_claim) == nullptr); BOOST_CHECK_EQUAL(db.get_balance(op.deposit_to_account, asset_id_type()).amount.value, 901); } FC_LOG_AND_RETHROW() } @@ -1202,7 +2028,7 @@ 
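The tail of balance_object_test above exercises two claim rules: a claim larger than the currently available amount is rejected (balance_claim_invalid_claim_amount), and a second claim within the same day is rejected (balance_claim_claimed_too_often). The following is a minimal model of just those two rules with invented names; the real vesting arithmetic behind "available" is richer than this.

#include <cassert>
#include <cstdint>

// Minimal model: `available` stands in for whatever the vesting policy currently
// allows, ONE_DAY for the claim-frequency limit quoted in the test comments.
struct claim_state {
    int64_t  available       = 150;
    uint64_t last_claim_time = 0;     // epoch seconds
    bool     claimed_before  = false;
};

const uint64_t ONE_DAY = 24 * 60 * 60;

bool try_claim( claim_state& s, int64_t amount, uint64_t now )
{
    if( amount > s.available ) return false;                                   // invalid_claim_amount
    if( s.claimed_before && now < s.last_claim_time + ONE_DAY ) return false;  // claimed_too_often
    s.available -= amount;
    s.last_claim_time = now;
    s.claimed_before = true;
    return true;
}

int main()
{
    claim_state s;                                   // 150 available, as for vesting_balance_2 above
    assert( !try_claim( s, 151, 1000 ) );            // more than is available: rejected
    assert(  try_claim( s, 100, 1000 ) );            // fine
    assert( !try_claim( s,  10, 2000 ) );            // a second claim the same day: rejected
    assert(  try_claim( s,  10, 1000 + ONE_DAY ) );  // a day later it works again
    return 0;
}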
BOOST_AUTO_TEST_CASE(transfer_with_memo) { op.memo->set_message(alice_private_key, bob_public_key, "Dear Bob,\n\nMoney!\n\nLove, Alice"); trx.operations = {op}; trx.sign(alice_private_key, db.get_chain_id()); - db.push_transaction(trx); + PUSH_TX(db, trx); BOOST_CHECK_EQUAL(get_balance(alice_id, asset_id_type()), 500); BOOST_CHECK_EQUAL(get_balance(bob_id, asset_id_type()), 500); @@ -1228,7 +2054,7 @@ BOOST_AUTO_TEST_CASE(zero_second_vbo) transaction tx; tx.operations.push_back( op ); set_expiration( db, tx ); - db.push_transaction( tx, database::skip_authority_check | database::skip_tapos_check | database::skip_transaction_signatures ); + PUSH_TX( db, tx, database::skip_tapos_check | database::skip_transaction_signatures ); } enable_fees(); upgrade_to_lifetime_member(alice_id); @@ -1331,6 +2157,79 @@ BOOST_AUTO_TEST_CASE(zero_second_vbo) } FC_LOG_AND_RETHROW() } +BOOST_AUTO_TEST_CASE( vbo_withdraw_different ) +{ + try + { + ACTORS((alice)(izzy)); + // don't pay witnesses so we have some worker budget to work with + + // transfer(account_id_type(), alice_id, asset(1000)); + + asset_id_type stuff_id = create_user_issued_asset( "STUFF", izzy_id(db), 0 ).id; + issue_uia( alice_id, asset( 1000, stuff_id ) ); + + // deposit STUFF with linear vesting policy + vesting_balance_id_type vbid; + { + linear_vesting_policy_initializer pinit; + pinit.begin_timestamp = db.head_block_time(); + pinit.vesting_cliff_seconds = 30; + pinit.vesting_duration_seconds = 30; + + vesting_balance_create_operation create_op; + create_op.creator = alice_id; + create_op.owner = alice_id; + create_op.amount = asset(100, stuff_id); + create_op.policy = pinit; + + signed_transaction create_tx; + create_tx.operations.push_back( create_op ); + set_expiration( db, create_tx ); + sign(create_tx, alice_private_key); + + processed_transaction ptx = PUSH_TX( db, create_tx ); + vbid = ptx.operation_results[0].get(); + } + + // wait for VB to mature + generate_blocks( 30 ); + + BOOST_CHECK( vbid(db).get_allowed_withdraw( db.head_block_time() ) == asset(100, stuff_id) ); + + // bad withdrawal op (wrong asset) + { + vesting_balance_withdraw_operation op; + + op.vesting_balance = vbid; + op.amount = asset(100); + op.owner = alice_id; + + signed_transaction withdraw_tx; + withdraw_tx.operations.push_back(op); + set_expiration( db, withdraw_tx ); + sign( withdraw_tx, alice_private_key ); + GRAPHENE_CHECK_THROW( PUSH_TX( db, withdraw_tx ), fc::exception ); + } + + // good withdrawal op + { + vesting_balance_withdraw_operation op; + + op.vesting_balance = vbid; + op.amount = asset(100, stuff_id); + op.owner = alice_id; + + signed_transaction withdraw_tx; + withdraw_tx.operations.push_back(op); + set_expiration( db, withdraw_tx ); + sign( withdraw_tx, alice_private_key ); + PUSH_TX( db, withdraw_tx ); + } + } + FC_LOG_AND_RETHROW() +} + // TODO: Write linear VBO tests BOOST_AUTO_TEST_CASE( top_n_special ) @@ -1530,26 +2429,26 @@ BOOST_AUTO_TEST_CASE( buyback ) sign( tx, philbin_private_key ); // Alice and Philbin signed, but asset issuer is invalid - GRAPHENE_CHECK_THROW( db.push_transaction(tx), account_create_buyback_incorrect_issuer ); + GRAPHENE_CHECK_THROW( PUSH_TX(db, tx), account_create_buyback_incorrect_issuer ); - tx.signatures.clear(); + tx.clear_signatures(); tx.operations.back().get< account_create_operation >().extensions.value.buyback_options->asset_to_buy_issuer = izzy_id; sign( tx, philbin_private_key ); // Izzy didn't sign - GRAPHENE_CHECK_THROW( db.push_transaction(tx), tx_missing_active_auth ); + GRAPHENE_CHECK_THROW( 
PUSH_TX(db, tx), tx_missing_active_auth ); sign( tx, izzy_private_key ); // OK - processed_transaction ptx = db.push_transaction( tx ); + processed_transaction ptx = PUSH_TX( db, tx ); rex_id = ptx.operation_results.back().get< object_id_type >(); // Try to create another account rex2 which is bbo on same asset - tx.signatures.clear(); + tx.clear_signatures(); tx.operations.back().get< account_create_operation >().name = "rex2"; sign( tx, izzy_private_key ); sign( tx, philbin_private_key ); - GRAPHENE_CHECK_THROW( db.push_transaction(tx), account_create_buyback_already_exists ); + GRAPHENE_CHECK_THROW( PUSH_TX(db, tx), account_create_buyback_already_exists ); } // issue some BUYME to Alice diff --git a/tests/tests/serialization_tests.cpp b/tests/tests/serialization_tests.cpp index fb87c4c44c..59e16f01e3 100644 --- a/tests/tests/serialization_tests.cpp +++ b/tests/tests/serialization_tests.cpp @@ -64,8 +64,8 @@ BOOST_AUTO_TEST_CASE( serialization_json_test ) op.to = account_id_type(2); op.amount = asset(100); trx.operations.push_back( op ); - fc::variant packed(trx); - signed_transaction unpacked = packed.as(); + fc::variant packed(trx, GRAPHENE_MAX_NESTED_OBJECTS); + signed_transaction unpacked = packed.as( GRAPHENE_MAX_NESTED_OBJECTS ); unpacked.validate(); BOOST_CHECK( digest(trx) == digest(unpacked) ); } catch (fc::exception& e) { diff --git a/tests/tests/settle_tests.cpp b/tests/tests/settle_tests.cpp new file mode 100644 index 0000000000..07244cd225 --- /dev/null +++ b/tests/tests/settle_tests.cpp @@ -0,0 +1,1503 @@ +/* + * Copyright (c) 2018 oxarbitrage, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include + +#include + +#include + +#include "../common/database_fixture.hpp" + +using namespace graphene::chain; +using namespace graphene::chain::test; + +BOOST_FIXTURE_TEST_SUITE( settle_tests, database_fixture ) + +BOOST_AUTO_TEST_CASE( settle_rounding_test ) +{ + try { + // get around Graphene issue #615 feed expiration bug + generate_blocks(HARDFORK_615_TIME); + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + set_expiration( db, trx ); + + ACTORS((paul)(michael)(rachel)(alice)(bob)(ted)(joe)(jim)); + + // create assets + const auto& bitusd = create_bitasset("USDBIT", paul_id); + const auto& bitcny = create_bitasset("CNYBIT", paul_id); + const auto& core = asset_id_type()(db); + asset_id_type bitusd_id = bitusd.id; + asset_id_type bitcny_id = bitcny.id; + asset_id_type core_id = core.id; + + // fund accounts + transfer(committee_account, michael_id, asset( 100000000 ) ); + transfer(committee_account, paul_id, asset(10000000)); + transfer(committee_account, alice_id, asset(10000000)); + transfer(committee_account, bob_id, asset(10000000)); + transfer(committee_account, jim_id, asset(10000000)); + + // add a feed to asset + update_feed_producers( bitusd, {paul.id} ); + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd.amount( 100 ) / core.amount(5); + publish_feed( bitusd, paul, current_feed ); + + // paul gets some bitusd + const call_order_object& call_paul = *borrow( paul, bitusd.amount(1000), core.amount(100) ); + call_order_id_type call_paul_id = call_paul.id; + BOOST_REQUIRE_EQUAL( get_balance( paul, bitusd ), 1000 ); + + // and transfer some to rachel + transfer(paul.id, rachel.id, asset(200, bitusd.id)); + + BOOST_CHECK_EQUAL(get_balance(rachel, core), 0); + BOOST_CHECK_EQUAL(get_balance(rachel, bitusd), 200); + BOOST_CHECK_EQUAL(get_balance(michael, bitusd), 0); + BOOST_CHECK_EQUAL(get_balance(michael, core), 100000000); + + // michael gets some bitusd + const call_order_object& call_michael = *borrow(michael, bitusd.amount(6), core.amount(8)); + call_order_id_type call_michael_id = call_michael.id; + + // add settle order and check rounding issue + operation_result result = force_settle(rachel, bitusd.amount(4)); + + force_settlement_id_type settle_id = result.get(); + BOOST_CHECK_EQUAL( settle_id(db).balance.amount.value, 4 ); + + BOOST_CHECK_EQUAL(get_balance(rachel, core), 0); + BOOST_CHECK_EQUAL(get_balance(rachel, bitusd), 196); + BOOST_CHECK_EQUAL(get_balance(michael, bitusd), 6); + BOOST_CHECK_EQUAL(get_balance(michael, core), 99999992); + BOOST_CHECK_EQUAL(get_balance(paul, core), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul, bitusd), 800); + + BOOST_CHECK_EQUAL( 1000, call_paul.debt.value ); + BOOST_CHECK_EQUAL( 100, call_paul.collateral.value ); + BOOST_CHECK_EQUAL( 6, call_michael.debt.value ); + BOOST_CHECK_EQUAL( 8, call_michael.collateral.value ); + + generate_blocks( db.head_block_time() + fc::hours(20) ); + set_expiration( db, trx ); + + // default feed and settlement expires at the same time + // adding new feed so we have valid price to exit + update_feed_producers( bitusd_id(db), {alice_id} ); + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd_id(db).amount( 100 ) / core_id(db).amount(5); + publish_feed( bitusd_id(db), alice_id(db), current_feed ); + + // now yes expire settlement + generate_blocks( 
db.head_block_time() + fc::hours(6) ); + + // checks + BOOST_CHECK( !db.find( settle_id ) ); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 0); // rachel paid 4 usd and got nothing + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 196); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 6); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 99999992); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 800); + + BOOST_CHECK_EQUAL( 996, call_paul_id(db).debt.value ); + BOOST_CHECK_EQUAL( 100, call_paul_id(db).collateral.value ); + BOOST_CHECK_EQUAL( 6, call_michael_id(db).debt.value ); + BOOST_CHECK_EQUAL( 8, call_michael_id(db).collateral.value ); + + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 1002 ); // 1000 + 6 - 4 + + // settle more and check rounding issue + // by default 20% of total supply can be settled per maintenance interval, here we test less than it + set_expiration( db, trx ); + operation_result result2 = force_settle(rachel_id(db), bitusd_id(db).amount(34)); + + force_settlement_id_type settle_id2 = result2.get(); + BOOST_CHECK_EQUAL( settle_id2(db).balance.amount.value, 34 ); + + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 0); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 162); // 196-34 + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 6); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 99999992); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 800); + + BOOST_CHECK_EQUAL( 996, call_paul_id(db).debt.value ); + BOOST_CHECK_EQUAL( 100, call_paul_id(db).collateral.value ); + BOOST_CHECK_EQUAL( 6, call_michael_id(db).debt.value ); + BOOST_CHECK_EQUAL( 8, call_michael_id(db).collateral.value ); + + generate_blocks( db.head_block_time() + fc::hours(10) ); + set_expiration( db, trx ); + + // adding new feed so we have valid price to exit + update_feed_producers( bitusd_id(db), {alice_id} ); + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd_id(db).amount( 100 ) / core_id(db).amount(5); + publish_feed( bitusd_id(db), alice_id(db), current_feed ); + + // now yes expire settlement + generate_blocks( db.head_block_time() + fc::hours(16) ); + set_expiration( db, trx ); + + // checks + BOOST_CHECK( !db.find( settle_id2 ) ); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 1); // rachel got 1 core and paid 34 usd + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 162); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 6); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 99999992); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 800); + + BOOST_CHECK_EQUAL( 962, call_paul_id(db).debt.value ); // 996 - 34 + BOOST_CHECK_EQUAL( 99, call_paul_id(db).collateral.value ); // 100 - 1 + BOOST_CHECK_EQUAL( 6, call_michael_id(db).debt.value ); + BOOST_CHECK_EQUAL( 8, call_michael_id(db).collateral.value ); + + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 968 ); // 1002 - 34 + + // prepare for more tests + transfer(paul_id, rachel_id, asset(300, bitusd_id)); + borrow(michael_id(db), bitusd_id(db).amount(2), core_id(db).amount(3)); + + // settle even more and 
check rounding issue + // by default 20% of total supply can be settled per maintenance interval, here we test more than it + const operation_result result3 = force_settle(rachel_id(db), bitusd_id(db).amount(3)); + const operation_result result4 = force_settle(rachel_id(db), bitusd_id(db).amount(434)); + const operation_result result5 = force_settle(rachel_id(db), bitusd_id(db).amount(5)); + + force_settlement_id_type settle_id3 = result3.get(); + BOOST_CHECK_EQUAL( settle_id3(db).balance.amount.value, 3 ); + + force_settlement_id_type settle_id4 = result4.get(); + BOOST_CHECK_EQUAL( settle_id4(db).balance.amount.value, 434 ); + + force_settlement_id_type settle_id5 = result5.get(); + BOOST_CHECK_EQUAL( settle_id5(db).balance.amount.value, 5 ); + + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 1); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 20); // 162 + 300 - 3 - 434 - 5 + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 8); // 6 + 2 + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 99999989); // 99999992 - 3 + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 500); // 800 - 300 + + BOOST_CHECK_EQUAL( 962, call_paul_id(db).debt.value ); + BOOST_CHECK_EQUAL( 99, call_paul_id(db).collateral.value ); + BOOST_CHECK_EQUAL( 8, call_michael_id(db).debt.value ); // 6 + 2 + BOOST_CHECK_EQUAL( 11, call_michael_id(db).collateral.value ); // 8 + 3 + + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 970 ); // 968 + 2 + + generate_blocks( db.head_block_time() + fc::hours(4) ); + set_expiration( db, trx ); + + // adding new feed so we have valid price to exit + update_feed_producers( bitusd_id(db), {alice_id} ); + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd_id(db).amount( 101 ) / core_id(db).amount(5); + publish_feed( bitusd_id(db), alice_id(db), current_feed ); + + update_feed_producers( bitcny_id(db), {alice_id} ); + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitcny_id(db).amount( 101 ) / core_id(db).amount(50); + publish_feed( bitcny_id(db), alice_id(db), current_feed ); + + // now yes expire settlement + generate_blocks( db.head_block_time() + fc::hours(22) ); + set_expiration( db, trx ); + + // checks + // maximum amount that can be settled now is round_down(970 * 20%) = 194. + // settle_id3 (amount was 3) will be filled and get nothing. 
+ // settle_id4 will pay 194 - 3 = 191 usd, will get round_down(191*5/101) = 9 core + BOOST_CHECK( !db.find( settle_id3 ) ); + BOOST_CHECK_EQUAL( settle_id4(db).balance.amount.value, 243 ); // 434 - 191 + BOOST_CHECK_EQUAL( settle_id5(db).balance.amount.value, 5 ); // no change, since it's after settle_id4 + + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 10); // 1 + 9 + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 20); // no change + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 8); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 99999989); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 500); + + BOOST_CHECK_EQUAL( 768, call_paul_id(db).debt.value ); // 962 - 3 - 191 + BOOST_CHECK_EQUAL( 90, call_paul_id(db).collateral.value ); // 99 - 9 + BOOST_CHECK_EQUAL( 8, call_michael_id(db).debt.value ); + BOOST_CHECK_EQUAL( 11, call_michael_id(db).collateral.value ); + + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 776 ); // 970 - 3 - 191 + BOOST_CHECK_EQUAL( bitusd_id(db).bitasset_data(db).force_settled_volume.value, 194 ); // 3 + 191 + + generate_block(); + + // michael borrows more + set_expiration( db, trx ); + borrow(michael_id(db), bitusd_id(db).amount(18), core_id(db).amount(200)); + + BOOST_CHECK_EQUAL( settle_id4(db).balance.amount.value, 243 ); + BOOST_CHECK_EQUAL( settle_id5(db).balance.amount.value, 5 ); + + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 10); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 20); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 26); // 8 + 18 + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 99999789); // 99999989 - 200 + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 500); + + BOOST_CHECK_EQUAL( 768, call_paul_id(db).debt.value ); + BOOST_CHECK_EQUAL( 90, call_paul_id(db).collateral.value ); + BOOST_CHECK_EQUAL( 26, call_michael_id(db).debt.value ); // 8 + 18 + BOOST_CHECK_EQUAL( 211, call_michael_id(db).collateral.value ); // 11 + 200 + + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 794 ); // 776 + 18 + BOOST_CHECK_EQUAL( bitusd_id(db).bitasset_data(db).force_settled_volume.value, 194 ); + + generate_block(); + + // maximum amount that can be settled now is round_down((794+194) * 20%) = 197, + // already settled 194, so 197 - 194 = 3 more usd can be settled, + // so settle_id4 will pay 3 usd and get nothing + BOOST_CHECK_EQUAL( settle_id4(db).balance.amount.value, 240 ); // 243 - 3 + BOOST_CHECK_EQUAL( settle_id5(db).balance.amount.value, 5 ); + + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 10); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 20); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 26); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 99999789); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 500); + + BOOST_CHECK_EQUAL( 765, call_paul_id(db).debt.value ); // 768 - 3 + BOOST_CHECK_EQUAL( 90, call_paul_id(db).collateral.value ); + BOOST_CHECK_EQUAL( 26, call_michael_id(db).debt.value ); + BOOST_CHECK_EQUAL( 211, call_michael_id(db).collateral.value ); + + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 791 ); // 794 - 3 + BOOST_CHECK_EQUAL( 
bitusd_id(db).bitasset_data(db).force_settled_volume.value, 197 ); // 194 + 3 + + // michael borrows a little more + set_expiration( db, trx ); + borrow(michael_id(db), bitusd_id(db).amount(20), core_id(db).amount(20)); + + BOOST_CHECK_EQUAL( settle_id4(db).balance.amount.value, 240 ); + BOOST_CHECK_EQUAL( settle_id5(db).balance.amount.value, 5 ); + + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 10); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 20); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 46); // 26 + 20 + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 99999769); // 99999789 - 20 + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 500); + + BOOST_CHECK_EQUAL( 765, call_paul_id(db).debt.value ); + BOOST_CHECK_EQUAL( 90, call_paul_id(db).collateral.value ); + BOOST_CHECK_EQUAL( 46, call_michael_id(db).debt.value ); // 26 + 20 + BOOST_CHECK_EQUAL( 231, call_michael_id(db).collateral.value ); // 211 + 20 + + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 811 ); // 791 + 20 + BOOST_CHECK_EQUAL( bitusd_id(db).bitasset_data(db).force_settled_volume.value, 197 ); + + generate_block(); + + // maximum amount that can be settled now is round_down((811+197) * 20%) = 201, + // already settled 197, so 201 - 197 = 4 more usd can be settled, + // so settle_id4 will pay 4 usd and get nothing + + BOOST_CHECK_EQUAL( settle_id4(db).balance.amount.value, 236 ); // 240 - 4 + BOOST_CHECK_EQUAL( settle_id5(db).balance.amount.value, 5 ); // no change, since it's after settle_id4 + + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 10); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 20); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 46); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 99999769); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 500); + + BOOST_CHECK_EQUAL( 761, call_paul_id(db).debt.value ); // 765 - 4 + BOOST_CHECK_EQUAL( 90, call_paul_id(db).collateral.value ); + BOOST_CHECK_EQUAL( 46, call_michael_id(db).debt.value ); + BOOST_CHECK_EQUAL( 231, call_michael_id(db).collateral.value ); + + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 807 ); // 811 - 4 + BOOST_CHECK_EQUAL( bitusd_id(db).bitasset_data(db).force_settled_volume.value, 201 ); // 197 + 4 + + generate_block(); + + // jim borrow some cny + call_order_id_type call_jim_id = borrow(jim_id(db), bitcny_id(db).amount(2000), core_id(db).amount(2000))->id; + + BOOST_CHECK_EQUAL( 2000, call_jim_id(db).debt.value ); + BOOST_CHECK_EQUAL( 2000, call_jim_id(db).collateral.value ); + + BOOST_CHECK_EQUAL(get_balance(jim_id(db), core_id(db)), 9998000); + BOOST_CHECK_EQUAL(get_balance(jim_id(db), bitcny_id(db)), 2000); + + // jim transfer some cny to joe + transfer(jim_id, joe_id, asset(1500, bitcny_id)); + + BOOST_CHECK_EQUAL(get_balance(jim_id(db), core_id(db)), 9998000); + BOOST_CHECK_EQUAL(get_balance(jim_id(db), bitcny_id(db)), 500); + BOOST_CHECK_EQUAL(get_balance(joe_id(db), core_id(db)), 0); + BOOST_CHECK_EQUAL(get_balance(joe_id(db), bitcny_id(db)), 1500); + + generate_block(); + + // give ted some usd + transfer(paul_id, ted_id, asset(100, bitusd_id)); + BOOST_CHECK_EQUAL(get_balance(ted_id(db), core_id(db)), 0); + BOOST_CHECK_EQUAL(get_balance(ted_id(db), bitusd_id(db)), 100); // new: 100 + 
BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 400); // 500 - 100 + + // ted settle + const operation_result result6 = force_settle(ted_id(db), bitusd_id(db).amount(20)); + const operation_result result7 = force_settle(ted_id(db), bitusd_id(db).amount(21)); + const operation_result result8 = force_settle(ted_id(db), bitusd_id(db).amount(22)); + + force_settlement_id_type settle_id6 = result6.get(); + BOOST_CHECK_EQUAL( settle_id6(db).balance.amount.value, 20 ); + + force_settlement_id_type settle_id7 = result7.get(); + BOOST_CHECK_EQUAL( settle_id7(db).balance.amount.value, 21 ); + + force_settlement_id_type settle_id8 = result8.get(); + BOOST_CHECK_EQUAL( settle_id8(db).balance.amount.value, 22 ); + + BOOST_CHECK_EQUAL(get_balance(ted_id(db), core_id(db)), 0); + BOOST_CHECK_EQUAL(get_balance(ted_id(db), bitusd_id(db)), 37); // 100 - 20 - 21 - 22 + + // joe settle + const operation_result result101 = force_settle(joe_id(db), bitcny_id(db).amount(100)); + const operation_result result102 = force_settle(joe_id(db), bitcny_id(db).amount(1000)); + const operation_result result103 = force_settle(joe_id(db), bitcny_id(db).amount(300)); + + force_settlement_id_type settle_id101 = result101.get(); + BOOST_CHECK_EQUAL( settle_id101(db).balance.amount.value, 100 ); + + force_settlement_id_type settle_id102 = result102.get(); + BOOST_CHECK_EQUAL( settle_id102(db).balance.amount.value, 1000 ); + + force_settlement_id_type settle_id103 = result103.get(); + BOOST_CHECK_EQUAL( settle_id103(db).balance.amount.value, 300 ); + + BOOST_CHECK_EQUAL(get_balance(joe_id(db), core_id(db)), 0); + BOOST_CHECK_EQUAL(get_balance(joe_id(db), bitcny_id(db)), 100); // 1500 - 100 - 1000 - 300 + + generate_block(); + + // adding new feed so we have valid price to exit + update_feed_producers( bitusd_id(db), {alice_id} ); + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd_id(db).amount( 101 ) / core_id(db).amount(5); + publish_feed( bitusd_id(db), alice_id(db), current_feed ); + + update_feed_producers( bitcny_id(db), {alice_id} ); + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitcny_id(db).amount( 101 ) / core_id(db).amount(50); + publish_feed( bitcny_id(db), alice_id(db), current_feed ); + + // get to another maintenance interval + generate_blocks( db.head_block_time() + fc::hours(22) ); + set_expiration( db, trx ); + + // maximum amount that can be settled now is round_down(807 * 20%) = 161, + // settle_id4 will pay 161 usd, will get round_down(161*5/101) = 7 core + BOOST_CHECK_EQUAL( settle_id4(db).balance.amount.value, 75 ); // 236 - 161 + BOOST_CHECK_EQUAL( settle_id5(db).balance.amount.value, 5 ); // no change, since it's after settle_id4 + BOOST_CHECK_EQUAL( settle_id6(db).balance.amount.value, 20 ); // no change since not expired + BOOST_CHECK_EQUAL( settle_id7(db).balance.amount.value, 21 ); // no change since not expired + BOOST_CHECK_EQUAL( settle_id8(db).balance.amount.value, 22 ); // no change since not expired + + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 17); // 10 + 7 + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 20); // no change + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 46); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 99999769); + 
BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 400); + BOOST_CHECK_EQUAL(get_balance(ted_id(db), core_id(db)), 0); + BOOST_CHECK_EQUAL(get_balance(ted_id(db), bitusd_id(db)), 37); + + BOOST_CHECK_EQUAL( 600, call_paul_id(db).debt.value ); // 761 - 161 + BOOST_CHECK_EQUAL( 83, call_paul_id(db).collateral.value ); // 90 - 7 + BOOST_CHECK_EQUAL( 46, call_michael_id(db).debt.value ); + BOOST_CHECK_EQUAL( 231, call_michael_id(db).collateral.value ); + + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 646 ); // 807 - 161 + BOOST_CHECK_EQUAL( bitusd_id(db).bitasset_data(db).force_settled_volume.value, 161 ); // reset to 0, then 161 more + + // current cny data + BOOST_CHECK_EQUAL( settle_id101(db).balance.amount.value, 100 ); // no change since not expired + BOOST_CHECK_EQUAL( settle_id102(db).balance.amount.value, 1000 ); // no change since not expired + BOOST_CHECK_EQUAL( settle_id103(db).balance.amount.value, 300 ); // no change since not expired + + BOOST_CHECK_EQUAL(get_balance(jim_id(db), core_id(db)), 9998000); + BOOST_CHECK_EQUAL(get_balance(jim_id(db), bitcny_id(db)), 500); + BOOST_CHECK_EQUAL(get_balance(joe_id(db), core_id(db)), 0); + BOOST_CHECK_EQUAL(get_balance(joe_id(db), bitcny_id(db)), 100); // 1500 - 100 - 1000 - 300 + + BOOST_CHECK_EQUAL( 2000, call_jim_id(db).debt.value ); + BOOST_CHECK_EQUAL( 2000, call_jim_id(db).collateral.value ); + + BOOST_CHECK_EQUAL( bitcny_id(db).dynamic_data(db).current_supply.value, 2000 ); + BOOST_CHECK_EQUAL( bitcny_id(db).bitasset_data(db).force_settled_volume.value, 0 ); + + // bob borrow some + const call_order_object& call_bob = *borrow( bob_id(db), bitusd_id(db).amount(19), core_id(db).amount(2) ); + call_order_id_type call_bob_id = call_bob.id; + + BOOST_CHECK_EQUAL(get_balance(bob_id(db), core_id(db)), 9999998); // 10000000 - 2 + BOOST_CHECK_EQUAL(get_balance(bob_id(db), bitusd_id(db)), 19); // new + + BOOST_CHECK_EQUAL( 19, call_bob_id(db).debt.value ); + BOOST_CHECK_EQUAL( 2, call_bob_id(db).collateral.value ); + + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 665 ); // 646 + 19 + BOOST_CHECK_EQUAL( bitusd_id(db).bitasset_data(db).force_settled_volume.value, 161 ); + + generate_block(); + + // maximum amount that can be settled now is round_down((665+161) * 20%) = 165, + // settle_id4 will pay 165-161=4 usd, will get nothing + // bob's call order will get partially settled since its collateral ratio is the lowest + BOOST_CHECK_EQUAL( settle_id4(db).balance.amount.value, 71 ); // 75 - 4 + BOOST_CHECK_EQUAL( settle_id5(db).balance.amount.value, 5 ); // no change, since it's after settle_id4 + BOOST_CHECK_EQUAL( settle_id6(db).balance.amount.value, 20 ); // no change since not expired + BOOST_CHECK_EQUAL( settle_id7(db).balance.amount.value, 21 ); // no change since not expired + BOOST_CHECK_EQUAL( settle_id8(db).balance.amount.value, 22 ); // no change since not expired + + BOOST_CHECK_EQUAL(get_balance(bob_id(db), core_id(db)), 9999998); + BOOST_CHECK_EQUAL(get_balance(bob_id(db), bitusd_id(db)), 19); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 17); // no change + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 20); // no change + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 46); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 99999769); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), 
bitusd_id(db)), 400); + BOOST_CHECK_EQUAL(get_balance(ted_id(db), core_id(db)), 0); + BOOST_CHECK_EQUAL(get_balance(ted_id(db), bitusd_id(db)), 37); + + BOOST_CHECK_EQUAL( 15, call_bob_id(db).debt.value ); // 19 - 4 + BOOST_CHECK_EQUAL( 2, call_bob_id(db).collateral.value ); // no change + BOOST_CHECK_EQUAL( 600, call_paul_id(db).debt.value ); + BOOST_CHECK_EQUAL( 83, call_paul_id(db).collateral.value ); + BOOST_CHECK_EQUAL( 46, call_michael_id(db).debt.value ); + BOOST_CHECK_EQUAL( 231, call_michael_id(db).collateral.value ); + + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 661 ); // 665 - 4 + BOOST_CHECK_EQUAL( bitusd_id(db).bitasset_data(db).force_settled_volume.value, 165 ); // 161 + 4 + + // adding new feed so we have valid price to exit + update_feed_producers( bitusd_id(db), {alice_id} ); + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd_id(db).amount( 101 ) / core_id(db).amount(5); + publish_feed( bitusd_id(db), alice_id(db), current_feed ); + + update_feed_producers( bitcny_id(db), {alice_id} ); + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitcny_id(db).amount( 101 ) / core_id(db).amount(50); + publish_feed( bitcny_id(db), alice_id(db), current_feed ); + + // generate some blocks + generate_blocks( db.head_block_time() + fc::hours(10) ); + set_expiration( db, trx ); + + // check cny + // maximum amount that can be settled now is round_down(2000 * 20%) = 400, + // settle_id101's remaining amount is 100, so it can be fully processed, + // according to price 50 core / 101 cny, it will get 49 core and pay 100 cny; + // settle_id102's remaining amount is 1000, so 400-100=300 cny will be processed, + // according to price 50 core / 101 cny, it will get 148 core and pay 300 cny; + // settle_id103 won't be processed since it's after settle_id102 + BOOST_CHECK( !db.find( settle_id101 ) ); + BOOST_CHECK_EQUAL( settle_id102(db).balance.amount.value, 700 ); // 1000 - 300 + BOOST_CHECK_EQUAL( settle_id103(db).balance.amount.value, 300 ); // no change since it's after settle_id102 + + BOOST_CHECK_EQUAL(get_balance(jim_id(db), core_id(db)), 9998000); + BOOST_CHECK_EQUAL(get_balance(jim_id(db), bitcny_id(db)), 500); + BOOST_CHECK_EQUAL(get_balance(joe_id(db), core_id(db)), 197); // 49 + 148 + BOOST_CHECK_EQUAL(get_balance(joe_id(db), bitcny_id(db)), 100); + + BOOST_CHECK_EQUAL( 1600, call_jim_id(db).debt.value ); // 2000 - 100 - 300 + BOOST_CHECK_EQUAL( 1803, call_jim_id(db).collateral.value ); // 2000 - 49 - 148 + + BOOST_CHECK_EQUAL( bitcny_id(db).dynamic_data(db).current_supply.value, 1600 ); + BOOST_CHECK_EQUAL( bitcny_id(db).bitasset_data(db).force_settled_volume.value, 400 ); // 100 + 300 + + // adding new feed so we have valid price to exit + update_feed_producers( bitusd_id(db), {alice_id} ); + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd_id(db).amount( 101 ) / core_id(db).amount(5); + publish_feed( bitusd_id(db), alice_id(db), current_feed ); + + update_feed_producers( bitcny_id(db), {alice_id} ); + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitcny_id(db).amount( 101 ) / core_id(db).amount(50); + publish_feed( bitcny_id(db), alice_id(db), current_feed ); + + // get to another maintenance 
interval + generate_blocks( db.head_block_time() + fc::hours(14) ); + set_expiration( db, trx ); + + // maximum amount that can be settled now is round_down(661 * 20%) = 132, + // settle_id4's remaining amount is 71, + // firstly it will pay 15 usd to call_bob and get nothing, + // call_bob will pay off all debt, so it will be closed and remaining collateral (2 core) will be returned; + // then it will pay 71-15=56 usd to call_paul and get round_down(56*5/101) = 2 core; + // settle_id5 (has 5 usd) will pay 5 usd and get nothing; + // settle_id6 (has 20 usd) will pay 20 usd and get nothing; + // settle_id7 (has 21 usd) will pay 21 usd and get 1 core; + // settle_id8 (has 22 usd) will pay 15 usd and get nothing, since reached 132 + BOOST_CHECK( !db.find( settle_id4 ) ); + BOOST_CHECK( !db.find( settle_id5 ) ); + BOOST_CHECK( !db.find( settle_id6 ) ); + BOOST_CHECK( !db.find( settle_id7 ) ); + BOOST_CHECK_EQUAL( settle_id8(db).balance.amount.value, 7 ); // 22 - 15 + + BOOST_CHECK_EQUAL(get_balance(bob_id(db), core_id(db)), 10000000); // 9999998 + 2 + BOOST_CHECK_EQUAL(get_balance(bob_id(db), bitusd_id(db)), 19); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 19); // 17 + 2 + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 20); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 46); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 99999769); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 400); + BOOST_CHECK_EQUAL(get_balance(ted_id(db), core_id(db)), 1); // 0 + 1 + BOOST_CHECK_EQUAL(get_balance(ted_id(db), bitusd_id(db)), 37); + + BOOST_CHECK( !db.find( call_bob_id ) ); + BOOST_CHECK_EQUAL( 483, call_paul_id(db).debt.value ); // 600 - 56 - 5 - 20 - 21 - 15 + BOOST_CHECK_EQUAL( 80, call_paul_id(db).collateral.value ); // 83 - 2 - 1 + BOOST_CHECK_EQUAL( 46, call_michael_id(db).debt.value ); + BOOST_CHECK_EQUAL( 231, call_michael_id(db).collateral.value ); + + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 529 ); // 661 - 132 + BOOST_CHECK_EQUAL( bitusd_id(db).bitasset_data(db).force_settled_volume.value, 132 ); // reset to 0, then 132 more + + // check cny + // maximum amount that can be settled now is round_down(1600 * 20%) = 320, + // settle_id102's remaining amount is 700, so 320 cny will be processed, + // according to price 50 core / 101 cny, it will get 158 core and pay 320 cny; + // settle_id103 won't be processed since it's after settle_id102 + BOOST_CHECK( !db.find( settle_id101 ) ); + BOOST_CHECK_EQUAL( settle_id102(db).balance.amount.value, 380 ); // 700 - 320 + BOOST_CHECK_EQUAL( settle_id103(db).balance.amount.value, 300 ); // no change since it's after settle_id102 + + BOOST_CHECK_EQUAL(get_balance(jim_id(db), core_id(db)), 9998000); + BOOST_CHECK_EQUAL(get_balance(jim_id(db), bitcny_id(db)), 500); + BOOST_CHECK_EQUAL(get_balance(joe_id(db), core_id(db)), 355); // 197 + 158 + BOOST_CHECK_EQUAL(get_balance(joe_id(db), bitcny_id(db)), 100); + + BOOST_CHECK_EQUAL( 1280, call_jim_id(db).debt.value ); // 1600 - 320 + BOOST_CHECK_EQUAL( 1645, call_jim_id(db).collateral.value ); // 1803 - 158 + + BOOST_CHECK_EQUAL( bitcny_id(db).dynamic_data(db).current_supply.value, 1280 ); + BOOST_CHECK_EQUAL( bitcny_id(db).bitasset_data(db).force_settled_volume.value, 320 ); // reset to 0, then 320 + + generate_block(); + + // Note: the scenario that a big settle order matching several smaller call orders, + // and another scenario about 
force_settlement_offset_percent parameter, + // are tested in force_settle_test in operation_test2.cpp. + + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE( settle_rounding_test_after_hf_184 ) +{ + try { + auto mi = db.get_global_properties().parameters.maintenance_interval; + generate_blocks(HARDFORK_CORE_184_TIME - mi); + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + set_expiration( db, trx ); + + ACTORS((paul)(michael)(rachel)(alice)(bob)(ted)(joe)(jim)); + + // create assets + const auto& bitusd = create_bitasset("USDBIT", paul_id); + const auto& bitcny = create_bitasset("CNYBIT", paul_id); + const auto& core = asset_id_type()(db); + asset_id_type bitusd_id = bitusd.id; + asset_id_type bitcny_id = bitcny.id; + asset_id_type core_id = core.id; + + // fund accounts + transfer(committee_account, michael_id, asset( 100000000 ) ); + transfer(committee_account, paul_id, asset(10000000)); + transfer(committee_account, alice_id, asset(10000000)); + transfer(committee_account, bob_id, asset(10000000)); + transfer(committee_account, jim_id, asset(10000000)); + + // add a feed to asset + update_feed_producers( bitusd, {paul.id} ); + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd.amount( 100 ) / core.amount(5); + publish_feed( bitusd, paul, current_feed ); + + // paul gets some bitusd + const call_order_object& call_paul = *borrow( paul, bitusd.amount(1000), core.amount(100) ); + call_order_id_type call_paul_id = call_paul.id; + BOOST_REQUIRE_EQUAL( get_balance( paul, bitusd ), 1000 ); + + // and transfer some to rachel + transfer(paul.id, rachel.id, asset(200, bitusd.id)); + + BOOST_CHECK_EQUAL(get_balance(rachel, core), 0); + BOOST_CHECK_EQUAL(get_balance(rachel, bitusd), 200); + BOOST_CHECK_EQUAL(get_balance(michael, bitusd), 0); + BOOST_CHECK_EQUAL(get_balance(michael, core), 100000000); + + // michael gets some bitusd + const call_order_object& call_michael = *borrow(michael, bitusd.amount(6), core.amount(8)); + call_order_id_type call_michael_id = call_michael.id; + + // add settle order and check rounding issue + const operation_result result = force_settle(rachel, bitusd.amount(4)); + + force_settlement_id_type settle_id = result.get(); + BOOST_CHECK_EQUAL( settle_id(db).balance.amount.value, 4 ); + + BOOST_CHECK_EQUAL(get_balance(rachel, core), 0); + BOOST_CHECK_EQUAL(get_balance(rachel, bitusd), 196); + BOOST_CHECK_EQUAL(get_balance(michael, bitusd), 6); + BOOST_CHECK_EQUAL(get_balance(michael, core), 99999992); + BOOST_CHECK_EQUAL(get_balance(paul, core), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul, bitusd), 800); + + BOOST_CHECK_EQUAL( 1000, call_paul.debt.value ); + BOOST_CHECK_EQUAL( 100, call_paul.collateral.value ); + BOOST_CHECK_EQUAL( 6, call_michael.debt.value ); + BOOST_CHECK_EQUAL( 8, call_michael.collateral.value ); + + generate_blocks( db.head_block_time() + fc::hours(20) ); + set_expiration( db, trx ); + + // default feed and settlement expires at the same time + // adding new feed so we have valid price to exit + update_feed_producers( bitusd_id(db), {alice_id} ); + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd_id(db).amount( 101 ) / core_id(db).amount(5); + publish_feed( bitusd_id(db), alice_id(db), current_feed ); + + // now yes expire settlement + generate_blocks( db.head_block_time() + fc::hours(6) ); + + // checks + 
BOOST_CHECK( !db.find( settle_id ) ); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 0); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 200); // rachel's settle order is cancelled and she gets refunded + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 6); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 99999992); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 800); + + BOOST_CHECK_EQUAL( 1000, call_paul_id(db).debt.value ); + BOOST_CHECK_EQUAL( 100, call_paul_id(db).collateral.value ); + BOOST_CHECK_EQUAL( 6, call_michael_id(db).debt.value ); + BOOST_CHECK_EQUAL( 8, call_michael_id(db).collateral.value ); + + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 1006 ); // 1000 + 6 + + // settle more and check rounding issue + // by default 20% of total supply can be settled per maintenance interval, here we test less than it + set_expiration( db, trx ); + const operation_result result2 = force_settle(rachel_id(db), bitusd_id(db).amount(34)); + + force_settlement_id_type settle_id2 = result2.get(); + BOOST_CHECK_EQUAL( settle_id2(db).balance.amount.value, 34 ); + + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 0); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 166); // 200-34 + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 6); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 99999992); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 800); + + BOOST_CHECK_EQUAL( 1000, call_paul_id(db).debt.value ); + BOOST_CHECK_EQUAL( 100, call_paul_id(db).collateral.value ); + BOOST_CHECK_EQUAL( 6, call_michael_id(db).debt.value ); + BOOST_CHECK_EQUAL( 8, call_michael_id(db).collateral.value ); + + generate_blocks( db.head_block_time() + fc::hours(10) ); + set_expiration( db, trx ); + + // adding new feed so we have valid price to exit + update_feed_producers( bitusd_id(db), {alice_id} ); + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd_id(db).amount( 101 ) / core_id(db).amount(5); + publish_feed( bitusd_id(db), alice_id(db), current_feed ); + + // now yes expire settlement + generate_blocks( db.head_block_time() + fc::hours(16) ); + set_expiration( db, trx ); + + // checks + BOOST_CHECK( !db.find( settle_id2 ) ); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 1); // rachel got 1 core + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 179); // paid 21 usd since 1 core is worth a little more than 20 usd + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 6); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 99999992); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 800); + + BOOST_CHECK_EQUAL( 979, call_paul_id(db).debt.value ); // 1000 - 21 + BOOST_CHECK_EQUAL( 99, call_paul_id(db).collateral.value ); // 100 - 1 + BOOST_CHECK_EQUAL( 6, call_michael_id(db).debt.value ); + BOOST_CHECK_EQUAL( 8, call_michael_id(db).collateral.value ); + + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 985 ); // 1006 - 21 + + // prepare for more tests + transfer(paul_id, rachel_id, asset(300, bitusd_id)); + borrow(michael_id(db), bitusd_id(db).amount(2), core_id(db).amount(3)); + + // 
settle even more and check rounding issue + // by default 20% of total supply can be settled per maintenance interval, here we test more than it + const operation_result result3 = force_settle(rachel_id(db), bitusd_id(db).amount(3)); + const operation_result result4 = force_settle(rachel_id(db), bitusd_id(db).amount(434)); + const operation_result result5 = force_settle(rachel_id(db), bitusd_id(db).amount(5)); + + force_settlement_id_type settle_id3 = result3.get(); + BOOST_CHECK_EQUAL( settle_id3(db).balance.amount.value, 3 ); + + force_settlement_id_type settle_id4 = result4.get(); + BOOST_CHECK_EQUAL( settle_id4(db).balance.amount.value, 434 ); + + force_settlement_id_type settle_id5 = result5.get(); + BOOST_CHECK_EQUAL( settle_id5(db).balance.amount.value, 5 ); + + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 1); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 37); // 179 + 300 - 3 - 434 - 5 + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 8); // 6 + 2 + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 99999989); // 99999992 - 3 + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 500); // 800 - 300 + + BOOST_CHECK_EQUAL( 979, call_paul_id(db).debt.value ); + BOOST_CHECK_EQUAL( 99, call_paul_id(db).collateral.value ); + BOOST_CHECK_EQUAL( 8, call_michael_id(db).debt.value ); // 6 + 2 + BOOST_CHECK_EQUAL( 11, call_michael_id(db).collateral.value ); // 8 + 3 + + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 987 ); // 985 + 2 + + generate_blocks( db.head_block_time() + fc::hours(4) ); + set_expiration( db, trx ); + + // adding new feed so we have valid price to exit + update_feed_producers( bitusd_id(db), {alice_id} ); + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd_id(db).amount( 101 ) / core_id(db).amount(5); + publish_feed( bitusd_id(db), alice_id(db), current_feed ); + + update_feed_producers( bitcny_id(db), {alice_id} ); + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitcny_id(db).amount( 101 ) / core_id(db).amount(50); + publish_feed( bitcny_id(db), alice_id(db), current_feed ); + + // now yes expire settlement + generate_blocks( db.head_block_time() + fc::hours(22) ); + set_expiration( db, trx ); + + // checks + // settle_id3 will be cancelled due to too small. 
+ // maximum amount that can be settled now is round_down(987 * 20%) = 197, + // according to price (101/5), the amount worths more than 9 core but less than 10 core, so 9 core will be settled, + // and 9 core worths 181.5 usd, so rachel will pay 182 usd and get 9 core + BOOST_CHECK( !db.find( settle_id3 ) ); + BOOST_CHECK_EQUAL( settle_id4(db).balance.amount.value, 252 ); // 434 - 182 + BOOST_CHECK_EQUAL( settle_id5(db).balance.amount.value, 5 ); // no change, since it's after settle_id4 + + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 10); // 1 + 9 + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 40); // 37 + 3 + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 8); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 99999989); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 500); + + BOOST_CHECK_EQUAL( 797, call_paul_id(db).debt.value ); // 979 - 182 + BOOST_CHECK_EQUAL( 90, call_paul_id(db).collateral.value ); // 99 - 9 + BOOST_CHECK_EQUAL( 8, call_michael_id(db).debt.value ); + BOOST_CHECK_EQUAL( 11, call_michael_id(db).collateral.value ); + + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 805 ); // 987 - 182 + BOOST_CHECK_EQUAL( bitusd_id(db).bitasset_data(db).force_settled_volume.value, 182 ); + + generate_block(); + + // michael borrows more + set_expiration( db, trx ); + borrow(michael_id(db), bitusd_id(db).amount(18), core_id(db).amount(200)); + + BOOST_CHECK_EQUAL( settle_id4(db).balance.amount.value, 252 ); + BOOST_CHECK_EQUAL( settle_id5(db).balance.amount.value, 5 ); + + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 10); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 40); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 26); // 8 + 18 + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 99999789); // 99999989 - 200 + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 500); + + BOOST_CHECK_EQUAL( 797, call_paul_id(db).debt.value ); + BOOST_CHECK_EQUAL( 90, call_paul_id(db).collateral.value ); + BOOST_CHECK_EQUAL( 26, call_michael_id(db).debt.value ); // 8 + 18 + BOOST_CHECK_EQUAL( 211, call_michael_id(db).collateral.value ); // 11 + 200 + + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 823 ); // 805 + 18 + BOOST_CHECK_EQUAL( bitusd_id(db).bitasset_data(db).force_settled_volume.value, 182 ); + + generate_block(); + + // maximum amount that can be settled now is round_down((823+182) * 20%) = 201, + // already settled 182, so 201 - 182 = 19 more usd can be settled, + // according to price (101/5), the amount worths less than 1 core, + // so nothing will happen. 
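A minimal arithmetic sketch of the per-interval settlement cap that the comments above keep quoting. The helper below is illustrative only (not the chain's API); it assumes the default cap of 20% of supply per maintenance interval, expressed in basis points out of GRAPHENE_100_PERCENT = 10000, and follows the test comments in taking the cap over current supply plus the volume already settled, as in "round_down((823+182) * 20%)".

#include <cstdint>
#include <cassert>

// Hypothetical helper mirroring the round_down((supply + settled) * 20%) figures
// in the test comments; integer division performs the round-down.
int64_t max_settle_volume( int64_t current_supply, int64_t already_settled )
{
   const int64_t max_volume_bp   = 2000;   // assumed default: 20%
   const int64_t hundred_percent = 10000;  // GRAPHENE_100_PERCENT
   return ( current_supply + already_settled ) * max_volume_bp / hundred_percent;
}

int main()
{
   // 823 USD supply with 182 USD already settled gives a cap of 201, so only
   // 201 - 182 = 19 USD may still settle; at 101 USD / 5 CORE that buys
   // round_down(19*5/101) = 0 CORE, hence the unchanged balances checked below.
   assert( max_settle_volume( 823, 182 ) == 201 );
   assert( ( 201 - 182 ) * 5 / 101 == 0 );
   return 0;
}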
+ BOOST_CHECK_EQUAL( settle_id4(db).balance.amount.value, 252 ); + BOOST_CHECK_EQUAL( settle_id5(db).balance.amount.value, 5 ); + + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 10); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 40); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 26); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 99999789); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 500); + + BOOST_CHECK_EQUAL( 797, call_paul_id(db).debt.value ); + BOOST_CHECK_EQUAL( 90, call_paul_id(db).collateral.value ); + BOOST_CHECK_EQUAL( 26, call_michael_id(db).debt.value ); + BOOST_CHECK_EQUAL( 211, call_michael_id(db).collateral.value ); + + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 823 ); + BOOST_CHECK_EQUAL( bitusd_id(db).bitasset_data(db).force_settled_volume.value, 182 ); + + // michael borrows a little more + set_expiration( db, trx ); + borrow(michael_id(db), bitusd_id(db).amount(20), core_id(db).amount(20)); + + BOOST_CHECK_EQUAL( settle_id4(db).balance.amount.value, 252 ); + BOOST_CHECK_EQUAL( settle_id5(db).balance.amount.value, 5 ); + + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 10); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 40); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 46); // 26 + 20 + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 99999769); // 99999789 - 20 + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 500); + + BOOST_CHECK_EQUAL( 797, call_paul_id(db).debt.value ); + BOOST_CHECK_EQUAL( 90, call_paul_id(db).collateral.value ); + BOOST_CHECK_EQUAL( 46, call_michael_id(db).debt.value ); // 26 + 20 + BOOST_CHECK_EQUAL( 231, call_michael_id(db).collateral.value ); // 211 + 20 + + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 843 ); // 823 + 20 + BOOST_CHECK_EQUAL( bitusd_id(db).bitasset_data(db).force_settled_volume.value, 182 ); + + generate_block(); + + // maximum amount that can be settled now is round_down((843+182) * 20%) = 205, + // already settled 182, so 205 - 182 = 23 more usd can be settled, + // according to price (101/5), the amount worths more than 1 core but less than 2 core, + // so settle order will fill 1 more core, since 1 core worth more than 20 usd but less than 21 usd, + // so rachel will pay 21 usd and get 1 core + + BOOST_CHECK_EQUAL( settle_id4(db).balance.amount.value, 231 ); // 252 - 21 + BOOST_CHECK_EQUAL( settle_id5(db).balance.amount.value, 5 ); // no change, since it's after settle_id4 + + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 11); // 10 + 1 + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 40); // no change + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 46); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 99999769); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 500); + + BOOST_CHECK_EQUAL( 776, call_paul_id(db).debt.value ); // 797 - 21 + BOOST_CHECK_EQUAL( 89, call_paul_id(db).collateral.value ); // 90 - 1 + BOOST_CHECK_EQUAL( 46, call_michael_id(db).debt.value ); + BOOST_CHECK_EQUAL( 231, call_michael_id(db).collateral.value ); + + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 822 ); // 843 - 21 + BOOST_CHECK_EQUAL( 
bitusd_id(db).bitasset_data(db).force_settled_volume.value, 203 ); // 182 + 21 + + // jim borrow some cny + call_order_id_type call_jim_id = borrow(jim_id(db), bitcny_id(db).amount(2000), core_id(db).amount(2000))->id; + + BOOST_CHECK_EQUAL( 2000, call_jim_id(db).debt.value ); + BOOST_CHECK_EQUAL( 2000, call_jim_id(db).collateral.value ); + + BOOST_CHECK_EQUAL(get_balance(jim_id(db), core_id(db)), 9998000); + BOOST_CHECK_EQUAL(get_balance(jim_id(db), bitcny_id(db)), 2000); + + // jim transfer some cny to joe + transfer(jim_id, joe_id, asset(1500, bitcny_id)); + + BOOST_CHECK_EQUAL(get_balance(jim_id(db), core_id(db)), 9998000); + BOOST_CHECK_EQUAL(get_balance(jim_id(db), bitcny_id(db)), 500); + BOOST_CHECK_EQUAL(get_balance(joe_id(db), core_id(db)), 0); + BOOST_CHECK_EQUAL(get_balance(joe_id(db), bitcny_id(db)), 1500); + + generate_block(); + + // give ted some usd + transfer(paul_id, ted_id, asset(100, bitusd_id)); + BOOST_CHECK_EQUAL(get_balance(ted_id(db), core_id(db)), 0); + BOOST_CHECK_EQUAL(get_balance(ted_id(db), bitusd_id(db)), 100); // new: 100 + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 400); // 500 - 100 + + // ted settle + const operation_result result6 = force_settle(ted_id(db), bitusd_id(db).amount(20)); + const operation_result result7 = force_settle(ted_id(db), bitusd_id(db).amount(21)); + const operation_result result8 = force_settle(ted_id(db), bitusd_id(db).amount(22)); + + force_settlement_id_type settle_id6 = result6.get(); + BOOST_CHECK_EQUAL( settle_id6(db).balance.amount.value, 20 ); + + force_settlement_id_type settle_id7 = result7.get(); + BOOST_CHECK_EQUAL( settle_id7(db).balance.amount.value, 21 ); + + force_settlement_id_type settle_id8 = result8.get(); + BOOST_CHECK_EQUAL( settle_id8(db).balance.amount.value, 22 ); + + BOOST_CHECK_EQUAL(get_balance(ted_id(db), core_id(db)), 0); + BOOST_CHECK_EQUAL(get_balance(ted_id(db), bitusd_id(db)), 37); // 100 - 20 - 21 - 22 + + // joe settle + const operation_result result101 = force_settle(joe_id(db), bitcny_id(db).amount(100)); + const operation_result result102 = force_settle(joe_id(db), bitcny_id(db).amount(1000)); + const operation_result result103 = force_settle(joe_id(db), bitcny_id(db).amount(300)); + + force_settlement_id_type settle_id101 = result101.get(); + BOOST_CHECK_EQUAL( settle_id101(db).balance.amount.value, 100 ); + + force_settlement_id_type settle_id102 = result102.get(); + BOOST_CHECK_EQUAL( settle_id102(db).balance.amount.value, 1000 ); + + force_settlement_id_type settle_id103 = result103.get(); + BOOST_CHECK_EQUAL( settle_id103(db).balance.amount.value, 300 ); + + BOOST_CHECK_EQUAL(get_balance(joe_id(db), core_id(db)), 0); + BOOST_CHECK_EQUAL(get_balance(joe_id(db), bitcny_id(db)), 100); // 1500 - 100 - 1000 - 300 + + generate_block(); + + // adding new feed so we have valid price to exit + update_feed_producers( bitusd_id(db), {alice_id} ); + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd_id(db).amount( 101 ) / core_id(db).amount(5); + publish_feed( bitusd_id(db), alice_id(db), current_feed ); + + update_feed_producers( bitcny_id(db), {alice_id} ); + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitcny_id(db).amount( 101 ) / core_id(db).amount(50); + publish_feed( bitcny_id(db), alice_id(db), current_feed ); + + // get to another 
maintenance interval + generate_blocks( db.head_block_time() + fc::hours(22) ); + set_expiration( db, trx ); + + // maximum amount that can be settled now is round_down(822 * 20%) = 164, + // according to price (101/5), the amount worths more than 8 core but less than 9 core, + // so settle order will fill 8 more core, since 8 core worth more than 161 usd but less than 162 usd, + // so rachel will pay 162 usd and get 8 core + BOOST_CHECK_EQUAL( settle_id4(db).balance.amount.value, 69 ); // 231 - 162 + BOOST_CHECK_EQUAL( settle_id5(db).balance.amount.value, 5 ); // no change, since it's after settle_id4 + BOOST_CHECK_EQUAL( settle_id6(db).balance.amount.value, 20 ); // no change since not expired + BOOST_CHECK_EQUAL( settle_id7(db).balance.amount.value, 21 ); // no change since not expired + BOOST_CHECK_EQUAL( settle_id8(db).balance.amount.value, 22 ); // no change since not expired + + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 19); // 11 + 8 + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 40); // no change + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 46); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 99999769); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 400); + BOOST_CHECK_EQUAL(get_balance(ted_id(db), core_id(db)), 0); + BOOST_CHECK_EQUAL(get_balance(ted_id(db), bitusd_id(db)), 37); + + BOOST_CHECK_EQUAL( 614, call_paul_id(db).debt.value ); // 776 - 162 + BOOST_CHECK_EQUAL( 81, call_paul_id(db).collateral.value ); // 89 - 8 + BOOST_CHECK_EQUAL( 46, call_michael_id(db).debt.value ); + BOOST_CHECK_EQUAL( 231, call_michael_id(db).collateral.value ); + + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 660 ); // 822 - 162 + BOOST_CHECK_EQUAL( bitusd_id(db).bitasset_data(db).force_settled_volume.value, 162 ); // reset to 0, then 162 more + + // current cny data + BOOST_CHECK_EQUAL( settle_id101(db).balance.amount.value, 100 ); // no change since not expired + BOOST_CHECK_EQUAL( settle_id102(db).balance.amount.value, 1000 ); // no change since not expired + BOOST_CHECK_EQUAL( settle_id103(db).balance.amount.value, 300 ); // no change since not expired + + BOOST_CHECK_EQUAL(get_balance(jim_id(db), core_id(db)), 9998000); + BOOST_CHECK_EQUAL(get_balance(jim_id(db), bitcny_id(db)), 500); + BOOST_CHECK_EQUAL(get_balance(joe_id(db), core_id(db)), 0); + BOOST_CHECK_EQUAL(get_balance(joe_id(db), bitcny_id(db)), 100); // 1500 - 100 - 1000 - 300 + + BOOST_CHECK_EQUAL( 2000, call_jim_id(db).debt.value ); + BOOST_CHECK_EQUAL( 2000, call_jim_id(db).collateral.value ); + + BOOST_CHECK_EQUAL( bitcny_id(db).dynamic_data(db).current_supply.value, 2000 ); + BOOST_CHECK_EQUAL( bitcny_id(db).bitasset_data(db).force_settled_volume.value, 0 ); + + // bob borrow some + const call_order_object& call_bob = *borrow( bob_id(db), bitusd_id(db).amount(19), core_id(db).amount(2) ); + call_order_id_type call_bob_id = call_bob.id; + + BOOST_CHECK_EQUAL(get_balance(bob_id(db), core_id(db)), 9999998); // 10000000 - 2 + BOOST_CHECK_EQUAL(get_balance(bob_id(db), bitusd_id(db)), 19); // new + + BOOST_CHECK_EQUAL( 19, call_bob_id(db).debt.value ); + BOOST_CHECK_EQUAL( 2, call_bob_id(db).collateral.value ); + + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 679 ); // 660 + 19 + BOOST_CHECK_EQUAL( bitusd_id(db).bitasset_data(db).force_settled_volume.value, 162 ); + + generate_block(); + + // maximum amount that can be settled now is 
round_down((679+162) * 20%) = 168, + // already settled 162, so 168 - 162 = 6 more usd can be settled, + // according to price (101/5), the amount worths less than 1 core, + // so nothing will happen. + BOOST_CHECK_EQUAL( settle_id4(db).balance.amount.value, 69 ); + BOOST_CHECK_EQUAL( settle_id5(db).balance.amount.value, 5 ); + BOOST_CHECK_EQUAL( settle_id6(db).balance.amount.value, 20 ); + BOOST_CHECK_EQUAL( settle_id7(db).balance.amount.value, 21 ); + BOOST_CHECK_EQUAL( settle_id8(db).balance.amount.value, 22 ); + + BOOST_CHECK_EQUAL(get_balance(bob_id(db), core_id(db)), 9999998); + BOOST_CHECK_EQUAL(get_balance(bob_id(db), bitusd_id(db)), 19); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 19); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 40); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 46); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 99999769); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 400); + BOOST_CHECK_EQUAL(get_balance(ted_id(db), core_id(db)), 0); + BOOST_CHECK_EQUAL(get_balance(ted_id(db), bitusd_id(db)), 37); + + BOOST_CHECK_EQUAL( 19, call_bob_id(db).debt.value ); + BOOST_CHECK_EQUAL( 2, call_bob_id(db).collateral.value ); + BOOST_CHECK_EQUAL( 614, call_paul_id(db).debt.value ); + BOOST_CHECK_EQUAL( 81, call_paul_id(db).collateral.value ); + BOOST_CHECK_EQUAL( 46, call_michael_id(db).debt.value ); + BOOST_CHECK_EQUAL( 231, call_michael_id(db).collateral.value ); + + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 679 ); + BOOST_CHECK_EQUAL( bitusd_id(db).bitasset_data(db).force_settled_volume.value, 162 ); + + // adding new feed so we have valid price to exit + update_feed_producers( bitusd_id(db), {alice_id} ); + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd_id(db).amount( 101 ) / core_id(db).amount(5); + publish_feed( bitusd_id(db), alice_id(db), current_feed ); + + update_feed_producers( bitcny_id(db), {alice_id} ); + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitcny_id(db).amount( 101 ) / core_id(db).amount(50); + publish_feed( bitcny_id(db), alice_id(db), current_feed ); + + // generate some blocks + generate_blocks( db.head_block_time() + fc::hours(10) ); + set_expiration( db, trx ); + + // check cny + // maximum amount that can be settled now is round_down(2000 * 20%) = 400, + // settle_id101's remaining amount is 100, so it can be fully processed, + // according to price 50 core / 101 cny, it will get 49 core and pay 99 cny, the rest (1 cny) will be refunded; + // settle_id102's remaining amount is 1000, so 400-99=301 cny will be processed, + // according to price 50 core / 101 cny, it will get 149 core and pay 301 cny; + // settle_id103 won't be processed since it's after settle_id102 + BOOST_CHECK( !db.find( settle_id101 ) ); + BOOST_CHECK_EQUAL( settle_id102(db).balance.amount.value, 699 ); // 1000 - 301 + BOOST_CHECK_EQUAL( settle_id103(db).balance.amount.value, 300 ); // no change since it's after settle_id102 + + BOOST_CHECK_EQUAL(get_balance(jim_id(db), core_id(db)), 9998000); + BOOST_CHECK_EQUAL(get_balance(jim_id(db), bitcny_id(db)), 500); + BOOST_CHECK_EQUAL(get_balance(joe_id(db), core_id(db)), 198); // 49 + 149 + BOOST_CHECK_EQUAL(get_balance(joe_id(db), bitcny_id(db)), 101); // 100 + 1 + + 
BOOST_CHECK_EQUAL( 1600, call_jim_id(db).debt.value ); // 2000 - 99 - 301 + BOOST_CHECK_EQUAL( 1802, call_jim_id(db).collateral.value ); // 2000 - 49 - 149 + + BOOST_CHECK_EQUAL( bitcny_id(db).dynamic_data(db).current_supply.value, 1600 ); + BOOST_CHECK_EQUAL( bitcny_id(db).bitasset_data(db).force_settled_volume.value, 400 ); // 99 + 301 + + // adding new feed so we have valid price to exit + update_feed_producers( bitusd_id(db), {alice_id} ); + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd_id(db).amount( 101 ) / core_id(db).amount(5); + publish_feed( bitusd_id(db), alice_id(db), current_feed ); + + update_feed_producers( bitcny_id(db), {alice_id} ); + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitcny_id(db).amount( 101 ) / core_id(db).amount(50); + publish_feed( bitcny_id(db), alice_id(db), current_feed ); + + // get to another maintenance interval + generate_blocks( db.head_block_time() + fc::hours(14) ); + set_expiration( db, trx ); + + // maximum amount that can be settled now is round_down(679 * 20%) = 135, + // settle_id4's remaining amount is 69, so it can be fully processed: + // firstly call_bob will be matched, since it owes only 19 usd which is worth less than 1 core, + // it will pay 1 core, the rest (2-1=1 core) will be returned and the short position will be closed; + // then call_paul will be matched, + // according to price (101/5), the amount (69-19=50 usd) is worth more than 2 core but less than 3 core, + // so settle_id4 will get 2 more core, since 2 core are worth more than 40 usd but less than 41 usd, + // so settle_id4 will pay 41 usd and get 2 core, the rest (50-41=9 usd) will be returned because it is too small. + // settle_id5 (has 5 usd) will be cancelled because it is too small; + // settle_id6 (has 20 usd) will be cancelled as well because it is too small; + // settle_id7 (has 21 usd) will be filled and get 1 core, since it is worth more than 1 core, but there is nothing left to refund; + // settle_id8 (has 22 usd) will be filled and get 1 core, and 1 usd will be returned. 
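A simplified sketch of the rounding convention described in the comment block above, using hypothetical helper names rather than the chain's actual code: as I read the figures, the settle order receives round_down(usd / feed price) core, is charged only the rounded-up cost of that core, and the unused remainder is refunded.

#include <cstdint>
#include <cassert>

struct fill_result { int64_t core_received; int64_t usd_paid; int64_t usd_refunded; };

// price quoted as usd_num / core_den, e.g. 101 USD per 5 CORE
fill_result settle_after_hf184( int64_t usd, int64_t usd_num, int64_t core_den )
{
   int64_t core = usd * core_den / usd_num;                      // round down
   int64_t paid = ( core * usd_num + core_den - 1 ) / core_den;  // round up
   return { core, paid, usd - paid };
}

int main()
{
   // settle_id4's 50 remaining USD against the 101 USD / 5 CORE feed:
   // 2 CORE received, 41 USD charged, 9 USD handed back -- the figures above.
   fill_result f = settle_after_hf184( 50, 101, 5 );
   assert( f.core_received == 2 && f.usd_paid == 41 && f.usd_refunded == 9 );
   return 0;
}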
+ BOOST_CHECK( !db.find( settle_id4 ) ); + BOOST_CHECK( !db.find( settle_id5 ) ); + BOOST_CHECK( !db.find( settle_id6 ) ); + BOOST_CHECK( !db.find( settle_id7 ) ); + BOOST_CHECK( !db.find( settle_id8 ) ); + + BOOST_CHECK_EQUAL(get_balance(bob_id(db), core_id(db)), 9999999); // 9999998 + 1 + BOOST_CHECK_EQUAL(get_balance(bob_id(db), bitusd_id(db)), 19); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 22); // 19 + 1 + 2 + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 54); // 40 + 9 + 5 + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 46); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 99999769); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 400); + BOOST_CHECK_EQUAL(get_balance(ted_id(db), core_id(db)), 2); // 0 + 1 + 1 + BOOST_CHECK_EQUAL(get_balance(ted_id(db), bitusd_id(db)), 58); // 37 + 20 + 1 + + BOOST_CHECK( !db.find( call_bob_id ) ); + BOOST_CHECK_EQUAL( 531, call_paul_id(db).debt.value ); // 614 - 41 - 21 - 21 + BOOST_CHECK_EQUAL( 77, call_paul_id(db).collateral.value ); // 81 - 2 - 1 - 1 + BOOST_CHECK_EQUAL( 46, call_michael_id(db).debt.value ); + BOOST_CHECK_EQUAL( 231, call_michael_id(db).collateral.value ); + + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 577 ); // 679 - 19 - 41 - 21 - 21 + BOOST_CHECK_EQUAL( bitusd_id(db).bitasset_data(db).force_settled_volume.value, 102 ); // reset to 0, then 19 + 41 + 21 + 21 + + // check cny + // maximum amount that can be settled now is round_down(1600 * 20%) = 320, + // settle_id102's remaining amount is 699, so 320 cny will be processed, + // according to price 50 core / 101 cny, it will get 158 core and pay 320 cny; + // settle_id103 won't be processed since it's after settle_id102 + BOOST_CHECK( !db.find( settle_id101 ) ); + BOOST_CHECK_EQUAL( settle_id102(db).balance.amount.value, 379 ); // 699 - 320 + BOOST_CHECK_EQUAL( settle_id103(db).balance.amount.value, 300 ); // no change since it's after settle_id102 + + BOOST_CHECK_EQUAL(get_balance(jim_id(db), core_id(db)), 9998000); + BOOST_CHECK_EQUAL(get_balance(jim_id(db), bitcny_id(db)), 500); + BOOST_CHECK_EQUAL(get_balance(joe_id(db), core_id(db)), 356); // 198 + 158 + BOOST_CHECK_EQUAL(get_balance(joe_id(db), bitcny_id(db)), 101); + + BOOST_CHECK_EQUAL( 1280, call_jim_id(db).debt.value ); // 1600 - 320 + BOOST_CHECK_EQUAL( 1644, call_jim_id(db).collateral.value ); // 1802 - 158 + + BOOST_CHECK_EQUAL( bitcny_id(db).dynamic_data(db).current_supply.value, 1280 ); + BOOST_CHECK_EQUAL( bitcny_id(db).bitasset_data(db).force_settled_volume.value, 320 ); // reset to 0, then 320 + + generate_block(); + + // Note: the scenario that a big settle order matching several smaller call orders, + // and another scenario about force_settlement_offset_percent parameter, + // are tested in force_settle_test in operation_test2.cpp. 
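To make the contrast with the pre-hardfork settle_rounding_test above explicit: when a settle amount rounds down to zero core, the old behaviour takes the bitasset and pays out nothing, while after HARDFORK_CORE_184_TIME the order is cancelled and refunded. A compact sketch with illustrative names only, not the chain implementation:

#include <cstdint>
#include <cassert>

struct tiny_settle_result { int64_t core_received; int64_t usd_lost; bool cancelled; };

tiny_settle_result tiny_settle( int64_t usd, int64_t usd_num, int64_t core_den, bool after_hf_184 )
{
   int64_t core = usd * core_den / usd_num;   // rounds down to 0 for tiny amounts
   if( core == 0 && after_hf_184 )
      return { 0, 0, true };                  // cancel and refund: rachel keeps her 4 USD
   return { core, usd, false };               // pre-HF-184: the USD is taken, settler gets nothing
}

int main()
{
   // rachel's 4 USD settle against the 100 USD / 5 CORE feed in both tests
   tiny_settle_result before = tiny_settle( 4, 100, 5, false );
   tiny_settle_result after  = tiny_settle( 4, 100, 5, true );
   assert( before.core_received == 0 && before.usd_lost == 4 );
   assert( after.cancelled && after.usd_lost == 0 );
   return 0;
}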
+ + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE( global_settle_rounding_test ) +{ + try { + // get around Graphene issue #615 feed expiration bug + generate_blocks(HARDFORK_615_TIME); + generate_block(); + set_expiration( db, trx ); + + ACTORS((paul)(michael)(rachel)(alice)); + + // create assets + const auto& bitusd = create_bitasset("USDBIT", paul_id); + const auto& core = asset_id_type()(db); + asset_id_type bitusd_id = bitusd.id; + asset_id_type core_id = core.id; + + // fund accounts + transfer(committee_account, michael_id, asset( 100000000 ) ); + transfer(committee_account, paul_id, asset( 10000000 ) ); + transfer(committee_account, alice_id, asset( 10000000 ) ); + + // allow global settle in bitusd + asset_update_operation op; + op.issuer = bitusd.issuer; + op.asset_to_update = bitusd.id; + op.new_options.issuer_permissions = global_settle; + op.new_options.flags = bitusd.options.flags; + op.new_options.core_exchange_rate = price( asset(1,bitusd_id), asset(1,core_id) ); + trx.operations.push_back(op); + sign(trx, paul_private_key); + PUSH_TX(db, trx); + generate_block(); + trx.clear(); + + // add a feed to asset + update_feed_producers( bitusd_id(db), {paul_id} ); + price_feed current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd_id(db).amount( 100 ) / core_id(db).amount(5); + publish_feed( bitusd_id(db), paul_id(db), current_feed ); + + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 0); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 10000000); + + // paul gets some bitusd + const call_order_object& call_paul = *borrow( paul_id(db), bitusd_id(db).amount(1001), core_id(db).amount(101)); + call_order_id_type call_paul_id = call_paul.id; + BOOST_REQUIRE_EQUAL( get_balance( paul_id(db), bitusd_id(db) ), 1001 ); + BOOST_REQUIRE_EQUAL( get_balance( paul_id(db), core_id(db) ), 10000000-101); + + // and transfer some to rachel + transfer(paul_id, rachel_id, asset(200, bitusd_id)); + + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 0); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 200); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 0); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 100000000); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999899); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 801); + + // michael borrow some bitusd + const call_order_object& call_michael = *borrow(michael_id(db), bitusd_id(db).amount(6), core_id(db).amount(8)); + call_order_id_type call_michael_id = call_michael.id; + + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 6); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 100000000-8); + + // add global settle + force_global_settle(bitusd_id(db), bitusd_id(db).amount(10) / core_id(db).amount(1)); + generate_block(); + + BOOST_CHECK( bitusd_id(db).bitasset_data(db).settlement_price + == price( bitusd_id(db).amount(1007), core_id(db).amount(100) ) ); + BOOST_CHECK_EQUAL( bitusd_id(db).bitasset_data(db).settlement_fund.value, 100 ); // 100 from paul, and 0 from michael + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 1007 ); + + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 0); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 200); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 6); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 
100000000); // michael paid nothing for 6 usd + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); // paul paid 100 core for 1001 usd + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 801); + + // all call orders are gone after global settle + BOOST_CHECK( !db.find_object(call_paul_id) ); + BOOST_CHECK( !db.find_object(call_michael_id) ); + + // add settle order and check rounding issue + force_settle(rachel_id(db), bitusd_id(db).amount(4)); + generate_block(); + + BOOST_CHECK( bitusd_id(db).bitasset_data(db).settlement_price + == price( bitusd_id(db).amount(1007), core_id(db).amount(100) ) ); + BOOST_CHECK_EQUAL( bitusd_id(db).bitasset_data(db).settlement_fund.value, 100 ); // paid nothing + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 1003 ); // settled 4 usd + + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 0); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 196); // rachel paid 4 usd and got nothing + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 6); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 100000000); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 801); + + // rachel settle more than 1 core + force_settle(rachel_id(db), bitusd_id(db).amount(13)); + generate_block(); + + BOOST_CHECK( bitusd_id(db).bitasset_data(db).settlement_price + == price( bitusd_id(db).amount(1007), core_id(db).amount(100) ) ); + BOOST_CHECK_EQUAL( bitusd_id(db).bitasset_data(db).settlement_fund.value, 99 ); // paid 1 core + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 990 ); // settled 13 usd + + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 1); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 183); // rachel paid 13 usd and got 1 core + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 6); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 100000000); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999900); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 801); + + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE( global_settle_rounding_test_after_hf_184 ) +{ + try { + auto mi = db.get_global_properties().parameters.maintenance_interval; + generate_blocks(HARDFORK_CORE_184_TIME - mi); // assume that hard fork core-184 and core-342 happen at same time + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + set_expiration( db, trx ); + + ACTORS((paul)(michael)(rachel)(alice)); + + // create assets + const auto& bitusd = create_bitasset("USDBIT", paul_id); + const auto& core = asset_id_type()(db); + asset_id_type bitusd_id = bitusd.id; + asset_id_type core_id = core.id; + + // fund accounts + transfer(committee_account, michael_id, asset( 100000000 ) ); + transfer(committee_account, paul_id, asset( 10000000 ) ); + transfer(committee_account, alice_id, asset( 10000000 ) ); + + // allow global settle in bitusd + asset_update_operation op; + op.issuer = bitusd_id(db).issuer; + op.asset_to_update = bitusd_id; + op.new_options.issuer_permissions = global_settle; + op.new_options.flags = bitusd.options.flags; + op.new_options.core_exchange_rate = price( asset(1,bitusd_id), asset(1,core_id) ); + trx.operations.push_back(op); + sign(trx, paul_private_key); + PUSH_TX(db, trx); + generate_block(); + trx.clear(); + + // add a feed to asset + update_feed_producers( bitusd_id(db), {paul_id} ); + price_feed 
current_feed; + current_feed.maintenance_collateral_ratio = 1750; + current_feed.maximum_short_squeeze_ratio = 1100; + current_feed.settlement_price = bitusd_id(db).amount( 100 ) / core_id(db).amount(5); + publish_feed( bitusd_id(db), paul_id(db), current_feed ); + + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 0); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 10000000); + + // paul gets some bitusd + const call_order_object& call_paul = *borrow( paul_id(db), bitusd_id(db).amount(1001), core_id(db).amount(101)); + call_order_id_type call_paul_id = call_paul.id; + BOOST_REQUIRE_EQUAL( get_balance( paul_id(db), bitusd_id(db) ), 1001 ); + BOOST_REQUIRE_EQUAL( get_balance( paul_id(db), core_id(db) ), 10000000-101); + + // and transfer some to rachel + transfer(paul_id, rachel_id, asset(200, bitusd_id)); + + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 0); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 200); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 0); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 100000000); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999899); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 801); + + // michael borrow some bitusd + const call_order_object& call_michael = *borrow(michael_id(db), bitusd_id(db).amount(6), core_id(db).amount(8)); + call_order_id_type call_michael_id = call_michael.id; + + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 6); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 100000000-8); + + // add global settle + force_global_settle(bitusd_id(db), bitusd_id(db).amount(10) / core_id(db).amount(1)); + generate_block(); + + BOOST_CHECK( bitusd_id(db).bitasset_data(db).settlement_price + == price( bitusd_id(db).amount(1007), core_id(db).amount(102) ) ); + BOOST_CHECK_EQUAL( bitusd_id(db).bitasset_data(db).settlement_fund.value, 102 ); // 101 from paul, and 1 from michael + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 1007 ); + + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 0); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 200); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 6); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 99999999); // michael paid 1 core for 6 usd + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999899); // paul paid 101 core for 1001 usd + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 801); + + // all call orders are gone after global settle + BOOST_CHECK( !db.find_object(call_paul_id)); + BOOST_CHECK( !db.find_object(call_michael_id)); + + // settle order will not execute after HF due to too small + GRAPHENE_REQUIRE_THROW( force_settle(rachel_id(db), bitusd_id(db).amount(4)), fc::exception ); + + generate_block(); + + // balances unchanged + BOOST_CHECK( bitusd_id(db).bitasset_data(db).settlement_price + == price( bitusd_id(db).amount(1007), core_id(db).amount(102) ) ); + BOOST_CHECK_EQUAL( bitusd_id(db).bitasset_data(db).settlement_fund.value, 102 ); + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 1007 ); + + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 0); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 200); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 6); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 99999999); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999899); 
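Comparing this test with the pre-hardfork version above, the only difference in the expected numbers is the rounding direction applied to each margin position when it is closed out at the forced settle price of 10 usd per core: before HARDFORK_CORE_184 the checks are consistent with rounding the collateral taken down (100 from paul, 0 from michael, fund = 100), afterwards with rounding it up against the debtor (101 and 1, fund = 102). A small integer-arithmetic sketch of that difference, using plain int64_t instead of share_type, as an illustration only:

#include <cassert>
#include <cstdint>

int64_t take_rounded_down(int64_t debt, int64_t usd_per_core) { return debt / usd_per_core; }
int64_t take_rounded_up(int64_t debt, int64_t usd_per_core) { return (debt + usd_per_core - 1) / usd_per_core; }

int main()
{
   const int64_t price = 10;   // forced global-settle price: 10 usd per core

   // pre-HF behaviour implied by the earlier test: michael's 6-usd debt costs him nothing
   assert(take_rounded_down(1001, price) == 100 && take_rounded_down(6, price) == 0);   // fund = 100

   // post-HF behaviour checked here: every debtor pays at least 1 core
   assert(take_rounded_up(1001, price) == 101 && take_rounded_up(6, price) == 1);       // fund = 102
   return 0;
}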
+ BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 801); + + // rachel settles an amount worth more than 1 core + force_settle(rachel_id(db), bitusd_id(db).amount(13)); + generate_block(); + + BOOST_CHECK( bitusd_id(db).bitasset_data(db).settlement_price + == price( bitusd_id(db).amount(1007), core_id(db).amount(102) ) ); + BOOST_CHECK_EQUAL( bitusd_id(db).bitasset_data(db).settlement_fund.value, 101 ); // paid 1 core + BOOST_CHECK_EQUAL( bitusd_id(db).dynamic_data(db).current_supply.value, 997 ); // settled 10 usd + + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), core_id(db)), 1); + BOOST_CHECK_EQUAL(get_balance(rachel_id(db), bitusd_id(db)), 190); // rachel paid 10 usd and got 1 core, 3 usd returned + BOOST_CHECK_EQUAL(get_balance(michael_id(db), bitusd_id(db)), 6); + BOOST_CHECK_EQUAL(get_balance(michael_id(db), core_id(db)), 99999999); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), core_id(db)), 9999899); + BOOST_CHECK_EQUAL(get_balance(paul_id(db), bitusd_id(db)), 801); + + + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/smartcoin_tests.cpp b/tests/tests/smartcoin_tests.cpp new file mode 100644 index 0000000000..9610e456ab --- /dev/null +++ b/tests/tests/smartcoin_tests.cpp @@ -0,0 +1,640 @@ +/* + * Copyright (c) 2018 oxarbitrage, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include + +#include +#include +#include + +#include + +#include "../common/database_fixture.hpp" + +using namespace graphene::chain; +using namespace graphene::chain::test; + + +BOOST_FIXTURE_TEST_SUITE(smartcoin_tests, database_fixture) + + +BOOST_AUTO_TEST_CASE(bsip36) +{ + try + { + /* The issue affects only smartcoins (market-pegged assets fed by active witnesses or committee members). + * The test reproduces the issue, advances past the hardfork and checks that it is resolved afterwards. 
+ */ + + /* References: + * BSIP 36: https://github.com/bitshares/bsips/blob/master/bsip-0036.md + * and the former: CORE Issue 518: https://github.com/bitshares/bitshares-core/issues/518 + */ + + // Create 12 accounts to be witnesses under our control + ACTORS( (witness0)(witness1)(witness2)(witness3)(witness4)(witness5) + (witness6)(witness7)(witness8)(witness9)(witness10)(witness11) ); + + // Upgrade all accounts to LTM + upgrade_to_lifetime_member(witness0_id); + upgrade_to_lifetime_member(witness1_id); + upgrade_to_lifetime_member(witness2_id); + upgrade_to_lifetime_member(witness3_id); + upgrade_to_lifetime_member(witness4_id); + upgrade_to_lifetime_member(witness5_id); + upgrade_to_lifetime_member(witness6_id); + upgrade_to_lifetime_member(witness7_id); + upgrade_to_lifetime_member(witness8_id); + upgrade_to_lifetime_member(witness9_id); + upgrade_to_lifetime_member(witness10_id); + upgrade_to_lifetime_member(witness11_id); + + // Create all the witnesses + const witness_id_type witness0_witness_id = create_witness(witness0_id, witness0_private_key).id; + const witness_id_type witness1_witness_id = create_witness(witness1_id, witness1_private_key).id; + const witness_id_type witness2_witness_id = create_witness(witness2_id, witness2_private_key).id; + const witness_id_type witness3_witness_id = create_witness(witness3_id, witness3_private_key).id; + const witness_id_type witness4_witness_id = create_witness(witness4_id, witness4_private_key).id; + const witness_id_type witness5_witness_id = create_witness(witness5_id, witness5_private_key).id; + const witness_id_type witness6_witness_id = create_witness(witness6_id, witness6_private_key).id; + const witness_id_type witness7_witness_id = create_witness(witness7_id, witness7_private_key).id; + const witness_id_type witness8_witness_id = create_witness(witness8_id, witness8_private_key).id; + const witness_id_type witness9_witness_id = create_witness(witness9_id, witness9_private_key).id; + const witness_id_type witness10_witness_id = create_witness(witness10_id, witness10_private_key).id; + const witness_id_type witness11_witness_id = create_witness(witness11_id, witness11_private_key).id; + + // Create a vector with private key of all witnesses, will be used to activate 11 witnesses at a time + const vector private_keys = { + witness0_private_key, + witness1_private_key, + witness2_private_key, + witness3_private_key, + witness4_private_key, + witness5_private_key, + witness6_private_key, + witness7_private_key, + witness8_private_key, + witness9_private_key, + witness10_private_key + }; + + // create a map with account id and witness id of the first 11 witnesses + const flat_map witness_map = { + {witness0_id, witness0_witness_id}, + {witness1_id, witness1_witness_id}, + {witness2_id, witness2_witness_id}, + {witness3_id, witness3_witness_id}, + {witness4_id, witness4_witness_id}, + {witness5_id, witness5_witness_id}, + {witness6_id, witness6_witness_id}, + {witness7_id, witness7_witness_id}, + {witness8_id, witness8_witness_id}, + {witness9_id, witness9_witness_id}, + {witness10_id, witness10_witness_id} + }; + + // Create the asset + const asset_id_type bit_usd_id = create_bitasset("USDBIT").id; + + // Update the asset to be fed by system witnesses + asset_update_operation op; + const asset_object &asset_obj = bit_usd_id(db); + op.asset_to_update = bit_usd_id; + op.issuer = asset_obj.issuer; + op.new_options = asset_obj.options; + op.new_options.flags &= witness_fed_asset; + op.new_options.issuer_permissions &= witness_fed_asset; 
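Asset flags and issuer_permissions are plain bitmasks, so the two `&= witness_fed_asset` statements above keep only the witness-fed bit, and only if it is already present in the defaults; `|=` would be the way to add the bit while preserving the others. A tiny sketch of the difference (the flag value used here is illustrative, not the real constant):

#include <cassert>
#include <cstdint>

int main()
{
   const uint16_t witness_fed = 0x08;   // illustrative bit value only
   const uint16_t other_bits  = 0x03;

   uint16_t a = witness_fed | other_bits;
   a &= witness_fed;                    // keeps only the witness-fed bit
   assert(a == witness_fed);

   uint16_t b = other_bits;             // witness-fed bit not set
   b &= witness_fed;                    // &= cannot turn it on
   assert(b == 0);

   uint16_t c = other_bits;
   c |= witness_fed;                    // |= sets the bit and preserves the rest
   assert(c == (witness_fed | other_bits));
   return 0;
}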
+ trx.operations.push_back(op); + PUSH_TX(db, trx, ~0); + generate_block(); + trx.clear(); + + // Check current default witnesses, default chain is configured with 10 witnesses + auto witnesses = db.get_global_properties().active_witnesses; + BOOST_CHECK_EQUAL(witnesses.size(), 10u); + BOOST_CHECK_EQUAL(witnesses.begin()[0].instance.value, 1u); + BOOST_CHECK_EQUAL(witnesses.begin()[1].instance.value, 2u); + BOOST_CHECK_EQUAL(witnesses.begin()[2].instance.value, 3u); + BOOST_CHECK_EQUAL(witnesses.begin()[3].instance.value, 4u); + BOOST_CHECK_EQUAL(witnesses.begin()[4].instance.value, 5u); + BOOST_CHECK_EQUAL(witnesses.begin()[5].instance.value, 6u); + BOOST_CHECK_EQUAL(witnesses.begin()[6].instance.value, 7u); + BOOST_CHECK_EQUAL(witnesses.begin()[7].instance.value, 8u); + BOOST_CHECK_EQUAL(witnesses.begin()[8].instance.value, 9u); + BOOST_CHECK_EQUAL(witnesses.begin()[9].instance.value, 10u); + + // We need to activate 11 witnesses by voting for each of them. + // Each witness is voted with incremental stake so last witness created will be the ones with more votes + int c = 0; + for (auto l : witness_map) { + int stake = 100 + c + 1; + transfer(committee_account, l.first, asset(stake)); + { + account_update_operation op; + op.account = l.first; + op.new_options = l.first(db).options; + op.new_options->votes.insert(l.second(db).vote_id); + op.new_options->num_witness = std::count_if(op.new_options->votes.begin(), op.new_options->votes.end(), + [](vote_id_type id) { + return id.type() == vote_id_type::witness; + }); + trx.operations.push_back(op); + sign(trx, private_keys.at(c)); + PUSH_TX(db, trx); + trx.clear(); + } + ++c; + } + + // Trigger the new witnesses + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + + // Check my witnesses are now in control of the system + witnesses = db.get_global_properties().active_witnesses; + BOOST_CHECK_EQUAL(witnesses.size(), 11u); + BOOST_CHECK_EQUAL(witnesses.begin()[0].instance.value, 11u); + BOOST_CHECK_EQUAL(witnesses.begin()[1].instance.value, 12u); + BOOST_CHECK_EQUAL(witnesses.begin()[2].instance.value, 13u); + BOOST_CHECK_EQUAL(witnesses.begin()[3].instance.value, 14u); + BOOST_CHECK_EQUAL(witnesses.begin()[4].instance.value, 15u); + BOOST_CHECK_EQUAL(witnesses.begin()[5].instance.value, 16u); + BOOST_CHECK_EQUAL(witnesses.begin()[6].instance.value, 17u); + BOOST_CHECK_EQUAL(witnesses.begin()[7].instance.value, 18u); + BOOST_CHECK_EQUAL(witnesses.begin()[8].instance.value, 19u); + BOOST_CHECK_EQUAL(witnesses.begin()[9].instance.value, 20u); + BOOST_CHECK_EQUAL(witnesses.begin()[10].instance.value, 21u); + + // Adding 2 feeds with witnesses 0 and 1, checking if they get inserted + const asset_object &core = asset_id_type()(db); + price_feed feed; + feed.settlement_price = bit_usd_id(db).amount(1) / core.amount(5); + publish_feed(bit_usd_id(db), witness0_id(db), feed); + + asset_bitasset_data_object bitasset_data = bit_usd_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 1u); + auto itr = bitasset_data.feeds.begin(); + BOOST_CHECK_EQUAL(itr[0].first.instance.value, 16u); + + feed.settlement_price = bit_usd_id(db).amount(2) / core.amount(5); + publish_feed(bit_usd_id(db), witness1_id(db), feed); + + bitasset_data = bit_usd_id(db).bitasset_data(db); + itr = bitasset_data.feeds.begin(); + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 2u); + BOOST_CHECK_EQUAL(itr[0].first.instance.value, 16u); + BOOST_CHECK_EQUAL(itr[1].first.instance.value, 17u); + + // Activate witness11 with voting stake, will 
kick the witness with less votes(witness0) out of the active list + transfer(committee_account, witness11_id, asset(121)); + set_expiration(db, trx); + { + account_update_operation op; + op.account = witness11_id; + op.new_options = witness11_id(db).options; + op.new_options->votes.insert(witness11_witness_id(db).vote_id); + op.new_options->num_witness = std::count_if(op.new_options->votes.begin(), op.new_options->votes.end(), + [](vote_id_type id) { + return id.type() == vote_id_type::witness; + }); + trx.operations.push_back(op); + sign(trx, witness11_private_key); + PUSH_TX(db, trx); + trx.clear(); + } + + // Trigger new witness + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + + // Check active witness list now + witnesses = db.get_global_properties().active_witnesses; + BOOST_CHECK_EQUAL(witnesses.begin()[0].instance.value, 12u); + BOOST_CHECK_EQUAL(witnesses.begin()[1].instance.value, 13u); + BOOST_CHECK_EQUAL(witnesses.begin()[2].instance.value, 14u); + BOOST_CHECK_EQUAL(witnesses.begin()[3].instance.value, 15u); + BOOST_CHECK_EQUAL(witnesses.begin()[4].instance.value, 16u); + BOOST_CHECK_EQUAL(witnesses.begin()[5].instance.value, 17u); + BOOST_CHECK_EQUAL(witnesses.begin()[6].instance.value, 18u); + BOOST_CHECK_EQUAL(witnesses.begin()[7].instance.value, 19u); + BOOST_CHECK_EQUAL(witnesses.begin()[8].instance.value, 20u); + BOOST_CHECK_EQUAL(witnesses.begin()[9].instance.value, 21u); + BOOST_CHECK_EQUAL(witnesses.begin()[10].instance.value, 22u); + + // witness0 has been removed but it was a feeder before + // Feed persist in the blockchain, this reproduces the issue + bitasset_data = bit_usd_id(db).bitasset_data(db); + itr = bitasset_data.feeds.begin(); + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 2u); + BOOST_CHECK_EQUAL(itr[0].first.instance.value, 16u); + + // Feed persist after expiration + const auto feed_lifetime = bit_usd_id(db).bitasset_data(db).options.feed_lifetime_sec; + generate_blocks(db.head_block_time() + feed_lifetime + 1); + bitasset_data = bit_usd_id(db).bitasset_data(db); + itr = bitasset_data.feeds.begin(); + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 2u); + BOOST_CHECK_EQUAL(itr[0].first.instance.value, 16u); + + // Other witnesses add more feeds + feed.settlement_price = bit_usd_id(db).amount(4) / core.amount(5); + publish_feed(bit_usd_id(db), witness2_id(db), feed); + feed.settlement_price = bit_usd_id(db).amount(3) / core.amount(5); + publish_feed(bit_usd_id(db), witness3_id(db), feed); + + // But the one from witness0 is never removed + bitasset_data = bit_usd_id(db).bitasset_data(db); + itr = bitasset_data.feeds.begin(); + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 4u); + BOOST_CHECK_EQUAL(itr[0].first.instance.value, 16u); + + // Feed from witness1 is also expired but never deleted + // All feeds should be deleted at this point + const auto minimum_feeds = bit_usd_id(db).bitasset_data(db).options.minimum_feeds; + BOOST_CHECK_EQUAL(minimum_feeds, 1u); + BOOST_CHECK_EQUAL(itr[1].first.instance.value, 17u); + + // Advancing into HF time + generate_blocks(HARDFORK_CORE_518_TIME); + + // Advancing to next maint + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + + // All expired feeds are deleted + bitasset_data = bit_usd_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 0u); + + // witness1 start feed producing again + feed.settlement_price = bit_usd_id(db).amount(1) / core.amount(5); + publish_feed(bit_usd_id(db), witness1_id(db), feed); + bitasset_data = 
bit_usd_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 1u); + itr = bitasset_data.feeds.begin(); + BOOST_CHECK_EQUAL(itr[0].first.instance.value, 17u); + + // generate some blocks up to expiration but feed will not be deleted yet as need next maint time + generate_blocks(itr[0].second.first + feed_lifetime + 1); + + // add another feed with witness2 + feed.settlement_price = bit_usd_id(db).amount(1) / core.amount(5); + publish_feed(bit_usd_id(db), witness2_id(db), feed); + bitasset_data = bit_usd_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 2u); + itr = bitasset_data.feeds.begin(); + BOOST_CHECK_EQUAL(itr[0].first.instance.value, 17u); + BOOST_CHECK_EQUAL(itr[1].first.instance.value, 18u); + + // make the first feed expire + generate_blocks(itr[0].second.first + feed_lifetime + 1); + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + + // feed from witness0 expires and gets deleted, feed from witness is on time so persist + bitasset_data = bit_usd_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 1u); + itr = bitasset_data.feeds.begin(); + BOOST_CHECK_EQUAL(itr[0].first.instance.value, 18u); + + // expire everything + generate_blocks(itr[0].second.first + feed_lifetime + 1); + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + bitasset_data = bit_usd_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 0u); + + // add new feed with witness1 + feed.settlement_price = bit_usd_id(db).amount(1) / core.amount(5); + publish_feed(bit_usd_id(db), witness1_id(db), feed); + bitasset_data = bit_usd_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 1u); + itr = bitasset_data.feeds.begin(); + BOOST_CHECK_EQUAL(itr[0].first.instance.value, 17u); + + // Reactivate witness0 + transfer(committee_account, witness0_id, asset(100)); + set_expiration(db, trx); + { + account_update_operation op; + op.account = witness0_id; + op.new_options = witness0_id(db).options; + op.new_options->votes.insert(witness0_witness_id(db).vote_id); + op.new_options->num_witness = std::count_if(op.new_options->votes.begin(), op.new_options->votes.end(), + [](vote_id_type id) { + return id.type() == vote_id_type::witness; + }); + trx.operations.push_back(op); + sign(trx, witness0_private_key); + PUSH_TX(db, trx); + trx.clear(); + } + + // This will deactivate witness1 as it is the one with less votes + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + + // Checking + witnesses = db.get_global_properties().active_witnesses; + BOOST_CHECK_EQUAL(witnesses.begin()[0].instance.value, 11u); + BOOST_CHECK_EQUAL(witnesses.begin()[1].instance.value, 13u); + BOOST_CHECK_EQUAL(witnesses.begin()[2].instance.value, 14u); + BOOST_CHECK_EQUAL(witnesses.begin()[3].instance.value, 15u); + BOOST_CHECK_EQUAL(witnesses.begin()[4].instance.value, 16u); + BOOST_CHECK_EQUAL(witnesses.begin()[5].instance.value, 17u); + BOOST_CHECK_EQUAL(witnesses.begin()[6].instance.value, 18u); + BOOST_CHECK_EQUAL(witnesses.begin()[7].instance.value, 19u); + BOOST_CHECK_EQUAL(witnesses.begin()[8].instance.value, 20u); + BOOST_CHECK_EQUAL(witnesses.begin()[9].instance.value, 21u); + BOOST_CHECK_EQUAL(witnesses.begin()[10].instance.value, 22u); + + // feed from witness1 is still here as the witness is no longer a producer but the feed is not yet expired + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 1u); + itr = bitasset_data.feeds.begin(); + 
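The pattern exercised through these checks is the post-HARDFORK_CORE_518 rule the comments describe: a published feed counts as stale once feed_lifetime_sec has passed since its timestamp, but the stale entry is only swept out at the next maintenance interval. A hedged sketch of the staleness predicate itself (the real check lives in the bitasset object and uses fc::time_point_sec; the names below are illustrative):

#include <cassert>
#include <cstdint>

struct feed_entry { int64_t published_sec; };   // illustrative stand-in for a per-producer feed

bool is_stale(const feed_entry& f, int64_t feed_lifetime_sec, int64_t now_sec)
{
   return f.published_sec + feed_lifetime_sec <= now_sec;
}

int main()
{
   const int64_t lifetime = 24 * 60 * 60;       // e.g. a one-day feed lifetime
   const feed_entry f{ 1000 };

   assert(!is_stale(f, lifetime, 1000 + lifetime - 1));   // still current
   assert( is_stale(f, lifetime, 1000 + lifetime + 1));   // stale, removed at the next maintenance
   return 0;
}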
BOOST_CHECK_EQUAL(itr[0].first.instance.value, 17u); + + // make feed from witness1 expire + generate_blocks(itr[0].second.first + feed_lifetime + 1); + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + + bitasset_data = bit_usd_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 0u); + + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE(bsip36_update_feed_producers) +{ + try + { + /* For MPA fed by non witnesses or non committee mmembers but by feed producers changes should do nothing */ + ACTORS( (sam)(alice)(paul)(bob) ); + + // Create the asset + const asset_id_type bit_usd_id = create_bitasset("USDBIT").id; + + // Update asset issuer + const asset_object &asset_obj = bit_usd_id(db); + { + asset_update_operation op; + op.asset_to_update = bit_usd_id; + op.issuer = asset_obj.issuer; + op.new_issuer = bob_id; + op.new_options = asset_obj.options; + op.new_options.flags &= ~witness_fed_asset; + trx.operations.push_back(op); + PUSH_TX(db, trx, ~0); + generate_block(); + trx.clear(); + } + + // Add 3 feed producers for asset + { + asset_update_feed_producers_operation op; + op.asset_to_update = bit_usd_id; + op.issuer = bob_id; + op.new_feed_producers = {sam_id, alice_id, paul_id}; + trx.operations.push_back(op); + sign(trx, bob_private_key); + PUSH_TX(db, trx); + generate_block(); + trx.clear(); + } + + // Bitshares will create entries in the field feed after feed producers are added + auto bitasset_data = bit_usd_id(db).bitasset_data(db); + + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 3u); + auto itr = bitasset_data.feeds.begin(); + BOOST_CHECK_EQUAL(itr[0].first.instance.value, 16u); + BOOST_CHECK_EQUAL(itr[1].first.instance.value, 17u); + BOOST_CHECK_EQUAL(itr[2].first.instance.value, 18u); + + // Removing a feed producer + { + asset_update_feed_producers_operation op; + op.asset_to_update = bit_usd_id; + op.issuer = bob_id; + op.new_feed_producers = {alice_id, paul_id}; + trx.operations.push_back(op); + sign(trx, bob_private_key); + PUSH_TX(db, trx); + generate_block(); + trx.clear(); + } + + // Feed for removed producer is removed + bitasset_data = bit_usd_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 2u); + itr = bitasset_data.feeds.begin(); + BOOST_CHECK_EQUAL(itr[0].first.instance.value, 17u); + BOOST_CHECK_EQUAL(itr[1].first.instance.value, 18u); + + // Feed persist after expiration + const auto feed_lifetime = bit_usd_id(db).bitasset_data(db).options.feed_lifetime_sec; + generate_blocks(db.head_block_time() + feed_lifetime + 1); + bitasset_data = bit_usd_id(db).bitasset_data(db); + itr = bitasset_data.feeds.begin(); + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 2u); + BOOST_CHECK_EQUAL(itr[0].first.instance.value, 17u); + BOOST_CHECK_EQUAL(itr[1].first.instance.value, 18u); + + // Advancing into HF time + generate_blocks(HARDFORK_CORE_518_TIME); + + // Advancing to next maint + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + + // Expired feeds persist, no changes + bitasset_data = bit_usd_id(db).bitasset_data(db); + itr = bitasset_data.feeds.begin(); + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 2u); + BOOST_CHECK_EQUAL(itr[0].first.instance.value, 17u); + BOOST_CHECK_EQUAL(itr[1].first.instance.value, 18u); + + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE(bsip36_additional) +{ + try + { + /* Check impact of bsip36 with multiple feeds */ + INVOKE( bsip36 ); + + // get the stuff needed from invoked test + const asset_id_type bit_usd_id = get_asset("USDBIT").id; + 
const asset_id_type core_id = asset_id_type(); + const account_id_type witness5_id= get_account("witness5").id; + const account_id_type witness6_id= get_account("witness6").id; + const account_id_type witness7_id= get_account("witness7").id; + const account_id_type witness8_id= get_account("witness8").id; + const account_id_type witness9_id= get_account("witness9").id; + const account_id_type witness10_id= get_account("witness10").id; + + + set_expiration( db, trx ); + + // changing lifetime feed to 5 days + // maint interval default is every 1 day + { + asset_update_bitasset_operation op; + op.new_options.minimum_feeds = 3; + op.new_options.feed_lifetime_sec = 86400 * 5; + op.asset_to_update = bit_usd_id; + op.issuer = bit_usd_id(db).issuer; + trx.operations.push_back(op); + PUSH_TX(db, trx, ~0); + generate_block(); + trx.clear(); + } + + price_feed feed; + feed.settlement_price = bit_usd_id(db).amount(1) / core_id(db).amount(5); + publish_feed(bit_usd_id(db), witness5_id(db), feed); + auto bitasset_data = bit_usd_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 1u); + auto itr = bitasset_data.feeds.begin(); + BOOST_CHECK_EQUAL(itr[0].first.instance.value, 21u); + + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + generate_block(); + + feed.settlement_price = bit_usd_id(db).amount(1) / core_id(db).amount(5); + publish_feed(bit_usd_id(db), witness6_id(db), feed); + bitasset_data = bit_usd_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 2u); + itr = bitasset_data.feeds.begin(); + BOOST_CHECK_EQUAL(itr[0].first.instance.value, 21u); + BOOST_CHECK_EQUAL(itr[1].first.instance.value, 22u); + + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + generate_block(); + + feed.settlement_price = bit_usd_id(db).amount(1) / core_id(db).amount(5); + publish_feed(bit_usd_id(db), witness7_id(db), feed); + bitasset_data = bit_usd_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 3u); + itr = bitasset_data.feeds.begin(); + BOOST_CHECK_EQUAL(itr[0].first.instance.value, 21u); + BOOST_CHECK_EQUAL(itr[1].first.instance.value, 22u); + BOOST_CHECK_EQUAL(itr[2].first.instance.value, 23u); + + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + generate_block(); + + feed.settlement_price = bit_usd_id(db).amount(1) / core_id(db).amount(5); + publish_feed(bit_usd_id(db), witness8_id(db), feed); + bitasset_data = bit_usd_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 4u); + itr = bitasset_data.feeds.begin(); + BOOST_CHECK_EQUAL(itr[0].first.instance.value, 21u); + BOOST_CHECK_EQUAL(itr[1].first.instance.value, 22u); + BOOST_CHECK_EQUAL(itr[2].first.instance.value, 23u); + BOOST_CHECK_EQUAL(itr[3].first.instance.value, 24u); + + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + generate_block(); + + feed.settlement_price = bit_usd_id(db).amount(1) / core_id(db).amount(5); + publish_feed(bit_usd_id(db), witness9_id(db), feed); + bitasset_data = bit_usd_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 5u); + itr = bitasset_data.feeds.begin(); + BOOST_CHECK_EQUAL(itr[0].first.instance.value, 21u); + BOOST_CHECK_EQUAL(itr[1].first.instance.value, 22u); + BOOST_CHECK_EQUAL(itr[2].first.instance.value, 23u); + BOOST_CHECK_EQUAL(itr[3].first.instance.value, 24u); + BOOST_CHECK_EQUAL(itr[4].first.instance.value, 25u); + + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + 
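The feed counts asserted through this stretch follow directly from the two parameters set at the top of the test: one feed is published per (roughly one-day) maintenance interval against a five-day feed_lifetime_sec, so six feeds can briefly coexist before the oldest crosses its lifetime and is purged at the following maintenance. A day-granularity sketch of that timeline (the exact boundary depends on block timestamps, so this only shows why the counts go 6, then 5, then eventually 3):

#include <cassert>
#include <vector>

int main()
{
   const int lifetime_days = 5;                             // feed_lifetime_sec = 86400 * 5
   const std::vector<int> publish_day{0, 1, 2, 3, 4, 5};    // witness5..witness10, one per maintenance

   auto live_at = [&](int day) {                            // feeds not yet past their lifetime
      int n = 0;
      for (int d : publish_day)
         if (day - d <= lifetime_days) ++n;
      return n;
   };

   assert(live_at(5) == 6);   // all six present right after witness10 publishes
   assert(live_at(6) == 5);   // the oldest feed drops at the next maintenance
   assert(live_at(8) == 3);   // two further maintenance intervals purge two more
   return 0;
}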
generate_block(); + + feed.settlement_price = bit_usd_id(db).amount(1) / core_id(db).amount(5); + publish_feed(bit_usd_id(db), witness10_id(db), feed); + bitasset_data = bit_usd_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 6u); + itr = bitasset_data.feeds.begin(); + BOOST_CHECK_EQUAL(itr[0].first.instance.value, 21u); + BOOST_CHECK_EQUAL(itr[1].first.instance.value, 22u); + BOOST_CHECK_EQUAL(itr[2].first.instance.value, 23u); + BOOST_CHECK_EQUAL(itr[3].first.instance.value, 24u); + BOOST_CHECK_EQUAL(itr[4].first.instance.value, 25u); + BOOST_CHECK_EQUAL(itr[5].first.instance.value, 26u); + + // make the older feed expire + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + generate_block(); + + bitasset_data = bit_usd_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 5u); + itr = bitasset_data.feeds.begin(); + BOOST_CHECK_EQUAL(itr[0].first.instance.value, 22u); + BOOST_CHECK_EQUAL(itr[1].first.instance.value, 23u); + BOOST_CHECK_EQUAL(itr[2].first.instance.value, 24u); + BOOST_CHECK_EQUAL(itr[3].first.instance.value, 25u); + BOOST_CHECK_EQUAL(itr[4].first.instance.value, 26u); + + // make older 2 feeds expire + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + generate_block(); + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + generate_block(); + + bitasset_data = bit_usd_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 3u); + itr = bitasset_data.feeds.begin(); + BOOST_CHECK_EQUAL(itr[0].first.instance.value, 24u); + BOOST_CHECK_EQUAL(itr[1].first.instance.value, 25u); + BOOST_CHECK_EQUAL(itr[2].first.instance.value, 26u); + + // witness5 add new feed, feeds are sorted by witness_id not by feed_time + feed.settlement_price = bit_usd_id(db).amount(1) / core_id(db).amount(5); + publish_feed(bit_usd_id(db), witness5_id(db), feed); + bitasset_data = bit_usd_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 4u); + itr = bitasset_data.feeds.begin(); + BOOST_CHECK_EQUAL(itr[0].first.instance.value, 21u); + BOOST_CHECK_EQUAL(itr[1].first.instance.value, 24u); + BOOST_CHECK_EQUAL(itr[2].first.instance.value, 25u); + BOOST_CHECK_EQUAL(itr[3].first.instance.value, 26u); + + // another feed expires + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + generate_block(); + bitasset_data = bit_usd_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 3u); + itr = bitasset_data.feeds.begin(); + BOOST_CHECK_EQUAL(itr[0].first.instance.value, 21u); + BOOST_CHECK_EQUAL(itr[1].first.instance.value, 25u); + BOOST_CHECK_EQUAL(itr[2].first.instance.value, 26u); + + // another feed expires + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + generate_block(); + bitasset_data = bit_usd_id(db).bitasset_data(db); + BOOST_CHECK_EQUAL(bitasset_data.feeds.size(), 2u); + itr = bitasset_data.feeds.begin(); + BOOST_CHECK_EQUAL(itr[0].first.instance.value, 21u); + BOOST_CHECK_EQUAL(itr[2].first.instance.value, 26u); + + // and so on + + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/swan_tests.cpp b/tests/tests/swan_tests.cpp new file mode 100644 index 0000000000..30031379c0 --- /dev/null +++ b/tests/tests/swan_tests.cpp @@ -0,0 +1,520 @@ +/* + * Copyright (c) 2017 Cryptonomex, Inc., and contributors. 
+ * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include + +#include "../common/database_fixture.hpp" + +using namespace graphene::chain; +using namespace graphene::chain::test; + +namespace graphene { namespace chain { + +struct swan_fixture : database_fixture { + limit_order_id_type init_standard_swan(share_type amount = 1000) { + standard_users(); + standard_asset(); + return trigger_swan(amount, amount); + } + + void standard_users() { + set_expiration( db, trx ); + ACTORS((borrower)(borrower2)(feedproducer)); + _borrower = borrower_id; + _borrower2 = borrower2_id; + _feedproducer = feedproducer_id; + + transfer(committee_account, borrower_id, asset(init_balance)); + transfer(committee_account, borrower2_id, asset(init_balance)); + } + + void standard_asset() { + set_expiration( db, trx ); + const auto& bitusd = create_bitasset("USDBIT", _feedproducer); + _swan = bitusd.id; + _back = asset_id_type(); + update_feed_producers(swan(), {_feedproducer}); + } + + limit_order_id_type trigger_swan(share_type amount1, share_type amount2) { + set_expiration( db, trx ); + // starting out with price 1:1 + set_feed( 1, 1 ); + // start out with 2:1 collateral + borrow(borrower(), swan().amount(amount1), back().amount(2*amount1)); + borrow(borrower2(), swan().amount(amount2), back().amount(4*amount2)); + + FC_ASSERT( get_balance(borrower(), swan()) == amount1 ); + FC_ASSERT( get_balance(borrower2(), swan()) == amount2 ); + FC_ASSERT( get_balance(borrower() , back()) == init_balance - 2*amount1 ); + FC_ASSERT( get_balance(borrower2(), back()) == init_balance - 4*amount2 ); + + set_feed( 1, 2 ); + // this sell order is designed to trigger a black swan + limit_order_id_type oid = create_sell_order( borrower2(), swan().amount(1), back().amount(3) )->id; + + FC_ASSERT( get_balance(borrower(), swan()) == amount1 ); + FC_ASSERT( get_balance(borrower2(), swan()) == amount2 - 1 ); + FC_ASSERT( get_balance(borrower() , back()) == init_balance - 2*amount1 ); + FC_ASSERT( get_balance(borrower2(), back()) == init_balance - 2*amount2 ); + + BOOST_CHECK( swan().bitasset_data(db).has_settlement() ); + + return oid; + } + + void set_feed(share_type usd, share_type core) { + price_feed feed; + feed.maintenance_collateral_ratio = 1750; // need to set this explicitly, testnet has a different default + feed.settlement_price = swan().amount(usd) / back().amount(core); + 
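With the fixture's default amount of 1000, trigger_swan() comes down to simple collateral arithmetic: borrower holds 1000 swan of debt against 2000 back of collateral, so once the feed moves to 1 swan = 2 back the position has exactly 2 back per unit of debt, and a sell order asking 3 back per swan cannot be filled out of that collateral; that shortfall is what pushes the asset into global settlement. A rough check of the numbers (this is an interpretation of the test values, not the chain's matching code):

#include <cassert>
#include <cstdint>

int main()
{
   const int64_t debt = 1000, collateral = 2000;          // borrower's position, opened at the 1:1 feed

   // after set_feed(1, 2): collateral available per unit of debt, scaled by 1000
   const int64_t collateral_per_debt_x1000 = collateral * 1000 / debt;   // 2.000 back per swan

   // borrower2's sell order asks 3 back for 1 swan
   const int64_t ask_x1000 = 3 * 1000;

   // the least-collateralized call cannot pay that much per unit of debt -> black swan
   assert(ask_x1000 > collateral_per_debt_x1000);
   return 0;
}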
publish_feed(swan(), feedproducer(), feed); + } + + void expire_feed() { + generate_blocks(db.head_block_time() + GRAPHENE_DEFAULT_PRICE_FEED_LIFETIME); + generate_block(); + FC_ASSERT( swan().bitasset_data(db).current_feed.settlement_price.is_null() ); + } + + void wait_for_hf_core_216() { + generate_blocks( HARDFORK_CORE_216_TIME ); + generate_block(); + } + + void wait_for_maintenance() { + generate_blocks( db.get_dynamic_global_properties().next_maintenance_time ); + generate_block(); + } + + const account_object& borrower() { return _borrower(db); } + const account_object& borrower2() { return _borrower2(db); } + const account_object& feedproducer() { return _feedproducer(db); } + const asset_object& swan() { return _swan(db); } + const asset_object& back() { return _back(db); } + + int64_t init_balance = 1000000; + account_id_type _borrower, _borrower2, _feedproducer; + asset_id_type _swan, _back; +}; + +}} + +BOOST_FIXTURE_TEST_SUITE( swan_tests, swan_fixture ) + +/** + * This test sets up the minimum condition for a black swan to occur but does + * not test the full range of cases that may be possible during a black swan. + */ +BOOST_AUTO_TEST_CASE( black_swan ) +{ try { + init_standard_swan(); + + force_settle( borrower(), swan().amount(100) ); + + expire_feed(); + wait_for_hf_core_216(); + + force_settle( borrower(), swan().amount(100) ); + + set_feed( 100, 150 ); + + BOOST_TEST_MESSAGE( "Verify that we cannot borrow after black swan" ); + GRAPHENE_REQUIRE_THROW( borrow(borrower(), swan().amount(1000), back().amount(2000)), fc::exception ) + trx.operations.clear(); +} catch( const fc::exception& e) { + edump((e.to_detail_string())); + throw; + } +} + +/** + * Black swan occurs when price feed falls, triggered by settlement + * order. + */ +BOOST_AUTO_TEST_CASE( black_swan_issue_346 ) +{ try { + ACTORS((buyer)(seller)(borrower)(borrower2)(settler)(feeder)); + + const asset_object& core = asset_id_type()(db); + + int trial = 0; + + vector< const account_object* > actors{ &buyer, &seller, &borrower, &borrower2, &settler, &feeder }; + + auto top_up = [&]() + { + for( const account_object* actor : actors ) + { + int64_t bal = get_balance( *actor, core ); + if( bal < init_balance ) + transfer( committee_account, actor->id, asset(init_balance - bal) ); + else if( bal > init_balance ) + transfer( actor->id, committee_account, asset(bal - init_balance) ); + } + }; + + auto setup_asset = [&]() -> const asset_object& + { + const asset_object& bitusd = create_bitasset("USDBIT"+fc::to_string(trial)+"X", feeder_id); + update_feed_producers( bitusd, {feeder.id} ); + BOOST_CHECK( !bitusd.bitasset_data(db).has_settlement() ); + trial++; + return bitusd; + }; + + /* + * GRAPHENE_COLLATERAL_RATIO_DENOM + uint16_t maintenance_collateral_ratio = GRAPHENE_DEFAULT_MAINTENANCE_COLLATERAL_RATIO; + uint16_t maximum_short_squeeze_ratio = GRAPHENE_DEFAULT_MAX_SHORT_SQUEEZE_RATIO; + */ + + // situations to test: + // 1. minus short squeeze protection would be black swan, otherwise no + // 2. issue 346 (price feed drops followed by force settle, drop should trigger BS) + // 3. 
feed price < D/C of least collateralized short < call price < highest bid + + auto set_price = [&]( + const asset_object& bitusd, + const price& settlement_price + ) + { + price_feed feed; + feed.settlement_price = settlement_price; + feed.core_exchange_rate = settlement_price; + wdump( (feed.max_short_squeeze_price()) ); + publish_feed( bitusd, feeder, feed ); + }; + + auto wait_for_settlement = [&]() + { + const auto& idx = db.get_index_type().indices().get(); + const auto& itr = idx.rbegin(); + if( itr == idx.rend() ) + return; + generate_blocks( itr->settlement_date ); + BOOST_CHECK( !idx.empty() ); + generate_block(); + BOOST_CHECK( idx.empty() ); + }; + + { + const asset_object& bitusd = setup_asset(); + top_up(); + set_price( bitusd, bitusd.amount(1) / core.amount(5) ); // $0.20 + borrow(borrower, bitusd.amount(100), asset(1000)); // 2x collat + transfer( borrower, settler, bitusd.amount(100) ); + + // drop to $0.02 and settle + BOOST_CHECK( !bitusd.bitasset_data(db).has_settlement() ); + set_price( bitusd, bitusd.amount(1) / core.amount(50) ); // $0.02 + BOOST_CHECK( bitusd.bitasset_data(db).has_settlement() ); + GRAPHENE_REQUIRE_THROW( borrow( borrower2, bitusd.amount(100), asset(10000) ), fc::exception ); + force_settle( settler, bitusd.amount(100) ); + + // wait for forced settlement to execute + // this would throw on Sep.18 testnet, see #346 + wait_for_settlement(); + } + + // issue 350 + { + // ok, new asset + const asset_object& bitusd = setup_asset(); + top_up(); + set_price( bitusd, bitusd.amount(40) / core.amount(1000) ); // $0.04 + borrow( borrower, bitusd.amount(100), asset(5000) ); // 2x collat + transfer( borrower, seller, bitusd.amount(100) ); + limit_order_id_type oid_019 = create_sell_order( seller, bitusd.amount(39), core.amount(2000) )->id; // this order is at $0.019, we should not be able to match against it + limit_order_id_type oid_020 = create_sell_order( seller, bitusd.amount(40), core.amount(2000) )->id; // this order is at $0.020, we should be able to match against it + set_price( bitusd, bitusd.amount(21) / core.amount(1000) ); // $0.021 + // + // We attempt to match against $0.019 order and black swan, + // and this is intended behavior. See discussion in ticket. 
+ // + BOOST_CHECK( bitusd.bitasset_data(db).has_settlement() ); + BOOST_CHECK( db.find_object( oid_019 ) != nullptr ); + BOOST_CHECK( db.find_object( oid_020 ) == nullptr ); + } + + } catch( const fc::exception& e) { + edump((e.to_detail_string())); + throw; + } +} + +/** Creates a black swan, recover price feed - asset should be revived + */ +BOOST_AUTO_TEST_CASE( revive_recovered ) +{ try { + init_standard_swan( 700 ); + + wait_for_hf_core_216(); + + // revive after price recovers + set_feed( 700, 800 ); + BOOST_CHECK( swan().bitasset_data(db).has_settlement() ); + set_feed( 701, 800 ); + BOOST_CHECK( !swan().bitasset_data(db).has_settlement() ); +} catch( const fc::exception& e) { + edump((e.to_detail_string())); + throw; + } +} + +/** Creates a black swan, recover price feed - asset should be revived + */ +BOOST_AUTO_TEST_CASE( recollateralize ) +{ try { + init_standard_swan( 700 ); + + // no hardfork yet + GRAPHENE_REQUIRE_THROW( bid_collateral( borrower2(), back().amount(1000), swan().amount(100) ), fc::exception ); + + wait_for_hf_core_216(); + + int64_t b2_balance = get_balance( borrower2(), back() ); + bid_collateral( borrower2(), back().amount(1000), swan().amount(100) ); + BOOST_CHECK_EQUAL( get_balance( borrower2(), back() ), b2_balance - 1000 ); + bid_collateral( borrower2(), back().amount(2000), swan().amount(200) ); + BOOST_CHECK_EQUAL( get_balance( borrower2(), back() ), b2_balance - 2000 ); + bid_collateral( borrower2(), back().amount(1000), swan().amount(0) ); + BOOST_CHECK_EQUAL( get_balance( borrower2(), back() ), b2_balance ); + + // can't bid for non-bitassets + GRAPHENE_REQUIRE_THROW( bid_collateral( borrower2(), swan().amount(100), asset(100) ), fc::exception ); + // can't cancel a non-existant bid + GRAPHENE_REQUIRE_THROW( bid_collateral( borrower2(), back().amount(0), swan().amount(0) ), fc::exception ); + // can't bid zero collateral + GRAPHENE_REQUIRE_THROW( bid_collateral( borrower2(), back().amount(0), swan().amount(100) ), fc::exception ); + // can't bid more than we have + GRAPHENE_REQUIRE_THROW( bid_collateral( borrower2(), back().amount(b2_balance + 100), swan().amount(100) ), fc::exception ); + trx.operations.clear(); + + // can't bid on a live bitasset + const asset_object& bitcny = create_bitasset("CNYBIT", _feedproducer); + GRAPHENE_REQUIRE_THROW( bid_collateral( borrower2(), asset(100), bitcny.amount(100) ), fc::exception ); + update_feed_producers(bitcny, {_feedproducer}); + price_feed feed; + feed.settlement_price = bitcny.amount(1) / asset(1); + publish_feed( bitcny.id, _feedproducer, feed ); + borrow( borrower2(), bitcny.amount(100), asset(1000) ); + + // can't bid wrong collateral type + GRAPHENE_REQUIRE_THROW( bid_collateral( borrower2(), bitcny.amount(100), swan().amount(100) ), fc::exception ); + + BOOST_CHECK( swan().dynamic_data(db).current_supply == 1400 ); + BOOST_CHECK( swan().bitasset_data(db).settlement_fund == 2800 ); + BOOST_CHECK( swan().bitasset_data(db).has_settlement() ); + BOOST_CHECK( swan().bitasset_data(db).current_feed.settlement_price.is_null() ); + + // doesn't happen without price feed + bid_collateral( borrower(), back().amount(1400), swan().amount(700) ); + bid_collateral( borrower2(), back().amount(1400), swan().amount(700) ); + wait_for_maintenance(); + BOOST_CHECK( swan().bitasset_data(db).has_settlement() ); + + set_feed(1, 2); + // doesn't happen if cover is insufficient + bid_collateral( borrower2(), back().amount(1400), swan().amount(600) ); + wait_for_maintenance(); + BOOST_CHECK( 
swan().bitasset_data(db).has_settlement() ); + + set_feed(1, 2); + // doesn't happen if some bids have a bad swan price + bid_collateral( borrower2(), back().amount(1050), swan().amount(700) ); + wait_for_maintenance(); + BOOST_CHECK( swan().bitasset_data(db).has_settlement() ); + + set_feed(1, 2); + // works + bid_collateral( borrower(), back().amount(1051), swan().amount(700) ); + bid_collateral( borrower2(), back().amount(2100), swan().amount(1399) ); + + // check get_collateral_bids + graphene::app::database_api db_api(db); + GRAPHENE_REQUIRE_THROW( db_api.get_collateral_bids(back().symbol, 100, 0), fc::assert_exception ); + auto swan_symbol = _swan(db).symbol; + vector bids = db_api.get_collateral_bids(swan_symbol, 100, 1); + BOOST_CHECK_EQUAL( 1u, bids.size() ); + FC_ASSERT( _borrower2 == bids[0].bidder ); + bids = db_api.get_collateral_bids(swan_symbol, 1, 0); + BOOST_CHECK_EQUAL( 1u, bids.size() ); + FC_ASSERT( _borrower == bids[0].bidder ); + bids = db_api.get_collateral_bids(swan_symbol, 100, 0); + BOOST_CHECK_EQUAL( 2u, bids.size() ); + FC_ASSERT( _borrower == bids[0].bidder ); + FC_ASSERT( _borrower2 == bids[1].bidder ); + + BOOST_CHECK( swan().bitasset_data(db).has_settlement() ); + // revive + wait_for_maintenance(); + BOOST_CHECK( !swan().bitasset_data(db).has_settlement() ); + bids = db_api.get_collateral_bids(swan_symbol, 100, 0); + BOOST_CHECK( bids.empty() ); +} catch( const fc::exception& e) { + edump((e.to_detail_string())); + throw; + } +} + +/** Creates a black swan, settles all debts, recovers price feed - asset should be revived + */ +BOOST_AUTO_TEST_CASE( revive_empty_recovered ) +{ try { + limit_order_id_type oid = init_standard_swan( 1000 ); + + wait_for_hf_core_216(); + + set_expiration( db, trx ); + cancel_limit_order( oid(db) ); + force_settle( borrower(), swan().amount(1000) ); + force_settle( borrower2(), swan().amount(1000) ); + BOOST_CHECK_EQUAL( 0, swan().dynamic_data(db).current_supply.value ); + BOOST_CHECK_EQUAL( 0, swan().bitasset_data(db).settlement_fund.value ); + BOOST_CHECK( swan().bitasset_data(db).has_settlement() ); + + // revive after price recovers + set_feed( 1, 1 ); + BOOST_CHECK( !swan().bitasset_data(db).has_settlement() ); + + auto& call_idx = db.get_index_type().indices().get(); + auto itr = call_idx.find( boost::make_tuple(_feedproducer, _swan) ); + BOOST_CHECK( itr == call_idx.end() ); +} catch( const fc::exception& e) { + edump((e.to_detail_string())); + throw; + } +} + +/** Creates a black swan, settles all debts - asset should be revived in next maintenance + */ +BOOST_AUTO_TEST_CASE( revive_empty ) +{ try { + wait_for_hf_core_216(); + + limit_order_id_type oid = init_standard_swan( 1000 ); + + cancel_limit_order( oid(db) ); + force_settle( borrower(), swan().amount(1000) ); + force_settle( borrower2(), swan().amount(1000) ); + BOOST_CHECK_EQUAL( 0, swan().dynamic_data(db).current_supply.value ); + + BOOST_CHECK( swan().bitasset_data(db).has_settlement() ); + + // revive + wait_for_maintenance(); + BOOST_CHECK( !swan().bitasset_data(db).has_settlement() ); +} catch( const fc::exception& e) { + edump((e.to_detail_string())); + throw; + } +} + +/** Creates a black swan, settles all debts - asset should be revived in next maintenance + */ +BOOST_AUTO_TEST_CASE( revive_empty_with_bid ) +{ try { + wait_for_hf_core_216(); + + standard_users(); + standard_asset(); + + set_feed( 1, 1 ); + borrow(borrower(), swan().amount(1000), back().amount(2000)); + borrow(borrower2(), swan().amount(1000), back().amount(1967)); + + set_feed( 1, 2 ); + 
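The pass/fail pattern of the bids above is consistent with the revival rules the comments spell out: a current price feed must exist, the accepted bids must cover all of the outstanding debt, and each bid's total collateral, meaning its proportional share of the settlement fund plus the bid itself, has to clear the maintenance collateral ratio at the current feed. With a 2800-core fund backing 1400 swan of debt, a 1:2 feed and the fixture's MCR of 1.75, the threshold for a bid covering 700 swan works out to 1050 core, which is why 1050 is rejected and 1051 is accepted. A back-of-the-envelope check (an interpretation of the test numbers, not the validation code itself):

#include <cassert>
#include <cstdint>

int main()
{
   const int64_t fund = 2800, debt = 1400;        // settlement fund and outstanding swan debt
   const int64_t core_per_swan = 2;               // set_feed(1, 2)
   const int64_t mcr_x1000 = 1750;                // maintenance_collateral_ratio = 1750

   const int64_t bid_debt = 700;
   const int64_t fund_share = fund * bid_debt / debt;                      // 1400 core
   const int64_t required = bid_debt * core_per_swan * mcr_x1000 / 1000;   // 2450 core

   assert(fund_share + 1050 == required);         // exactly at the ratio -> not enough
   assert(fund_share + 1051 >  required);         // strictly above       -> bid accepted
   return 0;
}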
// this sell order is designed to trigger a black swan + limit_order_id_type oid = create_sell_order( borrower2(), swan().amount(1), back().amount(3) )->id; + BOOST_CHECK( swan().bitasset_data(db).has_settlement() ); + + cancel_limit_order( oid(db) ); + force_settle( borrower(), swan().amount(500) ); + force_settle( borrower(), swan().amount(500) ); + force_settle( borrower2(), swan().amount(667) ); + force_settle( borrower2(), swan().amount(333) ); + BOOST_CHECK_EQUAL( 0, swan().dynamic_data(db).current_supply.value ); + BOOST_CHECK_EQUAL( 0, swan().bitasset_data(db).settlement_fund.value ); + + bid_collateral( borrower(), back().amount(3000), swan().amount(700) ); + + BOOST_CHECK( swan().bitasset_data(db).has_settlement() ); + + // revive + wait_for_maintenance(); + BOOST_CHECK( !swan().bitasset_data(db).has_settlement() ); + graphene::app::database_api db_api(db); + auto swan_symbol = _swan(db).symbol; + vector bids = db_api.get_collateral_bids(swan_symbol, 100, 0); + BOOST_CHECK( bids.empty() ); + + auto& call_idx = db.get_index_type().indices().get(); + auto itr = call_idx.find( boost::make_tuple(_borrower, _swan) ); + BOOST_CHECK( itr == call_idx.end() ); + itr = call_idx.find( boost::make_tuple(_feedproducer, _swan) ); + BOOST_CHECK( itr == call_idx.end() ); +} catch( const fc::exception& e) { + edump((e.to_detail_string())); + throw; + } +} + +/** Creates a black swan, bids on more than outstanding debt + */ +BOOST_AUTO_TEST_CASE( overflow ) +{ try { + init_standard_swan( 700 ); + + wait_for_hf_core_216(); + + bid_collateral( borrower(), back().amount(2200), swan().amount(GRAPHENE_MAX_SHARE_SUPPLY - 1) ); + bid_collateral( borrower2(), back().amount(2100), swan().amount(1399) ); + set_feed(1, 2); + wait_for_maintenance(); + + auto& call_idx = db.get_index_type().indices().get(); + auto itr = call_idx.find( boost::make_tuple(_borrower, _swan) ); + BOOST_REQUIRE( itr != call_idx.end() ); + BOOST_CHECK_EQUAL( 1, itr->debt.value ); + itr = call_idx.find( boost::make_tuple(_borrower2, _swan) ); + BOOST_REQUIRE( itr != call_idx.end() ); + BOOST_CHECK_EQUAL( 1399, itr->debt.value ); + + BOOST_CHECK( !swan().bitasset_data(db).has_settlement() ); +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/uia_tests.cpp b/tests/tests/uia_tests.cpp index d6dc83cb17..56b2116a44 100644 --- a/tests/tests/uia_tests.cpp +++ b/tests/tests/uia_tests.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. + * Copyright (c) 2015-2018 Cryptonomex, Inc., and contributors. 
* * The MIT License * @@ -34,6 +34,8 @@ #include +#include + #include "../common/database_fixture.hpp" using namespace graphene::chain; @@ -54,7 +56,7 @@ BOOST_AUTO_TEST_CASE( create_advanced_uia ) creator.common_options.market_fee_percent = GRAPHENE_MAX_MARKET_FEE_PERCENT/100; /*1%*/ creator.common_options.issuer_permissions = charge_market_fee|white_list|override_authority|transfer_restricted|disable_confidential; creator.common_options.flags = charge_market_fee|white_list|override_authority|disable_confidential; - creator.common_options.core_exchange_rate = price({asset(2),asset(1,asset_id_type(1))}); + creator.common_options.core_exchange_rate = price(asset(2),asset(1,asset_id_type(1))); creator.common_options.whitelist_authorities = creator.common_options.blacklist_authorities = {account_id_type()}; trx.operations.push_back(std::move(creator)); PUSH_TX( db, trx, ~0 ); @@ -99,7 +101,7 @@ BOOST_AUTO_TEST_CASE( override_transfer_test ) sign( trx, dan_private_key ); GRAPHENE_REQUIRE_THROW( PUSH_TX( db, trx, 0 ), tx_missing_active_auth ); BOOST_TEST_MESSAGE( "Pass with issuer's signature" ); - trx.signatures.clear(); + trx.clear_signatures(); sign( trx, sam_private_key ); PUSH_TX( db, trx, 0 ); @@ -128,7 +130,7 @@ BOOST_AUTO_TEST_CASE( override_transfer_test2 ) sign( trx, dan_private_key ); GRAPHENE_REQUIRE_THROW( PUSH_TX( db, trx, 0 ), fc::exception); BOOST_TEST_MESSAGE( "Fail because overide_authority flag is not set" ); - trx.signatures.clear(); + trx.clear_signatures(); sign( trx, sam_private_key ); GRAPHENE_REQUIRE_THROW( PUSH_TX( db, trx, 0 ), fc::exception ); @@ -375,7 +377,7 @@ BOOST_AUTO_TEST_CASE( transfer_restricted_test ) transaction tx; tx.operations.push_back( op ); set_expiration( db, tx ); - PUSH_TX( db, tx, database::skip_authority_check | database::skip_tapos_check | database::skip_transaction_signatures ); + PUSH_TX( db, tx, database::skip_tapos_check | database::skip_transaction_signatures ); } ; const asset_object& uia = create_user_issued_asset( "TXRX", sam, transfer_restricted ); @@ -394,7 +396,7 @@ BOOST_AUTO_TEST_CASE( transfer_restricted_test ) transaction tx; tx.operations.push_back( op ); set_expiration( db, tx ); - PUSH_TX( db, tx, database::skip_authority_check | database::skip_tapos_check | database::skip_transaction_signatures ); + PUSH_TX( db, tx, database::skip_tapos_check | database::skip_transaction_signatures ); } ; BOOST_TEST_MESSAGE( "Enable transfer_restricted, send fails" ); @@ -426,11 +428,78 @@ BOOST_AUTO_TEST_CASE( transfer_restricted_test ) } } +/*** + * Test to see if a asset name is valid + * @param db the database + * @param acct the account that will attempt to create the asset + * @param asset_name the asset_name + * @param allowed whether the creation should be successful + * @returns true if meets expectations + */ +bool test_asset_name(graphene::chain::database_fixture* db, const graphene::chain::account_object& acct, std::string asset_name, bool allowed) +{ + if (allowed) + { + try + { + db->create_user_issued_asset(asset_name, acct, 0); + } catch (...) + { + return false; + } + } + else + { + try + { + db->create_user_issued_asset(asset_name, acct, 0); + return false; + } catch (fc::exception& ex) + { + return true; + } catch (...) 
+ { + return false; + } + } + return true; +} + +/*** + * Test to see if an ascii character can be used in an asset name + * @param c the ascii character (NOTE: includes extended ascii up to 255) + * @param allowed_beginning true if it should be allowed as the first character of an asset name + * @param allowed_middle true if it should be allowed in the middle of an asset name + * @param allowed_end true if it should be allowed at the end of an asset name + * @returns true if tests met expectations + */ +bool test_asset_char(graphene::chain::database_fixture* db, const graphene::chain::account_object& acct, const unsigned char& c, bool allowed_beginning, bool allowed_middle, bool allowed_end) +{ + std::ostringstream asset_name; + // beginning + asset_name << c << "CHARLIE"; + if (!test_asset_name(db, acct, asset_name.str(), allowed_beginning)) + return false; + + // middle + asset_name.str(""); + asset_name.clear(); + asset_name << "CHAR" << c << "LIE"; + if (!test_asset_name(db, acct, asset_name.str(), allowed_middle)) + return false; + + // end + asset_name.str(""); + asset_name.clear(); + asset_name << "CHARLIE" << c; + return test_asset_name(db, acct, asset_name.str(), allowed_end); +} + BOOST_AUTO_TEST_CASE( asset_name_test ) { try { - ACTORS( (alice)(bob) ); + ACTORS( (alice)(bob)(sam) ); auto has_asset = [&]( std::string symbol ) -> bool { @@ -449,23 +518,78 @@ BOOST_AUTO_TEST_CASE( asset_name_test ) GRAPHENE_REQUIRE_THROW( create_user_issued_asset( "ALPHA", alice_id(db), 0 ), fc::exception ); BOOST_CHECK( has_asset("ALPHA") ); BOOST_CHECK( !has_asset("ALPHA.ONE") ); + generate_blocks( HARDFORK_385_TIME ); + generate_block(); + // Bob can't create ALPHA.ONE GRAPHENE_REQUIRE_THROW( create_user_issued_asset( "ALPHA.ONE", bob_id(db), 0 ), fc::exception ); BOOST_CHECK( has_asset("ALPHA") ); BOOST_CHECK( !has_asset("ALPHA.ONE") ); - if( db.head_block_time() <= HARDFORK_409_TIME ) - { - // Alice can't create ALPHA.ONE before hardfork - GRAPHENE_REQUIRE_THROW( create_user_issued_asset( "ALPHA.ONE", alice_id(db), 0 ), fc::exception ); - BOOST_CHECK( has_asset("ALPHA") ); BOOST_CHECK( !has_asset("ALPHA.ONE") ); - generate_blocks( HARDFORK_409_TIME ); - generate_block(); - // Bob can't create ALPHA.ONE after hardfork - GRAPHENE_REQUIRE_THROW( create_user_issued_asset( "ALPHA.ONE", bob_id(db), 0 ), fc::exception ); - BOOST_CHECK( has_asset("ALPHA") ); BOOST_CHECK( !has_asset("ALPHA.ONE") ); - } - // Alice can create it + + // Alice can create ALPHA.ONE create_user_issued_asset( "ALPHA.ONE", alice_id(db), 0 ); BOOST_CHECK( has_asset("ALPHA") ); BOOST_CHECK( has_asset("ALPHA.ONE") ); + + // Sam tries to create asset ending in a number but fails before hf_620 + GRAPHENE_REQUIRE_THROW( create_user_issued_asset( "SP500", sam_id(db), 0 ), fc::assert_exception ); + BOOST_CHECK( !has_asset("SP500") ); + + // create a proposal to create asset ending in a number, this will fail before hf_620 + auto& core = asset_id_type()(db); + asset_create_operation op_p; + op_p.issuer = alice_id; + op_p.symbol = "SP500"; + op_p.common_options.core_exchange_rate = asset( 1 ) / asset( 1, asset_id_type( 1 ) ); + op_p.fee = core.amount(0); + + const auto& curfees = *db.get_global_properties().parameters.current_fees; + const auto& proposal_create_fees = curfees.get(); + proposal_create_operation prop; + prop.fee_paying_account = alice_id; + prop.proposed_ops.emplace_back( op_p ); + prop.expiration_time = db.head_block_time() + fc::days(1); + prop.fee = asset( proposal_create_fees.fee + 
proposal_create_fees.price_per_kbyte ); + + signed_transaction tx; + tx.operations.push_back( prop ); + db.current_fee_schedule().set_fee( tx.operations.back() ); + set_expiration( db, tx ); + sign( tx, alice_private_key ); + GRAPHENE_REQUIRE_THROW(PUSH_TX( db, tx ), fc::assert_exception); + + generate_blocks( HARDFORK_CORE_620_TIME + 1); + generate_block(); + + // Sam can create asset ending in number after hf_620 + create_user_issued_asset( "NIKKEI225", sam_id(db), 0 ); + BOOST_CHECK( has_asset("NIKKEI225") ); + + // make sure other assets can still be created after hf_620 + create_user_issued_asset( "ALPHA2", alice_id(db), 0 ); + create_user_issued_asset( "ALPHA2.ONE", alice_id(db), 0 ); + BOOST_CHECK( has_asset("ALPHA2") ); + BOOST_CHECK( has_asset("ALPHA2.ONE") ); + + // proposal to create asset ending in number will now be created successfully as we are in > hf_620 time + prop.expiration_time = db.head_block_time() + fc::days(3); + signed_transaction tx_hf620; + tx_hf620.operations.push_back( prop ); + db.current_fee_schedule().set_fee( tx_hf620.operations.back() ); + set_expiration( db, tx_hf620 ); + sign( tx_hf620, alice_private_key ); + PUSH_TX( db, tx_hf620 ); + + // assets with invalid characters should not be allowed + unsigned char c = 0; + do + { + if ( (c >= 48 && c <= 57) ) // numbers + BOOST_CHECK_MESSAGE( test_asset_char(this, alice_id(db), c, false, true, true), "Failed on good ASCII value " + std::to_string(c) ); + else if ( c >= 65 && c <= 90) // letters + BOOST_CHECK_MESSAGE( test_asset_char(this, alice_id(db), c, true, true, true), "Failed on good ASCII value " + std::to_string(c) ); + else // everything else + BOOST_CHECK_MESSAGE( test_asset_char(this, alice_id(db), c, false, false, false), "Failed on bad ASCII value " + std::to_string(c) ); + c++; + } while (c != 0); } catch(fc::exception& e) { diff --git a/tests/tests/voting_tests.cpp b/tests/tests/voting_tests.cpp new file mode 100644 index 0000000000..df92ccf061 --- /dev/null +++ b/tests/tests/voting_tests.cpp @@ -0,0 +1,542 @@ +/* + * Copyright (c) 2018 oxarbitrage, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include + +#include +#include + +#include + +#include "../common/database_fixture.hpp" + +using namespace graphene::chain; +using namespace graphene::chain::test; + + +BOOST_FIXTURE_TEST_SUITE(voting_tests, database_fixture) + +BOOST_AUTO_TEST_CASE(put_my_witnesses) +{ + try + { + graphene::app::database_api db_api1(db); + + ACTORS( (witness0) + (witness1) + (witness2) + (witness3) + (witness4) + (witness5) + (witness6) + (witness7) + (witness8) + (witness9) + (witness10) + (witness11) + (witness12) + (witness13) ); + + // Upgrade all accounts to LTM + upgrade_to_lifetime_member(witness0_id); + upgrade_to_lifetime_member(witness1_id); + upgrade_to_lifetime_member(witness2_id); + upgrade_to_lifetime_member(witness3_id); + upgrade_to_lifetime_member(witness4_id); + upgrade_to_lifetime_member(witness5_id); + upgrade_to_lifetime_member(witness6_id); + upgrade_to_lifetime_member(witness7_id); + upgrade_to_lifetime_member(witness8_id); + upgrade_to_lifetime_member(witness9_id); + upgrade_to_lifetime_member(witness10_id); + upgrade_to_lifetime_member(witness11_id); + upgrade_to_lifetime_member(witness12_id); + upgrade_to_lifetime_member(witness13_id); + + // Create all the witnesses + const witness_id_type witness0_witness_id = create_witness(witness0_id, witness0_private_key).id; + const witness_id_type witness1_witness_id = create_witness(witness1_id, witness1_private_key).id; + const witness_id_type witness2_witness_id = create_witness(witness2_id, witness2_private_key).id; + const witness_id_type witness3_witness_id = create_witness(witness3_id, witness3_private_key).id; + const witness_id_type witness4_witness_id = create_witness(witness4_id, witness4_private_key).id; + const witness_id_type witness5_witness_id = create_witness(witness5_id, witness5_private_key).id; + const witness_id_type witness6_witness_id = create_witness(witness6_id, witness6_private_key).id; + const witness_id_type witness7_witness_id = create_witness(witness7_id, witness7_private_key).id; + const witness_id_type witness8_witness_id = create_witness(witness8_id, witness8_private_key).id; + const witness_id_type witness9_witness_id = create_witness(witness9_id, witness9_private_key).id; + const witness_id_type witness10_witness_id = create_witness(witness10_id, witness10_private_key).id; + const witness_id_type witness11_witness_id = create_witness(witness11_id, witness11_private_key).id; + const witness_id_type witness12_witness_id = create_witness(witness12_id, witness12_private_key).id; + const witness_id_type witness13_witness_id = create_witness(witness13_id, witness13_private_key).id; + + // Create a vector with private key of all witnesses, will be used to activate 11 witnesses at a time + const vector private_keys = { + witness0_private_key, + witness1_private_key, + witness2_private_key, + witness3_private_key, + witness4_private_key, + witness5_private_key, + witness6_private_key, + witness7_private_key, + witness8_private_key, + witness9_private_key, + witness10_private_key, + witness11_private_key, + witness12_private_key, + witness13_private_key + + }; + + // create a map with account id and witness id of the first 11 witnesses + const flat_map witness_map = { + {witness0_id, witness0_witness_id}, + {witness1_id, witness1_witness_id}, + {witness2_id, witness2_witness_id}, + {witness3_id, witness3_witness_id}, + {witness4_id, witness4_witness_id}, + {witness5_id, witness5_witness_id}, + {witness6_id, witness6_witness_id}, + {witness7_id, witness7_witness_id}, + {witness8_id, witness8_witness_id}, + 
{witness9_id, witness9_witness_id}, + {witness10_id, witness10_witness_id}, + {witness11_id, witness11_witness_id}, + {witness12_id, witness12_witness_id}, + {witness13_id, witness13_witness_id} + }; + + // Check current default witnesses, default chain is configured with 10 witnesses + auto witnesses = db.get_global_properties().active_witnesses; + BOOST_CHECK_EQUAL(witnesses.size(), 10u); + BOOST_CHECK_EQUAL(witnesses.begin()[0].instance.value, 1u); + BOOST_CHECK_EQUAL(witnesses.begin()[1].instance.value, 2u); + BOOST_CHECK_EQUAL(witnesses.begin()[2].instance.value, 3u); + BOOST_CHECK_EQUAL(witnesses.begin()[3].instance.value, 4u); + BOOST_CHECK_EQUAL(witnesses.begin()[4].instance.value, 5u); + BOOST_CHECK_EQUAL(witnesses.begin()[5].instance.value, 6u); + BOOST_CHECK_EQUAL(witnesses.begin()[6].instance.value, 7u); + BOOST_CHECK_EQUAL(witnesses.begin()[7].instance.value, 8u); + BOOST_CHECK_EQUAL(witnesses.begin()[8].instance.value, 9u); + BOOST_CHECK_EQUAL(witnesses.begin()[9].instance.value, 10u); + + // Activate all witnesses + // Each witness is voted with incremental stake so last witness created will be the ones with more votes + int c = 0; + for (auto l : witness_map) { + int stake = 100 + c + 10; + transfer(committee_account, l.first, asset(stake)); + { + set_expiration(db, trx); + account_update_operation op; + op.account = l.first; + op.new_options = l.first(db).options; + op.new_options->votes.insert(l.second(db).vote_id); + + trx.operations.push_back(op); + sign(trx, private_keys.at(c)); + PUSH_TX(db, trx); + trx.clear(); + } + ++c; + } + + // Trigger the new witnesses + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + generate_block(); + + // Check my witnesses are now in control of the system + witnesses = db.get_global_properties().active_witnesses; + BOOST_CHECK_EQUAL(witnesses.size(), 11u); + BOOST_CHECK_EQUAL(witnesses.begin()[0].instance.value, 14u); + BOOST_CHECK_EQUAL(witnesses.begin()[1].instance.value, 15u); + BOOST_CHECK_EQUAL(witnesses.begin()[2].instance.value, 16u); + BOOST_CHECK_EQUAL(witnesses.begin()[3].instance.value, 17u); + BOOST_CHECK_EQUAL(witnesses.begin()[4].instance.value, 18u); + BOOST_CHECK_EQUAL(witnesses.begin()[5].instance.value, 19u); + BOOST_CHECK_EQUAL(witnesses.begin()[6].instance.value, 20u); + BOOST_CHECK_EQUAL(witnesses.begin()[7].instance.value, 21u); + BOOST_CHECK_EQUAL(witnesses.begin()[8].instance.value, 22u); + BOOST_CHECK_EQUAL(witnesses.begin()[9].instance.value, 23u); + BOOST_CHECK_EQUAL(witnesses.begin()[10].instance.value, 24u); + + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE(track_votes_witnesses_enabled) +{ + try + { + graphene::app::database_api db_api1(db); + + INVOKE(put_my_witnesses); + + const account_id_type witness1_id= get_account("witness1").id; + auto witness1_object = db_api1.get_witness_by_account(witness1_id(db).name); + BOOST_CHECK_EQUAL(witness1_object->total_votes, 111u); + + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE(track_votes_witnesses_disabled) +{ + try + { + graphene::app::database_api db_api1(db); + + INVOKE(put_my_witnesses); + + const account_id_type witness1_id= get_account("witness1").id; + auto witness1_object = db_api1.get_witness_by_account(witness1_id(db).name); + BOOST_CHECK_EQUAL(witness1_object->total_votes, 0u); + + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE(put_my_committee_members) +{ + try + { + graphene::app::database_api db_api1(db); + + ACTORS( (committee0) + (committee1) + (committee2) + (committee3) + (committee4) + (committee5) + 
(committee6) + (committee7) + (committee8) + (committee9) + (committee10) + (committee11) + (committee12) + (committee13) ); + + // Upgrade all accounts to LTM + upgrade_to_lifetime_member(committee0_id); + upgrade_to_lifetime_member(committee1_id); + upgrade_to_lifetime_member(committee2_id); + upgrade_to_lifetime_member(committee3_id); + upgrade_to_lifetime_member(committee4_id); + upgrade_to_lifetime_member(committee5_id); + upgrade_to_lifetime_member(committee6_id); + upgrade_to_lifetime_member(committee7_id); + upgrade_to_lifetime_member(committee8_id); + upgrade_to_lifetime_member(committee9_id); + upgrade_to_lifetime_member(committee10_id); + upgrade_to_lifetime_member(committee11_id); + upgrade_to_lifetime_member(committee12_id); + upgrade_to_lifetime_member(committee13_id); + + // Create all the committee + const committee_member_id_type committee0_committee_id = create_committee_member(committee0_id(db)).id; + const committee_member_id_type committee1_committee_id = create_committee_member(committee1_id(db)).id; + const committee_member_id_type committee2_committee_id = create_committee_member(committee2_id(db)).id; + const committee_member_id_type committee3_committee_id = create_committee_member(committee3_id(db)).id; + const committee_member_id_type committee4_committee_id = create_committee_member(committee4_id(db)).id; + const committee_member_id_type committee5_committee_id = create_committee_member(committee5_id(db)).id; + const committee_member_id_type committee6_committee_id = create_committee_member(committee6_id(db)).id; + const committee_member_id_type committee7_committee_id = create_committee_member(committee7_id(db)).id; + const committee_member_id_type committee8_committee_id = create_committee_member(committee8_id(db)).id; + const committee_member_id_type committee9_committee_id = create_committee_member(committee9_id(db)).id; + const committee_member_id_type committee10_committee_id = create_committee_member(committee10_id(db)).id; + const committee_member_id_type committee11_committee_id = create_committee_member(committee11_id(db)).id; + const committee_member_id_type committee12_committee_id = create_committee_member(committee12_id(db)).id; + const committee_member_id_type committee13_committee_id = create_committee_member(committee13_id(db)).id; + + // Create a vector with private key of all witnesses, will be used to activate 11 witnesses at a time + const vector private_keys = { + committee0_private_key, + committee1_private_key, + committee2_private_key, + committee3_private_key, + committee4_private_key, + committee5_private_key, + committee6_private_key, + committee7_private_key, + committee8_private_key, + committee9_private_key, + committee10_private_key, + committee11_private_key, + committee12_private_key, + committee13_private_key + }; + + // create a map with account id and committee id of the first 11 witnesses + const flat_map committee_map = { + {committee0_id, committee0_committee_id}, + {committee1_id, committee1_committee_id}, + {committee2_id, committee2_committee_id}, + {committee3_id, committee3_committee_id}, + {committee4_id, committee4_committee_id}, + {committee5_id, committee5_committee_id}, + {committee6_id, committee6_committee_id}, + {committee7_id, committee7_committee_id}, + {committee8_id, committee8_committee_id}, + {committee9_id, committee9_committee_id}, + {committee10_id, committee10_committee_id}, + {committee11_id, committee11_committee_id}, + {committee12_id, committee12_committee_id}, + {committee13_id, 
committee13_committee_id} + }; + + // Check the current default committee members; the default chain is configured with 10 committee members + auto committee_members = db.get_global_properties().active_committee_members; + + BOOST_CHECK_EQUAL(committee_members.size(), 10u); + BOOST_CHECK_EQUAL(committee_members.begin()[0].instance.value, 0u); + BOOST_CHECK_EQUAL(committee_members.begin()[1].instance.value, 1u); + BOOST_CHECK_EQUAL(committee_members.begin()[2].instance.value, 2u); + BOOST_CHECK_EQUAL(committee_members.begin()[3].instance.value, 3u); + BOOST_CHECK_EQUAL(committee_members.begin()[4].instance.value, 4u); + BOOST_CHECK_EQUAL(committee_members.begin()[5].instance.value, 5u); + BOOST_CHECK_EQUAL(committee_members.begin()[6].instance.value, 6u); + BOOST_CHECK_EQUAL(committee_members.begin()[7].instance.value, 7u); + BOOST_CHECK_EQUAL(committee_members.begin()[8].instance.value, 8u); + BOOST_CHECK_EQUAL(committee_members.begin()[9].instance.value, 9u); + + // Vote in all committee members + // Each committee member is voted in with incrementally larger stake, so the last one created ends up with the most votes + int c = 0; + for (auto committee : committee_map) { + int stake = 100 + c + 10; + transfer(committee_account, committee.first, asset(stake)); + { + set_expiration(db, trx); + account_update_operation op; + op.account = committee.first; + op.new_options = committee.first(db).options; + op.new_options->votes.insert(committee.second(db).vote_id); + + trx.operations.push_back(op); + sign(trx, private_keys.at(c)); + PUSH_TX(db, trx); + trx.clear(); + } + ++c; + } + + // Trigger the new committee + generate_blocks(db.get_dynamic_global_properties().next_maintenance_time); + generate_block(); + + // Check that the new committee members are now in control of the system + committee_members = db.get_global_properties().active_committee_members; + BOOST_CHECK_EQUAL(committee_members.size(), 11u); + + /* TODO: we are not in full control; the default committee members seem to have votes by default + BOOST_CHECK_EQUAL(committee_members.begin()[0].instance.value, 14); + BOOST_CHECK_EQUAL(committee_members.begin()[1].instance.value, 15); + BOOST_CHECK_EQUAL(committee_members.begin()[2].instance.value, 16); + BOOST_CHECK_EQUAL(committee_members.begin()[3].instance.value, 17); + BOOST_CHECK_EQUAL(committee_members.begin()[4].instance.value, 18); + BOOST_CHECK_EQUAL(committee_members.begin()[5].instance.value, 19); + BOOST_CHECK_EQUAL(committee_members.begin()[6].instance.value, 20); + BOOST_CHECK_EQUAL(committee_members.begin()[7].instance.value, 21); + BOOST_CHECK_EQUAL(committee_members.begin()[8].instance.value, 22); + BOOST_CHECK_EQUAL(committee_members.begin()[9].instance.value, 23); + BOOST_CHECK_EQUAL(committee_members.begin()[10].instance.value, 24); + */ + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE(track_votes_committee_enabled) +{ + try + { + graphene::app::database_api db_api1(db); + + INVOKE(put_my_committee_members); + + const account_id_type committee1_id = get_account("committee1").id; + auto committee1_object = db_api1.get_committee_member_by_account(committee1_id(db).name); + BOOST_CHECK_EQUAL(committee1_object->total_votes, 111u); + + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE(track_votes_committee_disabled) +{ + try + { + graphene::app::database_api db_api1(db); + + INVOKE(put_my_committee_members); + + const account_id_type committee1_id = get_account("committee1").id; + auto committee1_object = db_api1.get_committee_member_by_account(committee1_id(db).name); + BOOST_CHECK_EQUAL(committee1_object->total_votes, 0u); + + }
FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE(invalid_voting_account) +{ + try + { + ACTORS((alice)); + + account_id_type invalid_account_id( (uint64_t)999999 ); + + BOOST_CHECK( !db.find( invalid_account_id ) ); + + graphene::chain::account_update_operation op; + op.account = alice_id; + op.new_options = alice.options; + op.new_options->voting_account = invalid_account_id; + trx.operations.push_back(op); + sign(trx, alice_private_key); + + GRAPHENE_REQUIRE_THROW( PUSH_TX( db, trx, ~0 ), fc::exception ); + + } FC_LOG_AND_RETHROW() +} +BOOST_AUTO_TEST_CASE(last_voting_date) +{ + try + { + ACTORS((alice)); + + transfer(committee_account, alice_id, asset(100)); + + // we are going to vote for this witness + auto witness1 = witness_id_type(1)(db); + + auto stats_obj = db.get_account_stats_by_owner(alice_id); + BOOST_CHECK_EQUAL(stats_obj.last_vote_time.sec_since_epoch(), 0u); + + // alice votes + graphene::chain::account_update_operation op; + op.account = alice_id; + op.new_options = alice.options; + op.new_options->votes.insert(witness1.vote_id); + trx.operations.push_back(op); + sign(trx, alice_private_key); + PUSH_TX( db, trx, ~0 ); + + auto now = db.head_block_time().sec_since_epoch(); + + // last_vote_time is updated for alice + stats_obj = db.get_account_stats_by_owner(alice_id); + BOOST_CHECK_EQUAL(stats_obj.last_vote_time.sec_since_epoch(), now); + + } FC_LOG_AND_RETHROW() +} +BOOST_AUTO_TEST_CASE(last_voting_date_proxy) +{ + try + { + ACTORS((alice)(proxy)(bob)); + + transfer(committee_account, alice_id, asset(100)); + transfer(committee_account, bob_id, asset(200)); + transfer(committee_account, proxy_id, asset(300)); + + generate_block(); + + // witness to vote for + auto witness1 = witness_id_type(1)(db); + + // round1: alice changes proxy, this is voting activity + { + graphene::chain::account_update_operation op; + op.account = alice_id; + op.new_options = alice_id(db).options; + op.new_options->voting_account = proxy_id; + trx.operations.push_back(op); + sign(trx, alice_private_key); + PUSH_TX( db, trx, ~0 ); + } + // alice last_vote_time is updated + auto alice_stats_obj = db.get_account_stats_by_owner(alice_id); + auto round1 = db.head_block_time().sec_since_epoch(); + BOOST_CHECK_EQUAL(alice_stats_obj.last_vote_time.sec_since_epoch(), round1); + + generate_block(); + + // round 2: alice update account but no proxy or voting changes are done + { + graphene::chain::account_update_operation op; + op.account = alice_id; + op.new_options = alice_id(db).options; + trx.operations.push_back(op); + sign(trx, alice_private_key); + set_expiration( db, trx ); + PUSH_TX( db, trx, ~0 ); + } + // last_vote_time is not updated + alice_stats_obj = db.get_account_stats_by_owner(alice_id); + BOOST_CHECK_EQUAL(alice_stats_obj.last_vote_time.sec_since_epoch(), round1); + + generate_block(); + + // round 3: bob votes + { + graphene::chain::account_update_operation op; + op.account = bob_id; + op.new_options = bob_id(db).options; + op.new_options->votes.insert(witness1.vote_id); + trx.operations.push_back(op); + sign(trx, bob_private_key); + set_expiration( db, trx ); + PUSH_TX(db, trx, ~0); + } + + // last_vote_time for bob is updated as he voted + auto round3 = db.head_block_time().sec_since_epoch(); + auto bob_stats_obj = db.get_account_stats_by_owner(bob_id); + BOOST_CHECK_EQUAL(bob_stats_obj.last_vote_time.sec_since_epoch(), round3); + + generate_block(); + + // round 4: proxy votes + { + graphene::chain::account_update_operation op; + op.account = proxy_id; + op.new_options = 
proxy_id(db).options; + op.new_options->votes.insert(witness1.vote_id); + trx.operations.push_back(op); + sign(trx, proxy_private_key); + PUSH_TX(db, trx, ~0); + } + + // proxy just voted so the last_vote_time is updated + auto round4 = db.head_block_time().sec_since_epoch(); + auto proxy_stats_obj = db.get_account_stats_by_owner(proxy_id); + BOOST_CHECK_EQUAL(proxy_stats_obj.last_vote_time.sec_since_epoch(), round4); + + // alice has a proxy; the proxy votes, but last_vote_time is not updated for alice + alice_stats_obj = db.get_account_stats_by_owner(alice_id); + BOOST_CHECK_EQUAL(alice_stats_obj.last_vote_time.sec_since_epoch(), round1); + + // bob has nothing to do with the proxy, so last_vote_time is not updated + bob_stats_obj = db.get_account_stats_by_owner(bob_id); + BOOST_CHECK_EQUAL(bob_stats_obj.last_vote_time.sec_since_epoch(), round3); + + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/wallet_tests.cpp b/tests/tests/wallet_tests.cpp new file mode 100644 index 0000000000..8601f747d5 --- /dev/null +++ b/tests/tests/wallet_tests.cpp @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2017 Cryptonomex, Inc., and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE.
+ */ + +#include + +#include +#include +#include + +#include + +#include "../common/database_fixture.hpp" + +using namespace graphene::chain; +using namespace graphene::chain::test; +using namespace graphene::wallet; + +BOOST_FIXTURE_TEST_SUITE(wallet_tests, database_fixture) + + /*** + * Check the basic behavior of deriving potential owner keys from a brain key + */ + BOOST_AUTO_TEST_CASE(derive_owner_keys_from_brain_key) { + try { + /*** + * Act + */ + unsigned int nbr_keys_desired = 3; + vector<brain_key_info> derived_keys = graphene::wallet::utility::derive_owner_keys_from_brain_key("SOME WORDS GO HERE", nbr_keys_desired); + + + /*** + * Assert: Check the number of derived keys + */ + BOOST_CHECK_EQUAL(nbr_keys_desired, derived_keys.size()); + + /*** + * Assert: Check that each derived key is unique + */ + set<string> set_derived_public_keys; + for (auto info : derived_keys) { + string description = (string) info.pub_key; + set_derived_public_keys.emplace(description); + } + BOOST_CHECK_EQUAL(nbr_keys_desired, set_derived_public_keys.size()); + + /*** + * Assert: Check whether every public key begins with the expected prefix + */ + string expected_prefix = GRAPHENE_ADDRESS_PREFIX; + for (auto info : derived_keys) { + string description = (string) info.pub_key; + BOOST_CHECK_EQUAL(0u, description.find(expected_prefix)); + } + + } FC_LOG_AND_RETHROW() + } + +BOOST_AUTO_TEST_SUITE_END() +
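A note on the ASCII sweep at the end of asset_name_test: it encodes a small expectation table for user-issued asset symbols after HARDFORK_CORE_620 — uppercase letters are accepted at any position, digits are accepted anywhere except the first position, and every other byte value is rejected. The sketch below restates that table in isolation; the struct and function names are invented for illustration, and this is not the validator shipped in libraries/chain:

// Illustrative sketch only (names are assumptions, not project API): the per-character
// expectations that test_asset_char is driven with in the sweep above.
struct char_expectation { bool beginning; bool middle; bool end; };

static char_expectation expected_for( unsigned char c )
{
   if( c >= 'A' && c <= 'Z' ) return { true,  true,  true  }; // letters: allowed anywhere
   if( c >= '0' && c <= '9' ) return { false, true,  true  }; // digits: never first (post HF 620)
   return { false, false, false };                            // everything else: rejected
}

Read against the cases above: expected_for('5') yields {false, true, true}, which matches "SP500" being accepted after HF 620 while a symbol starting with a digit is still rejected; '.' falls into the catch-all row here because the sub-asset separator is exercised separately through ALPHA.ONE.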