diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 89c86af2e7..7374129779 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -68,7 +68,7 @@ jobs: - uses: msys2/setup-msys2@v2 with: update: true - install: mingw-w64-x86_64-toolchain make mingw-w64-x86_64-cmake mingw-w64-x86_64-ccache mingw-w64-x86_64-boost mingw-w64-x86_64-openssl mingw-w64-x86_64-zeromq mingw-w64-x86_64-libsodium mingw-w64-x86_64-hidapi mingw-w64-x86_64-libusb mingw-w64-x86_64-unbound git + install: mingw-w64-x86_64-toolchain make mingw-w64-x86_64-cmake mingw-w64-x86_64-ccache mingw-w64-x86_64-boost mingw-w64-x86_64-openssl mingw-w64-x86_64-zeromq mingw-w64-x86_64-libsodium mingw-w64-x86_64-hidapi mingw-w64-x86_64-libusb mingw-w64-x86_64-unbound git mingw-w64-x86_64-rust - shell: msys2 {0} run: | curl -O https://repo.msys2.org/mingw/mingw64/mingw-w64-x86_64-protobuf-c-1.4.1-1-any.pkg.tar.zst diff --git a/.github/workflows/depends.yml b/.github/workflows/depends.yml index aafb8e56a9..e5daa4e814 100644 --- a/.github/workflows/depends.yml +++ b/.github/workflows/depends.yml @@ -31,36 +31,47 @@ jobs: toolchain: - name: "RISCV 64bit" host: "riscv64-linux-gnu" + rust_host: "riscv64gc-unknown-linux-gnu" packages: "python3 gperf g++-riscv64-linux-gnu" - name: "ARM v7" host: "arm-linux-gnueabihf" + rust_host: "armv7-unknown-linux-gnueabihf" packages: "python3 gperf g++-arm-linux-gnueabihf" - name: "ARM v8" host: "aarch64-linux-gnu" + rust_host: "aarch64-unknown-linux-gnu" packages: "python3 gperf g++-aarch64-linux-gnu" - name: "i686 Win" host: "i686-w64-mingw32" + rust_host: "i686-pc-windows-gnu" packages: "python3 g++-mingw-w64-i686" - name: "i686 Linux" host: "i686-pc-linux-gnu" + rust_host: "i686-unknown-linux-gnu" packages: "gperf cmake g++-multilib python3-zmq" - name: "Win64" host: "x86_64-w64-mingw32" + rust_host: "x86_64-pc-windows-gnu" packages: "cmake python3 g++-mingw-w64-x86-64" - name: "x86_64 Linux" host: "x86_64-unknown-linux-gnu" + rust_host: "x86_64-unknown-linux-gnu" packages: "gperf cmake python3-zmq libdbus-1-dev libharfbuzz-dev" - name: "Cross-Mac x86_64" host: "x86_64-apple-darwin" + rust_host: "x86_64-apple-darwin" packages: "cmake imagemagick libcap-dev librsvg2-bin libz-dev libbz2-dev libtiff-tools python-dev python3-setuptools-git" - name: "Cross-Mac aarch64" host: "aarch64-apple-darwin" + rust_host: "aarch64-apple-darwin" packages: "cmake imagemagick libcap-dev librsvg2-bin libz-dev libbz2-dev libtiff-tools python-dev python3-setuptools-git" - name: "x86_64 Freebsd" host: "x86_64-unknown-freebsd" + rust_host: "x86_64-unknown-freebsd" packages: "clang-8 gperf cmake python3-zmq libdbus-1-dev libharfbuzz-dev" - name: "ARMv8 Android" host: "aarch64-linux-android" + rust_host: "aarch64-linux-android" packages: "gperf cmake python3" name: ${{ matrix.toolchain.name }} steps: @@ -95,6 +106,11 @@ jobs: run: ${{env.APT_SET_CONF}} - name: install dependencies run: sudo apt update; sudo apt -y install build-essential libtool cmake autotools-dev automake pkg-config bsdmainutils curl git ca-certificates ccache ${{ matrix.toolchain.packages }} + - name: install rust target + # We can't use the latest Rust due to LLVM 17 not working with old `ld`s (such as in Ubuntu 20.04) for RISC-V + # We could update ld (a pain), update Ubuntu (requires a large amount of changes), or downgrade Rust + # We can't use Rust 1.70 due to LLVM 16 requiring ld >= 2.40 when building for Windows + run: rustup toolchain install 1.69; rustup default 1.69; rustup target add ${{ 
matrix.toolchain.rust_host }} - name: prepare w64-mingw32 if: ${{ matrix.toolchain.host == 'x86_64-w64-mingw32' || matrix.toolchain.host == 'i686-w64-mingw32' }} run: | diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 6190b40f83..ddada45bf0 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -89,6 +89,7 @@ add_subdirectory(ringct) add_subdirectory(checkpoints) add_subdirectory(cryptonote_basic) add_subdirectory(cryptonote_core) +add_subdirectory(fcmp_pp) add_subdirectory(lmdb) add_subdirectory(multisig) add_subdirectory(net) diff --git a/src/blockchain_db/CMakeLists.txt b/src/blockchain_db/CMakeLists.txt index e94705b221..5bcb16bc87 100644 --- a/src/blockchain_db/CMakeLists.txt +++ b/src/blockchain_db/CMakeLists.txt @@ -45,6 +45,7 @@ target_link_libraries(blockchain_db PUBLIC common cncrypto +fcmp_pp ringct ${LMDB_LIBRARY} ${Boost_FILESYSTEM_LIBRARY} diff --git a/src/blockchain_db/blockchain_db.cpp b/src/blockchain_db/blockchain_db.cpp index 894eb15c7c..2c0cb50331 100644 --- a/src/blockchain_db/blockchain_db.cpp +++ b/src/blockchain_db/blockchain_db.cpp @@ -41,6 +41,60 @@ using epee::string_tools::pod_to_hex; +//--------------------------------------------------------------- +// Helper function to group outputs by unlock block +static void get_outs_by_unlock_block(const cryptonote::transaction &tx, + const std::vector<uint64_t> &output_ids, + const uint64_t tx_height, + const bool miner_tx, + fcmp_pp::curve_trees::OutputsByUnlockBlock &outs_by_unlock_block_inout) +{ + const uint64_t unlock_block = cryptonote::get_unlock_block_index(tx.unlock_time, tx_height); + + CHECK_AND_ASSERT_THROW_MES(tx.vout.size() == output_ids.size(), "unexpected size of output ids"); + + for (std::size_t i = 0; i < tx.vout.size(); ++i) + { + const auto &out = tx.vout[i]; + + crypto::public_key output_public_key; + if (!cryptonote::get_output_public_key(out, output_public_key)) + throw std::runtime_error("Could not get an output public key from a tx output."); + + static_assert(CURRENT_TRANSACTION_VERSION == 2, "This section of code was written with 2 tx versions in mind. " + "Revisit this section and update for the new tx version."); + CHECK_AND_ASSERT_THROW_MES(tx.version == 1 || tx.version == 2, "encountered unexpected tx version"); + + if (!miner_tx && tx.version == 2) + CHECK_AND_ASSERT_THROW_MES(tx.rct_signatures.outPk.size() > i, "unexpected size of outPk"); +
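+ // Coinbase and v1 outputs have explicit amounts, so they are stored with an identity-mask commitment (rct::zeroCommit); v2 (RingCT) outputs already carry their commitment in outPk + rct::key commitment = (miner_tx || tx.version != 2) + ? 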
rct::zeroCommit(out.amount) + : tx.rct_signatures.outPk[i].mask; + + auto output_pair = fcmp_pp::curve_trees::OutputPair{ + .output_pubkey = std::move(output_public_key), + .commitment = std::move(commitment) + }; + + auto output_context = fcmp_pp::curve_trees::OutputContext{ + .output_id = output_ids[i], + .output_pair = std::move(output_pair) + }; + + if (outs_by_unlock_block_inout.find(unlock_block) == outs_by_unlock_block_inout.end()) + { + auto new_vec = std::vector<fcmp_pp::curve_trees::OutputContext>{std::move(output_context)}; + outs_by_unlock_block_inout[unlock_block] = std::move(new_vec); + } + else + { + outs_by_unlock_block_inout[unlock_block].emplace_back(std::move(output_context)); + } + } +} +//--------------------------------------------------------------- + namespace cryptonote { @@ -179,7 +233,7 @@ void BlockchainDB::pop_block() pop_block(blk, txs); } -void BlockchainDB::add_transaction(const crypto::hash& blk_hash, const std::pair<transaction, blobdata_ref>& txp, const crypto::hash* tx_hash_ptr, const crypto::hash* tx_prunable_hash_ptr) +std::vector<uint64_t> BlockchainDB::add_transaction(const crypto::hash& blk_hash, const std::pair<transaction, blobdata_ref>& txp, const crypto::hash* tx_hash_ptr, const crypto::hash* tx_prunable_hash_ptr) { const transaction &tx = txp.first; @@ -223,7 +277,7 @@ void BlockchainDB::add_transaction(const crypto::hash& blk_hash, const std::pair uint64_t tx_id = add_transaction_data(blk_hash, txp, tx_hash, tx_prunable_hash); - std::vector<uint64_t> amount_output_indices(tx.vout.size()); + std::vector<output_indexes_t> output_indices(tx.vout.size()); // iterate tx.vout using indices instead of C++11 foreach syntax because // we need the index @@ -231,21 +285,35 @@ void BlockchainDB::add_transaction(const crypto::hash& blk_hash, const std::pair { // miner v2 txes have their coinbase output in one single out to save space, // and we store them as rct outputs with an identity mask + // note: get_outs_by_unlock_block mirrors this logic if (miner_tx && tx.version == 2) { cryptonote::tx_out vout = tx.vout[i]; rct::key commitment = rct::zeroCommit(vout.amount); vout.amount = 0; - amount_output_indices[i] = add_output(tx_hash, vout, i, tx.unlock_time, + output_indices[i] = add_output(tx_hash, vout, i, tx.unlock_time, &commitment); } else {
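- amount_output_indices[i] = add_output(tx_hash, tx.vout[i], i, tx.unlock_time, + // add_output now returns output_indexes_t, i.e. the amount output index plus the output's global output ID + output_indices[i] = add_output(tx_hash, tx.vout[i], i, tx.unlock_time, tx.version > 1 ? 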
&tx.rct_signatures.outPk[i].mask : NULL); } } + + std::vector<uint64_t> amount_output_indices; + std::vector<uint64_t> output_ids; + amount_output_indices.reserve(output_indices.size()); + output_ids.reserve(output_indices.size()); + for (const auto &o_idx : output_indices) + { + amount_output_indices.push_back(o_idx.amount_index); + output_ids.push_back(o_idx.output_id); + } + add_tx_amount_output_indices(tx_id, amount_output_indices); + + return output_ids; } uint64_t BlockchainDB::add_block( const std::pair<block, blobdata>& blck @@ -273,9 +341,12 @@ uint64_t BlockchainDB::add_block( const std::pair<block, blobdata>& blck time1 = epee::misc_utils::get_tick_count(); + std::vector<std::vector<uint64_t>> output_ids; + output_ids.reserve(txs.size()); + uint64_t num_rct_outs = 0; blobdata miner_bd = tx_to_blob(blk.miner_tx); - add_transaction(blk_hash, std::make_pair(blk.miner_tx, blobdata_ref(miner_bd))); + std::vector<uint64_t> miner_output_ids = add_transaction(blk_hash, std::make_pair(blk.miner_tx, blobdata_ref(miner_bd))); if (blk.miner_tx.version == 2) num_rct_outs += blk.miner_tx.vout.size(); int tx_i = 0; @@ -283,7 +354,7 @@ uint64_t BlockchainDB::add_block( const std::pair<block, blobdata>& blck for (const std::pair<transaction, blobdata>& tx : txs) { tx_hash = blk.tx_hashes[tx_i]; - add_transaction(blk_hash, tx, &tx_hash); + output_ids.push_back(add_transaction(blk_hash, tx, &tx_hash)); for (const auto &vout: tx.first.vout) { if (vout.amount == 0) @@ -294,9 +365,32 @@ uint64_t BlockchainDB::add_block( const std::pair<block, blobdata>& blck TIME_MEASURE_FINISH(time1); time_add_transaction += time1; + // When adding a block, we also need to keep track of when outputs unlock, so + // we can use them to grow the merkle tree used in FCMP++ at that point. + fcmp_pp::curve_trees::OutputsByUnlockBlock outs_by_unlock_block; + + // Get miner tx's leaf tuples + get_outs_by_unlock_block( + blk.miner_tx, + miner_output_ids, + prev_height, + true/*miner_tx*/, + outs_by_unlock_block); + + // Get all other txs' leaf tuples + for (std::size_t i = 0; i < txs.size(); ++i) + { + get_outs_by_unlock_block( + txs[i].first, + output_ids[i], + prev_height, + false/*miner_tx*/, + outs_by_unlock_block); + } + // call out to subclass implementation to add the block & metadata time1 = epee::misc_utils::get_tick_count(); - add_block(blk, block_weight, long_term_block_weight, cumulative_difficulty, coins_generated, num_rct_outs, blk_hash); + add_block(blk, block_weight, long_term_block_weight, cumulative_difficulty, coins_generated, num_rct_outs, blk_hash, outs_by_unlock_block); TIME_MEASURE_FINISH(time1); time_add_block1 += time1; diff --git a/src/blockchain_db/blockchain_db.h b/src/blockchain_db/blockchain_db.h index 3e953da30d..68a1cfdec2 100644 --- a/src/blockchain_db/blockchain_db.h +++ b/src/blockchain_db/blockchain_db.h @@ -32,6 +32,8 @@ #include #include +#include +#include #include #include "common/command_line.h" #include "crypto/hash.h" @@ -40,6 +42,7 @@ #include "cryptonote_basic/difficulty.h" #include "cryptonote_basic/hardfork.h" #include "cryptonote_protocol/enums.h" +#include "fcmp_pp/curve_trees.h" /** \file * Cryptonote Blockchain Database Interface */ @@ -187,6 +190,14 @@ struct txpool_tx_meta_t } }; +/** + * @brief a struct containing output indexes for convenience + */ +struct output_indexes_t +{ + uint64_t amount_index; + uint64_t output_id; +}; #define DBF_SAFE 1 #define DBF_FAST 2 @@ -398,6 +409,7 @@ class BlockchainDB * @param cumulative_difficulty the accumulated difficulty after this block * @param coins_generated the number of coins generated total after this block * @param blk_hash the hash of the block + * @param 
outs_by_unlock_block the outputs from this block to add to the merkle tree */ virtual void add_block( const block& blk , size_t block_weight @@ -406,6 +418,7 @@ class BlockchainDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash + , const fcmp_pp::curve_trees::OutputsByUnlockBlock& outs_by_unlock_block ) = 0; /** @@ -470,8 +483,9 @@ class BlockchainDB * future, this tracking (of the number, at least) should be moved to * this class, as it is necessary and the same among all BlockchainDB. * - * It returns an amount output index, which is the index of the output - * for its specified amount. + * It returns the output indexes, which contains an amount output index (the + * index of the output for its specified amount) and output id (the global + * index of the output among all outputs of any amount). * * This data should be stored in such a manner that the only thing needed to * reverse the process is the tx_out. @@ -484,9 +498,9 @@ class BlockchainDB * @param local_index index of the output in its transaction * @param unlock_time unlock time/height of the output * @param commitment the rct commitment to the output amount - * @return amount output index + * @return output indexes */ - virtual uint64_t add_output(const crypto::hash& tx_hash, const tx_out& tx_output, const uint64_t& local_index, const uint64_t unlock_time, const rct::key *commitment) = 0; + virtual output_indexes_t add_output(const crypto::hash& tx_hash, const tx_out& tx_output, const uint64_t& local_index, const uint64_t unlock_time, const rct::key *commitment) = 0; /** * @brief store amount output indices for a tx's outputs @@ -567,8 +581,10 @@ class BlockchainDB * @param tx the transaction to add * @param tx_hash_ptr the hash of the transaction, if already calculated * @param tx_prunable_hash_ptr the hash of the prunable part of the transaction, if already calculated + * + * @return the global output ids of all outputs inserted */ - void add_transaction(const crypto::hash& blk_hash, const std::pair& tx, const crypto::hash* tx_hash_ptr = NULL, const crypto::hash* tx_prunable_hash_ptr = NULL); + std::vector add_transaction(const crypto::hash& blk_hash, const std::pair& tx, const crypto::hash* tx_hash_ptr = NULL, const crypto::hash* tx_prunable_hash_ptr = NULL); mutable uint64_t time_tx_exists = 0; //!< a performance metric uint64_t time_commit1 = 0; //!< a performance metric @@ -576,12 +592,14 @@ class BlockchainDB HardFork* m_hardfork; + std::shared_ptr m_curve_trees; + public: /** * @brief An empty constructor. */ - BlockchainDB(): m_hardfork(NULL), m_open(false) { } + BlockchainDB(): m_hardfork(NULL), m_open(false), m_curve_trees() { } /** * @brief An empty destructor. 
@@ -1685,7 +1703,7 @@ class BlockchainDB * * @return false if the function returns false for any key image, otherwise true */ - virtual bool for_all_key_images(std::function<bool(const crypto::key_image&)>) const = 0; + virtual bool for_all_key_images(std::function<bool(const crypto::key_image_y&)>) const = 0; /** * @brief runs a function over a range of blocks @@ -1764,6 +1782,15 @@ class BlockchainDB */ virtual bool for_all_alt_blocks(std::function<bool(const crypto::hash&, const alt_block_data_t&, const cryptonote::blobdata*)> f, bool include_blob = false) const = 0; + // TODO: description and make private + virtual void grow_tree(std::vector<fcmp_pp::curve_trees::OutputContext> &&new_outputs) = 0; + + virtual void trim_tree(const uint64_t trim_n_leaf_tuples, const uint64_t trim_block_id) = 0; + + // TODO: description + virtual bool audit_tree(const uint64_t expected_n_leaf_tuples) const = 0; + virtual uint64_t get_num_leaf_tuples() const = 0; + virtual std::array<uint8_t, 32UL> get_tree_root() const = 0; // // Hard fork related storage diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index d01119249c..0def1e7051 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -54,7 +54,7 @@ using epee::string_tools::pod_to_hex; using namespace crypto; // Increase when the DB structure changes -#define VERSION 5 +#define VERSION 6 namespace { @@ -199,6 +203,10 @@ namespace * * spent_keys input hash - * + * locked_outputs block ID [{output ID, output pubkey, commitment}...] + * leaves leaf_idx {output ID, output pubkey, commitment} + * layers layer_idx [{child_chunk_idx, child_chunk_hash}...] + * * txpool_meta txn hash txn metadata * txpool_blob txn hash txn blob * @@ -210,7 +214,8 @@ namespace * attached as a prefix on the Data to serve as the DUPSORT key. * (DUPFIXED saves 8 bytes per record.) * - * The output_amounts table doesn't use a dummy key, but uses DUPSORT. + * The output_amounts, locked_outputs, and layers tables don't use a + * dummy key, but use DUPSORT. 
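+ * The leaves table, like output_txs, stores its index (the leaf index) inside the data and pairs it with the fixed dummy key. 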
*/ const char* const LMDB_BLOCKS = "blocks"; const char* const LMDB_BLOCK_HEIGHTS = "block_heights"; @@ -228,6 +233,11 @@ const char* const LMDB_OUTPUT_TXS = "output_txs"; const char* const LMDB_OUTPUT_AMOUNTS = "output_amounts"; const char* const LMDB_SPENT_KEYS = "spent_keys"; +// Curve trees merkle tree tables +const char* const LMDB_LOCKED_OUTPUTS = "locked_outputs"; +const char* const LMDB_LEAVES = "leaves"; +const char* const LMDB_LAYERS = "layers"; + const char* const LMDB_TXPOOL_META = "txpool_meta"; const char* const LMDB_TXPOOL_BLOB = "txpool_blob"; @@ -326,7 +336,22 @@ typedef struct mdb_block_info_4 uint64_t bi_long_term_block_weight; } mdb_block_info_4; -typedef mdb_block_info_4 mdb_block_info; +typedef struct mdb_block_info_5 +{ + uint64_t bi_height; + uint64_t bi_timestamp; + uint64_t bi_coins; + uint64_t bi_weight; // a size_t really but we need 32-bit compat + uint64_t bi_diff_lo; + uint64_t bi_diff_hi; + crypto::hash bi_hash; + uint64_t bi_cum_rct; + uint64_t bi_long_term_block_weight; + uint64_t bi_n_leaf_tuples; + std::array<uint8_t, 32UL> bi_tree_root; +} mdb_block_info_5; + +typedef mdb_block_info_5 mdb_block_info; typedef struct blk_height { crypto::hash bh_hash; @@ -351,6 +376,16 @@ typedef struct outtx { uint64_t local_index; } outtx; +typedef struct mdb_leaf { + uint64_t leaf_idx; + fcmp_pp::curve_trees::OutputContext output_context; +} mdb_leaf; + +typedef struct layer_val { + uint64_t child_chunk_idx; + std::array<uint8_t, 32UL> child_chunk_hash; +} layer_val; + std::atomic mdb_txn_safe::num_active_txns{0}; std::atomic_flag mdb_txn_safe::creation_gate = ATOMIC_FLAG_INIT; @@ -769,7 +804,7 @@ uint64_t BlockchainLMDB::get_estimated_batch_size(uint64_t batch_num_blocks, uin } void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t long_term_block_weight, const difficulty_type& cumulative_difficulty, const uint64_t& coins_generated, - uint64_t num_rct_outs, const crypto::hash& blk_hash) + uint64_t num_rct_outs, const crypto::hash& blk_hash, const fcmp_pp::curve_trees::OutputsByUnlockBlock& outs_by_unlock_block) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); @@ -797,6 +832,13 @@ void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t l throw0(BLOCK_PARENT_DNE("Top block is not new block's parent")); } + // Grow the tree with outputs that unlock at this block height + auto unlocked_outputs = this->get_outs_at_unlock_block_id(m_height); + this->grow_tree(std::move(unlocked_outputs)); + + // Now that we've used the unlocked leaves to grow the tree, we can delete them from the locked outputs table + this->del_locked_outs_at_block_id(m_height); + int result = 0; MDB_val_set(key, m_height); @@ -830,6 +872,8 @@ void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t l bi.bi_cum_rct += bi_prev->bi_cum_rct; } bi.bi_long_term_block_weight = long_term_block_weight; + bi.bi_n_leaf_tuples = this->get_num_leaf_tuples(); + bi.bi_tree_root = this->get_tree_root(); MDB_val_set(val, bi); result = mdb_cursor_put(m_cur_block_info, (MDB_val *)&zerokval, &val, MDB_APPENDDUP); @@ -840,6 +884,21 @@ void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t l if (result) throw0(DB_ERROR(lmdb_error("Failed to add block height by hash to db transaction: ", result).c_str())); + CURSOR(locked_outputs) + + // Add the locked outputs from this block to the locked outputs table + for (const auto &unlock_block : outs_by_unlock_block) + { + MDB_val_set(k_block_id, unlock_block.first); 
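+ // Outputs arrive per unlock block in increasing output ID order, which MDB_APPENDDUP requires to keep each dup list sorted + for (const auto &locked_output : 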
unlock_block.second) + { + MDB_val_set(v_output, locked_output); + result = mdb_cursor_put(m_cur_locked_outputs, &k_block_id, &v_output, MDB_APPENDDUP); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to add locked output: ", result).c_str())); + } + } + // we use weight as a proxy for size, since we don't have size but weight is >= size // and often actually equal m_cum_size += block_weight; @@ -868,6 +927,8 @@ void BlockchainLMDB::remove_block() // must use h now; deleting from m_block_info will invalidate it mdb_block_info *bi = (mdb_block_info *)h.mv_data; + const uint64_t block_id = bi->bi_height; + const uint64_t old_n_leaf_tuples = bi->bi_n_leaf_tuples; blk_height bh = {bi->bi_hash, 0}; h.mv_data = (void *)&bh; h.mv_size = sizeof(bh); @@ -881,6 +942,13 @@ void BlockchainLMDB::remove_block() if ((result = mdb_cursor_del(m_cur_block_info, 0))) throw1(DB_ERROR(lmdb_error("Failed to add removal of block info to db transaction: ", result).c_str())); + + // Get n_leaf_tuples from the new tip so we can trim the curve trees tree to the new tip + const uint64_t new_n_leaf_tuples = get_top_block_n_leaf_tuples(); + if (new_n_leaf_tuples > old_n_leaf_tuples) + throw1(DB_ERROR("Unexpected: more leaf tuples are in prev block, tree is expected to only grow")); + const uint64_t trim_n_leaf_tuples = old_n_leaf_tuples - new_n_leaf_tuples; + this->trim_tree(trim_n_leaf_tuples, block_id/*trim_block_id*/); } uint64_t BlockchainLMDB::add_transaction_data(const crypto::hash& blk_hash, const std::pair& txp, const crypto::hash& tx_hash, const crypto::hash& tx_prunable_hash) @@ -1046,7 +1114,7 @@ void BlockchainLMDB::remove_transaction_data(const crypto::hash& tx_hash, const throw1(DB_ERROR("Failed to add removal of tx index to db transaction")); } -uint64_t BlockchainLMDB::add_output(const crypto::hash& tx_hash, +output_indexes_t BlockchainLMDB::add_output(const crypto::hash& tx_hash, const tx_out& tx_output, const uint64_t& local_index, const uint64_t unlock_time, @@ -1110,7 +1178,10 @@ uint64_t BlockchainLMDB::add_output(const crypto::hash& tx_hash, if ((result = mdb_cursor_put(m_cur_output_amounts, &val_amount, &data, MDB_APPENDDUP))) throw0(DB_ERROR(lmdb_error("Failed to add output pubkey to db transaction: ", result).c_str())); - return ok.amount_index; + return output_indexes_t{ + .amount_index = ok.amount_index, + .output_id = ok.output_id + }; } void BlockchainLMDB::add_tx_amount_output_indices(const uint64_t tx_id, @@ -1167,125 +1238,1172 @@ void BlockchainLMDB::remove_output(const uint64_t amount, const uint64_t& out_in CURSOR(output_amounts); CURSOR(output_txs); - MDB_val_set(k, amount); - MDB_val_set(v, out_index); + MDB_val_set(k, amount); + MDB_val_set(v, out_index); + + auto result = mdb_cursor_get(m_cur_output_amounts, &k, &v, MDB_GET_BOTH); + if (result == MDB_NOTFOUND) + throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but amount not found")); + else if (result) + throw0(DB_ERROR(lmdb_error("DB error attempting to get an output", result).c_str())); + + const pre_rct_outkey *ok = (const pre_rct_outkey *)v.mv_data; + MDB_val_set(otxk, ok->output_id); + result = mdb_cursor_get(m_cur_output_txs, (MDB_val *)&zerokval, &otxk, MDB_GET_BOTH); + if (result == MDB_NOTFOUND) + { + throw0(DB_ERROR("Unexpected: global output index not found in m_output_txs")); + } + else if (result) + { + throw1(DB_ERROR(lmdb_error("Error adding removal of output tx to db transaction", result).c_str())); + } + + // Remove output from locked outputs table if present. 
We expect all valid + // outputs to be in the locked outputs table because remove_output is called + // when removing the top block from the chain, and all outputs from the top + // block are expected to be locked until they are at least 10 blocks old (10 + // is the lower bound). An output might not be in the locked outputs table if + // it is invalid, then gets removed from the locked outputs table upon growing + // the tree. + // TODO: test case where we add an invalid output to the chain, grow the tree + // in the block in which that output unlocks, pop blocks to remove that output + // from the chain, then progress the chain again. + CURSOR(locked_outputs); + + const uint64_t unlock_block = cryptonote::get_unlock_block_index(ok->data.unlock_time, ok->data.height); + + MDB_val_set(k_block_id, unlock_block); + MDB_val_set(v_output, ok->output_id); + + result = mdb_cursor_get(m_cur_locked_outputs, &k_block_id, &v_output, MDB_GET_BOTH); + if (result == MDB_NOTFOUND) + { + // We expect this output is invalid + } + else if (result) + { + throw1(DB_ERROR(lmdb_error("Error adding removal of locked output to db transaction", result).c_str())); + } + else + { + result = mdb_cursor_del(m_cur_locked_outputs, 0); + if (result) + throw0(DB_ERROR(lmdb_error(std::string("Error deleting locked output index ").append(boost::lexical_cast(out_index).append(": ")).c_str(), result).c_str())); + } + + result = mdb_cursor_del(m_cur_output_txs, 0); + if (result) + throw0(DB_ERROR(lmdb_error(std::string("Error deleting output index ").append(boost::lexical_cast(out_index).append(": ")).c_str(), result).c_str())); + + // now delete the amount + result = mdb_cursor_del(m_cur_output_amounts, 0); + if (result) + throw0(DB_ERROR(lmdb_error(std::string("Error deleting amount for output index ").append(boost::lexical_cast(out_index).append(": ")).c_str(), result).c_str())); +} + +void BlockchainLMDB::prune_outputs(uint64_t amount) +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + mdb_txn_cursors *m_cursors = &m_wcursors; + CURSOR(output_amounts); + CURSOR(output_txs); + + MINFO("Pruning outputs for amount " << amount); + + MDB_val v; + MDB_val_set(k, amount); + int result = mdb_cursor_get(m_cur_output_amounts, &k, &v, MDB_SET); + if (result == MDB_NOTFOUND) + return; + if (result) + throw0(DB_ERROR(lmdb_error("Error looking up outputs: ", result).c_str())); + + // gather output ids + mdb_size_t num_elems; + mdb_cursor_count(m_cur_output_amounts, &num_elems); + MINFO(num_elems << " outputs found"); + std::vector output_ids; + output_ids.reserve(num_elems); + while (1) + { + const pre_rct_outkey *okp = (const pre_rct_outkey *)v.mv_data; + output_ids.push_back(okp->output_id); + MDEBUG("output id " << okp->output_id); + result = mdb_cursor_get(m_cur_output_amounts, &k, &v, MDB_NEXT_DUP); + if (result == MDB_NOTFOUND) + break; + if (result) + throw0(DB_ERROR(lmdb_error("Error counting outputs: ", result).c_str())); + } + if (output_ids.size() != num_elems) + throw0(DB_ERROR("Unexpected number of outputs")); + + result = mdb_cursor_del(m_cur_output_amounts, MDB_NODUPDATA); + if (result) + throw0(DB_ERROR(lmdb_error("Error deleting outputs: ", result).c_str())); + + for (uint64_t output_id: output_ids) + { + MDB_val_set(v, output_id); + result = mdb_cursor_get(m_cur_output_txs, (MDB_val *)&zerokval, &v, MDB_GET_BOTH); + if (result) + throw0(DB_ERROR(lmdb_error("Error looking up output: ", result).c_str())); + result = mdb_cursor_del(m_cur_output_txs, 0); + if (result) + throw0(DB_ERROR(lmdb_error("Error 
deleting output: ", result).c_str())); + } +} + +void BlockchainLMDB::add_spent_key(const crypto::key_image& k_image) +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + mdb_txn_cursors *m_cursors = &m_wcursors; + + CURSOR(spent_keys) + + crypto::key_image_y k_image_y; + crypto::key_image_to_y(k_image, k_image_y); + + MDB_val k = {sizeof(k_image_y), (void *)&k_image_y}; + if (auto result = mdb_cursor_put(m_cur_spent_keys, (MDB_val *)&zerokval, &k, MDB_NODUPDATA)) { + if (result == MDB_KEYEXIST) + throw1(KEY_IMAGE_EXISTS("Attempting to add spent key image that's already in the db")); + else + throw1(DB_ERROR(lmdb_error("Error adding spent key image to db transaction: ", result).c_str())); + } +} + +void BlockchainLMDB::remove_spent_key(const crypto::key_image& k_image) +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + mdb_txn_cursors *m_cursors = &m_wcursors; + + CURSOR(spent_keys) + + crypto::key_image_y k_image_y; + crypto::key_image_to_y(k_image, k_image_y); + + MDB_val k = {sizeof(k_image_y), (void *)&k_image_y}; + auto result = mdb_cursor_get(m_cur_spent_keys, (MDB_val *)&zerokval, &k, MDB_GET_BOTH); + if (result != 0 && result != MDB_NOTFOUND) + throw1(DB_ERROR(lmdb_error("Error finding spent key to remove", result).c_str())); + if (!result) + { + result = mdb_cursor_del(m_cur_spent_keys, 0); + if (result) + throw1(DB_ERROR(lmdb_error("Error adding removal of key image to db transaction", result).c_str())); + } +} + +void BlockchainLMDB::grow_tree(std::vector &&new_outputs) +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + if (new_outputs.empty()) + return; + + check_open(); + mdb_txn_cursors *m_cursors = &m_wcursors; + + CHECK_AND_ASSERT_THROW_MES(m_write_txn != nullptr, "Must have m_write_txn set to grow tree"); + + CURSOR(leaves) + + // Get the number of leaf tuples that exist in the tree + const uint64_t old_n_leaf_tuples = this->get_num_leaf_tuples(); + + // Read every layer's last hash + const auto last_hashes = this->get_tree_last_hashes(); + + // Use the number of leaf tuples and the existing last hashes to get a struct we can use to extend the tree + CHECK_AND_ASSERT_THROW_MES(m_curve_trees != nullptr, "curve trees must be set"); + auto tree_extension = m_curve_trees->get_tree_extension(old_n_leaf_tuples, last_hashes, std::move(new_outputs)); + + // Insert the leaves + // TODO: grow_leaves + auto &leaves = tree_extension.leaves; + for (uint64_t i = 0; i < leaves.tuples.size(); ++i) + { + const uint64_t leaf_idx = i + leaves.start_leaf_tuple_idx; + mdb_leaf val{.leaf_idx = leaf_idx, .output_context = std::move(leaves.tuples[i])}; + MDB_val_set(v, val); + + int result = mdb_cursor_put(m_cur_leaves, (MDB_val *)&zerokval, &v, MDB_APPENDDUP); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to add leaf: ", result).c_str())); + } + + // Grow the layers + // TODO: grow_layers + const auto &c2_extensions = tree_extension.c2_layer_extensions; + const auto &c1_extensions = tree_extension.c1_layer_extensions; + + bool use_c2 = true; + uint64_t c2_idx = 0; + uint64_t c1_idx = 0; + for (uint64_t i = 0; i < (c2_extensions.size() + c1_extensions.size()); ++i) + { + const uint64_t layer_idx = c2_idx + c1_idx; + MDEBUG("Growing layer " << layer_idx); + + if (use_c2) + { + if (layer_idx % 2 != 0) + throw0(DB_ERROR(("Growing odd c2 layer, expected even layer idx for c2: " + + std::to_string(layer_idx)).c_str())); + + this->grow_layer(m_curve_trees->m_c2, + c2_extensions, + c2_idx, + layer_idx); + + ++c2_idx; + } + else + { + if (layer_idx 
% 2 == 0) + throw0(DB_ERROR(("Growing even c1 layer, expected odd layer idx for c1: " + + std::to_string(layer_idx)).c_str())); + + this->grow_layer(m_curve_trees->m_c1, + c1_extensions, + c1_idx, + layer_idx); + + ++c1_idx; + } + + use_c2 = !use_c2; + } +} + +template +void BlockchainLMDB::grow_layer(const std::unique_ptr &curve, + const std::vector> &layer_extensions, + const uint64_t ext_idx, + const uint64_t layer_idx) +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + mdb_txn_cursors *m_cursors = &m_wcursors; + + CURSOR(layers) + + CHECK_AND_ASSERT_THROW_MES(ext_idx < layer_extensions.size(), "unexpected layer extension"); + const auto &ext = layer_extensions[ext_idx]; + + CHECK_AND_ASSERT_THROW_MES(!ext.hashes.empty(), "empty layer extension"); + + // TODO: make sure ext.start_idx lines up with the end of the layer + + MDB_val_copy k(layer_idx); + + if (ext.update_existing_last_hash) + { + // We updated the last hash, so update it + layer_val lv; + lv.child_chunk_idx = ext.start_idx; + lv.child_chunk_hash = curve->to_bytes(ext.hashes.front()); + MDB_val_set(v, lv); + + // We expect to overwrite the existing hash + // TODO: make sure the hash already exists and is the existing last hash + int result = mdb_cursor_put(m_cur_layers, &k, &v, 0); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to update chunk hash: ", result).c_str())); + } + + // Now add all the new hashes found in the extension + for (uint64_t i = ext.update_existing_last_hash ? 1 : 0; i < ext.hashes.size(); ++i) + { + layer_val lv; + lv.child_chunk_idx = i + ext.start_idx; + lv.child_chunk_hash = curve->to_bytes(ext.hashes[i]); + MDB_val_set(v, lv); + + int result = mdb_cursor_put(m_cur_layers, &k, &v, MDB_APPENDDUP); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to add hash: ", result).c_str())); + } +} + +void BlockchainLMDB::trim_tree(const uint64_t trim_n_leaf_tuples, const uint64_t trim_block_id) +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + if (trim_n_leaf_tuples == 0) + return; + + check_open(); + mdb_txn_cursors *m_cursors = &m_wcursors; + + CURSOR(leaves) + CURSOR(locked_outputs) + CURSOR(layers) + + CHECK_AND_ASSERT_THROW_MES(m_write_txn != nullptr, "Must have m_write_txn set to trim tree"); + + const uint64_t old_n_leaf_tuples = this->get_num_leaf_tuples(); + CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples >= trim_n_leaf_tuples, "cannot trim more leaves than exist"); + + CHECK_AND_ASSERT_THROW_MES(m_curve_trees != nullptr, "curve trees must be set"); + const auto trim_instructions = m_curve_trees->get_trim_instructions(old_n_leaf_tuples, trim_n_leaf_tuples); + + // Do initial tree reads + const auto last_chunk_children_to_trim = this->get_last_chunk_children_to_trim(trim_instructions); + const auto last_hashes_to_trim = this->get_last_hashes_to_trim(trim_instructions); + + // Get the new hashes, wrapped in a simple struct we can use to trim the tree + const auto tree_reduction = m_curve_trees->get_tree_reduction( + trim_instructions, + last_chunk_children_to_trim, + last_hashes_to_trim); + + // Use tree reduction to trim tree + CHECK_AND_ASSERT_THROW_MES((tree_reduction.new_total_leaf_tuples + trim_n_leaf_tuples) == old_n_leaf_tuples, + "unexpected new total leaves"); + + // Trim the leaves + // TODO: trim_leaves + MDB_val_set(k_block_id, trim_block_id); + for (uint64_t i = 0; i < trim_n_leaf_tuples; ++i) + { + uint64_t leaf_tuple_idx = (old_n_leaf_tuples - trim_n_leaf_tuples + i); + + MDB_val_copy k(leaf_tuple_idx); + MDB_val v = k; + int result = 
mdb_cursor_get(m_cur_leaves, (MDB_val *)&zerokval, &v, MDB_GET_BOTH); + if (result == MDB_NOTFOUND) + throw0(DB_ERROR("leaf not found")); // TODO: specific error type instead of DB_ERROR + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get leaf: ", result).c_str())); + + // Re-add the output to the locked output table in order. The output should + // be in the outputs tables. + const auto *o = (mdb_leaf *)v.mv_data; + MDB_val_set(v_output, o->output_context); + MDEBUG("Re-adding locked output_id: " << o->output_context.output_id << " , unlock block: " << trim_block_id); + result = mdb_cursor_put(m_cur_locked_outputs, &k_block_id, &v_output, MDB_APPENDDUP); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to re-add locked output: ", result).c_str())); + + // Delete the leaf + result = mdb_cursor_del(m_cur_leaves, 0); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Error removing leaf: ", result).c_str())); + + MDEBUG("Successfully removed leaf at leaf_tuple_idx: " << leaf_tuple_idx); + } + + // Trim the layers + // TODO: trim_layers + const auto &c2_layer_reductions = tree_reduction.c2_layer_reductions; + const auto &c1_layer_reductions = tree_reduction.c1_layer_reductions; + + const std::size_t n_layers = c2_layer_reductions.size() + c1_layer_reductions.size(); + + bool use_c2 = true; + uint64_t c2_idx = 0; + uint64_t c1_idx = 0; + for (uint64_t i = 0; i < n_layers; ++i) + { + if (use_c2) + { + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layer_reductions.size(), "unexpected c2 layer reduction"); + const auto &c2_reduction = c2_layer_reductions[c2_idx]; + this->trim_layer(m_curve_trees->m_c2, c2_reduction, i); + ++c2_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layer_reductions.size(), "unexpected c1 layer reduction"); + const auto &c1_reduction = c1_layer_reductions[c1_idx]; + this->trim_layer(m_curve_trees->m_c1, c1_reduction, i); + ++c1_idx; + } + + use_c2 = !use_c2; + } + + // Trim any remaining layers in layers after the root + // TODO: trim_leftovers_after_root + if (n_layers > 0) + { + const uint64_t expected_root_idx = n_layers - 1; + while (1) + { + MDB_val k, v; + int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_LAST); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get last elem: ", result).c_str())); + + const uint64_t last_layer_idx = *(uint64_t *)k.mv_data; + if (last_layer_idx > expected_root_idx) + { + // Delete all elements in layers after the root + result = mdb_cursor_del(m_cur_layers, MDB_NODUPDATA); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Error removing elems after root: ", result).c_str())); + } + else if (last_layer_idx < expected_root_idx) + { + throw0(DB_ERROR("Encountered unexpected last elem in tree before the root")); + } + else // last_layer_idx == expected_root_idx + { + // We've trimmed all layers past the root, we're done + break; + } + } + } + else // n_layers == 0 + { + // Empty the layers table, no elems should remain + int result = mdb_drop(*m_write_txn, m_layers, 0); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Error emptying layers table: ", result).c_str())); + } +} + +template +void BlockchainLMDB::trim_layer(const std::unique_ptr &curve, + const fcmp_pp::curve_trees::LayerReduction &layer_reduction, + const uint64_t layer_idx) +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + mdb_txn_cursors *m_cursors = &m_wcursors; + + CURSOR(layers) + + MDEBUG("Trimming layer " << layer_idx); + MDB_val_copy k(layer_idx); + + // 
Get the number of existing elements in the layer + // TODO: get_num_elems_in_layer + uint64_t old_n_elems_in_layer = 0; + { + // Get the first record in a layer so we can then get the last record + MDB_val v; + int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_SET); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get first record in layer: ", result).c_str())); + + result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_LAST_DUP); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get last elem: ", result).c_str())); + + const auto *lv = (layer_val *)v.mv_data; + old_n_elems_in_layer = (1 + lv->child_chunk_idx); + } + + CHECK_AND_ASSERT_THROW_MES(old_n_elems_in_layer >= layer_reduction.new_total_parents, + "unexpected old n elems in layer"); + const uint64_t trim_n_elems_in_layer = old_n_elems_in_layer - layer_reduction.new_total_parents; + + // Delete the elements + for (uint64_t i = 0; i < trim_n_elems_in_layer; ++i) + { + uint64_t last_elem_idx = (old_n_elems_in_layer - 1 - i); + MDB_val_set(v, last_elem_idx); + + int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_GET_BOTH); + if (result == MDB_NOTFOUND) + throw0(DB_ERROR("leaf not found")); // TODO: specific error type instead of DB_ERROR + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get elem: ", result).c_str())); + + result = mdb_cursor_del(m_cur_layers, 0); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Error removing elem: ", result).c_str())); + + MDEBUG("Successfully removed elem at layer_idx: " << layer_idx << " , last_elem_idx: " << last_elem_idx); + } + + // Update the last element if needed + if (layer_reduction.update_existing_last_hash) + { + layer_val lv; + lv.child_chunk_idx = layer_reduction.new_total_parents - 1; + lv.child_chunk_hash = curve->to_bytes(layer_reduction.new_last_hash); + MDB_val_set(v, lv); + + // We expect to overwrite the existing hash + // TODO: make sure the hash already exists and is the existing last hash + int result = mdb_cursor_put(m_cur_layers, &k, &v, 0); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to update chunk hash: ", result).c_str())); + } +} + +uint64_t BlockchainLMDB::get_num_leaf_tuples() const +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + + TXN_PREFIX_RDONLY(); + RCURSOR(leaves) + + // Get the number of leaf tuples in the tree + std::uint64_t n_leaf_tuples = 0; + + { + MDB_val k, v; + int result = mdb_cursor_get(m_cur_leaves, &k, &v, MDB_LAST); + if (result == MDB_NOTFOUND) + n_leaf_tuples = 0; + else if (result == MDB_SUCCESS) + n_leaf_tuples = 1 + ((const mdb_leaf*)v.mv_data)->leaf_idx; + else + throw0(DB_ERROR(lmdb_error("Failed to get last leaf: ", result).c_str())); + } + + TXN_POSTFIX_RDONLY(); + + return n_leaf_tuples; +} + +uint64_t BlockchainLMDB::get_top_block_n_leaf_tuples() const +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + + TXN_PREFIX_RDONLY(); + RCURSOR(block_info); + + // if no blocks, return 0 + uint64_t m_height = height(); + if (m_height == 0) + { + return 0; + } + + MDB_val_copy k(m_height - 1); + MDB_val h = k; + int result = 0; + if ((result = mdb_cursor_get(m_cur_block_info, (MDB_val *)&zerokval, &h, MDB_GET_BOTH))) + throw1(BLOCK_DNE(lmdb_error("Failed to get top block: ", result).c_str())); + + const uint64_t n_leaf_tuples = ((mdb_block_info *)h.mv_data)->bi_n_leaf_tuples; + TXN_POSTFIX_RDONLY(); + return n_leaf_tuples; +} + +std::array BlockchainLMDB::get_tree_root() const +{ + LOG_PRINT_L3("BlockchainLMDB::" << 
__func__); + check_open(); + + TXN_PREFIX_RDONLY(); + RCURSOR(layers) + + std::array<uint8_t, 32UL> root; + + { + MDB_val k, v; + int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_LAST); + if (result == MDB_SUCCESS) + { + auto *lv = (layer_val *)v.mv_data; + root = std::move(lv->child_chunk_hash); + } + else if (result != MDB_NOTFOUND) + throw0(DB_ERROR(lmdb_error("Failed to get tree root: ", result).c_str())); + } + + TXN_POSTFIX_RDONLY(); + + return root; +} + +fcmp_pp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_tree_last_hashes() const +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + + TXN_PREFIX_RDONLY(); + RCURSOR(layers) + + fcmp_pp::curve_trees::CurveTreesV1::LastHashes last_hashes; + auto &c1_last_hashes = last_hashes.c1_last_hashes; + auto &c2_last_hashes = last_hashes.c2_last_hashes; + + // Traverse the tree layer-by-layer starting at the layer closest to leaf layer + uint64_t layer_idx = 0; + while (1) + { + MDB_val_copy<uint64_t> k(layer_idx); + MDB_val v; + + // Get the first record in a layer so we can then get the last record + int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_SET); + if (result == MDB_NOTFOUND) + break; + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get first record in layer: ", result).c_str())); + + // Get the last record in a layer + result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_LAST_DUP); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get last record in layer: ", result).c_str())); + + const auto *lv = (layer_val *)v.mv_data; + MDEBUG("Reading last hash at layer_idx: " << layer_idx << " , lv->child_chunk_idx: " << lv->child_chunk_idx); + + const bool use_c2 = (layer_idx % 2) == 0; + if (use_c2) + { + auto point = m_curve_trees->m_c2->from_bytes(lv->child_chunk_hash); + c2_last_hashes.emplace_back(std::move(point)); + } + else + { + auto point = m_curve_trees->m_c1->from_bytes(lv->child_chunk_hash); + c1_last_hashes.emplace_back(std::move(point)); + } + + ++layer_idx; + } + + TXN_POSTFIX_RDONLY(); + + return last_hashes; +} + +fcmp_pp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim BlockchainLMDB::get_last_chunk_children_to_trim( + const std::vector<fcmp_pp::curve_trees::TrimLayerInstructions> &trim_instructions) const +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + + fcmp_pp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim last_chunk_children_to_trim; + if (trim_instructions.empty()) + return last_chunk_children_to_trim; + + check_open(); + + CHECK_AND_ASSERT_THROW_MES(m_curve_trees != nullptr, "curve trees must be set"); + + TXN_PREFIX_RDONLY(); + RCURSOR(layers) + + auto &c1_last_children_out = last_chunk_children_to_trim.c1_children; + auto &c2_last_children_out = last_chunk_children_to_trim.c2_children; + + // Get the leaves to trim + // TODO: separate function for leaves + { + CHECK_AND_ASSERT_THROW_MES(!trim_instructions.empty(), "no instructions"); + const auto &trim_leaf_layer_instructions = trim_instructions[0]; + + std::vector<fcmp_pp::curve_trees::Selene::Scalar> leaves_to_trim; + + if (trim_leaf_layer_instructions.end_trim_idx > trim_leaf_layer_instructions.start_trim_idx) + { + leaves_to_trim.reserve(trim_leaf_layer_instructions.end_trim_idx - trim_leaf_layer_instructions.start_trim_idx); + + uint64_t idx = trim_leaf_layer_instructions.start_trim_idx; + CHECK_AND_ASSERT_THROW_MES(idx % fcmp_pp::curve_trees::CurveTreesV1::LEAF_TUPLE_SIZE == 0, + "expected divisible by leaf tuple size"); + + const uint64_t leaf_tuple_idx = idx / fcmp_pp::curve_trees::CurveTreesV1::LEAF_TUPLE_SIZE; + MDB_val k = zerokval; + MDB_val_copy<uint64_t> v(leaf_tuple_idx); 
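+ // Convert each stored output back into its three leaf scalars (O.x, I.x, C.x) while walking the chunk + 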
MDB_cursor_op leaf_op = MDB_GET_BOTH; + do + { + int result = mdb_cursor_get(m_cur_leaves, &k, &v, leaf_op); + leaf_op = MDB_NEXT; + if (result == MDB_NOTFOUND) + throw0(DB_ERROR("leaf not found")); // TODO: specific error type instead of DB_ERROR + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get leaf: ", result).c_str())); + + const auto *db_leaf = (mdb_leaf *)v.mv_data; + + // TODO: parallelize calls to this function + auto leaf = m_curve_trees->leaf_tuple(db_leaf->output_context.output_pair); + + leaves_to_trim.emplace_back(std::move(leaf.O_x)); + leaves_to_trim.emplace_back(std::move(leaf.I_x)); + leaves_to_trim.emplace_back(std::move(leaf.C_x)); + + idx += fcmp_pp::curve_trees::CurveTreesV1::LEAF_TUPLE_SIZE; + } + while (idx < trim_leaf_layer_instructions.end_trim_idx); + } + + c2_last_children_out.emplace_back(std::move(leaves_to_trim)); + } + + // Traverse the tree layer-by-layer starting at the layer closest to leaf layer, getting children to trim + // TODO: separate function for layers + bool parent_is_c1 = true; + for (uint64_t i = 1; i < trim_instructions.size(); ++i) + { + const auto &trim_layer_instructions = trim_instructions[i]; + + std::vector c1_children; + std::vector c2_children; + + if (trim_layer_instructions.end_trim_idx > trim_layer_instructions.start_trim_idx) + { + const uint64_t layer_idx = (i - 1); + uint64_t idx = trim_layer_instructions.start_trim_idx; + + MDB_val_set(k, layer_idx); + MDB_val_set(v, idx); + MDB_cursor_op op = MDB_GET_BOTH; + do + { + MDEBUG("Getting child to trim at layer_idx: " << layer_idx << " , idx: " << idx); + + int result = mdb_cursor_get(m_cur_layers, &k, &v, op); + op = MDB_NEXT_DUP; + if (result == MDB_NOTFOUND) + throw0(DB_ERROR("layer elem not found")); // TODO: specific error type instead of DB_ERROR + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get layer elem: ", result).c_str())); + + const auto *lv = (layer_val *)v.mv_data; + if (parent_is_c1) + { + const auto point = m_curve_trees->m_c2->from_bytes(lv->child_chunk_hash); + auto child_scalar = m_curve_trees->m_c2->point_to_cycle_scalar(point); + c1_children.emplace_back(std::move(child_scalar)); + } + else + { + const auto point = m_curve_trees->m_c1->from_bytes(lv->child_chunk_hash); + auto child_scalar = m_curve_trees->m_c1->point_to_cycle_scalar(point); + c2_children.emplace_back(std::move(child_scalar)); + } + + ++idx; + } + while (idx < trim_layer_instructions.end_trim_idx); + } + + if (parent_is_c1) + c1_last_children_out.emplace_back(std::move(c1_children)); + else + c2_last_children_out.emplace_back(std::move(c2_children)); + + parent_is_c1 = !parent_is_c1; + } + + TXN_POSTFIX_RDONLY(); + + return last_chunk_children_to_trim; +} + +fcmp_pp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_last_hashes_to_trim( + const std::vector &trim_instructions) const +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + + fcmp_pp::curve_trees::CurveTreesV1::LastHashes last_hashes_out; + if (trim_instructions.empty()) + return last_hashes_out; + + check_open(); + + TXN_PREFIX_RDONLY(); + RCURSOR(layers) + + + // Traverse the tree layer-by-layer starting at the layer closest to leaf layer + uint64_t layer_idx = 0; + for (const auto &trim_layer_instructions : trim_instructions) + { + const uint64_t new_last_idx = trim_layer_instructions.new_total_parents - 1; + + MDB_val_copy k(layer_idx); + MDB_val_set(v, new_last_idx); + + int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_GET_BOTH); + if (result == MDB_NOTFOUND) + 
throw0(DB_ERROR("layer elem not found")); // TODO: specific error type instead of DB_ERROR + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get layer elem: ", result).c_str())); + + const auto *lv = (layer_val *)v.mv_data; + if ((layer_idx % 2) == 0) + { + auto point = m_curve_trees->m_c2->from_bytes(lv->child_chunk_hash); + last_hashes_out.c2_last_hashes.emplace_back(std::move(point)); + } + else + { + auto point = m_curve_trees->m_c1->from_bytes(lv->child_chunk_hash); + last_hashes_out.c1_last_hashes.emplace_back(std::move(point)); + } + + ++layer_idx; + } + + TXN_POSTFIX_RDONLY(); + + return last_hashes_out; +} + +bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + + TXN_PREFIX_RDONLY(); + RCURSOR(leaves) + RCURSOR(layers) + + const uint64_t actual_n_leaf_tuples = this->get_num_leaf_tuples(); + CHECK_AND_ASSERT_MES(actual_n_leaf_tuples == expected_n_leaf_tuples, false, "unexpected num leaf tuples"); + + MDEBUG("Auditing tree with " << actual_n_leaf_tuples << " leaf tuples"); + + if (actual_n_leaf_tuples == 0) + { + // Make sure layers table is also empty + MDB_stat db_stats; + int result = mdb_stat(m_txn, m_layers, &db_stats); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to query m_layers: ", result).c_str())); + CHECK_AND_ASSERT_MES(db_stats.ms_entries == 0, false, "unexpected num layer entries"); + return true; + } + + CHECK_AND_ASSERT_THROW_MES(m_curve_trees != nullptr, "curve trees must be set"); + + // Check chunks of leaves hash into first layer as expected + uint64_t layer_idx = 0; + uint64_t child_chunk_idx = 0; + MDB_cursor_op leaf_op = MDB_FIRST; + MDB_cursor_op parent_op = MDB_FIRST; + + MDB_val_copy k_parent(layer_idx); + MDB_val_set(v_parent, child_chunk_idx); + + while (1) + { + // Get next leaf chunk + std::vector leaf_tuples_chunk; + leaf_tuples_chunk.reserve(m_curve_trees->m_c2_width); + + if (child_chunk_idx && child_chunk_idx % 1000 == 0) + MINFO("Auditing layer " << layer_idx << ", child_chunk_idx " << child_chunk_idx); + + // Iterate until chunk is full or we get to the end of all leaves + MDB_val k_leaf, v_leaf; + while (1) + { + int result = mdb_cursor_get(m_cur_leaves, &k_leaf, &v_leaf, leaf_op); + leaf_op = MDB_NEXT; + if (result == MDB_NOTFOUND) + break; + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to add leaf: ", result).c_str())); + + const auto *o = (mdb_leaf *)v_leaf.mv_data; + auto leaf = m_curve_trees->leaf_tuple(o->output_context.output_pair); + + leaf_tuples_chunk.emplace_back(std::move(leaf)); + + if (leaf_tuples_chunk.size() == m_curve_trees->m_c2_width) + break; + } + + // Get the actual leaf chunk hash from the db + MDEBUG("Getting leaf chunk hash starting at child_chunk_idx " << child_chunk_idx); + int result = mdb_cursor_get(m_cur_layers, &k_parent, &v_parent, parent_op); + parent_op = MDB_NEXT_DUP; + + // Check end condition: no more leaf tuples in the leaf layer + if (leaf_tuples_chunk.empty()) + { + // No more leaves, expect to be done with parent chunks as well + if (result != MDB_NOTFOUND) + throw0(DB_ERROR(lmdb_error("unexpected leaf chunk parent result found at child_chunk_idx " + + std::to_string(child_chunk_idx), result).c_str())); + + MDEBUG("Successfully audited leaf layer"); + break; + } + + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get parent in first layer: ", result).c_str())); + if (layer_idx != *(uint64_t*)k_parent.mv_data || child_chunk_idx != ((layer_val 
*)v_parent.mv_data)->child_chunk_idx) + throw0(DB_ERROR("unexpected parent encountered")); + + // Get the expected leaf chunk hash + const auto leaves = m_curve_trees->flatten_leaves(std::move(leaf_tuples_chunk)); + const fcmp_pp::curve_trees::Selene::Chunk chunk{leaves.data(), leaves.size()}; + + // Hash the chunk of leaves + for (uint64_t i = 0; i < leaves.size(); ++i) + MDEBUG("Hashing " << m_curve_trees->m_c2->to_string(leaves[i])); + + const auto chunk_hash = fcmp_pp::curve_trees::get_new_parent(m_curve_trees->m_c2, chunk); + MDEBUG("chunk_hash " << m_curve_trees->m_c2->to_string(chunk_hash) << " , hash init point: " + << m_curve_trees->m_c2->to_string(m_curve_trees->m_c2->hash_init_point()) << " (" << leaves.size() << " leaves)"); + + // Now compare to value from the db + const auto *lv = (layer_val *)v_parent.mv_data; + MDEBUG("Actual leaf chunk hash " << epee::string_tools::pod_to_hex(lv->child_chunk_hash)); + + const auto expected_bytes = m_curve_trees->m_c2->to_bytes(chunk_hash); + const auto actual_bytes = lv->child_chunk_hash; + CHECK_AND_ASSERT_MES(expected_bytes == actual_bytes, false, "unexpected leaf chunk hash"); + CHECK_AND_ASSERT_MES(lv->child_chunk_idx == child_chunk_idx, false, "unexpected child chunk idx"); + + ++child_chunk_idx; + } + + MDEBUG("Successfully audited leaf layer"); + + // Traverse up the tree auditing each layer until we've audited every layer in the tree + bool audit_complete = false; + while (!audit_complete) + { + MDEBUG("Auditing layer " << layer_idx); + + // Alternate starting with c1 as parent (we already audited c2 leaf parents), then c2 as parent, then c1, etc. + const bool parent_is_c1 = layer_idx % 2 == 0; + if (parent_is_c1) + { + audit_complete = this->audit_layer( + /*c_child*/ m_curve_trees->m_c2, + /*c_parent*/ m_curve_trees->m_c1, + layer_idx, + /*chunk_width*/ m_curve_trees->m_c1_width); + } + else + { + audit_complete = this->audit_layer( + /*c_child*/ m_curve_trees->m_c1, + /*c_parent*/ m_curve_trees->m_c2, + layer_idx, + /*chunk_width*/ m_curve_trees->m_c2_width); + } + + ++layer_idx; + } + + TXN_POSTFIX_RDONLY(); + + return true; +} + +template <typename C_CHILD, typename C_PARENT> +bool BlockchainLMDB::audit_layer(const std::unique_ptr<C_CHILD> &c_child, + const std::unique_ptr<C_PARENT> &c_parent, + const uint64_t child_layer_idx, + const uint64_t chunk_width) const +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + + TXN_PREFIX_RDONLY(); + + // Open two separate cursors for child and parent layer + MDB_cursor *child_layer_cursor, *parent_layer_cursor; + + int c_result = mdb_cursor_open(m_txn, m_layers, &child_layer_cursor); + if (c_result) + throw0(DB_ERROR(lmdb_error("Failed to open child cursor: ", c_result).c_str())); + int p_result = mdb_cursor_open(m_txn, m_layers, &parent_layer_cursor); + if (p_result) + throw0(DB_ERROR(lmdb_error("Failed to open parent cursor: ", p_result).c_str())); + + // Set the cursors to the start of each layer + const uint64_t parent_layer_idx = child_layer_idx + 1; + + MDB_val_set(k_child, child_layer_idx); + MDB_val_set(k_parent, parent_layer_idx); + + MDB_val v_child, v_parent; + + c_result = mdb_cursor_get(child_layer_cursor, &k_child, &v_child, MDB_SET); + p_result = mdb_cursor_get(parent_layer_cursor, &k_parent, &v_parent, MDB_SET); + + if (c_result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get child: ", c_result).c_str())); + if (p_result != MDB_SUCCESS && p_result != MDB_NOTFOUND) + throw0(DB_ERROR(lmdb_error("Failed to get parent: ", p_result).c_str())); + + // Begin to audit the layer + MDB_cursor_op op_child = 
MDB_FIRST_DUP; + MDB_cursor_op op_parent = MDB_FIRST_DUP; + bool audit_complete = false; + uint64_t child_chunk_idx = 0; + while (1) + { + if (child_chunk_idx && child_chunk_idx % 1000 == 0) + MINFO("Auditing layer " << parent_layer_idx << ", child_chunk_idx " << child_chunk_idx); + + // Get next child chunk + std::vector<typename C_CHILD::Point> child_chunk; + child_chunk.reserve(chunk_width); + while (1) + { + int result = mdb_cursor_get(child_layer_cursor, &k_child, &v_child, op_child); + op_child = MDB_NEXT_DUP; + if (result == MDB_NOTFOUND) + break; + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get child: ", result).c_str())); + + const auto *lv = (layer_val *)v_child.mv_data; + auto child_point = c_child->from_bytes(lv->child_chunk_hash); + + child_chunk.emplace_back(std::move(child_point)); + + if (child_chunk.size() == chunk_width) + break; + } + + // Get the actual chunk hash from the db + int result = mdb_cursor_get(parent_layer_cursor, &k_parent, &v_parent, op_parent); + op_parent = MDB_NEXT_DUP; + + // Check for end conditions + // End condition A (audit_complete=false): finished auditing layer and ready to move up a layer + // End condition B (audit_complete=true ): finished auditing the tree, no more layers remaining + + // End condition A: check if finished auditing this layer + if (child_chunk.empty()) + { + // No more children, expect to be done auditing layer and ready to move up a layer + if (result != MDB_NOTFOUND) + throw0(DB_ERROR(lmdb_error("unexpected parent result at parent_layer_idx " + std::to_string(parent_layer_idx) + + " , child_chunk_idx " + std::to_string(child_chunk_idx) + " : ", result).c_str())); + + MDEBUG("Finished auditing layer " << child_layer_idx); + audit_complete = false; + break; + } + + // End condition B: check if finished auditing the tree + if (child_chunk_idx == 0 && child_chunk.size() == 1) + { + if (p_result != MDB_NOTFOUND) + throw0(DB_ERROR(lmdb_error("unexpected parent of root at parent_layer_idx " + std::to_string(parent_layer_idx) + + " , child_chunk_idx " + std::to_string(child_chunk_idx) + " : ", result).c_str())); + + MDEBUG("Encountered root at layer_idx " << child_layer_idx); + audit_complete = true; + break; + } + + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get parent: ", result).c_str())); + + if (child_layer_idx != *(uint64_t*)k_child.mv_data) + throw0(DB_ERROR("unexpected child encountered")); + if (parent_layer_idx != *(uint64_t*)k_parent.mv_data) + throw0(DB_ERROR("unexpected parent encountered")); - auto result = mdb_cursor_get(m_cur_output_amounts, &k, &v, MDB_GET_BOTH); - if (result == MDB_NOTFOUND) - throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but amount not found")); - else if (result) - throw0(DB_ERROR(lmdb_error("DB error attempting to get an output", result).c_str())); + // Get the expected chunk hash + std::vector<typename C_PARENT::Scalar> child_scalars; + child_scalars.reserve(child_chunk.size()); + for (const auto &child : child_chunk) + child_scalars.emplace_back(c_child->point_to_cycle_scalar(child)); + const typename C_PARENT::Chunk chunk{child_scalars.data(), child_scalars.size()}; - const pre_rct_outkey *ok = (const pre_rct_outkey *)v.mv_data; - MDB_val_set(otxk, ok->output_id); - result = mdb_cursor_get(m_cur_output_txs, (MDB_val *)&zerokval, &otxk, MDB_GET_BOTH); - if (result == MDB_NOTFOUND) - { - throw0(DB_ERROR("Unexpected: global output index not found in m_output_txs")); - } - else if (result) - { - throw1(DB_ERROR(lmdb_error("Error adding removal of output tx to db 
transaction", result).c_str())); + for (uint64_t i = 0; i < child_scalars.size(); ++i) + MDEBUG("Hashing " << c_parent->to_string(child_scalars[i])); + + const auto chunk_hash = fcmp_pp::curve_trees::get_new_parent(c_parent, chunk); + MDEBUG("Expected chunk_hash " << c_parent->to_string(chunk_hash) << " (" << child_scalars.size() << " children)"); + + const auto *lv = (layer_val *)v_parent.mv_data; + MDEBUG("Actual chunk hash " << epee::string_tools::pod_to_hex(lv->child_chunk_hash)); + + const auto actual_bytes = lv->child_chunk_hash; + const auto expected_bytes = c_parent->to_bytes(chunk_hash); + if (actual_bytes != expected_bytes) + throw0(DB_ERROR(("unexpected hash at child_chunk_idx " + std::to_string(child_chunk_idx)).c_str())); + if (lv->child_chunk_idx != child_chunk_idx) + throw0(DB_ERROR(("unexpected child_chunk_idx, expected " + std::to_string(child_chunk_idx)).c_str())); + + ++child_chunk_idx; } - result = mdb_cursor_del(m_cur_output_txs, 0); - if (result) - throw0(DB_ERROR(lmdb_error(std::string("Error deleting output index ").append(boost::lexical_cast<std::string>(out_index).append(": ")).c_str(), result).c_str())); - // now delete the amount - result = mdb_cursor_del(m_cur_output_amounts, 0); - if (result) - throw0(DB_ERROR(lmdb_error(std::string("Error deleting amount for output index ").append(boost::lexical_cast<std::string>(out_index).append(": ")).c_str(), result).c_str())); + TXN_POSTFIX_RDONLY(); + + return audit_complete; } -void BlockchainLMDB::prune_outputs(uint64_t amount) +std::vector<fcmp_pp::curve_trees::OutputContext> BlockchainLMDB::get_outs_at_unlock_block_id( + uint64_t block_id) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); - mdb_txn_cursors *m_cursors = &m_wcursors; - CURSOR(output_amounts); - CURSOR(output_txs); - MINFO("Pruning outputs for amount " << amount); + TXN_PREFIX_RDONLY(); + RCURSOR(locked_outputs) - MDB_val v; - MDB_val_set(k, amount); - int result = mdb_cursor_get(m_cur_output_amounts, &k, &v, MDB_SET); - if (result == MDB_NOTFOUND) - return; - if (result) - throw0(DB_ERROR(lmdb_error("Error looking up outputs: ", result).c_str())); + MDB_val_set(k_block_id, block_id); + MDB_val v_output; - // gather output ids - mdb_size_t num_elems; - mdb_cursor_count(m_cur_output_amounts, &num_elems); - MINFO(num_elems << " outputs found"); - std::vector<uint64_t> output_ids; - output_ids.reserve(num_elems); + // Get all the locked outputs at the provided block id + std::vector<fcmp_pp::curve_trees::OutputContext> outs; + + MDB_cursor_op op = MDB_SET; while (1) { - const pre_rct_outkey *okp = (const pre_rct_outkey *)v.mv_data; - output_ids.push_back(okp->output_id); - MDEBUG("output id " << okp->output_id); - result = mdb_cursor_get(m_cur_output_amounts, &k, &v, MDB_NEXT_DUP); + int result = mdb_cursor_get(m_cur_locked_outputs, &k_block_id, &v_output, op); if (result == MDB_NOTFOUND) break; - if (result) - throw0(DB_ERROR(lmdb_error("Error counting outputs: ", result).c_str())); - } - if (output_ids.size() != num_elems) - throw0(DB_ERROR("Unexpected number of outputs")); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get next locked outputs: ", result).c_str())); + op = MDB_NEXT_MULTIPLE; - result = mdb_cursor_del(m_cur_output_amounts, MDB_NODUPDATA); - if (result) - throw0(DB_ERROR(lmdb_error("Error deleting outputs: ", result).c_str())); + const uint64_t blk_id = *(const uint64_t*)k_block_id.mv_data; + if (blk_id != block_id) + throw0(DB_ERROR(("Blk id " + std::to_string(blk_id) + " not the expected " + std::to_string(block_id)).c_str())); - for (uint64_t output_id: output_ids) - { - MDB_val_set(v, output_id); - result = 
mdb_cursor_get(m_cur_output_txs, (MDB_val *)&zerokval, &v, MDB_GET_BOTH); - if (result) - throw0(DB_ERROR(lmdb_error("Error looking up output: ", result).c_str())); - result = mdb_cursor_del(m_cur_output_txs, 0); - if (result) - throw0(DB_ERROR(lmdb_error("Error deleting output: ", result).c_str())); - } -} + const auto range_begin = ((const fcmp_pp::curve_trees::OutputContext*)v_output.mv_data); + const auto range_end = range_begin + v_output.mv_size / sizeof(fcmp_pp::curve_trees::OutputContext); -void BlockchainLMDB::add_spent_key(const crypto::key_image& k_image) -{ - LOG_PRINT_L3("BlockchainLMDB::" << __func__); - check_open(); - mdb_txn_cursors *m_cursors = &m_wcursors; + auto it = range_begin; - CURSOR(spent_keys) + // The first MDB_NEXT_MULTIPLE includes the val from MDB_SET, so skip it + if (outs.size() == 1) + ++it; - MDB_val k = {sizeof(k_image), (void *)&k_image}; - if (auto result = mdb_cursor_put(m_cur_spent_keys, (MDB_val *)&zerokval, &k, MDB_NODUPDATA)) { - if (result == MDB_KEYEXIST) - throw1(KEY_IMAGE_EXISTS("Attempting to add spent key image that's already in the db")); - else - throw1(DB_ERROR(lmdb_error("Error adding spent key image to db transaction: ", result).c_str())); + while (it < range_end) + { + outs.push_back(*it); + ++it; + } } + + TXN_POSTFIX_RDONLY(); + + return outs; } -void BlockchainLMDB::remove_spent_key(const crypto::key_image& k_image) +void BlockchainLMDB::del_locked_outs_at_block_id(uint64_t block_id) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); mdb_txn_cursors *m_cursors = &m_wcursors; - CURSOR(spent_keys) + CURSOR(locked_outputs) - MDB_val k = {sizeof(k_image), (void *)&k_image}; - auto result = mdb_cursor_get(m_cur_spent_keys, (MDB_val *)&zerokval, &k, MDB_GET_BOTH); - if (result != 0 && result != MDB_NOTFOUND) - throw1(DB_ERROR(lmdb_error("Error finding spent key to remove", result).c_str())); - if (!result) - { - result = mdb_cursor_del(m_cur_spent_keys, 0); - if (result) - throw1(DB_ERROR(lmdb_error("Error adding removal of key image to db transaction", result).c_str())); - } + MDB_val_set(k_block_id, block_id); + + int result = mdb_cursor_get(m_cur_locked_outputs, &k_block_id, NULL, MDB_SET); + if (result == MDB_NOTFOUND) + return; + if (result != MDB_SUCCESS) + throw1(DB_ERROR(lmdb_error("Error finding locked outputs to remove: ", result).c_str())); + + result = mdb_cursor_del(m_cur_locked_outputs, MDB_NODUPDATA); + if (result) + throw1(DB_ERROR(lmdb_error("Error removing locked outputs: ", result).c_str())); } BlockchainLMDB::~BlockchainLMDB() @@ -1302,7 +2420,7 @@ BlockchainLMDB::~BlockchainLMDB() BlockchainLMDB::close(); } -BlockchainLMDB::BlockchainLMDB(bool batch_transactions): BlockchainDB() +BlockchainLMDB::BlockchainLMDB(bool batch_transactions, std::shared_ptr<fcmp_pp::curve_trees::CurveTreesV1> curve_trees): BlockchainDB() { LOG_PRINT_L3("BlockchainLMDB::" << __func__); // initialize folder to something "safe" just in case @@ -1319,6 +2437,8 @@ BlockchainLMDB::BlockchainLMDB(bool batch_transactions): BlockchainDB() // reset may also need changing when initialize things here m_hardfork = nullptr; + + m_curve_trees = curve_trees; } void BlockchainLMDB::open(const std::string& filename, const int db_flags) @@ -1331,6 +2451,9 @@ void BlockchainLMDB::open(const std::string& filename, const int db_flags) if (m_open) throw0(DB_OPEN_FAILURE("Attempted to open db, but it's already open")); + if (m_curve_trees == nullptr) + throw0(DB_OPEN_FAILURE("curve trees not set yet, must be set before opening db")); + boost::filesystem::path direc(filename); if 
(!boost::filesystem::exists(direc) && !boost::filesystem::create_directories(direc)) { @@ -1437,6 +2560,10 @@ void BlockchainLMDB::open(const std::string& filename, const int db_flags) lmdb_db_open(txn, LMDB_SPENT_KEYS, MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_spent_keys, "Failed to open db handle for m_spent_keys"); + lmdb_db_open(txn, LMDB_LOCKED_OUTPUTS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_locked_outputs, "Failed to open db handle for m_locked_outputs"); + lmdb_db_open(txn, LMDB_LEAVES, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_leaves, "Failed to open db handle for m_leaves"); + lmdb_db_open(txn, LMDB_LAYERS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_layers, "Failed to open db handle for m_layers"); + lmdb_db_open(txn, LMDB_TXPOOL_META, MDB_CREATE, m_txpool_meta, "Failed to open db handle for m_txpool_meta"); lmdb_db_open(txn, LMDB_TXPOOL_BLOB, MDB_CREATE, m_txpool_blob, "Failed to open db handle for m_txpool_blob"); @@ -1456,6 +2583,9 @@ void BlockchainLMDB::open(const std::string& filename, const int db_flags) mdb_set_dupsort(txn, m_block_heights, compare_hash32); mdb_set_dupsort(txn, m_tx_indices, compare_hash32); mdb_set_dupsort(txn, m_output_amounts, compare_uint64); + mdb_set_dupsort(txn, m_locked_outputs, compare_uint64); + mdb_set_dupsort(txn, m_leaves, compare_uint64); + mdb_set_dupsort(txn, m_layers, compare_uint64); mdb_set_dupsort(txn, m_output_txs, compare_uint64); mdb_set_dupsort(txn, m_block_info, compare_uint64); if (!(mdb_flags & MDB_RDONLY)) @@ -1512,7 +2642,10 @@ void BlockchainLMDB::open(const std::string& filename, const int db_flags) // We don't handle the old format previous to that commit. txn.commit(); m_open = true; + // Decrement num active txs so db can resize if needed + mdb_txn_safe::increment_txns(-1); migrate(db_version); + mdb_txn_safe::increment_txns(1); return; } #endif @@ -1633,6 +2766,12 @@ void BlockchainLMDB::reset() throw0(DB_ERROR(lmdb_error("Failed to drop m_output_amounts: ", result).c_str())); if (auto result = mdb_drop(txn, m_spent_keys, 0)) throw0(DB_ERROR(lmdb_error("Failed to drop m_spent_keys: ", result).c_str())); + if (auto result = mdb_drop(txn, m_locked_outputs, 0)) + throw0(DB_ERROR(lmdb_error("Failed to drop m_locked_outputs: ", result).c_str())); + if (auto result = mdb_drop(txn, m_leaves, 0)) + throw0(DB_ERROR(lmdb_error("Failed to drop m_leaves: ", result).c_str())); + if (auto result = mdb_drop(txn, m_layers, 0)) + throw0(DB_ERROR(lmdb_error("Failed to drop m_layers: ", result).c_str())); (void)mdb_drop(txn, m_hf_starting_heights, 0); // this one is dropped in new code if (auto result = mdb_drop(txn, m_hf_versions, 0)) throw0(DB_ERROR(lmdb_error("Failed to drop m_hf_versions: ", result).c_str())); @@ -3543,14 +4682,17 @@ bool BlockchainLMDB::has_key_image(const crypto::key_image& img) const TXN_PREFIX_RDONLY(); RCURSOR(spent_keys); - MDB_val k = {sizeof(img), (void *)&img}; + crypto::key_image_y img_y; + crypto::key_image_to_y(img, img_y); + + MDB_val k = {sizeof(img_y), (void *)&img_y}; ret = (mdb_cursor_get(m_cur_spent_keys, (MDB_val *)&zerokval, &k, MDB_GET_BOTH) == 0); TXN_POSTFIX_RDONLY(); return ret; } -bool BlockchainLMDB::for_all_key_images(std::function<bool(const crypto::key_image&)> f) const +bool BlockchainLMDB::for_all_key_images(std::function<bool(const crypto::key_image_y&)> f) const { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); @@ -3571,8 +4713,8 @@ bool BlockchainLMDB::for_all_key_images(std::functionm_txn; + + /* the spent_keys table name is the same but the old version and new 
version + * have different data. Create a new table. We want the name to be similar + * to the old name so that it will occupy the same location in the DB. + */ + lmdb_db_open(txn, "spent_keyr", MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_spent_keys, "Failed to open db handle for m_spent_keyr"); + mdb_set_dupsort(txn, m_spent_keys, compare_hash32); + + MDB_cursor *c_new_spent_keys, *c_old_spent_keys; + MDB_val k, v_img; + MDB_cursor_op op = MDB_FIRST; + + uint64_t n_old_key_images; + { + MDB_stat db_stats; + if ((result = mdb_stat(txn, o_spent_keys, &db_stats))) + throw0(DB_ERROR(lmdb_error("Failed to query m_spent_keys: ", result).c_str())); + n_old_key_images = db_stats.ms_entries; + } + + uint64_t n_new_key_images; + { + MDB_stat db_stats; + if ((result = mdb_stat(txn, m_spent_keys, &db_stats))) + throw0(DB_ERROR(lmdb_error("Failed to query m_spent_keys: ", result).c_str())); + n_new_key_images = db_stats.ms_entries; + } + + const uint64_t n_key_images = n_old_key_images + n_new_key_images; + + i = n_new_key_images; + while (i < n_key_images) + { + if (!(i % BATCH_SIZE)) + { + if (i) + { + LOGIF(el::Level::Info) + { + const uint64_t percent = std::min((i * 100) / n_key_images, (uint64_t)99); + std::cout << i << " / " << n_key_images << " key images (" << percent << "% of step 1/3) \r" << std::flush; + } + + // Start a new batch so resizing can occur as needed + batch_stop(); + batch_start(); + txn.m_txn = m_write_txn->m_txn; + } + + // Open all cursors + result = mdb_cursor_open(txn, m_spent_keys, &c_new_spent_keys); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for spent_keyr: ", result).c_str())); + result = mdb_cursor_open(txn, o_spent_keys, &c_old_spent_keys); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for spent_keys: ", result).c_str())); + op = MDB_FIRST; + } + + // Get old key image and use it to set the new key image y + result = mdb_cursor_get(c_old_spent_keys, &k, &v_img, op); + op = MDB_NEXT; + if (result) + throw0(DB_ERROR(lmdb_error("Failed to get a record from spent_keys: ", result).c_str())); + const crypto::key_image k_image = *(const crypto::key_image*)v_img.mv_data; + + crypto::key_image_y k_image_y; + crypto::key_image_to_y(k_image, k_image_y); + + MDB_val k_y = {sizeof(k_image_y), (void *)&k_image_y}; + if (auto result = mdb_cursor_put(c_new_spent_keys, (MDB_val *)&zerokval, &k_y, MDB_NODUPDATA)) { + if (result == MDB_KEYEXIST) + throw1(KEY_IMAGE_EXISTS("Attempting to add spent key image that's already in the db")); + else + throw1(DB_ERROR(lmdb_error("Error adding spent key image to db transaction: ", result).c_str())); + } + + /* we delete the old records immediately, so the overall DB and mapsize should not be + * larger than it needs to be. + * This is a little slower than just letting mdb_drop() delete it all at the end, but + * it saves a significant amount of disk space. + */ + result = mdb_cursor_del(c_old_spent_keys, 0); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to delete a record from spent_keys: ", result).c_str())); + + ++i; + } + batch_stop(); + } + + // 2. Prepare all valid outputs to be inserted into the merkle tree and + // place them in a locked outputs table. The key to this new table is the + // block id in which the outputs unlock. 
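+ // (Illustration, grounded in the helper below: the unlock block id comes from + // cryptonote::get_unlock_block_index; e.g. an output with unlock_time == 0 included + // at block index H unlocks at block H + CRYPTONOTE_DEFAULT_TX_SPENDABLE_AGE - 1.)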
+ { + MINFO("Setting up a locked outputs table (step 2/3 of full-chain membership proof migration)"); + + result = mdb_txn_begin(m_env, NULL, 0, txn); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str())); + lmdb_db_open(txn, "tmp_last_output", MDB_INTEGERKEY | MDB_CREATE, m_tmp_last_output, "Failed to open db handle for m_tmp_last_output"); + txn.commit(); + + if (!m_batch_transactions) + set_batch_transactions(true); + const std::size_t BATCH_SIZE = 10000; + batch_start(); + txn.m_txn = m_write_txn->m_txn; + + // Use this cache to know how to restart the migration if the process is killed + struct tmp_output_cache { uint64_t n_outputs_read; uint64_t amount; outkey ok; }; + tmp_output_cache last_output; + + MDB_cursor *c_output_amounts, *c_locked_outputs, *c_tmp_last_output; + MDB_val k, v; + + i = 0; + const uint64_t n_outputs = this->num_outputs(); + MDB_cursor_op op = MDB_FIRST; + while (1) + { + if (!(i % BATCH_SIZE)) + { + if (i) + { + LOGIF(el::Level::Info) + { + const uint64_t percent = std::min((i * 100) / n_outputs, (uint64_t)99); + std::cout << i << " / " << n_outputs << " outputs (" << percent << "% of step 2/3) \r" << std::flush; + } + + // Update last output read + MDB_val_set(v_last_output, last_output); + result = mdb_cursor_put(c_tmp_last_output, (MDB_val*)&zerokval, &v_last_output, 0); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to update max output id: ", result).c_str())); + + // Commit and start a new txn + batch_stop(); + batch_start(); + txn.m_txn = m_write_txn->m_txn; + + // Reset k and v so we continue migration from the last output + k = {sizeof(last_output.amount), (void *)&last_output.amount}; + + const std::size_t outkey_size = (last_output.amount == 0) ? sizeof(outkey) : sizeof(pre_rct_outkey); + v = {outkey_size, (void *)&last_output.ok}; + } + + // Open all cursors + result = mdb_cursor_open(txn, m_output_amounts, &c_output_amounts); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for output amounts: ", result).c_str())); + result = mdb_cursor_open(txn, m_locked_outputs, &c_locked_outputs); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for locked outputs: ", result).c_str())); + result = mdb_cursor_open(txn, m_tmp_last_output, &c_tmp_last_output); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for temp last output: ", result).c_str())); + + // Get the cached last output from the db + bool found_cached_output = false; + tmp_output_cache cached_last_o; + if (i == 0) + { + MDB_val v_last_output; + result = mdb_cursor_get(c_tmp_last_output, (MDB_val*)&zerokval, &v_last_output, MDB_SET); + if (result != MDB_SUCCESS && result != MDB_NOTFOUND) + throw0(DB_ERROR(lmdb_error("Failed to get max output id: ", result).c_str())); + if (result != MDB_NOTFOUND) + { + cached_last_o = *(const tmp_output_cache*)v_last_output.mv_data; + + if (n_outputs < cached_last_o.n_outputs_read) + throw0(DB_ERROR("Unexpected n_outputs_read on cached last output")); + if (n_outputs == cached_last_o.n_outputs_read) + break; + + MDEBUG("Found cached output " << cached_last_o.ok.output_id + << ", migrated " << cached_last_o.n_outputs_read << " outputs already"); + found_cached_output = true; + + // Set k and v so we can continue the migration from that output + k = {sizeof(cached_last_o.amount), (void *)&cached_last_o.amount}; + + const std::size_t outkey_size = (cached_last_o.amount == 0) ? 
sizeof(outkey) : sizeof(pre_rct_outkey); + v = {outkey_size, (void *)&cached_last_o.ok}; + + i = cached_last_o.n_outputs_read; + op = MDB_NEXT; + } + } + + // Advance the output_amounts cursor to the last output read + if (i || found_cached_output) + { + result = mdb_cursor_get(c_output_amounts, &k, &v, MDB_GET_BOTH); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to advance cursor for output amounts: ", result).c_str())); + } + } + + // Get the next output from the db + result = mdb_cursor_get(c_output_amounts, &k, &v, op); + op = MDB_NEXT; + if (result == MDB_NOTFOUND) + { + // Indicate we've read all outputs so we know the migration step is complete + last_output.n_outputs_read = n_outputs; + MDB_val_set(v_last_output, last_output); + result = mdb_cursor_put(c_tmp_last_output, (MDB_val*)&zerokval, &v_last_output, 0); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to update max output id: ", result).c_str())); + + batch_stop(); + break; + } + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to get a record from output amounts: ", result).c_str())); + + ++i; + const bool commit_next_iter = i && !(i % BATCH_SIZE); + + // Read the output data + uint64_t amount = *(const uint64_t*)k.mv_data; + output_data_t output_data; + uint64_t output_id; + if (amount == 0) + { + const outkey *okp = (const outkey *)v.mv_data; + output_data = okp->data; + output_id = okp->output_id; + if (commit_next_iter) + memcpy(&last_output.ok, okp, sizeof(outkey)); + } + else + { + const pre_rct_outkey *okp = (const pre_rct_outkey *)v.mv_data; + memcpy(&output_data, &okp->data, sizeof(pre_rct_output_data_t)); + output_data.commitment = rct::zeroCommit(amount); + output_id = okp->output_id; + if (commit_next_iter) + memcpy(&last_output.ok, okp, sizeof(pre_rct_outkey)); + } + + if (commit_next_iter) + { + // Set last output metadata + last_output.amount = amount; + last_output.n_outputs_read = i; + } + + // Prepare the output for insertion to the tree + auto output_pair = fcmp_pp::curve_trees::OutputPair{ + .output_pubkey = std::move(output_data.pubkey), + .commitment = std::move(output_data.commitment) + }; + + auto output_context = fcmp_pp::curve_trees::OutputContext{ + .output_id = output_id, + .output_pair = std::move(output_pair) + }; + + // Get the block in which the output will unlock + const uint64_t unlock_block = cryptonote::get_unlock_block_index(output_data.unlock_time, output_data.height); + + // Now add the output to the locked outputs table + MDB_val_set(k_block_id, unlock_block); + MDB_val_set(v_output, output_context); + + // MDB_NODUPDATA because all output id's should be unique + // Can't use MDB_APPENDDUP because outputs aren't inserted in order sorted by output_id + result = mdb_cursor_put(c_locked_outputs, &k_block_id, &v_output, MDB_NODUPDATA); + if (result != MDB_SUCCESS) + throw0(DB_ERROR(lmdb_error("Failed to add locked output: ", result).c_str())); + } + } + + // 3. Set up the curve trees merkle tree by growing the tree block by block, + // with leaves that unlock in each respective block + { + MINFO("Setting up a merkle tree using existing cryptonote outputs (step 3/3 of full-chain membership proof migration)"); + + if (!m_batch_transactions) + set_batch_transactions(true); + const std::size_t BATCH_SIZE = 50; + batch_start(); + txn.m_txn = m_write_txn->m_txn; + + /* the block_info table name is the same but the old version and new version + * have incompatible data. Create a new table. 
We want the name to be similar + * to the old name so that it will occupy the same location in the DB. + */ + MDB_dbi o_block_info = m_block_info; + lmdb_db_open(txn, "block_infn", MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_block_info, "Failed to open db handle for block_infn"); + mdb_set_dupsort(txn, m_block_info, compare_uint64); + + MDB_cursor *c_locked_outputs, *c_new_block_info, *c_old_block_info; + MDB_val k_blk, v_blk; + + i = 0; + const uint64_t n_blocks = height(); + while (i < n_blocks) + { + if (!(i % BATCH_SIZE)) + { + if (i) + { + LOGIF(el::Level::Info) + { + const uint64_t percent = std::min((i * 100) / n_blocks, (uint64_t)99); + std::cout << i << " / " << n_blocks << " blocks (" << percent << "% of step 3/3) \r" << std::flush; + } + + batch_stop(); + batch_start(); + txn.m_txn = m_write_txn->m_txn; + } + + // Open all cursors + result = mdb_cursor_open(txn, m_locked_outputs, &c_locked_outputs); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for locked outputs: ", result).c_str())); + result = mdb_cursor_open(txn, m_block_info, &c_new_block_info); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for block_infn: ", result).c_str())); + result = mdb_cursor_open(txn, o_block_info, &c_old_block_info); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for block_info: ", result).c_str())); + + // See what the last block inserted into the new table was + if (i == 0) + { + MDB_stat db_stats; + result = mdb_stat(txn, m_block_info, &db_stats); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to query m_block_info: ", result).c_str())); + i = db_stats.ms_entries; + if (i == n_blocks) + break; + } + } + + // Get the leaf tuples that unlock at the given block + auto unlocked_outputs = this->get_outs_at_unlock_block_id(i); + this->grow_tree(std::move(unlocked_outputs)); + + // Now that we've used the unlocked leaves to grow the tree, we delete them from the locked outputs table + this->del_locked_outs_at_block_id(i); + + // Get old block_info and use it to set the new one with new values + result = mdb_cursor_get(c_old_block_info, &k_blk, &v_blk, MDB_NEXT); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to get a record from block_info: ", result).c_str())); + const mdb_block_info_4 *bi_old = (const mdb_block_info_4*)v_blk.mv_data; + if (i != bi_old->bi_height) + throw0(DB_ERROR(std::string("Unexpected block retrieved, retrieved: " + std::to_string(bi_old->bi_height) + " , expected: " + std::to_string(i)).c_str())); + mdb_block_info_5 bi; + bi.bi_height = bi_old->bi_height; + bi.bi_timestamp = bi_old->bi_timestamp; + bi.bi_coins = bi_old->bi_coins; + bi.bi_weight = bi_old->bi_weight; + bi.bi_diff_lo = bi_old->bi_diff_lo; + bi.bi_diff_hi = bi_old->bi_diff_hi; + bi.bi_hash = bi_old->bi_hash; + bi.bi_cum_rct = bi_old->bi_cum_rct; + bi.bi_long_term_block_weight = bi_old->bi_long_term_block_weight; + bi.bi_n_leaf_tuples = this->get_num_leaf_tuples(); + bi.bi_tree_root = this->get_tree_root(); + + LOGIF(el::Level::Info) + { + if ((bi.bi_height % 1000) == 0) + { + const std::string tree_root = epee::string_tools::pod_to_hex(bi.bi_tree_root); + MINFO("Height: " << i << ", block: " << bi.bi_hash << ", tree root: " << tree_root << ", leaves: " << bi.bi_n_leaf_tuples); + } + } + + MDB_val_set(nv, bi); + result = mdb_cursor_put(c_new_block_info, (MDB_val *)&zerokval, &nv, MDB_APPENDDUP); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to put a record into block_infn: ", result).c_str())); + + /* we delete the old 
records immediately, so the overall DB and mapsize should not be + * larger than it needs to be. + * This is a little slower than just letting mdb_drop() delete it all at the end, but + * it saves a significant amount of disk space. + */ + result = mdb_cursor_del(c_old_block_info, 0); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to delete a record from block_info: ", result).c_str())); + + ++i; + } + batch_stop(); + + result = mdb_txn_begin(m_env, NULL, 0, txn); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str())); + /* Delete the old table */ + result = mdb_drop(txn, o_block_info, 1); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to delete old block_info table: ", result).c_str())); + + MDB_cursor *c_cur; + result = mdb_cursor_open(txn, m_block_info, &c_cur); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for block_infn: ", result).c_str())); + RENAME_DB("block_infn"); + mdb_dbi_close(m_env, m_block_info); + + lmdb_db_open(txn, "block_info", MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_block_info, "Failed to open db handle for block_infn"); + mdb_set_dupsort(txn, m_block_info, compare_uint64); + + txn.commit(); + } + } while(0); + + // Update db version + uint32_t version = 6; + v.mv_data = (void *)&version; + v.mv_size = sizeof(version); + MDB_val_str(vk, "version"); + result = mdb_txn_begin(m_env, NULL, 0, txn); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str())); + result = mdb_put(txn, m_properties, &vk, &v, 0); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to update version for the db: ", result).c_str())); + + // Drop the old spent keys table. We keep it until here so we know if the key image migration is complete. 
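+ // (mdb_drop with del=1 deletes the named database from the environment and closes + // its handle, rather than merely emptying it.)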
+ result = mdb_drop(txn, o_spent_keys, 1); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to delete old spent_keys table: ", result).c_str())); + + // Rename the spent keyr table to the new spent keys table + MDB_cursor *c_cur; + result = mdb_cursor_open(txn, m_spent_keys, &c_cur); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to open a cursor for spent_keyr: ", result).c_str())); + RENAME_DB("spent_keyr"); + mdb_dbi_close(m_env, m_spent_keys); + + lmdb_db_open(txn, "spent_keys", MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_spent_keys, "Failed to open db handle for m_spent_keys"); + mdb_set_dupsort(txn, m_spent_keys, compare_hash32); + + // We only needed the temp last output table for this migration, drop it + result = mdb_drop(txn, m_tmp_last_output, 1); + if (result) + throw0(DB_ERROR(lmdb_error("Failed to drop temp last output table: ", result).c_str())); + + txn.commit(); +} + void BlockchainLMDB::migrate(const uint32_t oldversion) { if (oldversion < 1) @@ -5689,6 +7312,8 @@ void BlockchainLMDB::migrate(const uint32_t oldversion) migrate_3_4(); if (oldversion < 5) migrate_4_5(); + if (oldversion < 6) + migrate_5_6(); } } // namespace cryptonote diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index 6eeb942dc2..b8d087412a 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -30,6 +30,7 @@ #include "blockchain_db/blockchain_db.h" #include "cryptonote_basic/blobdatatype.h" // for type blobdata +#include "fcmp_pp/curve_trees.h" #include "ringct/rctTypes.h" #include <lmdb.h> @@ -64,6 +65,10 @@ typedef struct mdb_txn_cursors MDB_cursor *m_txc_spent_keys; + MDB_cursor *m_txc_locked_outputs; + MDB_cursor *m_txc_leaves; + MDB_cursor *m_txc_layers; + MDB_cursor *m_txc_txpool_meta; MDB_cursor *m_txc_txpool_blob; @@ -87,6 +92,9 @@ typedef struct mdb_txn_cursors #define m_cur_tx_indices m_cursors->m_txc_tx_indices #define m_cur_tx_outputs m_cursors->m_txc_tx_outputs #define m_cur_spent_keys m_cursors->m_txc_spent_keys +#define m_cur_locked_outputs m_cursors->m_txc_locked_outputs +#define m_cur_leaves m_cursors->m_txc_leaves +#define m_cur_layers m_cursors->m_txc_layers #define m_cur_txpool_meta m_cursors->m_txc_txpool_meta #define m_cur_txpool_blob m_cursors->m_txc_txpool_blob #define m_cur_alt_blocks m_cursors->m_txc_alt_blocks @@ -109,6 +117,9 @@ typedef struct mdb_rflags bool m_rf_tx_indices; bool m_rf_tx_outputs; bool m_rf_spent_keys; + bool m_rf_locked_outputs; + bool m_rf_leaves; + bool m_rf_layers; bool m_rf_txpool_meta; bool m_rf_txpool_blob; bool m_rf_alt_blocks; @@ -183,7 +194,7 @@ struct mdb_txn_safe class BlockchainLMDB : public BlockchainDB { public: - BlockchainLMDB(bool batch_transactions=true); + BlockchainLMDB(bool batch_transactions=true, std::shared_ptr<fcmp_pp::curve_trees::CurveTreesV1> curve_trees = fcmp_pp::curve_trees::curve_trees_v1()); ~BlockchainLMDB(); virtual void open(const std::string& filename, const int mdb_flags=0); @@ -303,7 +314,7 @@ class BlockchainLMDB : public BlockchainDB virtual bool for_all_txpool_txes(std::function<bool(const crypto::hash&, const txpool_tx_meta_t&, const cryptonote::blobdata_ref*)> f, bool include_blob = false, relay_category category = relay_category::broadcasted) const; - virtual bool for_all_key_images(std::function<bool(const crypto::key_image&)>) const; + virtual bool for_all_key_images(std::function<bool(const crypto::key_image_y&)>) const; virtual bool for_blocks_range(const uint64_t& h1, const uint64_t& h2, std::function<bool(uint64_t, const crypto::hash&, const cryptonote::block&)>) const; virtual bool for_all_transactions(std::function<bool(const crypto::hash&, const cryptonote::transaction&)>, bool pruned) const; virtual bool for_all_outputs(std::function<bool(uint64_t amount, const crypto::hash &tx_hash, uint64_t height, size_t tx_idx)> f) const; @@ -356,6 +367,13 @@ class BlockchainLMDB : public BlockchainDB static int 
compare_hash32(const MDB_val *a, const MDB_val *b); static int compare_string(const MDB_val *a, const MDB_val *b); + // make private + virtual void grow_tree(std::vector<fcmp_pp::curve_trees::OutputContext> &&new_outputs); + + virtual void trim_tree(const uint64_t trim_n_leaf_tuples, const uint64_t trim_block_id); + + virtual bool audit_tree(const uint64_t expected_n_leaf_tuples) const; + private: void do_resize(uint64_t size_increase=0); @@ -370,6 +388,7 @@ class BlockchainLMDB : public BlockchainDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& block_hash + , const fcmp_pp::curve_trees::OutputsByUnlockBlock& outs_by_unlock_block ); virtual void remove_block(); @@ -378,7 +397,7 @@ class BlockchainLMDB : public BlockchainDB virtual void remove_transaction_data(const crypto::hash& tx_hash, const transaction& tx); - virtual uint64_t add_output(const crypto::hash& tx_hash, + virtual output_indexes_t add_output(const crypto::hash& tx_hash, const tx_out& tx_output, const uint64_t& local_index, const uint64_t unlock_time, @@ -399,6 +418,41 @@ class BlockchainLMDB : public BlockchainDB virtual void remove_spent_key(const crypto::key_image& k_image); + template <typename C> + void grow_layer(const std::unique_ptr<C> &curve, + const std::vector<fcmp_pp::curve_trees::LayerExtension<C>> &layer_extensions, + const uint64_t c_idx, + const uint64_t layer_idx); + + template <typename C> + void trim_layer(const std::unique_ptr<C> &curve, + const fcmp_pp::curve_trees::LayerReduction<C> &layer_reduction, + const uint64_t layer_idx); + + virtual uint64_t get_num_leaf_tuples() const; + + uint64_t get_top_block_n_leaf_tuples() const; + + virtual std::array<uint8_t, 32UL> get_tree_root() const; + + fcmp_pp::curve_trees::CurveTreesV1::LastHashes get_tree_last_hashes() const; + + fcmp_pp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim get_last_chunk_children_to_trim( + const std::vector<fcmp_pp::curve_trees::TrimLayerInstructions> &trim_instructions) const; + + fcmp_pp::curve_trees::CurveTreesV1::LastHashes get_last_hashes_to_trim( + const std::vector<fcmp_pp::curve_trees::TrimLayerInstructions> &trim_instructions) const; + + template <typename C_CHILD, typename C_PARENT> + bool audit_layer(const std::unique_ptr<C_CHILD> &c_child, + const std::unique_ptr<C_PARENT> &c_parent, + const uint64_t child_layer_idx, + const uint64_t chunk_width) const; + + std::vector<fcmp_pp::curve_trees::OutputContext> get_outs_at_unlock_block_id(uint64_t block_id); + + void del_locked_outs_at_block_id(uint64_t block_id); + uint64_t num_outputs() const; // Hard fork @@ -441,6 +495,9 @@ class BlockchainLMDB : public BlockchainDB // migrate from DB version 4 to 5 void migrate_4_5(); + // migrate from DB version 5 to 6 + void migrate_5_6(); + void cleanup_batch(); private: @@ -463,6 +520,10 @@ class BlockchainLMDB : public BlockchainDB MDB_dbi m_spent_keys; + MDB_dbi m_locked_outputs; + MDB_dbi m_leaves; + MDB_dbi m_layers; + MDB_dbi m_txpool_meta; MDB_dbi m_txpool_blob; diff --git a/src/blockchain_db/testdb.h b/src/blockchain_db/testdb.h index 308bdd4c24..84d1d3a801 100644 --- a/src/blockchain_db/testdb.h +++ b/src/blockchain_db/testdb.h @@ -112,12 +112,17 @@ class BaseTestDB: public cryptonote::BlockchainDB { virtual void remove_block() override { } virtual uint64_t add_transaction_data(const crypto::hash& blk_hash, const std::pair<cryptonote::transaction, cryptonote::blobdata>& tx, const crypto::hash& tx_hash, const crypto::hash& tx_prunable_hash) override {return 0;} virtual void remove_transaction_data(const crypto::hash& tx_hash, const cryptonote::transaction& tx) override {} - virtual uint64_t add_output(const crypto::hash& tx_hash, const cryptonote::tx_out& tx_output, const uint64_t& local_index, const uint64_t unlock_time, const rct::key *commitment) override {return 0;} + virtual output_indexes_t add_output(const crypto::hash& tx_hash, const 
cryptonote::tx_out& tx_output, const uint64_t& local_index, const uint64_t unlock_time, const rct::key *commitment) override {return {0, 0};} virtual void add_tx_amount_output_indices(const uint64_t tx_index, const std::vector<uint64_t>& amount_output_indices) override {} virtual void add_spent_key(const crypto::key_image& k_image) override {} virtual void remove_spent_key(const crypto::key_image& k_image) override {} + virtual void grow_tree(std::vector<fcmp_pp::curve_trees::OutputContext> &&new_outputs) override {}; + virtual void trim_tree(const uint64_t trim_n_leaf_tuples, const uint64_t trim_block_id) override {}; + virtual bool audit_tree(const uint64_t expected_n_leaf_tuples) const override { return false; }; + virtual std::array<uint8_t, 32UL> get_tree_root() const override { return {}; }; + virtual uint64_t get_num_leaf_tuples() const override { return 0; }; - virtual bool for_all_key_images(std::function<bool(const crypto::key_image&)>) const override { return true; } + virtual bool for_all_key_images(std::function<bool(const crypto::key_image_y&)>) const override { return true; } virtual bool for_blocks_range(const uint64_t&, const uint64_t&, std::function<bool(uint64_t, const crypto::hash&, const cryptonote::block&)>) const override { return true; } virtual bool for_all_transactions(std::function<bool(const crypto::hash&, const cryptonote::transaction&)>, bool pruned) const override { return true; } virtual bool for_all_outputs(std::function<bool(uint64_t amount, const crypto::hash &tx_hash, uint64_t height, size_t tx_idx)> f) const override { return true; } @@ -144,6 +149,7 @@ class BaseTestDB: public cryptonote::BlockchainDB { , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash + , const fcmp_pp::curve_trees::OutputsByUnlockBlock& outs_by_unlock_block ) override { } virtual cryptonote::block get_block_from_height(const uint64_t& height) const override { return cryptonote::block(); } virtual void set_hard_fork_version(uint64_t height, uint8_t version) override {} diff --git a/src/crypto/crypto-ops-data.c b/src/crypto/crypto-ops-data.c index edaa4644fd..57304c41da 100644 --- a/src/crypto/crypto-ops-data.c +++ b/src/crypto/crypto-ops-data.c @@ -870,6 +870,7 @@ const fe fe_fffb1 = {-31702527, -2466483, -26106795, -12203692, -12169197, -3210 const fe fe_fffb2 = {8166131, -6741800, -17040804, 3154616, 21461005, 1466302, -30876704, -6368709, 10503587, -13363080}; /* sqrt(2 * A * (A + 2)) */ const fe fe_fffb3 = {-13620103, 14639558, 4532995, 7679154, 16815101, -15883539, -22863840, -14813421, 13716513, -6477756}; /* sqrt(-sqrt(-1) * A * (A + 2)) */ const fe fe_fffb4 = {-21786234, -12173074, 21573800, 4524538, -4645904, 16204591, 8012863, -8444712, 3212926, 6885324}; /* sqrt(sqrt(-1) * A * (A + 2)) */ +const fe fe_a_inv_3 = {-22207407, 11184811, 22369621, -11184811, -22369621, 11184811, 22369621, -11184811, -22369621, 11184811}; /* A / 3 */ const ge_p3 ge_p3_identity = { {0}, {1, 0}, {1, 0}, {0} }; const ge_p3 ge_p3_H = { {7329926, -15101362, 31411471, 7614783, 27996851, -3197071, -11157635, -6878293, 466949, -7986503}, diff --git a/src/crypto/crypto-ops.c b/src/crypto/crypto-ops.c index 314fe448a2..9dd9ff7ddf 100644 --- a/src/crypto/crypto-ops.c +++ b/src/crypto/crypto-ops.c @@ -30,6 +30,8 @@ #include <assert.h> #include <stdint.h> +#include <stdlib.h> +#include <string.h> #include "warnings.h" #include "crypto-ops.h" @@ -90,7 +92,7 @@ void fe_0(fe h) { h = 1 */ -static void fe_1(fe h) { +void fe_1(fe h) { h[0] = 1; h[1] = 0; h[2] = 0; @@ -313,6 +315,39 @@ void fe_invert(fe out, const fe z) { return; } +// Montgomery's trick +// https://iacr.org/archive/pkc2004/29470042/29470042.pdf 2.2 +int fe_batch_invert(fe *out, const fe *in, const int n) { + if (n == 0) { + return 0; + } + + // Step 1: collect initial muls + fe *init_muls = (fe *) malloc(n * sizeof(fe)); + if (!init_muls) { + return 1; + } + memcpy(&init_muls[0], 
&in[0], sizeof(fe)); + for (int i = 1; i < n; ++i) { + fe_mul(init_muls[i], init_muls[i-1], in[i]); + } + + // Step 2: get the inverse of all elems multiplied together + fe a; + fe_invert(a, init_muls[n-1]); + + // Step 3: get each inverse + for (int i = n; i > 1; --i) { + fe_mul(out[i-1], a, init_muls[i-2]); + fe_mul(a, a, in[i-1]); + } + memcpy(&out[0], &a, sizeof(fe)); + + free(init_muls); + + return 0; +} + /* From fe_isnegative.c */ /* @@ -958,7 +993,7 @@ Can overlap h with f or g. |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. */ -static void fe_sub(fe h, const fe f, const fe g) { +void fe_sub(fe h, const fe f, const fe g) { int32_t f0 = f[0]; int32_t f1 = f[1]; int32_t f2 = f[2]; @@ -1328,16 +1363,9 @@ void ge_double_scalarmult_base_vartime_p3(ge_p3 *r3, const unsigned char *a, con } } -/* From ge_frombytes.c, modified */ - -int ge_frombytes_vartime(ge_p3 *h, const unsigned char *s) { - fe u; - fe v; - fe vxx; - fe check; - - /* From fe_frombytes.c */ +/* From fe_frombytes.c */ +int fe_frombytes_vartime(fe y, const unsigned char *s) { int64_t h0 = load_4(s); int64_t h1 = load_3(s + 4) << 6; int64_t h2 = load_3(s + 7) << 5; @@ -1378,18 +1406,31 @@ int ge_frombytes_vartime(ge_p3 *h, const unsigned char *s) { carry6 = (h6 + (int64_t) (1<<25)) >> 26; h7 += carry6; h6 -= carry6 << 26; carry8 = (h8 + (int64_t) (1<<25)) >> 26; h9 += carry8; h8 -= carry8 << 26; - h->Y[0] = h0; - h->Y[1] = h1; - h->Y[2] = h2; - h->Y[3] = h3; - h->Y[4] = h4; - h->Y[5] = h5; - h->Y[6] = h6; - h->Y[7] = h7; - h->Y[8] = h8; - h->Y[9] = h9; + y[0] = h0; + y[1] = h1; + y[2] = h2; + y[3] = h3; + y[4] = h4; + y[5] = h5; + y[6] = h6; + y[7] = h7; + y[8] = h8; + y[9] = h9; - /* End fe_frombytes.c */ + return 0; +} + +/* From ge_frombytes.c, modified */ + +int ge_frombytes_vartime(ge_p3 *h, const unsigned char *s) { + fe u; + fe v; + fe vxx; + fe check; + + if (fe_frombytes_vartime(h->Y, s) != 0) { + return -1; + } fe_1(h->Z); fe_sq(u, h->Y); @@ -1606,7 +1647,7 @@ static void ge_precomp_cmov(ge_precomp *t, const ge_precomp *u, unsigned char b) fe_cmov(t->xy2d, u->xy2d, b); } -static void select(ge_precomp *t, int pos, signed char b) { +static void _select(ge_precomp *t, int pos, signed char b) { ge_precomp minust; unsigned char bnegative = negative(b); unsigned char babs = b - (((-bnegative) & b) << 1); @@ -1662,7 +1703,7 @@ void ge_scalarmult_base(ge_p3 *h, const unsigned char *a) { ge_p3_0(h); for (i = 1; i < 64; i += 2) { - select(&t, i / 2, e[i]); + _select(&t, i / 2, e[i]); ge_madd(&r, h, &t); ge_p1p1_to_p3(h, &r); } @@ -1672,7 +1713,7 @@ void ge_scalarmult_base(ge_p3 *h, const unsigned char *a) { ge_p2_dbl(&r, &s); ge_p1p1_to_p3(h, &r); for (i = 0; i < 64; i += 2) { - select(&t, i / 2, e[i]); + _select(&t, i / 2, e[i]); ge_madd(&r, h, &t); ge_p1p1_to_p3(h, &r); } } @@ -3877,3 +3918,16 @@ int ge_p3_is_point_at_infinity_vartime(const ge_p3 *p) { // Y/Z = 0/0 return 0; } + +// https://www.ietf.org/archive/id/draft-ietf-lwig-curve-representations-02.pdf E.2 +void fe_ed_y_derivatives_to_wei_x(unsigned char *wei_x, const fe inv_one_minus_y, const fe one_plus_y) +{ + // (1/(1-y))*(1+y) + fe inv_one_minus_y_mul_one_plus_y; + fe_mul(inv_one_minus_y_mul_one_plus_y, inv_one_minus_y, one_plus_y); + + // wei x = (1/(1-y))*(1+y) + (A/3) + fe wei_x_fe; + fe_add(wei_x_fe, inv_one_minus_y_mul_one_plus_y, fe_a_inv_3); + fe_tobytes(wei_x, wei_x_fe); +} diff --git a/src/crypto/crypto-ops.h b/src/crypto/crypto-ops.h index c103f1f789..b5976c7621 100644 --- a/src/crypto/crypto-ops.h +++ b/src/crypto/crypto-ops.h @@ -88,6 +88,7 
@@ void ge_double_scalarmult_base_vartime_p3(ge_p3 *, const unsigned char *, const extern const fe fe_sqrtm1; extern const fe fe_d; +int fe_frombytes_vartime(fe, const unsigned char *); int ge_frombytes_vartime(ge_p3 *, const unsigned char *); /* From ge_p1p1_to_p2.c */ @@ -143,6 +144,7 @@ extern const fe fe_fffb1; extern const fe fe_fffb2; extern const fe fe_fffb3; extern const fe fe_fffb4; +extern const fe fe_a_inv_3; extern const ge_p3 ge_p3_identity; extern const ge_p3 ge_p3_H; void ge_fromfe_frombytes_vartime(ge_p2 *, const unsigned char *); @@ -163,7 +165,12 @@ void ge_sub(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q); void fe_add(fe h, const fe f, const fe g); void fe_tobytes(unsigned char *, const fe); void fe_invert(fe out, const fe z); +int fe_batch_invert(fe *out, const fe *in, const int n); void fe_mul(fe out, const fe, const fe); +void fe_sub(fe h, const fe f, const fe g); void fe_0(fe h); +void fe_1(fe h); int ge_p3_is_point_at_infinity_vartime(const ge_p3 *p); + +void fe_ed_y_derivatives_to_wei_x(unsigned char *wei_x, const fe inv_one_minus_y, const fe one_plus_y); diff --git a/src/crypto/crypto.cpp b/src/crypto/crypto.cpp index f6c94fa039..2145b06a60 100644 --- a/src/crypto/crypto.cpp +++ b/src/crypto/crypto.cpp @@ -618,6 +618,12 @@ namespace crypto { ge_p1p1_to_p3(&res, &point2); } + void crypto_ops::derive_key_image_generator(const public_key &pub, ec_point &ki_gen) { + ge_p3 point; + hash_to_ec(pub, point); + ge_p3_tobytes(&ki_gen, &point); + } + void crypto_ops::generate_key_image(const public_key &pub, const secret_key &sec, key_image &image) { ge_p3 point; ge_p2 point2; @@ -773,4 +779,21 @@ POP_WARNINGS static_assert(sizeof(crypto::view_tag) <= sizeof(view_tag_full), "view tag should not be larger than hash result"); memcpy(&view_tag, &view_tag_full, sizeof(crypto::view_tag)); } + + bool crypto_ops::key_image_to_y(const key_image &ki, key_image_y &ki_y) { + static_assert(sizeof(key_image) == 32 && sizeof(key_image_y) == 32, "unexpected size of key image"); + memcpy(&ki_y, &ki, 32); + // clear the sign bit, leaving us with the y coord + ki_y.data[31] &= 0x7F; + // return true if sign bit is set on the original key image + return (ki.data[31] & 0x80) > 0; + } + + void crypto_ops::key_image_from_y(const key_image_y &ki_y, const bool sign, key_image &ki) { + static_assert(sizeof(key_image) == 32 && sizeof(key_image_y) == 32, "unexpected size of key image"); + memcpy(&ki, &ki_y, 32); + if (sign) { + ki.data[31] ^= 0x80; + } + } } diff --git a/src/crypto/crypto.h b/src/crypto/crypto.h index 6b4126246d..86e7a97ffd 100644 --- a/src/crypto/crypto.h +++ b/src/crypto/crypto.h @@ -95,6 +95,10 @@ namespace crypto { friend class crypto_ops; }; + POD_CLASS key_image_y: ec_point { + friend class crypto_ops; + }; + POD_CLASS signature { ec_scalar c, r; friend class crypto_ops; }; @@ -110,7 +114,7 @@ namespace crypto { static_assert(sizeof(ec_point) == 32 && sizeof(ec_scalar) == 32 && sizeof(public_key) == 32 && sizeof(public_key_memsafe) == 32 && sizeof(secret_key) == 32 && - sizeof(key_derivation) == 32 && sizeof(key_image) == 32 && + sizeof(key_derivation) == 32 && sizeof(key_image) == 32 && sizeof(key_image_y) == 32 && sizeof(signature) == 64 && sizeof(view_tag) == 1, "Invalid structure size"); class crypto_ops { @@ -145,6 +149,8 @@ namespace crypto { friend void generate_tx_proof_v1(const hash &, const public_key &, const public_key &, const boost::optional<public_key> &, const public_key &, const secret_key &, signature &); static bool check_tx_proof(const hash &, const public_key &, const public_key &, const boost::optional<public_key> &, const public_key &, const signature &, const int); friend bool check_tx_proof(const hash &, const public_key &, const public_key &, const boost::optional<public_key> &, const public_key &, const signature &, const int); + static void derive_key_image_generator(const public_key &, ec_point &); + friend void derive_key_image_generator(const public_key &, ec_point &); static void generate_key_image(const public_key &, const secret_key &, key_image &); friend void generate_key_image(const public_key &, const secret_key &, key_image &); static void generate_ring_signature(const hash &, const key_image &, @@ -157,6 +163,10 @@ namespace crypto { const public_key *const *, std::size_t, const signature *); static void derive_view_tag(const key_derivation &, std::size_t, view_tag &); friend void derive_view_tag(const key_derivation &, std::size_t, view_tag &); + static bool key_image_to_y(const key_image &, key_image_y &); + friend bool key_image_to_y(const key_image &, key_image_y &); + static void key_image_from_y(const key_image_y &, const bool, key_image &); + friend void key_image_from_y(const key_image_y &, const bool, key_image &); }; void generate_random_bytes_thread_safe(size_t N, uint8_t *bytes); @@ -268,6 +278,10 @@ namespace crypto { return crypto_ops::check_tx_proof(prefix_hash, R, A, B, D, sig, version); } + inline void derive_key_image_generator(const public_key &pub, ec_point &ki_gen) { + crypto_ops::derive_key_image_generator(pub, ki_gen); + } + /* To send money to a key: * * The sender generates an ephemeral key and includes it in transaction output. * * To spend the money, the receiver generates a key image from it. @@ -311,6 +325,21 @@ namespace crypto { crypto_ops::derive_view_tag(derivation, output_index, vt); } + /** Clear the sign bit on the key image (i.e. get just the y coordinate). + * Return true if the sign bit is set, false if not. + * Since FCMPs allow construction of key images with the sign bit cleared, while + * the same key image with the sign bit set may already exist in the chain, we + * prevent double spends by converting all existing key images in the chain to + * their y coordinate and preventing duplicate key image y values. 
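+ * Example (derived from the implementation above): a key image whose byte 31 is 0xF3 + * yields a key_image_y with byte 31 == 0x73 and key_image_to_y returns true (the sign + * bit was set); key_image_from_y(ki_y, true, ki) XORs the sign bit back in, restoring 0xF3.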
+ */ + inline bool key_image_to_y(const key_image &ki, key_image_y &ki_y) { + return crypto_ops::key_image_to_y(ki, ki_y); + } + + inline void key_image_from_y(const key_image_y &ki_y, const bool sign, key_image &ki) { + return crypto_ops::key_image_from_y(ki_y, sign, ki); + } + inline std::ostream &operator <<(std::ostream &o, const crypto::public_key &v) { epee::to_hex::formatted(o, epee::as_byte_span(v)); return o; } @@ -323,6 +352,9 @@ namespace crypto { inline std::ostream &operator <<(std::ostream &o, const crypto::key_image &v) { epee::to_hex::formatted(o, epee::as_byte_span(v)); return o; } + inline std::ostream &operator <<(std::ostream &o, const crypto::key_image_y &v) { + epee::to_hex::formatted(o, epee::as_byte_span(v)); return o; + } inline std::ostream &operator <<(std::ostream &o, const crypto::signature &v) { epee::to_hex::formatted(o, epee::as_byte_span(v)); return o; } @@ -337,6 +369,8 @@ namespace crypto { inline bool operator>(const public_key &p1, const public_key &p2) { return p2 < p1; } inline bool operator<(const key_image &p1, const key_image &p2) { return memcmp(&p1, &p2, sizeof(key_image)) < 0; } inline bool operator>(const key_image &p1, const key_image &p2) { return p2 < p1; } + inline bool operator<(const key_image_y &p1, const key_image_y &p2) { return memcmp(&p1, &p2, sizeof(key_image_y)) < 0; } + inline bool operator>(const key_image_y &p1, const key_image_y &p2) { return p2 < p1; } } // type conversions for easier calls to sc_add(), sc_sub(), hash functions @@ -349,5 +383,6 CRYPTO_MAKE_HASHABLE(public_key) CRYPTO_MAKE_HASHABLE_CONSTANT_TIME(secret_key) CRYPTO_MAKE_HASHABLE_CONSTANT_TIME(public_key_memsafe) CRYPTO_MAKE_HASHABLE(key_image) +CRYPTO_MAKE_HASHABLE(key_image_y) CRYPTO_MAKE_COMPARABLE(signature) CRYPTO_MAKE_COMPARABLE(view_tag) diff --git a/src/cryptonote_basic/cryptonote_basic.h b/src/cryptonote_basic/cryptonote_basic.h index a50ae9c32d..9462476093 100644 --- a/src/cryptonote_basic/cryptonote_basic.h +++ b/src/cryptonote_basic/cryptonote_basic.h @@ -306,7 +306,8 @@ namespace cryptonote ar.tag("rctsig_prunable"); ar.begin_object(); r = rct_signatures.p.serialize_rctsig_prunable(ar, rct_signatures.type, vin.size(), vout.size(), - vin.size() > 0 && vin[0].type() == typeid(txin_to_key) ? boost::get<txin_to_key>(vin[0]).key_offsets.size() - 1 : 0); + (vin.empty() || vin[0].type() != typeid(txin_to_key) || rct_signatures.type == rct::RCTTypeFcmpPlusPlus) + ? 
0 : boost::get<txin_to_key>(vin[0]).key_offsets.size() - 1); if (!r || !ar.good()) return false; ar.end_object(); } diff --git a/src/cryptonote_basic/cryptonote_boost_serialization.h b/src/cryptonote_basic/cryptonote_boost_serialization.h index 8948c650cd..cbdaf507bd 100644 --- a/src/cryptonote_basic/cryptonote_boost_serialization.h +++ b/src/cryptonote_basic/cryptonote_boost_serialization.h @@ -330,7 +330,7 @@ namespace boost a & x.type; if (x.type == rct::RCTTypeNull) return; - if (x.type != rct::RCTTypeFull && x.type != rct::RCTTypeSimple && x.type != rct::RCTTypeBulletproof && x.type != rct::RCTTypeBulletproof2 && x.type != rct::RCTTypeCLSAG && x.type != rct::RCTTypeBulletproofPlus) + if (x.type != rct::RCTTypeFull && x.type != rct::RCTTypeSimple && x.type != rct::RCTTypeBulletproof && x.type != rct::RCTTypeBulletproof2 && x.type != rct::RCTTypeCLSAG && x.type != rct::RCTTypeBulletproofPlus && x.type != rct::RCTTypeFcmpPlusPlus) throw boost::archive::archive_exception(boost::archive::archive_exception::other_exception, "Unsupported rct type"); // a & x.message; message is not serialized, as it can be reconstructed from the tx data // a & x.mixRing; mixRing is not serialized, as it can be reconstructed from the offsets @@ -339,6 +339,8 @@ a & x.ecdhInfo; serializeOutPk(a, x.outPk, ver); a & x.txnFee; + if (x.type == rct::RCTTypeFcmpPlusPlus) + a & x.referenceBlock; } template <class Archive> @@ -354,6 +356,11 @@ a & x.MGs; if (ver >= 1u) a & x.CLSAGs; + if (ver >= 3u) + { + a & x.curve_trees_tree_depth; + a & x.fcmp_pp; + } if (x.rangeSigs.empty()) a & x.pseudoOuts; } @@ -364,7 +371,7 @@ a & x.type; if (x.type == rct::RCTTypeNull) return; - if (x.type != rct::RCTTypeFull && x.type != rct::RCTTypeSimple && x.type != rct::RCTTypeBulletproof && x.type != rct::RCTTypeBulletproof2 && x.type != rct::RCTTypeCLSAG && x.type != rct::RCTTypeBulletproofPlus) + if (x.type != rct::RCTTypeFull && x.type != rct::RCTTypeSimple && x.type != rct::RCTTypeBulletproof && x.type != rct::RCTTypeBulletproof2 && x.type != rct::RCTTypeCLSAG && x.type != rct::RCTTypeBulletproofPlus && x.type != rct::RCTTypeFcmpPlusPlus) throw boost::archive::archive_exception(boost::archive::archive_exception::other_exception, "Unsupported rct type"); // a & x.message; message is not serialized, as it can be reconstructed from the tx data // a & x.mixRing; mixRing is not serialized, as it can be reconstructed from the offsets @@ -373,6 +380,8 @@ a & x.ecdhInfo; serializeOutPk(a, x.outPk, ver); a & x.txnFee; + if (x.type == rct::RCTTypeFcmpPlusPlus) + a & x.referenceBlock; //-------------- a & x.p.rangeSigs; if (x.p.rangeSigs.empty()) @@ -384,7 +393,12 @@ a & x.p.MGs; if (ver >= 1u) a & x.p.CLSAGs; - if (x.type == rct::RCTTypeBulletproof || x.type == rct::RCTTypeBulletproof2 || x.type == rct::RCTTypeCLSAG || x.type == rct::RCTTypeBulletproofPlus) + if (ver >= 3u) + { + a & x.p.curve_trees_tree_depth; + a & x.p.fcmp_pp; + } + if (x.type == rct::RCTTypeBulletproof || x.type == rct::RCTTypeBulletproof2 || x.type == rct::RCTTypeCLSAG || x.type == rct::RCTTypeBulletproofPlus || x.type == rct::RCTTypeFcmpPlusPlus) a & x.p.pseudoOuts; } @@ -425,6 +439,6 @@ } } -BOOST_CLASS_VERSION(rct::rctSigPrunable, 2) -BOOST_CLASS_VERSION(rct::rctSig, 2) +BOOST_CLASS_VERSION(rct::rctSigPrunable, 3) +BOOST_CLASS_VERSION(rct::rctSig, 3) BOOST_CLASS_VERSION(rct::multisig_out, 1) diff --git a/src/cryptonote_basic/cryptonote_format_utils.cpp 
b/src/cryptonote_basic/cryptonote_format_utils.cpp index ca56c2bc34..094cd28a32 100644 --- a/src/cryptonote_basic/cryptonote_format_utils.cpp +++ b/src/cryptonote_basic/cryptonote_format_utils.cpp @@ -106,7 +106,7 @@ namespace cryptonote uint64_t get_transaction_weight_clawback(const transaction &tx, size_t n_padded_outputs) { const rct::rctSig &rv = tx.rct_signatures; - const bool plus = rv.type == rct::RCTTypeBulletproofPlus; + const bool plus = rv.type == rct::RCTTypeBulletproofPlus || rv.type == rct::RCTTypeFcmpPlusPlus; const uint64_t bp_base = (32 * ((plus ? 6 : 9) + 7 * 2)) / 2; // notional size of a 2 output proof, normalized to 1 proof (ie, divided by 2) const size_t n_outputs = tx.vout.size(); if (n_padded_outputs <= 2) @@ -484,6 +484,7 @@ namespace cryptonote weight += extra; // calculate deterministic CLSAG/MLSAG data size + // TODO: update for fcmp_pp const size_t ring_size = boost::get<txin_to_key>(tx.vin[0]).key_offsets.size(); if (rct::is_rct_clsag(tx.rct_signatures.type)) extra = tx.vin.size() * (ring_size + 2) * 32; @@ -1292,7 +1293,8 @@ namespace cryptonote binary_archive<true> ba(ss); const size_t inputs = t.vin.size(); const size_t outputs = t.vout.size(); - const size_t mixin = t.vin.empty() ? 0 : t.vin[0].type() == typeid(txin_to_key) ? boost::get<txin_to_key>(t.vin[0]).key_offsets.size() - 1 : 0; + const size_t mixin = (t.vin.empty() || t.rct_signatures.type == rct::RCTTypeFcmpPlusPlus || t.vin[0].type() != typeid(txin_to_key)) + ? 0 : boost::get<txin_to_key>(t.vin[0]).key_offsets.size() - 1; bool r = tt.rct_signatures.p.serialize_rctsig_prunable(ba, t.rct_signatures.type, inputs, outputs, mixin); CHECK_AND_ASSERT_MES(r, false, "Failed to serialize rct signatures prunable"); cryptonote::get_blob_hash(ss.str(), res); @@ -1644,4 +1646,62 @@ namespace cryptonote sc_sub((unsigned char*)key.data, (const unsigned char*)key.data, (const unsigned char*)hash.data); return key; } + //--------------------------------------------------------------- + // TODO: write tests for this func that match with current daemon logic + uint64_t get_unlock_block_index(uint64_t unlock_time, uint64_t block_included_in_chain) + { + uint64_t unlock_block_index = 0; + + static_assert(CRYPTONOTE_DEFAULT_TX_SPENDABLE_AGE > 0, "unexpected default spendable age"); + const uint64_t default_block_index = block_included_in_chain + (CRYPTONOTE_DEFAULT_TX_SPENDABLE_AGE - 1); + + if (unlock_time == 0) + { + unlock_block_index = default_block_index; + } + else if (unlock_time < CRYPTONOTE_MAX_BLOCK_NUMBER) + { + // The unlock_time in this case is supposed to be the chain height at which the output unlocks + // The chain height is 1 higher than the highest block index, so we subtract 1 for this delta + unlock_block_index = unlock_time > 0 ? (unlock_time - 1) : 0; + } + else + { + // Interpret the unlock_time as time + // TODO: hardcode correct times for each network and take in nettype + const auto hf_v15_time = 1656629118; + const auto hf_v15_height = 2689608; + + // Use the last hard fork's time and block combo to convert the time-based timelock into an unlock block + // TODO: consider taking into account 60s block times when that was consensus + if (hf_v15_time > unlock_time) + { + const auto seconds_since_unlock = hf_v15_time - unlock_time; + const auto blocks_since_unlock = seconds_since_unlock / DIFFICULTY_TARGET_V2; + + unlock_block_index = hf_v15_height > blocks_since_unlock + ? 
(hf_v15_height - blocks_since_unlock) + : default_block_index; + } + else + { + const auto seconds_until_unlock = unlock_time - hf_v15_time; + const auto blocks_until_unlock = seconds_until_unlock / DIFFICULTY_TARGET_V2; + unlock_block_index = hf_v15_height + blocks_until_unlock; + } + + /* Note: since this function was introduced for the hard fork that included FCMPs, it's possible for an output + to be spent before it reaches the unlock_block_index going by the old rules; this is OK. It can't be spent + again because it would have a duplicate key image. It's also possible for an output to unlock by the old rules, + and then re-lock again at the fork. This is also OK; we just need to be sure that the new hard fork rules use + this unlock_block_index starting at the fork for FCMPs. + */ + + // TODO: double check the accuracy of this calculation + MDEBUG("unlock time: " << unlock_time << " , unlock_block_index: " << unlock_block_index); + } + + // Can't unlock earlier than the default unlock block + return std::max(unlock_block_index, default_block_index); + } } diff --git a/src/cryptonote_basic/cryptonote_format_utils.h b/src/cryptonote_basic/cryptonote_format_utils.h index fc7dfcd859..e3a4644030 100644 --- a/src/cryptonote_basic/cryptonote_format_utils.h +++ b/src/cryptonote_basic/cryptonote_format_utils.h @@ -37,6 +37,7 @@ #include "include_base_utils.h" #include "crypto/crypto.h" #include "crypto/hash.h" +#include "fcmp_pp/curve_trees.h" #include <unordered_map> #include <unordered_set> @@ -265,6 +266,10 @@ namespace cryptonote crypto::secret_key encrypt_key(crypto::secret_key key, const epee::wipeable_string &passphrase); crypto::secret_key decrypt_key(crypto::secret_key key, const epee::wipeable_string &passphrase); + + // Returns the block index in which the provided unlock_time unlocks + uint64_t get_unlock_block_index(uint64_t unlock_time, uint64_t block_included_in_chain); + #define CHECKED_GET_SPECIFIC_VARIANT(variant_var, specific_type, variable_name, fail_return_val) \ CHECK_AND_ASSERT_MES(variant_var.type() == typeid(specific_type), fail_return_val, "wrong variant type: " << variant_var.type().name() << ", expected " << typeid(specific_type).name()); \ specific_type& variable_name = boost::get<specific_type>(variant_var); diff --git a/src/cryptonote_core/blockchain.cpp b/src/cryptonote_core/blockchain.cpp index 8d34f0e858..45a4b54141 100644 --- a/src/cryptonote_core/blockchain.cpp +++ b/src/cryptonote_core/blockchain.cpp @@ -156,7 +156,9 @@ bool Blockchain::scan_outputkeys_for_indexes(size_t tx_version, const txin_to_ke auto it = m_scan_table.find(tx_prefix_hash); if (it != m_scan_table.end()) { - auto its = it->second.find(tx_in_to_key.k_image); + crypto::key_image_y ki_y; + crypto::key_image_to_y(tx_in_to_key.k_image, ki_y); + auto its = it->second.find(ki_y); if (its != it->second.end()) { outputs = its->second; @@ -2909,7 +2911,9 @@ bool Blockchain::check_for_double_spend(const transaction& tx, key_images_contai // if the insert into the block-wide spent keys container succeeds, // check the blockchain-wide spent keys container and make sure the // key wasn't used in another block already.
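+ // (Both the block-wide set below and the db lookup key on key_image_y, so two key + // images differing only in the x-coordinate sign bit count as the same spend.)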
- auto r = m_spent_keys.insert(ki); + crypto::key_image_y ki_y; + crypto::key_image_to_y(ki, ki_y); + auto r = m_spent_keys.insert(ki_y); if(!r.second || m_db->has_key_image(ki)) { //double spend detected @@ -5150,7 +5154,7 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vector>()); + m_scan_table.emplace(tx_prefix_hash, std::unordered_map>()); its = m_scan_table.find(tx_prefix_hash); assert(its != m_scan_table.end()); @@ -5160,7 +5164,9 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vector (txin); // check for duplicate - auto it = its->second.find(in_to_key.k_image); + crypto::key_image_y ki_y; + crypto::key_image_to_y(in_to_key.k_image, ki_y); + auto it = its->second.find(ki_y); if (it != its->second.end()) SCAN_TABLE_QUIT("Duplicate key_image found from incoming blocks."); @@ -5277,7 +5283,9 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vectorsecond.emplace(in_to_key.k_image, outputs); + crypto::key_image_y ki_y; + crypto::key_image_to_y(in_to_key.k_image, ki_y); + its->second.emplace(ki_y, outputs); } } } @@ -5576,7 +5584,7 @@ void Blockchain::unlock() m_blockchain_lock.unlock(); } -bool Blockchain::for_all_key_images(std::function f) const +bool Blockchain::for_all_key_images(std::function f) const { return m_db->for_all_key_images(f); } diff --git a/src/cryptonote_core/blockchain.h b/src/cryptonote_core/blockchain.h index 503ea92ddf..1a4e08ef48 100644 --- a/src/cryptonote_core/blockchain.h +++ b/src/cryptonote_core/blockchain.h @@ -955,7 +955,7 @@ namespace cryptonote * * @return false if any key image fails the check, otherwise true */ - bool for_all_key_images(std::function) const; + bool for_all_key_images(std::function) const; /** * @brief perform a check on all blocks in the blockchain in the given range @@ -1125,7 +1125,7 @@ namespace cryptonote #endif // TODO: evaluate whether or not each of these typedefs are left over from blockchain_storage - typedef std::unordered_set key_images_container; + typedef std::unordered_set key_images_container; typedef std::vector blocks_container; @@ -1143,7 +1143,7 @@ namespace cryptonote size_t m_current_block_cumul_weight_median; // metadata containers - std::unordered_map>> m_scan_table; + std::unordered_map>> m_scan_table; std::unordered_map m_blocks_longhash_table; // Keccak hashes for each block and for fast pow checking diff --git a/src/cryptonote_core/cryptonote_core.cpp b/src/cryptonote_core/cryptonote_core.cpp index 08c72573b6..03c6cefdb7 100644 --- a/src/cryptonote_core/cryptonote_core.cpp +++ b/src/cryptonote_core/cryptonote_core.cpp @@ -1292,11 +1292,13 @@ namespace cryptonote //----------------------------------------------------------------------------------------------- bool core::check_tx_inputs_keyimages_diff(const transaction& tx) const { - std::unordered_set ki; + std::unordered_set ki; for(const auto& in: tx.vin) { CHECKED_GET_SPECIFIC_VARIANT(in, const txin_to_key, tokey_in, false); - if(!ki.insert(tokey_in.k_image).second) + crypto::key_image_y ki_y; + crypto::key_image_to_y(tokey_in.k_image, ki_y); + if(!ki.insert(ki_y).second) return false; } return true; diff --git a/src/cryptonote_core/tx_pool.cpp b/src/cryptonote_core/tx_pool.cpp index 2d01b2bb28..fdbefcfadd 100644 --- a/src/cryptonote_core/tx_pool.cpp +++ b/src/cryptonote_core/tx_pool.cpp @@ -523,7 +523,9 @@ namespace cryptonote for(const auto& in: tx.vin) { CHECKED_GET_SPECIFIC_VARIANT(in, const txin_to_key, txin, false); - std::unordered_set& kei_image_set = m_spent_key_images[txin.k_image]; + crypto::key_image_y 
ki_y; + const bool ki_sign = crypto::key_image_to_y(txin.k_image, ki_y); + auto& kei_image_set = m_spent_key_images[ki_y]; // Only allow multiple txes per key-image if kept-by-block. Only allow // the same txid if going from local/stem->fluff. @@ -531,14 +533,14 @@ namespace cryptonote if (tx_relay != relay_method::block) { const bool one_txid = - (kei_image_set.empty() || (kei_image_set.size() == 1 && *(kei_image_set.cbegin()) == id)); + (kei_image_set.empty() || (kei_image_set.size() == 1 && (*(kei_image_set.cbegin())).tx_hash == id)); CHECK_AND_ASSERT_MES(one_txid, false, "internal error: tx_relay=" << unsigned(tx_relay) << ", kei_image_set.size()=" << kei_image_set.size() << ENDL << "txin.k_image=" << txin.k_image << ENDL << "tx_id=" << id); } const bool new_or_previously_private = - kei_image_set.insert(id).second || + kei_image_set.insert({id, ki_sign}).second || !m_blockchain.txpool_tx_matches_category(id, relay_category::legacy); CHECK_AND_ASSERT_MES(new_or_previously_private, false, "internal error: try to insert duplicate iterator in key_image set"); } @@ -557,14 +559,16 @@ namespace cryptonote for(const txin_v& vi: tx.vin) { CHECKED_GET_SPECIFIC_VARIANT(vi, const txin_to_key, txin, false); - auto it = m_spent_key_images.find(txin.k_image); + crypto::key_image_y ki_y; + const bool ki_sign = crypto::key_image_to_y(txin.k_image, ki_y); + auto it = m_spent_key_images.find(ki_y); CHECK_AND_ASSERT_MES(it != m_spent_key_images.end(), false, "failed to find transaction input in key images. img=" << txin.k_image << ENDL << "transaction id = " << actual_hash); - std::unordered_set& key_image_set = it->second; + auto& key_image_set = it->second; CHECK_AND_ASSERT_MES(key_image_set.size(), false, "empty key_image set, img=" << txin.k_image << ENDL << "transaction id = " << actual_hash); - auto it_in_set = key_image_set.find(actual_hash); + auto it_in_set = key_image_set.find({actual_hash, ki_sign}); CHECK_AND_ASSERT_MES(it_in_set != key_image_set.end(), false, "transaction id not found in key_image set, img=" << txin.k_image << ENDL << "transaction id = " << actual_hash); key_image_set.erase(it_in_set); @@ -1099,7 +1103,7 @@ namespace cryptonote backlog.clear(); uint64_t w = 0; - std::unordered_set k_images; + std::unordered_set k_images; for (const tx_block_template_backlog_entry& e : tmp) { @@ -1262,19 +1266,40 @@ namespace cryptonote }, true, category); for (const key_images_container::value_type& kee : m_spent_key_images) { - const crypto::key_image& k_image = kee.first; - const std::unordered_set& kei_image_set = kee.second; - spent_key_image_info ki; - ki.id_hash = epee::string_tools::pod_to_hex(k_image); - for (const crypto::hash& tx_id_hash : kei_image_set) - { - if (m_blockchain.txpool_tx_matches_category(tx_id_hash, category)) - ki.txs_hashes.push_back(epee::string_tools::pod_to_hex(tx_id_hash)); + // id_hash corresponds to key image as the daemon received it, so we need + // to derive key image from key_image_y and sign bit to prevent a breaking + // change to clients. After the fcmp fork, all key images should have sign + // bit cleared so this can be cleaned up further. 
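As an aside, the round trip that the comment above depends on, sketched with the helpers as they are used throughout this diff (the include path and exact signatures are assumptions inferred from the call sites here):

// Sketch (not part of the patch): key_image_to_y clears the sign bit and
// reports whether it was set; key_image_from_y restores the original encoding.
#include <cassert>
#include <cstring>
#include "crypto/crypto.h"

void round_trip_example(const crypto::key_image &ki)
{
    crypto::key_image_y ki_y;
    const bool sign = crypto::key_image_to_y(ki, ki_y); // true iff the sign bit was set

    crypto::key_image restored;
    crypto::key_image_from_y(ki_y, sign, restored);

    // The (y, sign) pair losslessly encodes the original key image
    assert(std::memcmp(&restored, &ki, sizeof(ki)) == 0);
}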
+ const crypto::key_image_y& k_image_y = kee.first; + const auto& kei_image_set = kee.second; + spent_key_image_info ki_info_sign; + spent_key_image_info ki_info_no_sign; + for (const auto& ki_context : kei_image_set) + { + const crypto::hash &tx_hash = ki_context.tx_hash; + const bool sign = ki_context.sign; + if (m_blockchain.txpool_tx_matches_category(tx_hash, category)) + { + crypto::key_image ki; + crypto::key_image_from_y(k_image_y, sign, ki); + if (sign) + { + ki_info_sign.id_hash = epee::string_tools::pod_to_hex(ki); + ki_info_sign.txs_hashes.push_back(epee::string_tools::pod_to_hex(tx_hash)); + } + else + { + ki_info_no_sign.id_hash = epee::string_tools::pod_to_hex(ki); + ki_info_no_sign.txs_hashes.push_back(epee::string_tools::pod_to_hex(tx_hash)); + } + } } // Only return key images for which we have at least one tx that we can show for them - if (!ki.txs_hashes.empty()) - key_image_infos.push_back(std::move(ki)); + if (!ki_info_sign.txs_hashes.empty()) + key_image_infos.push_back(std::move(ki_info_sign)); + if (!ki_info_no_sign.txs_hashes.empty()) + key_image_infos.push_back(std::move(ki_info_no_sign)); } return true; } @@ -1314,11 +1339,11 @@ namespace cryptonote for (const key_images_container::value_type& kee : m_spent_key_images) { std::vector tx_hashes; - const std::unordered_set& kei_image_set = kee.second; - for (const crypto::hash& tx_id_hash : kei_image_set) + const auto& kei_image_set = kee.second; + for (const auto& ki_context : kei_image_set) { - if (m_blockchain.txpool_tx_matches_category(tx_id_hash, relay_category::broadcasted)) - tx_hashes.push_back(tx_id_hash); + if (m_blockchain.txpool_tx_matches_category(ki_context.tx_hash, relay_category::broadcasted)) + tx_hashes.push_back(ki_context.tx_hash); } if (!tx_hashes.empty()) @@ -1337,11 +1362,13 @@ namespace cryptonote for (const auto& image : key_images) { bool is_spent = false; - const auto found = m_spent_key_images.find(image); + crypto::key_image_y ki_y; + crypto::key_image_to_y(image, ki_y); + const auto found = m_spent_key_images.find(ki_y); if (found != m_spent_key_images.end()) { - for (const crypto::hash& tx_hash : found->second) - is_spent |= m_blockchain.txpool_tx_matches_category(tx_hash, relay_category::broadcasted); + for (const auto& ki_context : found->second) + is_spent |= m_blockchain.txpool_tx_matches_category(ki_context.tx_hash, relay_category::broadcasted); } spent.push_back(is_spent); } @@ -1402,12 +1429,14 @@ namespace cryptonote bool tx_memory_pool::have_tx_keyimg_as_spent(const crypto::key_image& key_im, const crypto::hash& txid) const { CRITICAL_REGION_LOCAL(m_transactions_lock); - const auto found = m_spent_key_images.find(key_im); + crypto::key_image_y ki_y; + crypto::key_image_to_y(key_im, ki_y); + const auto found = m_spent_key_images.find(ki_y); if (found != m_spent_key_images.end() && !found->second.empty()) { // If another tx is using the key image, always return as spent. // See `insert_key_images`. 
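Why the pool keys by key_image_y rather than by the full key image, in sketch form: two encodings that differ only in the Ed25519 sign bit normalize to the same y. Illustrative only; the byte layout (sign bit in the top bit of the final byte of the compressed point) is the standard Ed25519 convention, and sign_malleation_example is a hypothetical helper name:

#include <cassert>
#include <cstring>
#include "crypto/crypto.h"

void sign_malleation_example(const crypto::key_image &ki_with_sign_bit)
{
    // A malleated encoding of the same y coordinate: identical bytes except
    // the sign bit (top bit of the last byte) is cleared
    crypto::key_image ki_malleated = ki_with_sign_bit;
    ki_malleated.data[31] &= 0x7f;

    crypto::key_image_y y1, y2;
    crypto::key_image_to_y(ki_with_sign_bit, y1); // returns true (bit was set)
    crypto::key_image_to_y(ki_malleated, y2);     // returns false (bit was clear)

    // Both normalize to the same y, so a container keyed by key_image_y
    // catches the second spend attempt
    assert(std::memcmp(&y1, &y2, sizeof(y1)) == 0);
}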
- if (1 < found->second.size() || *(found->second.cbegin()) != txid) + if (1 < found->second.size() || (*(found->second.cbegin())).tx_hash != txid) return true; return m_blockchain.txpool_tx_matches_category(txid, relay_category::legacy); } @@ -1515,23 +1544,27 @@ namespace cryptonote return is_transaction_ready_to_go(txd, txid, cryptonote::blobdata_ref{txblob.data(), txblob.size()}, tx); } //--------------------------------------------------------------------------------- - bool tx_memory_pool::have_key_images(const std::unordered_set& k_images, const transaction_prefix& tx) + bool tx_memory_pool::have_key_images(const std::unordered_set& k_images, const transaction_prefix& tx) { for(size_t i = 0; i!= tx.vin.size(); i++) { CHECKED_GET_SPECIFIC_VARIANT(tx.vin[i], const txin_to_key, itk, false); - if(k_images.count(itk.k_image)) + crypto::key_image_y ki_y; + crypto::key_image_to_y(itk.k_image, ki_y); + if(k_images.count(ki_y)) return true; } return false; } //--------------------------------------------------------------------------------- - bool tx_memory_pool::append_key_images(std::unordered_set& k_images, const transaction_prefix& tx) + bool tx_memory_pool::append_key_images(std::unordered_set& k_images, const transaction_prefix& tx) { for(size_t i = 0; i!= tx.vin.size(); i++) { CHECKED_GET_SPECIFIC_VARIANT(tx.vin[i], const txin_to_key, itk, false); - auto i_res = k_images.insert(itk.k_image); + crypto::key_image_y ki_y; + crypto::key_image_to_y(itk.k_image, ki_y); + auto i_res = k_images.insert(ki_y); CHECK_AND_ASSERT_MES(i_res.second, false, "internal error: key images pool cache - inserted duplicate image in set: " << itk.k_image); } return true; @@ -1546,11 +1579,14 @@ namespace cryptonote for(size_t i = 0; i!= tx.vin.size(); i++) { CHECKED_GET_SPECIFIC_VARIANT(tx.vin[i], const txin_to_key, itk, void()); - const key_images_container::const_iterator it = m_spent_key_images.find(itk.k_image); + crypto::key_image_y ki_y; + crypto::key_image_to_y(itk.k_image, ki_y); + const key_images_container::const_iterator it = m_spent_key_images.find(ki_y); if (it != m_spent_key_images.end()) { - for (const crypto::hash &txid: it->second) + for (const auto &ki_context: it->second) { + const auto &txid = ki_context.tx_hash; txpool_tx_meta_t meta; if (!m_blockchain.get_txpool_tx_meta(txid, meta)) { @@ -1634,7 +1670,7 @@ namespace cryptonote size_t max_total_weight_pre_v5 = (130 * median_weight) / 100 - CRYPTONOTE_COINBASE_BLOB_RESERVED_SIZE; size_t max_total_weight_v5 = 2 * median_weight - CRYPTONOTE_COINBASE_BLOB_RESERVED_SIZE; size_t max_total_weight = version >= 5 ? max_total_weight_v5 : max_total_weight_pre_v5; - std::unordered_set k_images; + std::unordered_set k_images; LOG_PRINT_L2("Filling block template, median weight " << median_weight << ", " << m_txs_by_fee_and_receive_time.size() << " txes in the pool"); diff --git a/src/cryptonote_core/tx_pool.h b/src/cryptonote_core/tx_pool.h index 69a123fc9e..86ae947f4c 100644 --- a/src/cryptonote_core/tx_pool.h +++ b/src/cryptonote_core/tx_pool.h @@ -52,6 +52,29 @@ #include "rpc/core_rpc_server_commands_defs.h" #include "rpc/message_data_structs.h" +namespace cryptonote +{ + //! 
key image's contextual data + struct ki_context_t + { + crypto::hash tx_hash; + bool sign; // original key image had sign bit set + bool operator==(const ki_context_t &rhs) const { return rhs.tx_hash == tx_hash && rhs.sign == sign; } + }; +}//cryptonote + +namespace std +{ + template<> struct hash<cryptonote::ki_context_t> + { + std::size_t operator()(const cryptonote::ki_context_t &_ki_context) const + { + const std::size_t h = reinterpret_cast<const std::size_t &>(_ki_context.tx_hash); + return h + (_ki_context.sign ? 1 : 0); + } + }; +}//std + namespace cryptonote { class Blockchain; @@ -553,7 +576,7 @@ namespace cryptonote * * @return true if any key images present in the set, otherwise false */ - static bool have_key_images(const std::unordered_set<crypto::key_image>& kic, const transaction_prefix& tx); + static bool have_key_images(const std::unordered_set<crypto::key_image_y>& kic, const transaction_prefix& tx); /** * @brief append the key images from a transaction to the given set * * @return false if any append fails, otherwise true */ - static bool append_key_images(std::unordered_set<crypto::key_image>& kic, const transaction_prefix& tx); + static bool append_key_images(std::unordered_set<crypto::key_image_y>& kic, const transaction_prefix& tx); /** * @brief check if a transaction is a valid candidate for inclusion in a block * @@ -602,8 +625,12 @@ namespace cryptonote * in the event of a reorg where someone creates a new/different * transaction on the assumption that the original will not be in a * block again. + * We use key_image_y as the key since we need to prevent double spends of + * key image y coordinates (FCMP++ enables constructing key images with + * the sign bit cleared for key images which may already exist in the chain + * with the sign bit set) */ - typedef std::unordered_map<crypto::key_image, std::unordered_set<crypto::hash>> key_images_container; + typedef std::unordered_map<crypto::key_image_y, std::unordered_set<ki_context_t>> key_images_container; #if defined(DEBUG_CREATE_BLOCK_TEMPLATE) public: diff --git a/src/fcmp_pp/CMakeLists.txt b/src/fcmp_pp/CMakeLists.txt new file mode 100644 index 0000000000..54ebad4a85 --- /dev/null +++ b/src/fcmp_pp/CMakeLists.txt @@ -0,0 +1,59 @@ +# Copyright (c) 2024, The Monero Project +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, are +# permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this list of +# conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, this list +# of conditions and the following disclaimer in the documentation and/or other +# materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors may be +# used to endorse or promote products derived from this software without specific +# prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL +# THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +set(fcmp_pp_sources + curve_trees.cpp + fcmp_pp_crypto.cpp + tower_cycle.cpp) + +monero_find_all_headers(fcmp_pp_headers "${CMAKE_CURRENT_SOURCE_DIR}") + +add_subdirectory(fcmp_pp_rust) + +monero_add_library_with_deps( + NAME fcmp_pp + DEPENDS fcmp_pp_rust + SOURCES + ${fcmp_pp_sources} + ${fcmp_pp_headers}) + +if(WIN32) + set(EXTRA_RUST_LIBRARIES ws2_32 ntdll userenv) +else() + set(EXTRA_RUST_LIBRARIES ) +endif() + +target_link_libraries(fcmp_pp + PUBLIC + cncrypto + common + epee + PRIVATE + ${CMAKE_CURRENT_BINARY_DIR}/fcmp_pp_rust/libfcmp_pp_rust.a + ${EXTRA_LIBRARIES} + ${EXTRA_RUST_LIBRARIES}) diff --git a/src/fcmp_pp/curve_trees.cpp b/src/fcmp_pp/curve_trees.cpp new file mode 100644 index 0000000000..f4701d6f97 --- /dev/null +++ b/src/fcmp_pp/curve_trees.cpp @@ -0,0 +1,1239 @@ +// Copyright (c) 2024, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
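Before the implementation, a sketch of the layer arithmetic this file uses everywhere: each chunk of chunk_width children hashes to one parent, layers alternate between the Selene and Helios curves, and the tree terminates once a layer contains a single hash (the root). A minimal sketch, not part of the patch; the chunk width of 38 is an illustrative value, not necessarily the consensus parameter:

// Each layer of n children produces 1 + ((n - 1) / chunk_width) parents,
// the same formula used by get_grow_layer_instructions below.
#include <cstdint>
#include <cstdio>

int main()
{
    const uint64_t chunk_width = 38; // illustrative width
    uint64_t n = 250000 * 3;         // 250k leaf tuples, 3 scalars (O.x, I.x, C.x) each

    while (n > 1)
    {
        n = 1 + ((n - 1) / chunk_width);
        std::printf("%llu\n", (unsigned long long)n);
    }
    // Prints 19737, 520, 14, 1 -> four layers above the leaves
}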
+ +#include "curve_trees.h" + +#include "common/threadpool.h" +#include "ringct/rctOps.h" + +#include + +namespace fcmp_pp +{ +namespace curve_trees +{ +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// Instantiate the tower cycle types +template class CurveTrees; +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// Public helper functions +//---------------------------------------------------------------------------------------------------------------------- +template +typename C::Point get_new_parent(const std::unique_ptr &curve, const typename C::Chunk &new_children) +{ + return curve->hash_grow( + curve->hash_init_point(), + 0,/*offset*/ + curve->zero_scalar(), + new_children + ); +}; +template Helios::Point get_new_parent(const std::unique_ptr &curve, + const typename Helios::Chunk &new_children); +template Selene::Point get_new_parent(const std::unique_ptr &curve, + const typename Selene::Chunk &new_children); +//---------------------------------------------------------------------------------------------------------------------- +std::shared_ptr curve_trees_v1(const std::size_t helios_chunk_width, const std::size_t selene_chunk_width) +{ + std::unique_ptr helios(new Helios()); + std::unique_ptr selene(new Selene()); + return std::shared_ptr( + new CurveTreesV1( + std::move(helios), + std::move(selene), + helios_chunk_width, + selene_chunk_width + ) + ); +}; +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// Static functions +//---------------------------------------------------------------------------------------------------------------------- +// After hashing a layer of children points, convert those children x-coordinates into their respective cycle +// scalars, and prepare them to be hashed for the next layer +template +static std::vector next_child_scalars_from_children(const std::unique_ptr &c_child, + const typename C_CHILD::Point *last_root, + const LayerExtension &children) +{ + std::vector child_scalars_out; + child_scalars_out.reserve(1 + children.hashes.size()); + + // If we're creating a *new* root at the existing root layer, we may need to include the *existing* root when + // hashing the *existing* root layer + if (last_root != nullptr) + { + // If the children don't already include the existing root, then we need to include it to be hashed + // - the children would include the existing root already if the existing root was updated in the child + // layer (the start_idx would be 0) + if (children.start_idx > 0) + { + MDEBUG("Updating root layer and including the existing root in next children"); + child_scalars_out.emplace_back(c_child->point_to_cycle_scalar(*last_root)); + } + } + + // Convert child points to scalars + tower_cycle::extend_scalars_from_cycle_points(c_child, children.hashes, child_scalars_out); + + return child_scalars_out; +}; +//---------------------------------------------------------------------------------------------------------------------- +template +static void 
hash_first_chunk(const std::unique_ptr &curve, + const typename C::Scalar *old_last_child, + const typename C::Point *old_last_parent, + const std::size_t start_offset, + const std::vector &new_child_scalars, + const std::size_t chunk_size, + typename C::Point &hash_out) +{ + // Prepare to hash + const auto &existing_hash = old_last_parent != nullptr + ? *old_last_parent + : curve->hash_init_point(); + + const auto &prior_child_after_offset = old_last_child != nullptr + ? *old_last_child + : curve->zero_scalar(); + + const auto chunk_start = new_child_scalars.data(); + const typename C::Chunk chunk{chunk_start, chunk_size}; + + MDEBUG("existing_hash: " << curve->to_string(existing_hash) << " , start_offset: " << start_offset + << " , prior_child_after_offset: " << curve->to_string(prior_child_after_offset)); + + for (std::size_t i = 0; i < chunk_size; ++i) + MDEBUG("Hashing child in first chunk " << curve->to_string(chunk_start[i])); + + // Do the hash + auto chunk_hash = curve->hash_grow( + existing_hash, + start_offset, + prior_child_after_offset, + chunk + ); + + MDEBUG("Child chunk_start_idx " << 0 << " result: " << curve->to_string(chunk_hash) + << " , chunk_size: " << chunk_size); + + // We've got our hash + hash_out = std::move(chunk_hash); +} +//---------------------------------------------------------------------------------------------------------------------- +template +static void hash_next_chunk(const std::unique_ptr &curve, + const std::size_t chunk_start_idx, + const std::vector &new_child_scalars, + const std::size_t chunk_size, + typename C::Point &hash_out) +{ + const auto chunk_start = new_child_scalars.data() + chunk_start_idx; + const typename C::Chunk chunk{chunk_start, chunk_size}; + + for (std::size_t i = 0; i < chunk_size; ++i) + MDEBUG("Child chunk_start_idx " << chunk_start_idx << " hashing child " << curve->to_string(chunk_start[i])); + + auto chunk_hash = get_new_parent(curve, chunk); + + MDEBUG("Child chunk_start_idx " << chunk_start_idx << " result: " << curve->to_string(chunk_hash) + << " , chunk_size: " << chunk_size); + + // We've got our hash + hash_out = std::move(chunk_hash); +} +//---------------------------------------------------------------------------------------------------------------------- +// Hash chunks of a layer of new children, outputting the next layer's parents +template +static LayerExtension hash_children_chunks(const std::unique_ptr &curve, + const typename C::Scalar *old_last_child, + const typename C::Point *old_last_parent, + const std::size_t start_offset, + const uint64_t next_parent_start_index, + const std::vector &new_child_scalars, + const std::size_t chunk_width) +{ + LayerExtension parents_out; + parents_out.start_idx = next_parent_start_index; + parents_out.update_existing_last_hash = old_last_parent != nullptr; + + CHECK_AND_ASSERT_THROW_MES(!new_child_scalars.empty(), "empty child scalars"); + CHECK_AND_ASSERT_THROW_MES(chunk_width > start_offset, "start_offset must be smaller than chunk_width"); + + // See how many children we need to fill up the existing last chunk + std::size_t chunk_size = std::min(new_child_scalars.size(), chunk_width - start_offset); + + CHECK_AND_ASSERT_THROW_MES(new_child_scalars.size() >= chunk_size, "unexpected first chunk size"); + + const std::size_t n_chunks = 1 // first chunk + + (new_child_scalars.size() - chunk_size) / chunk_width // middle chunks + + (((new_child_scalars.size() - chunk_size) % chunk_width > 0) ? 
1 : 0); // final chunk + + parents_out.hashes.resize(n_chunks); + + MDEBUG("First chunk_size: " << chunk_size << " , num new child scalars: " << new_child_scalars.size() + << " , start_offset: " << start_offset << " , parent layer start idx: " << parents_out.start_idx); + + // Hash all chunks in parallel + tools::threadpool& tpool = tools::threadpool::getInstanceForCompute(); + tools::threadpool::waiter waiter(tpool); + + // Hash the first chunk + tpool.submit(&waiter, + [ + &curve, + &old_last_child, + &old_last_parent, + &new_child_scalars, + &parents_out, + start_offset, + chunk_size + ]() + { + auto &hash_out = parents_out.hashes[0]; + hash_first_chunk(curve, + old_last_child, + old_last_parent, + start_offset, + new_child_scalars, + chunk_size, + hash_out); + }, + true + ); + + // Hash chunks of child scalars to create the parent hashes + std::size_t chunk_start_idx = chunk_size; + std::size_t chunk_idx = 1; + while (chunk_start_idx < new_child_scalars.size()) + { + // Fill a complete chunk, or add the remaining new children to the last chunk + chunk_size = std::min(chunk_width, new_child_scalars.size() - chunk_start_idx); + + CHECK_AND_ASSERT_THROW_MES(chunk_idx < parents_out.hashes.size(), "unexpected chunk_idx"); + + tpool.submit(&waiter, + [ + &curve, + &new_child_scalars, + &parents_out, + chunk_start_idx, + chunk_size, + chunk_idx + ]() + { + auto &hash_out = parents_out.hashes[chunk_idx]; + hash_next_chunk(curve, chunk_start_idx, new_child_scalars, chunk_size, hash_out); + }, + true + ); + + // Advance to the next chunk + chunk_start_idx += chunk_size; + + CHECK_AND_ASSERT_THROW_MES(chunk_start_idx <= new_child_scalars.size(), "unexpected chunk start idx"); + + ++chunk_idx; + } + + CHECK_AND_ASSERT_THROW_MES(chunk_idx == n_chunks, "unexpected n chunks"); + CHECK_AND_ASSERT_THROW_MES(waiter.wait(), "failed to hash chunks"); + + return parents_out; +}; +//---------------------------------------------------------------------------------------------------------------------- +static GrowLayerInstructions get_grow_layer_instructions(const uint64_t old_total_children, + const uint64_t new_total_children, + const std::size_t parent_chunk_width, + const bool last_child_will_change) +{ + // 1. Check pre-conditions on total number of children + // - If there's only 1 old child, it must be the old root, and we must be setting a new parent layer after old root + const bool setting_next_layer_after_old_root = old_total_children == 1; + if (setting_next_layer_after_old_root) + { + CHECK_AND_ASSERT_THROW_MES(new_total_children > old_total_children, + "new_total_children must be > old_total_children when setting next layer after old root"); + } + else + { + CHECK_AND_ASSERT_THROW_MES(new_total_children >= old_total_children, + "new_total_children must be >= old_total_children"); + } + + // 2. Calculate old and new total number of parents using totals for children + // If there's only 1 child, then it must be the old root and thus it would have no old parents + const uint64_t old_total_parents = old_total_children > 1 + ? (1 + ((old_total_children - 1) / parent_chunk_width)) + : 0; + const uint64_t new_total_parents = 1 + ((new_total_children - 1) / parent_chunk_width); + + // 3. 
Check pre-conditions on total number of parents + CHECK_AND_ASSERT_THROW_MES(new_total_parents >= old_total_parents, + "new_total_parents must be >= old_total_parents"); + CHECK_AND_ASSERT_THROW_MES(new_total_parents < new_total_children, + "new_total_parents must be < new_total_children"); + + if (setting_next_layer_after_old_root) + { + CHECK_AND_ASSERT_THROW_MES(old_total_parents == 0, + "old_total_parents expected to be 0 when setting next layer after old root"); + } + + // 4. Set the current offset in the last chunk + // - Note: this value starts at the last child in the last chunk, but it might need to be decremented by 1 if we're + // changing that last child + std::size_t offset = old_total_parents > 0 + ? (old_total_children % parent_chunk_width) + : 0; + + // 5. Check if the last chunk is full (keep in mind it's also possible it's empty) + const bool last_chunk_is_full = offset == 0; + + // 6. When the last child changes, we'll need to use its old value to update the parent + // - We only care if the child has a parent, otherwise we won't need the child's old value to update the parent + // (since there is no parent to update) + const bool need_old_last_child = old_total_parents > 0 && last_child_will_change; + + // 7. If we're changing the last child, we need to subtract the offset by 1 to account for that child + if (need_old_last_child) + { + CHECK_AND_ASSERT_THROW_MES(old_total_children > 0, "no old children but last child is supposed to change"); + + // If the chunk is full, must subtract the chunk width by 1 + offset = offset == 0 ? (parent_chunk_width - 1) : (offset - 1); + } + + // 8. When the last parent changes, we'll need to use its old value to update itself + const bool adding_members_to_existing_last_chunk = old_total_parents > 0 && !last_chunk_is_full + && new_total_children > old_total_children; + const bool need_old_last_parent = need_old_last_child || adding_members_to_existing_last_chunk; + + // 9. 
Set the next parent's start index + uint64_t next_parent_start_index = old_total_parents; + if (need_old_last_parent) + { + // If we're updating the last parent, we need to bring the starting parent index back 1 + CHECK_AND_ASSERT_THROW_MES(old_total_parents > 0, "no old parents but last parent is supposed to change1"); + --next_parent_start_index; + } + + // Done + MDEBUG("parent_chunk_width: " << parent_chunk_width + << " , old_total_children: " << old_total_children + << " , new_total_children: " << new_total_children + << " , old_total_parents: " << old_total_parents + << " , new_total_parents: " << new_total_parents + << " , setting_next_layer_after_old_root: " << setting_next_layer_after_old_root + << " , need_old_last_child: " << need_old_last_child + << " , need_old_last_parent: " << need_old_last_parent + << " , start_offset: " << offset + << " , next_parent_start_index: " << next_parent_start_index); + + return GrowLayerInstructions{ + .parent_chunk_width = parent_chunk_width, + .old_total_children = old_total_children, + .new_total_children = new_total_children, + .old_total_parents = old_total_parents, + .new_total_parents = new_total_parents, + .setting_next_layer_after_old_root = setting_next_layer_after_old_root, + .need_old_last_child = need_old_last_child, + .need_old_last_parent = need_old_last_parent, + .start_offset = offset, + .next_parent_start_index = next_parent_start_index, + }; + +}; +//---------------------------------------------------------------------------------------------------------------------- +static GrowLayerInstructions get_leaf_layer_grow_instructions(const uint64_t old_n_leaf_tuples, + const uint64_t new_n_leaf_tuples, + const std::size_t leaf_tuple_size, + const std::size_t leaf_layer_chunk_width) +{ + // The leaf layer can never be the root layer + const bool setting_next_layer_after_old_root = false; + + const uint64_t old_total_children = old_n_leaf_tuples * leaf_tuple_size; + const uint64_t new_total_children = (old_n_leaf_tuples + new_n_leaf_tuples) * leaf_tuple_size; + + const uint64_t old_total_parents = old_total_children > 0 + ? 
(1 + ((old_total_children - 1) / leaf_layer_chunk_width)) + : 0; + const uint64_t new_total_parents = 1 + ((new_total_children - 1) / leaf_layer_chunk_width); + + CHECK_AND_ASSERT_THROW_MES(new_total_children >= old_total_children, + "new_total_children must be >= old_total_children"); + CHECK_AND_ASSERT_THROW_MES(new_total_parents >= old_total_parents, + "new_total_parents must be >= old_total_parents"); + + // Since leaf layer is append-only, no leaf can ever change and we'll never need an old leaf + const bool need_old_last_child = false; + + const std::size_t offset = old_total_children % leaf_layer_chunk_width; + + const bool last_chunk_is_full = offset == 0; + const bool adding_members_to_existing_last_chunk = old_total_parents > 0 && !last_chunk_is_full + && new_total_children > old_total_children; + const bool need_old_last_parent = adding_members_to_existing_last_chunk; + + uint64_t next_parent_start_index = old_total_parents; + if (need_old_last_parent) + { + // If we're updating the last parent, we need to bring the starting parent index back 1 + CHECK_AND_ASSERT_THROW_MES(old_total_parents > 0, "no old parents but last parent is supposed to change2"); + --next_parent_start_index; + } + + MDEBUG("parent_chunk_width: " << leaf_layer_chunk_width + << " , old_total_children: " << old_total_children + << " , new_total_children: " << new_total_children + << " , old_total_parents: " << old_total_parents + << " , new_total_parents: " << new_total_parents + << " , setting_next_layer_after_old_root: " << setting_next_layer_after_old_root + << " , need_old_last_child: " << need_old_last_child + << " , need_old_last_parent: " << need_old_last_parent + << " , start_offset: " << offset + << " , next_parent_start_index: " << next_parent_start_index); + + return GrowLayerInstructions{ + .parent_chunk_width = leaf_layer_chunk_width, + .old_total_children = old_total_children, + .new_total_children = new_total_children, + .old_total_parents = old_total_parents, + .new_total_parents = new_total_parents, + .setting_next_layer_after_old_root = setting_next_layer_after_old_root, + .need_old_last_child = need_old_last_child, + .need_old_last_parent = need_old_last_parent, + .start_offset = offset, + .next_parent_start_index = next_parent_start_index, + }; +}; +//---------------------------------------------------------------------------------------------------------------------- +// Helper function used to get the next layer extension used to grow the next layer in the tree +// - for example, if we just grew the parent layer after the leaf layer, the "next layer" would be the grandparent +// layer of the leaf layer +template +static LayerExtension get_next_layer_extension(const std::unique_ptr &c_child, + const std::unique_ptr &c_parent, + const GrowLayerInstructions &grow_layer_instructions, + const std::vector &child_last_hashes, + const std::vector &parent_last_hashes, + const std::vector> child_layer_extensions, + const std::size_t last_updated_child_idx, + const std::size_t last_updated_parent_idx) +{ + // TODO: comments + const auto *child_last_hash = (last_updated_child_idx >= child_last_hashes.size()) + ? nullptr + : &child_last_hashes[last_updated_child_idx]; + + const auto *parent_last_hash = (last_updated_parent_idx >= parent_last_hashes.size()) + ? 
nullptr + : &parent_last_hashes[last_updated_parent_idx]; + + // Pre-conditions + CHECK_AND_ASSERT_THROW_MES(last_updated_child_idx < child_layer_extensions.size(), "missing child layer"); + const auto &child_extension = child_layer_extensions[last_updated_child_idx]; + + if (grow_layer_instructions.setting_next_layer_after_old_root) + { + CHECK_AND_ASSERT_THROW_MES((last_updated_child_idx + 1) == child_last_hashes.size(), + "unexpected last updated child idx"); + CHECK_AND_ASSERT_THROW_MES(child_last_hash != nullptr, "missing last child when setting layer after old root"); + } + + const auto child_scalars = next_child_scalars_from_children(c_child, + grow_layer_instructions.setting_next_layer_after_old_root ? child_last_hash : nullptr, + child_extension); + + if (grow_layer_instructions.need_old_last_parent) + CHECK_AND_ASSERT_THROW_MES(parent_last_hash != nullptr, "missing last parent"); + + typename C_PARENT::Scalar last_child_scalar; + if (grow_layer_instructions.need_old_last_child) + { + CHECK_AND_ASSERT_THROW_MES(child_last_hash != nullptr, "missing last child"); + last_child_scalar = c_child->point_to_cycle_scalar(*child_last_hash); + } + + // Do the hashing + LayerExtension layer_extension = hash_children_chunks( + c_parent, + grow_layer_instructions.need_old_last_child ? &last_child_scalar : nullptr, + grow_layer_instructions.need_old_last_parent ? parent_last_hash : nullptr, + grow_layer_instructions.start_offset, + grow_layer_instructions.next_parent_start_index, + child_scalars, + grow_layer_instructions.parent_chunk_width + ); + + CHECK_AND_ASSERT_THROW_MES((layer_extension.start_idx + layer_extension.hashes.size()) == + grow_layer_instructions.new_total_parents, + "unexpected num parents extended"); + + return layer_extension; +} +//---------------------------------------------------------------------------------------------------------------------- +static TrimLayerInstructions get_trim_layer_instructions( + const uint64_t old_total_children, + const uint64_t new_total_children, + const std::size_t parent_chunk_width, + const bool last_child_will_change) +{ + CHECK_AND_ASSERT_THROW_MES(new_total_children > 0, "new total children must be > 0"); + CHECK_AND_ASSERT_THROW_MES(old_total_children >= new_total_children, + "old_total_children must be >= new_total_children"); + + // Calculate old and new total number of parents using totals for children + const uint64_t old_total_parents = 1 + ((old_total_children - 1) / parent_chunk_width); + const uint64_t new_total_parents = 1 + ((new_total_children - 1) / parent_chunk_width); + + CHECK_AND_ASSERT_THROW_MES(old_total_parents >= new_total_parents, + "old_total_parents must be >= new_total_parents"); + CHECK_AND_ASSERT_THROW_MES(new_total_children > new_total_parents, + "new_total_children must be > new_total_parents"); + + const std::size_t old_offset = old_total_children % parent_chunk_width; + const std::size_t new_offset = new_total_children % parent_chunk_width; + + // Get the number of existing children in what will become the new last chunk after trimming + const uint64_t new_last_chunk_old_num_children = (old_total_parents > new_total_parents || old_offset == 0) + ? 
parent_chunk_width + : old_offset; + + MDEBUG("new_last_chunk_old_num_children: " << new_last_chunk_old_num_children << ", new_offset: " << new_offset); + + CHECK_AND_ASSERT_THROW_MES(new_last_chunk_old_num_children >= new_offset, + "unexpected new_last_chunk_old_num_children"); + + // Get the number of children we'll be trimming from the new last chunk + const std::size_t trim_n_children = new_offset == 0 + ? 0 // The last chunk will remain full when the new_offset == 0 + : new_last_chunk_old_num_children - new_offset; + + // We use hash trim if we're trimming fewer elems in the last chunk than the number of elems remaining + const bool need_last_chunk_children_to_trim = trim_n_children > 0 && trim_n_children <= new_offset; + + // Otherwise we use hash_grow + const bool need_last_chunk_remaining_children = trim_n_children > 0 && trim_n_children > new_offset; + + CHECK_AND_ASSERT_THROW_MES(!(need_last_chunk_children_to_trim && need_last_chunk_remaining_children), + "cannot both need last children to trim and need the remaining children"); + + // If we're trimming from the new last chunk OR an element in the new last chunk will change, then we're going to + // update the existing last hash, since its children are changing + const bool update_existing_last_hash = trim_n_children > 0 || last_child_will_change; + + // If we're trimming using remaining children, then we're just going to call hash_grow as if the chunk is being + // hashed for the first time, and so we don't need the existing last hash in that case, even if the hash is updating + const bool need_existing_last_hash = update_existing_last_hash && !need_last_chunk_remaining_children; + + // Set the hash_offset to use when calling hash_grow or hash_trim + std::size_t hash_offset = 0; + if (need_last_chunk_children_to_trim) + { + CHECK_AND_ASSERT_THROW_MES(new_offset > 0, "new_offset must be > 0 when trimming last chunk children"); + hash_offset = new_offset; + + if (last_child_will_change) + { + // We decrement the offset we use to hash the chunk if the last child is changing, since we're going to + // use the old value of the last child when trimming + --hash_offset; + } + } + else if (need_last_chunk_remaining_children) + { + // If we're trimming using remaining children, then we're just going to call hash_grow with offset 0 + hash_offset = 0; + } + else if (last_child_will_change) + { + // We're not trimming at all in this case, we're only updating the existing last hash with hash_trim. We need + // hash_offset to point at the existing last child's position in the chunk, i.e. 1 less than the end offset + hash_offset = new_offset == 0 + ? 
(parent_chunk_width - 1) // chunk is full, so decrement full width by 1 + : (new_offset - 1); + } + + // Set the child index range so the caller knows which children to read from the tree + uint64_t start_trim_idx = 0; + uint64_t end_trim_idx = 0; + if (need_last_chunk_children_to_trim) + { + // We'll call hash_trim to trim the children between [offset, last chunk end] + const uint64_t chunk_boundary_start = (new_total_parents - 1) * parent_chunk_width; + const uint64_t chunk_boundary_end = chunk_boundary_start + parent_chunk_width; + + start_trim_idx = chunk_boundary_start + hash_offset; + end_trim_idx = std::min(chunk_boundary_end, old_total_children); + } + else if (need_last_chunk_remaining_children) + { + // We'll call hash_grow with the remaining children between [0, offset] + CHECK_AND_ASSERT_THROW_MES(new_total_children >= new_offset, "new_offset is unexpectedly high"); + start_trim_idx = new_total_children - new_offset; + end_trim_idx = new_total_children; + + if (last_child_will_change) + { + // We don't need the last old child if it's changing, we'll just use its new value. Decrement the + // end_trim_idx by 1 so we know not to read and use the last old child from the tree in this case. + CHECK_AND_ASSERT_THROW_MES(end_trim_idx > 0, "end_trim_idx cannot be 0"); + --end_trim_idx; + } + } + + MDEBUG("parent_chunk_width: " << parent_chunk_width + << " , old_total_children: " << old_total_children + << " , new_total_children: " << new_total_children + << " , old_total_parents: " << old_total_parents + << " , new_total_parents: " << new_total_parents + << " , need_last_chunk_children_to_trim: " << need_last_chunk_children_to_trim + << " , need_last_chunk_remaining_children: " << need_last_chunk_remaining_children + << " , need_existing_last_hash: " << need_existing_last_hash + << " , need_new_last_child: " << last_child_will_change + << " , update_existing_last_hash: " << update_existing_last_hash + << " , hash_offset: " << hash_offset + << " , start_trim_idx: " << start_trim_idx + << " , end_trim_idx: " << end_trim_idx); + + return TrimLayerInstructions{ + .parent_chunk_width = parent_chunk_width, + .old_total_children = old_total_children, + .new_total_children = new_total_children, + .old_total_parents = old_total_parents, + .new_total_parents = new_total_parents, + .update_existing_last_hash = update_existing_last_hash, + .need_last_chunk_children_to_trim = need_last_chunk_children_to_trim, + .need_last_chunk_remaining_children = need_last_chunk_remaining_children, + .need_existing_last_hash = need_existing_last_hash, + .need_new_last_child = last_child_will_change, + .hash_offset = hash_offset, + .start_trim_idx = start_trim_idx, + .end_trim_idx = end_trim_idx, + }; +} +//---------------------------------------------------------------------------------------------------------------------- +template +static typename fcmp_pp::curve_trees::LayerReduction get_next_layer_reduction( + const std::unique_ptr &c_child, + const std::unique_ptr &c_parent, + const TrimLayerInstructions &trim_layer_instructions, + const std::vector &parent_last_hashes, + const std::vector> &children_to_trim, + const std::vector &child_last_hashes, + const std::size_t parent_layer_idx, + const std::size_t child_layer_idx, + const std::vector> &child_reductions) +{ + LayerReduction layer_reduction_out; + + layer_reduction_out.new_total_parents = trim_layer_instructions.new_total_parents; + layer_reduction_out.update_existing_last_hash = trim_layer_instructions.update_existing_last_hash; + + if 
(!trim_layer_instructions.need_last_chunk_children_to_trim && + !trim_layer_instructions.need_last_chunk_remaining_children && + !trim_layer_instructions.need_new_last_child) + { + // In this case we're just trimming to the boundary, and don't need to get a new hash + CHECK_AND_ASSERT_THROW_MES(!layer_reduction_out.update_existing_last_hash, "unexpected update last hash"); + MDEBUG("Trimming to chunk boundary"); + return layer_reduction_out; + } + + if (trim_layer_instructions.need_existing_last_hash) + CHECK_AND_ASSERT_THROW_MES(parent_last_hashes.size() > parent_layer_idx, "missing last parent hash"); + + const typename C_PARENT::Point &existing_hash = trim_layer_instructions.need_existing_last_hash + ? parent_last_hashes[parent_layer_idx] + : c_parent->hash_init_point(); + + std::vector child_scalars; + if (trim_layer_instructions.need_last_chunk_children_to_trim + || trim_layer_instructions.need_last_chunk_remaining_children) + { + CHECK_AND_ASSERT_THROW_MES(children_to_trim.size() > parent_layer_idx, "missing children to trim"); + child_scalars = children_to_trim[parent_layer_idx]; + } + + typename C_PARENT::Scalar new_last_child_scalar = c_parent->zero_scalar(); + if (trim_layer_instructions.need_new_last_child) + { + CHECK_AND_ASSERT_THROW_MES(child_layer_idx > 0, "child index cannot be 0 here"); + CHECK_AND_ASSERT_THROW_MES(child_reductions.size() == child_layer_idx, "unexpected child layer idx"); + CHECK_AND_ASSERT_THROW_MES(child_reductions.back().update_existing_last_hash, "expected new last child"); + + const typename C_CHILD::Point &new_last_child = child_reductions.back().new_last_hash; + new_last_child_scalar = c_child->point_to_cycle_scalar(new_last_child); + + if (trim_layer_instructions.need_last_chunk_remaining_children) + { + child_scalars.emplace_back(std::move(new_last_child_scalar)); + } + else if (!trim_layer_instructions.need_last_chunk_children_to_trim) + { + // Falling to this conditional means we're not trimming at all, just updating the old last child + const std::size_t last_child_layer_idx = child_layer_idx - 1; + CHECK_AND_ASSERT_THROW_MES(child_last_hashes.size() > last_child_layer_idx, "missing last child hash"); + + const typename C_CHILD::Point &old_last_child = child_last_hashes[last_child_layer_idx]; + auto old_last_child_scalar = c_child->point_to_cycle_scalar(old_last_child); + + child_scalars.emplace_back(std::move(old_last_child_scalar)); + } + } + + for (std::size_t i = 0; i < child_scalars.size(); ++i) + MDEBUG("Hashing child " << c_parent->to_string(child_scalars[i])); + + if (trim_layer_instructions.need_last_chunk_remaining_children) + { + MDEBUG("hash_grow: existing_hash: " << c_parent->to_string(existing_hash) + << " , hash_offset: " << trim_layer_instructions.hash_offset); + + layer_reduction_out.new_last_hash = c_parent->hash_grow( + existing_hash, + trim_layer_instructions.hash_offset, + c_parent->zero_scalar(), + typename C_PARENT::Chunk{child_scalars.data(), child_scalars.size()}); + } + else + { + MDEBUG("hash_trim: existing_hash: " << c_parent->to_string(existing_hash) + << " , hash_offset: " << trim_layer_instructions.hash_offset + << " , child_to_grow_back: " << c_parent->to_string(new_last_child_scalar)); + + layer_reduction_out.new_last_hash = c_parent->hash_trim( + existing_hash, + trim_layer_instructions.hash_offset, + typename C_PARENT::Chunk{child_scalars.data(), child_scalars.size()}, + new_last_child_scalar); + } + + MDEBUG("Result hash: " << c_parent->to_string(layer_reduction_out.new_last_hash)); + + return 
layer_reduction_out; +} +//---------------------------------------------------------------------------------------------------------------------- +static PreLeafTuple output_to_pre_leaf_tuple(const OutputPair &output_pair) +{ + const crypto::public_key &output_pubkey = output_pair.output_pubkey; + const rct::key &commitment = output_pair.commitment; + + rct::key O, C; + if (!fcmp_pp::clear_torsion(rct::pk2rct(output_pubkey), O)) + throw std::runtime_error("output pubkey is invalid"); + if (!fcmp_pp::clear_torsion(commitment, C)) + throw std::runtime_error("commitment is invalid"); + + if (O == rct::I) + throw std::runtime_error("O cannot equal identity"); + if (C == rct::I) + throw std::runtime_error("C cannot equal identity"); + + // Must use the original output pubkey to derive I to prevent double spends, since torsioned outputs yield + // a distinct I and key image from their respective torsion-cleared output (and torsioned outputs are spendable + // before fcmp++) + crypto::ec_point I; + crypto::derive_key_image_generator(output_pubkey, I); + + PreLeafTuple plt; + if (!fcmp_pp::point_to_ed_y_derivatives(O, plt.O_pre_x)) + throw std::runtime_error("failed to get ed y derivatives from O"); + if (!fcmp_pp::point_to_ed_y_derivatives(rct::pt2rct(I), plt.I_pre_x)) + throw std::runtime_error("failed to get ed y derivatives from I"); + if (!fcmp_pp::point_to_ed_y_derivatives(C, plt.C_pre_x)) + throw std::runtime_error("failed to get ed y derivatives from C"); + + return plt; +} +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// CurveTrees public member functions +//---------------------------------------------------------------------------------------------------------------------- +template<> +CurveTrees<Helios, Selene>::LeafTuple CurveTrees<Helios, Selene>::leaf_tuple(const OutputPair &output_pair) const +{ + const auto plt = output_to_pre_leaf_tuple(output_pair); + + rct::key O_x, I_x, C_x; + fcmp_pp::ed_y_derivatives_to_wei_x(plt.O_pre_x, O_x); + fcmp_pp::ed_y_derivatives_to_wei_x(plt.I_pre_x, I_x); + fcmp_pp::ed_y_derivatives_to_wei_x(plt.C_pre_x, C_x); + + return LeafTuple{ + .O_x = tower_cycle::selene_scalar_from_bytes(O_x), + .I_x = tower_cycle::selene_scalar_from_bytes(I_x), + .C_x = tower_cycle::selene_scalar_from_bytes(C_x) + }; +}; +//---------------------------------------------------------------------------------------------------------------------- +template<typename C1, typename C2> +std::vector<typename C2::Scalar> CurveTrees<C1, C2>::flatten_leaves(std::vector<LeafTuple> &&leaves) const +{ + std::vector<typename C2::Scalar> flattened_leaves; + flattened_leaves.reserve(leaves.size() * LEAF_TUPLE_SIZE); + + for (auto &l : leaves) + { + flattened_leaves.emplace_back(std::move(l.O_x)); + flattened_leaves.emplace_back(std::move(l.I_x)); + flattened_leaves.emplace_back(std::move(l.C_x)); + } + + return flattened_leaves; +}; + +// Explicit instantiation +template std::vector<Selene::Scalar> CurveTrees<Helios, Selene>::flatten_leaves( + std::vector<CurveTrees<Helios, Selene>::LeafTuple> &&leaves) const; +//---------------------------------------------------------------------------------------------------------------------- +template<typename C1, typename C2> +typename CurveTrees<C1, C2>::TreeExtension CurveTrees<C1, C2>::get_tree_extension( + const uint64_t old_n_leaf_tuples, + const LastHashes &existing_last_hashes, + std::vector<OutputContext> &&new_outputs) const +{ + TreeExtension tree_extension; + tree_extension.leaves.start_leaf_tuple_idx = old_n_leaf_tuples; + + if (new_outputs.empty()) + return tree_extension; + + // Sort 
the outputs by order they appear in the chain + const auto sort_fn = [](const OutputContext &a, const OutputContext &b) { return a.output_id < b.output_id; }; + std::sort(new_outputs.begin(), new_outputs.end(), sort_fn); + + // Convert sorted outputs into leaf tuples, place each element of each leaf tuple in a flat vector to be hashed, + // and place the outputs in a tree extension struct for insertion into the db. We ignore invalid outputs, since + // they cannot be inserted to the tree. + std::vector flattened_leaves; + this->set_valid_leaves(flattened_leaves, tree_extension.leaves.tuples, std::move(new_outputs)); + + if (flattened_leaves.empty()) + return tree_extension; + + auto grow_layer_instructions = get_leaf_layer_grow_instructions( + old_n_leaf_tuples, + tree_extension.leaves.tuples.size(), + LEAF_TUPLE_SIZE, + m_leaf_layer_chunk_width); + + if (grow_layer_instructions.need_old_last_parent) + CHECK_AND_ASSERT_THROW_MES(!existing_last_hashes.c2_last_hashes.empty(), "missing last c2 parent"); + + // Hash the leaf layer + auto leaf_parents = hash_children_chunks(m_c2, + nullptr, // We never need the old last child from leaf layer because the leaf layer is always append-only + grow_layer_instructions.need_old_last_parent ? &existing_last_hashes.c2_last_hashes[0] : nullptr, + grow_layer_instructions.start_offset, + grow_layer_instructions.next_parent_start_index, + flattened_leaves, + m_leaf_layer_chunk_width + ); + + CHECK_AND_ASSERT_THROW_MES( + (leaf_parents.start_idx + leaf_parents.hashes.size()) == grow_layer_instructions.new_total_parents, + "unexpected num leaf parents extended"); + + tree_extension.c2_layer_extensions.emplace_back(std::move(leaf_parents)); + + // Alternate between hashing c2 children, c1 children, c2, c1, ... + bool parent_is_c1 = true; + + std::size_t c1_last_idx = 0; + std::size_t c2_last_idx = 0; + while (grow_layer_instructions.new_total_parents > 1) + { + MDEBUG("Getting extension for layer " << (c1_last_idx + c2_last_idx + 1)); + + const uint64_t new_total_children = grow_layer_instructions.new_total_parents; + + grow_layer_instructions = this->set_next_layer_extension( + grow_layer_instructions, + parent_is_c1, + existing_last_hashes, + c1_last_idx, + c2_last_idx, + tree_extension + ); + + // Sanity check to make sure we're making progress to exit the while loop + CHECK_AND_ASSERT_THROW_MES(grow_layer_instructions.new_total_parents < new_total_children, + "expect fewer parents than children in every layer"); + + parent_is_c1 = !parent_is_c1; + } + + return tree_extension; +}; + +// Explicit instantiation +template CurveTrees::TreeExtension CurveTrees::get_tree_extension( + const uint64_t old_n_leaf_tuples, + const LastHashes &existing_last_hashes, + std::vector &&new_outputs) const; +//---------------------------------------------------------------------------------------------------------------------- +template +std::vector CurveTrees::get_trim_instructions( + const uint64_t old_n_leaf_tuples, + const uint64_t trim_n_leaf_tuples) const +{ + CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples >= trim_n_leaf_tuples, "cannot trim more leaves than exist"); + CHECK_AND_ASSERT_THROW_MES(trim_n_leaf_tuples > 0, "must be trimming some leaves"); + + std::vector trim_instructions; + + if (old_n_leaf_tuples == trim_n_leaf_tuples) + return trim_instructions; + + // Get trim instructions for the leaf layer + { + const uint64_t old_total_leaves = old_n_leaf_tuples * LEAF_TUPLE_SIZE; + const uint64_t new_total_leaves = (old_n_leaf_tuples - trim_n_leaf_tuples) * 
LEAF_TUPLE_SIZE; + + const std::size_t parent_chunk_width = m_leaf_layer_chunk_width; + + // Leaf layer's last child never changes since leaf layer is pop-/append-only + const bool last_child_will_change = false; + + auto trim_leaf_layer_instructions = get_trim_layer_instructions( + old_total_leaves, + new_total_leaves, + parent_chunk_width, + last_child_will_change); + + trim_instructions.emplace_back(std::move(trim_leaf_layer_instructions)); + } + + bool use_c2 = false; + while (trim_instructions.back().new_total_parents > 1) + { + auto trim_layer_instructions = get_trim_layer_instructions( + trim_instructions.back().old_total_parents, + trim_instructions.back().new_total_parents, + use_c2 ? m_c2_width : m_c1_width, + trim_instructions.back().update_existing_last_hash); + + trim_instructions.emplace_back(std::move(trim_layer_instructions)); + use_c2 = !use_c2; + } + + return trim_instructions; +} + +// Explicit instantiation +template std::vector CurveTrees::get_trim_instructions( + const uint64_t old_n_leaf_tuples, + const uint64_t trim_n_leaf_tuples) const; +//---------------------------------------------------------------------------------------------------------------------- +template +typename CurveTrees::TreeReduction CurveTrees::get_tree_reduction( + const std::vector &trim_instructions, + const LastChunkChildrenToTrim &children_to_trim, + const LastHashes &last_hashes) const +{ + TreeReduction tree_reduction_out; + + if (trim_instructions.empty()) + { + tree_reduction_out.new_total_leaf_tuples = 0; + return tree_reduction_out; + } + + CHECK_AND_ASSERT_THROW_MES((trim_instructions[0].new_total_children % LEAF_TUPLE_SIZE) == 0, + "unexpected new total leaves"); + const uint64_t new_total_leaf_tuples = trim_instructions[0].new_total_children / LEAF_TUPLE_SIZE; + tree_reduction_out.new_total_leaf_tuples = new_total_leaf_tuples; + + bool use_c2 = true; + std::size_t c1_idx = 0; + std::size_t c2_idx = 0; + + for (const auto &trim_layer_instructions : trim_instructions) + { + MDEBUG("Trimming layer " << (c1_idx + c2_idx) << " (c1_idx: " << c1_idx << " , c2_idx: " << c2_idx << ")"); + + if (use_c2) + { + auto c2_layer_reduction_out = get_next_layer_reduction( + m_c1, + m_c2, + trim_layer_instructions, + last_hashes.c2_last_hashes, + children_to_trim.c2_children, + last_hashes.c1_last_hashes, + c2_idx, + c1_idx, + tree_reduction_out.c1_layer_reductions + ); + + tree_reduction_out.c2_layer_reductions.emplace_back(std::move(c2_layer_reduction_out)); + ++c2_idx; + } + else + { + auto c1_layer_reduction_out = get_next_layer_reduction( + m_c2, + m_c1, + trim_layer_instructions, + last_hashes.c1_last_hashes, + children_to_trim.c1_children, + last_hashes.c2_last_hashes, + c1_idx, + c2_idx, + tree_reduction_out.c2_layer_reductions + ); + + tree_reduction_out.c1_layer_reductions.emplace_back(std::move(c1_layer_reduction_out)); + ++c1_idx; + } + + use_c2 = !use_c2; + } + + return tree_reduction_out; +}; + +// Explicit instantiation +template CurveTrees::TreeReduction CurveTrees::get_tree_reduction( + const std::vector &trim_instructions, + const LastChunkChildrenToTrim &children_to_trim, + const LastHashes &last_hashes) const; +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// CurveTrees private member functions 
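A worked example of the trim decision implemented above (an illustrative sketch replicating the formulas from get_trim_layer_instructions; the chunk width of 10 and the child counts are arbitrary):

// With width 10, trimming 25 -> 23 children leaves new_offset = 3 and trims
// 2 of the last chunk's 5 children; 2 <= 3 remaining, so hash_trim is used.
// Trimming 25 -> 21 leaves new_offset = 1 and trims 4; 4 > 1, so the chunk
// is re-hashed from its single remaining child via hash_grow.
#include <cstdint>
#include <cstdio>

int main()
{
    const uint64_t width = 10, old_total = 25;
    for (uint64_t new_total : {23ull, 21ull})
    {
        const uint64_t old_parents = 1 + ((old_total - 1) / width);
        const uint64_t new_parents = 1 + ((new_total - 1) / width);
        const uint64_t old_offset = old_total % width, new_offset = new_total % width;
        const uint64_t last_chunk_old = (old_parents > new_parents || old_offset == 0) ? width : old_offset;
        const uint64_t trim_n = (new_offset == 0) ? 0 : last_chunk_old - new_offset;
        std::printf("trim %llu -> use %s\n", (unsigned long long)trim_n,
            (trim_n > 0 && trim_n <= new_offset) ? "hash_trim" : "hash_grow");
    }
}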
+template<typename C1, typename C2>
+typename CurveTrees<C1, C2>::TreeReduction CurveTrees<C1, C2>::get_tree_reduction(
+    const std::vector<TrimLayerInstructions> &trim_instructions,
+    const LastChunkChildrenToTrim &children_to_trim,
+    const LastHashes &last_hashes) const
+{
+    TreeReduction tree_reduction_out;
+
+    if (trim_instructions.empty())
+    {
+        tree_reduction_out.new_total_leaf_tuples = 0;
+        return tree_reduction_out;
+    }
+
+    CHECK_AND_ASSERT_THROW_MES((trim_instructions[0].new_total_children % LEAF_TUPLE_SIZE) == 0,
+        "unexpected new total leaves");
+    const uint64_t new_total_leaf_tuples = trim_instructions[0].new_total_children / LEAF_TUPLE_SIZE;
+    tree_reduction_out.new_total_leaf_tuples = new_total_leaf_tuples;
+
+    bool use_c2 = true;
+    std::size_t c1_idx = 0;
+    std::size_t c2_idx = 0;
+
+    for (const auto &trim_layer_instructions : trim_instructions)
+    {
+        MDEBUG("Trimming layer " << (c1_idx + c2_idx) << " (c1_idx: " << c1_idx << " , c2_idx: " << c2_idx << ")");
+
+        if (use_c2)
+        {
+            auto c2_layer_reduction_out = get_next_layer_reduction(
+                m_c1,
+                m_c2,
+                trim_layer_instructions,
+                last_hashes.c2_last_hashes,
+                children_to_trim.c2_children,
+                last_hashes.c1_last_hashes,
+                c2_idx,
+                c1_idx,
+                tree_reduction_out.c1_layer_reductions
+            );
+
+            tree_reduction_out.c2_layer_reductions.emplace_back(std::move(c2_layer_reduction_out));
+            ++c2_idx;
+        }
+        else
+        {
+            auto c1_layer_reduction_out = get_next_layer_reduction(
+                m_c2,
+                m_c1,
+                trim_layer_instructions,
+                last_hashes.c1_last_hashes,
+                children_to_trim.c1_children,
+                last_hashes.c2_last_hashes,
+                c1_idx,
+                c2_idx,
+                tree_reduction_out.c2_layer_reductions
+            );
+
+            tree_reduction_out.c1_layer_reductions.emplace_back(std::move(c1_layer_reduction_out));
+            ++c1_idx;
+        }
+
+        use_c2 = !use_c2;
+    }
+
+    return tree_reduction_out;
+};
+
+// Explicit instantiation
+template CurveTrees<Helios, Selene>::TreeReduction CurveTrees<Helios, Selene>::get_tree_reduction(
+    const std::vector<TrimLayerInstructions> &trim_instructions,
+    const LastChunkChildrenToTrim &children_to_trim,
+    const LastHashes &last_hashes) const;
+//----------------------------------------------------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------------------------------
+// CurveTrees private member functions
+//----------------------------------------------------------------------------------------------------------------------
+template<typename C1, typename C2>
+void CurveTrees<C1, C2>::set_valid_leaves(
+    std::vector<typename C2::Scalar> &flattened_leaves_out,
+    std::vector<OutputContext> &tuples_out,
+    std::vector<OutputContext> &&new_outputs) const
+{
+    // Keep track of valid outputs to make sure we only use leaves from valid outputs. Can't use std::vector<bool>
+    // because std::vector<bool> concurrent access is not thread safe.
+    enum Boolean : uint8_t {
+        False = 0,
+        True = 1,
+    };
+    std::vector<Boolean> valid_outputs(new_outputs.size(), False);
+
+    tools::threadpool& tpool = tools::threadpool::getInstanceForCompute();
+    tools::threadpool::waiter waiter(tpool);
+
+    // Step 1. Multithreaded convert valid outputs into Edwards y derivatives needed to get Wei x coordinates
+    // TODO: investigate batched threading (as opposed to small tasks)
+    std::vector<PreLeafTuple> pre_leaves;
+    pre_leaves.resize(new_outputs.size());
+    for (std::size_t i = 0; i < new_outputs.size(); ++i)
+    {
+        tpool.submit(&waiter,
+                [
+                    &new_outputs,
+                    &valid_outputs,
+                    &pre_leaves,
+                    i
+                ]()
+                {
+                    CHECK_AND_ASSERT_THROW_MES(valid_outputs.size() > i, "unexpected valid outputs size");
+                    CHECK_AND_ASSERT_THROW_MES(!valid_outputs[i], "unexpected valid output");
+                    CHECK_AND_ASSERT_THROW_MES(pre_leaves.size() > i, "unexpected pre_leaves size");
+
+                    const auto &output_pair = new_outputs[i].output_pair;
+
+                    try { pre_leaves[i] = output_to_pre_leaf_tuple(output_pair); }
+                    catch(...) { /* Invalid outputs can't be added to the tree */ return; }
+
+                    valid_outputs[i] = True;
+                },
+                true
+            );
+    }
+
+    CHECK_AND_ASSERT_THROW_MES(waiter.wait(), "failed to convert outputs to ed y derivatives");
+
+    // Step 2. Collect valid Edwards y derivatives
+    const std::size_t n_valid_outputs = std::count(valid_outputs.begin(), valid_outputs.end(), True);
+    const std::size_t n_valid_leaf_elems = n_valid_outputs * LEAF_TUPLE_SIZE;
+
+    // Collecting (1+y)'s
+    fe *one_plus_y_vec = (fe *) malloc(n_valid_leaf_elems * sizeof(fe));
+    CHECK_AND_ASSERT_THROW_MES(one_plus_y_vec, "failed malloc one_plus_y_vec");
+
+    // Collecting (1-y)'s
+    fe *one_minus_y_vec = (fe *) malloc(n_valid_leaf_elems * sizeof(fe));
+    CHECK_AND_ASSERT_THROW_MES(one_minus_y_vec, "failed malloc one_minus_y_vec");
+
+    std::size_t valid_i = 0;
+    for (std::size_t i = 0; i < valid_outputs.size(); ++i)
+    {
+        if (!valid_outputs[i])
+            continue;
+
+        CHECK_AND_ASSERT_THROW_MES(pre_leaves.size() > i, "unexpected size of pre_leaves");
+        CHECK_AND_ASSERT_THROW_MES(n_valid_leaf_elems > valid_i, "unexpected valid_i");
+
+        auto &pl = pre_leaves[i];
+
+        auto &O_pre_x = pl.O_pre_x;
+        auto &I_pre_x = pl.I_pre_x;
+        auto &C_pre_x = pl.C_pre_x;
+
+        static_assert(LEAF_TUPLE_SIZE == 3, "unexpected leaf tuple size");
+
+        // TODO: avoid copying underlying (tried using pointer to pointers, but wasn't clean)
+        memcpy(&one_plus_y_vec[valid_i],   &O_pre_x.one_plus_y, sizeof(fe));
+        memcpy(&one_plus_y_vec[valid_i+1], &I_pre_x.one_plus_y, sizeof(fe));
+        memcpy(&one_plus_y_vec[valid_i+2], &C_pre_x.one_plus_y, sizeof(fe));
+
+        memcpy(&one_minus_y_vec[valid_i],   &O_pre_x.one_minus_y, sizeof(fe));
+        memcpy(&one_minus_y_vec[valid_i+1], &I_pre_x.one_minus_y, sizeof(fe));
+        memcpy(&one_minus_y_vec[valid_i+2], &C_pre_x.one_minus_y, sizeof(fe));
+
+        valid_i += LEAF_TUPLE_SIZE;
+    }
+
+    CHECK_AND_ASSERT_THROW_MES(n_valid_leaf_elems == valid_i, "unexpected end valid_i");
+
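+    // The batch inversion in Step 3 below follows Montgomery's trick: one field
+    // inversion plus O(n) multiplications instead of n inversions. Illustrative
+    // sketch of the idea (the real work happens inside fe_batch_invert):
+    //
+    //     partials[0] = x[0]; partials[i] = partials[i-1] * x[i];
+    //     inv = invert(partials[n-1]);                    // the only inversion
+    //     for (i = n-1; i > 0; --i) { out[i] = inv * partials[i-1]; inv = inv * x[i]; }
+    //     out[0] = inv;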
+    // Step 3. Get batch inverse of all valid (1-y)'s
+    // - Batch inversion is significantly faster than inverting 1 at a time
+    fe *inv_one_minus_y_vec = (fe *) malloc(n_valid_leaf_elems * sizeof(fe));
+    CHECK_AND_ASSERT_THROW_MES(inv_one_minus_y_vec, "failed malloc inv_one_minus_y_vec");
+    CHECK_AND_ASSERT_THROW_MES(fe_batch_invert(inv_one_minus_y_vec, one_minus_y_vec, n_valid_leaf_elems) == 0,
+        "failed to batch invert");
+
+    // Step 4. Multithreaded get Wei x's and convert to Selene scalars
+    // TODO: investigate batched threading (as opposed to small tasks)
+    flattened_leaves_out.resize(n_valid_leaf_elems);
+    for (std::size_t i = 0; i < n_valid_leaf_elems; ++i)
+    {
+        tpool.submit(&waiter,
+                [
+                    &inv_one_minus_y_vec,
+                    &one_plus_y_vec,
+                    &flattened_leaves_out,
+                    i
+                ]()
+                {
+                    rct::key wei_x;
+                    fe_ed_y_derivatives_to_wei_x(wei_x.bytes, inv_one_minus_y_vec[i], one_plus_y_vec[i]);
+                    flattened_leaves_out[i] = tower_cycle::selene_scalar_from_bytes(wei_x);
+                },
+                true
+            );
+    }
+
+    CHECK_AND_ASSERT_THROW_MES(waiter.wait(), "failed to convert outputs to wei x coords");
+
+    // Step 5. Set valid tuples to be stored in the db
+    tuples_out.clear();
+    tuples_out.reserve(n_valid_outputs);
+    for (std::size_t i = 0; i < valid_outputs.size(); ++i)
+    {
+        if (!valid_outputs[i])
+            continue;
+
+        CHECK_AND_ASSERT_THROW_MES(new_outputs.size() > i, "unexpected size of valid outputs");
+
+        // We can derive {O.x,I.x,C.x} from output pairs, so we store just the output context in the db to save 32 bytes
+        tuples_out.emplace_back(std::move(new_outputs[i]));
+    }
+
+    // Step 6. Clean up
+    free(one_plus_y_vec);
+    free(one_minus_y_vec);
+    free(inv_one_minus_y_vec);
+}
+//----------------------------------------------------------------------------------------------------------------------
+template<typename C1, typename C2>
+GrowLayerInstructions CurveTrees<C1, C2>::set_next_layer_extension(
+    const GrowLayerInstructions &prev_layer_instructions,
+    const bool parent_is_c1,
+    const LastHashes &last_hashes,
+    std::size_t &c1_last_idx_inout,
+    std::size_t &c2_last_idx_inout,
+    TreeExtension &tree_extension_inout) const
+{
+    const auto &c1_last_hashes = last_hashes.c1_last_hashes;
+    const auto &c2_last_hashes = last_hashes.c2_last_hashes;
+
+    auto &c1_layer_extensions_out = tree_extension_inout.c1_layer_extensions;
+    auto &c2_layer_extensions_out = tree_extension_inout.c2_layer_extensions;
+
+    const std::size_t parent_chunk_width = parent_is_c1 ?
m_c1_width : m_c2_width; + + const auto grow_layer_instructions = get_grow_layer_instructions( + prev_layer_instructions.old_total_parents, + prev_layer_instructions.new_total_parents, + parent_chunk_width, + prev_layer_instructions.need_old_last_parent + ); + + if (parent_is_c1) + { + auto c1_layer_extension = get_next_layer_extension( + m_c2, + m_c1, + grow_layer_instructions, + c2_last_hashes, + c1_last_hashes, + c2_layer_extensions_out, + c2_last_idx_inout, + c1_last_idx_inout + ); + + c1_layer_extensions_out.emplace_back(std::move(c1_layer_extension)); + ++c2_last_idx_inout; + } + else + { + auto c2_layer_extension = get_next_layer_extension( + m_c1, + m_c2, + grow_layer_instructions, + c1_last_hashes, + c2_last_hashes, + c1_layer_extensions_out, + c1_last_idx_inout, + c2_last_idx_inout + ); + + c2_layer_extensions_out.emplace_back(std::move(c2_layer_extension)); + ++c1_last_idx_inout; + } + + return grow_layer_instructions; +}; +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +} //namespace curve_trees +} //namespace fcmp_pp diff --git a/src/fcmp_pp/curve_trees.h b/src/fcmp_pp/curve_trees.h new file mode 100644 index 0000000000..aca0c753fb --- /dev/null +++ b/src/fcmp_pp/curve_trees.h @@ -0,0 +1,350 @@ +// Copyright (c) 2024, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+#pragma once
+
+#include "crypto/crypto.h"
+#include "cryptonote_basic/cryptonote_basic.h"
+#include "fcmp_pp_crypto.h"
+#include "misc_log_ex.h"
+#include "tower_cycle.h"
+
+#include <memory>
+#include <unordered_map>
+#include <vector>
+
+
+namespace fcmp_pp
+{
+namespace curve_trees
+{
+//----------------------------------------------------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------------------------------
+// Hash a chunk of new children
+template<typename C>
+typename C::Point get_new_parent(const std::unique_ptr<C> &curve, const typename C::Chunk &new_children);
+//----------------------------------------------------------------------------------------------------------------------
+// A layer of contiguous hashes starting from a specific start_idx in the tree
+template<typename C>
+struct LayerExtension final
+{
+    uint64_t start_idx{0};
+    bool update_existing_last_hash;
+    std::vector<typename C::Point> hashes;
+};
+
+// A struct useful to trim a layer and update its last hash if necessary
+template<typename C>
+struct LayerReduction final
+{
+    uint64_t new_total_parents{0};
+    bool update_existing_last_hash;
+    typename C::Point new_last_hash;
+};
+
+// Useful metadata for growing a layer
+struct GrowLayerInstructions final
+{
+    // The max chunk width of children used to hash into a parent
+    std::size_t parent_chunk_width;
+
+    // Total children refers to the total number of elements in a layer
+    uint64_t old_total_children;
+    uint64_t new_total_children;
+
+    // Total parents refers to the total number of hashes of chunks of children
+    uint64_t old_total_parents;
+    uint64_t new_total_parents;
+
+    // When updating the tree, we use this boolean to know when we'll need to use the tree's existing old root in order
+    // to set a new layer after that root
+    // - We'll need to be sure the old root gets hashed when setting the next layer
+    bool setting_next_layer_after_old_root;
+    // When the last child in the child layer changes, we'll need to use its old value to update its parent hash
+    bool need_old_last_child;
+    // When the last parent in the layer changes, we'll need to use its old value to update itself
+    bool need_old_last_parent;
+
+    // The first chunk that needs to be updated's first child's offset within that chunk
+    std::size_t start_offset;
+    // The parent's starting index in the layer
+    uint64_t next_parent_start_index;
+};
+
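+// Worked example (illustrative numbers): with parent_chunk_width = 18, growing
+// a layer from 20 to 40 children takes old_total_parents = ceil(20/18) = 2 to
+// new_total_parents = ceil(40/18) = 3. The old last parent (over children
+// [18,20)) gains new children, so need_old_last_parent is true and
+// start_offset = 20 % 18 = 2.
+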
+// Useful metadata for trimming a layer
+struct TrimLayerInstructions final
+{
+    // The max chunk width of children used to hash into a parent
+    std::size_t parent_chunk_width;
+
+    // Total children refers to the total number of elements in a layer
+    uint64_t old_total_children;
+    uint64_t new_total_children;
+
+    // Total parents refers to the total number of hashes of chunks of children
+    uint64_t old_total_parents;
+    uint64_t new_total_parents;
+
+    // True if the new last chunk's existing parent hash will need to be updated
+    bool update_existing_last_hash;
+
+    // Whether we need to explicitly trim children from the new last chunk
+    bool need_last_chunk_children_to_trim;
+    // Whether we need to trim by growing using the remaining children from the new last chunk
+    bool need_last_chunk_remaining_children;
+    // Whether we need the new last chunk's existing parent hash in order to complete the trim
+    bool need_existing_last_hash;
+    // Whether we need the new last child from the new last chunk in order to complete the trim
+    bool need_new_last_child;
+
+    // The offset to use when hashing the last chunk
+    std::size_t hash_offset;
+
+    // The starting and ending indexes of the children we're going to need to trim the last chunk
+    uint64_t start_trim_idx;
+    uint64_t end_trim_idx;
+};
+
+// Output pub key and commitment, ready to be converted to a leaf tuple
+// - From {output_pubkey,commitment} -> {O,C} -> {O.x,I.x,C.x}
+// - Output pairs do NOT necessarily have torsion cleared. We need the output pubkey as it exists in the chain in order
+//   to derive the correct I (when deriving {O.x, I.x, C.x}). Torsion clearing O before deriving I from O would enable
+//   spending a torsioned output once before the fcmp++ fork and again with a different key image via fcmp++.
+#pragma pack(push, 1)
+struct OutputPair final
+{
+    crypto::public_key output_pubkey;
+    rct::key commitment;
+};
+
+// Contextual wrapper for the output
+struct OutputContext final
+{
+    // Output's global id in the chain, used to insert the output in the tree in the order it entered the chain
+    uint64_t output_id;
+    OutputPair output_pair;
+};
+#pragma pack(pop)
+
+static_assert(sizeof(OutputPair) == (32+32), "db expects 64 bytes for output pairs");
+static_assert(sizeof(OutputContext) == (8+32+32), "db expects 72 bytes for output context");
+
+using OutputsByUnlockBlock = std::unordered_map<uint64_t, std::vector<OutputContext>>;
+
+// Ed25519 points (can go from OutputTuple -> LeafTuple)
+struct OutputTuple final
+{
+    rct::key O;
+    rct::key I;
+    rct::key C;
+};
+
+// Struct composed of ec elems needed to get a full-fledged leaf tuple
+struct PreLeafTuple final
+{
+    fcmp_pp::EdYDerivatives O_pre_x;
+    fcmp_pp::EdYDerivatives I_pre_x;
+    fcmp_pp::EdYDerivatives C_pre_x;
+};
+
+//----------------------------------------------------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------------------------------
+// This class is useful to help update the curve trees merkle tree without needing to keep the entire tree in memory
+// - It requires instantiation with the C1 and C2 curve classes and widths, hardening the tree structure
+// - It ties the C2 curve in the tree to the leaf layer (the leaf layer is composed of C2 scalars)
+template<typename C1, typename C2>
+class CurveTrees
+{
+public:
+    CurveTrees(std::unique_ptr<C1> &&c1,
+        std::unique_ptr<C2> &&c2,
+        const std::size_t c1_width,
+        const std::size_t c2_width):
+            m_c1{std::move(c1)},
+            m_c2{std::move(c2)},
+            m_c1_width{c1_width},
+            m_c2_width{c2_width},
+            m_leaf_layer_chunk_width{LEAF_TUPLE_SIZE * c2_width}
+    {
+        assert(c1_width > 0);
+        assert(c2_width > 0);
+    };
+
+//member structs
+public:
+    // Tuple that composes a single leaf in the tree
+    struct LeafTuple final
+    {
+        // Output ed25519 point x-coordinate
+        typename C2::Scalar O_x;
+        // Key image generator x-coordinate
+        typename C2::Scalar I_x;
+        // Commitment x-coordinate
+        typename C2::Scalar C_x;
+    };
+    static const std::size_t LEAF_TUPLE_SIZE = 3;
+    static_assert(sizeof(LeafTuple) == (sizeof(typename C2::Scalar) * LEAF_TUPLE_SIZE), "unexpected LeafTuple size");
+
+    // Contiguous leaves in the tree, starting at a specified start_idx in the leaf layer
+    struct Leaves final
+    {
+        // Starting leaf tuple index in the leaf layer
+        uint64_t start_leaf_tuple_idx{0};
+        // Contiguous leaves in a tree that start at the start_idx
+        std::vector<LeafTuple> tuples;
+    };
+
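+    // Layer ordering example: leaves are C2 (Selene) scalars, so the layer
+    // directly above the leaves is a C2 layer. A tree with 5 layers above the
+    // leaves is therefore ordered c2[0], c1[0], c2[1], c1[1], c2[2].
+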
+    // A struct useful to extend an existing tree
+    // - layers alternate between C1 and C2
+    // - c2_layer_extensions[0] is first layer after leaves, then c1_layer_extensions[0], c2_layer_extensions[1], etc
+    struct TreeExtension final
+    {
+        Leaves leaves;
+        std::vector<LayerExtension<C1>> c1_layer_extensions;
+        std::vector<LayerExtension<C2>> c2_layer_extensions;
+    };
+
+    // A struct useful to reduce the number of leaves in an existing tree
+    // - layers alternate between C1 and C2
+    // - c2_layer_reductions[0] is first layer after leaves, then c1_layer_reductions[0], c2_layer_reductions[1], etc
+    struct TreeReduction final
+    {
+        uint64_t new_total_leaf_tuples;
+        std::vector<LayerReduction<C1>> c1_layer_reductions;
+        std::vector<LayerReduction<C2>> c2_layer_reductions;
+    };
+
+    // Last hashes from each layer in the tree
+    // - layers alternate between C1 and C2
+    // - c2_last_hashes[0] refers to the layer after leaves, then c1_last_hashes[0], then c2_last_hashes[1], etc
+    struct LastHashes final
+    {
+        std::vector<typename C1::Point> c1_last_hashes;
+        std::vector<typename C2::Point> c2_last_hashes;
+    };
+
+    // The children we'll trim from each last chunk in the tree
+    // - layers alternate between C1 and C2
+    // - c2_children[0] refers to the layer after leaves, then c1_children[0], then c2_children[1], etc
+    struct LastChunkChildrenToTrim final
+    {
+        std::vector<std::vector<typename C1::Scalar>> c1_children;
+        std::vector<std::vector<typename C2::Scalar>> c2_children;
+    };
+
+//member functions
+public:
+    // Convert output pairs into leaf tuples, from {output pubkey,commitment} -> {O,C} -> {O.x,I.x,C.x}
+    LeafTuple leaf_tuple(const OutputPair &output_pair) const;
+
+    // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [O.x, I.x, C.x, O.x, I.x, C.x...]
+    std::vector<typename C2::Scalar> flatten_leaves(std::vector<LeafTuple> &&leaves) const;
+
+    // Take in the existing number of leaf tuples and the existing last hash in each layer in the tree, as well as new
+    // outputs to add to the tree, and return a tree extension struct that can be used to extend a tree
+    TreeExtension get_tree_extension(const uint64_t old_n_leaf_tuples,
+        const LastHashes &existing_last_hashes,
+        std::vector<OutputContext> &&new_leaf_tuples) const;
+
+    // Get instructions useful for trimming all existing layers in the tree
+    std::vector<TrimLayerInstructions> get_trim_instructions(
+        const uint64_t old_n_leaf_tuples,
+        const uint64_t trim_n_leaf_tuples) const;
+
+    // Take in the instructions useful for trimming all existing layers in the tree, all children to be trimmed from
+    // each last chunk, and the existing last hash in what will become the new last parent of each layer, and return
+    // a tree reduction struct that can be used to trim a tree
+    TreeReduction get_tree_reduction(
+        const std::vector<TrimLayerInstructions> &trim_instructions,
+        const LastChunkChildrenToTrim &children_to_trim,
+        const LastHashes &last_hashes) const;
+
+private:
+    // Multithreaded helper function to convert outputs to leaf tuples and set leaves on tree extension
+    void set_valid_leaves(
+        std::vector<typename C2::Scalar> &flattened_leaves_out,
+        std::vector<OutputContext> &tuples_out,
+        std::vector<OutputContext> &&new_outputs) const;
+
+    // Helper function used to set the next layer extension used to grow the next layer in the tree
+    // - for example, if we just grew the parent layer after the leaf layer, the "next layer" would be the grandparent
+    //   layer of the leaf layer
+    GrowLayerInstructions set_next_layer_extension(
+        const GrowLayerInstructions &prev_layer_instructions,
+        const bool parent_is_c1,
+        const LastHashes &last_hashes,
+        std::size_t &c1_last_idx_inout,
+        std::size_t &c2_last_idx_inout,
+        TreeExtension &tree_extension_inout) const;
+
+//public member variables
+public:
+    // The curve interfaces
+    const std::unique_ptr<C1> m_c1;
+    const std::unique_ptr<C2> m_c2;
+
+    // The leaf layer uses a different chunk width from the other layers
+    const std::size_t m_leaf_layer_chunk_width;
+
+    // The chunk widths of the layers in the tree tied to each curve
+    const std::size_t m_c1_width;
+    const std::size_t m_c2_width;
+};
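+// With the default SELENE_CHUNK_WIDTH of 18 (below), m_leaf_layer_chunk_width
+// is LEAF_TUPLE_SIZE * 18 = 54: each hash in the layer above the leaves commits
+// to 54 leaf scalars, i.e. 18 complete {O.x, I.x, C.x} leaf tuples.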
+//----------------------------------------------------------------------------------------------------------------------
+using Helios = tower_cycle::Helios;
+using Selene = tower_cycle::Selene;
+using CurveTreesV1 = CurveTrees<Helios, Selene>;
+
+// https://github.com/kayabaNerve/fcmp-plus-plus/blob
+//   /b2742e86f3d18155fd34dd1ed69cb8f79b900fce/crypto/fcmps/src/tests.rs#L81-L82
+const std::size_t HELIOS_CHUNK_WIDTH = 38;
+const std::size_t SELENE_CHUNK_WIDTH = 18;
+
+std::shared_ptr<CurveTreesV1> curve_trees_v1(
+    const std::size_t helios_chunk_width = HELIOS_CHUNK_WIDTH,
+    const std::size_t selene_chunk_width = SELENE_CHUNK_WIDTH);
+
+// A path in the tree containing whole chunks at each layer
+// - leaves contain a complete chunk of leaves, encoded as compressed ed25519 points
+// - c2_layers[0] refers to the chunk of elems in the tree in the layer after leaves. The hash of the chunk of
+//   leaves is 1 member of the c2_layers[0] chunk. The rest of c2_layers[0] is the chunk of elems that hash is in.
+// - layers alternate between C1 and C2
+// - c1_layers[0] refers to the chunk of elems in the tree in the layer after c2_layers[0]. The hash of the chunk
+//   of c2_layers[0] is 1 member of the c1_layers[0] chunk. The rest of c1_layers[0] is the chunk of elems that hash
+//   is in.
+// - c2_layers[1] refers to the chunk of elems in the tree in the layer after c1_layers[0] etc.
+struct PathV1 final
+{
+    std::vector<OutputTuple> leaves;
+    std::vector<std::vector<typename Helios::Point>> c1_layers;
+    std::vector<std::vector<typename Selene::Point>> c2_layers;
+};
+//----------------------------------------------------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------------------------------
+} //namespace curve_trees
+} //namespace fcmp_pp
diff --git a/src/fcmp_pp/fcmp_pp_crypto.cpp b/src/fcmp_pp/fcmp_pp_crypto.cpp
new file mode 100644
index 0000000000..6d3f2507a7
--- /dev/null
+++ b/src/fcmp_pp/fcmp_pp_crypto.cpp
@@ -0,0 +1,80 @@
+// Copyright (c) 2024, The Monero Project
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without modification, are
+// permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this list of
+//    conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright notice, this list
+//    of conditions and the following disclaimer in the documentation and/or other
+//    materials provided with the distribution.
+//
+// 3. Neither the name of the copyright holder nor the names of its contributors may be
+//    used to endorse or promote products derived from this software without specific
+//    prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "fcmp_pp_crypto.h" + +#include "ringct/rctOps.h" + +namespace fcmp_pp +{ +//---------------------------------------------------------------------------------------------------------------------- +bool clear_torsion(const rct::key &k, rct::key &k_out) { + ge_p3 point; + if (ge_frombytes_vartime(&point, k.bytes) != 0) + return false; + // mul by inv 8, then mul by 8 + ge_p2 point_inv_8; + ge_scalarmult(&point_inv_8, rct::INV_EIGHT.bytes, &point); + ge_p1p1 point_inv_8_mul_8; + ge_mul8(&point_inv_8_mul_8, &point_inv_8); + ge_p3 torsion_cleared_point; + ge_p1p1_to_p3(&torsion_cleared_point, &point_inv_8_mul_8); + ge_p3_tobytes(k_out.bytes, &torsion_cleared_point); + return true; +} +//---------------------------------------------------------------------------------------------------------------------- +bool point_to_ed_y_derivatives(const rct::key &pub, EdYDerivatives &ed_y_derivatives) { + if (pub == rct::I) + return false; + fe y; + if (fe_frombytes_vartime(y, pub.bytes) != 0) + return false; + fe one; + fe_1(one); + // (1+y),(1-y) + fe_add(ed_y_derivatives.one_plus_y, one, y); + fe_sub(ed_y_derivatives.one_minus_y, one, y); + return true; +} +//---------------------------------------------------------------------------------------------------------------------- +void ed_y_derivatives_to_wei_x(const EdYDerivatives &pre_wei_x, rct::key &wei_x) { + fe inv_one_minus_y; + fe_invert(inv_one_minus_y, pre_wei_x.one_minus_y); + fe_ed_y_derivatives_to_wei_x(wei_x.bytes, inv_one_minus_y, pre_wei_x.one_plus_y); +} +//---------------------------------------------------------------------------------------------------------------------- +bool point_to_wei_x(const rct::key &pub, rct::key &wei_x) { + EdYDerivatives ed_y_derivatives; + if (!point_to_ed_y_derivatives(pub, ed_y_derivatives)) + return false; + ed_y_derivatives_to_wei_x(ed_y_derivatives, wei_x); + return true; +} +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +}//namespace fcmp_pp diff --git a/src/fcmp_pp/fcmp_pp_crypto.h b/src/fcmp_pp/fcmp_pp_crypto.h new file mode 100644 index 0000000000..2c63ff2245 --- /dev/null +++ b/src/fcmp_pp/fcmp_pp_crypto.h @@ -0,0 +1,54 @@ +// Copyright (c) 2024, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. 
Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +extern "C" +{ +#include "crypto/crypto-ops.h" +} +#include "ringct/rctTypes.h" + +namespace fcmp_pp +{ +//---------------------------------------------------------------------------------------------------------------------- +// Field elems needed to get wei x coord +struct EdYDerivatives final +{ + fe one_plus_y; + fe one_minus_y; +}; +//---------------------------------------------------------------------------------------------------------------------- +// TODO: tests for these functions +bool clear_torsion(const rct::key &k, rct::key &k_out); +bool point_to_ed_y_derivatives(const rct::key &pub, EdYDerivatives &ed_y_derivatives); +void ed_y_derivatives_to_wei_x(const EdYDerivatives &ed_y_derivatives, rct::key &wei_x); +bool point_to_wei_x(const rct::key &pub, rct::key &wei_x); +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +}//namespace fcmp_pp diff --git a/src/fcmp_pp/fcmp_pp_rust/.gitignore b/src/fcmp_pp/fcmp_pp_rust/.gitignore new file mode 100644 index 0000000000..5a07b8927f --- /dev/null +++ b/src/fcmp_pp/fcmp_pp_rust/.gitignore @@ -0,0 +1,4 @@ +# If a developer runs cargo build inside this sub-directory to only work with +# the Rust side of things, they'll create this target directory which shouldn't +# be committed +target diff --git a/src/fcmp_pp/fcmp_pp_rust/CMakeLists.txt b/src/fcmp_pp/fcmp_pp_rust/CMakeLists.txt new file mode 100644 index 0000000000..646fc6981d --- /dev/null +++ b/src/fcmp_pp/fcmp_pp_rust/CMakeLists.txt @@ -0,0 +1,118 @@ +# Copyright (c) 2016-2024, The Monero Project +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, are +# permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this list of +# conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, this list +# of conditions and the following disclaimer in the documentation and/or other +# materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors may be +# used to endorse or promote products derived from this software without specific +# prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +if(ARCH_ID MATCHES "x86-64") + set(RUST_ARCH "x86_64") +elseif(ARCH_ID MATCHES "(arm64|armv8a|armv8-a)") + set(RUST_ARCH "aarch64") +elseif(ARCH_ID MATCHES "armv7-a") + set(RUST_ARCH "armv7") +elseif(ARCH_ID MATCHES "i386") + set(RUST_ARCH "i686") +elseif(ARCH_ID MATCHES "riscv64") + set(RUST_ARCH "riscv64gc") +else() + set(RUST_ARCH "${ARCH_ID}") +endif() + +if(MINGW) + set(RUST_PLATFORM "pc-windows") + set(RUST_TOOLCHAIN "-gnu") +elseif(MSVC) + set(RUST_PLATFORM "pc-windows") + set(RUST_TOOLCHAIN "-msvc") +elseif(APPLE) + set(RUST_PLATFORM "apple-darwin") + set(RUST_TOOLCHAIN "") +elseif(FREEBSD) + set(RUST_PLATFORM "unknown-freebsd") + set(RUST_TOOLCHAIN "") +elseif(OPENBSD) + set(RUST_PLATFORM "unknown-openbsd") + set(RUST_TOOLCHAIN "") +elseif(ANDROID) + set(RUST_PLATFORM "linux-android") + if(RUST_ARCH MATCHES "(arm|armv7)") + set(RUST_TOOLCHAIN "eabi") + else() + set(RUST_TOOLCHAIN "") + endif() +elseif(DRAGONFLY) + set(RUST_PLATFORM "unknown-dragonfly") + set(RUST_TOOLCHAIN "") +elseif(CMAKE_SYSTEM_NAME MATCHES "(SunOS|Solaris)") + if(RUST_ARCH MATCHES "x86_64") + set(RUST_PLATFORM "pc-solaris") + set(RUST_TOOLCHAIN "") + elseif(RUST_ARCH MATCHES "sparcv9") + set(RUST_PLATFORM "sun-solaris") + set(RUST_TOOLCHAIN "") + endif() +else() + set(RUST_PLATFORM "unknown-linux") + if(RUST_ARCH MATCHES "armv7") + # Rust does support non-HF, yet Monero assumes HF for armv7 + set(RUST_TOOLCHAIN "-gnueabihf") + else() + set(RUST_TOOLCHAIN "-gnu") + endif() +endif() + +set(RUST_TARGET "${RUST_ARCH}-${RUST_PLATFORM}${RUST_TOOLCHAIN}") + +if (CMAKE_BUILD_TYPE STREQUAL "Debug") + set(CARGO_CMD cargo build --target "${RUST_TARGET}") + set(TARGET_DIR "debug") +else () + set(CARGO_CMD cargo build --target "${RUST_TARGET}" --release) + set(TARGET_DIR "release") +endif () + +set(FCMP_PP_RUST_HEADER_DIR "${MONERO_GENERATED_HEADERS_DIR}/fcmp_pp_rust") +set(FCMP_PP_RUST_HEADER "${FCMP_PP_RUST_HEADER_DIR}/fcmp++.h") +set(FCMP_PP_RUST_LIB "${CMAKE_CURRENT_BINARY_DIR}/libfcmp_pp_rust.a") + +# Removing OUTPUT files makes sure custom command runs every time +file(REMOVE_RECURSE "${FCMP_PP_RUST_HEADER_DIR}") +file(MAKE_DIRECTORY "${FCMP_PP_RUST_HEADER_DIR}") + +file(REMOVE "${FCMP_PP_RUST_LIB}") + +add_custom_command( + COMMENT "Building fcmp++ rust lib" + OUTPUT ${FCMP_PP_RUST_HEADER} + OUTPUT ${FCMP_PP_RUST_LIB} + COMMAND CARGO_TARGET_DIR=${CMAKE_CURRENT_BINARY_DIR} ${CARGO_CMD} + COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/fcmp++.h ${FCMP_PP_RUST_HEADER} + COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/${RUST_TARGET}/${TARGET_DIR}/libfcmp_pp_rust.a ${FCMP_PP_RUST_LIB} + COMMAND echo "Finished copying fcmp++ rust targets" + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + VERBATIM +) + +add_custom_target(fcmp_pp_rust DEPENDS ${FCMP_PP_RUST_LIB}) diff 
--git a/src/fcmp_pp/fcmp_pp_rust/Cargo.lock b/src/fcmp_pp/fcmp_pp_rust/Cargo.lock new file mode 100644 index 0000000000..742b6570e8 --- /dev/null +++ b/src/fcmp_pp/fcmp_pp_rust/Cargo.lock @@ -0,0 +1,804 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "ahash" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + +[[package]] +name = "base64ct" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" + +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] + +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest", +] + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array 0.14.7", +] + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "ciphersuite" +version = "0.4.1" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "blake2", + "dalek-ff-group", + "digest", + "elliptic-curve", + "ff", + "flexible-transcript", + "group", + "helioselene", + "k256", + "minimal-ed448", + "p256", + "rand_core", + "sha2", + "sha3", + "std-shims", + "subtle", + "zeroize", +] + +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + +[[package]] +name = "cpufeatures" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +dependencies = [ + "libc", +] + +[[package]] +name = "crypto-bigint" +version = "0.5.5" +source = "git+https://github.com/kayabaNerve/crypto-bigint?branch=c-repr#78352771313f1e9b8e48abe5ce30d50d6bdd291d" +dependencies = [ + "generic-array 0.14.7", + "rand_core", + "subtle", + "zeroize", +] + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array 0.14.7", + "typenum", +] + +[[package]] +name = "curve25519-dalek" +version = "4.1.3" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" +dependencies = [ + "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "digest", + "fiat-crypto", + "group", + "rand_core", + "rustc_version", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "dalek-ff-group" +version = "0.4.1" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "crypto-bigint", + "curve25519-dalek", + "digest", + "ff", + "group", + "rand_core", + "rustversion", + "subtle", + "zeroize", +] + +[[package]] +name = "der" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" +dependencies = [ + "const-oid", + "zeroize", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", + "subtle", +] + +[[package]] +name = "ec-divisors" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "dalek-ff-group", + "group", + "hex", + "rand_core", + "zeroize", +] + +[[package]] +name = "ecdsa" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +dependencies = [ + "der", + "elliptic-curve", + "signature", + "spki", +] + +[[package]] +name = "elliptic-curve" +version = "0.13.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" +dependencies = [ + "base16ct", + "crypto-bigint", + "digest", + "ff", + "generic-array 0.14.7", + "group", + "pkcs8", + "rand_core", + "sec1", + "subtle", + "tap", + "zeroize", +] + +[[package]] +name = "fcmp_pp_rust" +version = "0.0.0" +dependencies = [ + "ciphersuite", + "ec-divisors", + "full-chain-membership-proofs", + "generalized-bulletproofs", + "helioselene", + "monero-fcmp-plus-plus", + "std-shims", +] + +[[package]] +name = "ff" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" +dependencies = [ + "bitvec", + "rand_core", + "subtle", +] + +[[package]] +name = "fiat-crypto" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" + +[[package]] +name = "flexible-transcript" +version = "0.3.2" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "blake2", + "digest", + "merlin", + "rustversion", + "subtle", + "zeroize", +] + +[[package]] +name = "full-chain-membership-proofs" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "blake2", + "ciphersuite", + "ec-divisors", + "generalized-bulletproofs", + 
"generalized-bulletproofs-circuit-abstraction", + "generalized-bulletproofs-ec-gadgets", + "generic-array 1.1.0", + "multiexp", + "rand_core", + "zeroize", +] + +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + +[[package]] +name = "generalized-bulletproofs" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "blake2", + "ciphersuite", + "multiexp", + "rand_core", + "zeroize", +] + +[[package]] +name = "generalized-bulletproofs-circuit-abstraction" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "ciphersuite", + "generalized-bulletproofs", + "zeroize", +] + +[[package]] +name = "generalized-bulletproofs-ec-gadgets" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "ciphersuite", + "generalized-bulletproofs-circuit-abstraction", + "generic-array 1.1.0", +] + +[[package]] +name = "generalized-schnorr" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "ciphersuite", + "flexible-transcript", + "multiexp", + "rand_core", + "std-shims", + "zeroize", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", + "zeroize", +] + +[[package]] +name = "generic-array" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96512db27971c2c3eece70a1e106fbe6c87760234e31e8f7e5634912fe52794a" +dependencies = [ + "typenum", +] + +[[package]] +name = "getrandom" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff", + "rand_core", + "subtle", +] + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash", +] + +[[package]] +name = "helioselene" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "crypto-bigint", + "dalek-ff-group", + "ec-divisors", + "ff", + "group", + "rand_core", + "rustversion", + "subtle", + "zeroize", +] + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "k256" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" +dependencies = [ + "cfg-if", + "ecdsa", + "elliptic-curve", + "once_cell", +] + +[[package]] +name = "keccak" +version = "0.1.5" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +dependencies = [ + "cpufeatures", +] + +[[package]] +name = "libc" +version = "0.2.155" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" + +[[package]] +name = "merlin" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58c38e2799fc0978b65dfff8023ec7843e2330bb462f19198840b34b6582397d" +dependencies = [ + "byteorder", + "keccak", + "rand_core", + "zeroize", +] + +[[package]] +name = "minimal-ed448" +version = "0.4.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "crypto-bigint", + "ff", + "generic-array 1.1.0", + "group", + "rand_core", + "rustversion", + "subtle", + "zeroize", +] + +[[package]] +name = "monero-fcmp-plus-plus" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "ciphersuite", + "dalek-ff-group", + "ec-divisors", + "flexible-transcript", + "full-chain-membership-proofs", + "generalized-bulletproofs", + "generalized-bulletproofs-ec-gadgets", + "generalized-schnorr", + "generic-array 1.1.0", + "monero-generators", + "monero-io", + "monero-primitives", + "multiexp", + "rand_core", + "std-shims", + "zeroize", +] + +[[package]] +name = "monero-generators" +version = "0.4.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "curve25519-dalek", + "dalek-ff-group", + "group", + "monero-io", + "sha3", + "std-shims", + "subtle", +] + +[[package]] +name = "monero-io" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "curve25519-dalek", + "std-shims", +] + +[[package]] +name = "monero-primitives" +version = "0.1.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "curve25519-dalek", + "monero-generators", + "monero-io", + "sha3", + "std-shims", + "zeroize", +] + +[[package]] +name = "multiexp" +version = "0.4.0" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "ff", + "group", + "rand_core", + "rustversion", + "std-shims", + "zeroize", +] + +[[package]] +name = "once_cell" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" + +[[package]] +name = "p256" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" +dependencies = [ + "ecdsa", + "elliptic-curve", + "primeorder", +] + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + +[[package]] +name = "primeorder" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" +dependencies = [ + "elliptic-curve", +] + +[[package]] +name = "proc-macro2" +version = "1.0.86" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver", +] + +[[package]] +name = "rustversion" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" + +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct", + "der", + "generic-array 0.14.7", + "pkcs8", + "subtle", + "zeroize", +] + +[[package]] +name = "semver" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" + +[[package]] +name = "sha2" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest", + "keccak", +] + +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "rand_core", +] + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] + +[[package]] +name = "std-shims" +version = "0.1.1" +source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#529a3fe9c54b7f701d7978b3dc781ea27ba7d7e5" +dependencies = [ + "hashbrown", + "spin", +] + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "2.0.72" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af" +dependencies = [ + 
"proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "typenum" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + +[[package]] +name = "zerocopy" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zeroize" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] diff --git a/src/fcmp_pp/fcmp_pp_rust/Cargo.toml b/src/fcmp_pp/fcmp_pp_rust/Cargo.toml new file mode 100644 index 0000000000..65a487a0c7 --- /dev/null +++ b/src/fcmp_pp/fcmp_pp_rust/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "fcmp_pp_rust" +version = "0.0.0" +edition = "2021" + +[lib] +name = "fcmp_pp_rust" +crate-type = ["staticlib"] + +[dependencies] +std-shims = { git = "https://github.com/kayabaNerve/fcmp-plus-plus" } + +helioselene = { git = "https://github.com/kayabaNerve/fcmp-plus-plus" } +ciphersuite = { git = "https://github.com/kayabaNerve/fcmp-plus-plus", features = ["ed25519", "helioselene"] } + +generalized-bulletproofs = { git = "https://github.com/kayabaNerve/fcmp-plus-plus", features = ["tests"] } + +ec-divisors = { git = "https://github.com/kayabaNerve/fcmp-plus-plus", features = ["ed25519"] } +full-chain-membership-proofs = { git = "https://github.com/kayabaNerve/fcmp-plus-plus" } + +monero-fcmp-plus-plus = { git = "https://github.com/kayabaNerve/fcmp-plus-plus" } + +[patch.crates-io] +crypto-bigint = { git = "https://github.com/kayabaNerve/crypto-bigint", branch = "c-repr" } + +[profile.dev] +lto = "off" +panic = "abort" + +[profile.release] +lto = "off" +panic = "abort" diff --git a/src/fcmp_pp/fcmp_pp_rust/fcmp++.h 
b/src/fcmp_pp/fcmp_pp_rust/fcmp++.h
new file mode 100644
index 0000000000..81f7d02829
--- /dev/null
+++ b/src/fcmp_pp/fcmp_pp_rust/fcmp++.h
@@ -0,0 +1,142 @@
+#include <cstdarg>
+#include <cstdint>
+#include <cstdlib>
+#include <ostream>
+#include <new>
+
+
+namespace fcmp_pp_rust
+{
+// ----- deps C bindings -----
+
+/// Inner integer type that the [`Limb`] newtype wraps.
+// TODO: test 32-bit platforms
+using Word = uintptr_t;
+
+/// Big integers are represented as an array of smaller CPU word-size integers
+/// called "limbs".
+using Limb = Word;
+
+
+/// Stack-allocated big unsigned integer.
+///
+/// Generic over the given number of `LIMBS`
+///
+/// # Encoding support
+/// This type supports many different types of encodings, either via the
+/// [`Encoding`][`crate::Encoding`] trait or various `const fn` decoding and
+/// encoding functions that can be used with [`Uint`] constants.
+///
+/// Optional crate features for encoding (off-by-default):
+/// - `generic-array`: enables [`ArrayEncoding`][`crate::ArrayEncoding`] trait which can be used to
+///   [`Uint`] as `GenericArray` and a [`ArrayDecoding`][`crate::ArrayDecoding`] trait which
+///   can be used to `GenericArray` as [`Uint`].
+/// - `rlp`: support for [Recursive Length Prefix (RLP)][RLP] encoding.
+///
+/// [RLP]: https://eth.wiki/fundamentals/rlp
+template<uintptr_t LIMBS>
+struct Uint {
+  /// Inner limb array. Stored from least significant to most significant.
+  Limb limbs[LIMBS];
+};
+
+
+/// A residue mod `MOD`, represented using `LIMBS` limbs. The modulus of this residue is constant, so it cannot be set at runtime.
+/// Internally, the value is stored in Montgomery form (multiplied by MOD::R) until it is retrieved.
+template<uintptr_t LIMBS>
+struct Residue {
+  Uint<LIMBS> montgomery_form;
+};
+
+
+/// A constant-time implementation of the Ed25519 field.
+struct SeleneScalar {
+  Residue<32 / sizeof(uintptr_t)> _0;
+};
+static_assert(sizeof(SeleneScalar) == 32, "unexpected size of selene scalar");
+
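+/// Sizing note: on a 64-bit target 32 / sizeof(uintptr_t) = 4 limbs of 8 bytes,
+/// and on a 32-bit target 8 limbs of 4 bytes, so the static_assert above pins
+/// the scalar representation to 32 bytes either way.
+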
+/// The field novel to Helios/Selene.
+struct HeliosScalar {
+  Residue<32 / sizeof(uintptr_t)> _0;
+};
+static_assert(sizeof(HeliosScalar) == 32, "unexpected size of helios scalar");
+
+struct HeliosPoint {
+  SeleneScalar x;
+  SeleneScalar y;
+  SeleneScalar z;
+};
+
+struct SelenePoint {
+  HeliosScalar x;
+  HeliosScalar y;
+  HeliosScalar z;
+};
+
+// ----- End deps C bindings -----
+
+struct CResult {
+  void* value;
+  void* err;
+};
+
+template<typename T>
+struct Slice {
+  const T *buf;
+  uintptr_t len;
+};
+
+using HeliosScalarSlice = Slice<HeliosScalar>;
+
+using SeleneScalarSlice = Slice<SeleneScalar>;
+
+extern "C" {
+HeliosPoint helios_hash_init_point();
+
+SelenePoint selene_hash_init_point();
+
+uint8_t *helios_scalar_to_bytes(HeliosScalar helios_scalar);
+
+uint8_t *selene_scalar_to_bytes(SeleneScalar selene_scalar);
+
+uint8_t *helios_point_to_bytes(HeliosPoint helios_point);
+
+uint8_t *selene_point_to_bytes(SelenePoint selene_point);
+
+HeliosPoint helios_point_from_bytes(const uint8_t *helios_point_bytes);
+
+SelenePoint selene_point_from_bytes(const uint8_t *selene_point_bytes);
+
+SeleneScalar selene_scalar_from_bytes(const uint8_t *selene_scalar_bytes);
+
+HeliosScalar selene_point_to_helios_scalar(SelenePoint selene_point);
+
+SeleneScalar helios_point_to_selene_scalar(HeliosPoint helios_point);
+
+HeliosScalar helios_zero_scalar();
+
+SeleneScalar selene_zero_scalar();
+
+CResult hash_grow_helios(HeliosPoint existing_hash,
+                         uintptr_t offset,
+                         HeliosScalar existing_child_at_offset,
+                         HeliosScalarSlice new_children);
+
+CResult hash_trim_helios(HeliosPoint existing_hash,
+                         uintptr_t offset,
+                         HeliosScalarSlice children,
+                         HeliosScalar child_to_grow_back);
+
+CResult hash_grow_selene(SelenePoint existing_hash,
+                         uintptr_t offset,
+                         SeleneScalar existing_child_at_offset,
+                         SeleneScalarSlice new_children);
+
+CResult hash_trim_selene(SelenePoint existing_hash,
+                         uintptr_t offset,
+                         SeleneScalarSlice children,
+                         SeleneScalar child_to_grow_back);
+
+} // extern "C"
+}//namespace fcmp_pp_rust
diff --git a/src/fcmp_pp/fcmp_pp_rust/src/lib.rs b/src/fcmp_pp/fcmp_pp_rust/src/lib.rs
new file mode 100644
index 0000000000..4d9c37c75f
--- /dev/null
+++ b/src/fcmp_pp/fcmp_pp_rust/src/lib.rs
@@ -0,0 +1,232 @@
+use ciphersuite::{
+  group::{
+    ff::{Field, PrimeField},
+    GroupEncoding,
+  },
+  Ciphersuite, Helios, Selene,
+};
+use helioselene::{
+  Field25519 as SeleneScalar, HeliosPoint, HelioseleneField as HeliosScalar, SelenePoint,
+};
+
+use ec_divisors::DivisorCurve;
+use full_chain_membership_proofs::tree::{hash_grow, hash_trim};
+
+use monero_fcmp_plus_plus::{HELIOS_HASH_INIT, SELENE_HASH_INIT, HELIOS_GENERATORS, SELENE_GENERATORS};
+
+// TODO: Use a macro to de-duplicate some of this code
+
+#[no_mangle]
+pub extern "C" fn helios_hash_init_point() -> HeliosPoint {
+  HELIOS_HASH_INIT()
+}
+
+#[no_mangle]
+pub extern "C" fn selene_hash_init_point() -> SelenePoint {
+  SELENE_HASH_INIT()
+}
+
+fn c_u8_32(bytes: [u8; 32]) -> *const u8 {
+  let arr_ptr = Box::into_raw(Box::new(bytes));
+  arr_ptr as *const u8
+}
+
+#[no_mangle]
+pub extern "C" fn helios_scalar_to_bytes(helios_scalar: HeliosScalar) -> *const u8 {
+  c_u8_32(helios_scalar.to_repr())
+}
+
+#[no_mangle]
+pub extern "C" fn selene_scalar_to_bytes(selene_scalar: SeleneScalar) -> *const u8 {
+  c_u8_32(selene_scalar.to_repr())
+}
+
+#[no_mangle]
+pub extern "C" fn helios_point_to_bytes(helios_point: HeliosPoint) -> *const u8 {
+  c_u8_32(helios_point.to_bytes())
+}
+
+#[no_mangle]
+pub extern "C" fn selene_point_to_bytes(selene_point: SelenePoint) -> *const u8 {
+  c_u8_32(selene_point.to_bytes())
+}
+
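+// `c_u8_32` transfers ownership of a heap-allocated [u8; 32] to the caller via
+// Box::into_raw, so each returned pointer must eventually be reclaimed or it
+// leaks. A sketch of a matching release function (hypothetical, not present in
+// this file):
+//
+// #[no_mangle]
+// pub unsafe extern "C" fn free_u8_32(ptr: *mut u8) {
+//   drop(Box::from_raw(ptr as *mut [u8; 32]));
+// }
+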
+#[allow(clippy::not_unsafe_ptr_arg_deref)]
+#[no_mangle]
+pub extern "C" fn helios_point_from_bytes(helios_point: *const u8) -> HeliosPoint {
+  let mut helios_point = unsafe { core::slice::from_raw_parts(helios_point, 32) };
+  // TODO: Return an error here (instead of unwrapping)
+  <Helios as Ciphersuite>::read_G(&mut helios_point).unwrap()
+}
+
+#[allow(clippy::not_unsafe_ptr_arg_deref)]
+#[no_mangle]
+pub extern "C" fn selene_point_from_bytes(selene_point: *const u8) -> SelenePoint {
+  let mut selene_point = unsafe { core::slice::from_raw_parts(selene_point, 32) };
+  // TODO: Return an error here (instead of unwrapping)
+  <Selene as Ciphersuite>::read_G(&mut selene_point).unwrap()
+}
+
+#[allow(clippy::not_unsafe_ptr_arg_deref)]
+#[no_mangle]
+pub extern "C" fn selene_scalar_from_bytes(selene_scalar: *const u8) -> SeleneScalar {
+  let mut selene_scalar = unsafe { core::slice::from_raw_parts(selene_scalar, 32) };
+  // TODO: Return an error here (instead of unwrapping)
+  <Selene as Ciphersuite>::read_F(&mut selene_scalar).unwrap()
+}
+
+#[no_mangle]
+pub extern "C" fn selene_point_to_helios_scalar(selene_point: SelenePoint) -> HeliosScalar {
+  let xy_coords = SelenePoint::to_xy(selene_point);
+  // TODO: Return an error here (instead of unwrapping)
+  let x: HeliosScalar = xy_coords.unwrap().0;
+  x
+}
+
+#[no_mangle]
+pub extern "C" fn helios_point_to_selene_scalar(helios_point: HeliosPoint) -> SeleneScalar {
+  let xy_coords = HeliosPoint::to_xy(helios_point);
+  // TODO: Return an error here (instead of unwrapping)
+  let x: SeleneScalar = xy_coords.unwrap().0;
+  x
+}
+
+#[no_mangle]
+pub extern "C" fn helios_zero_scalar() -> HeliosScalar {
+  HeliosScalar::ZERO
+}
+
+#[no_mangle]
+pub extern "C" fn selene_zero_scalar() -> SeleneScalar {
+  SeleneScalar::ZERO
+}
+
+#[repr(C)]
+pub struct Slice<T> {
+  buf: *const T,
+  len: usize,
+}
+pub type HeliosScalarSlice = Slice<HeliosScalar>;
+pub type SeleneScalarSlice = Slice<SeleneScalar>;
+impl<'a, T> From<Slice<T>> for &'a [T] {
+  fn from(slice: Slice<T>) -> Self {
+    unsafe { core::slice::from_raw_parts(slice.buf, slice.len) }
+  }
+}
+
+#[repr(C)]
+pub struct CResult<T, E> {
+  value: *const T,
+  err: *const E,
+}
+impl<T, E> CResult<T, E> {
+  fn ok(value: T) -> Self {
+    CResult {
+      value: Box::into_raw(Box::new(value)),
+      err: core::ptr::null(),
+    }
+  }
+  fn err(err: E) -> Self {
+    CResult {
+      value: core::ptr::null(),
+      err: Box::into_raw(Box::new(err)),
+    }
+  }
+}
+
+#[no_mangle]
+pub extern "C" fn hash_grow_helios(
+  existing_hash: HeliosPoint,
+  offset: usize,
+  existing_child_at_offset: HeliosScalar,
+  new_children: HeliosScalarSlice,
+) -> CResult<HeliosPoint, ()> {
+  let hash = hash_grow(
+    HELIOS_GENERATORS(),
+    existing_hash,
+    offset,
+    existing_child_at_offset,
+    new_children.into(),
+  );
+
+  if let Some(hash) = hash {
+    CResult::ok(hash)
+  } else {
+    // TODO: return defined error here: https://github.com/monero-project/monero/pull/9436#discussion_r1720477391
+    CResult::err(())
+  }
+}
+
+#[no_mangle]
+pub extern "C" fn hash_trim_helios(
+  existing_hash: HeliosPoint,
+  offset: usize,
+  children: HeliosScalarSlice,
+  child_to_grow_back: HeliosScalar,
+) -> CResult<HeliosPoint, ()> {
+  let hash = hash_trim(
+    HELIOS_GENERATORS(),
+    existing_hash,
+    offset,
+    children.into(),
+    child_to_grow_back,
+  );
+
+  if let Some(hash) = hash {
+    CResult::ok(hash)
+  } else {
+    // TODO: return defined error here: https://github.com/monero-project/monero/pull/9436#discussion_r1720477391
+    CResult::err(())
+  }
+}
+
+#[no_mangle]
+pub extern "C" fn hash_grow_selene(
+  existing_hash: SelenePoint,
+  offset: usize,
+  existing_child_at_offset: SeleneScalar,
+  new_children: SeleneScalarSlice,
+) -> CResult<SelenePoint, ()> {
+  let hash = hash_grow(
+    SELENE_GENERATORS(),
+    existing_hash,
+    offset,
+    existing_child_at_offset,
+    new_children.into(),
+  );
+
+  if let Some(hash) = hash {
+    CResult::ok(hash)
+  } else {
+    // TODO: return defined error here: https://github.com/monero-project/monero/pull/9436#discussion_r1720477391
+    CResult::err(())
+  }
+}
+
+#[no_mangle]
+pub extern "C" fn hash_trim_selene(
+  existing_hash: SelenePoint,
+  offset: usize,
+  children: SeleneScalarSlice,
+  child_to_grow_back: SeleneScalar,
+) -> CResult<SelenePoint, ()> {
+  let hash = hash_trim(
+    SELENE_GENERATORS(),
+    existing_hash,
+    offset,
+    children.into(),
+    child_to_grow_back,
+  );
+
+  if let Some(hash) = hash {
+    CResult::ok(hash)
+  } else {
+    // TODO: return defined error here: https://github.com/monero-project/monero/pull/9436#discussion_r1720477391
+    CResult::err(())
+  }
+}
+
+// https://github.com/rust-lang/rust/issues/79609
+#[cfg(all(target_os = "windows", target_arch = "x86"))]
+#[no_mangle]
+pub extern "C" fn _Unwind_Resume() {}
diff --git a/src/fcmp_pp/proof.h b/src/fcmp_pp/proof.h new file mode 100644 index 0000000000..24f91fae9e --- /dev/null +++ b/src/fcmp_pp/proof.h
@@ -0,0 +1,46 @@
+// Copyright (c) 2024, The Monero Project
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without modification, are
+// permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this list of
+// conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright notice, this list
+// of conditions and the following disclaimer in the documentation and/or other
+// materials provided with the distribution.
+//
+// 3. Neither the name of the copyright holder nor the names of its contributors may be
+// used to endorse or promote products derived from this software without specific
+// prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#pragma once
+
+#include <vector>
+
+namespace fcmp_pp
+{
+
+// Byte buffer containing the fcmp++ proof
+using FcmpPpProof = std::vector<uint8_t>;
+
+static inline std::size_t proof_len(const std::size_t n_inputs, const uint8_t curve_trees_tree_depth)
+{
+  // TODO: implement
+  static_assert(sizeof(std::size_t) >= sizeof(uint8_t), "unexpected size of size_t");
+  return n_inputs * (std::size_t)curve_trees_tree_depth * 2;
+};
+
+}//namespace fcmp_pp
diff --git a/src/fcmp_pp/tower_cycle.cpp b/src/fcmp_pp/tower_cycle.cpp new file mode 100644 index 0000000000..9cb35af1f4 --- /dev/null +++ b/src/fcmp_pp/tower_cycle.cpp
@@ -0,0 +1,283 @@
+// Copyright (c) 2024, The Monero Project
+//
+// All rights reserved.
+// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "string_tools.h" +#include "tower_cycle.h" + +namespace fcmp_pp +{ +namespace tower_cycle +{ +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +Helios::Point Helios::hash_init_point() const +{ + return fcmp_pp_rust::helios_hash_init_point(); +} +//---------------------------------------------------------------------------------------------------------------------- +Selene::Point Selene::hash_init_point() const +{ + return fcmp_pp_rust::selene_hash_init_point(); +} +//---------------------------------------------------------------------------------------------------------------------- +Helios::CycleScalar Helios::point_to_cycle_scalar(const Helios::Point &point) const +{ + return fcmp_pp_rust::helios_point_to_selene_scalar(point); +} +//---------------------------------------------------------------------------------------------------------------------- +Selene::CycleScalar Selene::point_to_cycle_scalar(const Selene::Point &point) const +{ + return fcmp_pp_rust::selene_point_to_helios_scalar(point); +} +//---------------------------------------------------------------------------------------------------------------------- +Helios::Point Helios::hash_grow( + const Helios::Point &existing_hash, + const std::size_t offset, + const Helios::Scalar &existing_child_at_offset, + const Helios::Chunk &new_children) const +{ + auto result = fcmp_pp_rust::hash_grow_helios( + existing_hash, + offset, + existing_child_at_offset, + new_children); + + if (result.err != nullptr) + { + free(result.err); + throw std::runtime_error("failed to hash grow"); + } + + typename Helios::Point res; + memcpy(&res, result.value, sizeof(typename Helios::Point)); + free(result.value); + return res; +} +//---------------------------------------------------------------------------------------------------------------------- 
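Editorial note: every wrapper in this file repeats the CResult handling seen in Helios::hash_grow above. Exactly one of {value, err} is a pointer the Rust side leaked via Box::into_raw, and the C++ side must free whichever was set. A sketch of the pattern factored into a single helper; `unwrap_result` is hypothetical and not part of this PR:

    // Hypothetical helper capturing the CResult ownership contract.
    template <typename T>
    static T unwrap_result(const fcmp_pp_rust::CResult result, const char *what)
    {
        if (result.err != nullptr)
        {
            free(result.err);                  // error payload was heap-allocated by Rust
            throw std::runtime_error(what);
        }
        T res;
        memcpy(&res, result.value, sizeof(T)); // copy the boxed value out...
        free(result.value);                    // ...then release the Rust allocation
        return res;
    }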
+Helios::Point Helios::hash_trim(
+    const Helios::Point &existing_hash,
+    const std::size_t offset,
+    const Helios::Chunk &children,
+    const Helios::Scalar &child_to_grow_back) const
+{
+    auto result = fcmp_pp_rust::hash_trim_helios(
+        existing_hash,
+        offset,
+        children,
+        child_to_grow_back);
+
+    if (result.err != nullptr)
+    {
+        free(result.err);
+        throw std::runtime_error("failed to hash trim");
+    }
+
+    typename Helios::Point res;
+    memcpy(&res, result.value, sizeof(typename Helios::Point));
+    free(result.value);
+    return res;
+}
+//----------------------------------------------------------------------------------------------------------------------
+Selene::Point Selene::hash_grow(
+    const Selene::Point &existing_hash,
+    const std::size_t offset,
+    const Selene::Scalar &existing_child_at_offset,
+    const Selene::Chunk &new_children) const
+{
+    auto result = fcmp_pp_rust::hash_grow_selene(
+        existing_hash,
+        offset,
+        existing_child_at_offset,
+        new_children);
+
+    if (result.err != nullptr)
+    {
+        free(result.err);
+        throw std::runtime_error("failed to hash grow");
+    }
+
+    typename Selene::Point res;
+    memcpy(&res, result.value, sizeof(typename Selene::Point));
+    free(result.value);
+    return res;
+}
+//----------------------------------------------------------------------------------------------------------------------
+Selene::Point Selene::hash_trim(
+    const Selene::Point &existing_hash,
+    const std::size_t offset,
+    const Selene::Chunk &children,
+    const Selene::Scalar &child_to_grow_back) const
+{
+    auto result = fcmp_pp_rust::hash_trim_selene(
+        existing_hash,
+        offset,
+        children,
+        child_to_grow_back);
+
+    if (result.err != nullptr)
+    {
+        free(result.err);
+        throw std::runtime_error("failed to hash trim");
+    }
+
+    typename Selene::Point res;
+    memcpy(&res, result.value, sizeof(typename Selene::Point));
+    free(result.value);
+    return res;
+}
+//----------------------------------------------------------------------------------------------------------------------
+Helios::Scalar Helios::zero_scalar() const
+{
+    return fcmp_pp_rust::helios_zero_scalar();
+}
+//----------------------------------------------------------------------------------------------------------------------
+Selene::Scalar Selene::zero_scalar() const
+{
+    return fcmp_pp_rust::selene_zero_scalar();
+}
+//----------------------------------------------------------------------------------------------------------------------
+std::array<uint8_t, 32> Helios::to_bytes(const Helios::Scalar &scalar) const
+{
+    auto bytes = fcmp_pp_rust::helios_scalar_to_bytes(scalar);
+    std::array<uint8_t, 32> res;
+    memcpy(&res, bytes, 32);
+    free(bytes);
+    return res;
+}
+//----------------------------------------------------------------------------------------------------------------------
+std::array<uint8_t, 32> Selene::to_bytes(const Selene::Scalar &scalar) const
+{
+    auto bytes = fcmp_pp_rust::selene_scalar_to_bytes(scalar);
+    std::array<uint8_t, 32> res;
+    memcpy(&res, bytes, 32);
+    free(bytes);
+    return res;
+}
+//----------------------------------------------------------------------------------------------------------------------
+std::array<uint8_t, 32> Helios::to_bytes(const Helios::Point &point) const
+{
+    auto bytes = fcmp_pp_rust::helios_point_to_bytes(point);
+    std::array<uint8_t, 32> res;
+    memcpy(&res, bytes, 32);
+    free(bytes);
+    return res;
+}
+//----------------------------------------------------------------------------------------------------------------------
+std::array<uint8_t, 32> Selene::to_bytes(const Selene::Point &point) const
+{
+    auto bytes = fcmp_pp_rust::selene_point_to_bytes(point);
+    std::array<uint8_t, 32> res;
+    memcpy(&res, bytes, 32);
+    free(bytes);
+    return res;
+}
+//----------------------------------------------------------------------------------------------------------------------
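Editorial note: scalars and points both serialize to 32 bytes here (scalars via the field's canonical representation, points in compressed form), and only points get a from_bytes wrapper, matching the extern "C" surface. A round-trip sketch using the wrappers declared in tower_cycle.h, illustrative only:

    // Round-trip a Selene point through its 32-byte encoding (sketch).
    fcmp_pp::tower_cycle::Selene selene;
    const auto point  = selene.hash_init_point();
    const std::array<uint8_t, 32> bytes = selene.to_bytes(point);
    const auto point2 = selene.from_bytes(bytes);
    // to_string() is pod_to_hex of to_bytes(), so equal encodings give equal strings.
    assert(selene.to_string(point) == selene.to_string(point2));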
+Helios::Point Helios::from_bytes(const std::array<uint8_t, 32> &bytes) const
+{
+    return fcmp_pp_rust::helios_point_from_bytes(bytes.data());
+}
+//----------------------------------------------------------------------------------------------------------------------
+Selene::Point Selene::from_bytes(const std::array<uint8_t, 32> &bytes) const
+{
+    return fcmp_pp_rust::selene_point_from_bytes(bytes.data());
+}
+//----------------------------------------------------------------------------------------------------------------------
+std::string Helios::to_string(const typename Helios::Scalar &scalar) const
+{
+    return epee::string_tools::pod_to_hex(this->to_bytes(scalar));
+}
+//----------------------------------------------------------------------------------------------------------------------
+std::string Selene::to_string(const typename Selene::Scalar &scalar) const
+{
+    return epee::string_tools::pod_to_hex(this->to_bytes(scalar));
+}
+//----------------------------------------------------------------------------------------------------------------------
+std::string Helios::to_string(const typename Helios::Point &point) const
+{
+    return epee::string_tools::pod_to_hex(this->to_bytes(point));
+}
+//----------------------------------------------------------------------------------------------------------------------
+std::string Selene::to_string(const typename Selene::Point &point) const
+{
+    return epee::string_tools::pod_to_hex(this->to_bytes(point));
+}
+//----------------------------------------------------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------------------------------
+// Exposed helper functions
+//----------------------------------------------------------------------------------------------------------------------
+SeleneScalar selene_scalar_from_bytes(const rct::key &scalar)
+{
+    return fcmp_pp_rust::selene_scalar_from_bytes(scalar.bytes);
+}
+//----------------------------------------------------------------------------------------------------------------------
+template<typename C>
+void extend_zeroes(const std::unique_ptr<C> &curve,
+    const std::size_t num_zeroes,
+    std::vector<typename C::Scalar> &zeroes_inout)
+{
+    zeroes_inout.reserve(zeroes_inout.size() + num_zeroes);
+
+    for (std::size_t i = 0; i < num_zeroes; ++i)
+        zeroes_inout.emplace_back(curve->zero_scalar());
+}
+
+// Explicit instantiations
+template void extend_zeroes<Helios>(const std::unique_ptr<Helios> &curve,
+    const std::size_t num_zeroes,
+    std::vector<Helios::Scalar> &zeroes_inout);
+
+template void extend_zeroes<Selene>(const std::unique_ptr<Selene> &curve,
+    const std::size_t num_zeroes,
+    std::vector<Selene::Scalar> &zeroes_inout);
+//----------------------------------------------------------------------------------------------------------------------
+template<typename C_POINTS, typename C_SCALARS>
+void extend_scalars_from_cycle_points(const std::unique_ptr<C_POINTS> &curve,
+    const std::vector<typename C_POINTS::Point> &points,
+    std::vector<typename C_SCALARS::Scalar> &scalars_out)
+{
+    scalars_out.reserve(scalars_out.size() + points.size());
+
+    for (const auto &point : points)
+    {
+        typename C_SCALARS::Scalar scalar = curve->point_to_cycle_scalar(point);
+        scalars_out.push_back(std::move(scalar));
+    }
+}
+
+// Explicit instantiations
+template void extend_scalars_from_cycle_points<Helios, Selene>(const std::unique_ptr<Helios> &curve,
+    const std::vector<Helios::Point> &points,
+    std::vector<Selene::Scalar> &scalars_out);
+
+template void extend_scalars_from_cycle_points<Selene, Helios>(const std::unique_ptr<Selene> &curve,
+    const std::vector<Selene::Point> &points,
+    std::vector<Helios::Scalar> &scalars_out);
+//----------------------------------------------------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------------------------------
+} //namespace tower_cycle
+} //namespace fcmp_pp
diff --git a/src/fcmp_pp/tower_cycle.h b/src/fcmp_pp/tower_cycle.h new file mode 100644 index 0000000000..8ab69f902b --- /dev/null +++ b/src/fcmp_pp/tower_cycle.h
@@ -0,0 +1,191 @@
+// Copyright (c) 2024, The Monero Project
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without modification, are
+// permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this list of
+// conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright notice, this list
+// of conditions and the following disclaimer in the documentation and/or other
+// materials provided with the distribution.
+//
+// 3. Neither the name of the copyright holder nor the names of its contributors may be
+// used to endorse or promote products derived from this software without specific
+// prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#pragma once
+
+#include "crypto/crypto.h"
+#include "fcmp_pp_rust/fcmp++.h"
+#include "ringct/rctTypes.h"
+
+#include <memory>
+
+namespace fcmp_pp
+{
+namespace tower_cycle
+{
+//----------------------------------------------------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------------------------------
+// Rust types
+//----------------------------------------------------------------------------------------------------------------------
+// Need to forward declare Scalar types for point_to_cycle_scalar below
+using SeleneScalar = fcmp_pp_rust::SeleneScalar;
+using HeliosScalar = fcmp_pp_rust::HeliosScalar;
+//----------------------------------------------------------------------------------------------------------------------
+struct HeliosT final
+{
+    using Scalar      = HeliosScalar;
+    using Point       = fcmp_pp_rust::HeliosPoint;
+    using Chunk       = fcmp_pp_rust::HeliosScalarSlice;
+    using CycleScalar = SeleneScalar;
+};
+//----------------------------------------------------------------------------------------------------------------------
+struct SeleneT final
+{
+    using Scalar      = SeleneScalar;
+    using Point       = fcmp_pp_rust::SelenePoint;
+    using Chunk       = fcmp_pp_rust::SeleneScalarSlice;
+    using CycleScalar = HeliosScalar;
+};
+//----------------------------------------------------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------------------------------
+// Abstract parent curve class that curves in a cycle must implement
+template<typename C>
+class Curve
+{
+//member functions
+public:
+    virtual typename C::Point hash_init_point() const = 0;
+
+    // Read the x-coordinate from this curve's point to get this curve's cycle scalar
+    virtual typename C::CycleScalar point_to_cycle_scalar(const typename C::Point &point) const = 0;
+
+    virtual typename C::Point hash_grow(
+        const typename C::Point &existing_hash,
+        const std::size_t offset,
+        const typename C::Scalar &existing_child_at_offset,
+        const typename C::Chunk &new_children) const = 0;
+
+    virtual typename C::Point hash_trim(
+        const typename C::Point &existing_hash,
+        const std::size_t offset,
+        const typename C::Chunk &children,
+        const typename C::Scalar &child_to_grow_back) const = 0;
+
+    virtual typename C::Scalar zero_scalar() const = 0;
+
+    virtual std::array<uint8_t, 32> to_bytes(const typename C::Scalar &scalar) const = 0;
+    virtual std::array<uint8_t, 32> to_bytes(const typename C::Point &point) const = 0;
+
+    virtual typename C::Point from_bytes(const std::array<uint8_t, 32> &bytes) const = 0;
+
+    virtual std::string to_string(const typename C::Scalar &scalar) const = 0;
+    virtual std::string to_string(const typename C::Point &point) const = 0;
+};
+//----------------------------------------------------------------------------------------------------------------------
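Editorial note: Curve is the abstraction that makes the tower cycle work. Helios's CycleScalar is Selene's Scalar and vice versa, so the parent hash produced on one curve feeds the next layer up as a scalar on the other curve via point_to_cycle_scalar. A sketch of one alternation step, assuming the two concrete classes declared below; illustrative only:

    fcmp_pp::tower_cycle::Selene selene;
    fcmp_pp::tower_cycle::Helios helios;

    // Hash a Selene layer (here just the init point), then reinterpret its
    // x-coordinate as a Helios scalar for the parent layer.
    const fcmp_pp::tower_cycle::Selene::Point layer_hash = selene.hash_init_point();
    const fcmp_pp::tower_cycle::Helios::Scalar parent_child =
        selene.point_to_cycle_scalar(layer_hash);
    // parent_child would now be packed into a Helios::Chunk and passed to
    // helios.hash_grow() for the layer above; the layer after that flips back to Selene.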
+class Helios final : public Curve<HeliosT>
+{
+//typedefs
+public:
+    using Scalar      = HeliosT::Scalar;
+    using Point       = HeliosT::Point;
+    using Chunk       = HeliosT::Chunk;
+    using CycleScalar = HeliosT::CycleScalar;
+
+//member functions
+public:
+    Point hash_init_point() const override;
+
+    CycleScalar point_to_cycle_scalar(const Point &point) const override;
+
+    Point hash_grow(
+        const Point &existing_hash,
+        const std::size_t offset,
+        const Scalar &existing_child_at_offset,
+        const Chunk &new_children) const override;
+
+    Point hash_trim(
+        const Point &existing_hash,
+        const std::size_t offset,
+        const Chunk &children,
+        const Scalar &child_to_grow_back) const override;
+
+    Scalar zero_scalar() const override;
+
+    std::array<uint8_t, 32> to_bytes(const Scalar &scalar) const override;
+    std::array<uint8_t, 32> to_bytes(const Point &point) const override;
+
+    Point from_bytes(const std::array<uint8_t, 32> &bytes) const override;
+
+    std::string to_string(const Scalar &scalar) const override;
+    std::string to_string(const Point &point) const override;
+};
+//----------------------------------------------------------------------------------------------------------------------
+class Selene final : public Curve<SeleneT>
+{
+//typedefs
+public:
+    using Scalar      = SeleneT::Scalar;
+    using Point       = SeleneT::Point;
+    using Chunk       = SeleneT::Chunk;
+    using CycleScalar = SeleneT::CycleScalar;
+
+//member functions
+public:
+    Point hash_init_point() const override;
+
+    CycleScalar point_to_cycle_scalar(const Point &point) const override;
+
+    Point hash_grow(
+        const Point &existing_hash,
+        const std::size_t offset,
+        const Scalar &existing_child_at_offset,
+        const Chunk &new_children) const override;
+
+    Point hash_trim(
+        const Point &existing_hash,
+        const std::size_t offset,
+        const Chunk &children,
+        const Scalar &child_to_grow_back) const override;
+
+    Scalar zero_scalar() const override;
+
+    std::array<uint8_t, 32> to_bytes(const Scalar &scalar) const override;
+    std::array<uint8_t, 32> to_bytes(const Point &point) const override;
+
+    Point from_bytes(const std::array<uint8_t, 32> &bytes) const override;
+
+    std::string to_string(const Scalar &scalar) const override;
+    std::string to_string(const Point &point) const override;
+};
+//----------------------------------------------------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------------------------------
+SeleneScalar selene_scalar_from_bytes(const rct::key &scalar);
+//----------------------------------------------------------------------------------------------------------------------
+template<typename C>
+void extend_zeroes(const std::unique_ptr<C> &curve,
+    const std::size_t num_zeroes,
+    std::vector<typename C::Scalar> &zeroes_inout);
+//----------------------------------------------------------------------------------------------------------------------
+template<typename C_POINTS, typename C_SCALARS>
+void extend_scalars_from_cycle_points(const std::unique_ptr<C_POINTS> &curve,
+    const std::vector<typename C_POINTS::Point> &points,
+    std::vector<typename C_SCALARS::Scalar> &scalars_out);
+//----------------------------------------------------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------------------------------
+}//namespace tower_cycle
+}//namespace fcmp_pp
diff --git a/src/ringct/rctSigs.cpp b/src/ringct/rctSigs.cpp index 2d92ba05d4..c96bc7bbaa 100644 --- a/src/ringct/rctSigs.cpp +++ b/src/ringct/rctSigs.cpp
@@ -47,8 +47,7 @@ using namespace std;
 #define CHECK_AND_ASSERT_MES_L1(expr, ret, message) {if(!(expr)) {MCERROR("verify", message); return ret;}}
-namespace
-{
+namespace rct {
   rct::Bulletproof make_dummy_bulletproof(const std::vector<uint64_t> &outamounts, rct::keyV &C, rct::keyV &masks)
   {
     const size_t n_outs = outamounts.size();
@@ -117,9 +116,7 @@ namespace
     const size_t n_scalars = ring_size;
     return rct::clsag{rct::keyV(n_scalars, I), I, I, I};
   }
-}
-namespace rct {
   Bulletproof proveRangeBulletproof(keyV &C, keyV &masks, const std::vector<uint64_t> &amounts, epee::span<const key> sk, hw::device &hwdev)
   {
     CHECK_AND_ASSERT_THROW_MES(amounts.size() == sk.size(), "Invalid amounts/sk sizes");
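Editorial note: moving these helpers out of the anonymous namespace exposes them as rct:: functions so fcmp++ test/mock code can build correctly sized transactions without producing real proofs (see the "helpers for mock txs" declarations added to rctSigs.h below). A usage sketch for, e.g., weight estimation; the caller is hypothetical:

    // Build structurally valid but unprovable components for a 2-output mock tx.
    rct::keyV C, masks;
    const std::vector<uint64_t> outamounts = {1000, 2000};
    const rct::BulletproofPlus bpp = rct::make_dummy_bulletproof_plus(outamounts, C, masks);
    const rct::clsag cls = rct::make_dummy_clsag(16); // ring size 16
    // Useful for sizing/serialization tests only; these will not verify.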
diff --git a/src/ringct/rctSigs.h b/src/ringct/rctSigs.h index 035d866d69..af533e4950 100644 --- a/src/ringct/rctSigs.h +++ b/src/ringct/rctSigs.h
@@ -64,6 +64,10 @@ namespace hw {
 namespace rct {
+    // helpers for mock txs
+    Bulletproof make_dummy_bulletproof(const std::vector<uint64_t> &outamounts, keyV &C, keyV &masks);
+    BulletproofPlus make_dummy_bulletproof_plus(const std::vector<uint64_t> &outamounts, keyV &C, keyV &masks);
+    clsag make_dummy_clsag(size_t ring_size);
     boroSig genBorromean(const key64 x, const key64 P1, const key64 P2, const bits indices);
     bool verifyBorromean(const boroSig &bb, const key64 P1, const key64 P2);
diff --git a/src/ringct/rctTypes.h b/src/ringct/rctTypes.h index 247f25fffb..946f520a2c 100644 --- a/src/ringct/rctTypes.h +++ b/src/ringct/rctTypes.h
@@ -45,7 +45,7 @@ extern "C" {
 }
 #include "crypto/generic-ops.h"
 #include "crypto/crypto.h"
-
+#include "fcmp_pp/proof.h"
 #include "hex.h"
 #include "span.h"
 #include "memwipe.h"
@@ -84,6 +84,7 @@ namespace rct {
       return bytes[i];
     }
     bool operator==(const key &k) const { return !crypto_verify_32(bytes, k.bytes); }
+    bool operator!=(const key &k) const { return crypto_verify_32(bytes, k.bytes); }
     unsigned char bytes[32];
   };
   typedef std::vector<key> keyV; //vector of keys
@@ -303,6 +304,7 @@ namespace rct {
     RCTTypeBulletproof2 = 4,
     RCTTypeCLSAG = 5,
     RCTTypeBulletproofPlus = 6,
+    RCTTypeFcmpPlusPlus = 7,
   };
   enum RangeProofType { RangeProofBorromean, RangeProofBulletproof, RangeProofMultiOutputBulletproof, RangeProofPaddedBulletproof };
   struct RCTConfig {
@@ -324,9 +326,10 @@ namespace rct {
     std::vector<ecdhTuple> ecdhInfo;
     ctkeyV outPk;
     xmr_amount txnFee; // contains b
+    crypto::hash referenceBlock; // block containing the merkle tree root used for fcmp++
     rctSigBase() :
-      type(RCTTypeNull), message{}, mixRing{}, pseudoOuts{}, ecdhInfo{}, outPk{}, txnFee(0)
+      type(RCTTypeNull), message{}, mixRing{}, pseudoOuts{}, ecdhInfo{}, outPk{}, txnFee(0), referenceBlock{}
     {}
     template<bool W, template <bool> class Archive>
@@ -335,7 +338,7 @@ namespace rct {
       FIELD(type)
       if (type == RCTTypeNull)
        return ar.good();
-      if (type != RCTTypeFull && type != RCTTypeSimple && type != RCTTypeBulletproof && type != RCTTypeBulletproof2 && type != RCTTypeCLSAG && type != RCTTypeBulletproofPlus)
+      if (type != RCTTypeFull && type != RCTTypeSimple && type != RCTTypeBulletproof && type != RCTTypeBulletproof2 && type != RCTTypeCLSAG && type != RCTTypeBulletproofPlus && type != RCTTypeFcmpPlusPlus)
        return false;
      VARINT_FIELD(txnFee)
      // inputs/outputs not saved, only here for serialization help
@@ -364,7 +367,7 @@ namespace rct {
        return false;
      for (size_t i = 0; i < outputs; ++i)
      {
-        if (type == RCTTypeBulletproof2 || type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus)
+        if (type == RCTTypeBulletproof2 || type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus || type == RCTTypeFcmpPlusPlus)
        {
          // Since RCTTypeBulletproof2 enote types, we don't serialize the blinding factor, and only serialize the
          // first 8 bytes of ecdhInfo[i].amount
@@ -400,6 +403,8 @@ namespace rct {
          ar.delimit_array();
      }
      ar.end_array();
+      if (type == RCTTypeFcmpPlusPlus)
+        FIELD(referenceBlock)
      return ar.good();
    }
@@ -411,6 +416,7 @@ namespace rct {
      FIELD(ecdhInfo)
      FIELD(outPk)
      VARINT_FIELD(txnFee)
+      FIELD(referenceBlock)
    END_SERIALIZE()
  };
  struct rctSigPrunable {
@@ -420,6 +426,8 @@ namespace rct {
    std::vector<mgSig> MGs; // simple rct has N, full has 1
    std::vector<clsag> CLSAGs;
    keyV pseudoOuts; //C - for simple rct
+    uint8_t curve_trees_tree_depth; // for fcmp++
+    fcmp_pp::FcmpPpProof fcmp_pp;
    // when changing this function, update cryptonote::get_pruned_transaction_weight
    template<bool W, template <bool> class Archive>
@@ -433,9 +441,9 @@ namespace rct {
        return false;
      if (type == RCTTypeNull)
        return ar.good();
-      if (type != RCTTypeFull && type != RCTTypeSimple && type != RCTTypeBulletproof && type != RCTTypeBulletproof2 && type != RCTTypeCLSAG && type != RCTTypeBulletproofPlus)
+      if (type != RCTTypeFull && type != RCTTypeSimple && type != RCTTypeBulletproof && type != RCTTypeBulletproof2 && type != RCTTypeCLSAG && type != RCTTypeBulletproofPlus && type != RCTTypeFcmpPlusPlus)
        return false;
-      if (type == RCTTypeBulletproofPlus)
+      if (type == RCTTypeBulletproofPlus || type == RCTTypeFcmpPlusPlus)
      {
        uint32_t nbp = bulletproofs_plus.size();
        VARINT_FIELD(nbp)
@@ -492,7 +500,22 @@ namespace rct {
        ar.end_array();
      }
-      if (type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus)
+      if (type == RCTTypeFcmpPlusPlus)
+      {
+        FIELD(curve_trees_tree_depth)
+        ar.tag("fcmp_pp");
+        ar.begin_object();
+        const std::size_t proof_len = fcmp_pp::proof_len(inputs, curve_trees_tree_depth);
+        if (!typename Archive<W>::is_saving())
+          fcmp_pp.resize(proof_len);
+        if (fcmp_pp.size() != proof_len)
+          return false;
+        ar.serialize_blob(fcmp_pp.data(), proof_len);
+        if (!ar.good())
+          return false;
+        ar.end_object();
+      }
+      else if (type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus)
      {
        ar.tag("CLSAGs");
        ar.begin_array();
@@ -583,7 +606,7 @@ namespace rct {
        }
        ar.end_array();
      }
-      if (type == RCTTypeBulletproof || type == RCTTypeBulletproof2 || type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus)
+      if (type == RCTTypeBulletproof || type == RCTTypeBulletproof2 || type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus || type == RCTTypeFcmpPlusPlus)
      {
        ar.tag("pseudoOuts");
        ar.begin_array();
@@ -607,6 +630,8 @@ namespace rct {
      FIELD(bulletproofs_plus)
      FIELD(MGs)
      FIELD(CLSAGs)
+      FIELD(curve_trees_tree_depth)
+      FIELD(fcmp_pp)
      FIELD(pseudoOuts)
    END_SERIALIZE()
  };
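Editorial note on the ordering above: curve_trees_tree_depth is serialized before the fcmp_pp blob precisely so a reader can size the proof before deserializing it, and proof_len() is still the placeholder from fcmp_pp/proof.h. A worked example of the placeholder sizing, illustrative only until the real formula lands:

    // Placeholder from proof.h: proof_len(n_inputs, depth) = n_inputs * depth * 2.
    const std::size_t len = fcmp_pp::proof_len(2, 8); // 2 inputs, tree depth 8 -> 32 bytes
    // On load, serialize_rctsig_prunable resizes rctSigPrunable::fcmp_pp to exactly
    // this length before reading the blob, so a mismatched size fails to parse.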
@@ -615,12 +640,12 @@ namespace rct {
    keyV& get_pseudo_outs()
    {
-      return type == RCTTypeBulletproof || type == RCTTypeBulletproof2 || type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus ? p.pseudoOuts : pseudoOuts;
+      return type == RCTTypeBulletproof || type == RCTTypeBulletproof2 || type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus || type == RCTTypeFcmpPlusPlus ? p.pseudoOuts : pseudoOuts;
    }
    keyV const& get_pseudo_outs() const
    {
-      return type == RCTTypeBulletproof || type == RCTTypeBulletproof2 || type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus ? p.pseudoOuts : pseudoOuts;
+      return type == RCTTypeBulletproof || type == RCTTypeBulletproof2 || type == RCTTypeCLSAG || type == RCTTypeBulletproofPlus || type == RCTTypeFcmpPlusPlus ? p.pseudoOuts : pseudoOuts;
    }
    BEGIN_SERIALIZE_OBJECT()
@@ -740,6 +765,7 @@ namespace rct {
    static inline const rct::key &sk2rct(const crypto::secret_key &sk) { return (const rct::key&)sk; }
    static inline const rct::key &ki2rct(const crypto::key_image &ki) { return (const rct::key&)ki; }
    static inline const rct::key &hash2rct(const crypto::hash &h) { return (const rct::key&)h; }
+    static inline const rct::key &pt2rct(const crypto::ec_point &pt) { return (const rct::key&)pt; }
    static inline const crypto::public_key &rct2pk(const rct::key &k) { return (const crypto::public_key&)k; }
    static inline const crypto::secret_key &rct2sk(const rct::key &k) { return (const crypto::secret_key&)k; }
    static inline const crypto::key_image &rct2ki(const rct::key &k) { return (const crypto::key_image&)k; }
diff --git a/src/rpc/core_rpc_server.cpp b/src/rpc/core_rpc_server.cpp index 7fcd1e6d7c..46df1bcede 100644 --- a/src/rpc/core_rpc_server.cpp +++ b/src/rpc/core_rpc_server.cpp
@@ -1288,6 +1288,16 @@ namespace cryptonote
      res.status = "Failed";
      return true;
    }
+
+    std::vector<crypto::key_image_y> key_images_y;
+    key_images_y.reserve(key_images.size());
+    for (const auto &ki : key_images)
+    {
+      crypto::key_image_y ki_y;
+      crypto::key_image_to_y(ki, ki_y);
+      key_images_y.emplace_back(std::move(ki_y));
+    }
+
    for (std::vector<spent_key_image_info>::const_iterator i = ki.begin(); i != ki.end(); ++i)
    {
      crypto::hash hash;
@@ -1295,11 +1305,13 @@ namespace cryptonote
      if (parse_hash256(i->id_hash, hash))
      {
        memcpy(&spent_key_image, &hash, sizeof(hash)); // a bit dodgy, should be other parse functions somewhere
+        crypto::key_image_y spent_key_image_y;
+        crypto::key_image_to_y(spent_key_image, spent_key_image_y);
        for (size_t n = 0; n < res.spent_status.size(); ++n)
        {
          if (res.spent_status[n] == COMMAND_RPC_IS_KEY_IMAGE_SPENT::UNSPENT)
          {
-            if (key_images[n] == spent_key_image)
+            if (key_images_y[n] == spent_key_image_y)
            {
              res.spent_status[n] = COMMAND_RPC_IS_KEY_IMAGE_SPENT::SPENT_IN_POOL;
              break;
diff --git a/src/rpc/message_data_structs.h b/src/rpc/message_data_structs.h index 3fff923b77..f808f0b19c 100644 --- a/src/rpc/message_data_structs.h +++ b/src/rpc/message_data_structs.h
@@ -104,7 +104,7 @@ namespace rpc
    bool double_spend_seen;
  };
-  typedef std::unordered_map<crypto::key_image, std::vector<crypto::hash> > key_images_with_tx_hashes;
+  typedef std::unordered_map<crypto::key_image_y, std::vector<crypto::hash> > key_images_with_tx_hashes;
  struct output_amount_count {
diff --git a/tests/block_weight/block_weight.cpp b/tests/block_weight/block_weight.cpp index 44ccf1e646..31f5ad1046 100644 --- a/tests/block_weight/block_weight.cpp +++ b/tests/block_weight/block_weight.cpp
@@ -32,6 +32,7 @@
 #include
 #include "cryptonote_core/cryptonote_core.h"
 #include "blockchain_db/testdb.h"
+#include "fcmp_pp/curve_trees.h"
 #define LONG_TERM_BLOCK_WEIGHT_WINDOW 5000
@@ -64,6 +65,7 @@ class TestDB: public cryptonote::BaseTestDB
    , const uint64_t& coins_generated
    , uint64_t num_rct_outs
    , const crypto::hash& blk_hash
+    , const fcmp_pp::curve_trees::OutputsByUnlockBlock& outs_by_unlock_block
    ) override {
    blocks.push_back({block_weight, long_term_block_weight});
  }
diff --git a/tests/core_tests/chaingen.cpp b/tests/core_tests/chaingen.cpp index 05a6ce1f90..78c0927159 100644 --- a/tests/core_tests/chaingen.cpp +++ b/tests/core_tests/chaingen.cpp
@@ -88,6 +88,7 @@ namespace
    , const uint64_t& coins_generated
    , uint64_t num_rct_outs
    , const crypto::hash& blk_hash
+    , const fcmp_pp::curve_trees::OutputsByUnlockBlock& outs_by_unlock_block
    ) override
  {
    blocks.push_back({blk, blk_hash});
@@ -171,7 +172,7 @@ static std::unique_ptr init_blockchain(const std:
      const block *blk = &boost::get<block>(ev);
      auto blk_hash = get_block_hash(*blk);
-      bdb->add_block(*blk, 1, 1, 1, 0, 0, blk_hash);
+      bdb->add_block(*blk, 1, 1, 1, 0, 0, blk_hash, {});
    }
    bool r = bap->blockchain.init(bdb, nettype, true, test_options, 2, nullptr);
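Editorial note: the new crypto test commands below exercise crypto::key_image_to_y and key_image_from_y. As the test vectors suggest, the y representation is the compressed point with the sign bit (bit 255) cleared, with the sign carried separately. A behavioural sketch consistent with the vectors; this is not the actual implementation, which presumably operates on validated curve points:

    #include <cstring>

    // Matches the vectors: clear the top bit of byte 31 and report whether it was set.
    static bool key_image_to_y_sketch(const unsigned char ki[32], unsigned char ki_y[32])
    {
        std::memcpy(ki_y, ki, 32);
        const bool sign = (ki_y[31] & 0x80) != 0;
        ki_y[31] &= 0x7f;
        return sign;
    }

    // Inverse direction: restore the sign bit onto the y representation.
    static void key_image_from_y_sketch(const unsigned char ki_y[32], bool sign, unsigned char ki[32])
    {
        std::memcpy(ki, ki_y, 32);
        if (sign)
            ki[31] |= 0x80;
    }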
diff --git a/tests/crypto/main.cpp b/tests/crypto/main.cpp index 668c04ea10..a2e3a96b58 100644 --- a/tests/crypto/main.cpp +++ b/tests/crypto/main.cpp
@@ -277,6 +277,24 @@ int main(int argc, char *argv[]) {
      if (expected != actual) {
        goto error;
      }
+    } else if (cmd == "key_image_to_y") {
+      key_image ki;
+      key_image_y expected_ki_y, actual_ki_y;
+      bool expected_sign, actual_sign;
+      get(input, ki, expected_ki_y, expected_sign);
+      actual_sign = key_image_to_y(ki, actual_ki_y);
+      if (expected_ki_y != actual_ki_y || expected_sign != actual_sign) {
+        goto error;
+      }
+    } else if (cmd == "key_image_from_y") {
+      key_image_y ki_y;
+      bool sign;
+      key_image expected_ki, actual_ki;
+      get(input, ki_y, sign, expected_ki);
+      key_image_from_y(ki_y, sign, actual_ki);
+      if (expected_ki != actual_ki) {
+        goto error;
+      }
    } else {
      throw ios_base::failure("Unknown function: " + cmd);
    }
diff --git a/tests/crypto/tests.txt b/tests/crypto/tests.txt index 32e3b2d090..9e48c8b28d 100644 --- a/tests/crypto/tests.txt +++ b/tests/crypto/tests.txt
@@ -5543,3 +5543,203 @@ derive_view_tag 8edfabada2b24ef4d8d915826c9ff0245910e4b835b59c2cf8ed8fc991b2e1e8
 derive_view_tag 8edfabada2b24ef4d8d915826c9ff0245910e4b835b59c2cf8ed8fc991b2e1e8 15 00
 derive_view_tag 8edfabada2b24ef4d8d915826c9ff0245910e4b835b59c2cf8ed8fc991b2e1e8 127 a6
 derive_view_tag 8edfabada2b24ef4d8d915826c9ff0245910e4b835b59c2cf8ed8fc991b2e1e8 128 0d
+key_image_to_y fefdcf401bcf85b3b744e7c9f6af8ea0e181c799b1ec0f1c887cf77df085051d fefdcf401bcf85b3b744e7c9f6af8ea0e181c799b1ec0f1c887cf77df085051d false
+key_image_to_y af6ce7761f2062d7f6f1f7158e4448989b459dfa1d6df35db12360e2322aab1b af6ce7761f2062d7f6f1f7158e4448989b459dfa1d6df35db12360e2322aab1b false
+key_image_to_y f2e73a432004eeac96746e43885021ec1fc2a59d11a5cb17e0757aedc8bc2a2e f2e73a432004eeac96746e43885021ec1fc2a59d11a5cb17e0757aedc8bc2a2e false
+key_image_to_y b6692eb3436a670837ce2ed2a580ed18a62eaa1a7c7c515882e8a6a6e3416867 b6692eb3436a670837ce2ed2a580ed18a62eaa1a7c7c515882e8a6a6e3416867 false
+key_image_to_y b405de8162d9d3b2f89588e374aa0efde8bfd9b9f848cf2b9831258d776a3512 b405de8162d9d3b2f89588e374aa0efde8bfd9b9f848cf2b9831258d776a3512 false
+key_image_to_y 0b5a7872f28930d1384dbf75c41c06ff9254d807507cbfacbba4e8ae71191f4b 0b5a7872f28930d1384dbf75c41c06ff9254d807507cbfacbba4e8ae71191f4b false
+key_image_to_y d947f1c89ec5de075b3c987d69ccd54b5a91eb78d7708e876b6968537bd5f877 d947f1c89ec5de075b3c987d69ccd54b5a91eb78d7708e876b6968537bd5f877 false
+key_image_to_y f995fb5b59cec97eef860b27c1cf8c30d5da21324ab351e3bb62a97712a22830 f995fb5b59cec97eef860b27c1cf8c30d5da21324ab351e3bb62a97712a22830 false
+key_image_to_y 07c2fa33abba6add8c34b43bbef8d4cc5fb515f876c7d06440a676e9d68fee36 07c2fa33abba6add8c34b43bbef8d4cc5fb515f876c7d06440a676e9d68fee36 false
+key_image_to_y 96b01ba882e094e90fb5ae63134da1c78be4e62c57f1eb121bef2789f5fe3923 96b01ba882e094e90fb5ae63134da1c78be4e62c57f1eb121bef2789f5fe3923 false
+key_image_to_y b4796a8bc9801ed57e28d59b95c18371e8cf933e297551ec5bd1f47b270c9c7b b4796a8bc9801ed57e28d59b95c18371e8cf933e297551ec5bd1f47b270c9c7b false
+key_image_to_y b964be6438b709aed6d79677c7c0cc6446f502b31a1af108409c3d3a80c84203 b964be6438b709aed6d79677c7c0cc6446f502b31a1af108409c3d3a80c84203 false
+key_image_to_y ae926a5a753f3d7a7aaf4dd1ca171c45c0bb0aa75280e9b9e088a764b2d0ef55 
ae926a5a753f3d7a7aaf4dd1ca171c45c0bb0aa75280e9b9e088a764b2d0ef55 false +key_image_to_y 69463dbc77e0bd1363f46c05a0791bd84a9f34b4c6274654fa84ac74a7273331 69463dbc77e0bd1363f46c05a0791bd84a9f34b4c6274654fa84ac74a7273331 false +key_image_to_y 8ea9c60b287243a611316ff4a70ca667610c06a51570d65c836626fa2a81aa54 8ea9c60b287243a611316ff4a70ca667610c06a51570d65c836626fa2a81aa54 false +key_image_to_y ca55d6a70218adfe3e6e893c39888a01f7b297fdddaf8a48f333ed6bc5731d32 ca55d6a70218adfe3e6e893c39888a01f7b297fdddaf8a48f333ed6bc5731d32 false +key_image_to_y 5880cbd36de5697e73a8310972d13dd4c5a10ec091501abd63bc3dc21a736305 5880cbd36de5697e73a8310972d13dd4c5a10ec091501abd63bc3dc21a736305 false +key_image_to_y 70b620fd6ffec9720309dcc4d90fe37244ce62276c0fd910f782d72976909306 70b620fd6ffec9720309dcc4d90fe37244ce62276c0fd910f782d72976909306 false +key_image_to_y ddb3a9e3c57fbc7bf030b0155afe41563b0b89fdc50aed9a203319b65a3f960b ddb3a9e3c57fbc7bf030b0155afe41563b0b89fdc50aed9a203319b65a3f960b false +key_image_to_y 4eb11c28a0eb7ce30dcca67ae05c79181a8603ec1e55d83000cb72c3842da100 4eb11c28a0eb7ce30dcca67ae05c79181a8603ec1e55d83000cb72c3842da100 false +key_image_to_y 32df4f304bb1bed43f76d2315d139fe66aa79ede363ab5961e828f477dfbf772 32df4f304bb1bed43f76d2315d139fe66aa79ede363ab5961e828f477dfbf772 false +key_image_to_y b46bba83dbe888af05289c99911d251ab71621222311ea18cf5ca6cdcd74ed2b b46bba83dbe888af05289c99911d251ab71621222311ea18cf5ca6cdcd74ed2b false +key_image_to_y 582535660d8d8a8b11158b2cc72baab9824ca63b7b9ed99d19247d140cd4fb23 582535660d8d8a8b11158b2cc72baab9824ca63b7b9ed99d19247d140cd4fb23 false +key_image_to_y 4cfb08d96e162470e92651550ec06d6c693d428b8d85d43c53b67ff6dbc53030 4cfb08d96e162470e92651550ec06d6c693d428b8d85d43c53b67ff6dbc53030 false +key_image_to_y 1cd08eb20c60ba42f3eaeccce39ea185d588f61e3e51a38ff5cc48aa4458037e 1cd08eb20c60ba42f3eaeccce39ea185d588f61e3e51a38ff5cc48aa4458037e false +key_image_to_y 3c7e2a6795db3c1b70f8786e2d2d20116dd6478acfc374362b6ea106a1af2d42 3c7e2a6795db3c1b70f8786e2d2d20116dd6478acfc374362b6ea106a1af2d42 false +key_image_to_y e01f40decbc883a5daa126755e972e6427b052be9f3edec00d49041119a44f63 e01f40decbc883a5daa126755e972e6427b052be9f3edec00d49041119a44f63 false +key_image_to_y 048fbfa1d56a6d7c239321eb85aebf6839fc4ac830329aebce827f5140d7cb0c 048fbfa1d56a6d7c239321eb85aebf6839fc4ac830329aebce827f5140d7cb0c false +key_image_to_y 31630841a494d5a34d89a4709d8b36eee4ab3b6cf4914ff61a6b0eace1cdbd43 31630841a494d5a34d89a4709d8b36eee4ab3b6cf4914ff61a6b0eace1cdbd43 false +key_image_to_y 1350e9f4231fa7a7878172a08a63618dc710ca4bfa9a93a32dd0976ecbf67059 1350e9f4231fa7a7878172a08a63618dc710ca4bfa9a93a32dd0976ecbf67059 false +key_image_to_y 325eef1bb5a4d96a5ec074cc29fd4078a1aadc3f7435985d42c96cbc9526a002 325eef1bb5a4d96a5ec074cc29fd4078a1aadc3f7435985d42c96cbc9526a002 false +key_image_to_y 86e74b20ec60d8162c026206a61dfb8da300a0b563cb69c3f456c8a21f135d4f 86e74b20ec60d8162c026206a61dfb8da300a0b563cb69c3f456c8a21f135d4f false +key_image_to_y f3f3100cd90ce128b4c8d6c339d77249106c0a656fe651fe7a285a607e47a966 f3f3100cd90ce128b4c8d6c339d77249106c0a656fe651fe7a285a607e47a966 false +key_image_to_y 625fc0d4f4728f7a659c127026bbbc7c0c26a68b351a1656c8875e2d5ff1473f 625fc0d4f4728f7a659c127026bbbc7c0c26a68b351a1656c8875e2d5ff1473f false +key_image_to_y e10e93b9478bb561f4b08fc1d9d0b63f2f4b082ba49e5d0736bc5dac7551896a e10e93b9478bb561f4b08fc1d9d0b63f2f4b082ba49e5d0736bc5dac7551896a false +key_image_to_y 0a45ed914810e7a1c4e94d5d3466702790fe2882458092fcec8fd2ece7544e12 
0a45ed914810e7a1c4e94d5d3466702790fe2882458092fcec8fd2ece7544e12 false +key_image_to_y a83bb44ffaa27eb6c1f1bce66018e3fa96587d30f37a5338905616502b78da47 a83bb44ffaa27eb6c1f1bce66018e3fa96587d30f37a5338905616502b78da47 false +key_image_to_y 9d96f7f4617236da071a986deb7e3afdd3b96989c747384bc6d1b863ff72620c 9d96f7f4617236da071a986deb7e3afdd3b96989c747384bc6d1b863ff72620c false +key_image_to_y e4cc5a1a31184e706c2a8aaf510f16cd2f5d623037aae52a27b010319522bf10 e4cc5a1a31184e706c2a8aaf510f16cd2f5d623037aae52a27b010319522bf10 false +key_image_to_y 7baf7f85021d837208a600256a78684c5c2542f0cf085df7d75d4e5c148a4358 7baf7f85021d837208a600256a78684c5c2542f0cf085df7d75d4e5c148a4358 false +key_image_to_y b55f9018f08daea272b8726fa9a3fd5c0a97683654a694cf361bc534a0d74a54 b55f9018f08daea272b8726fa9a3fd5c0a97683654a694cf361bc534a0d74a54 false +key_image_to_y 218e58d7355d1534b5b633abc5caa16386ca3109519dd04086db30c6c7d1af06 218e58d7355d1534b5b633abc5caa16386ca3109519dd04086db30c6c7d1af06 false +key_image_to_y 0f274b89128658a5d1736638fef3ca7ce20a4d89ff23d7c79d9add00fa263b3b 0f274b89128658a5d1736638fef3ca7ce20a4d89ff23d7c79d9add00fa263b3b false +key_image_to_y dcfddcb87039b6a615df29491fd39b8decf62bc8b06cb85f170eeec1505c6001 dcfddcb87039b6a615df29491fd39b8decf62bc8b06cb85f170eeec1505c6001 false +key_image_to_y b75a2d1c3f7f262830619e5ed38cbb9656737d273180c4e7f4fac875d434fd18 b75a2d1c3f7f262830619e5ed38cbb9656737d273180c4e7f4fac875d434fd18 false +key_image_to_y fe5b354a174c97dc1742a71191f602395867efb961c817764171f39f50347264 fe5b354a174c97dc1742a71191f602395867efb961c817764171f39f50347264 false +key_image_to_y 16c2908507300ffbb4b346e19c15cd3b9f04459ee939a144866e1f02d19ef97f 16c2908507300ffbb4b346e19c15cd3b9f04459ee939a144866e1f02d19ef97f false +key_image_to_y 14c04cb0415df29bb918fd9e4b6a878ead2669668ba72d7d78c7c74068d50377 14c04cb0415df29bb918fd9e4b6a878ead2669668ba72d7d78c7c74068d50377 false +key_image_to_y 35e1f520f6cae77dc98e0bbd09129d86e82fb5ad23f44f676f5b56731e575a13 35e1f520f6cae77dc98e0bbd09129d86e82fb5ad23f44f676f5b56731e575a13 false +key_image_to_y 35747e2cba77c3103c205919180eeb55d614af69107d586e0b4946651b815a5b 35747e2cba77c3103c205919180eeb55d614af69107d586e0b4946651b815a5b false +key_image_to_y 977b0f71a082f9a73ce6343ba0f12e257477633b0ddda6ec79fa4efa2a1d2e29 977b0f71a082f9a73ce6343ba0f12e257477633b0ddda6ec79fa4efa2a1d2e29 false +key_image_to_y 2b9c90881584045c6b114d6c86be8901ce38162a2168ba1a485203d89a5c6c2f 2b9c90881584045c6b114d6c86be8901ce38162a2168ba1a485203d89a5c6c2f false +key_image_to_y 61debcd08a03cffec9745c95371f749d749b1f24dafd8f1b3016105f77408b0c 61debcd08a03cffec9745c95371f749d749b1f24dafd8f1b3016105f77408b0c false +key_image_to_y 8bc860d86aad2dd0be6af91f7e5185d56fa66d9e7ffb1339b0c5991663bcaa54 8bc860d86aad2dd0be6af91f7e5185d56fa66d9e7ffb1339b0c5991663bcaa54 false +key_image_to_y 25bf7f10ebd260a5dbae567dfce30525dfbc9af1b2521e5baeb7fd8cbc2ca93d 25bf7f10ebd260a5dbae567dfce30525dfbc9af1b2521e5baeb7fd8cbc2ca93d false +key_image_to_y b949beebe0ffe7b87bc1c9e4bce431d6d75d706b008797043607bf407a301e5e b949beebe0ffe7b87bc1c9e4bce431d6d75d706b008797043607bf407a301e5e false +key_image_to_y a896a41cd7622e38d0b43ee402f48886b3daa8f747dd96f8661243ee513d98de a896a41cd7622e38d0b43ee402f48886b3daa8f747dd96f8661243ee513d985e true +key_image_to_y 70491afad4c4739263dac2a94cadaffe95115553ed2252f784704867a05488fd 70491afad4c4739263dac2a94cadaffe95115553ed2252f784704867a054887d true +key_image_to_y b131e2745a54155a1d490e73e95294466740fbb5276f727a63e5f5eb182e13a1 
b131e2745a54155a1d490e73e95294466740fbb5276f727a63e5f5eb182e1321 true +key_image_to_y 5cb915be5aec2fb6986143f6f3df4563e9051ea96591f20f20f49f97a01055ea 5cb915be5aec2fb6986143f6f3df4563e9051ea96591f20f20f49f97a010556a true +key_image_to_y 3810186d871bdfe0e7269d1d26682a1761c91d6d934b370ea17b14dc7044c6c8 3810186d871bdfe0e7269d1d26682a1761c91d6d934b370ea17b14dc7044c648 true +key_image_to_y 00db468b9479beed003e34d38d439267d6e6acffde1e606f465dbce0fc0666d0 00db468b9479beed003e34d38d439267d6e6acffde1e606f465dbce0fc066650 true +key_image_to_y a6c29a734b7aa4d93d29657be4dacb4a0f3595e530d8eff1edb08f1eace15181 a6c29a734b7aa4d93d29657be4dacb4a0f3595e530d8eff1edb08f1eace15101 true +key_image_to_y 48d9a438ef0265d8936f58a14c5b786a47481098b6db206b61e8305cf0b780cc 48d9a438ef0265d8936f58a14c5b786a47481098b6db206b61e8305cf0b7804c true +key_image_to_y 93204f8620430334b844704de904ad1bc6c8622769360c679b77df1673264e90 93204f8620430334b844704de904ad1bc6c8622769360c679b77df1673264e10 true +key_image_to_y dfb14b5961117227711861b81778b283aeded0cdd6f9717a95701042f7b2ddcf dfb14b5961117227711861b81778b283aeded0cdd6f9717a95701042f7b2dd4f true +key_image_to_y 7c06d5309ceecd9f4e2dd11a28dfdc035096780f36d9a4c61d63ff54075e1ad3 7c06d5309ceecd9f4e2dd11a28dfdc035096780f36d9a4c61d63ff54075e1a53 true +key_image_to_y 70d3176a9b2ca21e60675ce4b2a097d8e9d4794a0e838cc598b21a6a7aef06cd 70d3176a9b2ca21e60675ce4b2a097d8e9d4794a0e838cc598b21a6a7aef064d true +key_image_to_y 8266564fa110f488aefb36048d46959708bdd839cb8b7ba58190c9c8ffd27cab 8266564fa110f488aefb36048d46959708bdd839cb8b7ba58190c9c8ffd27c2b true +key_image_to_y 06d94e0765b6b11ab9c1baf3e5ff1cddbaf7f2c0ac17cf0da55cde4b06bab0b2 06d94e0765b6b11ab9c1baf3e5ff1cddbaf7f2c0ac17cf0da55cde4b06bab032 true +key_image_to_y f7d0ac71751e9bea9e8bd8da43ddab22bcb3edacf019a99443b9068cd4474185 f7d0ac71751e9bea9e8bd8da43ddab22bcb3edacf019a99443b9068cd4474105 true +key_image_to_y ee029aa2269ce142e2e9d6e0502cf7ee23dedc847436c0eb935a1ded8701f382 ee029aa2269ce142e2e9d6e0502cf7ee23dedc847436c0eb935a1ded8701f302 true +key_image_to_y 370640fae7b49a03da1c99538d6afe52bf09afd160e35c9e971b41c2aba8e3cc 370640fae7b49a03da1c99538d6afe52bf09afd160e35c9e971b41c2aba8e34c true +key_image_to_y 02d781e9c56a25a924fd49f5f80eee90eb55f0d7269d7157f89403dfc58ad386 02d781e9c56a25a924fd49f5f80eee90eb55f0d7269d7157f89403dfc58ad306 true +key_image_to_y 7cb9d19798bc4bf5402326b2e9aa371b2b7a504f09e4cfc123d23ee0f05098fc 7cb9d19798bc4bf5402326b2e9aa371b2b7a504f09e4cfc123d23ee0f050987c true +key_image_to_y f01e93a0b1fb01890b162f7002a4425c35421e8a46cec1d9c84d0fa9263990e5 f01e93a0b1fb01890b162f7002a4425c35421e8a46cec1d9c84d0fa926399065 true +key_image_to_y 8018dc18a0bf007fde0bb5293ef247b9446f4f0b9c20d18194a216fc500bf4db 8018dc18a0bf007fde0bb5293ef247b9446f4f0b9c20d18194a216fc500bf45b true +key_image_to_y f8e3600327a95a90b8dea0659ac00110b45c410b97dad6660348892891ffb690 f8e3600327a95a90b8dea0659ac00110b45c410b97dad6660348892891ffb610 true +key_image_to_y 11ed169c028c854bd41f6d7ea583ec50c1568bfc8c784e4d04d30533e58496f3 11ed169c028c854bd41f6d7ea583ec50c1568bfc8c784e4d04d30533e5849673 true +key_image_to_y ddcac461f3c9d265cce797039bbfff3f3156d07c4e0231b096292434df5bcabb ddcac461f3c9d265cce797039bbfff3f3156d07c4e0231b096292434df5bca3b true +key_image_to_y b861f2dba6252d878029f417ac02555f9502c66d889de49683262d1b020f5adb b861f2dba6252d878029f417ac02555f9502c66d889de49683262d1b020f5a5b true +key_image_to_y cda6bd18b5dbe8705d7a7be1d5f4b96767bf03d901931b643ee138ba66c64dd3 cda6bd18b5dbe8705d7a7be1d5f4b96767bf03d901931b643ee138ba66c64d53 true 
+key_image_to_y b4aa5fefc0e81a37f3ac19482a32fc49141c79c013e8d9058a9d1c6ca347a79b b4aa5fefc0e81a37f3ac19482a32fc49141c79c013e8d9058a9d1c6ca347a71b true +key_image_to_y cc3b15a7feec558a73e12028b11cede86ff9f6956b014722872037b9ee652ebf cc3b15a7feec558a73e12028b11cede86ff9f6956b014722872037b9ee652e3f true +key_image_to_y 1e5b547e0e6da07390a74da76995118abc565c4e7f4acb24d90e5f85721d33d5 1e5b547e0e6da07390a74da76995118abc565c4e7f4acb24d90e5f85721d3355 true +key_image_to_y f3003c72bf5f87b97f34dc255dda2cb39d3e8e4045168631de8d2fecf5e76296 f3003c72bf5f87b97f34dc255dda2cb39d3e8e4045168631de8d2fecf5e76216 true +key_image_to_y 241452c33318416debb476707bcb7e52c9f3480768ac2c9bf394ce36df7923de 241452c33318416debb476707bcb7e52c9f3480768ac2c9bf394ce36df79235e true +key_image_to_y 8a4f7a0e19ad5af9315b0691f35506fc78e9e8fe7f5572e36d19d582526abdff 8a4f7a0e19ad5af9315b0691f35506fc78e9e8fe7f5572e36d19d582526abd7f true +key_image_to_y fdae81e5a3719b1ac05f27cd7bf83e01bd5026d91e99b6f8bc1672bc2711fb91 fdae81e5a3719b1ac05f27cd7bf83e01bd5026d91e99b6f8bc1672bc2711fb11 true +key_image_to_y 2649d2bc7f3e0d6b87e5d519d5aad9f8e22ff5e8f02466efc33be443e67d76f0 2649d2bc7f3e0d6b87e5d519d5aad9f8e22ff5e8f02466efc33be443e67d7670 true +key_image_to_y fc161a566fc014ed9e15e4cec7b2eb1c19a3220e518106982843861c9aac69e3 fc161a566fc014ed9e15e4cec7b2eb1c19a3220e518106982843861c9aac6963 true +key_image_to_y d246c119405dfd6de0ed83a04ca542caf73785b55671572a60ea5f665ec91296 d246c119405dfd6de0ed83a04ca542caf73785b55671572a60ea5f665ec91216 true +key_image_to_y f28722915db5acda96b5281f2a36625e9994d5b8eca68f3e250dd4c4e815b5c1 f28722915db5acda96b5281f2a36625e9994d5b8eca68f3e250dd4c4e815b541 true +key_image_to_y eb73cb1356f4114d01983d552301bb8f4927b41256d9c90d52024476d3d2e2cc eb73cb1356f4114d01983d552301bb8f4927b41256d9c90d52024476d3d2e24c true +key_image_to_y 5d3ea3b7c892e585008a220c51cbe42ae7e0c7e9e525a42ec492d3a7602a1cd5 5d3ea3b7c892e585008a220c51cbe42ae7e0c7e9e525a42ec492d3a7602a1c55 true +key_image_to_y 36a322f166933f550102d14e9c9daaeaa34bd06e9e20dc605a101a2d0ae69fbb 36a322f166933f550102d14e9c9daaeaa34bd06e9e20dc605a101a2d0ae69f3b true +key_image_to_y 2b31db2834f0e35ca15ebe00e73a583581476253f94b7f3b270546e58193b4a0 2b31db2834f0e35ca15ebe00e73a583581476253f94b7f3b270546e58193b420 true +key_image_to_y 3725e83d6e945fb0f8feb442cd12487f9e351d286ee89fa4dd68fb86b847bcb1 3725e83d6e945fb0f8feb442cd12487f9e351d286ee89fa4dd68fb86b847bc31 true +key_image_to_y 4cdcc458412ed752e804a0d4bc31bc5b4f47ff49a8771b0dc47d0388c10805f7 4cdcc458412ed752e804a0d4bc31bc5b4f47ff49a8771b0dc47d0388c1080577 true +key_image_to_y bb50dc83ae41cd9f1508073186087950c95a482bd780eccd70cd63388c7649f1 bb50dc83ae41cd9f1508073186087950c95a482bd780eccd70cd63388c764971 true +key_image_from_y b14939b9254f8df6d3e5c7b33a7dc0c6aa1ab8fe1293cb4795c9d92cf81d634f false b14939b9254f8df6d3e5c7b33a7dc0c6aa1ab8fe1293cb4795c9d92cf81d634f +key_image_from_y 6669a8eab861a2f4d4fdfd9fb8a9cb5fdd3a15e0facb8ff77c24727635af634e false 6669a8eab861a2f4d4fdfd9fb8a9cb5fdd3a15e0facb8ff77c24727635af634e +key_image_from_y c3134aa2143389e2d3b1a00fe661e2b82490956bbdf65ac2396d606f1a58b134 false c3134aa2143389e2d3b1a00fe661e2b82490956bbdf65ac2396d606f1a58b134 +key_image_from_y 8c27bc121f3fe85abb4c8084c5744960231d7b1b5861c30aa82749bf54018b53 false 8c27bc121f3fe85abb4c8084c5744960231d7b1b5861c30aa82749bf54018b53 +key_image_from_y 9e7be117a77921058748cba4fcfa043c026a884d969fd7b3a49ef99fdda3a772 false 9e7be117a77921058748cba4fcfa043c026a884d969fd7b3a49ef99fdda3a772 +key_image_from_y 
19c1820e3677f5b6c72db2c4ae804e6b93cbe802bf5de884d7d695253079da02 false 19c1820e3677f5b6c72db2c4ae804e6b93cbe802bf5de884d7d695253079da02 +key_image_from_y f37184d49ef88da56a1f37b3a4424c8c40a39b888c0c65817ce0cbfaeba17943 false f37184d49ef88da56a1f37b3a4424c8c40a39b888c0c65817ce0cbfaeba17943 +key_image_from_y a558636042e148d97f699bb55dd2c2fb6c6d64f54aa5e1c06f6d2e6e054c5261 false a558636042e148d97f699bb55dd2c2fb6c6d64f54aa5e1c06f6d2e6e054c5261 +key_image_from_y f2c6b50c496c5b4e0fa715d24e8a22727633d05b91b9d08232181741bcb36a3a false f2c6b50c496c5b4e0fa715d24e8a22727633d05b91b9d08232181741bcb36a3a +key_image_from_y 0543ccb07c6b9d2a2602107d0aa5ed6aa1398ec6543d9b9d7822bbf339ddbb09 false 0543ccb07c6b9d2a2602107d0aa5ed6aa1398ec6543d9b9d7822bbf339ddbb09 +key_image_from_y bf2acac9328c8538beec88fffee1ca49d9b28c70f9acc23f59dfbc8d21754654 false bf2acac9328c8538beec88fffee1ca49d9b28c70f9acc23f59dfbc8d21754654 +key_image_from_y 81e6611d33146dd5c3e402b4cb660b628175e074c1ccff093258a6f355655045 false 81e6611d33146dd5c3e402b4cb660b628175e074c1ccff093258a6f355655045 +key_image_from_y 4382e51caba64548432e6f0ddf3df5bb29eba0d55f46f806f8281b6b324ccf66 false 4382e51caba64548432e6f0ddf3df5bb29eba0d55f46f806f8281b6b324ccf66 +key_image_from_y 7d7185e987cbb9ee1608c7eef268764080906c9a7d5e91dfd1f6ea6538405f6e false 7d7185e987cbb9ee1608c7eef268764080906c9a7d5e91dfd1f6ea6538405f6e +key_image_from_y 8558c5ad0304b8b4fbf0ab12ed4f89295e7729a3ec4b05fffacdb9fbcc53f859 false 8558c5ad0304b8b4fbf0ab12ed4f89295e7729a3ec4b05fffacdb9fbcc53f859 +key_image_from_y 4c9ec93dbaf801eae69ea60ea6c5b970b06c9bd542ad3aba60d6d982abfcd653 false 4c9ec93dbaf801eae69ea60ea6c5b970b06c9bd542ad3aba60d6d982abfcd653 +key_image_from_y 361268ad395bc3162699092b95d138f023c41dd0e832d85c3f190440a2d0a87c false 361268ad395bc3162699092b95d138f023c41dd0e832d85c3f190440a2d0a87c +key_image_from_y f1ab05c1794fe907bbe657af5e046e2682312408ab267e24f6586f7fd52c306d false f1ab05c1794fe907bbe657af5e046e2682312408ab267e24f6586f7fd52c306d +key_image_from_y 9870dec355f5afcd193f7bbd803ad3038540cca12aa65ee0fc4108fe72657f1a false 9870dec355f5afcd193f7bbd803ad3038540cca12aa65ee0fc4108fe72657f1a +key_image_from_y 71c688eaef0dee7d48d803fa38fd7d20690e666594a4ce5ea505832e2e8c4666 false 71c688eaef0dee7d48d803fa38fd7d20690e666594a4ce5ea505832e2e8c4666 +key_image_from_y af57f563d8446a522666222c830f33f89ce0124280be5159388900a657ea9d12 false af57f563d8446a522666222c830f33f89ce0124280be5159388900a657ea9d12 +key_image_from_y 76d14b96961619765fc5b6f4e2e30166fa4c3e275c227bd275b5f4e6c0a91255 false 76d14b96961619765fc5b6f4e2e30166fa4c3e275c227bd275b5f4e6c0a91255 +key_image_from_y 59d7e8425798b6c6b2f7fa7ff6fe344eb5cf84511899dd39bd56e71beea5f960 false 59d7e8425798b6c6b2f7fa7ff6fe344eb5cf84511899dd39bd56e71beea5f960 +key_image_from_y d0db255ff4a1b619dc5e0fc9773659a19c75bd7a868e3fd45e83c92aa18c6e04 false d0db255ff4a1b619dc5e0fc9773659a19c75bd7a868e3fd45e83c92aa18c6e04 +key_image_from_y c03bf07443db65ce3b7bcd58c17b6266d81b8a6624deb081c65c14650b51d827 false c03bf07443db65ce3b7bcd58c17b6266d81b8a6624deb081c65c14650b51d827 +key_image_from_y 87102828ddeb3a31a266de1937b966658710264ad3c520bcc93abb07bc459849 false 87102828ddeb3a31a266de1937b966658710264ad3c520bcc93abb07bc459849 +key_image_from_y fc749c9fcc3300819ad312af6d235813975e6ce70bb904bad49930ce34b47201 false fc749c9fcc3300819ad312af6d235813975e6ce70bb904bad49930ce34b47201 +key_image_from_y e53657e4a0bbf098112777134885f65ea7abfc0639d28515bd00bd52a418b93e false e53657e4a0bbf098112777134885f65ea7abfc0639d28515bd00bd52a418b93e +key_image_from_y 
55c7fe59e8c41d0d0f77f2d993f10e638cf6d4678984a4b9422202105ad51349 false 55c7fe59e8c41d0d0f77f2d993f10e638cf6d4678984a4b9422202105ad51349 +key_image_from_y e4246f6bd27e9323e08107ac9fa911f3f6c27f64d0f03b2a265789f2f8718401 false e4246f6bd27e9323e08107ac9fa911f3f6c27f64d0f03b2a265789f2f8718401 +key_image_from_y f7fce49a5ff25d00f655942508e1a31e210a66fe03f22bd6c799575ea6b88b5b false f7fce49a5ff25d00f655942508e1a31e210a66fe03f22bd6c799575ea6b88b5b +key_image_from_y 20325307f450143797fc7b7969b3ad093fd6318d97c6dfbe09a04a50abc9ba42 false 20325307f450143797fc7b7969b3ad093fd6318d97c6dfbe09a04a50abc9ba42 +key_image_from_y 4a2e87eaade16f12c728bd0fee887488db0d9e03f940de2e1acd4d77123ede59 false 4a2e87eaade16f12c728bd0fee887488db0d9e03f940de2e1acd4d77123ede59 +key_image_from_y 9a4227ccd723624c7dd4d536a8476463bd767ebc55e1e4f27bbe84139245151b false 9a4227ccd723624c7dd4d536a8476463bd767ebc55e1e4f27bbe84139245151b +key_image_from_y c64c6c2505ccfbe929fe6e93a8376c9377a05cb9df5547a203d3e9247e5dfa75 false c64c6c2505ccfbe929fe6e93a8376c9377a05cb9df5547a203d3e9247e5dfa75 +key_image_from_y f32193f4a45a9ee531f4e54b6a8cbae179048cd3e93d24cc21229ba67d3c886f false f32193f4a45a9ee531f4e54b6a8cbae179048cd3e93d24cc21229ba67d3c886f +key_image_from_y e480ed1ecdbf1e10dd7e347862e153b35f457bb2dac5bce766cb831265a0122a false e480ed1ecdbf1e10dd7e347862e153b35f457bb2dac5bce766cb831265a0122a +key_image_from_y e3283fa4f9eae1a612ac40a3a9f7ceaf472d4ad0fc7dba0c2bc6387f4f170753 false e3283fa4f9eae1a612ac40a3a9f7ceaf472d4ad0fc7dba0c2bc6387f4f170753 +key_image_from_y 208220ab9fb01a76f92df80d367c9b8187bd647e2df67143d315107c24c19870 false 208220ab9fb01a76f92df80d367c9b8187bd647e2df67143d315107c24c19870 +key_image_from_y 4ec772fe0cd753a573838454fa5d3764c84466bf1d8c7b051b0499c56c8ccf58 false 4ec772fe0cd753a573838454fa5d3764c84466bf1d8c7b051b0499c56c8ccf58 +key_image_from_y 350f46cace1e8cf8e82352a72070d3131d9fd7f7b71bec1781a93ddfb82a7601 false 350f46cace1e8cf8e82352a72070d3131d9fd7f7b71bec1781a93ddfb82a7601 +key_image_from_y 91624ed82640d2f131b996db59c64564be1342725a7de6ced3776d19f15b4367 false 91624ed82640d2f131b996db59c64564be1342725a7de6ced3776d19f15b4367 +key_image_from_y 700abb5038344ed6561a2e25f5296f785cdf6f359b360cb3be69eaf535df6671 false 700abb5038344ed6561a2e25f5296f785cdf6f359b360cb3be69eaf535df6671 +key_image_from_y 4d93904090a5c37cadb4c8b911955bd6374ab302f142d918c722eb8252bace0c true 4d93904090a5c37cadb4c8b911955bd6374ab302f142d918c722eb8252bace8c +key_image_from_y d375ac0223b138a9d0a0d3adf3a7a62c0a7207bc87a30bed0e582912aa4fb656 true d375ac0223b138a9d0a0d3adf3a7a62c0a7207bc87a30bed0e582912aa4fb6d6 +key_image_from_y ece17c47a92da9b0ef4218c19fa799ec04673c8843f65f20a14d492ced296542 true ece17c47a92da9b0ef4218c19fa799ec04673c8843f65f20a14d492ced2965c2 +key_image_from_y c721614309a89ac2ef41570662ce244da418476cfbd87331cd8e44ce6dd24973 true c721614309a89ac2ef41570662ce244da418476cfbd87331cd8e44ce6dd249f3 +key_image_from_y 00b5b85871c39384b359b2d2e89773c619ea546512d9e78ef43b6d8ad4f55408 true 00b5b85871c39384b359b2d2e89773c619ea546512d9e78ef43b6d8ad4f55488 +key_image_from_y fa32508215245c07dc980bbddf4483e597ed8ceb2747f559bcb4950e7706a43b true fa32508215245c07dc980bbddf4483e597ed8ceb2747f559bcb4950e7706a4bb +key_image_from_y 5c02d3bc62f0fcd55c264e8919f4a7cd84f1646a5f467df8e0cc70a0a2a0c717 true 5c02d3bc62f0fcd55c264e8919f4a7cd84f1646a5f467df8e0cc70a0a2a0c797 +key_image_from_y b96033b13a7007d716200bc739001fcf9a062dbdc4c2583270cd1cf8fda38f5b true b96033b13a7007d716200bc739001fcf9a062dbdc4c2583270cd1cf8fda38fdb +key_image_from_y 
52c650e2e938e87f72f40bfa534f454c5b6339a3fbfd3059afb939c2d9ab683a true 52c650e2e938e87f72f40bfa534f454c5b6339a3fbfd3059afb939c2d9ab68ba +key_image_from_y 5afee29bbf0ffbf1feec56d43f624f429565fdea27d9544d6c7dcb9d2d43d11f true 5afee29bbf0ffbf1feec56d43f624f429565fdea27d9544d6c7dcb9d2d43d19f +key_image_from_y a4c5b1932e4dba9666641782a4f95f8bb5a617633a17fb4bc10cfccde634276b true a4c5b1932e4dba9666641782a4f95f8bb5a617633a17fb4bc10cfccde63427eb +key_image_from_y 7a63fafdcf359db81604b14bbe51e15adf8d28ba9394d306aa665a258aef2609 true 7a63fafdcf359db81604b14bbe51e15adf8d28ba9394d306aa665a258aef2689 +key_image_from_y 696183751af706b468e221b207ba4aa5a3f97902afa4ab825bf235e85e13dc16 true 696183751af706b468e221b207ba4aa5a3f97902afa4ab825bf235e85e13dc96 +key_image_from_y 28b328e40365cd780fb0637d3870dcf755976ec5c088e97d8a1e8a04db54bd45 true 28b328e40365cd780fb0637d3870dcf755976ec5c088e97d8a1e8a04db54bdc5 +key_image_from_y c4f938652ade2f8996addca457c82876205b207ea470c4231e3a7f5ca3472d4d true c4f938652ade2f8996addca457c82876205b207ea470c4231e3a7f5ca3472dcd +key_image_from_y b993e32601093bf0e63c708501c7f91afe9fa4298d287f3f55bb493569f6b26b true b993e32601093bf0e63c708501c7f91afe9fa4298d287f3f55bb493569f6b2eb +key_image_from_y 6246cfaa394da87a45edf395472ad3594d8b3b6f39550078cfbf39066aeea91b true 6246cfaa394da87a45edf395472ad3594d8b3b6f39550078cfbf39066aeea99b +key_image_from_y 5f0590a3b37df89f27caee54afc6101a3cf0b896a0f1997098bace1bf3d9b954 true 5f0590a3b37df89f27caee54afc6101a3cf0b896a0f1997098bace1bf3d9b9d4 +key_image_from_y e3955bd20dc37d5ae620ee5bffa1b1cfdc05a062826df39197e6d191f23c031b true e3955bd20dc37d5ae620ee5bffa1b1cfdc05a062826df39197e6d191f23c039b +key_image_from_y 53aa7307b2ef3c5d5f51e73b90891b1a597d5ddfcbb41bcd79a0f199f7b20d54 true 53aa7307b2ef3c5d5f51e73b90891b1a597d5ddfcbb41bcd79a0f199f7b20dd4 +key_image_from_y 3474abfa456935af7ca56f5bdf3751ff8437e30de6b5f830329ec2cdd8aa1846 true 3474abfa456935af7ca56f5bdf3751ff8437e30de6b5f830329ec2cdd8aa18c6 +key_image_from_y 2533d58ebfa13c3175be9f05235c1730c93033a35fa002577e44c6675b817e15 true 2533d58ebfa13c3175be9f05235c1730c93033a35fa002577e44c6675b817e95 +key_image_from_y ae848420273035bd516728bd9c2f9b421736a46c3806a77fa64acd680357d733 true ae848420273035bd516728bd9c2f9b421736a46c3806a77fa64acd680357d7b3 +key_image_from_y af96b48d7a704a507a9b0eee52b19edf1ddaa00ed84ff1f04202113dbb79634d true af96b48d7a704a507a9b0eee52b19edf1ddaa00ed84ff1f04202113dbb7963cd +key_image_from_y 1305c030bf02efd242f7d826d53fefdba57546228f911d2a6b2e32bd02952577 true 1305c030bf02efd242f7d826d53fefdba57546228f911d2a6b2e32bd029525f7 +key_image_from_y 6dd4392fb42c478bfbb1555276a79bdb8558cfa0f207787c6b700f40f464042f true 6dd4392fb42c478bfbb1555276a79bdb8558cfa0f207787c6b700f40f46404af +key_image_from_y a7e51a48f006bcff53fbf9f2a5dbc5c8b2cb5251147fa4bd10e5f9bb00db2f7d true a7e51a48f006bcff53fbf9f2a5dbc5c8b2cb5251147fa4bd10e5f9bb00db2ffd +key_image_from_y dc4713b4709e1bf6df4f72a728328816d25ba9b013e4183f1802cc1bbf6c9149 true dc4713b4709e1bf6df4f72a728328816d25ba9b013e4183f1802cc1bbf6c91c9 +key_image_from_y 393b89cb3a994e60cdec7c004be50262c36b7171c22cc8b9ed93d217b3dd1b20 true 393b89cb3a994e60cdec7c004be50262c36b7171c22cc8b9ed93d217b3dd1ba0 +key_image_from_y dde48284f2512d01fe5e6eb0dc7bed77d9f0cd4a520d7e4f48fd98d8be871a47 true dde48284f2512d01fe5e6eb0dc7bed77d9f0cd4a520d7e4f48fd98d8be871ac7 +key_image_from_y 168c123f48f0e06f8dd2fb656a4418e8c8118f94c1b4fb4dd875ce66f79f0108 true 168c123f48f0e06f8dd2fb656a4418e8c8118f94c1b4fb4dd875ce66f79f0188 +key_image_from_y 
638317b0f09425de7b63a3e349e706b0a51ee5872a1bfe5b5c6bbc7bf7dee201 true 638317b0f09425de7b63a3e349e706b0a51ee5872a1bfe5b5c6bbc7bf7dee281 +key_image_from_y 261c14e9b16d50f500e60e9d1f66d33a8466fe8bd8d025418d80602d5caff465 true 261c14e9b16d50f500e60e9d1f66d33a8466fe8bd8d025418d80602d5caff4e5 +key_image_from_y c6aa35885209ab7f49ce3635f1b2c16b70c2bd8c8b0ea9dd22210fc5a8d5c852 true c6aa35885209ab7f49ce3635f1b2c16b70c2bd8c8b0ea9dd22210fc5a8d5c8d2 +key_image_from_y f8222184ed7629b3e994b43fe9d787aa34f33a784a9985deaa1f9dcfb709be73 true f8222184ed7629b3e994b43fe9d787aa34f33a784a9985deaa1f9dcfb709bef3 +key_image_from_y 33a015c73192d8dbc67bd25d28ba2c4cbb4fb8bc92fa5c680d4179d54b7dfe6c true 33a015c73192d8dbc67bd25d28ba2c4cbb4fb8bc92fa5c680d4179d54b7dfeec +key_image_from_y 6b9a54af748eca68552c36464b32344583444a76456cfeab8badf753d2919663 true 6b9a54af748eca68552c36464b32344583444a76456cfeab8badf753d29196e3 +key_image_from_y b8a1892a9174bd24bc5c4560f2116d64ef9985eb39f7c56ae6dcf9112e0d3b40 true b8a1892a9174bd24bc5c4560f2116d64ef9985eb39f7c56ae6dcf9112e0d3bc0 +key_image_from_y 55075478f2d9a2f93c2c8c40e32a2e79b157b16ae619c7f4492e9e5aee450b37 true 55075478f2d9a2f93c2c8c40e32a2e79b157b16ae619c7f4492e9e5aee450bb7 +key_image_from_y bbb54e6c3500b90a73fd9df8273a8146dd182af9350867671f6b3335c340625c true bbb54e6c3500b90a73fd9df8273a8146dd182af9350867671f6b3335c34062dc +key_image_from_y 2f1602dbf3381f51d8d33d56becffec3f5cdef6230032e27a719525b4b38c941 true 2f1602dbf3381f51d8d33d56becffec3f5cdef6230032e27a719525b4b38c9c1 +key_image_from_y af046aaf25bf374dd22baa4fae78c982f800f1b7c2731f97f97e882688856034 true af046aaf25bf374dd22baa4fae78c982f800f1b7c2731f97f97e8826888560b4 +key_image_from_y 6a9640d8f15984358ce8acf46cb39dec56933cf13e335d6a3bd0e01dd64e7b57 true 6a9640d8f15984358ce8acf46cb39dec56933cf13e335d6a3bd0e01dd64e7bd7 +key_image_from_y 49ac34e8e8089177108c9c764feeba8f7bc67bb1715a9f937b400d5726dd2810 true 49ac34e8e8089177108c9c764feeba8f7bc67bb1715a9f937b400d5726dd2890 +key_image_from_y 874e851d37c89dbe2cddd1848d0f4fa991e59455deb73f754cf2615c2f21595a true 874e851d37c89dbe2cddd1848d0f4fa991e59455deb73f754cf2615c2f2159da +key_image_from_y 7b0ee4271536ba40e0e3bfacbd6a9f04f24ba6cb2e79b96070bec36480973113 true 7b0ee4271536ba40e0e3bfacbd6a9f04f24ba6cb2e79b96070bec36480973193 +key_image_from_y bbebfd909dffa248f85390d3860bf5f2123369be07ea3dee5f13e99e25e49359 true bbebfd909dffa248f85390d3860bf5f2123369be07ea3dee5f13e99e25e493d9 +key_image_from_y df5ca9835c856ee38141ff5d10a4985f958e7f986391ecf639263f7319bc6b36 true df5ca9835c856ee38141ff5d10a4985f958e7f986391ecf639263f7319bc6bb6 +key_image_from_y 7a4fbe8236800caeaea9d726a15b21ba515e7414ed74790717d464b2d8c70e39 true 7a4fbe8236800caeaea9d726a15b21ba515e7414ed74790717d464b2d8c70eb9 +key_image_from_y a338ae983e77870095e9f7cc6f9c13d8603796741553483071a4971c8de4bc7d true a338ae983e77870095e9f7cc6f9c13d8603796741553483071a4971c8de4bcfd +key_image_from_y 7bda514ff46aeae2c62b19fe1be1cb11ccd7405cbf089088863d12d97e718324 true 7bda514ff46aeae2c62b19fe1be1cb11ccd7405cbf089088863d12d97e7183a4 +key_image_from_y c3ce63428cfda4dc85a1dae4c3c6b051087a82f49776a546cff6b47484ff3961 true c3ce63428cfda4dc85a1dae4c3c6b051087a82f49776a546cff6b47484ff39e1 +key_image_from_y 3dfc1d9d714860c14540568d7da56e31b1a290db1023ad5bd10862ade6d4ae74 true 3dfc1d9d714860c14540568d7da56e31b1a290db1023ad5bd10862ade6d4aef4 +key_image_from_y 3cecd238630915a9f142a9c561461a7f321824ae726e03290fe70e2cbb17e955 true 3cecd238630915a9f142a9c561461a7f321824ae726e03290fe70e2cbb17e9d5 +key_image_from_y 
683c108ea462e200e6e774f6b6ec75bd6a1041f4c0c3ac392f79c2ae66bc1f4d true 683c108ea462e200e6e774f6b6ec75bd6a1041f4c0c3ac392f79c2ae66bc1fcd +key_image_from_y 6428539f6949cb005e1dfa470718c6f2eddee6ad4579e876c909b92a6561c178 true 6428539f6949cb005e1dfa470718c6f2eddee6ad4579e876c909b92a6561c1f8 +key_image_from_y 6658cab76b1481b2023873a57c06d69097d9bfa96c05a995e84731cfe65a384b true 6658cab76b1481b2023873a57c06d69097d9bfa96c05a995e84731cfe65a38cb diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt index a1158fcecd..4f259643a2 100644 --- a/tests/performance_tests/CMakeLists.txt +++ b/tests/performance_tests/CMakeLists.txt @@ -36,6 +36,7 @@ set(performance_tests_headers construct_tx.h derive_public_key.h derive_secret_key.h + fe_batch_invert.h ge_frombytes_vartime.h generate_key_derivation.h generate_key_image.h diff --git a/tests/performance_tests/fe_batch_invert.h b/tests/performance_tests/fe_batch_invert.h new file mode 100644 index 0000000000..2aed96f9a0 --- /dev/null +++ b/tests/performance_tests/fe_batch_invert.h @@ -0,0 +1,79 @@ +// Copyright (c) 2024, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
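+//
+// Perf test comparing batched field-element inversion (fe_batch_invert) against
+// n independent fe_invert calls. The batched variant presumably uses Montgomery's
+// trick -- one field inversion plus O(n) multiplications -- along these lines:
+//
+//   prefix[i] = x[0] * ... * x[i]
+//   inv = fe_invert(prefix[n-1])
+//   for i = n-1 .. 1: out[i] = inv * prefix[i-1]; inv *= x[i]
+//   out[0] = inv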
+//
+// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers
+
+#pragma once
+
+#include "crypto/crypto.h"
+
+template<bool batched>
+class test_fe_batch_invert
+{
+public:
+  static const size_t loop_count = 50;
+  static const size_t n_elems = 1000;
+
+  bool init()
+  {
+    m_fes = (fe *) malloc(n_elems * sizeof(fe));
+
+    for (std::size_t i = 0; i < n_elems; ++i)
+    {
+      crypto::secret_key r;
+      crypto::random32_unbiased((unsigned char*)r.data);
+
+      ge_p3 point;
+      ge_scalarmult_base(&point, (unsigned char*)r.data);
+
+      memcpy(m_fes[i], &point.Y, sizeof(fe));
+    }
+
+    return true;
+  }
+
+  bool test()
+  {
+    fe *inv_fes = (fe *) malloc(n_elems * sizeof(fe));
+
+    if (batched)
+      fe_batch_invert(inv_fes, m_fes, n_elems);
+    else
+    {
+      for (std::size_t i = 0; i < n_elems; ++i)
+        fe_invert(inv_fes[i], m_fes[i]);
+    }
+
+    free(inv_fes);
+
+    return true;
+  }
+
+private:
+  fe *m_fes;
+};
diff --git a/tests/performance_tests/main.cpp b/tests/performance_tests/main.cpp
index 929eec590d..02770d65a8 100644
--- a/tests/performance_tests/main.cpp
+++ b/tests/performance_tests/main.cpp
@@ -43,6 +43,7 @@
 #include "derive_public_key.h"
 #include "derive_secret_key.h"
 #include "derive_view_tag.h"
+#include "fe_batch_invert.h"
 #include "ge_frombytes_vartime.h"
 #include "ge_tobytes.h"
 #include "generate_key_derivation.h"
@@ -206,6 +207,8 @@ int main(int argc, char** argv)
     TEST_PERFORMANCE0(filter, p, test_generate_key_image);
     TEST_PERFORMANCE0(filter, p, test_derive_public_key);
     TEST_PERFORMANCE0(filter, p, test_derive_secret_key);
+    TEST_PERFORMANCE1(filter, p, test_fe_batch_invert, true); // batched
+    TEST_PERFORMANCE1(filter, p, test_fe_batch_invert, false); // individual inversions
     TEST_PERFORMANCE0(filter, p, test_ge_frombytes_vartime);
     TEST_PERFORMANCE0(filter, p, test_ge_tobytes);
     TEST_PERFORMANCE0(filter, p, test_generate_keypair);
diff --git a/tests/unit_tests/CMakeLists.txt b/tests/unit_tests/CMakeLists.txt
index 8659b0ed07..eb48e281cb 100644
--- a/tests/unit_tests/CMakeLists.txt
+++ b/tests/unit_tests/CMakeLists.txt
@@ -41,6 +41,7 @@ set(unit_tests_sources
   chacha.cpp
   checkpoints.cpp
   command_line.cpp
+  curve_trees.cpp
   crypto.cpp
   decompose_amount_into_digits.cpp
   device.cpp
@@ -51,6 +52,7 @@ set(unit_tests_sources
   epee_serialization.cpp
   epee_utils.cpp
   expect.cpp
+  fcmp_pp.cpp
   json_serialization.cpp
   get_xtype_from_string.cpp
   hashchain.cpp
@@ -113,11 +115,13 @@ monero_add_minimal_executable(unit_tests
 target_link_libraries(unit_tests
   PRIVATE
     ringct
+    cncrypto
     cryptonote_protocol
     cryptonote_core
     daemon_messages
     daemon_rpc_server
     blockchain_db
+    fcmp_pp
     lmdb_lib
     rpc
     net
diff --git a/tests/unit_tests/crypto.cpp b/tests/unit_tests/crypto.cpp
index f251235638..2c264c85d3 100644
--- a/tests/unit_tests/crypto.cpp
+++ b/tests/unit_tests/crypto.cpp
@@ -345,3 +345,63 @@ TEST(Crypto, generator_consistency)
   // ringct/rctTypes.h
   ASSERT_TRUE(memcmp(H.data, rct::H.bytes, 32) == 0);
 }
+
+TEST(Crypto, key_image_y)
+{
+  const cryptonote::keypair kp = cryptonote::keypair::generate(hw::get_device("default"));
+  crypto::key_image ki;
+  crypto::generate_key_image(kp.pub, kp.sec, ki);
+
+  crypto::key_image_y ki_y;
+  bool sign = crypto::key_image_to_y(ki, ki_y);
+
+  static_assert(sizeof(crypto::key_image) == sizeof(crypto::key_image_y), "unequal key image <> key image y size");
+  if (memcmp(ki.data, ki_y.data, sizeof(crypto::key_image)) == 0)
+    ASSERT_FALSE(sign);
+  else
+    ASSERT_TRUE(sign);
+
+  // decoded y coordinate should be the same
+  fe y_from_ki;
+  fe y_from_ki_y;
+  ASSERT_EQ(fe_frombytes_vartime(y_from_ki, 
(unsigned char*)ki.data), 0); + ASSERT_EQ(fe_frombytes_vartime(y_from_ki_y, (unsigned char*)ki_y.data), 0); + + ASSERT_EQ(memcmp(y_from_ki, y_from_ki_y, sizeof(fe)), 0); +} + +TEST(Crypto, batch_inversion) +{ + const std::size_t MAX_TEST_ELEMS = 1000; + + // Memory allocator + auto alloc = [](const std::size_t n) -> fe* + { + fe *ptr = (fe *) malloc(n * sizeof(fe)); + if (!ptr) + throw std::runtime_error("failed to malloc fe *"); + return ptr; + }; + + // Init test elems and individual inversions + fe *init_elems = alloc(MAX_TEST_ELEMS); + fe *norm_inverted = alloc(MAX_TEST_ELEMS); + for (std::size_t i = 0; i < MAX_TEST_ELEMS; ++i) + { + const cryptonote::keypair kp = cryptonote::keypair::generate(hw::get_device("default")); + ASSERT_EQ(fe_frombytes_vartime(init_elems[i], (unsigned char*)kp.pub.data), 0); + fe_invert(norm_inverted[i], init_elems[i]); + } + + // Do batch inversions and compare to individual inversions + for (std::size_t n_elems = 1; n_elems <= MAX_TEST_ELEMS; ++n_elems) + { + fe *batch_inverted = alloc(n_elems); + ASSERT_EQ(fe_batch_invert(batch_inverted, init_elems, n_elems), 0); + ASSERT_EQ(memcmp(batch_inverted, norm_inverted, n_elems * sizeof(fe)), 0); + free(batch_inverted); + } + + free(init_elems); + free(norm_inverted); +} diff --git a/tests/unit_tests/curve_trees.cpp b/tests/unit_tests/curve_trees.cpp new file mode 100644 index 0000000000..491448b3a2 --- /dev/null +++ b/tests/unit_tests/curve_trees.cpp @@ -0,0 +1,1353 @@ +// Copyright (c) 2014, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
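+//
+// Tests for the FCMP++ curve tree (the Helios/Selene tower cycle): growing,
+// trimming and auditing the tree, both in memory (CurveTreesGlobalTree) and in
+// LMDB via unit_test::BlockchainLMDBTest. Chunk widths are kept far smaller than
+// production values so that multi-layer trees stay cheap to build.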
+
+#include "gtest/gtest.h"
+
+#include "cryptonote_basic/cryptonote_format_utils.h"
+#include "curve_trees.h"
+#include "fcmp_pp/fcmp_pp_crypto.h"
+#include "misc_log_ex.h"
+#include "ringct/rctOps.h"
+
+#include 
+
+
+//----------------------------------------------------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------------------------------
+// Test helpers
+//----------------------------------------------------------------------------------------------------------------------
+static const std::vector<fcmp_pp::curve_trees::OutputContext> generate_random_leaves(const CurveTreesV1 &curve_trees,
+    const std::size_t old_n_leaf_tuples,
+    const std::size_t new_n_leaf_tuples)
+{
+    std::vector<fcmp_pp::curve_trees::OutputContext> outs;
+    outs.reserve(new_n_leaf_tuples);
+
+    for (std::size_t i = 0; i < new_n_leaf_tuples; ++i)
+    {
+        const std::uint64_t output_id = old_n_leaf_tuples + i;
+
+        // Generate random output tuple
+        crypto::secret_key o, c;
+        crypto::public_key O, C;
+        crypto::generate_keys(O, o, o, false);
+        crypto::generate_keys(C, c, c, false);
+
+        rct::key C_key = rct::pk2rct(C);
+        auto output_pair = fcmp_pp::curve_trees::OutputPair{
+                .output_pubkey = std::move(O),
+                .commitment = std::move(C_key)
+            };
+
+        auto output_context = fcmp_pp::curve_trees::OutputContext{
+                .output_id = output_id,
+                .output_pair = std::move(output_pair)
+            };
+
+        outs.emplace_back(std::move(output_context));
+    }
+
+    return outs;
+}
+//----------------------------------------------------------------------------------------------------------------------
+static const Selene::Scalar generate_random_selene_scalar()
+{
+    crypto::secret_key s;
+    crypto::public_key S;
+
+    crypto::generate_keys(S, s, s, false);
+
+    rct::key S_x;
+    CHECK_AND_ASSERT_THROW_MES(fcmp_pp::point_to_wei_x(rct::pk2rct(S), S_x), "failed to convert to wei x");
+    return fcmp_pp::tower_cycle::selene_scalar_from_bytes(S_x);
+}
+//----------------------------------------------------------------------------------------------------------------------
+static bool grow_tree_db(const std::size_t expected_old_n_leaf_tuples,
+    const std::size_t n_leaves,
+    std::shared_ptr<CurveTreesV1> curve_trees,
+    unit_test::BlockchainLMDBTest &test_db)
+{
+    cryptonote::db_wtxn_guard guard(test_db.m_db);
+
+    CHECK_AND_ASSERT_MES(test_db.m_db->get_num_leaf_tuples() == (uint64_t)(expected_old_n_leaf_tuples),
+        false, "unexpected starting n leaf tuples in db");
+
+    auto leaves = generate_random_leaves(*curve_trees, 0, n_leaves);
+
+    test_db.m_db->grow_tree(std::move(leaves));
+
+    return test_db.m_db->audit_tree(expected_old_n_leaf_tuples + n_leaves);
+}
+//----------------------------------------------------------------------------------------------------------------------
+static bool trim_tree_db(const std::size_t expected_old_n_leaf_tuples,
+    const std::size_t trim_leaves,
+    unit_test::BlockchainLMDBTest &test_db)
+{
+    cryptonote::db_wtxn_guard guard(test_db.m_db);
+
+    CHECK_AND_ASSERT_THROW_MES(expected_old_n_leaf_tuples >= trim_leaves, "cannot trim more leaves than exist");
+    CHECK_AND_ASSERT_THROW_MES(trim_leaves > 0, "must be trimming some leaves");
+
+    LOG_PRINT_L1("Trimming " << trim_leaves << " leaf tuples from tree with "
+        << expected_old_n_leaf_tuples << " leaves in db");
+
+    CHECK_AND_ASSERT_MES(test_db.m_db->get_num_leaf_tuples() == (uint64_t)(expected_old_n_leaf_tuples),
+        false, "trimming unexpected starting n leaf tuples in db");
+
+    // Can use 0 for trim_block_id since it's unused in tests
+    test_db.m_db->trim_tree(trim_leaves, 0);
+    CHECK_AND_ASSERT_MES(test_db.m_db->audit_tree(expected_old_n_leaf_tuples - trim_leaves), false,
+        "failed to trim tree in db");
+
+    MDEBUG("Successfully trimmed tree in db by " << trim_leaves << " leaves");
+
+    return true;
+}
+//----------------------------------------------------------------------------------------------------------------------
+#define BEGIN_INIT_TREE_ITER(curve_trees) \
+    for (std::size_t init_leaves = 1; init_leaves <= min_leaves_needed_for_tree_depth; ++init_leaves) \
+    { \
+        LOG_PRINT_L1("Initializing tree with " << init_leaves << " leaves"); \
+ \
+        /* Init tree in memory */ \
+        CurveTreesGlobalTree global_tree(*curve_trees); \
+        ASSERT_TRUE(global_tree.grow_tree(0, init_leaves)); \
+ \
+        /* Init tree in db */ \
+        INIT_BLOCKCHAIN_LMDB_TEST_DB(test_db, curve_trees); \
+        ASSERT_TRUE(grow_tree_db(0, init_leaves, curve_trees, test_db)); \
+//----------------------------------------------------------------------------------------------------------------------
+#define END_INIT_TREE_ITER(curve_trees) \
+    }; \
+//----------------------------------------------------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------------------------------
+// CurveTreesGlobalTree helpers
+//----------------------------------------------------------------------------------------------------------------------
+template<typename C>
+static bool validate_layer(const std::unique_ptr<C> &curve,
+    const CurveTreesGlobalTree::Layer<C> &parents,
+    const std::vector<typename C::Scalar> &child_scalars,
+    const std::size_t max_chunk_size)
+{
+    // Hash each chunk of child scalars, then check that the hash matches the respective parent
+    std::size_t chunk_start_idx = 0;
+    for (std::size_t i = 0; i < parents.size(); ++i)
+    {
+        CHECK_AND_ASSERT_MES(child_scalars.size() > chunk_start_idx, false, "chunk start too high");
+        const std::size_t chunk_size = std::min(child_scalars.size() - chunk_start_idx, max_chunk_size);
+        CHECK_AND_ASSERT_MES(child_scalars.size() >= (chunk_start_idx + chunk_size), false, "chunk size too large");
+
+        const typename C::Point &parent = parents[i];
+
+        const auto chunk_start = child_scalars.data() + chunk_start_idx;
+        const typename C::Chunk chunk{chunk_start, chunk_size};
+
+        for (std::size_t j = 0; j < chunk_size; ++j)
+            MDEBUG("Hashing " << curve->to_string(chunk_start[j]));
+
+        const typename C::Point chunk_hash = fcmp_pp::curve_trees::get_new_parent(curve, chunk);
+
+        MDEBUG("chunk_start_idx: " << chunk_start_idx << " , chunk_size: " << chunk_size << " , chunk_hash: " << curve->to_string(chunk_hash));
+
+        const auto actual_bytes = curve->to_bytes(parent);
+        const auto expected_bytes = curve->to_bytes(chunk_hash);
+        CHECK_AND_ASSERT_MES(actual_bytes == expected_bytes, false, "unexpected hash");
+
+        chunk_start_idx += chunk_size;
+    }
+
+    CHECK_AND_ASSERT_THROW_MES(chunk_start_idx == child_scalars.size(), "unexpected ending chunk start idx");
+
+    return true;
+}
+//----------------------------------------------------------------------------------------------------------------------
+template<typename C_CHILD, typename C_PARENT>
+static std::vector<typename C_PARENT::Scalar> get_last_chunk_children_to_trim(const std::unique_ptr<C_CHILD> &c_child,
+    const CurveTreesGlobalTree::Layer<C_CHILD> &child_layer,
+    const bool need_last_chunk_children_to_trim,
+    const bool need_last_chunk_remaining_children,
+    const std::size_t start_trim_idx,
+    const std::size_t end_trim_idx)
+{
+    std::vector<typename C_PARENT::Scalar> children_to_trim_out;
+    if (end_trim_idx > start_trim_idx)
+    {
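+        // Collect each child in [start_trim_idx, end_trim_idx) as a scalar on the
+        // parent curve; these are the children that get re-hashed when the last
+        // chunk of the layer above shrinks.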
+        std::size_t idx = start_trim_idx;
+        MDEBUG("Start trim from idx: " << idx << " , ending trim at: " << end_trim_idx);
+        do
+        {
+            CHECK_AND_ASSERT_THROW_MES(child_layer.size() > idx, "idx too high");
+            const auto &child_point = child_layer[idx];
+
+            auto child_scalar = c_child->point_to_cycle_scalar(child_point);
+            children_to_trim_out.push_back(std::move(child_scalar));
+
+            ++idx;
+        }
+        while (idx < end_trim_idx);
+    }
+
+    return children_to_trim_out;
+}
+//----------------------------------------------------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------------------------------
+// CurveTreesGlobalTree public implementations
+//----------------------------------------------------------------------------------------------------------------------
+std::size_t CurveTreesGlobalTree::get_num_leaf_tuples() const
+{
+    return m_tree.leaves.size();
+}
+//----------------------------------------------------------------------------------------------------------------------
+bool CurveTreesGlobalTree::grow_tree(const std::size_t expected_old_n_leaf_tuples, const std::size_t new_n_leaf_tuples)
+{
+    // Do initial tree reads
+    const std::size_t old_n_leaf_tuples = this->get_num_leaf_tuples();
+    CHECK_AND_ASSERT_MES(old_n_leaf_tuples == expected_old_n_leaf_tuples, false, "unexpected old_n_leaf_tuples");
+    const CurveTreesV1::LastHashes last_hashes = this->get_last_hashes();
+
+    this->log_last_hashes(last_hashes);
+
+    auto new_outputs = generate_random_leaves(m_curve_trees, old_n_leaf_tuples, new_n_leaf_tuples);
+
+    // Get a tree extension for the existing tree using randomly generated leaves
+    // - The tree extension includes all elements we'll need to add to the existing tree when adding the new leaves
+    const auto tree_extension = m_curve_trees.get_tree_extension(old_n_leaf_tuples,
+        last_hashes,
+        std::move(new_outputs));
+
+    this->log_tree_extension(tree_extension);
+
+    // Use the tree extension to extend the existing tree
+    this->extend_tree(tree_extension);
+
+    this->log_tree();
+
+    // Validate tree structure and all hashes
+    const std::size_t expected_n_leaf_tuples = old_n_leaf_tuples + new_n_leaf_tuples;
+    return this->audit_tree(expected_n_leaf_tuples);
+}
+//----------------------------------------------------------------------------------------------------------------------
+bool CurveTreesGlobalTree::trim_tree(const std::size_t expected_old_n_leaf_tuples, const std::size_t trim_n_leaf_tuples)
+{
+    const std::size_t old_n_leaf_tuples = this->get_num_leaf_tuples();
+    CHECK_AND_ASSERT_MES(old_n_leaf_tuples == expected_old_n_leaf_tuples, false, "unexpected old_n_leaf_tuples");
+    CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples >= trim_n_leaf_tuples, "cannot trim more leaves than exist");
+    CHECK_AND_ASSERT_THROW_MES(trim_n_leaf_tuples > 0, "must be trimming some leaves");
+
+    // Trim the global tree by `trim_n_leaf_tuples`
+    LOG_PRINT_L1("Trimming " << trim_n_leaf_tuples << " leaf tuples from tree with "
+        << old_n_leaf_tuples << " leaves in memory");
+
+    // Get trim instructions
+    const auto trim_instructions = m_curve_trees.get_trim_instructions(old_n_leaf_tuples, trim_n_leaf_tuples);
+    MDEBUG("Acquired trim instructions for " << trim_instructions.size() << " layers");
+
+    // Do initial tree reads
+    const auto last_chunk_children_to_trim = this->get_all_last_chunk_children_to_trim(trim_instructions);
+    const auto last_hashes_to_trim = 
this->get_last_hashes_to_trim(trim_instructions); + + // Get the new hashes, wrapped in a simple struct we can use to trim the tree + const auto tree_reduction = m_curve_trees.get_tree_reduction( + trim_instructions, + last_chunk_children_to_trim, + last_hashes_to_trim); + + // Use tree reduction to trim tree + this->reduce_tree(tree_reduction); + + const std::size_t new_n_leaf_tuples = this->get_num_leaf_tuples(); + CHECK_AND_ASSERT_THROW_MES((new_n_leaf_tuples + trim_n_leaf_tuples) == old_n_leaf_tuples, + "unexpected num leaves after trim"); + + MDEBUG("Finished trimming " << trim_n_leaf_tuples << " leaf tuples from tree"); + + this->log_tree(); + + const std::size_t expected_n_leaf_tuples = old_n_leaf_tuples - trim_n_leaf_tuples; + bool res = this->audit_tree(expected_n_leaf_tuples); + CHECK_AND_ASSERT_MES(res, false, "failed to trim tree in memory"); + + MDEBUG("Successfully trimmed " << trim_n_leaf_tuples << " leaves in memory"); + return true; +} +//---------------------------------------------------------------------------------------------------------------------- +bool CurveTreesGlobalTree::audit_tree(const std::size_t expected_n_leaf_tuples) const +{ + MDEBUG("Auditing global tree"); + + auto leaves = m_tree.leaves; + const auto &c1_layers = m_tree.c1_layers; + const auto &c2_layers = m_tree.c2_layers; + + CHECK_AND_ASSERT_MES(leaves.size() == expected_n_leaf_tuples, false, "unexpected num leaves"); + + if (leaves.empty()) + { + CHECK_AND_ASSERT_MES(c2_layers.empty() && c1_layers.empty(), false, "expected empty tree"); + return true; + } + + CHECK_AND_ASSERT_MES(!c2_layers.empty(), false, "must have at least 1 c2 layer in tree"); + CHECK_AND_ASSERT_MES(c2_layers.size() == c1_layers.size() || c2_layers.size() == (c1_layers.size() + 1), + false, "unexpected mismatch of c2 and c1 layers"); + + // Verify root has 1 member in it + const bool c2_is_root = c2_layers.size() > c1_layers.size(); + CHECK_AND_ASSERT_MES(c2_is_root ? c2_layers.back().size() == 1 : c1_layers.back().size() == 1, false, + "root must have 1 member in it"); + + // Iterate from root down to layer above leaves, and check hashes match up correctly + bool parent_is_c2 = c2_is_root; + std::size_t c2_idx = c2_layers.size() - 1; + std::size_t c1_idx = c1_layers.empty() ? 
0 : (c1_layers.size() - 1);
+    for (std::size_t i = 1; i < (c2_layers.size() + c1_layers.size()); ++i)
+    {
+        // TODO: implement templated function for below if statement
+        if (parent_is_c2)
+        {
+            MDEBUG("Validating parent c2 layer " << c2_idx << " , child c1 layer " << c1_idx);
+
+            CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layers.size(), "unexpected c2_idx");
+            CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layers.size(), "unexpected c1_idx");
+
+            const Layer<Selene> &parents = c2_layers[c2_idx];
+            const Layer<Helios> &children = c1_layers[c1_idx];
+
+            CHECK_AND_ASSERT_MES(!parents.empty(), false, "no parents at c2_idx " + std::to_string(c2_idx));
+            CHECK_AND_ASSERT_MES(!children.empty(), false, "no children at c1_idx " + std::to_string(c1_idx));
+
+            std::vector<Selene::Scalar> child_scalars;
+            fcmp_pp::tower_cycle::extend_scalars_from_cycle_points<Helios, Selene>(m_curve_trees.m_c1,
+                children,
+                child_scalars);
+
+            const bool valid = validate_layer<Selene>(m_curve_trees.m_c2,
+                parents,
+                child_scalars,
+                m_curve_trees.m_c2_width);
+
+            CHECK_AND_ASSERT_MES(valid, false, "failed to validate c2_idx " + std::to_string(c2_idx));
+
+            --c2_idx;
+        }
+        else
+        {
+            MDEBUG("Validating parent c1 layer " << c1_idx << " , child c2 layer " << c2_idx);
+
+            CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layers.size(), "unexpected c1_idx");
+            CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layers.size(), "unexpected c2_idx");
+
+            const Layer<Helios> &parents = c1_layers[c1_idx];
+            const Layer<Selene> &children = c2_layers[c2_idx];
+
+            CHECK_AND_ASSERT_MES(!parents.empty(), false, "no parents at c1_idx " + std::to_string(c1_idx));
+            CHECK_AND_ASSERT_MES(!children.empty(), false, "no children at c2_idx " + std::to_string(c2_idx));
+
+            std::vector<Helios::Scalar> child_scalars;
+            fcmp_pp::tower_cycle::extend_scalars_from_cycle_points<Selene, Helios>(m_curve_trees.m_c2,
+                children,
+                child_scalars);
+
+            const bool valid = validate_layer<Helios>(
+                m_curve_trees.m_c1,
+                parents,
+                child_scalars,
+                m_curve_trees.m_c1_width);
+
+            CHECK_AND_ASSERT_MES(valid, false, "failed to validate c1_idx " + std::to_string(c1_idx));
+
+            --c1_idx;
+        }
+
+        parent_is_c2 = !parent_is_c2;
+    }
+
+    MDEBUG("Validating leaves");
+
+    // Convert output pairs to leaf tuples
+    std::vector<CurveTreesV1::LeafTuple> leaf_tuples;
+    leaf_tuples.reserve(leaves.size());
+    for (const auto &leaf : leaves)
+    {
+        auto leaf_tuple = m_curve_trees.leaf_tuple(leaf);
+        leaf_tuples.emplace_back(std::move(leaf_tuple));
+    }
+
+    // Now validate leaves
+    return validate_layer<Selene>(m_curve_trees.m_c2,
+        c2_layers[0],
+        m_curve_trees.flatten_leaves(std::move(leaf_tuples)),
+        m_curve_trees.m_leaf_layer_chunk_width);
+}
+//----------------------------------------------------------------------------------------------------------------------
+fcmp_pp::curve_trees::PathV1 CurveTreesGlobalTree::get_path_at_leaf_idx(const std::size_t leaf_idx) const
+{
+    fcmp_pp::curve_trees::PathV1 path_out;
+
+    const std::size_t n_leaf_tuples = get_num_leaf_tuples();
+    CHECK_AND_ASSERT_THROW_MES(n_leaf_tuples > leaf_idx, "too high leaf idx");
+
+    // Get leaves
+    const std::size_t start_leaf_idx = (leaf_idx / m_curve_trees.m_c2_width) * m_curve_trees.m_c2_width;
+    const std::size_t end_leaf_idx = std::min(n_leaf_tuples, start_leaf_idx + m_curve_trees.m_c2_width);
+    for (std::size_t i = start_leaf_idx; i < end_leaf_idx; ++i)
+    {
+        const auto &output_pair = m_tree.leaves[i];
+
+        const crypto::public_key &output_pubkey = output_pair.output_pubkey;
+        const rct::key &commitment = output_pair.commitment;
+
+        crypto::ec_point I;
+        crypto::derive_key_image_generator(output_pubkey, I);
+
+        rct::key O = rct::pk2rct(output_pubkey);
+        rct::key C = commitment;
+
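+        // Assemble the full output tuple (O, I, C) for this path position; I is the
+        // key image generator Hp(O) derived above from the output pubkey.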
+        auto output_tuple = fcmp_pp::curve_trees::OutputTuple{
+                .O = std::move(O),
+                .I = rct::pt2rct(I),
+                .C = std::move(C)
+            };
+
+        path_out.leaves.emplace_back(std::move(output_tuple));
+    }
+
+    // Get parents
+    const std::size_t n_layers = m_tree.c1_layers.size() + m_tree.c2_layers.size();
+    std::size_t start_parent_idx = start_leaf_idx / m_curve_trees.m_c2_width;
+    std::size_t c1_idx = 0, c2_idx = 0;
+    bool use_c2 = true;
+    for (std::size_t i = 0; i < n_layers; ++i)
+    {
+        if (use_c2)
+        {
+            path_out.c2_layers.emplace_back();
+            auto &layer_out = path_out.c2_layers.back();
+
+            CHECK_AND_ASSERT_THROW_MES(m_tree.c2_layers.size() > c2_idx, "too high c2_idx");
+            const std::size_t n_layer_elems = m_tree.c2_layers[c2_idx].size();
+
+            CHECK_AND_ASSERT_THROW_MES(n_layer_elems > start_parent_idx, "too high parent idx");
+            const std::size_t end_parent_idx = std::min(n_layer_elems, start_parent_idx + m_curve_trees.m_c2_width);
+
+            for (std::size_t j = start_parent_idx; j < end_parent_idx; ++j)
+            {
+                layer_out.emplace_back(m_tree.c2_layers[c2_idx][j]);
+            }
+
+            start_parent_idx /= m_curve_trees.m_c1_width;
+            ++c2_idx;
+        }
+        else
+        {
+            path_out.c1_layers.emplace_back();
+            auto &layer_out = path_out.c1_layers.back();
+
+            CHECK_AND_ASSERT_THROW_MES(m_tree.c1_layers.size() > c1_idx, "too high c1_idx");
+            const std::size_t n_layer_elems = m_tree.c1_layers[c1_idx].size();
+
+            CHECK_AND_ASSERT_THROW_MES(n_layer_elems > start_parent_idx, "too high parent idx");
+            const std::size_t end_parent_idx = std::min(n_layer_elems, start_parent_idx + m_curve_trees.m_c1_width);
+
+            for (std::size_t j = start_parent_idx; j < end_parent_idx; ++j)
+            {
+                layer_out.emplace_back(m_tree.c1_layers[c1_idx][j]);
+            }
+
+            start_parent_idx /= m_curve_trees.m_c2_width;
+            ++c1_idx;
+        }
+
+        use_c2 = !use_c2;
+    }
+
+    return path_out;
+}
+//----------------------------------------------------------------------------------------------------------------------
+std::array<uint8_t, 32UL> CurveTreesGlobalTree::get_tree_root() const
+{
+    const std::size_t n_layers = m_tree.c1_layers.size() + m_tree.c2_layers.size();
+
+    if (n_layers == 0)
+        return std::array<uint8_t, 32UL>();
+
+    if ((n_layers % 2) == 0)
+    {
+        CHECK_AND_ASSERT_THROW_MES(!m_tree.c1_layers.empty(), "missing c1 layers");
+        const auto &last_layer = m_tree.c1_layers.back();
+        CHECK_AND_ASSERT_THROW_MES(!last_layer.empty(), "missing elems from last c1 layer");
+        return m_curve_trees.m_c1->to_bytes(last_layer.back());
+    }
+    else
+    {
+        CHECK_AND_ASSERT_THROW_MES(!m_tree.c2_layers.empty(), "missing c2 layers");
+        const auto &last_layer = m_tree.c2_layers.back();
+        CHECK_AND_ASSERT_THROW_MES(!last_layer.empty(), "missing elems from last c2 layer");
+        return m_curve_trees.m_c2->to_bytes(last_layer.back());
+    }
+}
+//----------------------------------------------------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------------------------------
+// CurveTreesGlobalTree private implementations
+//----------------------------------------------------------------------------------------------------------------------
+void CurveTreesGlobalTree::extend_tree(const CurveTreesV1::TreeExtension &tree_extension)
+{
+    // Add the leaves
+    CHECK_AND_ASSERT_THROW_MES(m_tree.leaves.size() == tree_extension.leaves.start_leaf_tuple_idx,
+        "unexpected leaf start idx");
+
+    m_tree.leaves.reserve(m_tree.leaves.size() + tree_extension.leaves.tuples.size());
+    for (const auto &o : tree_extension.leaves.tuples)
+    {
+        m_tree.leaves.emplace_back(o.output_pair);
+    }
+
+    // Add the layers
+    const auto &c2_extensions = tree_extension.c2_layer_extensions;
+    const auto &c1_extensions = tree_extension.c1_layer_extensions;
+    CHECK_AND_ASSERT_THROW_MES(!c2_extensions.empty(), "empty c2 extensions");
+
+    bool use_c2 = true;
+    std::size_t c2_idx = 0;
+    std::size_t c1_idx = 0;
+    for (std::size_t i = 0; i < (c2_extensions.size() + c1_extensions.size()); ++i)
+    {
+        // TODO: template below if statement
+        if (use_c2)
+        {
+            CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_extensions.size(), "unexpected c2 layer extension");
+            const fcmp_pp::curve_trees::LayerExtension<Selene> &c2_ext = c2_extensions[c2_idx];
+
+            CHECK_AND_ASSERT_THROW_MES(!c2_ext.hashes.empty(), "empty c2 layer extension");
+
+            CHECK_AND_ASSERT_THROW_MES(c2_idx <= m_tree.c2_layers.size(), "missing c2 layer");
+            if (m_tree.c2_layers.size() == c2_idx)
+                m_tree.c2_layers.emplace_back(Layer<Selene>{});
+
+            auto &c2_inout = m_tree.c2_layers[c2_idx];
+
+            const bool started_after_tip = (c2_inout.size() == c2_ext.start_idx);
+            const bool started_at_tip = (c2_inout.size() == (c2_ext.start_idx + 1));
+            CHECK_AND_ASSERT_THROW_MES(started_after_tip || started_at_tip, "unexpected c2 layer start");
+
+            // We updated the last hash
+            if (started_at_tip)
+            {
+                CHECK_AND_ASSERT_THROW_MES(c2_ext.update_existing_last_hash, "expect to be updating last hash");
+                c2_inout.back() = c2_ext.hashes.front();
+            }
+            else
+            {
+                CHECK_AND_ASSERT_THROW_MES(!c2_ext.update_existing_last_hash, "unexpected last hash update");
+            }
+
+            for (std::size_t i = started_at_tip ? 1 : 0; i < c2_ext.hashes.size(); ++i)
+                c2_inout.emplace_back(c2_ext.hashes[i]);
+
+            ++c2_idx;
+        }
+        else
+        {
+            CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_extensions.size(), "unexpected c1 layer extension");
+            const fcmp_pp::curve_trees::LayerExtension<Helios> &c1_ext = c1_extensions[c1_idx];
+
+            CHECK_AND_ASSERT_THROW_MES(!c1_ext.hashes.empty(), "empty c1 layer extension");
+
+            CHECK_AND_ASSERT_THROW_MES(c1_idx <= m_tree.c1_layers.size(), "missing c1 layer");
+            if (m_tree.c1_layers.size() == c1_idx)
+                m_tree.c1_layers.emplace_back(Layer<Helios>{});
+
+            auto &c1_inout = m_tree.c1_layers[c1_idx];
+
+            const bool started_after_tip = (c1_inout.size() == c1_ext.start_idx);
+            const bool started_at_tip = (c1_inout.size() == (c1_ext.start_idx + 1));
+            CHECK_AND_ASSERT_THROW_MES(started_after_tip || started_at_tip, "unexpected c1 layer start");
+
+            // We updated the last hash
+            if (started_at_tip)
+            {
+                CHECK_AND_ASSERT_THROW_MES(c1_ext.update_existing_last_hash, "expect to be updating last hash");
+                c1_inout.back() = c1_ext.hashes.front();
+            }
+            else
+            {
+                CHECK_AND_ASSERT_THROW_MES(!c1_ext.update_existing_last_hash, "unexpected last hash update");
+            }
+
+            for (std::size_t i = started_at_tip ? 
1 : 0; i < c1_ext.hashes.size(); ++i) + c1_inout.emplace_back(c1_ext.hashes[i]); + + ++c1_idx; + } + + use_c2 = !use_c2; + } +} +//---------------------------------------------------------------------------------------------------------------------- +void CurveTreesGlobalTree::reduce_tree(const CurveTreesV1::TreeReduction &tree_reduction) +{ + // Trim the leaves + CHECK_AND_ASSERT_THROW_MES(m_tree.leaves.size() > tree_reduction.new_total_leaf_tuples, + "expected fewer new total leaves"); + while (m_tree.leaves.size() > tree_reduction.new_total_leaf_tuples) + m_tree.leaves.pop_back(); + + // Trim the layers + const auto &c2_layer_reductions = tree_reduction.c2_layer_reductions; + const auto &c1_layer_reductions = tree_reduction.c1_layer_reductions; + CHECK_AND_ASSERT_THROW_MES(c2_layer_reductions.size() == c1_layer_reductions.size() + || c2_layer_reductions.size() == (c1_layer_reductions.size() + 1), + "unexpected mismatch of c2 and c1 layer reductions"); + + bool use_c2 = true; + std::size_t c2_idx = 0; + std::size_t c1_idx = 0; + for (std::size_t i = 0; i < (c2_layer_reductions.size() + c1_layer_reductions.size()); ++i) + { + // TODO: template below if statement + if (use_c2) + { + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layer_reductions.size(), "unexpected c2 layer reduction"); + const auto &c2_reduction = c2_layer_reductions[c2_idx]; + + CHECK_AND_ASSERT_THROW_MES(c2_idx < m_tree.c2_layers.size(), "missing c2 layer"); + auto &c2_inout = m_tree.c2_layers[c2_idx]; + + CHECK_AND_ASSERT_THROW_MES(c2_reduction.new_total_parents <= c2_inout.size(), + "unexpected c2 new total parents"); + + c2_inout.resize(c2_reduction.new_total_parents); + c2_inout.shrink_to_fit(); + + // We updated the last hash + if (c2_reduction.update_existing_last_hash) + { + c2_inout.back() = c2_reduction.new_last_hash; + } + + ++c2_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layer_reductions.size(), "unexpected c1 layer reduction"); + const auto &c1_reduction = c1_layer_reductions[c1_idx]; + + CHECK_AND_ASSERT_THROW_MES(c1_idx < m_tree.c1_layers.size(), "missing c1 layer"); + auto &c1_inout = m_tree.c1_layers[c1_idx]; + + CHECK_AND_ASSERT_THROW_MES(c1_reduction.new_total_parents <= c1_inout.size(), + "unexpected c1 new total parents"); + + c1_inout.resize(c1_reduction.new_total_parents); + c1_inout.shrink_to_fit(); + + // We updated the last hash + if (c1_reduction.update_existing_last_hash) + { + c1_inout.back() = c1_reduction.new_last_hash; + } + + ++c1_idx; + } + + use_c2 = !use_c2; + } + + // Delete remaining layers + m_tree.c1_layers.resize(c1_layer_reductions.size()); + m_tree.c2_layers.resize(c2_layer_reductions.size()); + + m_tree.c1_layers.shrink_to_fit(); + m_tree.c2_layers.shrink_to_fit(); +} +//---------------------------------------------------------------------------------------------------------------------- +CurveTreesV1::LastHashes CurveTreesGlobalTree::get_last_hashes() const +{ + CurveTreesV1::LastHashes last_hashes_out; + auto &c1_last_hashes_out = last_hashes_out.c1_last_hashes; + auto &c2_last_hashes_out = last_hashes_out.c2_last_hashes; + + const auto &c1_layers = m_tree.c1_layers; + const auto &c2_layers = m_tree.c2_layers; + + // We started with c2 and then alternated, so c2 is the same size or 1 higher than c1 + CHECK_AND_ASSERT_THROW_MES(c2_layers.size() == c1_layers.size() || c2_layers.size() == (c1_layers.size() + 1), + "unexpected number of curve layers"); + + c1_last_hashes_out.reserve(c1_layers.size()); + c2_last_hashes_out.reserve(c2_layers.size()); + + if 
(c2_layers.empty())
+        return last_hashes_out;
+
+    // Next parents will be c2
+    bool use_c2 = true;
+
+    // Then get last chunks up until the root
+    std::size_t c1_idx = 0;
+    std::size_t c2_idx = 0;
+    while (c1_last_hashes_out.size() < c1_layers.size() || c2_last_hashes_out.size() < c2_layers.size())
+    {
+        if (use_c2)
+        {
+            CHECK_AND_ASSERT_THROW_MES(c2_layers.size() > c2_idx, "missing c2 layer");
+            c2_last_hashes_out.push_back(c2_layers[c2_idx].back());
+            ++c2_idx;
+        }
+        else
+        {
+            CHECK_AND_ASSERT_THROW_MES(c1_layers.size() > c1_idx, "missing c1 layer");
+            c1_last_hashes_out.push_back(c1_layers[c1_idx].back());
+            ++c1_idx;
+        }
+
+        use_c2 = !use_c2;
+    }
+
+    return last_hashes_out;
+}
+//----------------------------------------------------------------------------------------------------------------------
+// TODO: template
+CurveTreesV1::LastChunkChildrenToTrim CurveTreesGlobalTree::get_all_last_chunk_children_to_trim(
+    const std::vector<fcmp_pp::curve_trees::TrimLayerInstructions> &trim_instructions)
+{
+    CurveTreesV1::LastChunkChildrenToTrim all_children_to_trim;
+
+    if (trim_instructions.empty())
+        return all_children_to_trim;
+
+    // Leaf layer
+    const auto &trim_leaf_layer_instructions = trim_instructions[0];
+
+    std::vector<Selene::Scalar> leaves_to_trim;
+
+    // TODO: separate function
+    if (trim_leaf_layer_instructions.end_trim_idx > trim_leaf_layer_instructions.start_trim_idx)
+    {
+        std::size_t idx = trim_leaf_layer_instructions.start_trim_idx;
+        MDEBUG("Start trim from idx: " << idx);
+        do
+        {
+            CHECK_AND_ASSERT_THROW_MES(idx % CurveTreesV1::LEAF_TUPLE_SIZE == 0, "expected idx divisible by leaf tuple size");
+            const std::size_t leaf_tuple_idx = idx / CurveTreesV1::LEAF_TUPLE_SIZE;
+
+            CHECK_AND_ASSERT_THROW_MES(m_tree.leaves.size() > leaf_tuple_idx, "leaf_tuple_idx too high");
+            const auto leaf_tuple = m_curve_trees.leaf_tuple(m_tree.leaves[leaf_tuple_idx]);
+
+            leaves_to_trim.push_back(leaf_tuple.O_x);
+            leaves_to_trim.push_back(leaf_tuple.I_x);
+            leaves_to_trim.push_back(leaf_tuple.C_x);
+
+            idx += CurveTreesV1::LEAF_TUPLE_SIZE;
+        }
+        while (idx < trim_leaf_layer_instructions.end_trim_idx);
+    }
+
+    all_children_to_trim.c2_children.emplace_back(std::move(leaves_to_trim));
+
+    bool parent_is_c2 = false;
+    std::size_t c1_idx = 0;
+    std::size_t c2_idx = 0;
+    for (std::size_t i = 1; i < trim_instructions.size(); ++i)
+    {
+        MDEBUG("Getting trim instructions for layer " << i);
+
+        const auto &trim_layer_instructions = trim_instructions[i];
+
+        const bool need_last_chunk_children_to_trim = trim_layer_instructions.need_last_chunk_children_to_trim;
+        const bool need_last_chunk_remaining_children = trim_layer_instructions.need_last_chunk_remaining_children;
+        const std::size_t start_trim_idx = trim_layer_instructions.start_trim_idx;
+        const std::size_t end_trim_idx = trim_layer_instructions.end_trim_idx;
+
+        if (parent_is_c2)
+        {
+            CHECK_AND_ASSERT_THROW_MES(m_tree.c1_layers.size() > c1_idx, "c1_idx too high");
+
+            auto children_to_trim = get_last_chunk_children_to_trim<Helios, Selene>(
+                m_curve_trees.m_c1,
+                m_tree.c1_layers[c1_idx],
+                need_last_chunk_children_to_trim,
+                need_last_chunk_remaining_children,
+                start_trim_idx,
+                end_trim_idx);
+
+            all_children_to_trim.c2_children.emplace_back(std::move(children_to_trim));
+            ++c1_idx;
+        }
+        else
+        {
+            CHECK_AND_ASSERT_THROW_MES(m_tree.c2_layers.size() > c2_idx, "c2_idx too high");
+
+            auto children_to_trim = get_last_chunk_children_to_trim<Selene, Helios>(
+                m_curve_trees.m_c2,
+                m_tree.c2_layers[c2_idx],
+                need_last_chunk_children_to_trim,
+                need_last_chunk_remaining_children,
+                start_trim_idx,
+                end_trim_idx);
+
+            
all_children_to_trim.c1_children.emplace_back(std::move(children_to_trim));
+            ++c2_idx;
+        }
+
+        parent_is_c2 = !parent_is_c2;
+    }
+
+    return all_children_to_trim;
+}
+//----------------------------------------------------------------------------------------------------------------------
+CurveTreesV1::LastHashes CurveTreesGlobalTree::get_last_hashes_to_trim(
+    const std::vector<fcmp_pp::curve_trees::TrimLayerInstructions> &trim_instructions) const
+{
+    CurveTreesV1::LastHashes last_hashes;
+
+    if (trim_instructions.empty())
+        return last_hashes;
+
+    bool parent_is_c2 = true;
+    std::size_t c1_idx = 0;
+    std::size_t c2_idx = 0;
+    for (const auto &trim_layer_instructions : trim_instructions)
+    {
+        const std::size_t new_total_parents = trim_layer_instructions.new_total_parents;
+        CHECK_AND_ASSERT_THROW_MES(new_total_parents > 0, "no new parents");
+
+        if (parent_is_c2)
+        {
+            CHECK_AND_ASSERT_THROW_MES(m_tree.c2_layers.size() > c2_idx, "c2_idx too high");
+            const auto &c2_layer = m_tree.c2_layers[c2_idx];
+
+            CHECK_AND_ASSERT_THROW_MES(c2_layer.size() >= new_total_parents, "not enough c2 parents");
+
+            last_hashes.c2_last_hashes.push_back(c2_layer[new_total_parents - 1]);
+            ++c2_idx;
+        }
+        else
+        {
+            CHECK_AND_ASSERT_THROW_MES(m_tree.c1_layers.size() > c1_idx, "c1_idx too high");
+            const auto &c1_layer = m_tree.c1_layers[c1_idx];
+
+            CHECK_AND_ASSERT_THROW_MES(c1_layer.size() >= new_total_parents, "not enough c1 parents");
+
+            last_hashes.c1_last_hashes.push_back(c1_layer[new_total_parents - 1]);
+            ++c1_idx;
+        }
+
+        parent_is_c2 = !parent_is_c2;
+    }
+
+    return last_hashes;
+}
+//----------------------------------------------------------------------------------------------------------------------
+// Logging helpers
+//----------------------------------------------------------------------------------------------------------------------
+void CurveTreesGlobalTree::log_last_hashes(const CurveTreesV1::LastHashes &last_hashes)
+{
+    if (!el::Loggers::allowed(el::Level::Debug, "serialization"))
+        return;
+
+    const auto &c1_last_hashes = last_hashes.c1_last_hashes;
+    const auto &c2_last_hashes = last_hashes.c2_last_hashes;
+
+    MDEBUG("Total of " << c1_last_hashes.size() << " Helios layers and " << c2_last_hashes.size() << " Selene layers");
+
+    bool use_c2 = true;
+    std::size_t c1_idx = 0;
+    std::size_t c2_idx = 0;
+    for (std::size_t i = 0; i < (c1_last_hashes.size() + c2_last_hashes.size()); ++i)
+    {
+        if (use_c2)
+        {
+            CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_last_hashes.size(), "unexpected c2 layer");
+
+            const auto &last_hash = c2_last_hashes[c2_idx];
+            MDEBUG("c2_idx: " << c2_idx << " , last_hash: " << m_curve_trees.m_c2->to_string(last_hash));
+
+            ++c2_idx;
+        }
+        else
+        {
+            CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_last_hashes.size(), "unexpected c1 layer");
+
+            const auto &last_hash = c1_last_hashes[c1_idx];
+            MDEBUG("c1_idx: " << c1_idx << " , last_hash: " << m_curve_trees.m_c1->to_string(last_hash));
+
+            ++c1_idx;
+        }
+
+        use_c2 = !use_c2;
+    }
+}
+//----------------------------------------------------------------------------------------------------------------------
+void CurveTreesGlobalTree::log_tree_extension(const CurveTreesV1::TreeExtension &tree_extension)
+{
+    if (!el::Loggers::allowed(el::Level::Debug, "serialization"))
+        return;
+
+    const auto &c1_extensions = tree_extension.c1_layer_extensions;
+    const auto &c2_extensions = tree_extension.c2_layer_extensions;
+
+    MDEBUG("Tree extension has " << tree_extension.leaves.tuples.size() << " leaves, "
+        << c1_extensions.size() << " helios layers, " << c2_extensions.size() << " selene layers");
layers"); + + MDEBUG("Leaf start idx: " << tree_extension.leaves.start_leaf_tuple_idx); + for (std::size_t i = 0; i < tree_extension.leaves.tuples.size(); ++i) + { + const auto &output_pair = tree_extension.leaves.tuples[i].output_pair; + const auto leaf = m_curve_trees.leaf_tuple(output_pair); + + const auto O_x = m_curve_trees.m_c2->to_string(leaf.O_x); + const auto I_x = m_curve_trees.m_c2->to_string(leaf.I_x); + const auto C_x = m_curve_trees.m_c2->to_string(leaf.C_x); + + MDEBUG("Leaf tuple idx " << (tree_extension.leaves.start_leaf_tuple_idx + (i * CurveTreesV1::LEAF_TUPLE_SIZE)) + << " : { O_x: " << O_x << " , I_x: " << I_x << " , C_x: " << C_x << " }"); + } + + bool use_c2 = true; + std::size_t c1_idx = 0; + std::size_t c2_idx = 0; + for (std::size_t i = 0; i < (c1_extensions.size() + c2_extensions.size()); ++i) + { + if (use_c2) + { + CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_extensions.size(), "unexpected c2 layer"); + + const fcmp_pp::curve_trees::LayerExtension &c2_layer = c2_extensions[c2_idx]; + MDEBUG("Selene tree extension start idx: " << c2_layer.start_idx); + + for (std::size_t j = 0; j < c2_layer.hashes.size(); ++j) + MDEBUG("Child chunk start idx: " << (j + c2_layer.start_idx) << " , hash: " + << m_curve_trees.m_c2->to_string(c2_layer.hashes[j])); + + ++c2_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_extensions.size(), "unexpected c1 layer"); + + const fcmp_pp::curve_trees::LayerExtension &c1_layer = c1_extensions[c1_idx]; + MDEBUG("Helios tree extension start idx: " << c1_layer.start_idx); + + for (std::size_t j = 0; j < c1_layer.hashes.size(); ++j) + MDEBUG("Child chunk start idx: " << (j + c1_layer.start_idx) << " , hash: " + << m_curve_trees.m_c1->to_string(c1_layer.hashes[j])); + + ++c1_idx; + } + + use_c2 = !use_c2; + } +} +//---------------------------------------------------------------------------------------------------------------------- +void CurveTreesGlobalTree::log_tree() +{ + if (!el::Loggers::allowed(el::Level::Debug, "serialization")) + return; + + MDEBUG("Tree has " << m_tree.leaves.size() << " leaves, " + << m_tree.c1_layers.size() << " helios layers, " << m_tree.c2_layers.size() << " selene layers"); + + for (std::size_t i = 0; i < m_tree.leaves.size(); ++i) + { + const auto leaf = m_curve_trees.leaf_tuple(m_tree.leaves[i]); + + const auto O_x = m_curve_trees.m_c2->to_string(leaf.O_x); + const auto I_x = m_curve_trees.m_c2->to_string(leaf.I_x); + const auto C_x = m_curve_trees.m_c2->to_string(leaf.C_x); + + MDEBUG("Leaf idx " << i << " : { O_x: " << O_x << " , I_x: " << I_x << " , C_x: " << C_x << " }"); + } + + bool use_c2 = true; + std::size_t c1_idx = 0; + std::size_t c2_idx = 0; + for (std::size_t i = 0; i < (m_tree.c1_layers.size() + m_tree.c2_layers.size()); ++i) + { + if (use_c2) + { + CHECK_AND_ASSERT_THROW_MES(c2_idx < m_tree.c2_layers.size(), "unexpected c2 layer"); + + const CurveTreesGlobalTree::Layer &c2_layer = m_tree.c2_layers[c2_idx]; + MDEBUG("Selene layer size: " << c2_layer.size() << " , tree layer: " << i); + + for (std::size_t j = 0; j < c2_layer.size(); ++j) + MDEBUG("Child chunk start idx: " << j << " , hash: " << m_curve_trees.m_c2->to_string(c2_layer[j])); + + ++c2_idx; + } + else + { + CHECK_AND_ASSERT_THROW_MES(c1_idx < m_tree.c1_layers.size(), "unexpected c1 layer"); + + const CurveTreesGlobalTree::Layer &c1_layer = m_tree.c1_layers[c1_idx]; + MDEBUG("Helios layer size: " << c1_layer.size() << " , tree layer: " << i); + + for (std::size_t j = 0; j < c1_layer.size(); ++j) + MDEBUG("Child chunk start idx: " << 
j << " , hash: " << m_curve_trees.m_c1->to_string(c1_layer[j])); + + ++c1_idx; + } + + use_c2 = !use_c2; + } +} +//---------------------------------------------------------------------------------------------------------------------- +//---------------------------------------------------------------------------------------------------------------------- +// Test +//---------------------------------------------------------------------------------------------------------------------- +TEST(curve_trees, grow_tree) +{ + // Use lower values for chunk width than prod so that we can quickly test a many-layer deep tree + static const std::size_t helios_chunk_width = 3; + static const std::size_t selene_chunk_width = 2; + + static const std::size_t tree_depth = 4; + + LOG_PRINT_L1("Test grow tree with helios chunk width " << helios_chunk_width + << ", selene chunk width " << selene_chunk_width << ", tree depth " << tree_depth); + + INIT_CURVE_TREES_TEST(helios_chunk_width, selene_chunk_width, tree_depth); + + // First initialize the tree with init_leaves + BEGIN_INIT_TREE_ITER(curve_trees) + + // Then extend the tree with ext_leaves + for (std::size_t ext_leaves = 1; (init_leaves + ext_leaves) <= min_leaves_needed_for_tree_depth; ++ext_leaves) + { + // Tree in memory + // Copy the already existing global tree + CurveTreesGlobalTree tree_copy(global_tree); + ASSERT_TRUE(tree_copy.grow_tree(init_leaves, ext_leaves)); + + // Tree in db + // Copy the already existing db + unit_test::BlockchainLMDBTest copy_db = *test_db.copy_db(curve_trees); + INIT_BLOCKCHAIN_LMDB_TEST_DB(copy_db, nullptr); + ASSERT_TRUE(grow_tree_db(init_leaves, ext_leaves, curve_trees, copy_db)); + } + + END_INIT_TREE_ITER() +} +//---------------------------------------------------------------------------------------------------------------------- +TEST(curve_trees, trim_tree) +{ + // Use lower values for chunk width than prod so that we can quickly test a many-layer deep tree + static const std::size_t helios_chunk_width = 3; + static const std::size_t selene_chunk_width = 3; + + static const std::size_t tree_depth = 4; + + LOG_PRINT_L1("Test trim tree with helios chunk width " << helios_chunk_width + << ", selene chunk width " << selene_chunk_width << ", tree depth " << tree_depth); + + INIT_CURVE_TREES_TEST(helios_chunk_width, selene_chunk_width, tree_depth); + + // First initialize the tree with init_leaves + BEGIN_INIT_TREE_ITER(curve_trees) + + // Then trim by trim_leaves + for (std::size_t trim_leaves = 1; trim_leaves <= min_leaves_needed_for_tree_depth; ++trim_leaves) + { + if (trim_leaves > init_leaves) + continue; + + // Tree in memory + // Copy the already existing global tree + CurveTreesGlobalTree tree_copy(global_tree); + ASSERT_TRUE(tree_copy.trim_tree(init_leaves, trim_leaves)); + + // Tree in db + // Copy the already existing db + unit_test::BlockchainLMDBTest copy_db = *test_db.copy_db(curve_trees); + INIT_BLOCKCHAIN_LMDB_TEST_DB(copy_db, nullptr); + ASSERT_TRUE(trim_tree_db(init_leaves, trim_leaves, copy_db)); + } + + END_INIT_TREE_ITER() +} +//---------------------------------------------------------------------------------------------------------------------- +TEST(curve_trees, trim_tree_then_grow) +{ + // Use lower values for chunk width than prod so that we can quickly test a many-layer deep tree + static const std::size_t helios_chunk_width = 3; + static const std::size_t selene_chunk_width = 3; + + static const std::size_t tree_depth = 2; + + static const std::size_t grow_after_trim = 1; + + 
LOG_PRINT_L1("Test trim tree with helios chunk width " << helios_chunk_width + << ", selene chunk width " << selene_chunk_width << ", tree depth " << tree_depth + << ", then grow " << grow_after_trim << " leaf/leaves"); + + INIT_CURVE_TREES_TEST(helios_chunk_width, selene_chunk_width, tree_depth); + + // First initialize the tree with init_leaves + BEGIN_INIT_TREE_ITER(curve_trees) + + // Then trim by trim_leaves + for (std::size_t trim_leaves = 1; trim_leaves <= min_leaves_needed_for_tree_depth; ++trim_leaves) + { + if (trim_leaves > init_leaves) + continue; + + // Tree in memory + // Copy the already existing global tree + CurveTreesGlobalTree tree_copy(global_tree); + ASSERT_TRUE(tree_copy.trim_tree(init_leaves, trim_leaves)); + ASSERT_TRUE(tree_copy.grow_tree(init_leaves - trim_leaves, grow_after_trim)); + + // Tree in db + // Copy the already existing db + unit_test::BlockchainLMDBTest copy_db = *test_db.copy_db(curve_trees); + INIT_BLOCKCHAIN_LMDB_TEST_DB(copy_db, nullptr); + ASSERT_TRUE(trim_tree_db(init_leaves, trim_leaves, copy_db)); + ASSERT_TRUE(grow_tree_db(init_leaves - trim_leaves, grow_after_trim, curve_trees, copy_db)); + } + + END_INIT_TREE_ITER() +} +//---------------------------------------------------------------------------------------------------------------------- +// Make sure the result of hash_trim is the same as the equivalent hash_grow excluding the trimmed children +TEST(curve_trees, hash_trim) +{ + const auto curve_trees = fcmp_pp::curve_trees::curve_trees_v1(); + + // 1. Trim 1 + { + // Start by hashing: {selene_scalar_0, selene_scalar_1} + // Then trim to: {selene_scalar_0} + const auto selene_scalar_0 = generate_random_selene_scalar(); + const auto selene_scalar_1 = generate_random_selene_scalar(); + + // Get the initial hash of the 2 scalars + std::vector init_children{selene_scalar_0, selene_scalar_1}; + const auto init_hash = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), + /*children*/ Selene::Chunk{init_children.data(), init_children.size()}); + + // Trim selene_scalar_1 + const auto &trimmed_children = Selene::Chunk{init_children.data() + 1, 1}; + const auto trim_res = curve_trees->m_c2->hash_trim( + init_hash, + 1, + trimmed_children, + curve_trees->m_c2->zero_scalar()); + const auto trim_res_bytes = curve_trees->m_c2->to_bytes(trim_res); + + // Now compare to calling hash_grow{selene_scalar_0} + std::vector remaining_children{selene_scalar_0}; + const auto grow_res = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), + /*children*/ Selene::Chunk{remaining_children.data(), remaining_children.size()}); + const auto grow_res_bytes = curve_trees->m_c2->to_bytes(grow_res); + + ASSERT_EQ(trim_res_bytes, grow_res_bytes); + } + + // 2. 
+//---------------------------------------------------------------------------------------------------------------------- +// Make sure the result of hash_trim is the same as the equivalent hash_grow excluding the trimmed children +TEST(curve_trees, hash_trim) +{ + const auto curve_trees = fcmp_pp::curve_trees::curve_trees_v1(); + + // 1. Trim 1 + { + // Start by hashing: {selene_scalar_0, selene_scalar_1} + // Then trim to: {selene_scalar_0} + const auto selene_scalar_0 = generate_random_selene_scalar(); + const auto selene_scalar_1 = generate_random_selene_scalar(); + + // Get the initial hash of the 2 scalars + std::vector<Selene::Scalar> init_children{selene_scalar_0, selene_scalar_1}; + const auto init_hash = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), + /*children*/ Selene::Chunk{init_children.data(), init_children.size()}); + + // Trim selene_scalar_1 + const auto &trimmed_children = Selene::Chunk{init_children.data() + 1, 1}; + const auto trim_res = curve_trees->m_c2->hash_trim( + init_hash, + 1, + trimmed_children, + curve_trees->m_c2->zero_scalar()); + const auto trim_res_bytes = curve_trees->m_c2->to_bytes(trim_res); + + // Now compare to calling hash_grow{selene_scalar_0} + std::vector<Selene::Scalar> remaining_children{selene_scalar_0}; + const auto grow_res = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), + /*children*/ Selene::Chunk{remaining_children.data(), remaining_children.size()}); + const auto grow_res_bytes = curve_trees->m_c2->to_bytes(grow_res); + + ASSERT_EQ(trim_res_bytes, grow_res_bytes); + } + + // 2. Trim 2 + { + // Start by hashing: {selene_scalar_0, selene_scalar_1, selene_scalar_2} + // Then trim to: {selene_scalar_0} + const auto selene_scalar_0 = generate_random_selene_scalar(); + const auto selene_scalar_1 = generate_random_selene_scalar(); + const auto selene_scalar_2 = generate_random_selene_scalar(); + + // Get the initial hash of the 3 selene scalars + std::vector<Selene::Scalar> init_children{selene_scalar_0, selene_scalar_1, selene_scalar_2}; + const auto init_hash = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), + /*children*/ Selene::Chunk{init_children.data(), init_children.size()}); + + // Trim the initial result by 2 children + const auto &trimmed_children = Selene::Chunk{init_children.data() + 1, 2}; + const auto trim_res = curve_trees->m_c2->hash_trim( + init_hash, + 1, + trimmed_children, + curve_trees->m_c2->zero_scalar()); + const auto trim_res_bytes = curve_trees->m_c2->to_bytes(trim_res); + + // Now compare to calling hash_grow{selene_scalar_0} + std::vector<Selene::Scalar> remaining_children{selene_scalar_0}; + const auto grow_res = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), + /*children*/ Selene::Chunk{remaining_children.data(), remaining_children.size()}); + const auto grow_res_bytes = curve_trees->m_c2->to_bytes(grow_res); + + ASSERT_EQ(trim_res_bytes, grow_res_bytes); + } + + // 3. Change 1 + { + // Start by hashing: {selene_scalar_0, selene_scalar_1} + // Then change to: {selene_scalar_0, selene_scalar_2} + const auto selene_scalar_0 = generate_random_selene_scalar(); + const auto selene_scalar_1 = generate_random_selene_scalar(); + + // Get the initial hash of the 2 selene scalars + std::vector<Selene::Scalar> init_children{selene_scalar_0, selene_scalar_1}; + const auto init_hash = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), + /*children*/ Selene::Chunk{init_children.data(), init_children.size()}); + + const auto selene_scalar_2 = generate_random_selene_scalar(); + + // Trim the 2nd child and grow with new child + const auto &trimmed_children = Selene::Chunk{init_children.data() + 1, 1}; + const auto trim_res = curve_trees->m_c2->hash_trim( + init_hash, + 1, + trimmed_children, + selene_scalar_2); + const auto trim_res_bytes = curve_trees->m_c2->to_bytes(trim_res); + + // Now compare to calling hash_grow{selene_scalar_0, selene_scalar_2} + std::vector<Selene::Scalar> remaining_children{selene_scalar_0, selene_scalar_2}; + const auto grow_res = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), + /*children*/ Selene::Chunk{remaining_children.data(), remaining_children.size()}); + const auto grow_res_bytes = curve_trees->m_c2->to_bytes(grow_res); + + ASSERT_EQ(trim_res_bytes, grow_res_bytes); + }
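+ + // Note: the last argument to hash_trim appears to be the replacement child at + // the trim offset -- cases 1-2 pass zero_scalar() to trim only, case 3 swaps in + // a fresh scalar, and case 4 below combines a 2-child trim with a replacement.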
+ + // 4. Trim 2 and grow back by 1 + { + // Start by hashing: {selene_scalar_0, selene_scalar_1, selene_scalar_2} + // Then trim+grow to: {selene_scalar_0, selene_scalar_3} + const auto selene_scalar_0 = generate_random_selene_scalar(); + const auto selene_scalar_1 = generate_random_selene_scalar(); + const auto selene_scalar_2 = generate_random_selene_scalar(); + + // Get the initial hash of the 3 selene scalars + std::vector<Selene::Scalar> init_children{selene_scalar_0, selene_scalar_1, selene_scalar_2}; + const auto init_hash = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), + /*children*/ Selene::Chunk{init_children.data(), init_children.size()}); + + const auto selene_scalar_3 = generate_random_selene_scalar(); + + // Trim the initial result by 2 children + grow by 1 + const auto &trimmed_children = Selene::Chunk{init_children.data() + 1, 2}; + const auto trim_res = curve_trees->m_c2->hash_trim( + init_hash, + 1, + trimmed_children, + selene_scalar_3); + const auto trim_res_bytes = curve_trees->m_c2->to_bytes(trim_res); + + // Now compare to calling hash_grow{selene_scalar_0, selene_scalar_3} + std::vector<Selene::Scalar> remaining_children{selene_scalar_0, selene_scalar_3}; + const auto grow_res = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), + /*children*/ Selene::Chunk{remaining_children.data(), remaining_children.size()}); + const auto grow_res_bytes = curve_trees->m_c2->to_bytes(grow_res); + + ASSERT_EQ(trim_res_bytes, grow_res_bytes); + } +} +//----------------------------------------------------------------------------------------------------------------------
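The hash_grow test below checks the complementary direction: extending a chunk hash incrementally must agree with hashing all of its children in one shot. Schematically, in the test's own names (a reading aid only, assuming hash_grow is the updatable hash its signature implies):

// init_hash = hash_grow(init_point, 0, zero, {s0, s1})      -- batch of 2
// ext_hash  = hash_grow(init_hash,  2, zero, {s2})          -- incremental append
// ext_hash == hash_grow(init_point, 0, zero, {s0, s1, s2})  -- must agree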
+TEST(curve_trees, hash_grow) +{ + const auto curve_trees = fcmp_pp::curve_trees::curve_trees_v1(); + + // Start by hashing: {selene_scalar_0, selene_scalar_1} + // Then grow 1: {selene_scalar_0, selene_scalar_1, selene_scalar_2} + // Then grow 1: {selene_scalar_0, selene_scalar_1, selene_scalar_2, selene_scalar_3} + const auto selene_scalar_0 = generate_random_selene_scalar(); + const auto selene_scalar_1 = generate_random_selene_scalar(); + + // Get the initial hash of the 2 selene scalars + std::vector<Selene::Scalar> all_children{selene_scalar_0, selene_scalar_1}; + const auto init_hash = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), + /*children*/ Selene::Chunk{all_children.data(), all_children.size()}); + + // Extend with a new child + const auto selene_scalar_2 = generate_random_selene_scalar(); + std::vector<Selene::Scalar> new_children{selene_scalar_2}; + const auto ext_hash = curve_trees->m_c2->hash_grow( + init_hash, + all_children.size(), + curve_trees->m_c2->zero_scalar(), + Selene::Chunk{new_children.data(), new_children.size()}); + const auto ext_hash_bytes = curve_trees->m_c2->to_bytes(ext_hash); + + // Now compare to calling hash_grow{selene_scalar_0, selene_scalar_1, selene_scalar_2} + all_children.push_back(selene_scalar_2); + const auto grow_res = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), + /*children*/ Selene::Chunk{all_children.data(), all_children.size()}); + const auto grow_res_bytes = curve_trees->m_c2->to_bytes(grow_res); + + ASSERT_EQ(ext_hash_bytes, grow_res_bytes); + + // Extend again with a new child + const auto selene_scalar_3 = generate_random_selene_scalar(); + new_children = {selene_scalar_3}; + const auto ext_hash2 = curve_trees->m_c2->hash_grow( + ext_hash, + all_children.size(), + curve_trees->m_c2->zero_scalar(), + Selene::Chunk{new_children.data(), new_children.size()}); + const auto ext_hash_bytes2 = curve_trees->m_c2->to_bytes(ext_hash2); + + // Now compare to calling hash_grow{selene_scalar_0, selene_scalar_1, selene_scalar_2, selene_scalar_3} + all_children.push_back(selene_scalar_3); + const auto grow_res2 = curve_trees->m_c2->hash_grow( + /*existing_hash*/ curve_trees->m_c2->hash_init_point(), + /*offset*/ 0, + /*existing_child_at_offset*/ curve_trees->m_c2->zero_scalar(), + /*children*/ Selene::Chunk{all_children.data(), all_children.size()}); + const auto grow_res_bytes2 = curve_trees->m_c2->to_bytes(grow_res2); + + ASSERT_EQ(ext_hash_bytes2, grow_res_bytes2); +} diff --git a/tests/unit_tests/curve_trees.h b/tests/unit_tests/curve_trees.h new file mode 100644 index 0000000000..4706a50581 --- /dev/null +++ b/tests/unit_tests/curve_trees.h @@ -0,0 +1,128 @@ +// Copyright (c) 2014, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +#pragma once + +#include "fcmp_pp/curve_trees.h" +#include "fcmp_pp/tower_cycle.h" +#include "unit_tests_utils.h" + +using Helios = fcmp_pp::curve_trees::Helios; +using Selene = fcmp_pp::curve_trees::Selene; +using CurveTreesV1 = fcmp_pp::curve_trees::CurveTreesV1; + +//---------------------------------------------------------------------------------------------------------------------- +#define INIT_CURVE_TREES_TEST(helios_chunk_width, selene_chunk_width, tree_depth) \ + static_assert(helios_chunk_width > 1, "helios width must be > 1"); \ + static_assert(selene_chunk_width > 1, "selene width must be > 1"); \ + const auto curve_trees = fcmp_pp::curve_trees::curve_trees_v1(helios_chunk_width, selene_chunk_width); \ + \ + /* Number of leaves required for tree to reach given depth */ \ + std::size_t min_leaves_needed_for_tree_depth = selene_chunk_width; \ + for (std::size_t i = 1; i < tree_depth; ++i) \ + { \ + const std::size_t width = i % 2 == 0 ? selene_chunk_width : helios_chunk_width; \ + min_leaves_needed_for_tree_depth *= width; \ + } \ + \ + /* Increment to test for off-by-1 */ \ + ++min_leaves_needed_for_tree_depth; \ + \ + unit_test::BlockchainLMDBTest test_db; +//----------------------------------------------------------------------------------------------------------------------
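For concreteness, here is the macro's leaf-count loop unrolled with the widths the grow_tree test passes in (helios 3, selene 2, depth 4); this is just the macro's own arithmetic:

// min = selene_chunk_width                      -> 2   (one full leaf-layer chunk)
// i = 1: width = helios_chunk_width, min *= 3   -> 6
// i = 2: width = selene_chunk_width, min *= 2   -> 12
// i = 3: width = helios_chunk_width, min *= 3   -> 36
// ++min (off-by-1 probe)                        -> 37 leaves force a depth-4 tree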
+ +// Helper class to read/write a global tree in memory. It's only used in testing because normally the tree isn't kept +// in memory (it's stored in the db) +class CurveTreesGlobalTree +{ +public: + CurveTreesGlobalTree(CurveTreesV1 &curve_trees): m_curve_trees(curve_trees) {}; + +//member structs +public: + template <typename C> + using Layer = std::vector<typename C::Point>; + + // A complete tree, useful for testing (don't want to keep the whole tree in memory during normal operation) + struct Tree final + { + std::vector<fcmp_pp::curve_trees::OutputPair> leaves; + std::vector<Layer<Helios>> c1_layers; + std::vector<Layer<Selene>> c2_layers; + }; + +//public member functions +public: + // Read the in-memory tree and get the number of leaf tuples + std::size_t get_num_leaf_tuples() const; + + // Grow tree by provided new_n_leaf_tuples + bool grow_tree(const std::size_t expected_old_n_leaf_tuples, const std::size_t new_n_leaf_tuples); + + // Trim the provided number of leaf tuples from the tree + bool trim_tree(const std::size_t expected_old_n_leaf_tuples, const std::size_t trim_n_leaf_tuples); + + // Validate the in-memory tree by re-hashing every layer, starting from root and working down to leaf layer + bool audit_tree(const std::size_t expected_n_leaf_tuples) const; + + // Get the path in the tree of the provided leaf idx + fcmp_pp::curve_trees::PathV1 get_path_at_leaf_idx(const std::size_t leaf_idx) const; + + // Hint: use num leaf tuples in the tree to determine the type + std::array<uint8_t, 32UL> get_tree_root() const; + +private: + // Use the tree extension to extend the in-memory tree + void extend_tree(const CurveTreesV1::TreeExtension &tree_extension); + + // Use the tree reduction to reduce the in-memory tree + void reduce_tree(const CurveTreesV1::TreeReduction &tree_reduction); + + // Read the in-memory tree and get the last hashes from each layer in the tree + CurveTreesV1::LastHashes get_last_hashes() const; + + // Read the in-memory tree and get data from what will be the last chunks after trimming the tree to the provided + // number of leaves + // - This function is useful to collect all tree data necessary to perform the actual trim operation + // - This function can return elems from each last chunk that will need to be trimmed + CurveTreesV1::LastHashes get_last_hashes_to_trim( + const std::vector<fcmp_pp::curve_trees::TrimLayerInstructions> &trim_instructions) const; + + CurveTreesV1::LastChunkChildrenToTrim get_all_last_chunk_children_to_trim( + const std::vector<fcmp_pp::curve_trees::TrimLayerInstructions> &trim_instructions); + + // logging helpers + void log_last_hashes(const CurveTreesV1::LastHashes &last_hashes); + void log_tree_extension(const CurveTreesV1::TreeExtension &tree_extension); + void log_tree(); + +private: + CurveTreesV1 &m_curve_trees; + Tree m_tree = Tree{}; +}; + diff --git a/tests/unit_tests/fcmp_pp.cpp b/tests/unit_tests/fcmp_pp.cpp new file mode 100644 index 0000000000..fc879a8248 --- /dev/null +++ b/tests/unit_tests/fcmp_pp.cpp @@ -0,0 +1,64 @@ +// Copyright (c) 2014, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +#include "gtest/gtest.h" + +#include "cryptonote_basic/cryptonote_format_utils.h" +#include "curve_trees.h" +#include "misc_log_ex.h" +#include "ringct/rctOps.h" + + +//---------------------------------------------------------------------------------------------------------------------- +TEST(fcmp_pp, prove) +{ + static const std::size_t helios_chunk_width = fcmp_pp::curve_trees::HELIOS_CHUNK_WIDTH; + static const std::size_t selene_chunk_width = fcmp_pp::curve_trees::SELENE_CHUNK_WIDTH; + + static const std::size_t tree_depth = 3; + + LOG_PRINT_L1("Test prove with helios chunk width " << helios_chunk_width + << ", selene chunk width " << selene_chunk_width << ", tree depth " << tree_depth); + + INIT_CURVE_TREES_TEST(helios_chunk_width, selene_chunk_width, tree_depth); + + LOG_PRINT_L1("Initializing tree with " << min_leaves_needed_for_tree_depth << " leaves"); + + // Init tree in memory + CurveTreesGlobalTree global_tree(*curve_trees); + ASSERT_TRUE(global_tree.grow_tree(0, min_leaves_needed_for_tree_depth)); + + LOG_PRINT_L1("Finished initializing tree with " << min_leaves_needed_for_tree_depth << " leaves"); + + // Create proof for every leaf in the tree + for (std::size_t leaf_idx = 0; leaf_idx < global_tree.get_num_leaf_tuples(); ++leaf_idx) + { + const auto path = global_tree.get_path_at_leaf_idx(leaf_idx); + } +} +//---------------------------------------------------------------------------------------------------------------------- diff --git a/tests/unit_tests/hardfork.cpp b/tests/unit_tests/hardfork.cpp index 56958a0d85..bd97784aab 100644 --- a/tests/unit_tests/hardfork.cpp +++ b/tests/unit_tests/hardfork.cpp @@ -35,6 +35,7 @@ #include "cryptonote_basic/cryptonote_format_utils.h" #include "cryptonote_basic/hardfork.h" #include "blockchain_db/testdb.h" +#include "fcmp_pp/curve_trees.h" using namespace cryptonote; @@ -54,6 +55,7 @@ class TestDB: public cryptonote::BaseTestDB { , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash + , const fcmp_pp::curve_trees::OutputsByUnlockBlock& outs_by_unlock_block ) override { blocks.push_back(blk); } @@ -107,20 +109,20 @@ TEST(major, Only) ASSERT_FALSE(hf.add(mkblock(0, 2), 0)); ASSERT_FALSE(hf.add(mkblock(2, 2), 0)); ASSERT_TRUE(hf.add(mkblock(1, 2), 0)); - db.add_block(mkblock(1, 1), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(1, 1), 0, 0, 0, 0, 0, crypto::hash(), {}); // block height 1, only version 1 is accepted ASSERT_FALSE(hf.add(mkblock(0, 2), 1)); ASSERT_FALSE(hf.add(mkblock(2, 2), 1)); ASSERT_TRUE(hf.add(mkblock(1, 2), 1)); - db.add_block(mkblock(1, 1), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(1, 1), 0, 0, 0, 0, 0, crypto::hash(), {}); // block height 2, only version 2 is accepted ASSERT_FALSE(hf.add(mkblock(0, 2), 2)); ASSERT_FALSE(hf.add(mkblock(1, 2), 2)); ASSERT_FALSE(hf.add(mkblock(3, 2), 2)); ASSERT_TRUE(hf.add(mkblock(2, 2), 2)); - db.add_block(mkblock(2, 1), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(2, 1), 0, 0, 0, 0, 0, crypto::hash(), {}); } TEST(empty_hardforks, Success) @@ -134,7 +136,7 @@ TEST(empty_hardforks, Success) ASSERT_TRUE(hf.get_state(time(NULL) + 3600*24*400) == HardFork::Ready); for (uint64_t h = 0; h <= 10; ++h) { - db.add_block(mkblock(hf, h, 1), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, 1), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } ASSERT_EQ(hf.get(0), 1); @@ -168,14 +170,14 @@ TEST(check_for_height, Success) for (uint64_t h = 0; h <= 4; ++h) { 
ASSERT_TRUE(hf.check_for_height(mkblock(1, 1), h)); ASSERT_FALSE(hf.check_for_height(mkblock(2, 2), h)); // block version is too high - db.add_block(mkblock(hf, h, 1), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, 1), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } for (uint64_t h = 5; h <= 10; ++h) { ASSERT_FALSE(hf.check_for_height(mkblock(1, 1), h)); // block version is too low ASSERT_TRUE(hf.check_for_height(mkblock(2, 2), h)); - db.add_block(mkblock(hf, h, 2), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, 2), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } } @@ -192,19 +194,19 @@ TEST(get, next_version) for (uint64_t h = 0; h <= 4; ++h) { ASSERT_EQ(2, hf.get_next_version()); - db.add_block(mkblock(hf, h, 1), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, 1), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } for (uint64_t h = 5; h <= 9; ++h) { ASSERT_EQ(4, hf.get_next_version()); - db.add_block(mkblock(hf, h, 2), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, 2), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } for (uint64_t h = 10; h <= 15; ++h) { ASSERT_EQ(4, hf.get_next_version()); - db.add_block(mkblock(hf, h, 4), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, 4), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } } @@ -245,7 +247,7 @@ TEST(steps_asap, Success) hf.init(); for (uint64_t h = 0; h < 10; ++h) { - db.add_block(mkblock(hf, h, 9), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, 9), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } @@ -272,7 +274,7 @@ TEST(steps_1, Success) hf.init(); for (uint64_t h = 0 ; h < 10; ++h) { - db.add_block(mkblock(hf, h, h+1), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, h+1), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } @@ -297,7 +299,7 @@ TEST(reorganize, Same) // index 0 1 2 3 4 5 6 7 8 9 static const uint8_t block_versions[] = { 1, 1, 4, 4, 7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }; for (uint64_t h = 0; h < 20; ++h) { - db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } @@ -328,7 +330,7 @@ TEST(reorganize, Changed) static const uint8_t block_versions[] = { 1, 1, 4, 4, 7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }; static const uint8_t expected_versions[] = { 1, 1, 1, 1, 1, 1, 4, 4, 7, 7, 9, 9, 9, 9, 9, 9 }; for (uint64_t h = 0; h < 16; ++h) { - db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE (hf.add(db.get_block_from_height(h), h)); } @@ -348,7 +350,7 @@ TEST(reorganize, Changed) ASSERT_EQ(db.height(), 3); hf.reorganize_from_block_height(2); for (uint64_t h = 3; h < 16; ++h) { - db.add_block(mkblock(hf, h, block_versions_new[h]), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, block_versions_new[h]), 0, 0, 0, 0, 0, crypto::hash(), {}); bool ret = hf.add(db.get_block_from_height(h), h); ASSERT_EQ (ret, h < 15); } @@ -372,7 +374,7 @@ TEST(voting, threshold) for (uint64_t h = 0; h <= 8; ++h) { uint8_t v = 1 + !!(h % 8); - 
db.add_block(mkblock(hf, h, v), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, v), 0, 0, 0, 0, 0, crypto::hash(), {}); bool ret = hf.add(db.get_block_from_height(h), h); if (h >= 8 && threshold == 87) { // for threshold 87, we reach the threshold at height 7, so from height 8, hard fork to version 2, but 8 tries to add 1 @@ -406,7 +408,7 @@ TEST(voting, different_thresholds) static const uint8_t expected_versions[] = { 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4 }; for (uint64_t h = 0; h < sizeof(block_versions) / sizeof(block_versions[0]); ++h) { - db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash(), {}); bool ret = hf.add(db.get_block_from_height(h), h); ASSERT_EQ(ret, true); } @@ -459,7 +461,7 @@ TEST(voting, info) ASSERT_EQ(expected_thresholds[h], threshold); ASSERT_EQ(4, voting); - db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash()); + db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash(), {}); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); } } @@ -522,7 +524,7 @@ TEST(reorganize, changed) #define ADD(v, h, a) \ do { \ cryptonote::block b = mkblock(hf, h, v); \ - db.add_block(b, 0, 0, 0, 0, 0, crypto::hash()); \ + db.add_block(b, 0, 0, 0, 0, 0, crypto::hash(), {}); \ ASSERT_##a(hf.add(b, h)); \ } while(0) #define ADD_TRUE(v, h) ADD(v, h, TRUE) diff --git a/tests/unit_tests/long_term_block_weight.cpp b/tests/unit_tests/long_term_block_weight.cpp index f7ef262e61..07d33fb723 100644 --- a/tests/unit_tests/long_term_block_weight.cpp +++ b/tests/unit_tests/long_term_block_weight.cpp @@ -58,6 +58,7 @@ class TestDB: public cryptonote::BaseTestDB , const uint64_t& coins_generated , uint64_t num_rct_outs , const crypto::hash& blk_hash + , const fcmp_pp::curve_trees::OutputsByUnlockBlock& outs_by_unlock_block ) override { blocks.push_back({block_weight, long_term_block_weight}); } diff --git a/tests/unit_tests/serialization.cpp b/tests/unit_tests/serialization.cpp index 9daa44351c..05dd44da1b 100644 --- a/tests/unit_tests/serialization.cpp +++ b/tests/unit_tests/serialization.cpp @@ -1304,3 +1304,185 @@ TEST(Serialization, tuple_many_tuples) EXPECT_EQ(tupler, tupler_recovered); } + +TEST(Serialization, tx_fcmp_pp) +{ + using namespace cryptonote; + + const std::size_t n_inputs = 2; + const std::size_t n_outputs = 3; + const uint8_t curve_trees_tree_depth = 3; + + const std::size_t proof_len = fcmp_pp::proof_len(n_inputs, curve_trees_tree_depth); + + const auto make_dummy_fcmp_pp_tx = [curve_trees_tree_depth, proof_len]() -> transaction + { + transaction tx; + + tx.invalidate_hashes(); + tx.set_null(); + + tx.version = 2; + tx.rct_signatures.type = rct::RCTTypeFcmpPlusPlus; + + // Set inputs + txin_to_key txin_to_key1; + txin_to_key1.amount = 1; + memset(&txin_to_key1.k_image, 0x42, sizeof(crypto::key_image)); + txin_to_key1.key_offsets.clear(); + tx.vin.clear(); + for (size_t i = 0; i < n_inputs; ++i) + tx.vin.push_back(txin_to_key1); + + // Set outputs + const uint64_t amount = 1; + std::vector<uint64_t> out_amounts; + tx_out vout; + set_tx_out(amount, crypto::public_key{}, true, crypto::view_tag{}, vout); + for (size_t i = 0; i < n_outputs; ++i) + { + tx.vout.push_back(vout); + out_amounts.push_back(amount); + } + + // 1 ecdhTuple for each output + rct::ecdhTuple ecdhInfo; + memset(&ecdhInfo.mask, 0x01, sizeof(rct::key)); + memset(&ecdhInfo.amount, 0x02, sizeof(rct::key)); + for (size_t i = 0; i < n_outputs; ++i) + tx.rct_signatures.ecdhInfo.push_back(ecdhInfo); + + // 1 outPk for each output + rct::ctkey ctkey; + memset(&ctkey.dest, 0x01, sizeof(rct::key)); + memset(&ctkey.mask, 0x02, sizeof(rct::key)); + for (size_t i = 0; i < n_outputs; ++i) + tx.rct_signatures.outPk.push_back(ctkey); + + // 1 bp+ + rct::keyV C, masks; + tx.rct_signatures.p.bulletproofs_plus.push_back(rct::make_dummy_bulletproof_plus(out_amounts, C, masks)); + + // 1 pseudoOut for each input + const rct::key pseudoOut{0x01}; + for (size_t i = 0; i < n_inputs; ++i) + tx.rct_signatures.p.pseudoOuts.push_back(pseudoOut); + + // Set the reference block for fcmp++ + const crypto::hash referenceBlock{0x01}; + tx.rct_signatures.referenceBlock = referenceBlock; + + // Set the curve trees merkle tree depth + tx.rct_signatures.p.curve_trees_tree_depth = curve_trees_tree_depth; + + // 1 fcmp++ proof + fcmp_pp::FcmpPpProof fcmp_pp; + fcmp_pp.reserve(proof_len); + for (std::size_t i = 0; i < proof_len; ++i) + fcmp_pp.push_back(i); + tx.rct_signatures.p.fcmp_pp = std::move(fcmp_pp); + + return tx; + };
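+ + // Note: an fcmp++ proof has exactly one valid wire size for a given input count + // and tree depth (fcmp_pp::proof_len above), so both serializing and parsing can + // enforce an exact length; cases 2-5 below check rejection from both directions.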
+ + // 1. Set up a normal tx that includes an fcmp++ proof + { + transaction tx = make_dummy_fcmp_pp_tx(); + transaction tx1; + string blob; + + ASSERT_TRUE(serialization::dump_binary(tx, blob)); + ASSERT_TRUE(serialization::parse_binary(blob, tx1)); + ASSERT_EQ(tx, tx1); + ASSERT_EQ(tx.rct_signatures.referenceBlock, crypto::hash{0x01}); + ASSERT_EQ(tx.rct_signatures.referenceBlock, tx1.rct_signatures.referenceBlock); + ASSERT_EQ(tx.rct_signatures.p.fcmp_pp, tx1.rct_signatures.p.fcmp_pp); + } + + // 2. fcmp++ proof is longer than expected when serializing + { + transaction tx = make_dummy_fcmp_pp_tx(); + + // Extend fcmp++ proof + ASSERT_TRUE(tx.rct_signatures.p.fcmp_pp.size() == proof_len); + tx.rct_signatures.p.fcmp_pp.push_back(0x01); + + string blob; + ASSERT_FALSE(serialization::dump_binary(tx, blob)); + } + + // 3. fcmp++ proof is shorter than expected when serializing + { + transaction tx = make_dummy_fcmp_pp_tx(); + + // Shorten the fcmp++ proof + ASSERT_TRUE(tx.rct_signatures.p.fcmp_pp.size() == proof_len); + ASSERT_TRUE(tx.rct_signatures.p.fcmp_pp.size() > 1); + tx.rct_signatures.p.fcmp_pp.pop_back(); + + string blob; + ASSERT_FALSE(serialization::dump_binary(tx, blob)); + } + + const auto fcmp_pp_to_hex_str = [](const transaction &tx) + { + std::string fcmp_pp_str; + for (std::size_t i = 0; i < tx.rct_signatures.p.fcmp_pp.size(); ++i) + { + std::stringstream ss; + ss << std::hex << std::setfill('0') << std::setw(2) << (int)tx.rct_signatures.p.fcmp_pp[i]; + fcmp_pp_str += ss.str(); + } + return fcmp_pp_str; + }; + + // 4. fcmp++ proof is longer than expected when de-serializing + { + transaction tx = make_dummy_fcmp_pp_tx(); + transaction tx1; + string blob; + + ASSERT_TRUE(serialization::dump_binary(tx, blob)); + + std::string blob_str = epee::string_tools::buff_to_hex_nodelimer(blob); + + // Find the proof within the serialized tx blob + const std::string fcmp_pp_str = fcmp_pp_to_hex_str(tx); + ASSERT_TRUE(!fcmp_pp_str.empty()); + const std::size_t pos = blob_str.find(fcmp_pp_str); + ASSERT_TRUE(pos != std::string::npos); + ASSERT_TRUE(blob_str.find(fcmp_pp_str, pos + 1) == std::string::npos); + + // Insert an extra proof elem + blob_str.insert(pos, "2a"); + std::string larger_blob; + epee::string_tools::parse_hexstr_to_binbuff(blob_str, larger_blob); + + ASSERT_FALSE(serialization::parse_binary(larger_blob, tx1)); + }
+ + // 5. fcmp++ proof is shorter than expected when de-serializing + { + transaction tx = make_dummy_fcmp_pp_tx(); + transaction tx1; + string blob; + + ASSERT_TRUE(serialization::dump_binary(tx, blob)); + + std::string blob_str = epee::string_tools::buff_to_hex_nodelimer(blob); + + // Find the proof within the serialized tx blob + const std::string fcmp_pp_str = fcmp_pp_to_hex_str(tx); + ASSERT_TRUE(!fcmp_pp_str.empty()); + const std::size_t pos = blob_str.find(fcmp_pp_str); + ASSERT_TRUE(pos != std::string::npos); + ASSERT_TRUE(blob_str.find(fcmp_pp_str, pos + 1) == std::string::npos); + + // Delete a proof elem + blob_str.erase(pos, 2); + std::string smaller_blob; + epee::string_tools::parse_hexstr_to_binbuff(blob_str, smaller_blob); + + ASSERT_FALSE(serialization::parse_binary(smaller_blob, tx1)); + } +} diff --git a/tests/unit_tests/unit_tests_utils.h b/tests/unit_tests/unit_tests_utils.h index 65da7bf884..ab3b77889f 100644 --- a/tests/unit_tests/unit_tests_utils.h +++ b/tests/unit_tests/unit_tests_utils.h @@ -30,6 +30,13 @@ #pragma once +#include "gtest/gtest.h" + +#include "blockchain_db/blockchain_db.h" +#include "blockchain_db/lmdb/db_lmdb.h" +#include "fcmp_pp/curve_trees.h" +#include "misc_log_ex.h" + +#include <atomic> #include <boost/filesystem.hpp> @@ -64,8 +71,92 @@ namespace unit_test private: std::atomic<size_t> m_counter; }; + + class BlockchainLMDBTest + { + public: + BlockchainLMDBTest(bool is_copy = false) : + m_temp_db_dir(boost::filesystem::temp_directory_path().string() + "/monero-lmdb-tests/"), + m_is_copy{is_copy} + {} + + ~BlockchainLMDBTest() + { + delete m_db; + if (m_temp_db_dir.find("/monero-lmdb-tests/") == std::string::npos) + { + LOG_ERROR("unexpected temp db dir"); + return; + } + if (!m_is_copy) + boost::filesystem::remove_all(m_temp_db_dir); + } + + void init_new_db(std::shared_ptr<fcmp_pp::curve_trees::CurveTreesV1> curve_trees) + { + CHECK_AND_ASSERT_THROW_MES(this->m_db == nullptr, "expected nullptr m_db"); + this->m_db = new cryptonote::BlockchainLMDB(true/*batch_transactions*/, curve_trees); + + const auto temp_db_path = boost::filesystem::unique_path(); + const std::string dir_path = m_temp_db_dir + temp_db_path.string(); + + MDEBUG("Creating test db at path " << dir_path); + ASSERT_NO_THROW(this->m_db->open(dir_path)); + m_cur_dir_path = dir_path; + } + + void init_hardfork(cryptonote::HardFork *hardfork) + { + hardfork->init(); + this->m_db->set_hard_fork(hardfork); + } + + BlockchainLMDBTest *copy_db(std::shared_ptr<fcmp_pp::curve_trees::CurveTreesV1> curve_trees) + { + CHECK_AND_ASSERT_THROW_MES(this->m_db != nullptr, "expected non-null m_db"); + CHECK_AND_ASSERT_THROW_MES(this->m_cur_dir_path != "", "expected cur dir path set"); + + const boost::filesystem::path lmdb_data_path = boost::filesystem::path(m_cur_dir_path + "/data.mdb"); + CHECK_AND_ASSERT_THROW_MES(boost::filesystem::exists(lmdb_data_path), "did not find lmdb data file"); + + // Close db, copy db file, open copy, then reopen the db + this->m_db->close(); + const auto temp_db_path = boost::filesystem::unique_path(); + const std::string dest_path = m_temp_db_dir + temp_db_path.string(); + CHECK_AND_ASSERT_THROW_MES(boost::filesystem::create_directories(dest_path), + "failed to create new db dirs"); + boost::filesystem::copy_file(lmdb_data_path, dest_path + "/data.mdb"); + + // Open db copy + BlockchainLMDBTest *copy_db = new BlockchainLMDBTest(true/*is_copy*/); + copy_db->m_db = new cryptonote::BlockchainLMDB(true/*batch_transactions*/, curve_trees); + copy_db->m_db->open(dest_path); + copy_db->m_cur_dir_path = dest_path; + + // Reopen original db so it's ready for use + 
this->m_db->open(m_cur_dir_path); + + return copy_db; + } + + cryptonote::BlockchainDB* m_db{nullptr}; + const std::string m_temp_db_dir; + std::string m_cur_dir_path{""}; + const bool m_is_copy{false}; + }; } +#define INIT_BLOCKCHAIN_LMDB_TEST_DB(test_db, curve_trees) \ + if (curve_trees != nullptr) \ + test_db.init_new_db(curve_trees); \ + auto hardfork = cryptonote::HardFork(*test_db.m_db, 1, 0); \ + test_db.init_hardfork(&hardfork); \ + auto scope_exit_handler = epee::misc_utils::create_scope_leave_handler([&](){ \ + ASSERT_NO_THROW(test_db.m_db->close()); \ + delete test_db.m_db; \ + test_db.m_db = nullptr; \ + }) + # define ASSERT_EQ_MAP(val, map, key) \ do { \ auto found = map.find(key); \