diff --git a/.github/workflows/cron-daily-fuzz.yml b/.github/workflows/cron-daily-fuzz.yml new file mode 100644 index 0000000..5339112 --- /dev/null +++ b/.github/workflows/cron-daily-fuzz.yml @@ -0,0 +1,62 @@ +# Automatically generated by fuzz/generate-files.sh +name: Fuzz +on: + schedule: + # 6am every day UTC, this correlates to: + # - 11pm PDT + # - 7am CET + # - 5pm AEDT + - cron: '00 06 * * *' + +jobs: + fuzz: + if: ${{ !github.event.act }} + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + # We only get 20 jobs at a time, we probably don't want to go + # over that limit with fuzzing because of the hour run time. + fuzz_target: [ + minreq_http, + simple_http, + ] + steps: + - name: Install test dependencies + run: sudo apt-get update -y && sudo apt-get install -y binutils-dev libunwind8-dev libcurl4-openssl-dev libelf-dev libdw-dev cmake gcc libiberty-dev + - uses: actions/checkout@v4 + - uses: actions/cache@v4 + id: cache-fuzz + with: + path: | + ~/.cargo/bin + fuzz/target + target + key: cache-${{ matrix.target }}-${{ hashFiles('**/Cargo.toml','**/Cargo.lock') }} + - uses: dtolnay/rust-toolchain@stable + with: + toolchain: '1.65.0' + - name: fuzz + run: | + if [[ "${{ matrix.fuzz_target }}" =~ ^bitcoin ]]; then + export RUSTFLAGS='--cfg=hashes_fuzz --cfg=secp256k1_fuzz' + fi + echo "Using RUSTFLAGS $RUSTFLAGS" + cd fuzz && ./fuzz.sh "${{ matrix.fuzz_target }}" + - run: echo "${{ matrix.fuzz_target }}" >executed_${{ matrix.fuzz_target }} + - uses: actions/upload-artifact@v3 + with: + name: executed_${{ matrix.fuzz_target }} + path: executed_${{ matrix.fuzz_target }} + + verify-execution: + if: ${{ !github.event.act }} + needs: fuzz + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/download-artifact@v3 + - name: Display structure of downloaded files + run: ls -R + - run: find executed_* -type f -exec cat {} + | sort > executed + - run: source ./fuzz/fuzz-util.sh && listTargetNames | sort | diff - executed diff 
--git a/Cargo-minimal.lock b/Cargo-minimal.lock index 7947646..be28bd6 100644 --- a/Cargo-minimal.lock +++ b/Cargo-minimal.lock @@ -106,6 +106,12 @@ dependencies = [ "serde_json", ] +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + [[package]] name = "cc" version = "1.0.28" @@ -136,15 +142,20 @@ checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jsonrpc" version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3662a38d341d77efecb73caf01420cfa5aa63c0253fd7bc05289ef9f6616e1bf" dependencies = [ "base64", "minreq", "serde", "serde_json", + "socks", ] +[[package]] +name = "libc" +version = "0.2.158" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" + [[package]] name = "log" version = "0.4.18" @@ -237,6 +248,17 @@ dependencies = [ "serde", ] +[[package]] +name = "socks" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0c3dbbd9ae980613c6dd8e28a9407b50509d3803b57624d5dfe8315218cd58b" +dependencies = [ + "byteorder", + "libc", + "winapi", +] + [[package]] name = "syn" version = "2.0.56" @@ -253,3 +275,25 @@ name = "unicode-ident" version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/Cargo-recent.lock b/Cargo-recent.lock index 7947646..be28bd6 100644 --- a/Cargo-recent.lock +++ b/Cargo-recent.lock @@ -106,6 +106,12 @@ dependencies = [ "serde_json", ] +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + [[package]] name = "cc" version = "1.0.28" @@ -136,15 +142,20 @@ checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jsonrpc" version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3662a38d341d77efecb73caf01420cfa5aa63c0253fd7bc05289ef9f6616e1bf" dependencies = [ "base64", "minreq", "serde", "serde_json", + "socks", ] +[[package]] +name = "libc" +version = "0.2.158" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" + [[package]] name = "log" version = "0.4.18" @@ -237,6 +248,17 @@ dependencies = [ "serde", ] +[[package]] +name = "socks" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0c3dbbd9ae980613c6dd8e28a9407b50509d3803b57624d5dfe8315218cd58b" +dependencies = [ + "byteorder", + "libc", + "winapi", +] + [[package]] name = "syn" version = "2.0.56" @@ -253,3 +275,25 @@ name = "unicode-ident" version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" + +[[package]] +name = "winapi" +version = "0.3.9" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/Cargo.toml b/Cargo.toml index 2b6b86f..4767917 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace] -members = ["client", "json"] +members = ["client", "json", "jsonrpc"] exclude = ["integration_test", "regtest"] resolver = "2" @@ -8,3 +8,6 @@ path = "client" [patch.crates-io.bitcoind-json-rpc-types] path = "json" + +[patch.crates-io.jsonrpc] +path = "jsonrpc" diff --git a/contrib/crates.sh b/contrib/crates.sh index 57ece3f..bb6ece1 100755 --- a/contrib/crates.sh +++ b/contrib/crates.sh @@ -2,4 +2,4 @@ # shellcheck disable=SC2148 # Crates in this workspace to test. 
-CRATES=("json" "client") +CRATES=("json" "client" "jsonrpc") diff --git a/fuzz/.gitignore b/fuzz/.gitignore new file mode 100644 index 0000000..572e03b --- /dev/null +++ b/fuzz/.gitignore @@ -0,0 +1,4 @@ + +target +corpus +artifacts diff --git a/fuzz/Cargo.toml b/fuzz/Cargo.toml new file mode 100644 index 0000000..741bda6 --- /dev/null +++ b/fuzz/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "jsonrpc-fuzz" +edition = "2021" +rust-version = "1.63.0" +version = "0.0.1" +authors = ["Generated by fuzz/generate-files.sh"] +publish = false + +[package.metadata] +cargo-fuzz = true + +[dependencies] +honggfuzz = { version = "0.5.55", default-features = false } +jsonrpc = { path = "..", features = ["minreq_http"] } + +serde = { version = "1.0.103", features = [ "derive" ] } +serde_json = "1.0" + +[lints.rust] +unexpected_cfgs = { level = "deny", check-cfg = ['cfg(fuzzing)'] } + +[[bin]] +name = "minreq_http" +path = "fuzz_targets/minreq_http.rs" + +[[bin]] +name = "simple_http" +path = "fuzz_targets/simple_http.rs" diff --git a/fuzz/README.md b/fuzz/README.md new file mode 100644 index 0000000..d709207 --- /dev/null +++ b/fuzz/README.md @@ -0,0 +1,4 @@ +# Note to devs + +If you are considering adding fuzzing for the other crates take a look +at how we set up `fuzz_target` in `rust-bitcoin/fuzz`. diff --git a/fuzz/cycle.sh b/fuzz/cycle.sh new file mode 100755 index 0000000..294f32b --- /dev/null +++ b/fuzz/cycle.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +# Continuosly cycle over fuzz targets running each for 1 hour. +# It uses chrt SCHED_IDLE so that other process takes priority. 
+# +# For hfuzz options see https://github.com/google/honggfuzz/blob/master/docs/USAGE.md + +set -e +REPO_DIR=$(git rev-parse --show-toplevel) +# shellcheck source=./fuzz-util.sh +source "$REPO_DIR/fuzz/fuzz-util.sh" + +while : +do + for targetFile in $(listTargetFiles); do + targetName=$(targetFileToName "$targetFile") + echo "Fuzzing target $targetName ($targetFile)" + + # fuzz for one hour + HFUZZ_RUN_ARGS='--run_time 3600' chrt -i 0 cargo hfuzz run "$targetName" + # minimize the corpus + HFUZZ_RUN_ARGS="-i hfuzz_workspace/$targetName/input/ -P -M" chrt -i 0 cargo hfuzz run "$targetName" + done +done diff --git a/fuzz/fuzz-util.sh b/fuzz/fuzz-util.sh new file mode 100755 index 0000000..dcce452 --- /dev/null +++ b/fuzz/fuzz-util.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash + +REPO_DIR=$(git rev-parse --show-toplevel) + +# Sort order is effected by locale. See `man sort`. +# > Set LC_ALL=C to get the traditional sort order that uses native byte values. +export LC_ALL=C + +listTargetFiles() { + pushd "$REPO_DIR/fuzz" > /dev/null || exit 1 + find fuzz_targets/ -type f -name "*.rs" | sort + popd > /dev/null || exit 1 +} + +targetFileToName() { + echo "$1" \ + | sed 's/^fuzz_targets\///' \ + | sed 's/\.rs$//' \ + | sed 's/\//_/g' +} + +targetFileToHFuzzInputArg() { + baseName=$(basename "$1") + dirName="${baseName%.*}" + if [ -d "hfuzz_input/$dirName" ]; then + echo "HFUZZ_INPUT_ARGS=\"-f hfuzz_input/$FILE/input\"" + fi +} + +listTargetNames() { + for target in $(listTargetFiles); do + targetFileToName "$target" + done +} + +# Utility function to avoid CI failures on Windows +checkWindowsFiles() { + incorrectFilenames=$(find . -type f -name "*,*" -o -name "*:*" -o -name "*<*" -o -name "*>*" -o -name "*|*" -o -name "*\?*" -o -name "*\**" -o -name "*\"*" | wc -l) + if [ "$incorrectFilenames" -gt 0 ]; then + echo "Bailing early because there is a Windows-incompatible filename in the tree." 
+ exit 2 + fi +} + +# Checks whether a fuzz case output some report, and dumps it in hex +checkReport() { + reportFile="hfuzz_workspace/$1/HONGGFUZZ.REPORT.TXT" + if [ -f "$reportFile" ]; then + cat "$reportFile" + for CASE in "hfuzz_workspace/$1/SIG"*; do + xxd -p -c10000 < "$CASE" + done + exit 1 + fi +} diff --git a/fuzz/fuzz.sh b/fuzz/fuzz.sh new file mode 100755 index 0000000..30e07fa --- /dev/null +++ b/fuzz/fuzz.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +set -ex + +REPO_DIR=$(git rev-parse --show-toplevel) + +# shellcheck source=./fuzz-util.sh +source "$REPO_DIR/fuzz/fuzz-util.sh" + +# Check that input files are correct Windows file names +checkWindowsFiles + +if [ "$1" == "" ]; then + targetFiles="$(listTargetFiles)" +else + targetFiles=fuzz_targets/"$1".rs +fi + +cargo --version +rustc --version + +# Testing +cargo install --force honggfuzz --no-default-features +for targetFile in $targetFiles; do + targetName=$(targetFileToName "$targetFile") + echo "Fuzzing target $targetName ($targetFile)" + if [ -d "hfuzz_input/$targetName" ]; then + HFUZZ_INPUT_ARGS="-f hfuzz_input/$targetName/input\"" + else + HFUZZ_INPUT_ARGS="" + fi + RUSTFLAGS="--cfg=jsonrpc_fuzz" HFUZZ_RUN_ARGS="--run_time 30 --exit_upon_crash -v $HFUZZ_INPUT_ARGS" cargo hfuzz run "$targetName" + + checkReport "$targetName" +done diff --git a/fuzz/fuzz_targets/minreq_http.rs b/fuzz/fuzz_targets/minreq_http.rs new file mode 100644 index 0000000..ea358b3 --- /dev/null +++ b/fuzz/fuzz_targets/minreq_http.rs @@ -0,0 +1,59 @@ +extern crate jsonrpc; + +// Note, tests are empty if "jsonrpc_fuzz" is not set but still show up in output of `cargo test --workspace`. + +#[allow(unused_variables)] // `data` is not used when "jsonrpc_fuzz" is not set. 
+fn do_test(data: &[u8]) { + #[cfg(jsonrpc_fuzz)] + { + use std::io; + + use jsonrpc::minreq_http::{MinreqHttpTransport, FUZZ_TCP_SOCK}; + use jsonrpc::Client; + + *FUZZ_TCP_SOCK.lock().unwrap() = Some(io::Cursor::new(data.to_vec())); + + let t = MinreqHttpTransport::builder() + .url("localhost:123") + .expect("parse url") + .basic_auth("".to_string(), None) + .build(); + + let client = Client::with_transport(t); + let request = client.build_request("uptime", None); + let _ = client.send_request(request); + } +} + +fn main() { + loop { + honggfuzz::fuzz!(|data| { + do_test(data); + }); + } +} + +#[cfg(test)] +mod tests { + fn extend_vec_from_hex(hex: &str) -> Vec { + let mut out = vec![]; + let mut b = 0; + for (idx, c) in hex.as_bytes().iter().enumerate() { + b <<= 4; + match *c { + b'A'..=b'F' => b |= c - b'A' + 10, + b'a'..=b'f' => b |= c - b'a' + 10, + b'0'..=b'9' => b |= c - b'0', + _ => panic!("Bad hex"), + } + if (idx & 1) == 1 { + out.push(b); + b = 0; + } + } + out + } + + #[test] + fn duplicate_crash() { super::do_test(&extend_vec_from_hex("00")); } +} diff --git a/fuzz/fuzz_targets/simple_http.rs b/fuzz/fuzz_targets/simple_http.rs new file mode 100644 index 0000000..4dcfb27 --- /dev/null +++ b/fuzz/fuzz_targets/simple_http.rs @@ -0,0 +1,59 @@ +extern crate jsonrpc; + +// Note, tests are if empty "jsonrpc_fuzz" is not set but still show up in output of `cargo test --workspace`. + +#[allow(unused_variables)] // `data` is not used when "jsonrpc_fuzz" is not set. 
+fn do_test(data: &[u8]) { + #[cfg(jsonrpc_fuzz)] + { + use std::io; + + use jsonrpc::simple_http::{SimpleHttpTransport, FUZZ_TCP_SOCK}; + use jsonrpc::Client; + + *FUZZ_TCP_SOCK.lock().unwrap() = Some(io::Cursor::new(data.to_vec())); + + let t = SimpleHttpTransport::builder() + .url("localhost:123") + .expect("parse url") + .auth("", None) + .build(); + + let client = Client::with_transport(t); + let request = client.build_request("uptime", None); + let _ = client.send_request(request); + } +} + +fn main() { + loop { + honggfuzz::fuzz!(|data| { + do_test(data); + }); + } +} + +#[cfg(test)] +mod tests { + fn extend_vec_from_hex(hex: &str) -> Vec { + let mut out = vec![]; + let mut b = 0; + for (idx, c) in hex.as_bytes().iter().enumerate() { + b <<= 4; + match *c { + b'A'..=b'F' => b |= c - b'A' + 10, + b'a'..=b'f' => b |= c - b'a' + 10, + b'0'..=b'9' => b |= c - b'0', + _ => panic!("Bad hex"), + } + if (idx & 1) == 1 { + out.push(b); + b = 0; + } + } + out + } + + #[test] + fn duplicate_crash() { super::do_test(&extend_vec_from_hex("00")); } +} diff --git a/fuzz/generate-files.sh b/fuzz/generate-files.sh new file mode 100755 index 0000000..221d02d --- /dev/null +++ b/fuzz/generate-files.sh @@ -0,0 +1,108 @@ +#!/usr/bin/env bash + +set -euo pipefail + +REPO_DIR=$(git rev-parse --show-toplevel) + +# can't find the file because of the ENV var +# shellcheck source=/dev/null +source "$REPO_DIR/fuzz/fuzz-util.sh" + +# 1. 
Generate fuzz/Cargo.toml +cat > "$REPO_DIR/fuzz/Cargo.toml" <> "$REPO_DIR/fuzz/Cargo.toml" < "$REPO_DIR/.github/workflows/cron-daily-fuzz.yml" <executed_\${{ matrix.fuzz_target }} + - uses: actions/upload-artifact@v3 + with: + name: executed_\${{ matrix.fuzz_target }} + path: executed_\${{ matrix.fuzz_target }} + + verify-execution: + if: \${{ !github.event.act }} + needs: fuzz + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/download-artifact@v3 + - name: Display structure of downloaded files + run: ls -R + - run: find executed_* -type f -exec cat {} + | sort > executed + - run: source ./fuzz/fuzz-util.sh && listTargetNames | sort | diff - executed +EOF diff --git a/jsonrpc/Cargo.toml b/jsonrpc/Cargo.toml new file mode 100644 index 0000000..f63a2e8 --- /dev/null +++ b/jsonrpc/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "jsonrpc" +version = "0.18.0" +authors = ["Andrew Poelstra "] +license = "CC0-1.0" +homepage = "https://github.com/apoelstra/rust-jsonrpc/" +repository = "https://github.com/apoelstra/rust-jsonrpc/" +documentation = "https://docs.rs/jsonrpc/" +description = "Rust support for the JSON-RPC 2.0 protocol" +keywords = [ "protocol", "json", "http", "jsonrpc" ] +readme = "README.md" +edition = "2021" +rust-version = "1.56.1" +exclude = ["tests", "contrib"] + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[features] +default = [ "simple_http", "simple_tcp" ] +# A bare-minimum HTTP transport. +simple_http = [ "base64" ] +# A transport that uses `minreq` as the HTTP client. 
+minreq_http = [ "base64", "minreq" ] +# Basic transport over a raw TcpListener +simple_tcp = [] +# Basic transport over a raw UnixStream +simple_uds = [] +# Enable Socks5 Proxy in transport +proxy = ["socks"] + +[dependencies] +serde = { version = "1", features = ["derive"] } +serde_json = { version = "1", features = [ "raw_value" ] } + +base64 = { version = "0.13.0", optional = true } +minreq = { version = "2.7.0", features = ["json-using-serde"], optional = true } +socks = { version = "0.3.4", optional = true} + +[lints.rust] +unexpected_cfgs = { level = "deny", check-cfg = ['cfg(jsonrpc_fuzz)'] } diff --git a/jsonrpc/README.md b/jsonrpc/README.md new file mode 100644 index 0000000..40cbd5b --- /dev/null +++ b/jsonrpc/README.md @@ -0,0 +1,47 @@ +[![Status](https://travis-ci.org/apoelstra/rust-jsonrpc.png?branch=master)](https://travis-ci.org/apoelstra/rust-jsonrpc) + +# Rust Version compatibility + +This library is compatible with Rust **1.63.0** or higher. + +# Rust JSONRPC Client + +Rudimentary support for sending JSONRPC 2.0 requests and receiving responses. + +As an example, hit a local bitcoind JSON-RPC endpoint and call the `uptime` command. + +```rust +use jsonrpc::Client; +use jsonrpc::simple_http::{self, SimpleHttpTransport}; + +fn client(url: &str, user: &str, pass: &str) -> Result { + let t = SimpleHttpTransport::builder() + .url(url)? + .auth(user, Some(pass)) + .build(); + + Ok(Client::with_transport(t)) +} + +// Demonstrate an example JSON-RCP call against bitcoind. +fn main() { + let client = client("localhost:18443", "user", "pass").expect("failed to create client"); + let request = client.build_request("uptime", None); + let response = client.send_request(request).expect("send_request failed"); + + // For other commands this would be a struct matching the returned json. 
+ let result: u64 = response.result().expect("response is an error, use check_error"); + println!("bitcoind uptime: {}", result); +} +``` + +## Githooks + +To assist devs in catching errors _before_ running CI we provide some githooks. If you do not +already have locally configured githooks you can use the ones in this repository by running, in the +root directory of the repository: +``` +git config --local core.hooksPath githooks/ +``` + +Alternatively add symlinks in your `.git/hooks` directory to any of the githooks we provide. diff --git a/jsonrpc/contrib/test_vars.sh b/jsonrpc/contrib/test_vars.sh new file mode 100644 index 0000000..72d3c96 --- /dev/null +++ b/jsonrpc/contrib/test_vars.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +# `rust-jsonrpc` does not have a std feature. +FEATURES_WITH_STD="" + +# So this is the var to use for all tests. +FEATURES_WITHOUT_STD="simple_http minreq_http simple_tcp simple_uds proxy" + +# Run these examples. +EXAMPLES="" diff --git a/jsonrpc/src/client.rs b/jsonrpc/src/client.rs new file mode 100644 index 0000000..21042d5 --- /dev/null +++ b/jsonrpc/src/client.rs @@ -0,0 +1,251 @@ +// SPDX-License-Identifier: CC0-1.0 + +//! # Client support +//! +//! Support for connecting to JSONRPC servers over HTTP, sending requests, +//! and parsing responses + +use std::borrow::Cow; +use std::collections::HashMap; +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::sync::atomic; + +use serde_json::value::RawValue; +use serde_json::Value; + +use crate::error::Error; +use crate::{Request, Response}; + +/// An interface for a transport over which to use the JSONRPC protocol. +pub trait Transport: Send + Sync + 'static { + /// Sends an RPC request over the transport. + fn send_request(&self, _: Request) -> Result; + /// Sends a batch of RPC requests over the transport. + fn send_batch(&self, _: &[Request]) -> Result, Error>; + /// Formats the target of this transport. I.e. the URL/socket/... 
+ fn fmt_target(&self, f: &mut fmt::Formatter) -> fmt::Result; +} + +/// A JSON-RPC client. +/// +/// Creates a new Client using one of the transport-specific constructors e.g., +/// [`Client::simple_http`] for a bare-minimum HTTP transport. +pub struct Client { + pub(crate) transport: Box, + nonce: atomic::AtomicUsize, +} + +impl Client { + /// Creates a new client with the given transport. + pub fn with_transport(transport: T) -> Client { + Client { transport: Box::new(transport), nonce: atomic::AtomicUsize::new(1) } + } + + /// Builds a request. + /// + /// To construct the arguments, one can use one of the shorthand methods + /// [`crate::arg`] or [`crate::try_arg`]. + pub fn build_request<'a>(&self, method: &'a str, params: Option<&'a RawValue>) -> Request<'a> { + let nonce = self.nonce.fetch_add(1, atomic::Ordering::Relaxed); + Request { method, params, id: serde_json::Value::from(nonce), jsonrpc: Some("2.0") } + } + + /// Sends a request to a client. + pub fn send_request(&self, request: Request) -> Result { + self.transport.send_request(request) + } + + /// Sends a batch of requests to the client. + /// + /// Note that the requests need to have valid IDs, so it is advised to create the requests + /// with [`Client::build_request`]. + /// + /// # Returns + /// + /// The return vector holds the response for the request at the corresponding index. If no + /// response was provided, it's [`None`]. + pub fn send_batch(&self, requests: &[Request]) -> Result>, Error> { + if requests.is_empty() { + return Err(Error::EmptyBatch); + } + + // If the request body is invalid JSON, the response is a single response object. + // We ignore this case since we are confident we are producing valid JSON. 
+ let responses = self.transport.send_batch(requests)?; + if responses.len() > requests.len() { + return Err(Error::WrongBatchResponseSize); + } + + //TODO(stevenroose) check if the server preserved order to avoid doing the mapping + + // First index responses by ID and catch duplicate IDs. + let mut by_id = HashMap::with_capacity(requests.len()); + for resp in responses.into_iter() { + let id = HashableValue(Cow::Owned(resp.id.clone())); + if let Some(dup) = by_id.insert(id, resp) { + return Err(Error::BatchDuplicateResponseId(dup.id)); + } + } + // Match responses to the requests. + let results = + requests.iter().map(|r| by_id.remove(&HashableValue(Cow::Borrowed(&r.id)))).collect(); + + // Since we're also just producing the first duplicate ID, we can also just produce the + // first incorrect ID in case there are multiple. + if let Some(id) = by_id.keys().next() { + return Err(Error::WrongBatchResponseId((*id.0).clone())); + } + + Ok(results) + } + + /// Makes a request and deserializes the response. + /// + /// To construct the arguments, one can use one of the shorthand methods + /// [`crate::arg`] or [`crate::try_arg`]. + pub fn call serde::de::Deserialize<'a>>( + &self, + method: &str, + args: Option<&RawValue>, + ) -> Result { + let request = self.build_request(method, args); + let id = request.id.clone(); + + let response = self.send_request(request)?; + if response.jsonrpc.is_some() && response.jsonrpc != Some(From::from("2.0")) { + return Err(Error::VersionMismatch); + } + if response.id != id { + return Err(Error::NonceMismatch); + } + + response.result() + } +} + +impl fmt::Debug for crate::Client { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "jsonrpc::Client(")?; + self.transport.fmt_target(f)?; + write!(f, ")") + } +} + +impl From for Client { + fn from(t: T) -> Client { Client::with_transport(t) } +} + +/// Newtype around `Value` which allows hashing for use as hashmap keys, +/// this is needed for batch requests. 
+/// +/// The reason `Value` does not support `Hash` or `Eq` by itself +/// is that it supports `f64` values; but for batch requests we +/// will only be hashing the "id" field of the request/response +/// pair, which should never need decimal precision and therefore +/// never use `f64`. +#[derive(Clone, PartialEq, Debug)] +struct HashableValue<'a>(pub Cow<'a, Value>); + +impl<'a> Eq for HashableValue<'a> {} + +impl<'a> Hash for HashableValue<'a> { + fn hash(&self, state: &mut H) { + match *self.0.as_ref() { + Value::Null => "null".hash(state), + Value::Bool(false) => "false".hash(state), + Value::Bool(true) => "true".hash(state), + Value::Number(ref n) => { + "number".hash(state); + if let Some(n) = n.as_i64() { + n.hash(state); + } else if let Some(n) = n.as_u64() { + n.hash(state); + } else { + n.to_string().hash(state); + } + } + Value::String(ref s) => { + "string".hash(state); + s.hash(state); + } + Value::Array(ref v) => { + "array".hash(state); + v.len().hash(state); + for obj in v { + HashableValue(Cow::Borrowed(obj)).hash(state); + } + } + Value::Object(ref m) => { + "object".hash(state); + m.len().hash(state); + for (key, val) in m { + key.hash(state); + HashableValue(Cow::Borrowed(val)).hash(state); + } + } + } + } +} + +#[cfg(test)] +mod tests { + use std::borrow::Cow; + use std::collections::HashSet; + use std::str::FromStr; + use std::sync; + + use super::*; + + struct DummyTransport; + impl Transport for DummyTransport { + fn send_request(&self, _: Request) -> Result { Err(Error::NonceMismatch) } + fn send_batch(&self, _: &[Request]) -> Result, Error> { Ok(vec![]) } + fn fmt_target(&self, _: &mut fmt::Formatter) -> fmt::Result { Ok(()) } + } + + #[test] + fn sanity() { + let client = Client::with_transport(DummyTransport); + assert_eq!(client.nonce.load(sync::atomic::Ordering::Relaxed), 1); + let req1 = client.build_request("test", None); + assert_eq!(client.nonce.load(sync::atomic::Ordering::Relaxed), 2); + let req2 = client.build_request("test", 
None); + assert_eq!(client.nonce.load(sync::atomic::Ordering::Relaxed), 3); + assert!(req1.id != req2.id); + } + + #[test] + fn hash_value() { + let val = HashableValue(Cow::Owned(Value::from_str("null").unwrap())); + let t = HashableValue(Cow::Owned(Value::from_str("true").unwrap())); + let f = HashableValue(Cow::Owned(Value::from_str("false").unwrap())); + let ns = + HashableValue(Cow::Owned(Value::from_str("[0, -0, 123.4567, -100000000]").unwrap())); + let m = + HashableValue(Cow::Owned(Value::from_str("{ \"field\": 0, \"field\": -0 }").unwrap())); + + let mut coll = HashSet::new(); + + assert!(!coll.contains(&val)); + coll.insert(val.clone()); + assert!(coll.contains(&val)); + + assert!(!coll.contains(&t)); + assert!(!coll.contains(&f)); + coll.insert(t.clone()); + assert!(coll.contains(&t)); + assert!(!coll.contains(&f)); + coll.insert(f.clone()); + assert!(coll.contains(&t)); + assert!(coll.contains(&f)); + + assert!(!coll.contains(&ns)); + coll.insert(ns.clone()); + assert!(coll.contains(&ns)); + + assert!(!coll.contains(&m)); + coll.insert(m.clone()); + assert!(coll.contains(&m)); + } +} diff --git a/jsonrpc/src/error.rs b/jsonrpc/src/error.rs new file mode 100644 index 0000000..39c520f --- /dev/null +++ b/jsonrpc/src/error.rs @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: CC0-1.0 + +//! # Error handling +//! +//! Some useful methods for creating Error objects. 
+ +use std::{error, fmt}; + +use serde::{Deserialize, Serialize}; + +use crate::Response; + +/// A library error +#[derive(Debug)] +#[non_exhaustive] +pub enum Error { + /// A transport error + Transport(Box), + /// Json error + Json(serde_json::Error), + /// Error response + Rpc(RpcError), + /// Response to a request did not have the expected nonce + NonceMismatch, + /// Response to a request had a jsonrpc field other than "2.0" + VersionMismatch, + /// Batches can't be empty + EmptyBatch, + /// Too many responses returned in batch + WrongBatchResponseSize, + /// Batch response contained a duplicate ID + BatchDuplicateResponseId(serde_json::Value), + /// Batch response contained an ID that didn't correspond to any request ID + WrongBatchResponseId(serde_json::Value), +} + +impl From for Error { + fn from(e: serde_json::Error) -> Error { Error::Json(e) } +} + +impl From for Error { + fn from(e: RpcError) -> Error { Error::Rpc(e) } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use Error::*; + + match *self { + Transport(ref e) => write!(f, "transport error: {}", e), + Json(ref e) => write!(f, "JSON decode error: {}", e), + Rpc(ref r) => write!(f, "RPC error response: {:?}", r), + BatchDuplicateResponseId(ref v) => write!(f, "duplicate RPC batch response ID: {}", v), + WrongBatchResponseId(ref v) => write!(f, "wrong RPC batch response ID: {}", v), + NonceMismatch => write!(f, "nonce of response did not match nonce of request"), + VersionMismatch => write!(f, "`jsonrpc` field set to non-\"2.0\""), + EmptyBatch => write!(f, "batches can't be empty"), + WrongBatchResponseSize => write!(f, "too many responses returned in batch"), + } + } +} + +impl error::Error for Error { + fn source(&self) -> Option<&(dyn error::Error + 'static)> { + use self::Error::*; + + match *self { + Rpc(_) + | NonceMismatch + | VersionMismatch + | EmptyBatch + | WrongBatchResponseSize + | BatchDuplicateResponseId(_) + | WrongBatchResponseId(_) => 
None, + Transport(ref e) => Some(&**e), + Json(ref e) => Some(e), + } + } +} + +/// Standard error responses, as described at at +/// +/// +/// # Documentation Copyright +/// Copyright (C) 2007-2010 by the JSON-RPC Working Group +/// +/// This document and translations of it may be used to implement JSON-RPC, it +/// may be copied and furnished to others, and derivative works that comment +/// on or otherwise explain it or assist in its implementation may be prepared, +/// copied, published and distributed, in whole or in part, without restriction +/// of any kind, provided that the above copyright notice and this paragraph +/// are included on all such copies and derivative works. However, this document +/// itself may not be modified in any way. +/// +/// The limited permissions granted above are perpetual and will not be revoked. +/// +/// This document and the information contained herein is provided "AS IS" and +/// ALL WARRANTIES, EXPRESS OR IMPLIED are DISCLAIMED, INCLUDING BUT NOT LIMITED +/// TO ANY WARRANTY THAT THE USE OF THE INFORMATION HEREIN WILL NOT INFRINGE ANY +/// RIGHTS OR ANY IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A +/// PARTICULAR PURPOSE. +/// +#[derive(Debug)] +pub enum StandardError { + /// Invalid JSON was received by the server. + /// An error occurred on the server while parsing the JSON text. + ParseError, + /// The JSON sent is not a valid Request object. + InvalidRequest, + /// The method does not exist / is not available. + MethodNotFound, + /// Invalid method parameter(s). + InvalidParams, + /// Internal JSON-RPC error. 
+ InternalError, +} + +/// A JSONRPC error object +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct RpcError { + /// The integer identifier of the error + pub code: i32, + /// A string describing the error + pub message: String, + /// Additional data specific to the error + pub data: Option>, +} + +/// Create a standard error responses +pub fn standard_error( + code: StandardError, + data: Option>, +) -> RpcError { + match code { + StandardError::ParseError => + RpcError { code: -32700, message: "Parse error".to_string(), data }, + StandardError::InvalidRequest => + RpcError { code: -32600, message: "Invalid Request".to_string(), data }, + StandardError::MethodNotFound => + RpcError { code: -32601, message: "Method not found".to_string(), data }, + StandardError::InvalidParams => + RpcError { code: -32602, message: "Invalid params".to_string(), data }, + StandardError::InternalError => + RpcError { code: -32603, message: "Internal error".to_string(), data }, + } +} + +/// Converts a Rust `Result` to a JSONRPC response object +pub fn result_to_response( + result: Result, + id: serde_json::Value, +) -> Response { + match result { + Ok(data) => Response { + result: Some( + serde_json::value::RawValue::from_string(serde_json::to_string(&data).unwrap()) + .unwrap(), + ), + error: None, + id, + jsonrpc: Some(String::from("2.0")), + }, + Err(err) => + Response { result: None, error: Some(err), id, jsonrpc: Some(String::from("2.0")) }, + } +} + +#[cfg(test)] +mod tests { + use serde_json; + + use super::StandardError::{ + InternalError, InvalidParams, InvalidRequest, MethodNotFound, ParseError, + }; + use super::{result_to_response, standard_error}; + + #[test] + fn test_parse_error() { + let resp = result_to_response(Err(standard_error(ParseError, None)), From::from(1)); + assert!(resp.result.is_none()); + assert!(resp.error.is_some()); + assert_eq!(resp.id, serde_json::Value::from(1)); + assert_eq!(resp.error.unwrap().code, -32700); + } + + #[test] + fn 
test_invalid_request() { + let resp = result_to_response(Err(standard_error(InvalidRequest, None)), From::from(1)); + assert!(resp.result.is_none()); + assert!(resp.error.is_some()); + assert_eq!(resp.id, serde_json::Value::from(1)); + assert_eq!(resp.error.unwrap().code, -32600); + } + + #[test] + fn test_method_not_found() { + let resp = result_to_response(Err(standard_error(MethodNotFound, None)), From::from(1)); + assert!(resp.result.is_none()); + assert!(resp.error.is_some()); + assert_eq!(resp.id, serde_json::Value::from(1)); + assert_eq!(resp.error.unwrap().code, -32601); + } + + #[test] + fn test_invalid_params() { + let resp = result_to_response(Err(standard_error(InvalidParams, None)), From::from("123")); + assert!(resp.result.is_none()); + assert!(resp.error.is_some()); + assert_eq!(resp.id, serde_json::Value::from("123")); + assert_eq!(resp.error.unwrap().code, -32602); + } + + #[test] + fn test_internal_error() { + let resp = result_to_response(Err(standard_error(InternalError, None)), From::from(-1)); + assert!(resp.result.is_none()); + assert!(resp.error.is_some()); + assert_eq!(resp.id, serde_json::Value::from(-1)); + assert_eq!(resp.error.unwrap().code, -32603); + } +} diff --git a/jsonrpc/src/http/minreq_http.rs b/jsonrpc/src/http/minreq_http.rs new file mode 100644 index 0000000..4195624 --- /dev/null +++ b/jsonrpc/src/http/minreq_http.rs @@ -0,0 +1,270 @@ +//! This module implements the [`crate::client::Transport`] trait using [`minreq`] +//! as the underlying HTTP transport. +//! +//! [minreq]: + +#[cfg(jsonrpc_fuzz)] +use std::io::{self, Read, Write}; +#[cfg(jsonrpc_fuzz)] +use std::sync::Mutex; +use std::time::Duration; +use std::{error, fmt}; + +use crate::client::Transport; +use crate::{Request, Response}; + +const DEFAULT_URL: &str = "http://localhost"; +const DEFAULT_PORT: u16 = 8332; // the default RPC port for bitcoind. 
+#[cfg(not(jsonrpc_fuzz))] +const DEFAULT_TIMEOUT_SECONDS: u64 = 15; +#[cfg(jsonrpc_fuzz)] +const DEFAULT_TIMEOUT_SECONDS: u64 = 1; + +/// An HTTP transport that uses [`minreq`] and is useful for running a bitcoind RPC client. +#[derive(Clone, Debug)] +pub struct MinreqHttpTransport { + /// URL of the RPC server. + url: String, + /// timeout only supports second granularity. + timeout: Duration, + /// The value of the `Authorization` HTTP header, i.e., a base64 encoding of 'user:password'. + basic_auth: Option, +} + +impl Default for MinreqHttpTransport { + fn default() -> Self { + MinreqHttpTransport { + url: format!("{}:{}", DEFAULT_URL, DEFAULT_PORT), + timeout: Duration::from_secs(DEFAULT_TIMEOUT_SECONDS), + basic_auth: None, + } + } +} + +impl MinreqHttpTransport { + /// Constructs a new [`MinreqHttpTransport`] with default parameters. + pub fn new() -> Self { MinreqHttpTransport::default() } + + /// Returns a builder for [`MinreqHttpTransport`]. + pub fn builder() -> Builder { Builder::new() } + + fn request(&self, req: impl serde::Serialize) -> Result + where + R: for<'a> serde::de::Deserialize<'a>, + { + let req = match &self.basic_auth { + Some(auth) => minreq::Request::new(minreq::Method::Post, &self.url) + .with_timeout(self.timeout.as_secs()) + .with_header("Authorization", auth) + .with_json(&req)?, + None => minreq::Request::new(minreq::Method::Post, &self.url) + .with_timeout(self.timeout.as_secs()) + .with_json(&req)?, + }; + + // Send the request and parse the response. If the response is an error that does not + // contain valid JSON in its body (for instance if the bitcoind HTTP server work queue + // depth is exceeded), return the raw HTTP error so users can match against it. 
+ let resp = req.send()?; + match resp.json() { + Ok(json) => Ok(json), + Err(minreq_err) => + if resp.status_code != 200 { + Err(Error::Http(HttpError { + status_code: resp.status_code, + body: resp.as_str().unwrap_or("").to_string(), + })) + } else { + Err(Error::Minreq(minreq_err)) + }, + } + } +} + +impl Transport for MinreqHttpTransport { + fn send_request(&self, req: Request) -> Result { + Ok(self.request(req)?) + } + + fn send_batch(&self, reqs: &[Request]) -> Result, crate::Error> { + Ok(self.request(reqs)?) + } + + fn fmt_target(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.url) } +} + +/// Builder for simple bitcoind [`MinreqHttpTransport`]. +#[derive(Clone, Debug)] +pub struct Builder { + tp: MinreqHttpTransport, +} + +impl Builder { + /// Constructs a new [`Builder`] with default configuration and the URL to use. + pub fn new() -> Builder { Builder { tp: MinreqHttpTransport::new() } } + + /// Sets the timeout after which requests will abort if they aren't finished. + pub fn timeout(mut self, timeout: Duration) -> Self { + self.tp.timeout = timeout; + self + } + + /// Sets the URL of the server to the transport. + #[allow(clippy::assigning_clones)] // clone_into is only available in Rust 1.63 + pub fn url(mut self, url: &str) -> Result { + self.tp.url = url.to_owned(); + Ok(self) + } + + /// Adds authentication information to the transport. + pub fn basic_auth(mut self, user: String, pass: Option) -> Self { + let mut s = user; + s.push(':'); + if let Some(ref pass) = pass { + s.push_str(pass.as_ref()); + } + self.tp.basic_auth = Some(format!("Basic {}", &base64::encode(s.as_bytes()))); + self + } + + /// Adds authentication information to the transport using a cookie string ('user:pass'). + /// + /// Does no checking on the format of the cookie string, just base64 encodes whatever is passed in. 
+ /// +/// # Examples +/// +/// ```no_run +/// # use jsonrpc::minreq_http::MinreqHttpTransport; +/// # use std::fs; +/// # use std::path::Path; +/// # let cookie_file = Path::new("~/.bitcoind/.cookie"); +/// let cookie = fs::read_to_string(cookie_file).expect("couldn't read cookie file"); +/// let client = MinreqHttpTransport::builder().cookie_auth(cookie); +/// ``` + pub fn cookie_auth>(mut self, cookie: S) -> Self { + self.tp.basic_auth = Some(format!("Basic {}", &base64::encode(cookie.as_ref().as_bytes()))); + self + } + + /// Builds the final [`MinreqHttpTransport`]. + pub fn build(self) -> MinreqHttpTransport { self.tp } +} + +impl Default for Builder { + fn default() -> Self { Builder::new() } +} + +/// An HTTP error. +#[derive(Debug)] +pub struct HttpError { + /// Status code of the error response. + pub status_code: i32, + /// Raw body of the error response. + pub body: String, +} + +impl fmt::Display for HttpError { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + write!(f, "status: {}, body: {}", self.status_code, self.body) + } +} + +impl error::Error for HttpError {} + +/// Error that can happen when sending requests. In case of error, a JSON error is returned if the +/// body of the response could be parsed as such. Otherwise, an HTTP error is returned containing +/// the status code and the raw body. +#[non_exhaustive] +#[derive(Debug)] +pub enum Error { + /// JSON parsing error. + Json(serde_json::Error), + /// Minreq error. + Minreq(minreq::Error), + /// HTTP error that does not contain valid JSON as body.
+ Http(HttpError), +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + match *self { + Error::Json(ref e) => write!(f, "parsing JSON failed: {}", e), + Error::Minreq(ref e) => write!(f, "minreq: {}", e), + Error::Http(ref e) => write!(f, "http ({})", e), + } + } +} + +impl error::Error for Error { + fn source(&self) -> Option<&(dyn error::Error + 'static)> { + use self::Error::*; + + match *self { + Json(ref e) => Some(e), + Minreq(ref e) => Some(e), + Http(ref e) => Some(e), + } + } +} + +impl From for Error { + fn from(e: serde_json::Error) -> Self { Error::Json(e) } +} + +impl From for Error { + fn from(e: minreq::Error) -> Self { Error::Minreq(e) } +} + +impl From for crate::Error { + fn from(e: Error) -> crate::Error { + match e { + Error::Json(e) => crate::Error::Json(e), + e => crate::Error::Transport(Box::new(e)), + } + } +} + +/// Global mutex used by the fuzzing harness to inject data into the read end of the TCP stream. +#[cfg(jsonrpc_fuzz)] +pub static FUZZ_TCP_SOCK: Mutex>>> = Mutex::new(None); + +#[cfg(jsonrpc_fuzz)] +#[derive(Clone, Debug)] +struct TcpStream; + +#[cfg(jsonrpc_fuzz)] +mod impls { + use super::*; + + impl Read for TcpStream { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + match *FUZZ_TCP_SOCK.lock().unwrap() { + Some(ref mut cursor) => io::Read::read(cursor, buf), + None => Ok(0), + } + } + } + impl Write for TcpStream { + fn write(&mut self, buf: &[u8]) -> io::Result { io::sink().write(buf) } + fn flush(&mut self) -> io::Result<()> { Ok(()) } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::Client; + + #[test] + fn construct() { + let tp = Builder::new() + .timeout(Duration::from_millis(100)) + .url("http://localhost:22") + .unwrap() + .basic_auth("user".to_string(), None) + .build(); + let _ = Client::with_transport(tp); + } +} diff --git a/jsonrpc/src/http/mod.rs b/jsonrpc/src/http/mod.rs new file mode 100644 index 0000000..318d8c2 --- /dev/null +++ 
b/jsonrpc/src/http/mod.rs @@ -0,0 +1,17 @@ +//! HTTP transport modules. + +#[cfg(feature = "simple_http")] +pub mod simple_http; + +#[cfg(feature = "minreq_http")] +pub mod minreq_http; + +/// The default TCP port to use for connections. +/// Set to 8332, the default RPC port for bitcoind. +pub const DEFAULT_PORT: u16 = 8332; + +/// The Default SOCKS5 Port to use for proxy connection. +/// Set to 9050, the default RPC port for tor. +// Currently only used by `simple_http` module, here for consistency. +#[cfg(feature = "proxy")] +pub const DEFAULT_PROXY_PORT: u16 = 9050; diff --git a/jsonrpc/src/http/simple_http.rs b/jsonrpc/src/http/simple_http.rs new file mode 100644 index 0000000..de104ff --- /dev/null +++ b/jsonrpc/src/http/simple_http.rs @@ -0,0 +1,787 @@ +// SPDX-License-Identifier: CC0-1.0 + +//! This module implements a minimal and non standard conforming HTTP 1.0 +//! round-tripper that works with the bitcoind RPC server. This can be used +//! if minimal dependencies are a goal and synchronous communication is ok. + +use std::io::{BufRead, BufReader, Read, Write}; +#[cfg(not(jsonrpc_fuzz))] +use std::net::TcpStream; +use std::net::{SocketAddr, ToSocketAddrs}; +use std::sync::{Arc, Mutex, MutexGuard}; +use std::time::Duration; +use std::{error, fmt, io, net, num}; + +#[cfg(feature = "proxy")] +use socks::Socks5Stream; + +use crate::client::Transport; +use crate::http::DEFAULT_PORT; +#[cfg(feature = "proxy")] +use crate::http::DEFAULT_PROXY_PORT; +use crate::{Request, Response}; + +/// Absolute maximum content length allowed before cutting off the response. +const FINAL_RESP_ALLOC: u64 = 1024 * 1024 * 1024; + +#[cfg(not(jsonrpc_fuzz))] +const DEFAULT_TIMEOUT: Duration = Duration::from_secs(15); + +#[cfg(jsonrpc_fuzz)] +const DEFAULT_TIMEOUT: Duration = Duration::from_millis(1); + +/// Simple HTTP transport that implements the necessary subset of HTTP for +/// running a bitcoind RPC client. 
+#[derive(Clone, Debug)] +pub struct SimpleHttpTransport { + addr: net::SocketAddr, + path: String, + timeout: Duration, + /// The value of the `Authorization` HTTP header. + basic_auth: Option, + #[cfg(feature = "proxy")] + proxy_addr: net::SocketAddr, + #[cfg(feature = "proxy")] + proxy_auth: Option<(String, String)>, + sock: Arc>>>, +} + +impl Default for SimpleHttpTransport { + fn default() -> Self { + SimpleHttpTransport { + addr: net::SocketAddr::new( + net::IpAddr::V4(net::Ipv4Addr::new(127, 0, 0, 1)), + DEFAULT_PORT, + ), + path: "/".to_owned(), + timeout: DEFAULT_TIMEOUT, + basic_auth: None, + #[cfg(feature = "proxy")] + proxy_addr: net::SocketAddr::new( + net::IpAddr::V4(net::Ipv4Addr::new(127, 0, 0, 1)), + DEFAULT_PROXY_PORT, + ), + #[cfg(feature = "proxy")] + proxy_auth: None, + sock: Arc::new(Mutex::new(None)), + } + } +} + +impl SimpleHttpTransport { + /// Constructs a new [`SimpleHttpTransport`] with default parameters. + pub fn new() -> Self { SimpleHttpTransport::default() } + + /// Returns a builder for [`SimpleHttpTransport`]. + pub fn builder() -> Builder { Builder::new() } + + /// Replaces the URL of the transport. + pub fn set_url(&mut self, url: &str) -> Result<(), Error> { + let url = check_url(url)?; + self.addr = url.0; + self.path = url.1; + Ok(()) + } + + /// Replaces only the path part of the URL. 
+ pub fn set_url_path(&mut self, path: String) { self.path = path; } + + fn request(&self, req: impl serde::Serialize) -> Result + where + R: for<'a> serde::de::Deserialize<'a>, + { + match self.try_request(req) { + Ok(response) => Ok(response), + Err(err) => { + // No part of this codebase should panic, so unwrapping a mutex lock is fine + *self.sock.lock().expect("poisoned mutex") = None; + Err(err) + } + } + } + + #[cfg(feature = "proxy")] + fn fresh_socket(&self) -> Result { + let stream = if let Some((username, password)) = &self.proxy_auth { + Socks5Stream::connect_with_password( + self.proxy_addr, + self.addr, + username.as_str(), + password.as_str(), + )? + } else { + Socks5Stream::connect(self.proxy_addr, self.addr)? + }; + Ok(stream.into_inner()) + } + + #[cfg(not(feature = "proxy"))] + fn fresh_socket(&self) -> Result { + let stream = TcpStream::connect_timeout(&self.addr, self.timeout)?; + stream.set_read_timeout(Some(self.timeout))?; + stream.set_write_timeout(Some(self.timeout))?; + Ok(stream) + } + + fn try_request(&self, req: impl serde::Serialize) -> Result + where + R: for<'a> serde::de::Deserialize<'a>, + { + // No part of this codebase should panic, so unwrapping a mutex lock is fine + let mut sock_lock: MutexGuard> = self.sock.lock().expect("poisoned mutex"); + if sock_lock.is_none() { + *sock_lock = Some(BufReader::new(self.fresh_socket()?)); + }; + // In the immediately preceding block, we made sure that `sock` is non-`None`, + // so unwrapping here is fine. + let sock: &mut BufReader<_> = sock_lock.as_mut().unwrap(); + + // Serialize the body first so we can set the Content-Length header. 
+ let body = serde_json::to_vec(&req)?; + + let mut request_bytes = Vec::new(); + + request_bytes.write_all(b"POST ")?; + request_bytes.write_all(self.path.as_bytes())?; + request_bytes.write_all(b" HTTP/1.1\r\n")?; + // Write headers + request_bytes.write_all(b"host: ")?; + request_bytes.write_all(self.addr.to_string().as_bytes())?; + request_bytes.write_all(b"\r\n")?; + request_bytes.write_all(b"Content-Type: application/json\r\n")?; + request_bytes.write_all(b"Content-Length: ")?; + request_bytes.write_all(body.len().to_string().as_bytes())?; + request_bytes.write_all(b"\r\n")?; + if let Some(ref auth) = self.basic_auth { + request_bytes.write_all(b"Authorization: ")?; + request_bytes.write_all(auth.as_ref())?; + request_bytes.write_all(b"\r\n")?; + } + // Write body + request_bytes.write_all(b"\r\n")?; + request_bytes.write_all(&body)?; + + // Send HTTP request + let write_success = sock.get_mut().write_all(request_bytes.as_slice()).is_ok() + && sock.get_mut().flush().is_ok(); + + // This indicates the socket is broken so let's retry the send once with a fresh socket + if !write_success { + *sock.get_mut() = self.fresh_socket()?; + sock.get_mut().write_all(request_bytes.as_slice())?; + sock.get_mut().flush()?; + } + + // Parse first HTTP response header line + let mut header_buf = String::new(); + let read_success = sock.read_line(&mut header_buf).is_ok(); + + // This is another possible indication that the socket is broken so let's retry the send once + // with a fresh socket IF the write attempt has not already experienced a failure + if (!read_success || header_buf.is_empty()) && write_success { + *sock.get_mut() = self.fresh_socket()?; + sock.get_mut().write_all(request_bytes.as_slice())?; + sock.get_mut().flush()?; + + sock.read_line(&mut header_buf)?; + } + + if header_buf.len() < 12 { + return Err(Error::HttpResponseTooShort { actual: header_buf.len(), needed: 12 }); + } + if !header_buf.as_bytes()[..12].is_ascii() { + return 
Err(Error::HttpResponseNonAsciiHello(header_buf.as_bytes()[..12].to_vec())); + } + if !header_buf.starts_with("HTTP/1.1 ") { + return Err(Error::HttpResponseBadHello { + actual: header_buf[0..9].into(), + expected: "HTTP/1.1 ".into(), + }); + } + let response_code = match header_buf[9..12].parse::() { + Ok(n) => n, + Err(e) => return Err(Error::HttpResponseBadStatus(header_buf[9..12].into(), e)), + }; + + // Parse response header fields + let mut content_length = None; + loop { + header_buf.clear(); + sock.read_line(&mut header_buf)?; + if header_buf == "\r\n" { + break; + } + header_buf.make_ascii_lowercase(); + + const CONTENT_LENGTH: &str = "content-length: "; + if let Some(s) = header_buf.strip_prefix(CONTENT_LENGTH) { + content_length = Some( + s.trim() + .parse::() + .map_err(|e| Error::HttpResponseBadContentLength(s.into(), e))?, + ); + } + + const TRANSFER_ENCODING: &str = "transfer-encoding: "; + if let Some(s) = header_buf.strip_prefix(TRANSFER_ENCODING) { + const CHUNKED: &str = "chunked"; + if s.trim() == CHUNKED { + return Err(Error::HttpResponseChunked); + } + } + } + + if response_code == 401 { + // There is no body in a 401 response, so don't try to read it + return Err(Error::HttpErrorCode(response_code)); + } + + // Read up to `content_length` bytes. Note that if there is no content-length + // header, we will assume an effectively infinite content length, i.e. we will + // just keep reading from the socket until it is closed. + let mut reader = match content_length { + None => sock.take(FINAL_RESP_ALLOC), + Some(n) if n > FINAL_RESP_ALLOC => { + return Err(Error::HttpResponseContentLengthTooLarge { + length: n, + max: FINAL_RESP_ALLOC, + }); + } + Some(n) => sock.take(n), + }; + + // Attempt to parse the response. Don't check the HTTP error code until + // after parsing, since Bitcoin Core will often return a descriptive JSON + // error structure which is more useful than the error code. 
+ match serde_json::from_reader(&mut reader) { + Ok(s) => { + if content_length.is_some() { + reader.bytes().count(); // consume any trailing bytes + } + Ok(s) + } + Err(e) => { + // If the response was not 200, assume the parse failed because of that + if response_code != 200 { + Err(Error::HttpErrorCode(response_code)) + } else { + // If it was 200 then probably it was legitimately a parse error + Err(e.into()) + } + } + } + } +} + +/// Does some very basic manual URL parsing because the uri/url crates +/// all have unicode-normalization as a dependency and that's broken. +fn check_url(url: &str) -> Result<(SocketAddr, String), Error> { + // The fallback port in case no port was provided. + // This changes when the http or https scheme was provided. + let mut fallback_port = DEFAULT_PORT; + + // We need to get the hostname and the port. + // (1) Split scheme + let after_scheme = { + let mut split = url.splitn(2, "://"); + let s = split.next().unwrap(); + match split.next() { + None => s, // no scheme present + Some(after) => { + // Check if the scheme is http or https. + if s == "http" { + fallback_port = 80; + } else if s == "https" { + fallback_port = 443; + } else { + return Err(Error::url(url, "scheme should be http or https")); + } + after + } + } + }; + // (2) split off path + let (before_path, path) = { + if let Some(slash) = after_scheme.find('/') { + (&after_scheme[0..slash], &after_scheme[slash..]) + } else { + (after_scheme, "/") + } + }; + // (3) split off auth part + let after_auth = { + let mut split = before_path.splitn(2, '@'); + let s = split.next().unwrap(); + split.next().unwrap_or(s) + }; + + // (4) Parse into socket address. + // At this point we either have or : + // `std::net::ToSocketAddrs` requires `&str` to have : format. + let mut addr = match after_auth.to_socket_addrs() { + Ok(addr) => addr, + Err(_) => { + // Invalid socket address. Try to add port. + format!("{}:{}", after_auth, fallback_port).to_socket_addrs()? 
+ } + }; + + match addr.next() { + Some(a) => Ok((a, path.to_owned())), + None => Err(Error::url(url, "invalid hostname: error extracting socket address")), + } +} + +impl Transport for SimpleHttpTransport { + fn send_request(&self, req: Request) -> Result { + Ok(self.request(req)?) + } + + fn send_batch(&self, reqs: &[Request]) -> Result, crate::Error> { + Ok(self.request(reqs)?) + } + + fn fmt_target(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "http://{}:{}{}", self.addr.ip(), self.addr.port(), self.path) + } +} + +/// Builder for simple bitcoind [`SimpleHttpTransport`]. +#[derive(Clone, Debug)] +pub struct Builder { + tp: SimpleHttpTransport, +} + +impl Builder { + /// Constructs a new [`Builder`] with default configuration. + pub fn new() -> Builder { Builder { tp: SimpleHttpTransport::new() } } + + /// Sets the timeout after which requests will abort if they aren't finished. + pub fn timeout(mut self, timeout: Duration) -> Self { + self.tp.timeout = timeout; + self + } + + /// Sets the URL of the server to the transport. + pub fn url(mut self, url: &str) -> Result { + self.tp.set_url(url)?; + Ok(self) + } + + /// Adds authentication information to the transport. + pub fn auth>(mut self, user: S, pass: Option) -> Self { + let mut auth = user.as_ref().to_owned(); + auth.push(':'); + if let Some(ref pass) = pass { + auth.push_str(pass.as_ref()); + } + self.tp.basic_auth = Some(format!("Basic {}", &base64::encode(auth.as_bytes()))); + self + } + + /// Adds authentication information to the transport using a cookie string ('user:pass'). + pub fn cookie_auth>(mut self, cookie: S) -> Self { + self.tp.basic_auth = Some(format!("Basic {}", &base64::encode(cookie.as_ref().as_bytes()))); + self + } + + /// Adds proxy address to the transport for SOCKS5 proxy. + #[cfg(feature = "proxy")] + pub fn proxy_addr>(mut self, proxy_addr: S) -> Result { + // We don't expect path in proxy address. 
+ self.tp.proxy_addr = check_url(proxy_addr.as_ref())?.0; + Ok(self) + } + + /// Adds optional proxy authentication as ('username', 'password'). + #[cfg(feature = "proxy")] + pub fn proxy_auth>(mut self, user: S, pass: S) -> Self { + self.tp.proxy_auth = + Some((user, pass)).map(|(u, p)| (u.as_ref().to_string(), p.as_ref().to_string())); + self + } + + /// Builds the final [`SimpleHttpTransport`]. + pub fn build(self) -> SimpleHttpTransport { self.tp } +} + +impl Default for Builder { + fn default() -> Self { Builder::new() } +} + +impl crate::Client { + /// Creates a new JSON-RPC client using a bare-minimum HTTP transport. + pub fn simple_http( + url: &str, + user: Option, + pass: Option, + ) -> Result { + let mut builder = Builder::new().url(url)?; + if let Some(user) = user { + builder = builder.auth(user, pass); + } + Ok(crate::Client::with_transport(builder.build())) + } + + /// Creates a new JSON_RPC client using a HTTP-Socks5 proxy transport. + #[cfg(feature = "proxy")] + pub fn http_proxy( + url: &str, + user: Option, + pass: Option, + proxy_addr: &str, + proxy_auth: Option<(&str, &str)>, + ) -> Result { + let mut builder = Builder::new().url(url)?; + if let Some(user) = user { + builder = builder.auth(user, pass); + } + builder = builder.proxy_addr(proxy_addr)?; + if let Some((user, pass)) = proxy_auth { + builder = builder.proxy_auth(user, pass); + } + let tp = builder.build(); + Ok(crate::Client::with_transport(tp)) + } +} + +/// Error that can happen when sending requests. +#[derive(Debug)] +pub enum Error { + /// An invalid URL was passed. + InvalidUrl { + /// The URL passed. + url: String, + /// The reason the URL is invalid. + reason: &'static str, + }, + /// An error occurred on the socket layer. + SocketError(io::Error), + /// The HTTP response was too short to even fit a HTTP 1.1 header. + HttpResponseTooShort { + /// The total length of the response. + actual: usize, + /// Minimum length we can parse. 
+ needed: usize, + }, + /// The HTTP response started with a HTTP/1.1 line which was not ASCII. + HttpResponseNonAsciiHello(Vec), + /// The HTTP response did not start with HTTP/1.1 + HttpResponseBadHello { + /// Actual HTTP-whatever string. + actual: String, + /// The hello string of the HTTP version we support. + expected: String, + }, + /// Could not parse the status value as a number. + HttpResponseBadStatus(String, num::ParseIntError), + /// Could not parse the status value as a number. + HttpResponseBadContentLength(String, num::ParseIntError), + /// The indicated content-length header exceeded our maximum. + HttpResponseContentLengthTooLarge { + /// The length indicated in the content-length header. + length: u64, + /// Our hard maximum on number of bytes we'll try to read. + max: u64, + }, + /// The server is replying with chunked encoding which is not supported + HttpResponseChunked, + /// Unexpected HTTP error code (non-200). + HttpErrorCode(u16), + /// Received EOF before getting as many bytes as were indicated by the content-length header. + IncompleteResponse { + /// The content-length header. + content_length: u64, + /// The number of bytes we actually read. + n_read: u64, + }, + /// JSON parsing error. + Json(serde_json::Error), +} + +impl Error { + /// Utility method to create [`Error::InvalidUrl`] variants. 
+ fn url>(url: U, reason: &'static str) -> Error { + Error::InvalidUrl { url: url.into(), reason } + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + use Error::*; + + match *self { + InvalidUrl { ref url, ref reason } => write!(f, "invalid URL '{}': {}", url, reason), + SocketError(ref e) => write!(f, "Couldn't connect to host: {}", e), + HttpResponseTooShort { ref actual, ref needed } => { + write!(f, "HTTP response too short: length {}, needed {}.", actual, needed) + } + HttpResponseNonAsciiHello(ref bytes) => { + write!(f, "HTTP response started with non-ASCII {:?}", bytes) + } + HttpResponseBadHello { ref actual, ref expected } => { + write!(f, "HTTP response started with `{}`; expected `{}`.", actual, expected) + } + HttpResponseBadStatus(ref status, ref err) => { + write!(f, "HTTP response had bad status code `{}`: {}.", status, err) + } + HttpResponseBadContentLength(ref len, ref err) => { + write!(f, "HTTP response had bad content length `{}`: {}.", len, err) + } + HttpResponseContentLengthTooLarge { length, max } => { + write!(f, "HTTP response content length {} exceeds our max {}.", length, max) + } + HttpErrorCode(c) => write!(f, "unexpected HTTP code: {}", c), + IncompleteResponse { content_length, n_read } => { + write!( + f, + "read {} bytes but HTTP response content-length header was {}.", + n_read, content_length + ) + } + Json(ref e) => write!(f, "JSON error: {}", e), + HttpResponseChunked => { + write!(f, "The server replied with a chunked response which is not supported") + } + } + } +} + +impl error::Error for Error { + fn source(&self) -> Option<&(dyn error::Error + 'static)> { + use self::Error::*; + + match *self { + InvalidUrl { .. } + | HttpResponseTooShort { .. } + | HttpResponseNonAsciiHello(..) + | HttpResponseBadHello { .. } + | HttpResponseBadStatus(..) + | HttpResponseBadContentLength(..) + | HttpResponseContentLengthTooLarge { .. 
} + | HttpErrorCode(_) + | IncompleteResponse { .. } + | HttpResponseChunked => None, + SocketError(ref e) => Some(e), + Json(ref e) => Some(e), + } + } +} + +impl From for Error { + fn from(e: io::Error) -> Self { Error::SocketError(e) } +} + +impl From for Error { + fn from(e: serde_json::Error) -> Self { Error::Json(e) } +} + +impl From for crate::Error { + fn from(e: Error) -> crate::Error { + match e { + Error::Json(e) => crate::Error::Json(e), + e => crate::Error::Transport(Box::new(e)), + } + } +} + +/// Global mutex used by the fuzzing harness to inject data into the read end of the TCP stream. +#[cfg(jsonrpc_fuzz)] +pub static FUZZ_TCP_SOCK: Mutex>>> = Mutex::new(None); + +#[cfg(jsonrpc_fuzz)] +#[derive(Clone, Debug)] +struct TcpStream; + +#[cfg(jsonrpc_fuzz)] +mod impls { + use super::*; + impl Read for TcpStream { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + match *FUZZ_TCP_SOCK.lock().unwrap() { + Some(ref mut cursor) => io::Read::read(cursor, buf), + None => Ok(0), + } + } + } + impl Write for TcpStream { + fn write(&mut self, buf: &[u8]) -> io::Result { io::sink().write(buf) } + fn flush(&mut self) -> io::Result<()> { Ok(()) } + } + + impl TcpStream { + pub fn connect_timeout(_: &SocketAddr, _: Duration) -> io::Result { Ok(TcpStream) } + pub fn set_read_timeout(&self, _: Option) -> io::Result<()> { Ok(()) } + pub fn set_write_timeout(&self, _: Option) -> io::Result<()> { Ok(()) } + } +} + +#[cfg(test)] +mod tests { + use std::net; + #[cfg(feature = "proxy")] + use std::str::FromStr; + + use super::*; + use crate::Client; + + #[test] + fn test_urls() { + let addr: net::SocketAddr = ("localhost", 22).to_socket_addrs().unwrap().next().unwrap(); + let urls = [ + "localhost:22", + "http://localhost:22/", + "https://localhost:22/walletname/stuff?it=working", + "http://me:weak@localhost:22/wallet", + ]; + for u in &urls { + let tp = Builder::new().url(u).unwrap().build(); + assert_eq!(tp.addr, addr); + } + + // Default port and 80 and 443 fill-in. 
+ let addr: net::SocketAddr = ("localhost", 80).to_socket_addrs().unwrap().next().unwrap(); + let tp = Builder::new().url("http://localhost/").unwrap().build(); + assert_eq!(tp.addr, addr); + let addr: net::SocketAddr = ("localhost", 443).to_socket_addrs().unwrap().next().unwrap(); + let tp = Builder::new().url("https://localhost/").unwrap().build(); + assert_eq!(tp.addr, addr); + let addr: net::SocketAddr = + ("localhost", super::DEFAULT_PORT).to_socket_addrs().unwrap().next().unwrap(); + let tp = Builder::new().url("localhost").unwrap().build(); + assert_eq!(tp.addr, addr); + + let valid_urls = [ + "localhost", + "127.0.0.1:8080", + "http://127.0.0.1:8080/", + "http://127.0.0.1:8080/rpc/test", + "https://127.0.0.1/rpc/test", + "http://[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:8300", + "http://[2001:0db8:85a3:0000:0000:8a2e:0370:7334]", + ]; + for u in &valid_urls { + let (addr, path) = check_url(u).unwrap(); + let builder = Builder::new().url(u).unwrap_or_else(|_| panic!("error for: {}", u)); + assert_eq!(builder.tp.addr, addr); + assert_eq!(builder.tp.path, path); + assert_eq!(builder.tp.timeout, DEFAULT_TIMEOUT); + assert_eq!(builder.tp.basic_auth, None); + #[cfg(feature = "proxy")] + assert_eq!(builder.tp.proxy_addr, SocketAddr::from_str("127.0.0.1:9050").unwrap()); + } + + let invalid_urls = [ + "127.0.0.1.0:8080", + "httpx://127.0.0.1:8080/", + "ftp://127.0.0.1:8080/rpc/test", + "http://127.0.0./rpc/test", + // NB somehow, Rust's IpAddr accepts "127.0.0" and adds the extra 0.. 
+ ]; + for u in &invalid_urls { + if let Ok(b) = Builder::new().url(u) { + let tp = b.build(); + panic!("expected error for url {}, got {:?}", u, tp); + } + } + } + + #[test] + fn construct() { + let tp = Builder::new() + .timeout(Duration::from_millis(100)) + .url("localhost:22") + .unwrap() + .auth("user", None) + .build(); + let _ = Client::with_transport(tp); + + let _ = Client::simple_http("localhost:22", None, None).unwrap(); + } + + #[cfg(feature = "proxy")] + #[test] + fn construct_with_proxy() { + let tp = Builder::new() + .timeout(Duration::from_millis(100)) + .url("localhost:22") + .unwrap() + .auth("user", None) + .proxy_addr("127.0.0.1:9050") + .unwrap() + .build(); + let _ = Client::with_transport(tp); + + let _ = Client::http_proxy( + "localhost:22", + None, + None, + "127.0.0.1:9050", + Some(("user", "password")), + ) + .unwrap(); + } + + /// Test that the client will detect that a socket is closed and open a fresh one before sending + /// the request + #[cfg(all(not(feature = "proxy"), not(jsonrpc_fuzz)))] + #[test] + fn request_to_closed_socket() { + use std::net::{Shutdown, TcpListener}; + use std::sync::mpsc; + use std::thread; + + use serde_json::{Number, Value}; + + let (tx, rx) = mpsc::sync_channel(1); + + thread::spawn(move || { + let server = TcpListener::bind("localhost:0").expect("Binding a Tcp Listener"); + tx.send(server.local_addr().unwrap().port()).unwrap(); + for (request_id, stream) in server.incoming().enumerate() { + let mut stream = stream.unwrap(); + + let buf_reader = BufReader::new(&mut stream); + + let _http_request: Vec<_> = buf_reader + .lines() + .map(|result| result.unwrap()) + .take_while(|line| !line.is_empty()) + .collect(); + + let response = Response { + result: None, + error: None, + id: Value::Number(Number::from(request_id)), + jsonrpc: Some(String::from("2.0")), + }; + let response_str = serde_json::to_string(&response).unwrap(); + + stream.write_all(b"HTTP/1.1 200\r\n").unwrap(); + 
stream.write_all(b"Content-Length: ").unwrap(); + stream.write_all(response_str.len().to_string().as_bytes()).unwrap(); + stream.write_all(b"\r\n").unwrap(); + stream.write_all(b"\r\n").unwrap(); + stream.write_all(response_str.as_bytes()).unwrap(); + stream.flush().unwrap(); + + stream.shutdown(Shutdown::Both).unwrap(); + } + }); + + // Give the server thread a second to start up and listen + thread::sleep(Duration::from_secs(1)); + + let port = rx.recv().unwrap(); + let client = + Client::simple_http(format!("localhost:{}", port).as_str(), None, None).unwrap(); + let request = client.build_request("test_request", None); + let result = client.send_request(request).unwrap(); + assert_eq!(result.id, Value::Number(Number::from(0))); + thread::sleep(Duration::from_secs(1)); + let request = client.build_request("test_request2", None); + let result2 = client.send_request(request) + .expect("This second request should not be an Err like `Err(Transport(HttpResponseTooShort { actual: 0, needed: 12 }))`"); + assert_eq!(result2.id, Value::Number(Number::from(1))); + } +} diff --git a/jsonrpc/src/lib.rs b/jsonrpc/src/lib.rs new file mode 100644 index 0000000..f6a3c69 --- /dev/null +++ b/jsonrpc/src/lib.rs @@ -0,0 +1,249 @@ +// SPDX-License-Identifier: CC0-1.0 + +//! # Rust JSON-RPC Library +//! +//! Rust support for the JSON-RPC 2.0 protocol. + +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +// Coding conventions +#![warn(missing_docs)] + +/// Re-export `serde` crate. +pub extern crate serde; +/// Re-export `serde_json` crate. +pub extern crate serde_json; + +/// Re-export `base64` crate. +#[cfg(feature = "base64")] +pub extern crate base64; + +/// Re-export `minreq` crate if the feature is set. 
+#[cfg(feature = "minreq")] +pub extern crate minreq; + +pub mod client; +pub mod error; +pub mod http; + +#[cfg(feature = "minreq_http")] +pub use http::minreq_http; +#[cfg(feature = "simple_http")] +pub use http::simple_http; + +#[cfg(feature = "simple_tcp")] +pub mod simple_tcp; + +#[cfg(all(feature = "simple_uds", not(windows)))] +pub mod simple_uds; + +use serde::{Deserialize, Serialize}; +use serde_json::value::RawValue; + +pub use crate::client::{Client, Transport}; +pub use crate::error::Error; + +/// Shorthand method to convert an argument into a boxed [`serde_json::value::RawValue`]. +/// +/// Since serializers rarely fail, it's probably easier to use [`arg`] instead. +pub fn try_arg<T: serde::Serialize>(arg: T) -> Result<Box<RawValue>, serde_json::Error> { + RawValue::from_string(serde_json::to_string(&arg)?) +} + +/// Shorthand method to convert an argument into a boxed [`serde_json::value::RawValue`]. +/// +/// This conversion should not fail, so to avoid returning a [`Result`], +/// in case of an error, the error is serialized as the return value. +pub fn arg<T: serde::Serialize>(arg: T) -> Box<RawValue> { + match try_arg(arg) { + Ok(v) => v, + Err(e) => RawValue::from_string(format!("<<SERIALIZATION ERROR: {}>>", e)) + .unwrap_or_else(|_| { + RawValue::from_string("<<SERIALIZATION ERROR>>".to_owned()).unwrap() + }), + } +} + +/// A JSONRPC request object. +#[derive(Debug, Clone, Serialize)] +pub struct Request<'a> { + /// The name of the RPC call. + pub method: &'a str, + /// Parameters to the RPC call. + pub params: Option<&'a RawValue>, + /// Identifier for this request, which should appear in the response. + pub id: serde_json::Value, + /// jsonrpc field, MUST be "2.0". + pub jsonrpc: Option<&'a str>, +} + +/// A JSONRPC response object. +#[derive(Debug, Clone, Deserialize, Serialize)] +pub struct Response { + /// A result if there is one, or [`None`]. + pub result: Option<Box<RawValue>>, + /// An error if there is one, or [`None`]. + pub error: Option<error::RpcError>, + /// Identifier for this response, which should match that of the request.
+ pub id: serde_json::Value, + /// jsonrpc field, MUST be "2.0". + pub jsonrpc: Option<String>, +} + +impl Response { + /// Extracts the result from a response. + pub fn result<T: for<'a> serde::de::Deserialize<'a>>(&self) -> Result<T, Error> { + if let Some(ref e) = self.error { + return Err(Error::Rpc(e.clone())); + } + + if let Some(ref res) = self.result { + serde_json::from_str(res.get()).map_err(Error::Json) + } else { + serde_json::from_value(serde_json::Value::Null).map_err(Error::Json) + } + } + + /// Returns the RPC error, if there was one, but does not check the result. + pub fn check_error(self) -> Result<(), Error> { + if let Some(e) = self.error { + Err(Error::Rpc(e)) + } else { + Ok(()) + } + } + + /// Returns whether or not the `result` field is empty. + pub fn is_none(&self) -> bool { self.result.is_none() } +} + +#[cfg(test)] +mod tests { + use serde_json::json; + use serde_json::value::{to_raw_value, RawValue}; + + use super::*; + + #[test] + fn response_is_none() { + let joanna = Response { + result: Some(RawValue::from_string(serde_json::to_string(&true).unwrap()).unwrap()), + error: None, + id: From::from(81), + jsonrpc: Some(String::from("2.0")), + }; + + let bill = Response { + result: None, + error: None, + id: From::from(66), + jsonrpc: Some(String::from("2.0")), + }; + + assert!(!joanna.is_none()); + assert!(bill.is_none()); + } + + #[test] + fn response_extract() { + let obj = vec!["Mary", "had", "a", "little", "lamb"]; + let response = Response { + result: Some(RawValue::from_string(serde_json::to_string(&obj).unwrap()).unwrap()), + error: None, + id: serde_json::Value::Null, + jsonrpc: Some(String::from("2.0")), + }; + let recovered1: Vec<String> = response.result().unwrap(); + assert!(response.clone().check_error().is_ok()); + let recovered2: Vec<String> = response.result().unwrap(); + assert_eq!(obj, recovered1); + assert_eq!(obj, recovered2); + } + + #[test] + fn null_result() { + let s = r#"{"result":null,"error":null,"id":"test"}"#; + let response: Response =
serde_json::from_str(s).unwrap(); + let recovered1: Result<(), _> = response.result(); + let recovered2: Result<(), _> = response.result(); + assert!(recovered1.is_ok()); + assert!(recovered2.is_ok()); + + let recovered1: Result<String, _> = response.result(); + let recovered2: Result<String, _> = response.result(); + assert!(recovered1.is_err()); + assert!(recovered2.is_err()); + } + + #[test] + fn batch_response() { + // from the jsonrpc.org spec example + let s = r#"[ + {"jsonrpc": "2.0", "result": 7, "id": "1"}, + {"jsonrpc": "2.0", "result": 19, "id": "2"}, + {"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request"}, "id": null}, + {"jsonrpc": "2.0", "error": {"code": -32601, "message": "Method not found"}, "id": "5"}, + {"jsonrpc": "2.0", "result": ["hello", 5], "id": "9"} + ]"#; + let batch_response: Vec<Response> = serde_json::from_str(s).unwrap(); + assert_eq!(batch_response.len(), 5); + } + + #[test] + fn test_arg() { + macro_rules! test_arg { + ($val:expr, $t:ty) => {{ + let val1: $t = $val; + let arg = super::arg(val1.clone()); + let val2: $t = serde_json::from_str(arg.get()).expect(stringify!($val)); + assert_eq!(val1, val2, "failed test for {}", stringify!($val)); + }}; + } + + test_arg!(true, bool); + test_arg!(42, u8); + test_arg!(42, usize); + test_arg!(42, isize); + test_arg!(vec![42, 35], Vec<u8>); + test_arg!(String::from("test"), String); + + #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] + struct Test { + v: String, + } + test_arg!(Test { v: String::from("test") }, Test); + } + + #[test] + fn test_request_list() { + let list = json!([0]); + let raw_value = Some(to_raw_value(&list).unwrap()); + + let request = Request { + method: "list", + params: raw_value.as_deref(), + id: serde_json::json!(2), + jsonrpc: Some("2.0"), + }; + assert_eq!( + serde_json::to_string(&request).unwrap(), + r#"{"method":"list","params":[0],"id":2,"jsonrpc":"2.0"}"# + ); + } + + #[test] + fn test_request_object() { + let object = json!({ "height": 0 }); + let raw_value =
Some(to_raw_value(&object).unwrap()); + + let request = Request { + method: "object", + params: raw_value.as_deref(), + id: serde_json::json!(2), + jsonrpc: Some("2.0"), + }; + assert_eq!( + serde_json::to_string(&request).unwrap(), + r#"{"method":"object","params":{"height":0},"id":2,"jsonrpc":"2.0"}"# + ); + } +} diff --git a/jsonrpc/src/simple_tcp.rs b/jsonrpc/src/simple_tcp.rs new file mode 100644 index 0000000..9b1ed24 --- /dev/null +++ b/jsonrpc/src/simple_tcp.rs @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: CC0-1.0 + +//! This module implements a synchronous transport over a raw [`std::net::TcpListener`]. +//! Note that it does not handle TCP over Unix Domain Sockets, see `simple_uds` for this. + +use std::{error, fmt, io, net, time}; + +use crate::client::Transport; +use crate::{Request, Response}; + +#[derive(Debug, Clone)] +/// Simple synchronous TCP transport. +pub struct TcpTransport { + /// The internet socket address to connect to. + pub addr: net::SocketAddr, + /// The read and write timeout to use for this connection. + pub timeout: Option<time::Duration>, +} + +impl TcpTransport { + /// Creates a new `TcpTransport` without timeouts. + pub fn new(addr: net::SocketAddr) -> TcpTransport { TcpTransport { addr, timeout: None } } + + fn request<R>(&self, req: impl serde::Serialize) -> Result<R, Error> + where + R: for<'a> serde::de::Deserialize<'a>, + { + let mut sock = net::TcpStream::connect(self.addr)?; + sock.set_read_timeout(self.timeout)?; + sock.set_write_timeout(self.timeout)?; + + serde_json::to_writer(&mut sock, &req)?; + + // NOTE: we don't check the id there, so it *must* be synchronous + let resp: R = serde_json::Deserializer::from_reader(&mut sock) + .into_iter() + .next() + .ok_or(Error::Timeout)??; + Ok(resp) + } +} + +impl Transport for TcpTransport { + fn send_request(&self, req: Request) -> Result<Response, crate::Error> { + Ok(self.request(req)?) + } + + fn send_batch(&self, reqs: &[Request]) -> Result<Vec<Response>, crate::Error> { + Ok(self.request(reqs)?)
+ } + + fn fmt_target(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.addr) } +} + +/// Error that can occur while using the TCP transport. +#[derive(Debug)] +pub enum Error { + /// An error occurred on the socket layer. + SocketError(io::Error), + /// We didn't receive a complete response till the deadline ran out. + Timeout, + /// JSON parsing error. + Json(serde_json::Error), +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + use Error::*; + + match *self { + SocketError(ref e) => write!(f, "couldn't connect to host: {}", e), + Timeout => f.write_str("didn't receive response data in time, timed out."), + Json(ref e) => write!(f, "JSON error: {}", e), + } + } +} + +impl error::Error for Error { + fn source(&self) -> Option<&(dyn error::Error + 'static)> { + use self::Error::*; + + match *self { + SocketError(ref e) => Some(e), + Timeout => None, + Json(ref e) => Some(e), + } + } +} + +impl From<io::Error> for Error { + fn from(e: io::Error) -> Self { Error::SocketError(e) } +} + +impl From<serde_json::Error> for Error { + fn from(e: serde_json::Error) -> Self { Error::Json(e) } +} + +impl From<Error> for crate::Error { + fn from(e: Error) -> crate::Error { + match e { + Error::Json(e) => crate::Error::Json(e), + e => crate::Error::Transport(Box::new(e)), + } + } +} + +#[cfg(test)] +mod tests { + use std::io::{Read, Write}; + use std::thread; + + use super::*; + use crate::Client; + + // Test a dummy request / response over a raw TCP transport + #[test] + fn sanity_check_tcp_transport() { + let addr: net::SocketAddr = + net::SocketAddrV4::new(net::Ipv4Addr::new(127, 0, 0, 1), 0).into(); + let server = net::TcpListener::bind(addr).unwrap(); + let addr = server.local_addr().unwrap(); + let dummy_req = Request { + method: "arandommethod", + params: None, + id: serde_json::Value::Number(4242242.into()), + jsonrpc: Some("2.0"), + }; + let dummy_req_ser = serde_json::to_vec(&dummy_req).unwrap(); + let dummy_resp = Response { +
result: None, + error: None, + id: serde_json::Value::Number(4242242.into()), + jsonrpc: Some("2.0".into()), + }; + let dummy_resp_ser = serde_json::to_vec(&dummy_resp).unwrap(); + + let client_thread = thread::spawn(move || { + let transport = TcpTransport { addr, timeout: Some(time::Duration::from_secs(5)) }; + let client = Client::with_transport(transport); + + client.send_request(dummy_req.clone()).unwrap() + }); + + let (mut stream, _) = server.accept().unwrap(); + stream.set_read_timeout(Some(time::Duration::from_secs(5))).unwrap(); + let mut recv_req = vec![0; dummy_req_ser.len()]; + let mut read = 0; + while read < dummy_req_ser.len() { + read += stream.read(&mut recv_req[read..]).unwrap(); + } + assert_eq!(recv_req, dummy_req_ser); + + stream.write_all(&dummy_resp_ser).unwrap(); + stream.flush().unwrap(); + let recv_resp = client_thread.join().unwrap(); + assert_eq!(serde_json::to_vec(&recv_resp).unwrap(), dummy_resp_ser); + } +} diff --git a/jsonrpc/src/simple_uds.rs b/jsonrpc/src/simple_uds.rs new file mode 100644 index 0000000..0f324d7 --- /dev/null +++ b/jsonrpc/src/simple_uds.rs @@ -0,0 +1,172 @@ +// SPDX-License-Identifier: CC0-1.0 + +//! This module implements a synchronous transport over a raw [`std::os::unix::net::UnixStream`]. + +use std::os::unix::net::UnixStream; +use std::{error, fmt, io, path, time}; + +use crate::client::Transport; +use crate::{Request, Response}; + +/// Simple synchronous UDS transport. +#[derive(Debug, Clone)] +pub struct UdsTransport { + /// The path to the Unix Domain Socket. + pub sockpath: path::PathBuf, + /// The read and write timeout to use. + pub timeout: Option<time::Duration>, +} + +impl UdsTransport { + /// Creates a new [`UdsTransport`] without timeouts to use.
+ pub fn new<P: AsRef<path::Path>>(sockpath: P) -> UdsTransport { + UdsTransport { sockpath: sockpath.as_ref().to_path_buf(), timeout: None } + } + + fn request<R>(&self, req: impl serde::Serialize) -> Result<R, Error> + where + R: for<'a> serde::de::Deserialize<'a>, + { + let mut sock = UnixStream::connect(&self.sockpath)?; + sock.set_read_timeout(self.timeout)?; + sock.set_write_timeout(self.timeout)?; + + serde_json::to_writer(&mut sock, &req)?; + + // NOTE: we don't check the id there, so it *must* be synchronous + let resp: R = serde_json::Deserializer::from_reader(&mut sock) + .into_iter() + .next() + .ok_or(Error::Timeout)??; + Ok(resp) + } +} + +impl Transport for UdsTransport { + fn send_request(&self, req: Request) -> Result<Response, crate::error::Error> { + Ok(self.request(req)?) + } + + fn send_batch(&self, reqs: &[Request]) -> Result<Vec<Response>, crate::error::Error> { + Ok(self.request(reqs)?) + } + + fn fmt_target(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.sockpath.to_string_lossy()) + } +} + +/// Error that can occur while using the UDS transport. +#[derive(Debug)] +pub enum Error { + /// An error occurred on the socket layer. + SocketError(io::Error), + /// We didn't receive a complete response till the deadline ran out. + Timeout, + /// JSON parsing error.
+ Json(serde_json::Error), +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + use Error::*; + + match *self { + SocketError(ref e) => write!(f, "couldn't connect to host: {}", e), + Timeout => f.write_str("didn't receive response data in time, timed out."), + Json(ref e) => write!(f, "JSON error: {}", e), + } + } +} + +impl error::Error for Error { + fn source(&self) -> Option<&(dyn error::Error + 'static)> { + use self::Error::*; + + match *self { + SocketError(ref e) => Some(e), + Timeout => None, + Json(ref e) => Some(e), + } + } +} + +impl From<io::Error> for Error { + fn from(e: io::Error) -> Self { Error::SocketError(e) } +} + +impl From<serde_json::Error> for Error { + fn from(e: serde_json::Error) -> Self { Error::Json(e) } +} + +impl From<Error> for crate::error::Error { + fn from(e: Error) -> crate::error::Error { + match e { + Error::Json(e) => crate::error::Error::Json(e), + e => crate::error::Error::Transport(Box::new(e)), + } + } +} + +#[cfg(test)] +mod tests { + use std::io::{Read, Write}; + use std::os::unix::net::UnixListener; + use std::{fs, process, thread}; + + use super::*; + use crate::Client; + + // Test a dummy request / response over an UDS + #[test] + fn sanity_check_uds_transport() { + let socket_path: path::PathBuf = format!("uds_scratch_{}.socket", process::id()).into(); + // Any leftover?
+ fs::remove_file(&socket_path).unwrap_or(()); + + let server = UnixListener::bind(&socket_path).unwrap(); + let dummy_req = Request { + method: "getinfo", + params: None, + id: serde_json::Value::Number(111.into()), + jsonrpc: Some("2.0"), + }; + let dummy_req_ser = serde_json::to_vec(&dummy_req).unwrap(); + let dummy_resp = Response { + result: None, + error: None, + id: serde_json::Value::Number(111.into()), + jsonrpc: Some("2.0".into()), + }; + let dummy_resp_ser = serde_json::to_vec(&dummy_resp).unwrap(); + + let cli_socket_path = socket_path.clone(); + let client_thread = thread::spawn(move || { + let transport = UdsTransport { + sockpath: cli_socket_path, + timeout: Some(time::Duration::from_secs(5)), + }; + let client = Client::with_transport(transport); + + client.send_request(dummy_req.clone()).unwrap() + }); + + let (mut stream, _) = server.accept().unwrap(); + stream.set_read_timeout(Some(time::Duration::from_secs(5))).unwrap(); + let mut recv_req = vec![0; dummy_req_ser.len()]; + let mut read = 0; + while read < dummy_req_ser.len() { + read += stream.read(&mut recv_req[read..]).unwrap(); + } + assert_eq!(recv_req, dummy_req_ser); + + stream.write_all(&dummy_resp_ser).unwrap(); + stream.flush().unwrap(); + let recv_resp = client_thread.join().unwrap(); + assert_eq!(serde_json::to_vec(&recv_resp).unwrap(), dummy_resp_ser); + + // Clean up + drop(server); + fs::remove_file(&socket_path).unwrap(); + } +} diff --git a/justfile b/justfile index 78311ce..f5fd526 100644 --- a/justfile +++ b/justfile @@ -21,6 +21,10 @@ fmt: format: cargo +$(cat ./nightly-version) fmt --all --check +# Generate documentation. +docsrs *flags: + RUSTDOCFLAGS="--cfg docsrs -D warnings -D rustdoc::broken-intra-doc-links" cargo +$(cat ./nightly-version) doc --all-features {{flags}} + # Update the recent and minimal lock files. update-lock-files: contrib/update-lock-files.sh