Skip to content

Commit

Permalink
ci: create workflows and do some housekeeping (#154)
Browse files Browse the repository at this point in the history
Adds a CI pipeline along with some quality-of-life improvements such as an editorconfig, Makefile, functional tests refinements, dependency updates, and fixes suggested by `clippy` and `rustfmt`.

Closes #108, #80
  • Loading branch information
Rajil1213 authored Jul 26, 2024
1 parent db231b8 commit 1191dc4
Show file tree
Hide file tree
Showing 106 changed files with 1,647 additions and 552 deletions.
3 changes: 3 additions & 0 deletions .codespellrc
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
# Configuration for the codespell spell checker.
[codespell]
# Paths/globs codespell must not scan: VCS data, build output, and
# machine-generated files (lockfiles, cargo-mutants reports).
skip = .git,target,Cargo.toml,Cargo.lock,mutants*
# Identifiers that look like typos but are intentional (e.g. Rust's `crate`).
ignore-words-list = crate,ser
1 change: 1 addition & 0 deletions .config/mutants.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# Run each cargo-mutants mutant with cargo-nextest instead of `cargo test`.
test_tool = "nextest"
135 changes: 135 additions & 0 deletions .config/nextest.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,135 @@
# Where nextest keeps run artifacts (reports, run metadata).
[store]
# The directory under the workspace root at which nextest-related files are
# written. Profile-specific storage is currently written to dir/<profile-name>.
dir = "target/nextest"

# This section defines the default nextest profile. Custom profiles are layered
# on top of the default profile.
[profile.default]
# "retries" defines the number of times a test should be retried. If set to a
# non-zero value, tests that succeed on a subsequent attempt will be marked as
# flaky. Can be overridden through the `--retries` option.
# Examples:
# * retries = 3
# * retries = { backoff = "fixed", count = 2, delay = "1s" }
# Here: up to 3 retries, exponential backoff starting at 1s, randomized via
# jitter and capped at 10s.
retries = { backoff = "exponential", count = 3, delay = "1s", jitter = true, max-delay = "10s" }

# The number of threads to run tests with. Supported values are either an integer or
# the string "num-cpus". Can be overridden through the `--test-threads` option.
test-threads = "num-cpus"

# The number of threads required for each test. This is generally used in overrides to
# mark certain tests as heavier than others. However, it can also be set as a global parameter.
threads-required = 1

# Show these test statuses in the output.
#
# The possible values this can take are:
# * none: no output
# * fail: show failed (including exec-failed) tests
# * retry: show flaky and retried tests
# * slow: show slow tests
# * pass: show passed tests
# * skip: show skipped tests (most useful for CI)
# * all: all of the above
#
# Each value includes all the values above it; for example, "slow" includes
# failed and retried tests.
#
# Can be overridden through the `--status-level` flag.
status-level = "all"

# Similar to status-level, show these test statuses at the end of the run.
final-status-level = "flaky"

# "failure-output" defines when standard output and standard error for failing tests are produced.
# Accepted values are
# * "immediate": output failures as soon as they happen
# * "final": output failures at the end of the test run
# * "immediate-final": output failures as soon as they happen and at the end of
# the test run; combination of "immediate" and "final"
# * "never": don't output failures at all
#
# For large test suites and CI it is generally useful to use "immediate-final".
#
# Can be overridden through the `--failure-output` option.
failure-output = "immediate"

# "success-output" controls production of standard output and standard error on success. This should
# generally be set to "never".
success-output = "never"

# Cancel the test run on the first failure. For CI runs, consider setting this
# to false.
# (The [profile.ci] section below overrides this to false.)
fail-fast = true

# Treat a test that takes longer than the configured 'period' as slow, and print a message.
# See <https://nexte.st/book/slow-tests> for more information.
#
# Optional: specify the parameter 'terminate-after' with a non-zero integer,
# which will cause slow tests to be terminated after the specified number of
# periods have passed.
# Example: slow-timeout = { period = "60s", terminate-after = 2 }
# No 'terminate-after' here: slow tests are reported but never killed.
slow-timeout = { period = "60s" }

# Treat a test as leaky if after the process is shut down, standard output and standard error
# aren't closed within this duration.
#
# This usually happens in case of a test that creates a child process and lets it inherit those
# handles, but doesn't clean the child process up (especially when it fails).
#
# See <https://nexte.st/book/leaky-tests> for more information.
leak-timeout = "100ms"

# `nextest archive` automatically includes any build output required by a standard build.
# However sometimes extra non-standard files are required.
# To address this, "archive.include" specifies additional paths that will be included in the archive.
# Currently empty: no extra files are archived.
archive.include = [
# Examples:
#
# { path = "application-data", relative-to = "target" },
# { path = "data-from-some-dependency/file.txt", relative-to = "target" },
#
# In the above example:
# * the directory and its contents at "target/application-data" will be included recursively in the archive.
# * the file "target/data-from-some-dependency/file.txt" will be included in the archive.
]

[profile.default.junit]
# Output a JUnit report into the given file inside 'store.dir/<profile-name>'.
# If unspecified, JUnit is not written out.
path = "junit.xml"

# The name of the top-level "report" element in JUnit report. If aggregating
# reports across different test runs, it may be useful to provide separate names
# for each report.
report-name = "nextest-run"

# Whether standard output and standard error for passing tests should be stored in the JUnit report.
# Output is stored in the <system-out> and <system-err> elements of the <testcase> element.
store-success-output = false

# Whether standard output and standard error for failing tests should be stored in the JUnit report.
# Output is stored in the <system-out> and <system-err> elements of the <testcase> element.
#
# Note that if a description can be extracted from the output, it is always stored in the
# <description> element.
store-failure-output = true

# This profile is activated if MIRI_SYSROOT is set.
# Intentionally empty: Miri runs inherit every setting from [profile.default],
# since custom profiles are layered on top of the default profile.
[profile.default-miri]

# This profile is used in CI (use with `--profile ci`)
[profile.ci]
# Print out output for failing tests as soon as they fail, and also at the end
# of the run (for easy scrollability).
failure-output = "immediate-final"
# Do not cancel the test run on the first failure.
# (Overrides `fail-fast = true` from [profile.default] so CI surfaces every failure.)
fail-fast = false

# JUnit output for CI runs; see [profile.default.junit] for field documentation.
[profile.ci.junit]
path = "junit.xml"
# Distinct report name so CI reports can be told apart when aggregated.
report-name = "nextest-run-ci"
# Unlike the default profile, CI also keeps output from passing tests.
store-success-output = true
store-failure-output = true
29 changes: 29 additions & 0 deletions .editorconfig
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
# EditorConfig helps developers define and maintain consistent
# coding styles between different editors and IDEs
# editorconfig.org

root = true

[*]
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
indent_style = space
indent_size = 4

[*.rs]
max_line_length = 100

[*.{yml,yaml,toml}]
indent_size = 2

[*.md]
# double whitespace at end of line
# denotes a line break in Markdown
trim_trailing_whitespace = false

[Makefile]
indent_style = tab
36 changes: 36 additions & 0 deletions .github/PULL_REQUEST_TEMPLATE.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
## Description

<!--
Provide a brief summary of the changes and the motivation behind them.
-->

### Type of Change

<!--
Select the type of change your PR introduces (put an `x` in all that apply):
-->

- [ ] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature/Enhancement (non-breaking change which adds functionality or enhances an existing one)
- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
- [ ] Documentation update
- [ ] Refactor

## Checklist

<!--
Ensure all the following are checked:
-->

- [ ] I have performed a self-review of my code.
- [ ] I have commented my code where necessary.
- [ ] I have updated the documentation if needed.
- [ ] My changes do not introduce new warnings.
- [ ] I have added tests that prove my changes are effective or that my feature works.
- [ ] New and existing tests pass with my changes.

## Related Issues

<!--
Link any related issues (e.g., `closes #123`, `fixes #456`).
-->
10 changes: 10 additions & 0 deletions .github/dependabot.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# Dependabot configuration: keep GitHub Actions versions and the functional-test
# Python dependencies up to date automatically.
version: 2
updates:
  # Action versions referenced by the workflows under .github/workflows.
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"
  # Python dependencies for the functional test suite (managed with Poetry).
  # NOTE(review): the Python manifests live under `functional-tests/` (the
  # functional-tests workflow runs `poetry install` there), and Dependabot
  # directories are anchored at the repository root with a leading "/".
  - package-ecosystem: "pip"
    directory: "/functional-tests"
    schedule:
      interval: "monthly"
61 changes: 61 additions & 0 deletions .github/workflows/dependencies.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
# Runs `cargo update` periodically and opens a pull request with the result.

name: Update Dependencies

on:
  schedule:
    # Run on the first day of every month
    - cron: "0 0 1 * *"
  workflow_dispatch:
    # Needed so we can run it manually

env:
  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
  BRANCH: cargo-update
  TITLE: "chore(deps): monthly `cargo update`"
  # $cargo_update_log is filled in via `envsubst` in the "craft commit message" step.
  BODY: |
    Automation to keep dependencies in `Cargo.lock` current.
    <details><summary><strong>cargo update log</strong></summary>
    <p>

    ```log
    $cargo_update_log
    ```

    </p>
    </details>

jobs:
  update:
    name: Update
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@nightly

      - name: cargo update
        # Drop the lines that just say "Updating crates.io index"
        run: cargo update --color never 2>&1 | sed '/crates.io index/d' | tee -a cargo_update.log

      - name: craft commit message and PR body
        id: msg
        run: |
          export cargo_update_log="$(cat cargo_update.log)"
          echo "commit_message<<EOF" >> $GITHUB_OUTPUT
          # Use a fixed printf format string so '%' or '\' sequences in the
          # title/log cannot be interpreted as printf directives (SC2059).
          printf '%s\n\n%s\n' "$TITLE" "$cargo_update_log" >> $GITHUB_OUTPUT
          echo "EOF" >> $GITHUB_OUTPUT
          echo "body<<EOF" >> $GITHUB_OUTPUT
          echo "$BODY" | envsubst >> $GITHUB_OUTPUT
          echo "EOF" >> $GITHUB_OUTPUT

      - name: Create Pull Request
        uses: peter-evans/create-pull-request@v6
        with:
          add-paths: ./Cargo.lock
          commit-message: ${{ steps.msg.outputs.commit_message }}
          title: ${{ env.TITLE }}
          body: ${{ steps.msg.outputs.body }}
          branch: ${{ env.BRANCH }}
31 changes: 31 additions & 0 deletions .github/workflows/docs.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
# Builds rustdoc for the workspace on every push to master and uploads
# the result as a build artifact.
name: Docs

on:
  push:
    branches: [master]

env:
  CARGO_TERM_COLOR: always

jobs:
  docs:
    name: Generate docs
    runs-on: ubuntu-latest
    timeout-minutes: 30
    steps:
      - uses: actions/checkout@v4
      # Nightly toolchain: RUSTDOCFLAGS below relies on -Zunstable-options.
      - uses: dtolnay/rust-toolchain@nightly
      - uses: Swatinem/rust-cache@v2
        with:
          cache-on-failure: true
      - run: cargo doc --document-private-items
        env:
          # -D warnings: treat rustdoc warnings as errors so broken docs fail CI.
          RUSTDOCFLAGS: --show-type-layout --enable-index-page -Zunstable-options -D warnings
      - name: Compress docs artifacts
        run: tar czf vertex-core-docs.tar.gz target/doc
      - name: Archive docs
        uses: actions/upload-artifact@v4
        # Upload whatever was produced even if an earlier step failed.
        if: always()
        with:
          name: docs
          path: vertex-core-docs.tar.gz
70 changes: 70 additions & 0 deletions .github/workflows/functional.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
# Runs the Python functional test suite against a freshly built debug binary,
# with one automatic retry because some tests are flaky.
name: Run functional tests

on:
  pull_request:
  merge_group:
  push:
    branches: [master]

env:
  CARGO_TERM_COLOR: always

jobs:
  run-functional-tests:
    name: Run functional tests
    runs-on: ubuntu-latest
    timeout-minutes: 30

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "^3.10" # Keep in sync with `pyproject.toml`

      - name: Install Poetry
        run: |
          curl -sSL https://install.python-poetry.org | python -
          echo "${HOME}/.local/bin" >> $GITHUB_PATH

      - name: Configure Poetry to create the virtual environment inside the project
        run: poetry config virtualenvs.in-project true

      - name: Install python dependencies
        run: cd functional-tests && poetry install --no-root && cd -

      - name: Set up Rust
        uses: dtolnay/rust-toolchain@nightly
      - uses: Swatinem/rust-cache@v2
        with:
          cache-on-failure: true

      - name: Build Cargo project
        run: cargo build

      - name: Install bitcoind
        run: |
          wget https://bitcoin.org/bin/bitcoin-core-27.0/bitcoin-27.0-x86_64-linux-gnu.tar.gz
          tar xzf bitcoin-27.0-x86_64-linux-gnu.tar.gz
          sudo install -m 0755 -t /usr/local/bin bitcoin-27.0/bin/*
          bitcoind --version

      - name: Run functional tests (1)
        id: funcTestsRun1
        # Don't fail the job yet; the retry step below handles a flaky first run.
        continue-on-error: true
        run: |
          export PATH=$(realpath target/debug/):$PATH
          which alpen-vertex-sequencer
          cd functional-tests && \
          poetry run python entry.py

      # Run again just to be sure as some tests are flaky
      - name: Run functional tests (2)
        if: steps.funcTestsRun1.outcome == 'failure'
        run: |
          export PATH=$(realpath target/debug/):$PATH
          which alpen-vertex-sequencer
          cd functional-tests && \
          poetry run python entry.py
Loading

0 comments on commit 1191dc4

Please sign in to comment.