fixup! Restrict number of schedulers when running tests #1555
Workflow file for this run

name: "Arweave Tests"
on:
workflow_dispatch:
push:
branches: ["**"]
jobs:
build:
runs-on: self-hosted
steps:
- uses: actions/checkout@v4
with:
submodules: "recursive"
- name: Build arweave test sources
run: |
rm -rf _build || true
./ar-rebar3 test compile
chmod -R u+w ./_build
tar --dereference -czf /tmp/arweave_build_${{ github.run_id }}.tar.gz _build
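  ## The eunit-tests job below fans the suite out across a matrix of modules:
  ## each entry runs as its own job, boots a fresh Erlang node, and calls
  ## "ar tests <module>" against the build artifact produced by the build job.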
  eunit-tests:
    needs: build
    runs-on: self-hosted
    strategy:
      fail-fast: true
      max-parallel: 12
      matrix:
        core_test_mod: [
          ## Long-running tests. Put these first to limit the overall runtime of the
          ## test suite
          ar_coordinated_mining_tests,
          ar_data_sync_tests,
          ar_fork_recovery_tests,
          ar_multiple_txs_per_wallet_tests,
          ar_packing_tests,
          ar_poa,
          ar_vdf_server_tests,
          ar_post_block_tests,
          ## Modules containing tests
          ar,
          ar_block,
          ar_block_cache,
          ar_chunk_storage,
          ar_data_sync_worker_master,
          ar_deep_hash,
          ar_diff_dag,
          ar_ets_intervals,
          ar_events,
          ar_inflation,
          ar_intervals,
          ar_join,
          ar_kv,
          ar_merkle,
          ar_mining_server,
          ar_mining_stats,
          ar_node,
          ar_node_utils,
          ar_nonce_limiter,
          # ar_p3,
          # ar_p3_config,
          # ar_p3_db,
          ar_packing_server,
          ar_patricia_tree,
          ar_peers,
          ar_pricing,
          ar_retarget,
          ar_serialize,
          ar_storage_module,
          ar_storage,
          ar_sync_buckets,
          ar_tx,
          ar_tx_db,
          ar_unbalanced_merkle,
          ar_util,
          ar_wallet,
          ar_webhook,
          ar_pool,
          ## Test modules (note: <module>_tests modules are run implicitly when the matching <module> prefix is listed above)
          ar_base64_compatibility_tests,
          ar_config_tests,
          ar_difficulty_tests,
          ar_header_sync_tests,
          ar_http_iface_tests,
          ar_http_util_tests,
          ar_mempool_tests,
          ar_mine_randomx_tests,
          ar_mine_vdf_tests,
          ar_mining_io_tests,
          ar_poller_tests,
          ar_reject_chunks_tests,
          ar_semaphore_tests,
          ar_tx_blacklist_tests,
          ar_tx_replay_pool_tests,
          ar_vdf_tests,
          ]
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: "recursive"
      - name: Extract build artifact
        run: |
          if [ ! -d "_build_${{ github.run_id }}" ]; then
            rm -rf _build || true
            tar -xzf /tmp/arweave_build_${{ github.run_id }}.tar.gz
            mv ./_build ./_build_${{ github.run_id }}
          fi
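      ## epmd (the Erlang Port Mapper Daemon) must be running for the
      ## distributed test nodes below to register and find each other.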
      - name: Restart epmd
        run: |
          if ! pgrep -x "epmd" > /dev/null
          then
            echo "Starting epmd"
            epmd -relaxed_command_check -daemon
          fi
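      ## Each matrix job derives a unique NAMESPACE (module name plus a
      ## millisecond timestamp) so that Erlang node names do not clash when
      ## several test jobs run concurrently on the same self-hosted runner.
      ## The retry loop below re-runs a failed test when the first line of the
      ## node output points to a transient distribution or connection error.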
      - name: ${{ matrix.core_test_mod }}.erl
        id: tests
        run: |
          rm -f *.out || true
          EXIT_CODE=0
          export PATH=$(pwd)/_build_${{ github.run_id }}/erts/bin:$PATH
          export ERL_EPMD_ADDRESS=127.0.0.1
          export TIMESTAMP_IN_MILLISECONDS=$(date +%s%3N)
          export NAMESPACE="${{ matrix.core_test_mod }}_${TIMESTAMP_IN_MILLISECONDS}"
          export ERL_TEST_OPTS="-pa $(echo $(pwd)/_build_${{ github.run_id }}/test/lib/*/ebin) $(pwd)/_build_${{ github.run_id }}/test/lib/arweave/test -config $(pwd)/config/sys.config"
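          # ERL_TEST_OPTS adds the compiled test beams and the arweave test
          # directory to the code path (-pa) and loads the shared sys.config.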
          RETRYABLE=1
          while [[ $RETRYABLE -eq 1 ]]; do
            RETRYABLE=0
            set +e
            set -x
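            # +S 8:8 caps the VM at 8 scheduler threads (8 online), presumably so
            # that up to 12 parallel matrix jobs do not oversubscribe the runner.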
            erl +S 8:8 $ERL_TEST_OPTS -noshell -name main-${NAMESPACE}@127.0.0.1 -setcookie ${{ matrix.core_test_mod }} -run ar tests "${{ matrix.core_test_mod }}" -s init stop 2>&1 | tee main.out
            EXIT_CODE=${PIPESTATUS[0]}
            set +x
            set -e
            # If the test failed, check whether the failure is a known transient
            # error and retry; otherwise print the peer nodes' output for debugging.
            if [[ $EXIT_CODE -ne 0 ]]; then
              echo -e "\033[0;32m===> Checking for retry\033[0m"
              if ls peer1-*.out 1> /dev/null 2>&1; then
                first_line_peer1=$(head -n 1 peer1-*.out)
              fi
              first_line_main=$(head -n 1 main.out)
              echo -e "\033[0;31m===> First line of peer1 node's output: $first_line_peer1\033[0m"
              echo -e "\033[0;31m===> First line of main node's output: $first_line_main\033[0m"
              # Check if it is a retryable error
              if [[ "$first_line_peer1" == "Protocol 'inet_tcp': register/listen error: "* ]]; then
                echo "Retrying test because of inet_tcp error..."
                RETRYABLE=1
                sleep 1
              elif [[ "$first_line_peer1" == "Protocol 'inet_tcp': the name"* ]]; then
                echo "Retrying test because of inet_tcp clash..."
                RETRYABLE=1
                sleep 1
              elif [[ "$first_line_main" == *"econnrefused"* ]]; then
                echo "Retrying test because of econnrefused..."
                RETRYABLE=1
                sleep 1
              else
                if ls peer1-*.out 1> /dev/null 2>&1; then
                  echo -e "\033[0;31m===> Test failed, printing the peer1 node's output...\033[0m"
                  cat peer1-*.out
                else
                  echo -e "\033[0;31m===> Test failed without peer1 output...\033[0m"
                fi
                if ls peer2-*.out 1> /dev/null 2>&1; then
                  echo -e "\033[0;31m===> Test failed, printing the peer2 node's output...\033[0m"
                  cat peer2-*.out
                else
                  echo -e "\033[0;31m===> Test failed without peer2 output...\033[0m"
                fi
                if ls peer3-*.out 1> /dev/null 2>&1; then
                  echo -e "\033[0;31m===> Test failed, printing the peer3 node's output...\033[0m"
                  cat peer3-*.out
                else
                  echo -e "\033[0;31m===> Test failed without peer3 output...\033[0m"
                fi
                if ls peer4-*.out 1> /dev/null 2>&1; then
                  echo -e "\033[0;31m===> Test failed, printing the peer4 node's output...\033[0m"
                  cat peer4-*.out
                else
                  echo -e "\033[0;31m===> Test failed without peer4 output...\033[0m"
                fi
              fi
            fi
          done
echo "exit_code=$EXIT_CODE" >> $GITHUB_ENV # Set the exit_code output variable using Environment Files
exit $EXIT_CODE # exit with the exit code of the tests
      - name: Cleanup successful test
        if: steps.tests.outputs.exit_code == '0' # Conditional based on the output variable
        run: rm -rf logs *.out .tmp
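  ## The cleanup job runs only after every eunit-tests matrix job has
  ## succeeded; it removes this run's build artifact and sweeps artifacts
  ## older than 7 days left behind by earlier runs.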
  cleanup:
    needs: eunit-tests
    runs-on: self-hosted
    if: ${{ success() && needs.eunit-tests.result == 'success' }}
    steps:
      - name: Cleanup build directories
        run: |
          rm -rf /tmp/arweave_build_${{ github.run_id }}.tar.gz
          find /home/runner -type d -name "_build_${{ github.run_id }}" -exec rm -rf {} +
      - name: Cleanup old build artifacts
        run: |
          find /tmp -maxdepth 1 -name "arweave_build_*" -type f -mtime +7 -exec rm -f {} +
          find /home/runner -name "_build_*" -type d -mtime +7 -exec rm -rf {} +
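## A minimal sketch of how a single matrix entry could be reproduced locally,
## assuming the same ar-rebar3 wrapper and config layout as above (ar_util is
## just an example module; any core_test_mod entry works the same way):
##
##   ./ar-rebar3 test compile
##   erl -pa $(echo $(pwd)/_build/test/lib/*/ebin) $(pwd)/_build/test/lib/arweave/test \
##       -config $(pwd)/config/sys.config -noshell \
##       -name main-local@127.0.0.1 -setcookie ar_util \
##       -run ar tests "ar_util" -s init stop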