update yaml merge key error compose > v2.7 #12

Workflow file for this run

name: Pull, build, push, test
on: [push]
jobs:
  pull_build_push:
    name: Pull, build, push
    runs-on: ubuntu-20.04
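    # Skip the whole job when the commit message contains "ci skip"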
if: "!contains(github.event.head_commit.message, 'ci skip')"
steps:
- name: Check out repository
uses: actions/checkout@v1
with:
submodules: recursive
- name: Clean up disk space
run: ./.github/free-up-disk-space.sh
- name: Set up Python
uses: actions/setup-python@v1
with:
python-version: 3.8
- name: Upgrade Pip / setuptools
run: pip install -U pip setuptools wheel
- name: Update APT listing
run: sudo apt-get -y update
- name: Install parallel
run: sudo apt-get -y install parallel
      # We need docker-compose as a Python module, so install it via pip
      - name: Install docker-compose
        run: pip install docker-compose
      # Install PyYAML for docker-compose.yml validation
      - name: Install PyYAML
        run: pip install PyYAML
      # FIXME upgrade Docker?
      - name: Print kernel and Docker information
        run: |
          uname -a
          docker version
          docker-compose version
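      # Authenticate to Google Container Registry: "_json_key" is the fixed username for
      # service account JSON key logins, and piping the key via --password-stdin keeps it
      # out of the process list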
      - name: Log in to container registry
        run: echo "${DOCKER_GCR_SERVICE_ACCOUNT}" | docker login -u _json_key --password-stdin https://gcr.io
        env:
          DOCKER_GCR_SERVICE_ACCOUNT: ${{ secrets.DOCKER_GCR_SERVICE_ACCOUNT }}
      - name: Pull images
        # Don't stop on a single failure because the image might not exist or a
        # network error might have happened
        run: ./dev/pull.py || { echo "One or more images couldn't be pulled"; }
      - name: Build images
        # Prune images after every rebuild because otherwise the CI instance might run
        # out of disk space on bigger rebuilds
        run: ./dev/build.py --prune_images
      - name: Push images
        run: ./dev/push.py

  run_tests:
    name: Run tests
    runs-on: ubuntu-20.04
    needs: pull_build_push
    if: "!contains(github.event.head_commit.message, 'ci skip')"
    env:
      # Keep in sync with "strategy/matrix/chunk_number"
      TEST_CHUNK_COUNT: 5
    strategy:
      # Try running all tests even if some of them fail
      fail-fast: false
      matrix:
        # Keep in sync with "env/TEST_CHUNK_COUNT"
        chunk_number: [1, 2, 3, 4, 5]
    steps:
      - name: Check out repository
        uses: actions/checkout@v1
        with:
          submodules: recursive
      - name: Clean up disk space
        run: ./.github/free-up-disk-space.sh
      - name: Set up Python
        uses: actions/setup-python@v1
        with:
          python-version: 3.8
      - name: Upgrade Pip / setuptools
        run: pip install -U pip setuptools wheel
      - name: Install parallel
        run: sudo apt-get -y install parallel
      # We need docker-compose as a Python module, so install it via pip
      - name: Install docker-compose
        run: pip install docker-compose
      # Install PyYAML for docker-compose.yml validation
      - name: Install PyYAML
        run: pip install PyYAML
      # FIXME upgrade Docker?
      - name: Print kernel and Docker information
        run: |
          uname -a
          docker version
          docker-compose version
      - name: Log in to container registry
        run: echo "${DOCKER_GCR_SERVICE_ACCOUNT}" | docker login -u _json_key --password-stdin https://gcr.io
        env:
          DOCKER_GCR_SERVICE_ACCOUNT: ${{ secrets.DOCKER_GCR_SERVICE_ACCOUNT }}
      - name: Pull images
        # Don't stop on a single failure because the image might not exist or a
        # network error might have happened
        run: ./dev/pull.py || { echo "One or more images couldn't be pulled"; }
      # Write the list of all tests (commands) to be run into a file
      - name: Enumerate all tests
        run: ./dev/run_all_tests.py --print_commands > tests_all_ordered
      # Randomize test order so that all chunks take about the same amount of time to run;
      # use a fixed seed to make it predictable which particular chunk every test
      # will end up in
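      # ("--random-source" makes shuf read its randomness from the given stream; openssl
      # encrypting /dev/zero with a key derived from the fixed passphrase "42" produces the
      # same pseudo-random byte stream on every run, so the shuffle is reproducible)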
      - name: Randomize test order with fixed seed
        run: >-
          shuf
          --random-source=<(openssl enc -aes-256-ctr -pass pass:"42" -nosalt </dev/zero 2>/dev/null)
          tests_all_ordered > tests_all &&
          rm tests_all_ordered
      - name: Split tests into equal parts
        # Assuming that there won't be more than 9 chunks here
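        # ("--number=l/N" splits the file into N chunks of whole lines; with one-digit
        # numeric suffixes starting at 1, the outputs are tests_chunk_1 ... tests_chunk_5)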
        run: >-
          split
          --number=l/$TEST_CHUNK_COUNT
          --numeric-suffixes=1
          --suffix-length=1
          tests_all
          tests_chunk_
      - name: Print tests that are going to be run in this chunk
        run: cat tests_chunk_${{ matrix.chunk_number }}
      # Run a selected chunk of tests in parallel, keep a log; on error, print out said log
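      # ("--group" buffers each test's output so lines from parallel jobs don't interleave,
      # "--timeout 600" kills any single test running for longer than 10 minutes, and the
      # joblog records each command's runtime and exit status)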
      - name: Run tests
        run: >-
          cat tests_chunk_${{ matrix.chunk_number }}
          | sort
          | parallel --timeout 600 --group --joblog joblog.txt
          || { cat joblog.txt && exit 1; }
        env:
          # Map secrets to environment variables
          MC_BRANDWATCH_PASSWORD: ${{ secrets.MC_BRANDWATCH_PASSWORD }}
          MC_BRANDWATCH_USER: ${{ secrets.MC_BRANDWATCH_USER }}
          MC_CRIMSON_HEXAGON_API_KEY: ${{ secrets.MC_CRIMSON_HEXAGON_API_KEY }}
          MC_DOWNLOADS_AMAZON_S3_ACCESS_KEY_ID: ${{ secrets.MC_DOWNLOADS_AMAZON_S3_ACCESS_KEY_ID }}
          MC_DOWNLOADS_AMAZON_S3_BUCKET_NAME: ${{ secrets.MC_DOWNLOADS_AMAZON_S3_BUCKET_NAME }}
          MC_DOWNLOADS_AMAZON_S3_DIRECTORY_NAME: ${{ secrets.MC_DOWNLOADS_AMAZON_S3_DIRECTORY_NAME }}
          MC_DOWNLOADS_AMAZON_S3_SECRET_ACCESS_KEY: ${{ secrets.MC_DOWNLOADS_AMAZON_S3_SECRET_ACCESS_KEY }}
          MC_FACEBOOK_APP_ID: ${{ secrets.MC_FACEBOOK_APP_ID }}
          MC_FACEBOOK_APP_SECRET: ${{ secrets.MC_FACEBOOK_APP_SECRET }}
          MC_PODCAST_FETCH_EPISODE_BUCKET_NAME: ${{ secrets.MC_PODCAST_FETCH_EPISODE_BUCKET_NAME }}
          MC_PODCAST_FETCH_TRANSCRIPT_RUN_COSTLY_TEST: ${{ secrets.MC_PODCAST_FETCH_TRANSCRIPT_RUN_COSTLY_TEST }}
          MC_PODCAST_GC_AUTH_JSON_BASE64: ${{ secrets.MC_PODCAST_GC_AUTH_JSON_BASE64 }}
          MC_TWITTER_ACCESS_TOKEN: ${{ secrets.MC_TWITTER_ACCESS_TOKEN }}
          MC_TWITTER_ACCESS_TOKEN_SECRET: ${{ secrets.MC_TWITTER_ACCESS_TOKEN_SECRET }}
          MC_TWITTER_CONSUMER_KEY: ${{ secrets.MC_TWITTER_CONSUMER_KEY }}
          MC_TWITTER_CONSUMER_SECRET: ${{ secrets.MC_TWITTER_CONSUMER_SECRET }}