diff --git a/.github/jobs/baseinstall.sh b/.github/jobs/baseinstall.sh
index 55fdf0ccf2..165ea73623 100755
--- a/.github/jobs/baseinstall.sh
+++ b/.github/jobs/baseinstall.sh
@@ -24,14 +24,25 @@ section_end
 section_start "Install domserver"
 make configure
-./configure \
-    --with-baseurl='https://localhost/domjudge/' \
-    --with-domjudge-user=root \
-    --enable-doc-build=no \
-    --enable-judgehost-build=no | tee "$ARTIFACTS"/configure.txt
-
-make domserver
-make install-domserver
+if [ "$version" = "all" ]; then
+    # Note that we use http instead of https here as python requests doesn't
+    # like our self-signed cert. We should fix this separately.
+    ./configure \
+        --with-baseurl='http://localhost/domjudge/' \
+        --with-domjudge-user=domjudge \
+        --with-judgehost-chrootdir=/chroot/domjudge | tee "$ARTIFACTS"/configure.txt
+    make build-scripts domserver judgehost docs
+    make install-domserver install-judgehost install-docs
+else
+    ./configure \
+        --with-baseurl='https://localhost/domjudge/' \
+        --with-domjudge-user=root \
+        --enable-doc-build=no \
+        --enable-judgehost-build=no | tee "$ARTIFACTS"/configure.txt
+    make domserver
+    make install-domserver
+fi
+
 section_end
 
 section_start "SQL settings"
@@ -116,6 +127,10 @@ elif [ "$version" = "balloon" ]; then
 elif [ "$version" = "admin" ]; then
     # Add admin to admin user
     mysql_root "INSERT INTO userrole (userid, roleid) VALUES (1, 1);" domjudge
+elif [ "$version" = "all" ]; then
+    mysql_root "INSERT INTO userrole (userid, roleid) VALUES (1, 1);" domjudge
+    mysql_root "INSERT INTO userrole (userid, roleid) VALUES (1, 3);" domjudge
+    mysql_root "UPDATE user SET teamid = 1 WHERE userid = 1;" domjudge
 fi
 section_end
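The new "all" branch above gives the admin user both roleid 1 (admin) and roleid 3 (presumably the team role) and attaches it to team 1 so it can submit. A minimal sanity check, reusing the mysql_root helper that baseinstall.sh already defines; the queries are an illustrative sketch, not part of the script:

    # Hypothetical check, not part of baseinstall.sh: confirm the role and team assignments took effect.
    mysql_root "SELECT userid, roleid FROM userrole WHERE userid = 1;" domjudge
    mysql_root "SELECT userid, teamid FROM user WHERE userid = 1;" domjudge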
diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml
new file mode 100644
index 0000000000..c1a2c61e16
--- /dev/null
+++ b/.github/workflows/integration.yml
@@ -0,0 +1,140 @@
+name: Run integration tests
+on:
+  push:
+    branches-ignore:
+      - main
+      - '[0-9]+.[0-9]+'
+      - gh-readonly-queue/main/*
+      - gh-readonly-queue/main/[0-9]+.[0-9]+
+  pull_request:
+    branches:
+      - main
+      - '[0-9]+.[0-9]+'
+
+jobs:
+  integration:
+    runs-on: ubuntu-24.04
+    container:
+      image: domjudge/gitlabci:24.04
+      options: --privileged --cgroupns=host --init
+    services:
+      sqlserver:
+        image: mariadb
+        ports:
+          - 3306:3306
+        env:
+          MYSQL_ROOT_PASSWORD: root
+          MYSQL_USER: domjudge
+          MYSQL_PASSWORD: domjudge
+        options: --health-cmd="healthcheck.sh --connect --innodb_initialized" --health-interval=10s --health-timeout=5s --health-retries=3
+    steps:
+      - uses: actions/checkout@v4
+      - name: info
+        run: |
+          cat /proc/cmdline && echo &&
+          cat /proc/mounts && echo &&
+          ls -al /sys/fs/cgroup && echo &&
+          uname -a && echo &&
+          stat -fc %T /sys/fs/cgroup && echo &&
+          cat /proc/self/cgroup && echo &&
+          cat /proc/cpuinfo
+      - name: pstree
+        run: pstree -p
+      - name: Install DOMjudge
+        run: .github/jobs/baseinstall.sh all
+      - name: Set up chroot
+        run: sudo misc-tools/dj_make_chroot -a amd64
+      - name: Check nginx
+        run: curl -v https://localhost/domjudge/
+      - name: Testing submit client
+        working-directory: submit
+        run: make check-full
+      - name: Configure judgehost
+        run: sudo cp /opt/domjudge/judgehost/etc/sudoers-domjudge /etc/sudoers.d/ && sudo chmod 400 /etc/sudoers.d/sudoers-domjudge && cat /opt/domjudge/judgehost/etc/sudoers-domjudge
+      - name: Create user
+        run: sudo userdel -f -r domjudge-run-0 ; sudo useradd -d /nonexistent -g nogroup -s /bin/false -u 2222 domjudge-run-0
+      - name: Start judging
+        run: sudo -u domjudge sh -c 'cd /opt/domjudge/judgehost/ && nohup bin/judgedaemon -n 0 &'
+      - name: Import Kattis example problems
+        run: |
+          cd /tmp
+          git clone --depth=1 https://github.com/Kattis/problemtools.git
+          cd problemtools/examples
+          mv hello hello_kattis
+          # Remove 2 submissions that will not pass validation. The first is because it is
+          # a Python 2 submission. The latter has a judgement type we do not understand.
+          rm different/submissions/accepted/different_py2.py different/submissions/slow_accepted/different_slow.py
+          for i in hello_kattis different guess; do
+            (
+              cd "$i"
+              zip -r "../${i}.zip" -- *
+            )
+            curl --fail -X POST -n -N -F zip=@${i}.zip http://localhost/domjudge/api/contests/demo/problems
+          done
+      - name: Monitor judgehost log and stop once all submissions are judged
+        run: |
+          tail -f /opt/domjudge/judgehost/log/judge*-0.log | while read line; do
+            echo "$line"
+            grep "No submissions in queue" /opt/domjudge/judgehost/log/judge*-0.log && break
+          done
+      - name: dump the db
+        run: mysqldump -uroot -proot domjudge > /tmp/db.sql
+      - name: Upload artifact for debugging
+        uses: actions/upload-artifact@v3
+        with:
+          name: DB-dump
+          path: /tmp/db.sql
+      - name: Verifying submissions
+        shell: bash
+        run: |
+          set -x
+          export CURLOPTS="--fail -sq -m 30 -b /tmp/cookiejar"
+          # Make an initial request which will get us a session id, and grab the csrf token from it
+          CSRFTOKEN=$(curl $CURLOPTS -c /tmp/cookiejar "http://localhost/domjudge/login" | sed -n 's/.*_csrf_token.*value="\(.*\)".*/\1/p')
+          # Make a second request with our session + csrf token to actually log in
+          curl $CURLOPTS -c /tmp/cookiejar -F "_csrf_token=$CSRFTOKEN" -F "_username=admin" -F "_password=password" "http://localhost/domjudge/login"
+          # Send a general clarification to later test if we see the event.
+          curl $CURLOPTS -F "sendto=" -F "problem=1-" -F "bodytext=Testing" -F "submit=Send" \
+            "http://localhost/domjudge/jury/clarifications/send" -o /dev/null
+          curl $CURLOPTS "http://localhost/domjudge/jury/judging-verifier?verify_multiple=1" -o /dev/null
+          NUMNOTVERIFIED=$(curl $CURLOPTS "http://localhost/domjudge/jury/judging-verifier" | grep "submissions checked"     | sed -r 's/^.* ([0-9]+) submissions checked.*$/\1/')
+          NUMVERIFIED=$(   curl $CURLOPTS "http://localhost/domjudge/jury/judging-verifier" | grep "submissions not checked" | sed -r 's/^.* ([0-9]+) submissions not checked.*$/\1/')
+          NUMNOMAGIC=$(    curl $CURLOPTS "http://localhost/domjudge/jury/judging-verifier" | grep "without magic string"    | sed -r 's/^.* ([0-9]+) without magic string.*$/\1/')
+          NUMSUBS=$(curl $CURLOPTS http://localhost/domjudge/api/contests/demo/submissions | python3 -mjson.tool | grep -c '"id":')
+          # We expect
+          # - two submissions with ambiguous outcome,
+          # - one submission submitted through the submit client, and thus the magic string ignored,
+          # - and all submissions to be judged.
+          if [ $NUMNOTVERIFIED -ne 2 ] || [ $NUMNOMAGIC -ne 1 ] || [ $NUMSUBS -gt $((NUMVERIFIED+NUMNOTVERIFIED)) ]; then
+            echo "verified subs: $NUMVERIFIED, unverified subs: $NUMNOTVERIFIED, total subs: $NUMSUBS"
+            echo "(expected 2 submissions to be unverified, but all to be processed)"
+            echo "Of these $NUMNOMAGIC do not have the EXPECTED_RESULTS string (should be 1)."
+            curl $CURLOPTS "http://localhost/domjudge/jury/judging-verifier?verify_multiple=1" | w3m -dump -T text/html
+            exit 1
+          fi
+      - name: Finalize contest so that awards appear in the feed
+        shell: bash
+        run: |
+          set -x
+          export CURLOPTS="--fail -m 30 -b /tmp/cookiejar"
+          curl $CURLOPTS http://localhost/domjudge/jury/contests/1/freeze/doNow || true
+          curl $CURLOPTS http://localhost/domjudge/jury/contests/1/end/doNow || true
+          curl $CURLOPTS -X POST -d 'finalize_contest[b]=0&finalize_contest[finalizecomment]=gitlab&finalize_contest[finalize]=' http://localhost/domjudge/jury/contests/1/finalize
+      - name: Verify no errors in prod.log
+        shell: bash
+        run: |
+          if cat /opt/domjudge/domserver/webapp/var/log/prod.log | egrep '(CRITICAL|ERROR):'; then
+            exit 1
+          fi
+      - name: Download and perform API check
+        shell: bash
+        run: |
+          cd $HOME
+          curl -o yajsv https://github.com/neilpa/yajsv/releases/download/v1.4.1/yajsv.linux.amd64
+          chmod a+x yajsv
+          echo -e "\033[0m"
+          git clone https://github.com/icpc/ccs-specs.git
+          export CCS_SPECS_PINNED_SHA1='a68aff54c4e60fc2bff2fc5c36c119bffa4d30f1'
+          ( cd ccs-specs && git reset --hard $CCS_SPECS_PINNED_SHA1 )
+          export CHECK_API="${HOME}/ccs-specs/check-api.sh -j ${HOME}/yajsv"
+          $CHECK_API -n -C -e -a 'strict=1' http://admin:password@localhost/domjudge/api
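The workflow above decides that judging is finished by grepping the judgedaemon log. When debugging a hung run, comparing submission and judgement counts via the API can serve as a second signal; a rough sketch, not a workflow step (the /judgements endpoint and the admin:password credentials are assumptions here, matching the API check step above):

    # Rough diagnostic sketch: count submissions versus judgements in the demo contest.
    SUBS=$(curl --fail "http://admin:password@localhost/domjudge/api/contests/demo/submissions" | python3 -mjson.tool | grep -c '"id":')
    JUDGEMENTS=$(curl --fail "http://admin:password@localhost/domjudge/api/contests/demo/judgements" | python3 -mjson.tool | grep -c '"id":')
    echo "submissions: $SUBS, judgements: $JUDGEMENTS"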
diff --git a/.github/workflows/runpipe.yml b/.github/workflows/runpipe.yml
index 415510c2c3..804e721cfa 100644
--- a/.github/workflows/runpipe.yml
+++ b/.github/workflows/runpipe.yml
@@ -1,4 +1,4 @@
-name: Run runpipe tests
+name: Run runpipe and runguard tests
 on:
   push:
     branches-ignore:
@@ -13,11 +13,14 @@ on:
 jobs:
   runpipe:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-24.04
     container:
       image: domjudge/gitlabci:24.04
+      options: --privileged --cgroupns=host --init
     steps:
       - uses: actions/checkout@v4
+      - name: info
+        run: cat /proc/cmdline && echo && cat /proc/mounts && echo && ls -al /sys/fs/cgroup && echo && uname -a && echo && stat -fc %T /sys/fs/cgroup && echo && cat /proc/self/cgroup
       - name: Create the configure file
         run: make configure
       - name: Do the default configure
@@ -27,4 +30,14 @@ jobs:
       - name: Run the actual runpipe tests
         working-directory: judge/runpipe_test
         run: make test
+      - name: Add user/group
+        run: sudo addgroup domjudge-run-0 && sudo usermod -g domjudge-run-0 domjudge-run-0
+      - name: Create dir
+        run: mkdir -p /opt/domjudge/judgehost/tmp/
+      - name: Run the actual runguard tests
+        working-directory: judge/runguard_test
+        env:
+          judgehost_tmpdir: /tmp
+          judgehost_judgedir: /tmp
+        run: make test
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index f5e87061a9..18ef56d2a9 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,12 +1,10 @@
 include:
   - '/gitlab/ci/unit.yml'
-  - '/gitlab/ci/integration.yml'
   - '/gitlab/ci/template.yml'
   - '/gitlab/ci/misc.yml'
 
 stages:
   - test
-  - integration
   - chroot_checks
   - unit
   - style
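Reproduced outside CI, the runguard test steps added above amount to roughly the following; a sketch that assumes a checkout which has already been configured and built, and that the domjudge-run-0 user already exists (as it does in the CI image):

    # Local sketch of the new runguard test steps.
    sudo addgroup domjudge-run-0
    sudo usermod -g domjudge-run-0 domjudge-run-0
    mkdir -p /opt/domjudge/judgehost/tmp/
    cd judge/runguard_test
    judgehost_tmpdir=/tmp judgehost_judgedir=/tmp make test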
diff --git a/doc/manual/install-judgehost.rst b/doc/manual/install-judgehost.rst
index 4066cad4ee..9fd6d02c5a 100644
--- a/doc/manual/install-judgehost.rst
+++ b/doc/manual/install-judgehost.rst
@@ -179,16 +179,9 @@ Optionally the timings can be made more stable by not letting the OS schedule
 any other tasks on the same CPU core the judgedaemon is using:
 ``GRUB_CMDLINE_LINUX_DEFAULT="quiet cgroup_enable=memory swapaccount=1 isolcpus=2"``
 
-On modern distros (e.g. Debian bullseye and Ubuntu Jammy Jellyfish) which have
-cgroup v2 enabled by default, you need to add ``systemd.unified_cgroup_hierarchy=0``
-as well. Then run ``update-grub`` and reboot.
-
-After rebooting check that ``/proc/cmdline`` actually contains the
-added kernel options. On VM hosting providers such as Google Cloud or
-DigitalOcean, ``GRUB_CMDLINE_LINUX_DEFAULT`` may be overwritten
-by other files in ``/etc/default/grub.d/``.
-
 You have now configured the system to use cgroups. To create
-the actual cgroups that DOMjudge will use, run::
+the actual cgroups that DOMjudge will use, on systems with cgroups v1, you need
+to run::
 
     sudo systemctl enable create-cgroups --now
 
@@ -197,7 +190,8 @@ Note that this service will automatically be started if you use the
 customize the script ``judge/create_cgroups`` as required and run it
 after each boot.
 
-The script `jvm_footprint` can be used to measure the memory overhead of the JVM for languages such as Kotlin and Java.
+The script `jvm_footprint` can be used to measure the memory overhead of the
+JVM for languages such as Kotlin and Java.
 
 REST API credentials
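Since the manual now distinguishes cgroup v1 hosts (which still need create-cgroups) from cgroup v2 hosts (which do not), here is a quick way to check which hierarchy a judgehost mounts, using the same checks the CI info step and judge/create_cgroups use:

    # Prints "cgroup2fs" on a unified (v2) hierarchy, "tmpfs" on the legacy v1 layout.
    stat -fc %T /sys/fs/cgroup
    # Equivalent check via /proc/mounts, as done in judge/create_cgroups later in this diff:
    awk '$2 == "/sys/fs/cgroup" {print $3}' /proc/mounts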
diff --git a/gitlab/ci/integration.yml b/gitlab/ci/integration.yml
deleted file mode 100644
index d4152ab687..0000000000
--- a/gitlab/ci/integration.yml
+++ /dev/null
@@ -1,64 +0,0 @@
-.integration_job:
-  extends: [.long_job,.cached_vendor]
-  stage: integration
-  script:
-    - set -eux
-    - if [ -z ${PHPVERSION+x} ]; then export PHPVERSION=8.3; fi
-    - if [ "$TEST" = "E2E" ]; then exit 0; fi
-    - if [ "$CRAWL_SHADOW_MODE" != "0" ]; then exit 0; fi
-    - timeout --signal=15 40m ./gitlab/integration.sh $PHPVERSION
-  artifacts:
-    when: always
-    paths:
-      - gitlabartifacts
-
-# TODO: Re-enable when gitlab is in better shape...
-#  cache:
-#    key: integration
-#    paths:
-#      - chroot
-
-integration_mysql:
-  only:
-    - main
-    - /^[0-9].[0-9]$/
-  extends: [.mysql_job,.integration_job]
-  variables:
-    MYSQL_ROOT_PASSWORD: password
-    MARIADB_PORT_3306_TCP_ADDR: sqlserver
-    MYSQL_REQUIRE_PRIMARY_KEY: 1
-    PIN_JUDGEDAEMON: 1
-    TEST: "Unit"
-    CRAWL_SHADOW_MODE: "0"
-
-integration_mariadb_pr:
-  except:
-    - main
-    - /^[0-9].[0-9]$/
-  extends: [.mariadb_job,.phpsupported_job_pr,.integration_job]
-  variables:
-    MYSQL_ROOT_PASSWORD: password
-    MARIADB_PORT_3306_TCP_ADDR: sqlserver
-    PIN_JUDGEDAEMON: 1
-
-integration_mariadb:
-  only:
-    - main
-    - /^[0-9].[0-9]$/
-  extends: [.mariadb_job,.phpsupported_job,.integration_job]
-  variables:
-    MYSQL_ROOT_PASSWORD: password
-    MARIADB_PORT_3306_TCP_ADDR: sqlserver
-    PIN_JUDGEDAEMON: 1
-
-integration_unpinned_judgehost:
-  only:
-    - main
-    - /^[0-9].[0-9]$/
-  extends: [.mariadb_job,.integration_job]
-  variables:
-    MYSQL_ROOT_PASSWORD: password
-    MARIADB_PORT_3306_TCP_ADDR: sqlserver
-    PIN_JUDGEDAEMON: 0
-    TEST: "Unit"
-    CRAWL_SHADOW_MODE: "0"
diff --git a/gitlab/integration.sh b/gitlab/integration.sh
deleted file mode 100755
index 6d74f2cad9..0000000000
--- a/gitlab/integration.sh
+++ /dev/null
@@ -1,288 +0,0 @@
-#!/bin/bash
-
-. gitlab/ci_settings.sh
-
-export version=$1
-
-show_phpinfo $version
-
-function finish() {
-    echo -e "\\n\\n=======================================================\\n"
-    echo "Storing artifacts..."
-    trace_on
-    set +e
-    mysqldump domjudge > "$GITLABARTIFACTS/db.sql"
-    cp /var/log/nginx/domjudge.log "$GITLABARTIFACTS/nginx.log"
-    cp /opt/domjudge/domserver/webapp/var/log/prod.log "$GITLABARTIFACTS/symfony.log"
-    cp /opt/domjudge/domserver/webapp/var/log/prod.log.errors "$GITLABARTIFACTS/symfony_errors.log"
-    cp /tmp/judgedaemon.log "$GITLABARTIFACTS/judgedaemon.log"
-    cp /proc/cmdline "$GITLABARTIFACTS/cmdline"
-    CHROOTDIR=/chroot/domjudge
-    if [ -n "${CI+x}" ]; then
-        CHROOTDIR=${DIR}${CHROOTDIR}
-    fi
-    cp $CHROOTDIR/etc/apt/sources.list "$GITLABARTIFACTS/sources.list"
-    cp $CHROOTDIR/debootstrap/debootstrap.log "$GITLABARTIFACTS/debootstrap.log"
-    cp "${DIR}"/misc-tools/icpctools/*json "$GITLABARTIFACTS/"
-}
-trap finish EXIT
-
-export integration=1
-section_start setup "Setup and install"
-
-# Set up
-"$( dirname "${BASH_SOURCE[0]}" )"/base.sh
-
-# Add jury to demo user
-echo "INSERT INTO userrole (userid, roleid) VALUES (3, 2);" | mysql domjudge
-
-# Add netrc file for demo user login
-echo "machine localhost login demo password demo" > ~/.netrc
-
-cd /opt/domjudge/domserver
-
-# This needs to be done before we do any submission.
-# 8 hours as a helper so we can adjust contest start/endtime
-TIMEHELP=$((8*60*60))
-UNIX_TIMESTAMP=$(date +%s)
-STARTTIME=$((UNIX_TIMESTAMP-TIMEHELP))
-export TZ="Europe/Amsterdam"
-STARTTIME_STRING="$(date -d @$STARTTIME +'%F %T Europe/Amsterdam')"
-FREEZETIME=$((UNIX_TIMESTAMP+TIMEHELP))
-FREEZETIME_STRING="$(date -d @$FREEZETIME +'%F %T Europe/Amsterdam')"
-ENDTIME=$((UNIX_TIMESTAMP+TIMEHELP+TIMEHELP))
-ENDTIME_STRING="$(date -d @$ENDTIME +'%F %T Europe/Amsterdam')"
-# Database changes to make the REST API and event feed match better.
-cat </dev/null; then
-    userdel -f -r $RUN_USER
-fi
-
-sudo useradd -d /nonexistent -g nogroup -s /bin/false -u $((2000+(RANDOM%1000))) $RUN_USER
-
-# Since ubuntu20.04 gitlabci image this is sometimes needed
-# It should be safe to remove this when it creates issues
-set +e
-mount -t proc proc /proc
-set -e
-
-if [ $PIN_JUDGEDAEMON -eq 1 ]; then
-    PINNING="-n 0"
-fi
-section_end more_setup
-
-if [ $cgroupv1 -ne 0 ]; then
-    section_start runguard_tests "Running isolated runguard tests"
-    sudo addgroup domjudge-run-0
-    sudo usermod -g domjudge-run-0 domjudge-run-0
-    cd ${DIR}/judge/runguard_test
-    make test
-    section_end runguard_tests
-fi
-
-if [ $cgroupv1 -ne 0 ]; then
-    section_start start_judging "Start judging"
-    cd /opt/domjudge/judgehost/
-
-    sudo -u domjudge bin/judgedaemon $PINNING |& tee /tmp/judgedaemon.log &
-    sleep 5
-    section_end start_judging
-fi
-
-section_start submitting "Importing Kattis examples"
-export SUBMITBASEURL='http://localhost/domjudge/'
-
-# Prepare to load example problems from Kattis/problemtools
-echo "INSERT INTO userrole (userid, roleid) VALUES (3, 1);" | mysql domjudge
-cd /tmp
-git clone --depth=1 https://github.com/Kattis/problemtools.git
-cd problemtools/examples
-mv hello hello_kattis
-# Remove 2 submissions that will not pass validation. The first is because it is
-# a Python 2 submission. The latter has a judgement type we do not understand.
-rm different/submissions/accepted/different_py2.py different/submissions/slow_accepted/different_slow.py
-for i in hello_kattis different guess; do
-    (
-        cd "$i"
-        zip -r "../${i}.zip" -- *
-    )
-    curl --fail -X POST -n -N -F zip=@${i}.zip http://localhost/domjudge/api/contests/demo/problems
-done
-section_end submitting
-
-section_start curlcookie "Preparing cookie jar for curl"
-export COOKIEJAR
-COOKIEJAR=$(mktemp --tmpdir)
-export CURLOPTS="--fail -sq -m 30 -b $COOKIEJAR"
-
-# Make an initial request which will get us a session id, and grab the csrf token from it
-CSRFTOKEN=$(curl $CURLOPTS -c $COOKIEJAR "http://localhost/domjudge/login" 2>/dev/null | sed -n 's/.*_csrf_token.*value="\(.*\)".*/\1/p')
-# Make a second request with our session + csrf token to actually log in
-curl $CURLOPTS -c $COOKIEJAR -F "_csrf_token=$CSRFTOKEN" -F "_username=admin" -F "_password=$ADMINPASS" "http://localhost/domjudge/login"
-
-# Send a general clarification to later test if we see the event.
-curl $CURLOPTS -F "sendto=" -F "problem=1-" -F "bodytext=Testing" -F "submit=Send" \
-    "http://localhost/domjudge/jury/clarifications/send" -o /dev/null
-
-section_end curlcookie
-
-if [ $cgroupv1 -ne 0 ]; then
-    section_start judging "Waiting until all submissions are judged"
-    # wait for and check results
-    NUMSUBS=$(curl --fail http://admin:$ADMINPASS@localhost/domjudge/api/contests/demo/submissions | python3 -mjson.tool | grep -c '"id":')
-
-    # Don't spam the log.
-    set +x
-
-    while /bin/true; do
-        sleep 30s
-        curl $CURLOPTS "http://localhost/domjudge/jury/judging-verifier?verify_multiple=1" -o /dev/null
-
-        # Check if we are done, i.e. everything is judged or something got disabled by internal error...
-        if tail /tmp/judgedaemon.log | grep -q "No submissions in queue"; then
-            break
-        fi
-        # ... or something has crashed.
-        if ! pgrep -f judgedaemon; then
-            break
-        fi
-    done
-
-    NUMNOTVERIFIED=$(curl $CURLOPTS "http://localhost/domjudge/jury/judging-verifier" | grep "submissions checked"     | sed -r 's/^.* ([0-9]+) submissions checked.*$/\1/')
-    NUMVERIFIED=$(   curl $CURLOPTS "http://localhost/domjudge/jury/judging-verifier" | grep "submissions not checked" | sed -r 's/^.* ([0-9]+) submissions not checked.*$/\1/')
-    NUMNOMAGIC=$(    curl $CURLOPTS "http://localhost/domjudge/jury/judging-verifier" | grep "without magic string"    | sed -r 's/^.* ([0-9]+) without magic string.*$/\1/')
-    section_end judging
-
-    # We expect
-    # - two submissions with ambiguous outcome,
-    # - one submissions submitted through the submit client, and thus the magic string ignored,
-    # - and all submissions to be judged.
-    if [ $NUMNOTVERIFIED -ne 2 ] || [ $NUMNOMAGIC -ne 1 ] || [ $NUMSUBS -gt $((NUMVERIFIED+NUMNOTVERIFIED)) ]; then
-        section_start error "Short error description"
-        # We error out below anyway, so no need to fail earlier than that.
-        set +e
-        echo "verified subs: $NUMVERIFIED, unverified subs: $NUMNOTVERIFIED, total subs: $NUMSUBS"
-        echo "(expected 2 submissions to be unverified, but all to be processed)"
-        echo "Of these $NUMNOMAGIC do not have the EXPECTED_RESULTS string (should be 1)."
-        curl $CURLOPTS "http://localhost/domjudge/jury/judging-verifier?verify_multiple=1" | w3m -dump -T text/html
-        section_end error
-
-        section_start logfiles "All the more or less useful logfiles"
-        for i in /opt/domjudge/judgehost/judgings/*/*/*/*/*/compile.out; do
-            echo $i;
-            head -n 100 $i;
-            dir=$(dirname $i)
-            if [ -r $dir/testcase001/system.out ]; then
-                head $dir/testcase001/system.out
-                head $dir/testcase001/runguard.err
-                head $dir/testcase001/program.err
-                head $dir/testcase001/program.meta
-            fi
-            echo;
-        done
-        exit 1;
-    fi
-fi
-
-section_start api_check "Performing API checks"
-# Start logging again
-set -x
-
-# Finalize contest so that awards appear in the feed; first freeze and end the
-# contest if that has not already been done.
-export CURLOPTS="--fail -m 30 -b $COOKIEJAR"
-curl $CURLOPTS http://localhost/domjudge/jury/contests/1/freeze/doNow || true
-curl $CURLOPTS http://localhost/domjudge/jury/contests/1/end/doNow || true
-curl $CURLOPTS -X POST -d 'finalize_contest[b]=0&finalize_contest[finalizecomment]=gitlab&finalize_contest[finalize]=' http://localhost/domjudge/jury/contests/1/finalize
-
-# shellcheck disable=SC2002,SC2196
-if cat /opt/domjudge/domserver/webapp/var/log/prod.log | egrep '(CRITICAL|ERROR):'; then
-    exit 1
-fi
-
-# Check the Contest API:
-if [ $cgroupv1 -ne 0 ]; then
-    $CHECK_API -n -C -e -a 'strict=1' http://admin:$ADMINPASS@localhost/domjudge/api
-else
-    # With cgroup v1 not being available we don't judge, so we cannot do
-    # consistency checks, so running the above command without -C.
-    $CHECK_API -n -e -a 'strict=1' http://admin:$ADMINPASS@localhost/domjudge/api
-fi
-section_end api_check |& tee "$GITLABARTIFACTS/check_api.log"
-
-section_start validate_feed "Validate the eventfeed against API (ignoring failures)"
-cd ${DIR}/misc-tools
-./compare-cds.sh http://localhost/domjudge 1 |& tee "$GITLABARTIFACTS/compare_cds.log" || true
-section_end validate_feed
diff --git a/judge/create_cgroups.in b/judge/create_cgroups.in
index 56d1338a31..1b96cfee66 100755
--- a/judge/create_cgroups.in
+++ b/judge/create_cgroups.in
@@ -9,6 +9,13 @@ JUDGEHOSTUSER=@DOMJUDGE_USER@
 
 CGROUPBASE="/sys/fs/cgroup"
 
+# We do not need to do any of this with cgroup v2.
+fs_type=$(awk '$2 == "/sys/fs/cgroup" {print $3}' /proc/mounts)
+if [ "$fs_type" = "cgroup2" ]; then
+    echo "cgroup v2 detected, skipping cgroup creation" >&2
+    exit 0
+fi
+
 cgroup_error_and_usage () {
     echo "$1" >&2
     echo "To fix this, please make the following changes:
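With this change, running the installed script on a cgroup v2 host becomes a no-op. A sketch of the expected behaviour; the install path below assumes the default judgehost prefix:

    # Expected on a cgroup v2 host (path assumes the default /opt/domjudge prefix):
    sudo /opt/domjudge/judgehost/bin/create_cgroups
    # prints "cgroup v2 detected, skipping cgroup creation" to stderr and exits with status 0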
diff --git a/judge/runguard.cc b/judge/runguard.cc
index 1dda7d9879..494d933b0c 100644
--- a/judge/runguard.cc
+++ b/judge/runguard.cc
@@ -45,6 +45,7 @@
 #include
 #include
 #include
+#include <mntent.h>
 #include
 #include
 #include
@@ -139,6 +140,7 @@ int show_version;
 int in_error_handling = 0;
 
 pid_t runpipe_pid = -1;
+bool is_cgroup_v2 = false;
 
 double walltimelimit[2], cputimelimit[2]; /* in seconds, soft and hard limits */
 int walllimit_reached, cpulimit_reached; /* 1=soft, 2=hard, 3=both limits reached */
@@ -442,7 +444,11 @@ void output_exit_time(int exitcode, double cpudiff)
 void check_remaining_procs()
 {
     char path[1024];
-    snprintf(path, 1023, "/sys/fs/cgroup/cpuacct%scgroup.procs", cgroupname);
+    if (is_cgroup_v2) {
+        snprintf(path, 1023, "/sys/fs/cgroup/%scgroup.procs", cgroupname);
+    } else {
+        snprintf(path, 1023, "/sys/fs/cgroup/cpuacct/%scgroup.procs", cgroupname);
+    }
 
     FILE *file = fopen(path, "r");
     if (file == nullptr) {
@@ -456,7 +462,7 @@ void check_remaining_procs()
     if (fclose(file) != 0) error(errno, "closing file `%s'", path);
 }
 
-void output_cgroup_stats(double *cputime)
+void output_cgroup_stats_v1(double *cputime)
 {
     struct cgroup *cg;
     if ( (cg = cgroup_new_cgroup(cgroupname))==nullptr ) error(0,"cgroup_new_cgroup");
@@ -483,6 +489,45 @@ void output_cgroup_stats(double *cputime)
     cgroup_free(&cg);
 }
 
+void output_cgroup_stats_v2(double *cputime)
+{
+    struct cgroup *cg;
+    if ( (cg = cgroup_new_cgroup(cgroupname))==NULL ) error(0,"cgroup_new_cgroup");
+
+    int ret;
+    if ((ret = cgroup_get_cgroup(cg)) != 0) error(ret,"get cgroup information");
+
+    struct cgroup_controller *cg_controller = cgroup_get_controller(cg, "memory");
+    int64_t max_usage = 0;
+    ret = cgroup_get_value_int64(cg_controller, "memory.peak", &max_usage);
+    if ( ret == ECGROUPVALUENOTEXIST ) {
+        write_meta("internal-warning", "Kernel too old and does not support memory.peak");
+    } else if ( ret!=0 ) {
+        error(ret,"get cgroup value memory.peak");
+    }
+
+    // There is no need to check swap usage, as we limit it to 0.
+    verbose("total memory used: %" PRId64 " kB", max_usage/1024);
+    write_meta("memory-bytes","%" PRId64, max_usage);
+
+    struct cgroup_stat stat;
+    void *handle;
+    ret = cgroup_read_stats_begin("cpu", cgroupname, &handle, &stat);
+    while (ret == 0) {
+        verbose("cpu.stat: %s = %s", stat.name, stat.value);
+        if (strcmp(stat.name, "usage_usec") == 0) {
+            long long usec = strtoll(stat.value, NULL, 10);
+            *cputime = usec / 1e6;
+        }
+        ret = cgroup_read_stats_next(&handle, &stat);
+    }
+    if ( ret!=ECGEOF ) error(ret,"get cgroup value cpu.stat");
+    cgroup_read_stats_end(&handle);
+
+    cgroup_free(&cg);
+
+}
+
 /* Temporary shorthand define for error handling. */
 #define cgroup_add_value(type,name,value) \
     ret = cgroup_add_value_ ## type(cg_controller, name, value); \
@@ -502,8 +547,19 @@ void cgroup_create()
     }
 
     int ret;
-    cgroup_add_value(int64, "memory.limit_in_bytes", memsize);
-    cgroup_add_value(int64, "memory.memsw.limit_in_bytes", memsize);
+    if (is_cgroup_v2) {
+        // TODO: do we want to set cpu.weight here as well?
+        if (memsize != RLIM_INFINITY) {
+            cgroup_add_value(int64, "memory.max", memsize);
+            cgroup_add_value(int64, "memory.swap.max", 0);
+        } else {
+            cgroup_add_value(string, "memory.max", "max");
+            cgroup_add_value(string, "memory.swap.max", "max");
+        }
+    } else {
+        cgroup_add_value(int64, "memory.limit_in_bytes", memsize);
+        cgroup_add_value(int64, "memory.memsw.limit_in_bytes", memsize);
+    }
 
     /* Set up cpu restrictions; we pin the task to a specific set of
        cpus. We also give it exclusive access to those cores, and set
@@ -521,8 +577,13 @@ void cgroup_create()
         verbose("cpuset undefined");
     }
 
-    if ( (cg_controller = cgroup_add_controller(cg, "cpuacct"))==nullptr ) {
-        error(0,"cgroup_add_controller cpuacct");
+    if (!is_cgroup_v2) {
+        if ( (cg_controller = cgroup_add_controller(cg, "cpu"))==nullptr ) {
+            error(0,"cgroup_add_controller cpu");
+        }
+        if ((cg_controller = cgroup_add_controller(cg, "cpuacct")) == nullptr) {
+            error(0, "cgroup_add_controller cpuacct");
+        }
     }
 
     /* Perform the actual creation of the cgroup */
@@ -551,15 +612,27 @@ void cgroup_attach()
 
 void cgroup_kill()
 {
-    void *handle = nullptr;
-    pid_t pid;
-
     /* kill any remaining tasks, and wait for them to be gone */
-    while(1) {
-        int ret = cgroup_get_task_begin(cgroupname, "memory", &handle, &pid);
-        cgroup_get_task_end(&handle);
-        if (ret == ECGEOF) break;
-        kill(pid, SIGKILL);
+    if (is_cgroup_v2) {
+        int size;
+        do {
+            pid_t* pids;
+            int ret = cgroup_get_procs(cgroupname, "memory", &pids, &size);
+            if (ret != 0) error(ret, "cgroup_get_procs");
+            for(int i = 0; i < size; i++) {
+                kill(pids[i], SIGKILL);
+            }
+            free(pids);
+        } while (size > 0);
+    } else {
+        while(1) {
+            void *handle = nullptr;
+            pid_t pid;
+            int ret = cgroup_get_task_begin(cgroupname, "memory", &handle, &pid);
+            cgroup_get_task_end(&handle);
+            if (ret == ECGEOF) break;
+            kill(pid, SIGKILL);
+        }
     }
 }
 
@@ -569,7 +642,10 @@ void cgroup_delete()
     cg = cgroup_new_cgroup(cgroupname);
     if (!cg) error(0,"cgroup_new_cgroup");
 
-    if ( cgroup_add_controller(cg, "cpuacct")==nullptr ) error(0,"cgroup_add_controller cpuacct");
+    if (cgroup_add_controller(cg, "cpu") == nullptr) error(0, "cgroup_add_controller cpu");
+    if (!is_cgroup_v2) {
+        if (cgroup_add_controller(cg, "cpuacct") == nullptr) error(0, "cgroup_add_controller cpuacct");
+    }
     if ( cgroup_add_controller(cg, "memory")==nullptr ) error(0,"cgroup_add_controller memory");
 
     if ( cpuset!=nullptr && strlen(cpuset)>0 ) {
@@ -578,7 +654,8 @@ void cgroup_delete()
     /* Clean up our cgroup */
     nanosleep(&cg_delete_delay,nullptr);
     int ret = cgroup_delete_cgroup_ext(cg, CGFLAG_DELETE_IGNORE_MIGRATION | CGFLAG_DELETE_RECURSIVE);
-    if ( ret!=0 ) error(ret,"deleting cgroup");
+    // TODO: is this actually benign to ignore ECGOTHER here?
+    if ( ret!=0 && ret!=ECGOTHER ) error(ret,"deleting cgroup");
 
     cgroup_free(&cg);
 
@@ -792,7 +869,14 @@ void setrestrictions()
     }
 
     /* Put the child process in the cgroup */
-    cgroup_attach();
+    if (is_cgroup_v2) {
+        const char *controllers[] = { "memory", NULL };
+        if (cgroup_change_cgroup_path(cgroupname, getpid(), controllers) != 0) {
+            error(0, "Failed to move the process to the cgroup");
+        }
+    } else {
+        cgroup_attach();
+    }
 
     /* Run the command in a separate process group so that the command
        and all its children can be killed off with one signal. */
@@ -938,6 +1022,29 @@ void pump_pipes(fd_set* readfds, size_t data_read[], size_t data_passed[])
 
 }
 
+bool cgroup_is_v2() {
+    bool ret = false;
+    FILE *fp = setmntent("/proc/mounts", "r");
+    if (!fp) {
+        perror("Error opening /proc/mounts");
+        return false;
+    }
+
+    struct mntent *entry;
+    while ((entry = getmntent(fp)) != nullptr) {
+        if (strcmp(entry->mnt_dir, "/sys/fs/cgroup") == 0) {
+            if (strcmp(entry->mnt_type, "cgroup2") == 0) {
+                ret = true;
+            }
+            break;
+        }
+    }
+
+    endmntent(fp);
+
+    return ret;
+}
+
 int main(int argc, char **argv)
 {
     int ret;
@@ -1112,6 +1219,8 @@ int main(int argc, char **argv)
     cmdname = argv[optind];
     cmdargs = argv+optind;
 
+    is_cgroup_v2 = cgroup_is_v2();
+
     if ( outputmeta && (metafile = fopen(metafilename,"w"))==nullptr ) {
         error(errno,"cannot open `%s'",metafilename);
     }
@@ -1171,6 +1280,7 @@ int main(int argc, char **argv)
             }
         }
     }
+
     /* Make libcgroup ready for use */
     ret = cgroup_init();
     if ( ret!=0 ) {
@@ -1184,8 +1294,8 @@ int main(int argc, char **argv)
     } else {
         str[0] = 0;
     }
-    snprintf(cgroupname, 255, "/domjudge/dj_cgroup_%d_%.16s_%d.%06d/",
-             getpid(), str, (int)progstarttime.tv_sec, (int)progstarttime.tv_usec);
+    snprintf(cgroupname, 255, "domjudge/dj_cgroup_%d_%.16s_%d.%06d/",
+             getpid(), str, (int) progstarttime.tv_sec, (int) progstarttime.tv_usec);
 
     cgroup_create();
 
@@ -1245,9 +1355,12 @@ int main(int argc, char **argv)
 
     /* And execute child command. */
     execvp(cmdname,cmdargs);
-    error(errno,"cannot start `%s'",cmdname);
+    struct rlimit limit;
+    getrlimit(RLIMIT_NPROC, &limit);
+    error(errno,"cannot start `%s', limit: %ld/%ld | ",cmdname, limit.rlim_cur, limit.rlim_max);
 
     default: /* become watchdog */
+        verbose("child pid = %d", child_pid);
         /* Shed privileges, only if not using a separate child uid,
            because in that case we may need root privileges to kill
           the child process. Do not use Linux specific setresuid()
@@ -1431,7 +1544,11 @@ int main(int argc, char **argv)
     check_remaining_procs();
 
     double cputime = -1;
-    output_cgroup_stats(&cputime);
+    if (is_cgroup_v2) {
+        output_cgroup_stats_v2(&cputime);
+    } else {
+        output_cgroup_stats_v1(&cputime);
+    }
     cgroup_kill();
     cgroup_delete();
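For reference, the numbers that output_cgroup_stats_v2() pulls out via libcgroup can also be read straight from the cgroup filesystem. A sketch, assuming a cgroup v2 host and a runguard cgroup under /sys/fs/cgroup/domjudge/; the directory name below is a placeholder, since runguard generates it at runtime:

    # Hypothetical manual inspection of a runguard cgroup on a cgroup v2 host.
    CG=/sys/fs/cgroup/domjudge/dj_cgroup_example     # placeholder name
    cat "$CG/memory.peak"              # peak memory in bytes; missing on older kernels, hence the internal-warning above
    grep usage_usec "$CG/cpu.stat"     # CPU time in microseconds, which runguard converts to seconds
    cat "$CG/cgroup.procs"             # PIDs still in the cgroup, what check_remaining_procs() reads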