diff --git a/.github/release.yml b/.github/release.yml new file mode 100644 index 000000000..8e22b33a9 --- /dev/null +++ b/.github/release.yml @@ -0,0 +1,21 @@ +changelog: + exclude: + authors: + - dependabot + categories: + - title: ๐Ÿ’ช Improvements + labels: + - kind/feature + - title: ๐Ÿ› Bug Fixes + labels: + - kind/bug + - title: ๐Ÿ“– Documentation improvements + labels: + - kind/documentation + - title: ๐Ÿงช Test improvements and Misc Fixes + labels: + - kind/test + - kind/cleanup + - title: Other Changes + labels: + - "*" diff --git a/.github/workflows/benchmark_execution_time.yml b/.github/workflows/benchmark_execution_time.yml index 87b177b6c..2a0373171 100644 --- a/.github/workflows/benchmark_execution_time.yml +++ b/.github/workflows/benchmark_execution_time.yml @@ -14,20 +14,17 @@ jobs: - name: Checkout to PR branch uses: actions/checkout@v3 - - name: Setup Linux env - run: | - sudo apt -y update - sudo apt install libsystemd-dev librust-libdbus-sys-dev libseccomp-dev + - name: Install requirements + run: sudo ./.github/scripts/dependency.sh - - name: Setting rust toolchain - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - profile: minimal + - name: Setup Rust toolchain and cache + uses: actions-rust-lang/setup-rust-toolchain@v1.3.7 + + - name: Install Just + uses: taiki-e/install-action@just - name: Building PR branch - run: make youki-release + run: just youki-release - name: Uploading PR build to artifact uses: actions/upload-artifact@v2 @@ -46,20 +43,16 @@ jobs: with: ref: main - - name: Setup Linux env - run: | - sudo apt -y update - sudo apt install libsystemd-dev librust-libdbus-sys-dev libseccomp-dev + - name: Install requirements + run: sudo ./.github/scripts/dependency.sh - - name: Setting rust toolchain - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - profile: minimal + - name: Setup Rust toolchain and cache + uses: actions-rust-lang/setup-rust-toolchain@v1.3.7 + - name: Install 
just + uses: taiki-e/install-action@just - name: Building main branch - run: make youki-release + run: just youki-release - name: Uploading main build to artifact uses: actions/upload-artifact@v2 diff --git a/.github/workflows/containerd_integration_tests.yaml b/.github/workflows/containerd_integration_tests.yaml deleted file mode 100644 index 3978e3e6d..000000000 --- a/.github/workflows/containerd_integration_tests.yaml +++ /dev/null @@ -1,60 +0,0 @@ -on: - push: - branches: - - main - pull_request: - branches: - - main - -jobs: - youki-build: - runs-on: ubuntu-22.04 - timeout-minutes: 15 - steps: - - uses: actions/checkout@v3 - - uses: actions-rs/toolchain@v1 - with: - toolchain: '1.67.0' - override: true - - name: Cache youki - uses: Swatinem/rust-cache@v1 - - run: sudo apt-get -y update - - run: sudo apt-get install -y pkg-config libsystemd-dev libdbus-glib-1-dev libelf-dev libseccomp-dev - - name: Build youki - run: make youki-release - - name: Upload youki binary - uses: actions/upload-artifact@v3 - with: - name: youki - path: youki - - containerd-integration-tests: - runs-on: ubuntu-22.04 - needs: [youki-build] - timeout-minutes: 40 - steps: - - uses: actions/checkout@v3 - with: - repository: containerd/containerd - ref: v1.6.9 - - uses: actions/setup-go@v3 - with: - go-version: '1.18.3' - - run: sudo apt-get -y update - - run: sudo apt-get install -y pkg-config libsystemd-dev libdbus-glib-1-dev libelf-dev libseccomp-dev btrfs-progs libbtrfs-dev - - name: Build containerd - run: | - make build - make binaries - - name: Download youki binary - uses: actions/download-artifact@v3 - with: - name: youki - - name: Replace runc to youki - run: | - sudo rm -f /usr/bin/runc /usr/local/bin/runc /usr/sbin/runc - sudo chmod 755 youki - sudo cp youki /usr/bin/runc - runc --version - - name: Integration Test - run: sudo make TEST_RUNTIME=io.containerd.runc.v2 TESTFLAGS="-timeout 40m" integration diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml new file 
mode 100644 index 000000000..e7a277f48 --- /dev/null +++ b/.github/workflows/e2e.yaml @@ -0,0 +1,129 @@ +name: ๐Ÿงช e2e test + +on: + push: + branches: + - main + pull_request: + branches: + - main + +jobs: + youki-build: + runs-on: ubuntu-22.04 + timeout-minutes: 15 + steps: + - uses: actions/checkout@v3 + - name: Setup Rust toolchain and cache + uses: actions-rust-lang/setup-rust-toolchain@v1.3.7 + - name: Install just + uses: taiki-e/install-action@just + - name: Install requirements + run: sudo env PATH=$PATH just ci-prepare + - name: Build youki + run: just youki-release + - name: Upload youki binary + uses: actions/upload-artifact@v3 + with: + name: youki + path: youki + + containerd-integration-tests: + runs-on: ubuntu-22.04 + needs: [youki-build] + timeout-minutes: 40 + steps: + - uses: actions/checkout@v3 + with: + repository: containerd/containerd + ref: v1.6.20 + - uses: actions/setup-go@v3 + with: + go-version: '1.19.9' + cache: true + - run: sudo apt-get -y update + - run: sudo apt-get install -y pkg-config libsystemd-dev libdbus-glib-1-dev libelf-dev libseccomp-dev btrfs-progs libbtrfs-dev + - name: Build containerd + run: | + make build + make binaries + - name: Download youki binary + uses: actions/download-artifact@v3 + with: + name: youki + - name: Replace runc to youki + run: | + sudo rm -f /usr/bin/runc /usr/local/bin/runc /usr/sbin/runc + sudo chmod 755 youki + sudo cp youki /usr/bin/runc + runc --version + - name: Integration Test + run: sudo make TEST_RUNTIME=io.containerd.runc.v2 TESTFLAGS="-timeout 40m" integration + + k8s-tests: + runs-on: ubuntu-22.04 + needs: [youki-build] + timeout-minutes: 40 + steps: + - uses: actions/checkout@v3 + - name: Download youki binary + uses: actions/download-artifact@v3 + with: + name: youki + - name: Add the permission to run + run: chmod +x ./youki + - name: Install just + uses: taiki-e/install-action@just + - name: test/k8s/deploy + run: just test-kind + + oci-validation-go: + runs-on: ubuntu-22.04 + 
needs: [youki-build] + timeout-minutes: 15 + steps: + - uses: actions/checkout@v3 + with: + submodules: recursive + - name: Setup Rust toolchain and cache + uses: actions-rust-lang/setup-rust-toolchain@v1 + - name: Install just + uses: taiki-e/install-action@just + - uses: actions/setup-go@v3 + with: + go-version: '1.19.9' + cache: true + cache-dependency-path: tests/oci-runtime-tests/src/github.com/opencontainers/runtime-tools/go.sum + - name: Download youki binary + uses: actions/download-artifact@v3 + with: + name: youki + - name: Add the permission to run + run: chmod +x ./youki + - name: Run integration tests + run: just oci-tests + + oci-validation-rust: + runs-on: ubuntu-22.04 + needs: [youki-build] + timeout-minutes: 15 + steps: + - uses: actions/checkout@v3 + with: + submodules: recursive + - name: Setup Rust toolchain and cache + uses: actions-rust-lang/setup-rust-toolchain@v1 + - name: Install just + uses: taiki-e/install-action@just + - name: Install requirements + run: sudo env PATH=$PATH just ci-prepare + - name: Download youki binary + uses: actions/download-artifact@v3 + with: + name: youki + - name: Add the permission to run + run: chmod +x ./youki + - name: Build + run: just runtimetest rust-oci-tests-bin + - name: Validate tests on youki + run: just rust-oci-tests diff --git a/.github/workflows/integration_tests_validation.yaml b/.github/workflows/integration_tests_validation.yaml index 32867349e..8961a6406 100644 --- a/.github/workflows/integration_tests_validation.yaml +++ b/.github/workflows/integration_tests_validation.yaml @@ -5,46 +5,48 @@ on: pull_request: branches: - main + workflow_dispatch: jobs: changes: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 timeout-minutes: 15 outputs: - dirs: ${{ steps.filter.outputs.changes }} + any_modified: ${{ steps.filter.outputs.any_modified }} steps: - uses: actions/checkout@v3 - - uses: dorny/paths-filter@v2 + - uses: tj-actions/changed-files@v36 id: filter with: - filters: | - ./integration_test: 
./tests/rust-integration-tests/** + files: | + .github/workflows/integration_tests_validation.yaml + tests/rust-integration-tests/** + files_ignore: | + **.md + - name: List all changed files + run: | + for file in ${{ steps.filter.outputs.all_modified_files }}; do + echo "$file was changed" + done validate: needs: [changes] - if: ${{ !contains(needs.changes.outputs.dirs, '[]') }} + if: needs.changes.outputs.any_modified == 'true' runs-on: ubuntu-20.04 - timeout-minutes: 15 - strategy: - matrix: - rust: [1.66.0, 1.67.0] + timeout-minutes: 30 steps: - uses: actions/checkout@v3 - - uses: actions-rs/toolchain@v1 - with: - toolchain: ${{ matrix.rust }} - override: true - - name: Cache youki - uses: Swatinem/rust-cache@v1 - - run: sudo apt-get -y update - - run: sudo apt-get install -y pkg-config libsystemd-dev libdbus-glib-1-dev libelf-dev libseccomp-dev + - name: Setup Rust toolchain and cache + uses: actions-rust-lang/setup-rust-toolchain@v1.3.7 + - name: Install just + uses: taiki-e/install-action@just + - name: Install requirements + run: sudo env PATH=$PATH just ci-prepare - name: Install runc 1.1.0 run: | wget -q https://github.com/opencontainers/runc/releases/download/v1.1.0/runc.amd64 sudo mv runc.amd64 /usr/bin/runc sudo chmod 755 /usr/bin/runc - name: Build - run: make youki-release runtimetest rust-oci-tests-bin + run: just runtimetest rust-oci-tests-bin - name: Validate tests on runc - run: make validate-rust-oci-runc - - name: Validate tests on youki - run: make rust-oci-tests + run: just validate-rust-oci-runc diff --git a/.github/workflows/label.yaml b/.github/workflows/label.yaml new file mode 100644 index 000000000..9538bfddc --- /dev/null +++ b/.github/workflows/label.yaml @@ -0,0 +1,17 @@ +name: ๐Ÿท๏ธ Pull Request Labels + +on: + pull_request: + types: [opened, labeled, unlabeled, synchronize] +jobs: + label: + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + steps: + - uses: mheap/github-action-required-labels@v5 + 
with: + mode: exactly + count: 1 + labels: "kind/feature, kind/bug, kind/documentation, kind/test, kind/cleanup, dependencies" diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 62ac1d1d7..36a5d0b94 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -5,125 +5,124 @@ on: pull_request: branches: - main + workflow_dispatch: jobs: changes: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 timeout-minutes: 15 outputs: - dirs: ${{ steps.filter.outputs.changes }} + any_modified: ${{ steps.filter.outputs.any_modified }} steps: - uses: actions/checkout@v3 - - uses: dorny/paths-filter@v2 + - uses: tj-actions/changed-files@v36 id: filter with: - filters: | - crates/youki: crates/youki/** - crates/libcontainer: crates/libcontainer/** - crates/libcgroups: crates/libcgroups/** - tests/rust-integration-tests/runtimetest: ./tests/rust-integration-tests/runtimetest - tests/rust-integration-tests/integration_test: ./tests/rust-integration-tests/integration_test - tests/rust-integration-tests/test_framework: ./tests/rust-integration-tests/test_framework + files_ignore: | + docs + LICENSE + **.md + - name: List all changed files + run: | + for file in ${{ steps.filter.outputs.all_modified_files }}; do + echo "$file was changed" + done + check: needs: [changes] - if: ${{ !contains(needs.changes.outputs.dirs, '[]') }} - runs-on: ubuntu-20.04 + if: needs.changes.outputs.any_modified == 'true' + runs-on: ubuntu-22.04 timeout-minutes: 15 - strategy: - matrix: - rust: [1.66.0, 1.67.0] - dirs: ${{ fromJSON(needs.changes.outputs.dirs) }} steps: - uses: actions/checkout@v3 - - uses: actions-rs/toolchain@v1 + - name: Setup Rust toolchain and cache + uses: actions-rust-lang/setup-rust-toolchain@v1 with: - toolchain: ${{ matrix.rust }} - override: true - - name: Cache youki - uses: Swatinem/rust-cache@v1 - - run: rustup component add rustfmt clippy - - run: sudo apt-get -y update - - run: sudo apt-get install -y pkg-config libsystemd-dev 
libdbus-glib-1-dev libelf-dev libseccomp-dev - - name: Check formatting - run: cargo fmt --all -- --check - working-directory: ${{matrix.dirs}} - - name: Check clippy lints - working-directory: ${{matrix.dirs}} - run: cargo clippy --all-targets --all-features -- -D warnings + components: rustfmt, clippy + + - name: typos-action + uses: crate-ci/typos@v1.14.12 + + - uses: taiki-e/install-action@just + + - name: Install requirements + run: sudo env PATH=$PATH just ci-prepare + + - name: Check formatting and lints + run: just lint + tests: - runs-on: ubuntu-20.04 + needs: [changes] + if: needs.changes.outputs.any_modified == 'true' + runs-on: ubuntu-22.04 timeout-minutes: 15 - strategy: - matrix: - rust: [1.66.0, 1.67.0] steps: - uses: actions/checkout@v3 - - uses: actions-rs/toolchain@v1 - with: - toolchain: ${{ matrix.rust }} - override: true - - name: Cache youki - uses: Swatinem/rust-cache@v1 - - run: sudo apt-get -y update - - run: sudo apt-get install -y pkg-config libsystemd-dev libdbus-glib-1-dev libelf-dev libseccomp-dev + - name: Setup Rust toolchain and cache + uses: actions-rust-lang/setup-rust-toolchain@v1 + - uses: taiki-e/install-action@just + - name: Install requirements + run: sudo env PATH=$PATH just ci-prepare - name: Run tests run: | export LD_LIBRARY_PATH=$HOME/.wasmedge/lib cd ./crates && cargo test --all --all-features --no-fail-fast + - name: Run feature tests + run: just test-features + + # TODO: musl testing is flaky. Turn it back on when we can resolve it. + # # musl target testing is religated to a separate job because our current build + # # for musl requires nightly. It is difficult to mix stable and nightly + # # toolchains in the same job. 
+ # musl: + # needs: [changes] + # if: needs.changes.outputs.any_modified == 'true' + # runs-on: ubuntu-22.04 + # timeout-minutes: 20 + # steps: + # - uses: actions/checkout@v3 + # # We do not use `actions-rust-lang/setup-rust-toolchain` here because we + # # want to override the default toolchain file to use nightly toolchains. + # # The `setup-rust-toolchain` action will always choose toolchain file with + # # no way to override. + # - name: Setup Rust toolchain and cache + # uses: dtolnay/rust-toolchain@v1 + # with: + # toolchain: nightly + # target: x86_64-unknown-linux-musl + # components: rust-src + # - uses: taiki-e/install-action@just + # - name: Install requirements + # run: sudo env PATH=$PATH just ci-musl-prepare + # - name: Run test against musl target + # run: just test-musl + + coverage: - runs-on: ubuntu-20.04 + needs: [changes] + if: needs.changes.outputs.any_modified == 'true' + runs-on: ubuntu-22.04 timeout-minutes: 15 name: Run test coverage steps: - uses: actions/checkout@v3 - - name: Toolchain setup - uses: actions-rs/toolchain@v1 - with: - toolchain: 1.67.0 - override: true - profile: minimal - components: llvm-tools-preview - - name: Cache youki - uses: Swatinem/rust-cache@v1 + - name: Setup Rust toolchain and cache + uses: actions-rust-lang/setup-rust-toolchain@v1.3.7 + - name: Install llvm-tools-preview + run: rustup component add llvm-tools-preview - name: install cargo-llvm-cov uses: taiki-e/install-action@v1 with: tool: cargo-llvm-cov@0.4.0 - - name: Update System Libraries - run: sudo apt-get -y update - - name: Install System Libraries - run: sudo apt-get install -y pkg-config libsystemd-dev libdbus-glib-1-dev libelf-dev libseccomp-dev + - uses: taiki-e/install-action@just + - name: Install requirements + run: sudo env PATH=$PATH just ci-prepare - name: Run Test Coverage for youki run: | cargo llvm-cov clean --workspace cargo llvm-cov --no-report cargo llvm-cov --no-run --lcov --ignore-filename-regex 
"libcgroups/src/systemd/dbus/systemd_api.rs" --output-path ./coverage.lcov - name: Upload Youki Code Coverage Results - uses: codecov/codecov-action@v2 + uses: codecov/codecov-action@v3 with: file: ./coverage.lcov - integration_tests: - runs-on: ubuntu-20.04 - timeout-minutes: 15 - strategy: - matrix: - rust: [1.66.0, 1.67.0] - steps: - - uses: actions/checkout@v3 - with: - submodules: recursive - - uses: actions-rs/toolchain@v1 - with: - toolchain: ${{ matrix.rust }} - override: true - - name: Cache youki - uses: Swatinem/rust-cache@v1 - - run: sudo apt-get -y update - - run: sudo apt-get install -y pkg-config libsystemd-dev libdbus-glib-1-dev libelf-dev libseccomp-dev - - uses: actions/setup-go@v2 - with: - go-version: "1.17.6" - - name: Build - run: make youki-release - - name: Run integration tests - run: make oci-tests diff --git a/.github/workflows/podman_tests.yaml b/.github/workflows/podman_tests.yaml index 77735ebeb..a189bd9a1 100644 --- a/.github/workflows/podman_tests.yaml +++ b/.github/workflows/podman_tests.yaml @@ -1,3 +1,5 @@ +name: Test for podman + on: schedule: - cron: "0 0 * * *" @@ -7,15 +9,19 @@ jobs: runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v3 - - run: sudo apt-get -y update - - run: sudo apt-get install -y pkg-config libsystemd-dev libdbus-glib-1-dev libelf-dev libseccomp-dev libgpgme-dev bats - - run: make youki-dev + - name: Install just + uses: taiki-e/install-action@just + - name: Install requirements + run: sudo env PATH=$PATH just ci-prepare + - run: just youki-dev - run: sudo cp youki /usr/local/bin + - name: Install requirements for Podman + run: sudo apt-get install -y pkg-config libsystemd-dev libdbus-glib-1-dev libelf-dev libseccomp-dev libgpgme-dev bats - name: Clone podman repository uses: actions/checkout@v3 with: repository: containers/podman - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v4 with: go-version: '1.18' - name: Build podman @@ -26,4 +32,4 @@ jobs: run: sudo OCI_RUNTIME=/usr/local/bin/youki 
./hack/bats 2>&1 | tee build.log - name: Adding Summary run: | - echo "Total tests: 360 Failed tests: $(cat build.log | grep " ok " | wc -l)" >> $GITHUB_STEP_SUMMARY + echo "Total tests: 577 Failed tests: $(cat build.log | grep " ok " | wc -l)" >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 70f950e29..32aacd59f 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -12,17 +12,18 @@ jobs: - uses: actions/checkout@v3 with: submodules: recursive - - run: sudo apt-get -y update - - run: sudo apt-get install -y pkg-config libsystemd-dev libdbus-glib-1-dev libelf-dev libseccomp-dev libclang-dev - - name: Set up cargo - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true + - name: Setup Rust toolchain and cache + uses: actions-rust-lang/setup-rust-toolchain@v1.3.7 + - name: Install just + uses: taiki-e/install-action@just + - name: Install requirements + run: sudo env PATH=$PATH just ci-prepare - name: Build - run: make youki-release + run: just youki-release - name: test - run: make unittest featuretest oci-tests + # TODO(utam0k): The feature test needs nightly + # run: just unittest featuretest oci-tests + run: just unittest oci-tests upload: name: Upload @@ -30,15 +31,13 @@ jobs: runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v3 - - run: sudo apt-get -y update - - run: sudo apt-get install -y pkg-config libsystemd-dev libdbus-glib-1-dev libelf-dev libseccomp-dev - - name: Set up cargo - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true + - name: Setup Rust toolchain and cache + uses: actions-rust-lang/setup-rust-toolchain@v1.3.7 + - uses: taiki-e/install-action@just + - name: Install requirements + run: sudo env PATH=$PATH just ci-prepare - name: Release build - run: make youki-release + run: just youki-release - name: Create output directory run: mkdir output - name: Copy files to output @@ -104,11 +103,8 @@ jobs: 
CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} steps: - uses: actions/checkout@v3 - - name: Set up cargo - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true + - name: Setup Rust toolchain and cache + uses: actions-rust-lang/setup-rust-toolchain@v1.3.7 - name: Publish libcgroups run: cargo publish -p libcgroups --no-verify - name: Publish libcontainer diff --git a/.gitignore b/.gitignore index 0dd74a3f7..fdeed4a22 100644 --- a/.gitignore +++ b/.gitignore @@ -20,3 +20,5 @@ tags.temp /bundle.tar.gz /test.log + +/tests/k8s/_out/ diff --git a/.typos.toml b/.typos.toml new file mode 100644 index 000000000..a40007f8f --- /dev/null +++ b/.typos.toml @@ -0,0 +1,21 @@ +# Configuration Reference: +# - https://github.com/crate-ci/typos/blob/927308c726b1fba730f7aaa8bde602148b82004d/docs/reference.md + +[files] +extend-exclude = [ + "**/*.svg", + "tests/oci-runtime-tests/**" +] + +[default.extend-identifiers] +# This is a cgroup slice ID used in examples. It is easier to ignore this +# instance than write a regex. 
+569d5ce3afe1074769f67 = "569d5ce3afe1074769f67" + +[type.rust.extend-words] +ser = "ser" +flate = "flate" +clos = "clos" +Setted = "Setted" +hve = "hve" + diff --git a/Cargo.lock b/Cargo.lock index 555ad7542..03ffc12be 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,20 +4,20 @@ version = 3 [[package]] name = "addr2line" -version = "0.17.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" +checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" dependencies = [ - "gimli 0.26.2", + "gimli 0.27.2", ] [[package]] name = "addr2line" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" +checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3" dependencies = [ - "gimli 0.27.0", + "gimli 0.27.2", ] [[package]] @@ -37,20 +37,37 @@ dependencies = [ "version_check", ] +[[package]] +name = "ahash" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +dependencies = [ + "cfg-if 1.0.0", + "once_cell", + "version_check", +] + [[package]] name = "aho-corasick" -version = "0.7.20" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" +checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" dependencies = [ "memchr", ] [[package]] name = "ambient-authority" -version = "0.0.1" +version = "0.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9d4ee0d472d1cd2e28c97dfa124b3d8d992e10eb0a035f33f5d12e3a177ba3b" + +[[package]] +name = "android-tzdata" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "ec8ad6edb4840b78c5c3d88de606b22252d552b55f3a4699fbb10fc070ec3049" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" [[package]] name = "android_system_properties" @@ -61,17 +78,78 @@ dependencies = [ "libc", ] +[[package]] +name = "anstream" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is-terminal", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" + +[[package]] +name = "anstyle-parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +dependencies = [ + "windows-sys 0.48.0", +] + +[[package]] +name = "anstyle-wincon" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188" +dependencies = [ + "anstyle", + "windows-sys 0.48.0", +] + +[[package]] +name = "any_ascii" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70033777eb8b5124a81a1889416543dddef2de240019b674c81285a2635a7e1e" + [[package]] name = "anyhow" -version = "1.0.68" +version = "1.0.72" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854" + +[[package]] +name = "arbitrary" +version = 
"1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cb2f989d18dd141ab8ae82f64d1a8cdd37e0840f73a406896cf5e99502fab61" +checksum = "e2d098ff73c1ca148721f37baad5ea6a465a13f9573aba8641fbbbae8164a54e" [[package]] name = "arrayvec" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "ascii" @@ -81,13 +159,22 @@ checksum = "3ae7d751998c189c1d4468cf0a39bb2eae052a9c58d50ebb3b9591ee3813ad50" [[package]] name = "async-trait" -version = "0.1.64" +version = "0.1.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd7fce9ba8c3c042128ce72d8b2ddbf3a05747efb67ea0313c635e10bda47a2" +checksum = "cc6dde6e4ed435a4c1ee4e73592f5ba9da2151af10076cc04858746af9352d09" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.26", +] + +[[package]] +name = "atomic-polyfill" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3ff7eb3f316534d83a8a2c3d1674ace8a5a71198eba31e2e2b597833f699b28" +dependencies = [ + "critical-section", ] [[package]] @@ -98,16 +185,16 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "backtrace" -version = "0.3.67" +version = "0.3.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" +checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" dependencies = [ - "addr2line 0.19.0", + "addr2line 0.20.0", "cc", "cfg-if 1.0.0", "libc", "miniz_oxide", - "object 0.30.0", + "object 0.31.1", "rustc-demangle", ] @@ -119,9 +206,9 @@ checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" [[package]] name = "base64" -version = "0.13.1" +version = "0.21.2" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" +checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" [[package]] name = "bincode" @@ -134,22 +221,23 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.61.0" +version = "0.65.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a022e58a142a46fea340d68012b9201c094e93ec3d033a944a24f8fd4a4f09a" +checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cexpr", "clang-sys", "lazy_static", "lazycell", "peeking_take_while", + "prettyplease", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", - "syn", + "syn 2.0.26", ] [[package]] @@ -158,40 +246,59 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitflags" +version = "2.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" + +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] + [[package]] name = "block-buffer" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] [[package]] name = "bumpalo" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" +checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] name = "bytecheck" -version = "0.6.9" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d11cac2c12b5adc6570dad2ee1b87eff4955dac476fe12d81e5fdd352e52406f" +checksum = "8b6372023ac861f6e6dc89c8344a8f398fb42aaba2b5dbc649ca0c0e9dbcb627" dependencies = [ "bytecheck_derive", "ptr_meta", + "simdutf8", ] [[package]] name = "bytecheck_derive" -version = "0.6.9" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e576ebe98e605500b3c8041bb888e966653577172df6dd97398714eb30b9bf" +checksum = "a7ec4c6f261935ad534c0c22dbef2201b45918860eb1c574b972bd213a76af61" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -200,40 +307,49 @@ version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +[[package]] +name = "bytes" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" +dependencies = [ + "serde", +] + [[package]] name = "cap-fs-ext" -version = "1.0.3" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f8079425cfd20227020f2bff1320710ca68d6eddb4f64aba8e2741b2b4d8133" +checksum = "58bc48200a1a0fa6fba138b1802ad7def18ec1cdd92f7b2a04e21f1bd887f7b9" dependencies = [ "cap-primitives", "cap-std", - "io-lifetimes", - "windows-sys 0.42.0", + "io-lifetimes 1.0.11", + "windows-sys 0.48.0", ] [[package]] name = "cap-primitives" -version = "1.0.3" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84bf8faa0b6397a4e26082818be03641a40e3aba1afc4ec44cbd6228c73c3a61" +checksum = 
"a4b6df5b295dca8d56f35560be8c391d59f0420f72e546997154e24e765e6451" dependencies = [ "ambient-authority", "fs-set-times", "io-extras", - "io-lifetimes", + "io-lifetimes 1.0.11", "ipnet", "maybe-owned", - "rustix", - "windows-sys 0.42.0", - "winx", + "rustix 0.37.19", + "windows-sys 0.48.0", + "winx 0.35.1", ] [[package]] name = "cap-rand" -version = "1.0.4" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a986c3b8fb6e25bbef961b237756ff7a771aa71c9ffdeb01c29f3f208577bf9" +checksum = "383800fa434a9e7a463fa35196bd93dcd84a6bdc5d9aeae4e60b554134e852a2" dependencies = [ "ambient-authority", "rand", @@ -241,27 +357,26 @@ dependencies = [ [[package]] name = "cap-std" -version = "1.0.3" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4ad2b9e262a5c3b67ee92e4b9607ace704384c50c32aa6017a9282ddf15df20" +checksum = "3373a62accd150b4fcba056d4c5f3b552127f0ec86d3c8c102d60b978174a012" dependencies = [ "cap-primitives", "io-extras", - "io-lifetimes", - "ipnet", - "rustix", + "io-lifetimes 1.0.11", + "rustix 0.37.19", ] [[package]] name = "cap-time-ext" -version = "1.0.3" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dcbdbcced5c88b20f27c637faaed5dd283898cbefea48d2d8f3dcfaf048e5cc" +checksum = "e95002993b7baee6b66c8950470e59e5226a23b3af39fc59c47fe416dd39821a" dependencies = [ "cap-primitives", "once_cell", - "rustix", - "winx", + "rustix 0.37.19", + "winx 0.35.1", ] [[package]] @@ -306,25 +421,22 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.23" +version = "0.4.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" +checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" dependencies = [ + "android-tzdata", "iana-time-zone", - "js-sys", - 
"num-integer", "num-traits", "serde", - "time 0.1.45", - "wasm-bindgen", "winapi", ] [[package]] name = "clang-sys" -version = "1.4.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3" +checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" dependencies = [ "glob", "libc", @@ -333,68 +445,80 @@ dependencies = [ [[package]] name = "clap" -version = "4.0.32" +version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7db700bc935f9e43e88d00b0850dae18a63773cfbec6d8e070fccf7fef89a39" +checksum = "93aae7a4192245f70fe75dd9157fc7b4a5bf53e88d30bd4396f7d8f9284d5acc" dependencies = [ - "bitflags", + "clap_builder", "clap_derive", + "once_cell", +] + +[[package]] +name = "clap_builder" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f423e341edefb78c9caba2d9c7f7687d0e72e89df3ce3394554754393ac3990" +dependencies = [ + "anstream", + "anstyle", + "bitflags 1.3.2", "clap_lex", - "is-terminal", "once_cell", "strsim", - "termcolor", ] [[package]] name = "clap_complete" -version = "4.0.7" +version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10861370d2ba66b0f5989f83ebf35db6421713fd92351790e7fdd6c36774c56b" +checksum = "a04ddfaacc3bc9e6ea67d024575fafc2a813027cf374b8f24f7bc233c6b6be12" dependencies = [ "clap", ] [[package]] name = "clap_derive" -version = "4.0.21" +version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0177313f9f02afc995627906bbd8967e2be069f5261954222dac78290c2b9014" +checksum = "191d9573962933b4027f932c600cd252ce27a8ad5979418fe78e43c07996f27b" dependencies = [ - "heck", - "proc-macro-error", + "heck 0.4.1", "proc-macro2", "quote", - "syn", + "syn 2.0.26", ] [[package]] name = "clap_lex" -version = "0.3.1" +version = "0.5.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" + +[[package]] +name = "clone3" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "783fe232adfca04f90f56201b26d79682d4cd2625e0bc7290b95123afe558ade" +checksum = "5ee4e061ea30800291ca09663878f3953840a69b08ce244b3e8b26e894d9f60f" dependencies = [ - "os_str_bytes", + "bitflags 1.3.2", + "uapi", ] [[package]] name = "cmake" -version = "0.1.49" +version = "0.1.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db34956e100b30725f2eb215f90d4871051239535632f84fea3bc92722c66b7c" +checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" dependencies = [ "cc", ] [[package]] -name = "codespan-reporting" -version = "0.11.1" +name = "colorchoice" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" -dependencies = [ - "termcolor", - "unicode-width", -] +checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "combine" @@ -412,11 +536,27 @@ version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fbdcdcb6d86f71c5e97409ad45898af11cbc995b4ee8112d59095a28d376c935" +[[package]] +name = "cooked-waker" +version = "5.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147be55d677052dabc6b22252d5dd0fd4c29c8c27aa4f2fbef0f94aa003b406f" + +[[package]] +name = "core-foundation" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "core-foundation-sys" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "corosensei" @@ -442,135 +582,148 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.5" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" +checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" dependencies = [ "libc", ] [[package]] name = "cranelift-bforest" -version = "0.82.3" +version = "0.91.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38faa2a16616c8e78a18d37b4726b98bfd2de192f2fdc8a39ddf568a408a0f75" +checksum = "2a2ab4512dfd3a6f4be184403a195f76e81a8a9f9e6c898e19d2dc3ce20e0115" dependencies = [ - "cranelift-entity 0.82.3", + "cranelift-entity 0.91.1", ] [[package]] name = "cranelift-bforest" -version = "0.91.0" +version = "0.97.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc952b310b24444fc14ab8b9cbe3fafd7e7329e3eec84c3a9b11d2b5cf6f3be1" +checksum = "5c289b8eac3a97329a524e953b5fd68a8416ca629e1a37287f12d9e0760aadbc" dependencies = [ - "cranelift-entity 0.91.0", + "cranelift-entity 0.97.1", ] [[package]] name = "cranelift-codegen" -version = "0.82.3" +version = "0.91.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26f192472a3ba23860afd07d2b0217dc628f21fcc72617aa1336d98e1671f33b" +checksum = "98b022ed2a5913a38839dfbafe6cf135342661293b08049843362df4301261dc" dependencies = [ - "cranelift-bforest 0.82.3", - "cranelift-codegen-meta 0.82.3", - "cranelift-codegen-shared 0.82.3", - "cranelift-entity 0.82.3", + "arrayvec", + "bumpalo", + "cranelift-bforest 0.91.1", + "cranelift-codegen-meta 0.91.1", + "cranelift-codegen-shared 0.91.1", + "cranelift-egraph", + "cranelift-entity 0.91.1", + "cranelift-isle 0.91.1", "gimli 0.26.2", "log", - "regalloc", + 
"regalloc2 0.5.1", "smallvec", "target-lexicon", ] [[package]] name = "cranelift-codegen" -version = "0.91.0" +version = "0.97.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e73470419b33011e50dbf0f6439cbccbaabe9381de172da4e1b6efcda4bb8fa7" +checksum = "7bf07ba80f53fa7f7dc97b11087ea867f7ae4621cfca21a909eca92c0b96c7d9" dependencies = [ - "arrayvec", "bumpalo", - "cranelift-bforest 0.91.0", - "cranelift-codegen-meta 0.91.0", - "cranelift-codegen-shared 0.91.0", - "cranelift-egraph", - "cranelift-entity 0.91.0", - "cranelift-isle", - "gimli 0.26.2", + "cranelift-bforest 0.97.1", + "cranelift-codegen-meta 0.97.1", + "cranelift-codegen-shared 0.97.1", + "cranelift-control", + "cranelift-entity 0.97.1", + "cranelift-isle 0.97.1", + "gimli 0.27.2", + "hashbrown 0.13.2", "log", - "regalloc2", + "regalloc2 0.9.1", "smallvec", "target-lexicon", ] [[package]] name = "cranelift-codegen-meta" -version = "0.82.3" +version = "0.91.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f32ddb89e9b89d3d9b36a5b7d7ea3261c98235a76ac95ba46826b8ec40b1a24" +checksum = "639307b45434ad112a98f8300c0f0ab085cbefcd767efcdef9ef19d4c0756e74" dependencies = [ - "cranelift-codegen-shared 0.82.3", + "cranelift-codegen-shared 0.91.1", ] [[package]] name = "cranelift-codegen-meta" -version = "0.91.0" +version = "0.97.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "911a1872464108a11ac9965c2b079e61bbdf1bc2e0b9001264264add2e12a38f" +checksum = "40a7ca088173130c5c033e944756e3e441fbf3f637f32b4f6eb70252580c6dd4" dependencies = [ - "cranelift-codegen-shared 0.91.0", + "cranelift-codegen-shared 0.97.1", ] [[package]] name = "cranelift-codegen-shared" -version = "0.82.3" +version = "0.91.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01fd0d9f288cc1b42d9333b7a776b17e278fc888c28e6a0f09b5573d45a150bc" +checksum = "278e52e29c53fcf32431ef08406c295699a70306d05a0715c5b1bf50e33a9ab7" 
[[package]] name = "cranelift-codegen-shared" -version = "0.91.0" +version = "0.97.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0114095ec7d2fbd658ed100bd007006360bc2530f57c6eee3d3838869140dbf9" + +[[package]] +name = "cranelift-control" +version = "0.97.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e036f3f07adb24a86fb46e977e8fe03b18bb16b1eada949cf2c48283e5f8a862" +checksum = "1d56031683a55a949977e756d21826eb17a1f346143a1badc0e120a15615cd38" +dependencies = [ + "arbitrary", +] [[package]] name = "cranelift-egraph" -version = "0.91.0" +version = "0.91.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d6c623f4b5d2a6bad32c403f03765d4484a827eb93ee78f8cb6219ef118fd59" +checksum = "624b54323b06e675293939311943ba82d323bb340468ce1889be5da7932c8d73" dependencies = [ - "cranelift-entity 0.91.0", + "cranelift-entity 0.91.1", "fxhash", "hashbrown 0.12.3", - "indexmap", + "indexmap 1.9.3", "log", "smallvec", ] [[package]] name = "cranelift-entity" -version = "0.82.3" +version = "0.91.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e3bfe172b83167604601faf9dc60453e0d0a93415b57a9c4d1a7ae6849185cf" +checksum = "9a59bcbca89c3f1b70b93ab3cbba5e5e0cbf3e63dadb23c7525cb142e21a9d4c" [[package]] name = "cranelift-entity" -version = "0.91.0" +version = "0.97.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74385eb5e405b3562f0caa7bcc4ab9a93c7958dd5bcd0e910bffb7765eacd6fc" +checksum = "d6565198b5684367371e2b946ceca721eb36965e75e3592fad12fc2e15f65d7b" dependencies = [ "serde", ] [[package]] name = "cranelift-frontend" -version = "0.82.3" +version = "0.91.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a006e3e32d80ce0e4ba7f1f9ddf66066d052a8c884a110b91d05404d6ce26dce" +checksum = "0d70abacb8cfef3dc8ff7e8836e9c1d70f7967dfdac824a4cd5e30223415aca6" dependencies = [ - "cranelift-codegen 0.82.3", + 
"cranelift-codegen 0.91.1", "log", "smallvec", "target-lexicon", @@ -578,11 +731,11 @@ dependencies = [ [[package]] name = "cranelift-frontend" -version = "0.91.0" +version = "0.97.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a4ac920422ee36bff2c66257fec861765e3d95a125cdf58d8c0f3bba7e40e61" +checksum = "25f28cc44847c8b98cb921e6bfc0f7b228f4d27519376fea724d181da91709a6" dependencies = [ - "cranelift-codegen 0.91.0", + "cranelift-codegen 0.97.1", "log", "smallvec", "target-lexicon", @@ -590,34 +743,40 @@ dependencies = [ [[package]] name = "cranelift-isle" -version = "0.91.0" +version = "0.91.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "393bc73c451830ff8dbb3a07f61843d6cb41a084f9996319917c0b291ed785bb" + +[[package]] +name = "cranelift-isle" +version = "0.97.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c541263fb37ad2baa53ec8c37218ee5d02fa0984670d9419dedd8002ea68ff08" +checksum = "80b658177e72178c438f7de5d6645c56d97af38e17fcb0b500459007b4e05cc5" [[package]] name = "cranelift-native" -version = "0.91.0" +version = "0.97.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1de5d7a063e8563d670aaca38de16591a9b70dc66cbad4d49a7b4ae8395fd1ce" +checksum = "bf1c7de7221e6afcc5e13ced3b218faab3bc65b47eac67400046a05418aecd6a" dependencies = [ - "cranelift-codegen 0.91.0", + "cranelift-codegen 0.97.1", "libc", "target-lexicon", ] [[package]] name = "cranelift-wasm" -version = "0.91.0" +version = "0.97.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbc4dd03b713b5d71b582915b8c272f4813cdd8c99a3e03d9ba70c44468a6e0" +checksum = "76b0d28ebe8edb6b503630c489aa4669f1e2d13b97bec7271a0fcb0e159be3ad" dependencies = [ - "cranelift-codegen 0.91.0", - "cranelift-entity 0.91.0", - "cranelift-frontend 0.91.0", + "cranelift-codegen 0.97.1", + "cranelift-entity 0.97.1", + "cranelift-frontend 0.97.1", "itertools", "log", "smallvec", - 
"wasmparser 0.95.0", + "wasmparser 0.107.0", "wasmtime-types", ] @@ -630,6 +789,12 @@ dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "critical-section" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6548a0ad5d2549e111e1f6a11a6c2e2d00ce6a3dafe22948d67c2b443f775e52" + [[package]] name = "crossbeam" version = "0.8.2" @@ -646,9 +811,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.6" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ "cfg-if 1.0.0", "crossbeam-utils", @@ -656,9 +821,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" +checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ "cfg-if 1.0.0", "crossbeam-epoch", @@ -667,14 +832,14 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.13" +version = "0.9.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a" +checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" dependencies = [ "autocfg", "cfg-if 1.0.0", "crossbeam-utils", - "memoffset 0.7.1", + "memoffset 0.9.0", "scopeguard", ] @@ -690,9 +855,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.14" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" dependencies = [ "cfg-if 1.0.0", ] @@ -708,92 
+873,82 @@ dependencies = [ ] [[package]] -name = "cxx" -version = "1.0.89" +name = "darling" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc831ee6a32dd495436e317595e639a587aa9907bef96fe6e6abc290ab6204e9" +checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" dependencies = [ - "cc", - "cxxbridge-flags", - "cxxbridge-macro", - "link-cplusplus", + "darling_core 0.14.4", + "darling_macro 0.14.4", ] [[package]] -name = "cxx-build" -version = "1.0.89" +name = "darling" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94331d54f1b1a8895cd81049f7eaaaef9d05a7dcb4d1fd08bf3ff0806246789d" +checksum = "0558d22a7b463ed0241e993f76f09f30b126687447751a8638587b864e4b3944" dependencies = [ - "cc", - "codespan-reporting", - "once_cell", - "proc-macro2", - "quote", - "scratch", - "syn", + "darling_core 0.20.1", + "darling_macro 0.20.1", ] [[package]] -name = "cxxbridge-flags" -version = "1.0.89" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48dcd35ba14ca9b40d6e4b4b39961f23d835dbb8eed74565ded361d93e1feb8a" - -[[package]] -name = "cxxbridge-macro" -version = "1.0.89" +name = "darling_core" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bbeb29798b407ccd82a3324ade1a7286e0d29851475990b612670f6f5124d2" +checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" dependencies = [ + "fnv", + "ident_case", "proc-macro2", "quote", - "syn", + "strsim", + "syn 1.0.109", ] [[package]] -name = "darling" -version = "0.14.2" +name = "darling_core" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0dd3cd20dc6b5a876612a6e5accfe7f3dd883db6d07acfbf14c128f61550dfa" +checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb" dependencies = [ - "darling_core", - "darling_macro", + "fnv", + "ident_case", + 
"proc-macro2", + "quote", + "syn 2.0.26", ] [[package]] -name = "darling_core" -version = "0.14.2" +name = "darling_macro" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a784d2ccaf7c98501746bf0be29b2022ba41fd62a2e622af997a03e9f972859f" +checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" dependencies = [ - "fnv", - "ident_case", - "proc-macro2", + "darling_core 0.14.4", "quote", - "strsim", - "syn", + "syn 1.0.109", ] [[package]] name = "darling_macro" -version = "0.14.2" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7618812407e9402654622dd402b0a89dff9ba93badd6540781526117b92aab7e" +checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ - "darling_core", + "darling_core 0.20.1", "quote", - "syn", + "syn 2.0.26", ] [[package]] name = "dashmap" -version = "5.4.0" +version = "5.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "907076dfda823b0b36d2a1bb5f90c96660a5bbcd7729e10727f07858f22c4edc" +checksum = "6943ae99c34386c84a470c499d3414f66502a41340aa895406e0d2e4a207b91d" dependencies = [ "cfg-if 1.0.0", - "hashbrown 0.12.3", + "hashbrown 0.14.0", "lock_api", "once_cell", "parking_lot_core", @@ -811,33 +966,32 @@ dependencies = [ ] [[package]] -name = "derive_builder" -version = "0.11.2" +name = "debugid" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d07adf7be193b71cc36b193d0f5fe60b918a3a9db4dad0449f57bcfd519704a3" +checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d" dependencies = [ - "derive_builder_macro 0.11.2", + "uuid", ] [[package]] -name = "derive_builder" -version = "0.12.0" +name = "derivative" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d67778784b508018359cbc8696edb3db78160bab2c2a28ba7f56ef6932997f8" +checksum = 
"fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "derive_builder_macro 0.12.0", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] -name = "derive_builder_core" -version = "0.11.2" +name = "derive_builder" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f91d4cfa921f1c05904dc3c57b4a32c38aed3340cce209f3a6fd1478babafc4" +checksum = "8d67778784b508018359cbc8696edb3db78160bab2c2a28ba7f56ef6932997f8" dependencies = [ - "darling", - "proc-macro2", - "quote", - "syn", + "derive_builder_macro", ] [[package]] @@ -846,20 +1000,10 @@ version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c11bdc11a0c47bc7d37d582b5285da6849c96681023680b906673c5707af7b0f" dependencies = [ - "darling", + "darling 0.14.4", "proc-macro2", "quote", - "syn", -] - -[[package]] -name = "derive_builder_macro" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f0314b72bed045f3a68671b3c86328386762c93f82d98c65c3cb5e5f573dd68" -dependencies = [ - "derive_builder_core 0.11.2", - "syn", + "syn 1.0.109", ] [[package]] @@ -868,8 +1012,8 @@ version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebcda35c7a396850a55ffeac740804b40ffec779b98fffbb1738f4033f0ee79e" dependencies = [ - "derive_builder_core 0.12.0", - "syn", + "derive_builder_core", + "syn 1.0.109", ] [[package]] @@ -880,9 +1024,9 @@ checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" [[package]] name = "digest" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer", "crypto-common", @@ -943,26 +1087,26 @@ checksum = 
"1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" [[package]] name = "either" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" +checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" [[package]] -name = "enum-iterator" -version = "0.7.0" +name = "encoding_rs" +version = "0.8.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eeac5c5edb79e4e39fe8439ef35207780a11f69c52cbe424ce3dfad4cb78de6" +checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" dependencies = [ - "enum-iterator-derive 0.7.0", + "cfg-if 1.0.0", ] [[package]] name = "enum-iterator" -version = "1.2.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91a4ec26efacf4aeff80887a175a419493cb6f8b5480d26387eb0bd038976187" +checksum = "4eeac5c5edb79e4e39fe8439ef35207780a11f69c52cbe424ce3dfad4cb78de6" dependencies = [ - "enum-iterator-derive 1.1.0", + "enum-iterator-derive", ] [[package]] @@ -973,39 +1117,28 @@ checksum = "c134c37760b27a871ba422106eedbb8247da973a09e82558bf26d619c882b159" dependencies = [ "proc-macro2", "quote", - "syn", -] - -[[package]] -name = "enum-iterator-derive" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "828de45d0ca18782232dfb8f3ea9cc428e8ced380eb26a520baaacfc70de39ce" -dependencies = [ - "proc-macro2", - "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "enumset" -version = "1.0.12" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19be8061a06ab6f3a6cf21106c873578bf01bd42ad15e0311a9c76161cb1c753" +checksum = "e875f1719c16de097dee81ed675e2d9bb63096823ed3f0ca827b7dea3028bbbb" dependencies = [ "enumset_derive", ] [[package]] name = "enumset_derive" -version = "0.6.1" +version = "0.8.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "03e7b551eba279bf0fa88b83a46330168c1560a52a94f5126f892f0b364ab3e0" +checksum = "e08b6c6ab82d70f08844964ba10c7babb716de2ecaeab9be5717918a5177d3af" dependencies = [ - "darling", + "darling 0.20.1", "proc-macro2", "quote", - "syn", + "syn 2.0.26", ] [[package]] @@ -1031,15 +1164,21 @@ dependencies = [ "termcolor", ] +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + [[package]] name = "errno" -version = "0.2.8" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" +checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" dependencies = [ "errno-dragonfly", "libc", - "winapi", + "windows-sys 0.48.0", ] [[package]] @@ -1060,29 +1199,26 @@ checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" [[package]] name = "fastrand" -version = "1.8.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" -dependencies = [ - "instant", -] +checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" [[package]] name = "fd-lock" -version = "3.0.9" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28c0190ff0bd3b28bfdd4d0cf9f92faa12880fb0b8ae2054723dd6c76a4efd42" +checksum = "0b0377f1edc77dbd1118507bc7a66e4ab64d2b90c66f90726dc801e73a8c68f9" dependencies = [ "cfg-if 1.0.0", - "rustix", - "windows-sys 0.42.0", + "rustix 0.38.1", + "windows-sys 0.48.0", ] [[package]] name = "file-per-thread-logger" -version = "0.1.6" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"84f2e425d9790201ba4af4630191feac6dcc98765b118d4d18e91d23c2353866" +checksum = "8a3cc21c33af89af0930c8cae4ade5e6fdc17b5d2c97b3d2e2edb67a1cf683f3" dependencies = [ "env_logger 0.10.0", "log", @@ -1090,14 +1226,14 @@ dependencies = [ [[package]] name = "filetime" -version = "0.2.19" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e884668cd0c7480504233e951174ddc3b382f7c2666e3b7310b5c4e7b0c37f9" +checksum = "5cbc844cecaee9d4443931972e1289c8ff485cb4cc2767cb03ca139ed6885153" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall", - "windows-sys 0.42.0", + "redox_syscall 0.2.16", + "windows-sys 0.48.0", ] [[package]] @@ -1108,9 +1244,9 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.25" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841" +checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" dependencies = [ "crc32fast", "miniz_oxide", @@ -1131,11 +1267,26 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "form_urlencoded" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" +checksum = 
"a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" dependencies = [ "percent-encoding", ] @@ -1148,20 +1299,32 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "fs-set-times" -version = "0.18.0" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e25ca26b0001154679ce0901527330e6153b670d17ccd1f86bab4e45dfba1a74" +checksum = "6d167b646a876ba8fda6b50ac645cfd96242553cbaf0ca4fccaa39afcbf0801f" dependencies = [ - "io-lifetimes", - "rustix", - "windows-sys 0.42.0", + "io-lifetimes 1.0.11", + "rustix 0.38.1", + "windows-sys 0.48.0", ] +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + [[package]] name = "futures" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e2792b0ff0340399d58445b88fd9770e3489eff258a4cbc1523418f12abf84" +checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" dependencies = [ "futures-channel", "futures-core", @@ -1174,9 +1337,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5" +checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", "futures-sink", @@ -1184,15 +1347,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608" +checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" [[package]] name = "futures-executor" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8de0a35a6ab97ec8869e32a2473f4b1324459e14c29275d14b10cb1fd19b50e" +checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" dependencies = [ "futures-core", "futures-task", @@ -1202,38 +1365,38 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfb8371b6fb2aeb2d280374607aeabfc99d95c72edfe51692e42d3d7f0d08531" +checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-macro" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a73af87da33b5acf53acfebdc339fe592ecf5357ac7c0a7734ab9d8c876a70" +checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.26", ] [[package]] name = "futures-sink" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f310820bb3e8cfd46c80db4d7fb8353e15dfff853a127158425f31e0be6c8364" +checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" [[package]] name = "futures-task" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf79a1bf610b10f42aea489289c5a2c478a786509693b80cd39c44ccd936366" +checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" [[package]] name = "futures-util" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9c1d6de3acfef38d2be4b1f543f553131788603495be83da675e180c8d6b7bd1" +checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ "futures-channel", "futures-core", @@ -1257,19 +1420,23 @@ dependencies = [ ] [[package]] -name = "generational-arena" -version = "0.2.8" +name = "fxprof-processed-profile" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e1d3b771574f62d0548cee0ad9057857e9fc25d7a3335f140c84f6acd0bf601" +checksum = "27d12c0aed7f1e24276a241aadc4cb8ea9f83000f34bc062b7cc2d51e3b0fabd" dependencies = [ - "cfg-if 0.1.10", + "bitflags 2.3.3", + "debugid", + "fxhash", + "serde", + "serde_json", ] [[package]] name = "generic-array" -version = "0.14.6" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", @@ -1277,13 +1444,15 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if 1.0.0", + "js-sys", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", + "wasm-bindgen", ] [[package]] @@ -1295,7 +1464,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1305,27 +1474,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d" dependencies = [ "fallible-iterator", - "indexmap", + "indexmap 1.9.3", "stable_deref_trait", ] [[package]] name = "gimli" -version = "0.27.0" +version = "0.27.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "dec7af912d60cdbd3677c1af9352ebae6fb8394d165568a2234df0fa00f87793" - -[[package]] -name = "git2" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2994bee4a3a6a51eb90c218523be382fd7ea09b16380b9312e9dbe955ff7c7d1" +checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" dependencies = [ - "bitflags", - "libc", - "libgit2-sys", - "log", - "url", + "fallible-iterator", + "indexmap 1.9.3", + "stable_deref_trait", ] [[package]] @@ -1335,12 +1496,37 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] -name = "hashbrown" -version = "0.11.2" +name = "h2" +version = "0.3.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97ec8491ebaf99c8eaa73058b045fe58073cd6be7f596ac993ced0b0a0c01049" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap 1.9.3", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "half" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" + +[[package]] +name = "hash32" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" +checksum = "b0c35f58762feb77d74ebe43bdbc3210f09be9fe6742234d573bacc26ed92b67" dependencies = [ - "ahash", + "byteorder", ] [[package]] @@ -1349,7 +1535,44 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash", + "ahash 0.7.6", +] + +[[package]] +name = "hashbrown" +version = "0.13.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +dependencies = [ + "ahash 0.8.3", +] + +[[package]] +name = "hashbrown" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" + +[[package]] +name = "heapless" +version = "0.7.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db04bc24a18b9ea980628ecf00e6c0264f3c1426dac36c00cb49b6fbad8b0743" +dependencies = [ + "atomic-polyfill", + "hash32", + "rustc_version 0.4.0", + "spin 0.9.8", + "stable_deref_trait", +] + +[[package]] +name = "heck" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" +dependencies = [ + "unicode-segmentation", ] [[package]] @@ -1360,12 +1583,9 @@ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" -version = "0.2.6" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" -dependencies = [ - "libc", -] +checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" [[package]] name = "hex" @@ -1373,36 +1593,126 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "http" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" +dependencies = [ + "bytes", + "http", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" + +[[package]] +name = "httpdate" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" + [[package]] name = "humantime" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" +[[package]] +name = "hyper" +version = "0.14.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2", + "tokio", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" +dependencies = [ + "futures-util", + "http", + "hyper", + "rustls", + "tokio", + "tokio-rustls", +] + +[[package]] +name = "hyper-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +dependencies = [ + "bytes", + "hyper", + "native-tls", + "tokio", + "tokio-native-tls", +] + [[package]] name = "iana-time-zone" -version = "0.1.53" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +checksum = 
"2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "winapi", + "windows", ] [[package]] name = "iana-time-zone-haiku" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ - "cxx", - "cxx-build", + "cc", ] +[[package]] +name = "id-arena" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25a2bc672d1148e28034f176e01fffebb08b35768468cc954630da77a1449005" + [[package]] name = "ident_case" version = "1.0.1" @@ -1411,9 +1721,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -1421,9 +1731,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.9.2" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown 0.12.3", @@ -1431,12 +1741,13 @@ dependencies = [ ] [[package]] -name = "instant" -version = "0.1.12" +name = "indexmap" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" 
dependencies = [ - "cfg-if 1.0.0", + "equivalent", + "hashbrown 0.14.0", ] [[package]] @@ -1450,67 +1761,76 @@ dependencies = [ "flate2", "libcgroups", "libcontainer", - "log", "nix", "num_cpus", - "oci-spec 0.6.0", + "oci-spec", "once_cell", "pnet_datalink", "procfs", "rand", + "scopeguard", "serde", "serde_json", "tar", + "tempfile", "test_framework", + "tracing", + "tracing-subscriber", "uuid", "which", ] [[package]] name = "io-extras" -version = "0.17.1" +version = "0.17.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b87bc110777311d7832025f38c4ab0f089f764644009edef3b5cbadfedee8c40" +checksum = "fde93d48f0d9277f977a333eca8313695ddd5301dc96f7e02aeddcb0dd99096f" dependencies = [ - "io-lifetimes", - "windows-sys 0.42.0", + "io-lifetimes 1.0.11", + "windows-sys 0.48.0", ] [[package]] name = "io-lifetimes" -version = "1.0.4" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7d6c6f8c91b4b9ed43484ad1a938e393caf35960fce7f82a040497207bd8e9e" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ + "hermit-abi", "libc", - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] +[[package]] +name = "io-lifetimes" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c2355a5aef60b1c70b7001bd60ce60deebe6a98d95dff5a873519b125a3af51" + [[package]] name = "ipnet" -version = "2.7.1" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146" +checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" [[package]] name = "ipnetwork" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f84f1612606f3753f205a4e9a2efd6fe5b4c573a6269b2cc6c3003d44a0d127" +checksum = "bf466541e9d546596ee94f9f69590f89473455f88372423e0008fc1a7daf100e" dependencies 
= [ "serde", ] [[package]] name = "is-terminal" -version = "0.4.2" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28dfb6c8100ccc63462345b67d1bbc3679177c75ee4bf59bf29c8b1d110b8189" +checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi", - "io-lifetimes", - "rustix", - "windows-sys 0.42.0", + "rustix 0.38.1", + "windows-sys 0.48.0", ] [[package]] @@ -1524,9 +1844,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.5" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" +checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "ittapi" @@ -1550,9 +1870,9 @@ dependencies = [ [[package]] name = "jobserver" -version = "0.1.25" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b" +checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" dependencies = [ "libc", ] @@ -1584,25 +1904,36 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" +[[package]] +name = "lexical-sort" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c09e4591611e231daf4d4c685a66cb0410cc1e502027a20ae55f2bb9e997207a" +dependencies = [ + "any_ascii", +] + [[package]] name = "libbpf-sys" -version = "1.1.1+v1.1.0" +version = "1.2.1+v1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f0bfc74513824996a8f689cae8b40445c9a54bc9f57a31d9778b281b9970868" +checksum = "75adb4021282a72ca63ebbc0e4247750ad74ede68ff062d247691072d709ad8b" dependencies = [ "cc", + "nix", + "num_cpus", "pkg-config", ] [[package]] name = "libc" -version = 
"0.2.139" +version = "0.2.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" +checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" [[package]] name = "libcgroups" -version = "0.0.4" +version = "0.1.0" dependencies = [ "anyhow", "clap", @@ -1612,71 +1943,58 @@ dependencies = [ "fixedbitset", "libbpf-sys", "libc", - "log", "mockall", "nix", - "oci-spec 0.5.8", + "oci-spec", "procfs", "quickcheck", "rbpf", "serde", "serde_json", "serial_test", + "tempfile", + "thiserror", + "tracing", ] [[package]] name = "libcontainer" -version = "0.0.4" +version = "0.1.0" dependencies = [ "anyhow", - "bitflags", + "bitflags 2.3.3", "caps", "chrono", - "crossbeam-channel", + "clone3", "fastrand", "futures", "libc", "libcgroups", "libseccomp", - "log", - "mio", "nix", - "oci-spec 0.5.8", - "path-clean", + "oci-spec", + "once_cell", "prctl", "procfs", "quickcheck", "rand", + "regex", "rust-criu", + "safe-path", "serde", "serde_json", "serial_test", - "syscalls", - "wasmedge-sdk", - "wasmer", - "wasmer-wasi", - "wasmtime", - "wasmtime-wasi", + "tempfile", + "thiserror", + "tracing", ] [[package]] name = "libdbus-sys" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2264f9d90a9b4e60a2dc722ad899ea0374f03c2e96e755fe22a8f551d4d5fb3c" -dependencies = [ - "pkg-config", -] - -[[package]] -name = "libgit2-sys" -version = "0.14.2+1.5.1" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f3d95f6b51075fe9810a7ae22c7095f12b98005ab364d8544797a825ce946a4" +checksum = "06085512b750d640299b79be4bad3d2fa90a9c00b1fd9e1b46364f66f0485c72" dependencies = [ - "cc", - "libc", - "libz-sys", "pkg-config", ] @@ -1692,7 +2010,7 @@ dependencies = [ [[package]] name = "liboci-cli" -version = "0.0.4" +version = "0.1.0" dependencies = [ "clap", ] @@ -1703,7 +2021,7 @@ version = "0.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "21c57fd8981a80019807b7b68118618d29a87177c63d704fc96e6ecd003ae5b3" dependencies = [ - "bitflags", + "bitflags 1.3.2", "libc", "libseccomp-sys", "pkg-config", @@ -1716,24 +2034,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a7cbbd4ad467251987c6e5b47d53b11a5a05add08f2447a9e2d70aef1e0d138" [[package]] -name = "libz-sys" -version = "1.1.8" +name = "linked-hash-map" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf" -dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", -] +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] -name = "link-cplusplus" -version = "1.0.8" +name = "linked_hash_set" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5" +checksum = "47186c6da4d81ca383c7c47c1bfc80f4b95f4720514d860a5407aaf4233f9588" dependencies = [ - "cc", + "linked-hash-map", ] [[package]] @@ -1743,44 +2055,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" [[package]] -name = "lock_api" -version = "0.4.9" +name = "linux-raw-sys" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" -dependencies = [ - "autocfg", - "scopeguard", -] +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] -name = "log" -version = "0.4.17" +name = "linux-raw-sys" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if 1.0.0", -] +checksum = 
"09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0" [[package]] -name = "loupe" -version = "0.1.3" +name = "lock_api" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b6a72dfa44fe15b5e76b94307eeb2ff995a8c5b283b55008940c02e0c5b634d" +checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" dependencies = [ - "indexmap", - "loupe-derive", - "rustversion", + "autocfg", + "scopeguard", ] [[package]] -name = "loupe-derive" -version = "0.1.3" +name = "log" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0fbfc88337168279f2e9ae06e157cfed4efd3316e14dc96ed074d4f2e6c5952" -dependencies = [ - "quote", - "syn", -] +checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" [[package]] name = "mach" @@ -1791,6 +2091,15 @@ dependencies = [ "libc", ] +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + [[package]] name = "maybe-owned" version = "0.3.4" @@ -1805,29 +2114,29 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memfd" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b20a59d985586e4a5aef64564ac77299f8586d8be6cf9106a5a40207e8908efb" +checksum = "ffc89ccdc6e10d6907450f753537ebc5c5d3460d2e4e62ea74bd571db62c0f9e" dependencies = [ - "rustix", + "rustix 0.37.19", ] [[package]] name = "memmap2" -version = "0.5.8" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b182332558b18d807c4ce1ca8ca983b34c3ee32765e47b3f0f69b90355cc1dc" +checksum = "83faa42c0a078c393f6b29d5db232d8be22776a891f8f56e5284faee4a20b327" dependencies = [ "libc", ] [[package]] -name = "memoffset" -version = 
"0.6.5" +name = "memmap2" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +checksum = "6d28bba84adfe6646737845bc5ebbfa2c08424eb1c37e94a1fd2a82adb56a872" dependencies = [ - "autocfg", + "libc", ] [[package]] @@ -1839,6 +2148,30 @@ dependencies = [ "autocfg", ] +[[package]] +name = "memoffset" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" +dependencies = [ + "autocfg", +] + +[[package]] +name = "memoffset" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" +dependencies = [ + "autocfg", +] + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -1847,30 +2180,29 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.6.4" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2e212582ede878b109755efd0773a4f0f4ec851584cf0aefbeb4d9ecc114822" +checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" dependencies = [ "adler", ] [[package]] name = "mio" -version = "0.8.5" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" +checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" dependencies = [ "libc", - "log", - "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.42.0", + "wasi", + "windows-sys 0.48.0", ] [[package]] name = "mockall" -version = "0.11.3" +version = 
"0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50e4a1c770583dac7ab5e2f6c139153b783a53a1bbee9729613f193e59828326" +checksum = "4c84490118f2ee2d74570d114f3d0493cbf02790df303d2707606c3e14e07c96" dependencies = [ "cfg-if 1.0.0", "downcast", @@ -1883,14 +2215,14 @@ dependencies = [ [[package]] name = "mockall_derive" -version = "0.11.3" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "832663583d5fa284ca8810bf7015e46c9fff9622d3cf34bd1eea5003fec06dd0" +checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" dependencies = [ "cfg-if 1.0.0", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1899,18 +2231,36 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7843ec2de400bcbc6a6328c958dc38e5359da6e93e72e37bc5246bf1ae776389" +[[package]] +name = "native-tls" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +dependencies = [ + "lazy_static", + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + [[package]] name = "nix" -version = "0.25.0" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e322c04a9e3440c327fca7b6c8a63e6890a32fa2ad689db972425f07e0d22abb" +checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" dependencies = [ - "autocfg", - "bitflags", + "bitflags 1.3.2", "cfg-if 1.0.0", "libc", - "memoffset 0.6.5", + "memoffset 0.7.1", "pin-utils", + "static_assertions", ] [[package]] @@ -1936,11 +2286,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" [[package]] -name = "ntapi" -version = "0.4.0" +name = "nu-ansi-term" +version 
= "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc51db7b362b205941f71232e56c625156eb9a929f8cf74a428fd5bc094a4afc" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" dependencies = [ + "overload", "winapi", ] @@ -1952,87 +2303,86 @@ checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ "proc-macro2", "quote", - "syn", -] - -[[package]] -name = "num-integer" -version = "0.1.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" -dependencies = [ - "autocfg", - "num-traits", + "syn 1.0.109", ] [[package]] name = "num-traits" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" dependencies = [ "autocfg", ] [[package]] name = "num_cpus" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ "hermit-abi", "libc", ] [[package]] -name = "num_threads" -version = "0.1.6" +name = "num_enum" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" +checksum = "1f646caf906c20226733ed5b1374287eb97e3c2a5c227ce668c1f2ce20ae57c9" dependencies = [ - "libc", + "num_enum_derive", ] [[package]] -name = "object" -version = "0.28.4" +name = "num_enum_derive" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e42c982f2d955fac81dd7e1d0e1426a7d702acd9c98d19ab01083a6a0328c424" +checksum = 
"dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799" dependencies = [ - "crc32fast", - "hashbrown 0.11.2", - "indexmap", - "memchr", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "num_threads" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" +dependencies = [ + "libc", ] [[package]] name = "object" -version = "0.29.0" +version = "0.30.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53" +checksum = "03b4680b86d9cfafba8fc491dc9b6df26b68cf40e9e6cd73909194759a63c385" dependencies = [ "crc32fast", - "hashbrown 0.12.3", - "indexmap", + "hashbrown 0.13.2", + "indexmap 1.9.3", "memchr", ] [[package]] name = "object" -version = "0.30.0" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239da7f290cfa979f43f85a8efeee9a8a76d0827c356d37f9d3d7254d6b537fb" +checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1" dependencies = [ "memchr", ] [[package]] name = "oci-spec" -version = "0.5.8" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98135224dd4faeb24c05a2fac911ed53ea6b09ecb09d7cada1cb79963ab2ee34" +checksum = "9421b067205c68dc80af7c68599a9c1eb113f975aafeb874cea7f4d5d41ce3fb" dependencies = [ - "derive_builder 0.11.2", + "derive_builder", "getset", "quickcheck", "serde", @@ -2041,29 +2391,60 @@ dependencies = [ ] [[package]] -name = "oci-spec" -version = "0.6.0" +name = "once_cell" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "214b837f7dde5026f2028ead5ae720073277c19f82ff85623b142c39d4b843e7" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" + +[[package]] +name = "openssl" +version = "0.10.55" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d" dependencies = [ - "derive_builder 0.12.0", - "getset", - "serde", - "serde_json", - "thiserror", + "bitflags 1.3.2", + "cfg-if 1.0.0", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", ] [[package]] -name = "once_cell" -version = "1.17.0" +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.26", +] + +[[package]] +name = "openssl-probe" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + +[[package]] +name = "openssl-sys" +version = "0.9.90" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "374533b0e45f3a7ced10fcaeccca020e66656bc03dac384f852e4e5a7a8104a6" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] [[package]] -name = "os_str_bytes" -version = "6.4.1" +name = "overload" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "parking_lot" @@ -2077,28 +2458,28 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.6" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba1ef8814b5c993410bb3adfad7a5ed269563e4a2f90c41f5d85be7fb47133bf" +checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall", + "redox_syscall 0.3.5", "smallvec", - 
"windows-sys 0.42.0", + "windows-targets 0.48.0", ] [[package]] name = "paste" -version = "1.0.11" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba" +checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" [[package]] name = "path-clean" -version = "0.1.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecba01bf2678719532c5e3059e0b5f0811273d94b397088b82e3bd0a78c78fdd" +checksum = "17359afc20d7ab31fdb42bb844c8b3bb1dabd7dcf7e68428492da7f16966fcef" [[package]] name = "peeking_take_while" @@ -2118,15 +2499,45 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.2.0" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" + +[[package]] +name = "petgraph" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4" +dependencies = [ + "fixedbitset", + "indexmap 1.9.3", +] + +[[package]] +name = "pin-project" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "030ad2bc4db10a8944cb0d837f158bdfec4d4a4873ab701a95046770d11f8842" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" +checksum = "ec2e072ecce94ec471b13398d5402c188e76ac03cf74dd1a975161b23a3f6d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.26", +] [[package]] name = "pin-project-lite" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" +checksum = "4c40d25201921e5ff0c862a505c6557ea88568a4e3ace775ab55e93f2f4f9d57" [[package]] name = "pin-utils" @@ -2136,24 +2547,24 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "pnet_base" -version = "0.31.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9d3a993d49e5fd5d4d854d6999d4addca1f72d86c65adf224a36757161c02b6" +checksum = "fe4cf6fb3ab38b68d01ab2aea03ed3d1132b4868fa4e06285f29f16da01c5f4c" dependencies = [ "no-std-net", ] [[package]] name = "pnet_datalink" -version = "0.31.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e466faf03a98ad27f6e15cd27a2b7cc89e73e640a43527742977bc503c37f8aa" +checksum = "ad5854abf0067ebbd3967f7d45ebc8976ff577ff0c7bd101c4973ae3c70f98fe" dependencies = [ "ipnetwork", "libc", @@ -2164,9 +2575,9 @@ dependencies = [ [[package]] name = "pnet_sys" -version = "0.31.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "328e231f0add6d247d82421bf3790b4b33b39c8930637f428eef24c4c6a90805" +checksum = "417c0becd1b573f6d544f73671070b039051e5ad819cc64aa96377b536128d00" dependencies = [ "libc", "winapi", @@ -2204,20 +2615,40 @@ dependencies = [ [[package]] name = "predicates-core" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72f883590242d3c6fc5bf50299011695fa6590c2c70eac95ee1bdb9a733ad1a2" +checksum = "b794032607612e7abeb4db69adb4e33590fa6cf1149e95fd7cb00e634b92f174" [[package]] name = "predicates-tree" -version = "1.0.7" 
+version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54ff541861505aabf6ea722d2131ee980b8276e10a1297b94e896dd8b621850d" +checksum = "368ba315fb8c5052ab692e68a0eefec6ec57b23a36959c14496f0b0df2c0cecf" dependencies = [ "predicates-core", "termtree", ] +[[package]] +name = "prettyplease" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" +dependencies = [ + "proc-macro2", + "syn 2.0.26", +] + +[[package]] +name = "proc-macro-crate" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" +dependencies = [ + "once_cell", + "toml_edit", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -2227,7 +2658,7 @@ dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "version_check", ] @@ -2250,26 +2681,26 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.50" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ef7d57beacfaf2d8aee5937dab7b7f28de3cb8b1828479bb5de2a7106f2bae2" +checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" dependencies = [ "unicode-ident", ] [[package]] name = "procfs" -version = "0.14.2" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1de8dacb0873f77e6aefc6d71e044761fcc68060290f5b1089fcdf84626bb69" +checksum = "943ca7f9f29bab5844ecd8fdb3992c5969b6622bb9609b9502fef9b4310e3f1f" dependencies = [ - "bitflags", + "bitflags 1.3.2", "byteorder", "chrono", "flate2", "hex", "lazy_static", - "rustix", + "rustix 0.36.15", ] [[package]] @@ -2305,7 +2736,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"9d39b14605eaa1f6a340aec7f320b34064feb26c93aec35d6a9a2272a8ddfa49" dependencies = [ "anyhow", - "indexmap", + "indexmap 1.9.3", "log", "protobuf", "protobuf-support", @@ -2349,7 +2780,18 @@ checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", +] + +[[package]] +name = "pulldown-cmark" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffade02495f22453cd593159ea2f59827aae7f53fa8323f756799b670881dcf8" +dependencies = [ + "bitflags 1.3.2", + "memchr", + "unicase", ] [[package]] @@ -2365,13 +2807,19 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.23" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" +checksum = "50f3b39ccfb720540debaa0164757101c08ecb8d326b15358ce76a62c7e85965" dependencies = [ "proc-macro2", ] +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + [[package]] name = "rand" version = "0.8.5" @@ -2404,9 +2852,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7" +checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" dependencies = [ "either", "rayon-core", @@ -2414,9 +2862,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.10.2" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "356a0625f1954f730c0201cdab48611198dc6ce21f4acff55089b5a78e6e835b" +checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ "crossbeam-channel", "crossbeam-deque", @@ -2426,14 
+2874,14 @@ dependencies = [ [[package]] name = "rbpf" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c9c11162e7a92d2ede17ea2e5ef83025fd3e252638e43bf92294ea61791d1c4" +checksum = "b536dc5c7e3a730d06c578a41df1fbcccd66240a7a9bd5f150a0826291f01c66" dependencies = [ "byteorder", "combine", "libc", - "time 0.1.45", + "time 0.2.27", ] [[package]] @@ -2442,7 +2890,16 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ - "bitflags", + "bitflags 1.3.2", +] + +[[package]] +name = "redox_syscall" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +dependencies = [ + "bitflags 1.3.2", ] [[package]] @@ -2452,49 +2909,78 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ "getrandom", - "redox_syscall", + "redox_syscall 0.2.16", "thiserror", ] [[package]] -name = "regalloc" -version = "0.0.34" +name = "regalloc2" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62446b1d3ebf980bdc68837700af1d77b37bc430e524bf95319c6eada2a4cc02" +checksum = "300d4fbfb40c1c66a78ba3ddd41c1110247cf52f97b87d0f2fc9209bd49b030c" dependencies = [ + "fxhash", "log", - "rustc-hash", + "slice-group-by", "smallvec", ] [[package]] name = "regalloc2" -version = "0.5.1" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "300d4fbfb40c1c66a78ba3ddd41c1110247cf52f97b87d0f2fc9209bd49b030c" +checksum = "12513beb38dd35aab3ac5f5b89fd0330159a0dc21d5309d75073011bbc8032b0" dependencies = [ - "fxhash", + "hashbrown 0.13.2", "log", + "rustc-hash", "slice-group-by", "smallvec", ] [[package]] name = "regex" -version = 
"1.7.1" +version = "1.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata 0.3.2", + "regex-syntax 0.7.3", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", +] + +[[package]] +name = "regex-automata" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" +checksum = "83d3daa6976cffb758ec878f108ba0e062a45b2d6ca3a2cca965338855476caf" dependencies = [ "aho-corasick", "memchr", - "regex-syntax", + "regex-syntax 0.7.3", ] [[package]] name = "regex-syntax" -version = "0.6.28" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" +checksum = "2ab07dc67230e4a4718e70fd5c20055a4334b121f1f9db8fe63ef39ce9b8c846" [[package]] name = "region" @@ -2502,53 +2988,111 @@ version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76e189c2369884dce920945e2ddf79b3dff49e071a167dd1817fa9c4c00d512e" dependencies = [ - "bitflags", + "bitflags 1.3.2", "libc", "mach", "winapi", ] [[package]] -name = "remove_dir_all" -version = "0.5.3" +name = "rend" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +checksum = "581008d2099240d37fb08d77ad713bcaec2c4d89d50b5b21a8bb1996bbab68ab" 
dependencies = [ - "winapi", + "bytecheck", ] [[package]] -name = "rend" -version = "0.3.6" +name = "replace_with" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3a8614ee435691de62bcffcf4a66d91b3594bf1428a5722e79103249a095690" + +[[package]] +name = "reqwest" +version = "0.11.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79af64b4b6362ffba04eef3a4e10829718a4896dac19daa741851c86781edf95" +checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" dependencies = [ - "bytecheck", + "base64", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-rustls", + "hyper-tls", + "ipnet", + "js-sys", + "log", + "mime", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "rustls", + "rustls-pemfile", + "serde", + "serde_json", + "serde_urlencoded", + "tokio", + "tokio-native-tls", + "tokio-rustls", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "webpki-roots", + "winreg", +] + +[[package]] +name = "ring" +version = "0.16.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +dependencies = [ + "cc", + "libc", + "once_cell", + "spin 0.5.2", + "untrusted", + "web-sys", + "winapi", ] [[package]] name = "rkyv" -version = "0.7.39" +version = "0.7.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cec2b3485b07d96ddfd3134767b8a447b45ea4eb91448d0a35180ec0ffd5ed15" +checksum = "0200c8230b013893c0b2d6213d6ec64ed2b9be2e0e016682b7224ff82cff5c58" dependencies = [ + "bitvec", "bytecheck", "hashbrown 0.12.3", + "indexmap 1.9.3", "ptr_meta", "rend", "rkyv_derive", "seahash", + "tinyvec", + "uuid", ] [[package]] name = "rkyv_derive" -version = "0.7.39" +version = "0.7.42" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "6eaedadc88b53e36dd32d940ed21ae4d850d5916f2581526921f553a72ac34c4" +checksum = "b2e06b915b5c230a17d7a736d1e2e63ee753c256a8614ef3f5147b13a4f5541d" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2558,7 +3102,7 @@ dependencies = [ "anyhow", "libc", "nix", - "oci-spec 0.6.0", + "oci-spec", ] [[package]] @@ -2575,9 +3119,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.21" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc-hash" @@ -2600,150 +3144,318 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.13", + "semver 1.0.17", ] [[package]] name = "rustix" -version = "0.36.7" +version = "0.36.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fdebc4b395b7fbb9ab11e462e20ed9051e7b16e42d24042c776eca0ac81b03" +checksum = "c37f1bd5ef1b5422177b7646cba67430579cfe2ace80f284fee876bca52ad941" dependencies = [ - "bitflags", + "bitflags 1.3.2", "errno", - "io-lifetimes", - "itoa", + "io-lifetimes 1.0.11", "libc", - "linux-raw-sys", - "once_cell", - "windows-sys 0.42.0", + "linux-raw-sys 0.1.4", + "windows-sys 0.45.0", ] [[package]] -name = "rustversion" -version = "1.0.11" +name = "rustix" +version = "0.37.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" +checksum = "acf8729d8542766f1b2cf77eb034d52f40d375bb8b615d0b147089946e16613d" +dependencies = [ + "bitflags 1.3.2", + "errno", + "io-lifetimes 1.0.11", + "itoa", + "libc", + "linux-raw-sys 0.3.8", + "once_cell", + "windows-sys 0.48.0", +] [[package]] -name = "ryu" -version = 
"1.0.12" +name = "rustix" +version = "0.38.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" +checksum = "fbc6396159432b5c8490d4e301d8c705f61860b8b6c863bf79942ce5401968f3" +dependencies = [ + "bitflags 2.3.3", + "errno", + "libc", + "linux-raw-sys 0.4.3", + "windows-sys 0.48.0", +] [[package]] -name = "scopeguard" -version = "1.1.0" +name = "rustls" +version = "0.21.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +checksum = "79ea77c539259495ce8ca47f53e66ae0330a8819f67e23ac96ca02f50e7b7d36" +dependencies = [ + "log", + "ring", + "rustls-webpki", + "sct", +] [[package]] -name = "scratch" +name = "rustls-pemfile" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddccb15bcce173023b3fedd9436f882a0739b8dfb45e4f6b6002bee5929f61b2" - -[[package]] -name = "seahash" -version = "4.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" +checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" +dependencies = [ + "base64", +] [[package]] -name = "semver" -version = "0.9.0" +name = "rustls-webpki" +version = "0.101.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +checksum = "513722fd73ad80a71f72b61009ea1b584bcfa1483ca93949c8f290298837fa59" dependencies = [ - "semver-parser", + "ring", + "untrusted", ] [[package]] -name = "semver" -version = "1.0.13" +name = "rustversion" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f6841e709003d68bb2deee8c343572bf446003ec20a583e76f7b15cebf3711" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" [[package]] -name = 
"semver-parser" -version = "0.7.0" +name = "ryu" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" +checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" [[package]] -name = "serde" -version = "1.0.152" +name = "safe-path" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" +checksum = "980abdd3220aa19b67ca3ea07b173ca36383f18ae48cde696d90c8af39447ffb" dependencies = [ - "serde_derive", + "libc", ] [[package]] -name = "serde_bytes" -version = "0.11.8" +name = "same-file" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "718dc5fff5b36f99093fc49b280cfc96ce6fc824317783bff5a1fed0c7a64819" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" dependencies = [ - "serde", + "winapi-util", ] [[package]] -name = "serde_derive" -version = "1.0.152" +name = "schannel" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" +checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" dependencies = [ - "proc-macro2", - "quote", - "syn", + "windows-sys 0.48.0", ] [[package]] -name = "serde_json" -version = "1.0.91" +name = "scoped-tls" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877c235533714907a8c2464236f5c4b2a17262ef1bd71f38f35ea592c8da6883" -dependencies = [ - "itoa", - "ryu", - "serde", -] +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" [[package]] -name = "serde_repr" -version = "0.1.10" +name = "scopeguard" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9a5ec9fa74a20ebbe5d9ac23dac1fc96ba0ecfe9f50f2843b52e537b10fbcb4e" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] -name = "serial_test" -version = "1.0.0" +name = "sct" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "538c30747ae860d6fb88330addbbd3e0ddbe46d662d032855596d8a8ca260611" +checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ - "dashmap", - "futures", - "lazy_static", - "log", - "parking_lot", - "serial_test_derive", + "ring", + "untrusted", +] + +[[package]] +name = "seahash" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" + +[[package]] +name = "security-framework" +version = "2.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" +dependencies = [ + "serde", +] + +[[package]] +name = "semver-parser" +version = "0.7.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" + +[[package]] +name = "serde" +version = "1.0.177" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63ba2516aa6bf82e0b19ca8b50019d52df58455d3cf9bdaf6315225fdd0c560a" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde-wasm-bindgen" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3b4c031cd0d9014307d82b8abf653c0290fbdaeb4c02d00c63cf52f728628bf" +dependencies = [ + "js-sys", + "serde", + "wasm-bindgen", +] + +[[package]] +name = "serde_cbor" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" +dependencies = [ + "half", + "serde", +] + +[[package]] +name = "serde_derive" +version = "1.0.177" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "401797fe7833d72109fedec6bfcbe67c0eed9b99772f26eb8afd261f0abc6fd3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.26", +] + +[[package]] +name = "serde_json" +version = "1.0.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "076066c5f1078eac5b722a31827a8832fe108bed65dfa75e233c89f8206e976c" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_spanned" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_yaml" +version = "0.8.26" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" +dependencies = [ + "indexmap 1.9.3", + "ryu", + "serde", + "yaml-rust", +] + +[[package]] +name = "serde_yaml" +version = "0.9.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a49e178e4452f45cb61d0cd8cebc1b0fafd3e41929e996cef79aa3aca91f574" +dependencies = [ + "indexmap 2.0.0", + "itoa", + "ryu", + "serde", + "unsafe-libyaml", +] + +[[package]] +name = "serial_test" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e56dd856803e253c8f298af3f4d7eb0ae5e23a737252cd90bb4f3b435033b2d" +dependencies = [ + "dashmap", + "futures", + "lazy_static", + "log", + "parking_lot", + "serial_test_derive", ] [[package]] name = "serial_test_derive" -version = "1.0.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "079a83df15f85d89a68d64ae1238f142f172b1fa915d0d76b26a7cba1b659a69" +checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.26", ] [[package]] @@ -2763,15 +3475,34 @@ checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" [[package]] name = "sha2" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" +checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" dependencies = [ "cfg-if 1.0.0", "cpufeatures", "digest", ] +[[package]] +name = "sharded-slab" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shared-buffer" +version = "0.1.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2cf61602ee61e2f83dd016b3e6387245291cf728ea071c378b35088125b4d995" +dependencies = [ + "bytes", + "memmap2 0.6.2", +] + [[package]] name = "shellexpand" version = "2.1.2" @@ -2787,26 +3518,72 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" +[[package]] +name = "signal-hook-registry" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +dependencies = [ + "libc", +] + +[[package]] +name = "simdutf8" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" + [[package]] name = "slab" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" dependencies = [ "autocfg", ] [[package]] name = "slice-group-by" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03b634d87b960ab1a38c4fe143b508576f075e7c978bfad18217645ebfdfa2ec" +checksum = "826167069c09b99d56f31e9ae5c99049e932a98c9dc2dac47645b08dbbf76ba7" [[package]] name = "smallvec" -version = "1.10.0" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" + +[[package]] +name = "socket2" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "spin" +version = "0.5.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] + +[[package]] +name = "sptr" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" +checksum = "3b9b39299b249ad65f3b7e96443bad61c02ca5cd3589f46cb6d610a0fd6c0d6a" [[package]] name = "stable_deref_trait" @@ -2823,6 +3600,12 @@ dependencies = [ "version_check", ] +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + [[package]] name = "stdweb" version = "0.4.20" @@ -2847,7 +3630,7 @@ dependencies = [ "quote", "serde", "serde_derive", - "syn", + "syn 1.0.109", ] [[package]] @@ -2863,7 +3646,7 @@ dependencies = [ "serde_derive", "serde_json", "sha1", - "syn", + "syn 1.0.109", ] [[package]] @@ -2880,9 +3663,9 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "syn" -version = "1.0.107" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", @@ -2890,44 +3673,30 @@ dependencies = [ ] [[package]] -name = "syscalls" -version = "0.6.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "535e4a480d47370482f8251117cba053e32067862c439dcd4c9ea4026d08f88e" -dependencies = [ - "cc", - "serde", - "serde_repr", -] - -[[package]] -name = "sysinfo" -version = "0.27.7" 
+name = "syn" +version = "2.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "975fe381e0ecba475d4acff52466906d95b153a40324956552e027b2a9eaa89e" +checksum = "45c3457aacde3c65315de5031ec191ce46604304d2446e803d71ade03308d970" dependencies = [ - "cfg-if 1.0.0", - "core-foundation-sys", - "libc", - "ntapi", - "once_cell", - "winapi", + "proc-macro2", + "quote", + "unicode-ident", ] [[package]] name = "system-interface" -version = "0.25.3" +version = "0.25.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6afe2d3c354cfbfdc14611db99b272f59f80289a2abe30c8b4355ee619bc22ef" +checksum = "10081a99cbecbc363d381b9503563785f0b02735fccbb0d4c1a2cb3d39f7e7fe" dependencies = [ - "bitflags", + "bitflags 2.3.3", "cap-fs-ext", "cap-std", "fd-lock", - "io-lifetimes", - "rustix", - "windows-sys 0.42.0", - "winx", + "io-lifetimes 2.0.1", + "rustix 0.38.1", + "windows-sys 0.48.0", + "winx 0.36.1", ] [[package]] @@ -2939,11 +3708,17 @@ dependencies = [ "unicode-width", ] +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + [[package]] name = "tar" -version = "0.4.38" +version = "0.4.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b55807c0344e1e6c04d7c965f5289c39a8d94ae23ed5c0b57aabac549f871c6" +checksum = "ec96d2ffad078296368d46ff1cb309be1c23c513b4ab0e22a45de0185275ac96" dependencies = [ "filetime", "libc", @@ -2952,21 +3727,30 @@ dependencies = [ [[package]] name = "target-lexicon" -version = "0.12.5" +version = "0.12.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9410d0f6853b1d94f0e519fb95df60f29d2c1eff2d921ffdf01a4c8a3b54f12d" +checksum = "1d2faeef5759ab89935255b1a4cd98e0baf99d1085e37d36599c625dac49ae8e" [[package]] name = "tempfile" -version = "3.3.0" +version = "3.7.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +checksum = "5486094ee78b2e5038a6382ed7645bc084dc2ec433426ca4c3cb61e2007b8998" dependencies = [ "cfg-if 1.0.0", "fastrand", + "redox_syscall 0.3.5", + "rustix 0.38.1", + "windows-sys 0.48.0", +] + +[[package]] +name = "term_size" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e4129646ca0ed8f45d09b929036bafad5377103edd06e50bf574b353d2b08d9" +dependencies = [ "libc", - "redox_syscall", - "remove_dir_all", "winapi", ] @@ -2979,11 +3763,20 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "termios" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "411c5bf740737c7918b8b1fe232dca4dc9f8e754b8ad5e20966814001ed0ac6b" +dependencies = [ + "libc", +] + [[package]] name = "termtree" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95059e91184749cb66be6dc994f67f182b6d897cb3df74a5bf66b5e709295fd8" +checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test_framework" @@ -2995,33 +3788,32 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.38" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" +checksum = "611040a08a0439f8248d1990b111c95baa9c704c805fa1f62104b39655fd7f90" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.38" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" +checksum = "090198534930841fab3a5d1bb637cde49e339654e606195f8d9c76eeb081dc96" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.26", ] [[package]] -name = "time" -version = 
"0.1.45" +name = "thread_local" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ - "libc", - "wasi 0.10.0+wasi-snapshot-preview1", - "winapi", + "cfg-if 1.0.0", + "once_cell", ] [[package]] @@ -3034,22 +3826,31 @@ dependencies = [ "libc", "standback", "stdweb", - "time-macros", + "time-macros 0.1.1", "version_check", "winapi", ] [[package]] name = "time" -version = "0.3.14" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c3f9a28b618c3a6b9251b6908e9c99e04b9e5c02e6581ccbb67d59c34ef7f9b" +checksum = "ea9e1b3cf1243ae005d9e74085d4d542f3125458f3a81af210d901dcd7411efd" dependencies = [ "itoa", "libc", "num_threads", + "serde", + "time-core", + "time-macros 0.2.9", ] +[[package]] +name = "time-core" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" + [[package]] name = "time-macros" version = "0.1.1" @@ -3060,6 +3861,15 @@ dependencies = [ "time-macros-impl", ] +[[package]] +name = "time-macros" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" +dependencies = [ + "time-core", +] + [[package]] name = "time-macros-impl" version = "0.1.2" @@ -3070,7 +3880,7 @@ dependencies = [ "proc-macro2", "quote", "standback", - "syn", + "syn 1.0.109", ] [[package]] @@ -3084,69 +3894,268 @@ dependencies = [ [[package]] name = "tinyvec_macros" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" - -[[package]] -name = "toml" -version = "0.5.11" +version = "0.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" -dependencies = [ - "serde", -] +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] -name = "tracing" -version = "0.1.37" +name = "tokio" +version = "1.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +checksum = "532826ff75199d5833b9d2c5fe410f29235e25704ee5f0ef599fb51c21f4a4da" dependencies = [ - "cfg-if 1.0.0", - "log", + "autocfg", + "backtrace", + "bytes", + "libc", + "mio", + "num_cpus", "pin-project-lite", - "tracing-attributes", - "tracing-core", + "signal-hook-registry", + "socket2", + "tokio-macros", + "windows-sys 0.48.0", ] [[package]] -name = "tracing-attributes" -version = "0.1.23" +name = "tokio-macros" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" +checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.26", ] [[package]] -name = "tracing-core" -version = "0.1.30" +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +dependencies = [ + "rustls", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" +dependencies = [ + "bytes", + "futures-core", + 
"futures-sink", + "pin-project-lite", + "tokio", + "tracing", +] + +[[package]] +name = "toml" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ + "serde", +] + +[[package]] +name = "toml" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c17e963a819c331dcacd7ab957d80bc2b9a9c1e71c804826d2f283dd65306542" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.19.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a" +dependencies = [ + "indexmap 2.0.0", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + +[[package]] +name = "tower-service" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" + +[[package]] +name = "tracing" +version = "0.1.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +dependencies = [ + "cfg-if 1.0.0", + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.26", +] + +[[package]] +name = 
"tracing-core" +version = "0.1.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-journald" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba316a74e8fc3c3896a850dba2375928a9fa171b085ecddfc7c054d39970f3fd" +dependencies = [ + "libc", + "tracing-core", + "tracing-subscriber", +] + +[[package]] +name = "tracing-log" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" +dependencies = [ + "lazy_static", + "log", + "tracing-core", +] + +[[package]] +name = "tracing-serde" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" +dependencies = [ + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" +dependencies = [ + "matchers", + "nu-ansi-term", "once_cell", + "regex", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", + "tracing-serde", ] +[[package]] +name = "try-lock" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" + [[package]] name = "typenum" version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" +[[package]] +name = "uapi" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"019450240401d342e2a5bc47f7fbaeb002a38fe18197b83788750d7ffb143274" +dependencies = [ + "cc", + "cfg-if 0.1.10", + "libc", + "uapi-proc", +] + +[[package]] +name = "uapi-proc" +version = "0.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54de46f980cea7b2ae8d8f7f9f1c35cf7062c68343e99345ef73758f8e60975a" +dependencies = [ + "lazy_static", + "libc", + "proc-macro2", + "quote", + "regex", + "syn 1.0.109", +] + +[[package]] +name = "unicase" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +dependencies = [ + "version_check", +] + [[package]] name = "unicode-bidi" -version = "0.3.10" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54675592c1dbefd78cbd98db9bacd89886e1ca50692a0692baefffdeb92dd58" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.6" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" +checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" [[package]] name = "unicode-normalization" @@ -3157,28 +4166,71 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-segmentation" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" + [[package]] name = "unicode-width" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" +[[package]] +name = "unicode-xid" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" + +[[package]] 
+name = "unsafe-libyaml" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28467d3e1d3c6586d8f25fa243f544f5800fec42d97032474e17222c2b75cfa" + +[[package]] +name = "untrusted" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" + [[package]] name = "url" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" +checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" dependencies = [ "form_urlencoded", "idna", "percent-encoding", + "serde", ] +[[package]] +name = "urlencoding" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" + +[[package]] +name = "utf8parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" + [[package]] name = "uuid" -version = "1.3.0" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d" + +[[package]] +name = "valuable" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1674845326ee10d37ca60470760d4288a6f80f304007d92e5c53bab78c9cfd79" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "vcpkg" @@ -3188,20 +4240,13 @@ checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "vergen" -version = "7.5.0" +version = "8.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "571b69f690c855821462709b6f41d42ceccc316fbd17b60bd06d06928cfe6a99" +checksum = 
"bbc5ad0d9d26b2c49a5ab7da76c3e79d3ee37e7821799f8223fcb8f2f391a2e7" dependencies = [ "anyhow", - "cfg-if 1.0.0", - "enum-iterator 1.2.0", - "getset", - "git2", - "rustc_version 0.4.0", "rustversion", - "sysinfo", - "thiserror", - "time 0.3.14", + "time 0.3.22", ] [[package]] @@ -3211,10 +4256,173 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] -name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" +name = "virtual-fs" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcd74701f37aea30b90a83c90b92bc3850dedb9448836dbcc0960f993bda423b" +dependencies = [ + "anyhow", + "async-trait", + "bytes", + "derivative", + "filetime", + "fs_extra", + "futures", + "getrandom", + "indexmap 1.9.3", + "lazy_static", + "libc", + "pin-project-lite", + "replace_with", + "slab", + "thiserror", + "tokio", + "tracing", + "webc", +] + +[[package]] +name = "virtual-net" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfac1d64ecfe2d8b295530da2a14af9eb9acccd91d76f3347dee96d745c83661" +dependencies = [ + "async-trait", + "bytes", + "libc", + "thiserror", + "tokio", + "tracing", +] + +[[package]] +name = "wai-bindgen-gen-core" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aa3dc41b510811122b3088197234c27e08fcad63ef936306dd8e11e2803876c" +dependencies = [ + "anyhow", + "wai-parser", +] + +[[package]] +name = "wai-bindgen-gen-rust" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19bc05e8380515c4337c40ef03b2ff233e391315b178a320de8640703d522efe" +dependencies = [ + "heck 0.3.3", + "wai-bindgen-gen-core", +] + +[[package]] +name = "wai-bindgen-gen-rust-wasm" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d6f35ce5e74086fac87f3a7bd50f643f00fe3559adb75c88521ecaa01c8a6199" +dependencies = [ + "heck 0.3.3", + "wai-bindgen-gen-core", + "wai-bindgen-gen-rust", +] + +[[package]] +name = "wai-bindgen-gen-wasmer" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f61484185d8c520a86d5a7f7f8265f446617c2f9774b2e20a52de19b6e53432" +dependencies = [ + "heck 0.3.3", + "wai-bindgen-gen-core", + "wai-bindgen-gen-rust", +] + +[[package]] +name = "wai-bindgen-rust" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e5601c6f448c063e83a5e931b8fefcdf7e01ada424ad42372c948d2e3d67741" +dependencies = [ + "bitflags 1.3.2", + "wai-bindgen-rust-impl", +] + +[[package]] +name = "wai-bindgen-rust-impl" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdeeb5c1170246de8425a3e123e7ef260dc05ba2b522a1d369fe2315376efea4" +dependencies = [ + "proc-macro2", + "syn 1.0.109", + "wai-bindgen-gen-core", + "wai-bindgen-gen-rust-wasm", +] + +[[package]] +name = "wai-bindgen-wasmer" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ffd9a8124a3e4e664cb79864fd1eaf24521e15bf8d67509af1bc45e8b510475" +dependencies = [ + "anyhow", + "bitflags 1.3.2", + "once_cell", + "thiserror", + "tracing", + "wai-bindgen-wasmer-impl", + "wasmer", +] + +[[package]] +name = "wai-bindgen-wasmer-impl" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" +checksum = "4b3488ed88d4dd0e3bf85bad4e27dac6cb31aae5d122a5dda2424803c8dc863a" +dependencies = [ + "proc-macro2", + "syn 1.0.109", + "wai-bindgen-gen-core", + "wai-bindgen-gen-wasmer", +] + +[[package]] +name = "wai-parser" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bd0acb6d70885ea0c343749019ba74f015f64a9d30542e66db69b49b7e28186" 
+dependencies = [ + "anyhow", + "id-arena", + "pulldown-cmark", + "unicode-normalization", + "unicode-xid", +] + +[[package]] +name = "waker-fn" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" + +[[package]] +name = "walkdir" +version = "2.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] [[package]] name = "wasi" @@ -3224,9 +4432,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasi-cap-std-sync" -version = "4.0.0" +version = "10.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79eba5cf83a4adb2ccba4c029858229a4992dd88cc35dbfa5a555ec7fc2a8416" +checksum = "291862f1014dd7e674f93b263d57399de4dd1907ea37e74cf7d36454536ba2f0" dependencies = [ "anyhow", "async-trait", @@ -3236,33 +4444,34 @@ dependencies = [ "cap-time-ext", "fs-set-times", "io-extras", - "io-lifetimes", + "io-lifetimes 1.0.11", "is-terminal", "once_cell", - "rustix", + "rustix 0.37.19", "system-interface", "tracing", "wasi-common", - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] name = "wasi-common" -version = "4.0.0" +version = "10.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678ff55fb89ae721dae166003b843f53ee3e7bdb96aa96715fec8d44d012b105" +checksum = "3b422ae2403cae9ca603864272a402cf5001dd6fef8632e090e00c4fb475741b" dependencies = [ "anyhow", - "bitflags", + "bitflags 1.3.2", "cap-rand", "cap-std", "io-extras", - "rustix", + "log", + "rustix 0.37.19", "thiserror", "tracing", 
"wasmtime", "wiggle", - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] @@ -3286,10 +4495,45 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "wasm-bindgen-shared", ] +[[package]] +name = "wasm-bindgen-downcast" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dac026d43bcca6e7ce1c0956ba68f59edf6403e8e930a5d891be72c31a44340" +dependencies = [ + "js-sys", + "once_cell", + "wasm-bindgen", + "wasm-bindgen-downcast-macros", +] + +[[package]] +name = "wasm-bindgen-downcast-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5020cfa87c7cecefef118055d44e3c1fc122c7ec25701d528ee458a0b45f38f" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454" +dependencies = [ + "cfg-if 1.0.0", + "js-sys", + "wasm-bindgen", + "web-sys", +] + [[package]] name = "wasm-bindgen-macro" version = "0.2.84" @@ -3308,7 +4552,7 @@ checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -3321,9 +4565,18 @@ checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" [[package]] name = "wasm-encoder" -version = "0.22.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef126be0e14bdf355ac1a8b41afc89195289e5c7179f80118e3abddb472f0810" +checksum = "18c41dbd92eaebf3612a39be316540b8377c871cb9bde6b064af962984912881" +dependencies = [ + "leb128", +] + +[[package]] +name = "wasm-encoder" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"41763f20eafed1399fff1afb466496d3a959f58241436cfdc17e3f5ca954de16" dependencies = [ "leb128", ] @@ -3334,20 +4587,20 @@ version = "0.1.0" [[package]] name = "wasmedge-macro" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f159a9a7d3d2301de2fc9cb88ad3459af9e95cbd5a0f57437efccc2b572a027" +checksum = "372985c17ffd2705e0cc14aa85e96c546d04823aec07ec1e0da0a2983c9eb655" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.26", ] [[package]] name = "wasmedge-sdk" -version = "0.7.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6b7678a024c25fbe7168dedbca6958b6baf5412a998c697bf40dbf359efc54e" +checksum = "e30b3d1a423ee62c0016bbfd07bdca4669a30cd167d7ff528caff559b467c17b" dependencies = [ "anyhow", "num-derive", @@ -3361,17 +4614,15 @@ dependencies = [ [[package]] name = "wasmedge-sys" -version = "0.12.2" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00ab1f78c1d3de17fd0deff054de96e337cf8cf02d13651748bdfa1f1b219369" +checksum = "3db99e2ef8cf618f832f1ecf32c3b86e8ce0fb2f31013426040ae514a981312c" dependencies = [ "bindgen", "cmake", - "lazy_static", "libc", - "parking_lot", "paste", - "rand", + "scoped-tls", "thiserror", "wasmedge-macro", "wasmedge-types", @@ -3380,9 +4631,9 @@ dependencies = [ [[package]] name = "wasmedge-types" -version = "0.3.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3323fe75a4e65476b33a041092aac22a1065a7ba0bfecbedc8135e8efb6c522d" +checksum = "3b447f1a187fc86d866f388d7f29382b61896ee33627b727d678a994a0cbbd9e" dependencies = [ "thiserror", "wat", @@ -3390,73 +4641,65 @@ dependencies = [ [[package]] name = "wasmer" -version = "2.3.0" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea8d8361c9d006ea3d7797de7bd6b1492ffd0f91a22430cfda6c1658ad57bedf" +checksum = 
"ea790bcdfb4e6e9d1e5ddf75b4699aac62b078fcc9f27f44e1748165ceea67bf" dependencies = [ + "bytes", "cfg-if 1.0.0", - "indexmap", + "derivative", + "indexmap 1.9.3", "js-sys", - "loupe", "more-asserts", + "rustc-demangle", + "serde", + "serde-wasm-bindgen", "target-lexicon", "thiserror", "wasm-bindgen", - "wasmer-artifact", + "wasm-bindgen-downcast", "wasmer-compiler", "wasmer-compiler-cranelift", "wasmer-derive", - "wasmer-engine", - "wasmer-engine-dylib", - "wasmer-engine-universal", "wasmer-types", "wasmer-vm", "wat", "winapi", ] -[[package]] -name = "wasmer-artifact" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7aaf9428c29c1d8ad2ac0e45889ba8a568a835e33fd058964e5e500f2f7ce325" -dependencies = [ - "enumset", - "loupe", - "thiserror", - "wasmer-compiler", - "wasmer-types", -] - [[package]] name = "wasmer-compiler" -version = "2.3.0" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e67a6cd866aed456656db2cfea96c18baabbd33f676578482b85c51e1ee19d2c" +checksum = "f093937725e242e5529fed27e08ff836c011a9ecc22e6819fb818c2ac6ff5f88" dependencies = [ + "backtrace", + "cfg-if 1.0.0", + "enum-iterator", "enumset", - "loupe", - "rkyv", - "serde", - "serde_bytes", + "lazy_static", + "leb128", + "memmap2 0.5.10", + "more-asserts", + "region", "smallvec", - "target-lexicon", "thiserror", "wasmer-types", - "wasmparser 0.83.0", + "wasmer-vm", + "wasmparser 0.95.0", + "winapi", ] [[package]] name = "wasmer-compiler-cranelift" -version = "2.3.0" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48be2f9f6495f08649e4f8b946a2cbbe119faf5a654aa1457f9504a99d23dae0" +checksum = "3b27b1670d27158789ebe14e4da3902c72132174884a1c6a3533ce4fd9dd83db" dependencies = [ - "cranelift-codegen 0.82.3", - "cranelift-entity 0.82.3", - "cranelift-frontend 0.82.3", + "cranelift-codegen 0.91.1", + "cranelift-entity 0.91.1", + "cranelift-frontend 0.91.1", "gimli 
0.26.2", - "loupe", "more-asserts", "rayon", "smallvec", @@ -3468,259 +4711,248 @@ dependencies = [ [[package]] name = "wasmer-derive" -version = "2.3.0" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00e50405cc2a2f74ff574584710a5f2c1d5c93744acce2ca0866084739284b51" +checksum = "13ae8286cba2acb10065a4dac129c7c7f7bcd24acd6538555d96616eea16bc27" dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] -name = "wasmer-engine" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f98f010978c244db431b392aeab0661df7ea0822343334f8f2a920763548e45" -dependencies = [ - "backtrace", - "enumset", - "lazy_static", - "loupe", - "memmap2", - "more-asserts", - "rustc-demangle", - "serde", - "serde_bytes", - "target-lexicon", - "thiserror", - "wasmer-artifact", - "wasmer-compiler", - "wasmer-types", - "wasmer-vm", -] - -[[package]] -name = "wasmer-engine-dylib" -version = "2.3.0" +name = "wasmer-toml" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0358af9c154724587731175553805648d9acb8f6657880d165e378672b7e53" +checksum = "4232db0aff83ed6208d541ddcf1bf72730673528be8c4fe13c6369060f6e05a7" dependencies = [ - "cfg-if 1.0.0", - "enum-iterator 0.7.0", - "enumset", - "leb128", - "libloading", - "loupe", - "object 0.28.4", - "rkyv", + "anyhow", + "indexmap 1.9.3", + "semver 1.0.17", "serde", - "tempfile", - "tracing", - "wasmer-artifact", - "wasmer-compiler", - "wasmer-engine", - "wasmer-object", - "wasmer-types", - "wasmer-vm", - "which", -] - -[[package]] -name = "wasmer-engine-universal" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "440dc3d93c9ca47865a4f4edd037ea81bf983b5796b59b3d712d844b32dbef15" -dependencies = [ - "cfg-if 1.0.0", - "enumset", - "leb128", - "loupe", - "region", - "rkyv", - "wasmer-compiler", - "wasmer-engine", - 
"wasmer-engine-universal-artifact", - "wasmer-types", - "wasmer-vm", - "winapi", -] - -[[package]] -name = "wasmer-engine-universal-artifact" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68f1db3f54152657eb6e86c44b66525ff7801dad8328fe677da48dd06af9ad41" -dependencies = [ - "enum-iterator 0.7.0", - "enumset", - "loupe", - "rkyv", - "thiserror", - "wasmer-artifact", - "wasmer-compiler", - "wasmer-types", -] - -[[package]] -name = "wasmer-object" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d831335ff3a44ecf451303f6f891175c642488036b92ceceb24ac8623a8fa8b" -dependencies = [ - "object 0.28.4", + "serde_cbor", + "serde_json", + "serde_yaml 0.9.25", "thiserror", - "wasmer-compiler", - "wasmer-types", + "toml 0.5.11", ] [[package]] name = "wasmer-types" -version = "2.3.0" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39df01ea05dc0a9bab67e054c7cb01521e53b35a7bb90bd02eca564ed0b2667f" +checksum = "918d2f0bb5eaa95a80c06be33f21dee92f40f12cd0982da34490d121a99d244b" dependencies = [ - "backtrace", - "enum-iterator 0.7.0", - "indexmap", - "loupe", + "bytecheck", + "enum-iterator", + "enumset", + "indexmap 1.9.3", "more-asserts", "rkyv", "serde", + "target-lexicon", "thiserror", ] -[[package]] -name = "wasmer-vfs" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9302eae3edc53cb540c2d681e7f16d8274918c1ce207591f04fed351649e97c0" -dependencies = [ - "libc", - "thiserror", - "tracing", -] - [[package]] name = "wasmer-vm" -version = "2.3.0" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30d965fa61f4dc4cdb35a54daaf7ecec3563fbb94154a6c35433f879466247dd" +checksum = "a1e000c2cbd4f9805427af5f3b3446574caf89ab3a1e66c2f3579fbde22b072b" dependencies = [ "backtrace", "cc", "cfg-if 1.0.0", "corosensei", - "enum-iterator 0.7.0", - "indexmap", + 
"dashmap", + "derivative", + "enum-iterator", + "fnv", + "indexmap 1.9.3", "lazy_static", "libc", - "loupe", "mach", - "memoffset 0.6.5", + "memoffset 0.8.0", "more-asserts", "region", - "rkyv", "scopeguard", + "thiserror", + "wasmer-types", + "winapi", +] + +[[package]] +name = "wasmer-wasix" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dcd089dcd440141b2edf300ddd61c2d67d052baac8d29256c901f607d44d459" +dependencies = [ + "anyhow", + "async-trait", + "bincode", + "bytes", + "cfg-if 1.0.0", + "cooked-waker", + "dashmap", + "derivative", + "futures", + "getrandom", + "heapless", + "hex", + "http", + "lazy_static", + "libc", + "linked_hash_set", + "once_cell", + "petgraph", + "pin-project", + "rand", + "reqwest", + "semver 1.0.17", "serde", + "serde_cbor", + "serde_derive", + "serde_json", + "serde_yaml 0.8.26", + "sha2", + "shellexpand", + "tempfile", + "term_size", + "termios", "thiserror", - "wasmer-artifact", + "tokio", + "tracing", + "url", + "urlencoding", + "virtual-fs", + "virtual-net", + "wai-bindgen-wasmer", + "waker-fn", + "wasm-bindgen", + "wasmer", "wasmer-types", + "wasmer-wasix-types", + "webc", + "weezl", "winapi", ] [[package]] -name = "wasmer-wasi" -version = "2.3.0" +name = "wasmer-wasix-types" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fadbe31e3c1b6f3e398ad172b169152ae1a743ae6efd5f9ffb34019983319d99" +checksum = "7a4a519e8f0b878bb4cd2b1bc733235aa6c331b7b4857dd6e0ac3c9a36d942ae" dependencies = [ + "anyhow", + "bitflags 1.3.2", + "byteorder", "cfg-if 1.0.0", - "generational-arena", - "getrandom", - "libc", - "thiserror", - "tracing", - "wasm-bindgen", + "num_enum", + "serde", + "time 0.2.27", + "wai-bindgen-gen-core", + "wai-bindgen-gen-rust", + "wai-bindgen-gen-rust-wasm", + "wai-bindgen-rust", + "wai-parser", "wasmer", - "wasmer-vfs", - "wasmer-wasi-types", - "winapi", + "wasmer-derive", + "wasmer-types", ] [[package]] -name = 
"wasmer-wasi-types" -version = "2.3.0" +name = "wasmparser" +version = "0.95.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22dc83aadbdf97388de3211cb6f105374f245a3cf2a5c65a16776e7a087a8468" +checksum = "f2ea896273ea99b15132414be1da01ab0d8836415083298ecaffbe308eaac87a" dependencies = [ - "byteorder", - "time 0.2.27", - "wasmer-types", + "indexmap 1.9.3", + "url", ] [[package]] name = "wasmparser" -version = "0.83.0" +version = "0.107.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "718ed7c55c2add6548cca3ddd6383d738cd73b892df400e96b9aa876f0141d7a" +checksum = "29e3ac9b780c7dda0cac7a52a5d6d2d6707cc6e3451c9db209b6c758f40d7acb" +dependencies = [ + "indexmap 1.9.3", + "semver 1.0.17", +] [[package]] name = "wasmparser" -version = "0.95.0" +version = "0.110.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2ea896273ea99b15132414be1da01ab0d8836415083298ecaffbe308eaac87a" +checksum = "1dfcdb72d96f01e6c85b6bf20102e7423bdbaad5c337301bab2bbf253d26413c" dependencies = [ - "indexmap", - "url", + "indexmap 2.0.0", + "semver 1.0.17", +] + +[[package]] +name = "wasmprinter" +version = "0.2.62" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42cd12ed4d96a984e4b598a17457f1126d01640cc7461afbb319642111ff9e7f" +dependencies = [ + "anyhow", + "wasmparser 0.110.0", ] [[package]] name = "wasmtime" -version = "4.0.0" +version = "10.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4abddf11816dd8f5e7310f6ebe5a2503b43f20ab2bf050b7d63f5b1bb96a81d9" +checksum = "cd02b992d828b91efaf2a7499b21205fe4ab3002e401e3fe0f227aaeb4001d93" dependencies = [ "anyhow", "async-trait", "bincode", + "bumpalo", "cfg-if 1.0.0", - "indexmap", + "encoding_rs", + "fxprof-processed-profile", + "indexmap 1.9.3", "libc", "log", - "object 0.29.0", + "object 0.30.4", "once_cell", "paste", "psm", "rayon", "serde", + "serde_json", "target-lexicon", - "wasmparser 
0.95.0", + "wasmparser 0.107.0", "wasmtime-cache", + "wasmtime-component-macro", + "wasmtime-component-util", "wasmtime-cranelift", "wasmtime-environ", "wasmtime-fiber", "wasmtime-jit", "wasmtime-runtime", + "wasmtime-winch", "wat", - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] name = "wasmtime-asm-macros" -version = "4.0.0" +version = "10.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1f5206486f0467ba86e84d35996c4048b077cec2c9e5b322e7b853bdbe79334" +checksum = "284466ef356ce2d909bc0ad470b60c4d0df5df2de9084457e118131b3c779b92" dependencies = [ "cfg-if 1.0.0", ] [[package]] name = "wasmtime-cache" -version = "4.0.0" +version = "10.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1e77abcf538af42517e188c109e4b50ecf6c0ee4d77ede76a438e0306b934dc" +checksum = "efc78cfe1a758d1336f447a47af6ec05e0df2c03c93440d70faf80e17fbb001e" dependencies = [ "anyhow", "base64", @@ -3728,162 +4960,249 @@ dependencies = [ "directories-next", "file-per-thread-logger", "log", - "rustix", + "rustix 0.37.19", "serde", "sha2", - "toml", - "windows-sys 0.42.0", + "toml 0.5.11", + "windows-sys 0.48.0", "zstd", ] +[[package]] +name = "wasmtime-component-macro" +version = "10.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e916103436a6d84faa4c2083e2e98612a323c2cc6147ec419124f67c764c9c" +dependencies = [ + "anyhow", + "proc-macro2", + "quote", + "syn 1.0.109", + "wasmtime-component-util", + "wasmtime-wit-bindgen", + "wit-parser", +] + +[[package]] +name = "wasmtime-component-util" +version = "10.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f20a5135ec5ef01080e674979b02d6fa5eebaa2b0c2d6660513ee9956a1bf624" + [[package]] name = "wasmtime-cranelift" -version = "4.0.0" +version = "10.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e5bcb1d5ef211726b11e1286fe96cb40c69044c3632e1d6c67805d88a2e1a34" +checksum = 
"8e1aa99cbf3f8edb5ad8408ba380f5ab481528ecd8a5053acf758e006d6727fd" dependencies = [ "anyhow", - "cranelift-codegen 0.91.0", - "cranelift-entity 0.91.0", - "cranelift-frontend 0.91.0", + "cranelift-codegen 0.97.1", + "cranelift-control", + "cranelift-entity 0.97.1", + "cranelift-frontend 0.97.1", "cranelift-native", "cranelift-wasm", - "gimli 0.26.2", + "gimli 0.27.2", "log", - "object 0.29.0", + "object 0.30.4", "target-lexicon", "thiserror", - "wasmparser 0.95.0", + "wasmparser 0.107.0", + "wasmtime-cranelift-shared", + "wasmtime-environ", +] + +[[package]] +name = "wasmtime-cranelift-shared" +version = "10.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cce31fd55978601acc103acbb8a26f81c89a6eae12d3a1c59f34151dfa609484" +dependencies = [ + "anyhow", + "cranelift-codegen 0.97.1", + "cranelift-control", + "cranelift-native", + "gimli 0.27.2", + "object 0.30.4", + "target-lexicon", "wasmtime-environ", ] [[package]] name = "wasmtime-environ" -version = "4.0.0" +version = "10.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcab3fac5a2ff68ce9857166a7d7c0e5251b554839b9dda7ed3b5528e191936e" +checksum = "41f9e58e0ee7d43ff13e75375c726b16bce022db798d3a099a65eeaa7d7a544b" dependencies = [ "anyhow", - "cranelift-entity 0.91.0", - "gimli 0.26.2", - "indexmap", + "cranelift-entity 0.97.1", + "gimli 0.27.2", + "indexmap 1.9.3", "log", - "object 0.29.0", + "object 0.30.4", "serde", "target-lexicon", "thiserror", - "wasmparser 0.95.0", + "wasm-encoder 0.29.0", + "wasmparser 0.107.0", + "wasmprinter", + "wasmtime-component-util", "wasmtime-types", ] [[package]] name = "wasmtime-fiber" -version = "4.0.0" +version = "10.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fb38af221b780f2c03764d763fe7f7bc414ea9db744d66dac98f9b694892561" +checksum = "14309cbdf2c395258b124a24757c727403070c0465a28bcc780c4f82f4bca5ff" dependencies = [ "cc", "cfg-if 1.0.0", - "rustix", + "rustix 0.37.19", 
"wasmtime-asm-macros", - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] name = "wasmtime-jit" -version = "4.0.0" +version = "10.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7d866e2a84ee164739b7ed7bd7cc9e1f918639d2ec5e2817a31e24c148cab20" +checksum = "5f0f2eaeb01bb67266416507829bd8e0bb60278444e4cbd048e280833ebeaa02" dependencies = [ - "addr2line 0.17.0", + "addr2line 0.19.0", "anyhow", "bincode", "cfg-if 1.0.0", "cpp_demangle", - "gimli 0.26.2", + "gimli 0.27.2", "ittapi", "log", - "object 0.29.0", + "object 0.30.4", "rustc-demangle", + "rustix 0.37.19", "serde", "target-lexicon", "wasmtime-environ", "wasmtime-jit-debug", "wasmtime-jit-icache-coherence", "wasmtime-runtime", - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] name = "wasmtime-jit-debug" -version = "4.0.0" +version = "10.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0104c2b1ce443f2a2806216fcdf6dce09303203ec5797a698d313063b31e5bc8" +checksum = "f42e59d62542bfb73ce30672db7eaf4084a60b434b688ac4f05b287d497de082" dependencies = [ - "object 0.29.0", + "object 0.30.4", "once_cell", - "rustix", + "rustix 0.37.19", ] [[package]] name = "wasmtime-jit-icache-coherence" -version = "3.0.0" +version = "10.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22d9c2e92b0fc124d2cad6cb497a4c840580a7dd2414a37109e8c7cfe699c0ea" +checksum = "2b49ceb7e2105a8ebe5614d7bbab6f6ef137a284e371633af60b34925493081f" dependencies = [ "cfg-if 1.0.0", "libc", - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] name = "wasmtime-runtime" -version = "4.0.0" +version = "10.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a1f0f99297a94cb20c511d1d4e864d9b54794644016d2530dc797cacfa7224a" +checksum = "3a5de4762421b0b2b19e02111ca403632852b53e506e03b4b227ffb0fbfa63c2" dependencies = [ "anyhow", "cc", "cfg-if 1.0.0", - "indexmap", + "encoding_rs", + "indexmap 1.9.3", "libc", 
"log", "mach", "memfd", - "memoffset 0.6.5", + "memoffset 0.8.0", "paste", "rand", - "rustix", + "rustix 0.37.19", + "sptr", "wasmtime-asm-macros", "wasmtime-environ", "wasmtime-fiber", "wasmtime-jit-debug", - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] name = "wasmtime-types" -version = "4.0.0" +version = "10.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f3d8ee409447cae51651fd812437a0047ed8d7f44e94171ee05ce7cb955c96" +checksum = "dcbb7c138f797192f46afdd3ec16f85ef007c3bb45fa8e5174031f17b0be4c4a" dependencies = [ - "cranelift-entity 0.91.0", + "cranelift-entity 0.97.1", "serde", "thiserror", - "wasmparser 0.95.0", + "wasmparser 0.107.0", ] [[package]] name = "wasmtime-wasi" -version = "4.0.0" +version = "10.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f32b06e3282ccbeab6fb96c64fa12a359f1253022dfd5cf99385b2344e70830" +checksum = "01686e859249d4dffe3d7ce9957ae35bcf4161709dfafd165ee136bd54d179f1" dependencies = [ "anyhow", + "async-trait", + "bitflags 1.3.2", + "cap-fs-ext", + "cap-rand", + "cap-std", + "cap-time-ext", + "fs-set-times", + "io-extras", + "libc", + "rustix 0.37.19", + "system-interface", + "thiserror", + "tracing", "wasi-cap-std-sync", "wasi-common", "wasmtime", "wiggle", + "windows-sys 0.48.0", +] + +[[package]] +name = "wasmtime-winch" +version = "10.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60160d8f7d2b301790730dac8ff25156c61d4fed79481e7074c21dd1283cfe2f" +dependencies = [ + "anyhow", + "cranelift-codegen 0.97.1", + "gimli 0.27.2", + "object 0.30.4", + "target-lexicon", + "wasmparser 0.107.0", + "wasmtime-cranelift-shared", + "wasmtime-environ", + "winch-codegen", +] + +[[package]] +name = "wasmtime-wit-bindgen" +version = "10.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3334b0466a4d340de345cda83474d1d2c429770c3d667877971407672bc618a" +dependencies = [ + "anyhow", + "heck 0.4.1", 
+ "wit-parser", ] [[package]] @@ -3897,25 +5216,91 @@ dependencies = [ [[package]] name = "wast" -version = "52.0.2" +version = "62.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707a9fd59b0144c530f0a31f21737036ffea6ece492918cae0843dd09b6f9bc9" +checksum = "b8ae06f09dbe377b889fbd620ff8fa21e1d49d1d9d364983c0cdbf9870cb9f1f" dependencies = [ "leb128", "memchr", "unicode-width", - "wasm-encoder", + "wasm-encoder 0.31.1", ] [[package]] name = "wat" -version = "1.0.56" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "842e15861d203fb4a96d314b0751cdeaf0f6f8b35e8d81d2953af2af5e44e637" +dependencies = [ + "wast 62.0.1", +] + +[[package]] +name = "web-sys" +version = "0.3.61" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webc" +version = "5.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d8b985cecc5a364f746c7fcd6e5396986360a58550072f2f9147a07532f525c" +dependencies = [ + "anyhow", + "base64", + "byteorder", + "bytes", + "flate2", + "indexmap 1.9.3", + "leb128", + "lexical-sort", + "once_cell", + "path-clean", + "rand", + "serde", + "serde_cbor", + "serde_json", + "sha2", + "shared-buffer", + "tar", + "tempfile", + "thiserror", + "toml 0.7.6", + "url", + "walkdir", + "wasmer-toml", +] + +[[package]] +name = "webpki" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "webpki-roots" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91d73cbaa81acc2f8a3303e2289205c971d99c89245c2f56ab8765c4daabc2be" +checksum = 
"b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" dependencies = [ - "wast 52.0.2", + "webpki", ] +[[package]] +name = "weezl" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9193164d4de03a926d909d3bc7c30543cecb35400c02114792c2cae20d5e2dbb" + [[package]] name = "which" version = "4.4.0" @@ -3929,13 +5314,13 @@ dependencies = [ [[package]] name = "wiggle" -version = "4.0.0" +version = "10.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2433252352677648dc4ac0c99e7e254e1c58be8019cda3323ab3a3ce29da5b" +checksum = "ea93d31f59f2b2fa4196990b684771500072d385eaac12587c63db2bc185d705" dependencies = [ "anyhow", "async-trait", - "bitflags", + "bitflags 1.3.2", "thiserror", "tracing", "wasmtime", @@ -3944,28 +5329,28 @@ dependencies = [ [[package]] name = "wiggle-generate" -version = "4.0.0" +version = "10.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c15bf89e66bd1a9463ee529d37b999947befafd792f345d4a82e0d2b28c0845f" +checksum = "7df96ee6bea595fabf0346c08c553f684b08e88fad6fdb125e6efde047024f7b" dependencies = [ "anyhow", - "heck", + "heck 0.4.1", "proc-macro2", "quote", "shellexpand", - "syn", + "syn 1.0.109", "witx", ] [[package]] name = "wiggle-macro" -version = "4.0.0" +version = "10.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "919fb8f106375c7f6daf7b388a1fea3e2092dedb273b17b2d917522917c07a3c" +checksum = "8649011a011ecca6197c4db6ee630735062ba20595ea56ce58529b3b1c20aa2f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "wiggle-generate", ] @@ -4000,6 +5385,31 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "winch-codegen" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"525fdd0d4e82d1bd3083bd87e8ca8014abfbdc5bf290d1d5371dac440d351e89" +dependencies = [ + "anyhow", + "cranelift-codegen 0.97.1", + "gimli 0.27.2", + "regalloc2 0.9.1", + "smallvec", + "target-lexicon", + "wasmparser 0.107.0", + "wasmtime-environ", +] + +[[package]] +name = "windows" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +dependencies = [ + "windows-targets 0.48.0", +] + [[package]] name = "windows-sys" version = "0.33.0" @@ -4015,24 +5425,63 @@ dependencies = [ [[package]] name = "windows-sys" -version = "0.42.0" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.0", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc 0.42.0", - "windows_i686_gnu 0.42.0", - "windows_i686_msvc 0.42.0", - 
"windows_x86_64_gnu 0.42.0", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.42.0", + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.1" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" [[package]] name = "windows_aarch64_msvc" @@ -4042,9 +5491,15 @@ checksum = "cd761fd3eb9ab8cc1ed81e56e567f02dd82c4c837e48ac3b2181b9ffc5060807" [[package]] name = "windows_aarch64_msvc" -version = "0.42.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" [[package]] name = "windows_i686_gnu" @@ -4054,9 +5509,15 @@ checksum = "cab0cf703a96bab2dc0c02c0fa748491294bf9b7feb27e1f4f96340f208ada0e" [[package]] name = "windows_i686_gnu" -version = "0.42.0" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" [[package]] name = "windows_i686_msvc" @@ -4066,9 +5527,15 @@ checksum = "8cfdbe89cc9ad7ce618ba34abc34bbb6c36d99e96cae2245b7943cd75ee773d0" [[package]] name = "windows_i686_msvc" -version = "0.42.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" [[package]] name = "windows_x86_64_gnu" @@ -4078,15 +5545,27 @@ checksum = "b4dd9b0c0e9ece7bb22e84d70d01b71c6d6248b81a3c60d11869451b4cb24784" [[package]] name = "windows_x86_64_gnu" -version = "0.42.0" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" +checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" [[package]] name = 
"windows_x86_64_msvc" @@ -4096,19 +5575,69 @@ checksum = "ff1e4aa646495048ec7f3ffddc411e1d829c026a2ec62b39da15c1055e406eaa" [[package]] name = "windows_x86_64_msvc" -version = "0.42.0" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" + +[[package]] +name = "winnow" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25b5872fa2e10bd067ae946f927e726d7d603eaeb6e02fa6a350e0722d2b8c11" +dependencies = [ + "memchr", +] + +[[package]] +name = "winreg" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" +dependencies = [ + "winapi", +] [[package]] name = "winx" -version = "0.34.0" +version = "0.35.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c52a121f0fbf9320d5f2a9a5d82f6cb7557eda5e8b47fc3e7f359ec866ae960" +dependencies = [ + "bitflags 1.3.2", + "io-lifetimes 1.0.11", + "windows-sys 0.48.0", +] + +[[package]] +name = "winx" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4857cedf8371f690bb6782a3e2b065c54d1b6661be068aaf3eac8b45e813fdf8" +dependencies = [ + "bitflags 2.3.3", + "windows-sys 0.48.0", +] + +[[package]] +name = "wit-parser" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9baf690e238840de84bbfad6ad72d6628c41d34c1a5e276dab7fb2c9167ca1ac" +checksum = "6daec9f093dbaea0e94043eeb92ece327bbbe70c86b1f41aca9bbfefd7f050f0" dependencies = [ - "bitflags", - "io-lifetimes", - "windows-sys 
0.42.0", + "anyhow", + "id-arena", + "indexmap 1.9.3", + "log", + "pulldown-cmark", + "semver 1.0.17", + "unicode-xid", + "url", ] [[package]] @@ -4123,6 +5652,15 @@ dependencies = [ "wast 35.0.2", ] +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + [[package]] name = "xattr" version = "0.2.3" @@ -4132,9 +5670,18 @@ dependencies = [ "libc", ] +[[package]] +name = "yaml-rust" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +dependencies = [ + "linked-hash-map", +] + [[package]] name = "youki" -version = "0.0.4" +version = "0.1.0" dependencies = [ "anyhow", "caps", @@ -4144,17 +5691,25 @@ dependencies = [ "libcgroups", "libcontainer", "liboci-cli", - "log", "nix", - "oci-spec 0.5.8", "once_cell", "pentacle", "procfs", + "scopeguard", "serde", "serde_json", "serial_test", "tabwriter", + "tempfile", + "tracing", + "tracing-journald", + "tracing-subscriber", "vergen", + "wasmedge-sdk", + "wasmer", + "wasmer-wasix", + "wasmtime", + "wasmtime-wasi", ] [[package]] @@ -4178,9 +5733,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.6+zstd.1.5.2" +version = "2.0.8+zstd.1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68a3f9792c0c3dc6c165840a75f47ae1f4da402c2d006881129579f6597e801b" +checksum = "5556e6ee25d32df2586c098bbfa278803692a20d0ab9565e049480d52707ec8c" dependencies = [ "cc", "libc", diff --git a/Cargo.toml b/Cargo.toml index 0fc295d8a..86d2aca24 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,4 +1,5 @@ [workspace] +resolver = "2" members = ["tests/rust-integration-tests/*", "crates/*", "tools/*"] [profile.release] diff --git a/Makefile b/Makefile deleted file mode 100644 index 92668f2bd..000000000 --- a/Makefile +++ /dev/null @@ 
-1,53 +0,0 @@ -ROOT = $(shell git rev-parse --show-toplevel) - -# builds - -build: youki-release - -youki: youki-dev # helper - -youki-dev: - ./scripts/build.sh -o $(ROOT) -c youki - -youki-release: - ./scripts/build.sh -o $(ROOT) -r -c youki - -runtimetest: - ./scripts/build.sh -o $(ROOT) -r -c runtimetest - -rust-oci-tests-bin: - ./scripts/build.sh -o $(ROOT) -r -c integration-test - -all: youki-release rust-oci-tests-bin runtimetest - -# Tests -unittest: - cd ./crates && LD_LIBRARY_PATH=${HOME}/.wasmedge/lib cargo test --all --all-targets --all-features - -featuretest: - ./scripts/features_test.sh - -oci-tests: youki-release - ./scripts/oci_integration_tests.sh $(ROOT) - -rust-oci-tests: youki-release runtimetest rust-oci-tests-bin - ./scripts/rust_integration_tests.sh $(ROOT)/youki - -validate-rust-oci-runc: runtimetest rust-oci-tests-bin - ./scripts/rust_integration_tests.sh runc - -containerd-test: youki-dev - VAGRANT_VAGRANTFILE=Vagrantfile.containerd2youki vagrant up - VAGRANT_VAGRANTFILE=Vagrantfile.containerd2youki vagrant provision --provision-with test - -test-oci: oci-tests rust-oci-tests - -test-all: unittest featuretest oci-tests containerd-test # currently not doing rust-oci here - -# Misc - -lint: - cargo clippy --all-targets --all-features - -clean: - ./scripts/clean.sh $(ROOT) diff --git a/README.md b/README.md index 5a15faefa..1ac3a4a23 100644 --- a/README.md +++ b/README.md @@ -6,12 +6,16 @@ [![codecov](https://codecov.io/gh/containers/youki/branch/main/graph/badge.svg)](https://codecov.io/gh/containers/youki)

- +

youki is an implementation of the [OCI runtime-spec](https://github.com/opencontainers/runtime-spec) in Rust, similar to [runc](https://github.com/opencontainers/runc). Your ideas are welcome [here](https://github.com/containers/youki/issues/10). +# Quick Install + +[User Documentation](https://containers.github.io/youki/user/basic_setup.html#quick-install) + # About the name youki is pronounced as /joสŠki/ or yoh-key. @@ -32,10 +36,13 @@ Here is why we are writing a new container runtime in Rust. Details about the benchmark - A command used for the benchmark - ```console - $ hyperfine --prepare 'sudo sync; echo 3 | sudo tee /proc/sys/vm/drop_caches' --warmup 10 --min-runs 100 'sudo ./youki create -b tutorial a && sudo ./youki start a && sudo ./youki delete -f a' + + ```bash + hyperfine --prepare 'sudo sync; echo 3 | sudo tee /proc/sys/vm/drop_caches' --warmup 10 --min-runs 100 'sudo ./youki create -b tutorial a && sudo ./youki start a && sudo ./youki delete -f a' ``` + - Environment + ```console $ ./youki info Version 0.0.1 @@ -91,9 +98,9 @@ Here is why we are writing a new container runtime in Rust. spec: 1.0.0 +SYSTEMD +SELINUX +APPARMOR +CAP +SECCOMP +EBPF +CRIU +YAJL ``` + -- The development of [railcar](https://github.com/oracle/railcar) has been suspended. This project was very nice but is no longer being developed. This project is inspired by it. - I have fun implementing this. In fact, this may be the most important. # Related project @@ -106,7 +113,8 @@ youki is not at the practical stage yet. 
However, it is getting closer to practi ![youki demo](docs/demo.gif) | Feature | Description | State | -| :-------------------: | :---------------------------------------------: | :-------------------------------------------------------------------------------------------------: | +|:---------------------:|:-----------------------------------------------:| :-------------------------------------------------------------------------------------------------: | +| Containerd | Running via Containerd | โœ… | | Docker | Running via Docker | โœ… | | Podman | Running via Podman | โœ… | | pivot_root | Change the root directory | โœ… | @@ -138,65 +146,74 @@ For other platforms, please use the [Vagrantfile](#setting-up-vagrant) that we h ## Requires - Rust(See [here](https://www.rust-lang.org/tools/install)), edition 2021 -- Docker(See [here](https://docs.docker.com/engine/install)) +- linux kernel โ‰ฅ 5.3 ## Dependencies +To install `just`, follow the instruction [here](https://github.com/casey/just#installation). 
+ ### Debian, Ubuntu and related distributions ```console -$ sudo apt-get install \ - pkg-config \ - libsystemd-dev \ - libdbus-glib-1-dev \ - build-essential \ - libelf-dev \ - libseccomp-dev \ - libclang-dev +$ sudo apt-get install \ + pkg-config \ + libsystemd-dev \ + libdbus-glib-1-dev \ + build-essential \ + libelf-dev \ + libseccomp-dev \ + libclang-dev \ + glibc-static \ + libssl-dev ``` -### Fedora, Centos, RHEL and related distributions +### Fedora, CentOS, RHEL and related distributions ```console -$ sudo dnf install \ - pkg-config \ - systemd-devel \ - dbus-devel \ +$ sudo dnf install \ + pkg-config \ + systemd-devel \ + dbus-devel \ elfutils-libelf-devel \ - libseccomp-devel \ - libclang-dev + libseccomp-devel \ + clang-devel \ + openssl-devel ``` ## Build -```console -$ git clone git@github.com:containers/youki.git -$ cd youki -$ make youki-dev # or youki-release -$ ./youki -h # you can get information about youki command +```bash +git clone git@github.com:containers/youki.git +cd youki +just youki-dev # or youki-release +./youki -h # you can get information about youki command ``` ## Tutorial +### Requires + +- Docker(See [here](https://docs.docker.com/engine/install)) + ### Create and run a container Let's try to run a container that executes `sleep 30` with youki. This tutorial may need root permission. -```console -$ git clone git@github.com:containers/youki.git -$ cd youki -$ make youki-dev # or youki-release +```bash +git clone git@github.com:containers/youki.git +cd youki +just youki-dev # or youki-release -$ mkdir -p tutorial/rootfs -$ cd tutorial +mkdir -p tutorial/rootfs +cd tutorial # use docker to export busybox into the rootfs directory -$ docker export $(docker create busybox) | tar -C rootfs -xvf - +docker export $(docker create busybox) | tar -C rootfs -xvf - ``` Then, we need to prepare a configuration file. 
This file contains metadata and specs for a container, such as the process to run, environment variables to inject, sandboxing features to use, etc. -```console -$ ../youki spec # will generate a spec file named config.json +```bash +../youki spec # will generate a spec file named config.json ``` We can edit the `config.json` to add customized behaviors for container. Here, we modify the `process` field to run `sleep 30`. @@ -214,13 +231,13 @@ We can edit the `config.json` to add customized behaviors for container. Here, w Then we can explore the lifecycle of a container: -```console -$ cd .. # go back to the repository root -$ sudo ./youki create -b tutorial tutorial_container # create a container with name `tutorial_container` -$ sudo ./youki state tutorial_container # you can see the state the container is `created` -$ sudo ./youki start tutorial_container # start the container -$ sudo ./youki list # will show the list of containers, the container is `running` -$ sudo ./youki delete tutorial_container # delete the container +```bash +cd .. # go back to the repository root +sudo ./youki create -b tutorial tutorial_container # create a container with name `tutorial_container` +sudo ./youki state tutorial_container # you can see the state the container is `created` +sudo ./youki start tutorial_container # start the container +sudo ./youki list # will show the list of containers, the container is `running` +sudo ./youki delete tutorial_container # delete the container ``` Change the command to be executed in `config.json` and try something other than `sleep 30`. @@ -229,7 +246,7 @@ Change the command to be executed in `config.json` and try something other than `youki` provides the ability to run containers as non-root user([rootless mode](https://docs.docker.com/engine/security/rootless/)). 
To run a container in rootless mode, we need to add some extra options in `config.json`, other steps are same with above: -```console +```bash $ mkdir -p tutorial/rootfs $ cd tutorial # use docker to export busybox into the rootfs directory @@ -245,13 +262,13 @@ $ ../youki run rootless-container # will create and run a container with rootl Start the docker daemon. -```console -$ dockerd --experimental --add-runtime="youki=$(pwd)/target/x86_64-unknown-linux-gnu/debug/youki" +```bash +dockerd --experimental --add-runtime="youki=$(pwd)/youki" ``` If you get an error like the below, that means your normal Docker daemon is running, and it needs to be stopped. Do that with your init system (i.e., with systemd, run `systemctl stop docker`, as root if necessary). -``` +```console failed to start daemon: pid file found, ensure docker is not running or delete /var/run/docker.pid ``` @@ -259,44 +276,44 @@ Now repeat the command, which should start the docker daemon. You can use youki in a different terminal to start the container. -```console -$ docker run -it --rm --runtime youki busybox +```bash +docker run -it --rm --runtime youki busybox ``` Afterwards, you can close the docker daemon process in other the other terminal. To restart normal docker daemon (if you had stopped it before), run: -```console -$ systemctl start docker # might need root permission +```bash +systemctl start docker # might need root permission ``` ### Integration Tests Go and node-tap are required to run integration tests. See the [opencontainers/runtime-tools](https://github.com/opencontainers/runtime-tools) README for details. -```console -$ git submodule update --init --recursive -$ make oci-tests +```bash +git submodule update --init --recursive +just oci-tests ``` ### Setting up Vagrant You can try youki on platforms other than Linux by using the Vagrantfile we have prepared. 
We have prepared two environments for vagrant, namely rootless mode and rootful mode -```console -$ git clone git@github.com:containers/youki.git -$ cd youki +```bash +git clone git@github.com:containers/youki.git +cd youki # If you want to develop in rootless mode, and this is the default mode -$ vagrant up -$ vagrant ssh +vagrant up +vagrant ssh # or if you want to develop in rootful mode -$ VAGRANT_VAGRANTFILE=Vagrantfile.root vagrant up -$ VAGRANT_VAGRANTFILE=Vagrantfile.root vagrant ssh +VAGRANT_VAGRANTFILE=Vagrantfile.root vagrant up +VAGRANT_VAGRANTFILE=Vagrantfile.root vagrant ssh # in virtual machine -$ cd youki -$ make youki-dev # or youki-release +cd youki +just youki-dev # or youki-release ``` # Community diff --git a/Vagrantfile b/Vagrantfile index ea371889b..7e68fc9fc 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -12,7 +12,7 @@ Vagrant.configure("2") do |config| config.vm.provision "shell", inline: <<-SHELL set -e -u -o pipefail yum update -y - yum install -y git gcc docker systemd-devel dbus-devel libseccomp-devel + yum install -y git gcc docker wget pkg-config systemd-devel dbus-devel elfutils-libelf-devel libseccomp-devel clang-devel openssl-devel grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0" service docker start SHELL diff --git a/Vagrantfile.containerd2youki b/Vagrantfile.containerd2youki index e8240e6a1..6907a0197 100644 --- a/Vagrantfile.containerd2youki +++ b/Vagrantfile.containerd2youki @@ -31,7 +31,7 @@ Vagrant.configure("2") do |config| export GOPATH=$HOME/go git clone https://github.com/containerd/containerd \ - /root/go/src/github.com/containerd/containerd -b v1.5.11 + /root/go/src/github.com/containerd/containerd -b v1.6.20 cd /root/go/src/github.com/containerd/containerd make diff --git a/Vagrantfile.root b/Vagrantfile.root index e44047f99..8a622e279 100644 --- a/Vagrantfile.root +++ b/Vagrantfile.root @@ -14,7 +14,7 @@ Vagrant.configure("2") do |config| config.vm.provision "shell", inline: <<-SHELL set -e -u -o 
pipefail yum update -y - yum install -y git gcc docker systemd-devel dbus-devel libseccomp-devel + yum install -y git gcc docker wget pkg-config systemd-devel dbus-devel elfutils-libelf-devel libseccomp-devel clang-devel openssl-devel grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0" service docker start curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y @@ -23,4 +23,3 @@ Vagrant.configure("2") do |config| config.ssh.username = 'root' config.ssh.insert_key = 'true' end - diff --git a/crates/Makefile b/crates/Makefile deleted file mode 100644 index a4f211111..000000000 --- a/crates/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -.PHONY : debug release - -debug: - cargo build && cp ./target/debug/youki ./youki_bin - -release: - cargo build --release && cp ./target/release/youki ./youki_bin - -clean: - rm ./youki_bin diff --git a/crates/libcgroups/Cargo.toml b/crates/libcgroups/Cargo.toml index 618e1c188..b8f79cd34 100644 --- a/crates/libcgroups/Cargo.toml +++ b/crates/libcgroups/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libcgroups" -version = "0.0.4" +version = "0.1.0" description = "Library for cgroup" license-file = "../../LICENSE" repository = "https://github.com/containers/youki" @@ -9,7 +9,7 @@ readme = "README.md" authors = ["youki team"] edition = "2021" rust-version = "1.58.1" -autoexamples = false +autoexamples = true keywords = ["youki", "container", "cgroups"] [features] @@ -20,25 +20,27 @@ systemd = ["v2", "dep:dbus"] cgroupsv2_devices = ["rbpf", "libbpf-sys", "errno", "libc"] [dependencies] -nix = "0.25.0" -procfs = "0.14.2" -log = "0.4" -anyhow = "1.0" -oci-spec = { version = "^0.5.5", features = ["runtime"] } +nix = "0.26.2" +procfs = "0.15.1" +oci-spec = { version = "~0.6.2", features = ["runtime"] } dbus = { version = "0.9.7", optional = true } fixedbitset = "0.4.2" serde = { version = "1.0", features = ["derive"] } -rbpf = {version = "0.1.0", optional = true } -libbpf-sys = { version = "1.1.1+v1.0.1", optional = 
true } -errno = { version = "0.2.8", optional = true } -libc = { version = "0.2.139", optional = true } +rbpf = {version = "0.2.0", optional = true } +libbpf-sys = { version = "1.2.1", optional = true } +errno = { version = "0.3.1", optional = true } +libc = { version = "0.2.147", optional = true } +thiserror = "1.0.44" +tracing = { version = "0.1.37", features = ["attributes"]} [dev-dependencies] -oci-spec = { version = "^0.5.5", features = ["proptests", "runtime"] } +anyhow = "1.0" +oci-spec = { version = "~0.6.2", features = ["proptests", "runtime"] } quickcheck = "1" -mockall = { version = "0.11.3", features = [] } -clap = "4.0.32" +mockall = { version = "0.11.4", features = [] } +clap = "4.1.6" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" env_logger = "0.10" -serial_test = "1.0.0" +serial_test = "2.0.0" +tempfile = "3" diff --git a/crates/libcgroups/examples/bpf.rs b/crates/libcgroups/examples/bpf.rs index 662802e7a..a1a57284d 100644 --- a/crates/libcgroups/examples/bpf.rs +++ b/crates/libcgroups/examples/bpf.rs @@ -1,100 +1,117 @@ -use anyhow::{bail, Result}; -use clap::{Arg, SubCommand}; -use std::os::unix::io::AsRawFd; -use std::path::Path; +use anyhow::Result; -use nix::fcntl::OFlag; -use nix::sys::stat::Mode; +#[cfg(feature = "cgroupsv2_devices")] +mod bpf { + use anyhow::{bail, Result}; + use clap::Arg; + use clap::Command; + use oci_spec::runtime::LinuxDeviceCgroup; + use std::os::unix::io::AsRawFd; + use std::path::Path; -use cgroups::v2::devices::bpf; -use cgroups::v2::devices::emulator; -use cgroups::v2::devices::program; -use oci_spec::*; + use nix::fcntl::OFlag; + use nix::sys::stat::Mode; -const LICENSE: &'static str = &"Apache"; + use libcgroups::v2::devices::bpf; + use libcgroups::v2::devices::emulator; + use libcgroups::v2::devices::program; -fn main() -> Result<()> { - env_logger::init(); + const LICENSE: &str = "Apache"; + fn cli() -> Command { + clap::Command::new("bpf") + .version("0.1") + .about("tools to test BPF 
program for cgroups v2 devices") + .arg(Arg::new("cgroup_dir").short('c').value_name("CGROUP_DIR")) + .subcommand( + Command::new("query").about("query list of BPF programs attached to cgroup dir"), + ) + .subcommand( + Command::new("detach") + .about("detach BPF program by id") + .arg( + Arg::new("id") + .value_name("PROG_ID") + .required(true) + .help("ID of BPF program returned by query command"), + ), + ) + .subcommand( + Command::new("attach") + .about("compile rules to BPF and attach to cgroup dir") + .arg( + Arg::new("input_file") + .value_name("INPUT_FILE") + .required(true) + .help("File contains Vec in json format"), + ), + ) + } - let matches = clap::App::new("bpf") - .version("0.1") - .about("tools to test BPF program for cgroups v2 devices") - .arg( - Arg::with_name("cgroup_dir") - .short("c") - .value_name("CGROUP_DIR"), - ) - .subcommand( - SubCommand::with_name("query") - .help("query list of BPF programs attached to cgroup dir"), - ) - .subcommand( - SubCommand::with_name("detach") - .help("detach BPF program by id") - .arg( - Arg::with_name("id") - .value_name("PROG_ID") - .required(true) - .help("ID of BPF program returned by query command"), - ), - ) - .subcommand( - SubCommand::with_name("attach") - .help("compile rules to BPF and attach to cgroup dir") - .arg( - Arg::with_name("input_file") - .value_name("INPUT_FILE") - .required(true) - .help("File contains Vec in json format"), - ), - ) - .get_matches_safe()?; + fn parse_cgroupv1_device_rules>(path: P) -> Result> { + let content = std::fs::read_to_string(path)?; + let devices = serde_json::from_str(&content)?; + Ok(devices) + } - let cgroup_dir = matches.value_of("cgroup_dir").unwrap(); - - let cgroup_fd = nix::dir::Dir::open( - cgroup_dir, - OFlag::O_RDONLY | OFlag::O_DIRECTORY, - Mode::from_bits(0o600).unwrap(), - )?; + pub fn run() -> Result<()> { + let matches = cli().get_matches(); + let cgroup_dir = matches.get_one::("cgroup_dir").unwrap(); + let cgroup_fd = nix::dir::Dir::open( + 
cgroup_dir.as_str(), + OFlag::O_RDONLY | OFlag::O_DIRECTORY, + Mode::from_bits(0o600).unwrap(), + )?; + match matches.subcommand() { + Some(("query", _)) => { + let progs = bpf::prog::query(cgroup_fd.as_raw_fd())?; + for prog in &progs { + println!("prog: id={}, fd={}", prog.id, prog.fd); + } + } + Some(("detach", submatch)) => { + let prog_id = submatch.get_one::("id").unwrap().parse::()?; + let progs = bpf::prog::query(cgroup_fd.as_raw_fd())?; + let prog = progs.iter().find(|v| v.id == prog_id); + if prog.is_none() { + bail!("can't get prog fd by prog id"); + } - match matches.subcommand() { - ("query", Some(_)) => { - let progs = bpf::prog_query(cgroup_fd.as_raw_fd())?; - for prog in &progs { - println!("prog: id={}, fd={}", prog.id, prog.fd); + bpf::prog::detach2(prog.unwrap().fd, cgroup_fd.as_raw_fd())?; + println!("detach ok"); } - } - ("detach", Some(submatch)) => { - let prog_id = submatch.value_of("id").unwrap().parse::()?; - let progs = bpf::prog_query(cgroup_fd.as_raw_fd())?; - let prog = progs.iter().find(|v| v.id == prog_id); - if prog.is_none() { - bail!("can't get prog fd by prog id"); + Some(("attach", submatch)) => { + let input_file = submatch.get_one::("input_file").unwrap(); + let rules = parse_cgroupv1_device_rules(input_file)?; + let mut emulator = emulator::Emulator::with_default_allow(false); + emulator.add_rules(&rules); + let prog = program::Program::from_rules(&emulator.rules, emulator.default_allow)?; + let prog_fd = bpf::prog::load(LICENSE, prog.bytecodes())?; + bpf::prog::attach(prog_fd, cgroup_fd.as_raw_fd())?; + println!("attach ok"); } - bpf::prog_detach2(prog.unwrap().fd, cgroup_fd.as_raw_fd())?; - println!("detach ok"); - } - ("attach", Some(submatch)) => { - let input_file = submatch.value_of("input_file").unwrap(); - let rules = parse_cgroupv1_device_rules(&input_file)?; - let mut emulator = emulator::Emulator::with_default_allow(false); - emulator.add_rules(&rules)?; - let prog = program::Program::from_rules(&emulator.rules, 
emulator.default_allow)?; - let prog_fd = bpf::prog_load(LICENSE, prog.bytecodes())?; - bpf::prog_attach(prog_fd, cgroup_fd.as_raw_fd())?; - println!("attach ok"); - } + _ => unreachable!(), + }; + Ok(()) + } +} - (_, _) => {} - }; +#[cfg(not(feature = "cgroupsv2_devices"))] +mod bpf { + use anyhow::{bail, Result}; - Ok(()) + pub fn run() -> Result<()> { + if !cfg!(feature = "cgroupsv2_devices") { + bail!("cgroupsv2_devices feature is not enabled"); + } + + unreachable!() + } } -fn parse_cgroupv1_device_rules>(path: P) -> Result> { - let content = std::fs::read_to_string(path)?; - let devices = serde_json::from_str(&content)?; - Ok(devices) +fn main() -> Result<()> { + env_logger::init(); + bpf::run()?; + + Ok(()) } diff --git a/crates/libcgroups/src/common.rs b/crates/libcgroups/src/common.rs index 538e95a61..a0d20424d 100644 --- a/crates/libcgroups/src/common.rs +++ b/crates/libcgroups/src/common.rs @@ -2,11 +2,10 @@ use std::{ fmt::{Debug, Display}, fs::{self, File}, io::{BufRead, BufReader, Write}, - path::{Path, PathBuf}, + path::{Path, PathBuf, StripPrefixError}, time::Duration, }; -use anyhow::{bail, Context, Result}; use nix::{ sys::statfs::{statfs, CGROUP2_SUPER_MAGIC, TMPFS_MAGIC}, unistd::Pid, @@ -17,11 +16,8 @@ use oci_spec::runtime::{ LinuxDevice, LinuxDeviceBuilder, LinuxDeviceCgroup, LinuxDeviceCgroupBuilder, LinuxDeviceType, }; -#[cfg(feature = "systemd")] use super::systemd; -#[cfg(feature = "v1")] use super::v1; -#[cfg(feature = "v2")] use super::v2; use super::stats::Stats; @@ -30,23 +26,93 @@ pub const CGROUP_PROCS: &str = "cgroup.procs"; pub const DEFAULT_CGROUP_ROOT: &str = "/sys/fs/cgroup"; pub trait CgroupManager { + type Error; + /// Adds a task specified by its pid to the cgroup - fn add_task(&self, pid: Pid) -> Result<()>; + fn add_task(&self, pid: Pid) -> Result<(), Self::Error>; /// Applies resource restrictions to the cgroup - fn apply(&self, controller_opt: &ControllerOpt) -> Result<()>; + fn apply(&self, controller_opt: 
&ControllerOpt) -> Result<(), Self::Error>; /// Removes the cgroup - fn remove(&self) -> Result<()>; + fn remove(&self) -> Result<(), Self::Error>; /// Sets the freezer cgroup to the specified state - fn freeze(&self, state: FreezerState) -> Result<()>; + fn freeze(&self, state: FreezerState) -> Result<(), Self::Error>; /// Retrieve statistics for the cgroup - fn stats(&self) -> Result; + fn stats(&self) -> Result; /// Gets the PIDs inside the cgroup - fn get_all_pids(&self) -> Result>; + fn get_all_pids(&self) -> Result, Self::Error>; +} + +#[derive(thiserror::Error, Debug)] +pub enum AnyManagerError { + #[error(transparent)] + Systemd(#[from] systemd::manager::SystemdManagerError), + #[error(transparent)] + V1(#[from] v1::manager::V1ManagerError), + #[error(transparent)] + V2(#[from] v2::manager::V2ManagerError), +} + +pub enum AnyCgroupManager { + Systemd(systemd::manager::Manager), + V1(v1::manager::Manager), + V2(v2::manager::Manager), +} + +impl CgroupManager for AnyCgroupManager { + type Error = AnyManagerError; + + fn add_task(&self, pid: Pid) -> Result<(), Self::Error> { + match self { + AnyCgroupManager::Systemd(m) => Ok(m.add_task(pid)?), + AnyCgroupManager::V1(m) => Ok(m.add_task(pid)?), + AnyCgroupManager::V2(m) => Ok(m.add_task(pid)?), + } + } + + fn apply(&self, controller_opt: &ControllerOpt) -> Result<(), Self::Error> { + match self { + AnyCgroupManager::Systemd(m) => Ok(m.apply(controller_opt)?), + AnyCgroupManager::V1(m) => Ok(m.apply(controller_opt)?), + AnyCgroupManager::V2(m) => Ok(m.apply(controller_opt)?), + } + } + + fn remove(&self) -> Result<(), Self::Error> { + match self { + AnyCgroupManager::Systemd(m) => Ok(m.remove()?), + AnyCgroupManager::V1(m) => Ok(m.remove()?), + AnyCgroupManager::V2(m) => Ok(m.remove()?), + } + } + + fn freeze(&self, state: FreezerState) -> Result<(), Self::Error> { + match self { + AnyCgroupManager::Systemd(m) => Ok(m.freeze(state)?), + AnyCgroupManager::V1(m) => Ok(m.freeze(state)?), + AnyCgroupManager::V2(m) 
=> Ok(m.freeze(state)?), + } + } + + fn stats(&self) -> Result { + match self { + AnyCgroupManager::Systemd(m) => Ok(m.stats()?), + AnyCgroupManager::V1(m) => Ok(m.stats()?), + AnyCgroupManager::V2(m) => Ok(m.stats()?), + } + } + + fn get_all_pids(&self) -> Result, Self::Error> { + match self { + AnyCgroupManager::Systemd(m) => Ok(m.get_all_pids()?), + AnyCgroupManager::V1(m) => Ok(m.get_all_pids()?), + AnyCgroupManager::V2(m) => Ok(m.get_all_pids()?), + } + } } #[derive(Debug)] @@ -64,11 +130,11 @@ impl Display for CgroupSetup { CgroupSetup::Unified => "unified", }; - write!(f, "{}", print) + write!(f, "{print}") } } -/// FreezerState is given freezer contoller +/// FreezerState is given freezer controller #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum FreezerState { /// Tasks in cgroup are undefined @@ -88,43 +154,107 @@ pub struct ControllerOpt<'a> { pub disable_oom_killer: bool, /// Specify an oom_score_adj for container. pub oom_score_adj: Option, - /// FreezerState is given to freezer contoller for suspending process. + /// FreezerState is given to freezer controller for suspending process. pub freezer_state: Option, } +#[derive(thiserror::Error, Debug)] +pub enum WrappedIoError { + #[error("failed to open {path}: {err}")] + Open { err: std::io::Error, path: PathBuf }, + #[error("failed to write {data} to {path}: {err}")] + Write { + err: std::io::Error, + path: PathBuf, + data: String, + }, + #[error("failed to read {path}: {err}")] + Read { err: std::io::Error, path: PathBuf }, + #[error("failed to create dir {path}: {err}")] + CreateDir { err: std::io::Error, path: PathBuf }, + #[error("at {path}: {err}")] + Other { err: std::io::Error, path: PathBuf }, +} + +impl WrappedIoError { + pub fn inner(&self) -> &std::io::Error { + match self { + WrappedIoError::Open { err, .. } => err, + WrappedIoError::Write { err, .. } => err, + WrappedIoError::Read { err, .. } => err, + WrappedIoError::CreateDir { err, .. } => err, + WrappedIoError::Other { err, .. 
} => err, + } + } +} + #[inline] -pub fn write_cgroup_file_str>(path: P, data: &str) -> Result<()> { +pub fn write_cgroup_file_str>(path: P, data: &str) -> Result<(), WrappedIoError> { + let path = path.as_ref(); + fs::OpenOptions::new() .create(false) .write(true) .truncate(false) - .open(path.as_ref()) - .with_context(|| format!("failed to open {:?}", path.as_ref()))? + .open(path) + .map_err(|err| WrappedIoError::Open { + err, + path: path.to_path_buf(), + })? .write_all(data.as_bytes()) - .with_context(|| format!("failed to write {} to {:?}", data, path.as_ref()))?; + .map_err(|err| WrappedIoError::Write { + err, + path: path.to_path_buf(), + data: data.into(), + })?; Ok(()) } #[inline] -pub fn write_cgroup_file, T: ToString>(path: P, data: T) -> Result<()> { +pub fn write_cgroup_file, T: ToString>( + path: P, + data: T, +) -> Result<(), WrappedIoError> { + let path = path.as_ref(); let data = data.to_string(); + fs::OpenOptions::new() .create(false) .write(true) .truncate(false) - .open(path.as_ref()) - .with_context(|| format!("failed to open {:?}", path.as_ref()))? + .open(path) + .map_err(|err| WrappedIoError::Open { + err, + path: path.to_path_buf(), + })? 
.write_all(data.as_bytes()) - .with_context(|| format!("failed to write {} to {:?}", data, path.as_ref()))?; + .map_err(|err| WrappedIoError::Write { + err, + path: path.to_path_buf(), + data, + })?; Ok(()) } #[inline] -pub fn read_cgroup_file>(path: P) -> Result { +pub fn read_cgroup_file>(path: P) -> Result { let path = path.as_ref(); - fs::read_to_string(path).with_context(|| format!("failed to open {:?}", path)) + fs::read_to_string(path).map_err(|err| WrappedIoError::Read { + err, + path: path.to_path_buf(), + }) +} + +#[derive(thiserror::Error, Debug)] +pub enum GetCgroupSetupError { + #[error("io error: {0}")] + WrappedIo(#[from] WrappedIoError), + #[error("non default cgroup root not supported")] + NonDefault, + #[error("failed to detect cgroup setup")] + FailedToDetect, } /// Determines the cgroup setup of the system. Systems typically have one of @@ -135,7 +265,7 @@ pub fn read_cgroup_file>(path: P) -> Result { /// an additional unified hierarchy which doesn't have any /// controllers attached. Resource control can purely be achieved /// through the cgroup v1 hierarchy, not through the cgroup v2 hierarchy. -pub fn get_cgroup_setup() -> Result { +pub fn get_cgroup_setup() -> Result { let default_root = Path::new(DEFAULT_CGROUP_ROOT); match default_root.exists() { true => { @@ -143,12 +273,9 @@ pub fn get_cgroup_setup() -> Result { // If the filesystem is tmpfs instead the system is either in legacy or // hybrid mode. If a cgroup2 filesystem has been mounted under the "unified" // folder we are in hybrid mode, otherwise we are in legacy mode. 
- let stat = statfs(default_root).with_context(|| { - format!( - "failed to stat default cgroup root {}", - &default_root.display() - ) - })?; + let stat = statfs(default_root) + .map_err(|err| std::io::Error::new(std::io::ErrorKind::Other, err)) + .wrap_other(default_root)?; if stat.filesystem_type() == CGROUP2_SUPER_MAGIC { return Ok(CgroupSetup::Unified); } @@ -157,7 +284,8 @@ pub fn get_cgroup_setup() -> Result { let unified = Path::new("/sys/fs/cgroup/unified"); if Path::new(unified).exists() { let stat = statfs(unified) - .with_context(|| format!("failed to stat {}", unified.display()))?; + .map_err(|err| std::io::Error::new(std::io::ErrorKind::Other, err)) + .wrap_other(unified)?; if stat.filesystem_type() == CGROUP2_SUPER_MAGIC { return Ok(CgroupSetup::Hybrid); } @@ -166,113 +294,150 @@ pub fn get_cgroup_setup() -> Result { return Ok(CgroupSetup::Legacy); } } - false => bail!("non default cgroup root not supported"), + false => return Err(GetCgroupSetupError::NonDefault), } - bail!("failed to detect cgroup setup"); + Err(GetCgroupSetupError::FailedToDetect) } -pub fn create_cgroup_manager>( - cgroup_path: P, - systemd_cgroup: bool, - container_name: &str, -) -> Result> { - let cgroup_setup = get_cgroup_setup()?; - let cgroup_path = cgroup_path.into(); +#[derive(thiserror::Error, Debug)] +pub enum CreateCgroupSetupError { + #[error("io error: {0}")] + WrappedIo(#[from] WrappedIoError), + #[error("non default cgroup root not supported")] + NonDefault, + #[error("failed to detect cgroup setup")] + FailedToDetect, + #[error("v1 error: {0}")] + V1(#[from] v1::manager::V1ManagerError), + #[error("v2 error: {0}")] + V2(#[from] v2::manager::V2ManagerError), + #[error("systemd error: {0}")] + Systemd(#[from] systemd::manager::SystemdManagerError), +} + +#[derive(Clone)] +pub struct CgroupConfig { + pub cgroup_path: PathBuf, + pub systemd_cgroup: bool, + pub container_name: String, +} + +pub fn create_cgroup_manager( + config: CgroupConfig, +) -> Result { + let 
cgroup_setup = get_cgroup_setup().map_err(|err| match err { + GetCgroupSetupError::WrappedIo(err) => CreateCgroupSetupError::WrappedIo(err), + GetCgroupSetupError::NonDefault => CreateCgroupSetupError::NonDefault, + GetCgroupSetupError::FailedToDetect => CreateCgroupSetupError::FailedToDetect, + })?; + let cgroup_path = config.cgroup_path.as_path(); match cgroup_setup { - CgroupSetup::Legacy | CgroupSetup::Hybrid => create_v1_cgroup_manager(cgroup_path), + CgroupSetup::Legacy | CgroupSetup::Hybrid => { + Ok(create_v1_cgroup_manager(cgroup_path)?.any()) + } CgroupSetup::Unified => { // ref https://github.com/opencontainers/runtime-spec/blob/main/config-linux.md#cgroups-path - if cgroup_path.is_absolute() || !systemd_cgroup { - return create_v2_cgroup_manager(cgroup_path); + if cgroup_path.is_absolute() || !config.systemd_cgroup { + return Ok(create_v2_cgroup_manager(cgroup_path)?.any()); } - create_systemd_cgroup_manager(cgroup_path, container_name) + Ok(create_systemd_cgroup_manager(cgroup_path, config.container_name.as_str())?.any()) } } } #[cfg(feature = "v1")] -fn create_v1_cgroup_manager(cgroup_path: PathBuf) -> Result> { - log::info!("cgroup manager V1 will be used"); - Ok(Box::new(v1::manager::Manager::new(cgroup_path)?)) +fn create_v1_cgroup_manager( + cgroup_path: &Path, +) -> Result { + tracing::info!("cgroup manager V1 will be used"); + v1::manager::Manager::new(cgroup_path) } #[cfg(not(feature = "v1"))] -fn create_v1_cgroup_manager(_cgroup_path: PathBuf) -> Result> { - bail!("cgroup v1 feature is required, but was not enabled during compile time"); +fn create_v1_cgroup_manager( + _cgroup_path: &Path, +) -> Result { + Err(v1::manager::V1ManagerError::NotEnabled) } #[cfg(feature = "v2")] -fn create_v2_cgroup_manager(cgroup_path: PathBuf) -> Result> { - log::info!("cgroup manager V2 will be used"); - Ok(Box::new(v2::manager::Manager::new( - DEFAULT_CGROUP_ROOT.into(), - cgroup_path, - )?)) +fn create_v2_cgroup_manager( + cgroup_path: &Path, +) -> Result { + 
tracing::info!("cgroup manager V2 will be used"); + v2::manager::Manager::new(DEFAULT_CGROUP_ROOT.into(), cgroup_path.to_owned()) } #[cfg(not(feature = "v2"))] -fn create_v2_cgroup_manager(_cgroup_path: PathBuf) -> Result> { - bail!("cgroup v2 feature is required, but was not enabled during compile time"); +fn create_v2_cgroup_manager( + _cgroup_path: &Path, +) -> Result { + Err(v2::manager::V2ManagerError::NotEnabled) } #[cfg(feature = "systemd")] fn create_systemd_cgroup_manager( - cgroup_path: PathBuf, + cgroup_path: &Path, container_name: &str, -) -> Result> { +) -> Result { if !systemd::booted() { - bail!( + panic!( "systemd cgroup flag passed, but systemd support for managing cgroups is not available" ); } let use_system = nix::unistd::geteuid().is_root(); - log::info!( + tracing::info!( "systemd cgroup manager with system bus {} will be used", use_system ); - Ok(Box::new(systemd::manager::Manager::new( + systemd::manager::Manager::new( DEFAULT_CGROUP_ROOT.into(), - cgroup_path, + cgroup_path.to_owned(), container_name.into(), use_system, - )?)) + ) } #[cfg(not(feature = "systemd"))] fn create_systemd_cgroup_manager( - _cgroup_path: PathBuf, + _cgroup_path: &Path, _container_name: &str, -) -> Result> { - bail!("systemd cgroup feature is required, but was not enabled during compile time"); +) -> Result { + Err(systemd::manager::SystemdManagerError::NotEnabled) } -pub fn get_all_pids(path: &Path) -> Result> { - log::debug!("scan pids in folder: {:?}", path); +pub fn get_all_pids(path: &Path) -> Result, WrappedIoError> { + tracing::debug!("scan pids in folder: {:?}", path); let mut result = vec![]; walk_dir(path, &mut |p| { let file_path = p.join(CGROUP_PROCS); if file_path.exists() { - let file = File::open(file_path)?; + let file = File::open(&file_path).wrap_open(&file_path)?; for line in BufReader::new(file).lines().flatten() { - result.push(Pid::from_raw(line.parse::()?)) + result.push(Pid::from_raw( + line.parse::() + .map_err(|err| 
std::io::Error::new(std::io::ErrorKind::InvalidData, err)) + .wrap_other(&file_path)?, + )) } } - Ok(()) + Ok::<(), WrappedIoError>(()) })?; Ok(result) } -fn walk_dir(path: &Path, c: &mut F) -> Result<()> +fn walk_dir(path: &Path, c: &mut F) -> Result<(), E> where - F: FnMut(&Path) -> Result<()>, + F: FnMut(&Path) -> Result<(), E>, + E: From, { c(path)?; - for entry in fs::read_dir(path)? { - let entry = entry?; + for entry in fs::read_dir(path).wrap_read(path)? { + let entry = entry.wrap_open(path)?; let path = entry.path(); if path.is_dir() { @@ -283,11 +448,20 @@ where } pub(crate) trait PathBufExt { - fn join_safely>(&self, path: P) -> Result; + fn join_safely>(&self, path: P) -> Result; +} + +#[derive(thiserror::Error, Debug)] +pub enum JoinSafelyError { + #[error("failed to strip prefix from {path}: {err}")] + StripPrefix { + err: StripPrefixError, + path: PathBuf, + }, } impl PathBufExt for PathBuf { - fn join_safely>(&self, path: P) -> Result { + fn join_safely>(&self, path: P) -> Result { let path = path.as_ref(); if path.is_relative() { return Ok(self.join(path)); @@ -295,7 +469,10 @@ impl PathBufExt for PathBuf { let stripped = path .strip_prefix("/") - .with_context(|| format!("failed to strip prefix from {}", path.display()))?; + .map_err(|err| JoinSafelyError::StripPrefix { + err, + path: path.to_path_buf(), + })?; Ok(self.join(stripped)) } } @@ -411,7 +588,7 @@ pub(crate) fn delete_with_retry, L: Into>>( path: P, retries: u32, limit_backoff: L, -) -> Result<()> { +) -> Result<(), WrappedIoError> { let mut attempts = 0; let mut delay = Duration::from_millis(10); let path = path.as_ref(); @@ -430,5 +607,93 @@ pub(crate) fn delete_with_retry, L: Into>>( } } - bail!("could not delete {:?}", path) + Err(std::io::Error::new( + std::io::ErrorKind::TimedOut, + "could not delete".to_string(), + )) + .wrap_other(path)? 
+} + +pub(crate) trait WrapIoResult { + type Target; + + fn wrap_create_dir>(self, path: P) -> Result; + fn wrap_read>(self, path: P) -> Result; + fn wrap_open>(self, path: P) -> Result; + fn wrap_write, D: Into>( + self, + path: P, + data: D, + ) -> Result; + fn wrap_other>(self, path: P) -> Result; +} + +impl WrapIoResult for Result { + type Target = T; + + fn wrap_create_dir>(self, path: P) -> Result { + self.map_err(|err| WrappedIoError::CreateDir { + err, + path: path.into(), + }) + } + + fn wrap_read>(self, path: P) -> Result { + self.map_err(|err| WrappedIoError::Read { + err, + path: path.into(), + }) + } + + fn wrap_open>(self, path: P) -> Result { + self.map_err(|err| WrappedIoError::Open { + err, + path: path.into(), + }) + } + + fn wrap_write, D: Into>( + self, + path: P, + data: D, + ) -> Result { + self.map_err(|err| WrappedIoError::Write { + err, + path: path.into(), + data: data.into(), + }) + } + + fn wrap_other>(self, path: P) -> Result { + self.map_err(|err| WrappedIoError::Other { + err, + path: path.into(), + }) + } +} + +#[derive(Debug)] +pub enum EitherError { + Left(L), + Right(R), +} + +impl Display for EitherError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + EitherError::Left(left) => ::fmt(left, f), + EitherError::Right(right) => ::fmt(right, f), + } + } +} + +impl std::error::Error for EitherError {} + +#[derive(Debug)] +pub struct MustBePowerOfTwo; + +impl Display for MustBePowerOfTwo { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str("page size must be in the format of 2^(integer)") + } } diff --git a/crates/libcgroups/src/lib.rs b/crates/libcgroups/src/lib.rs index f7e82c2ce..12234ef3a 100644 --- a/crates/libcgroups/src/lib.rs +++ b/crates/libcgroups/src/lib.rs @@ -15,8 +15,17 @@ pub mod common; pub mod stats; #[cfg(feature = "systemd")] pub mod systemd; +#[cfg(not(feature = "systemd"))] +#[path = "stub/systemd/mod.rs"] +pub mod systemd; pub mod 
test_manager; #[cfg(feature = "v1")] pub mod v1; +#[cfg(not(feature = "v1"))] +#[path = "stub/v1/mod.rs"] +pub mod v1; #[cfg(feature = "v2")] pub mod v2; +#[cfg(not(feature = "v2"))] +#[path = "stub/v2/mod.rs"] +pub mod v2; diff --git a/crates/libcgroups/src/stats.rs b/crates/libcgroups/src/stats.rs index 6d53065b2..de9491eb5 100644 --- a/crates/libcgroups/src/stats.rs +++ b/crates/libcgroups/src/stats.rs @@ -1,13 +1,21 @@ -use anyhow::{bail, Context, Result}; use serde::Serialize; -use std::{collections::HashMap, fmt::Display, fs, path::Path}; +use std::{ + collections::HashMap, + fmt::Display, + fs, + num::ParseIntError, + path::{Path, PathBuf}, +}; + +use crate::common::{WrapIoResult, WrappedIoError}; use super::common; -pub trait StatsProvider { +pub(crate) trait StatsProvider { + type Error; type Stats; - fn stats(cgroup_path: &Path) -> Result; + fn stats(cgroup_path: &Path) -> Result; } /// Reports the statistics for a cgroup @@ -188,8 +196,18 @@ pub struct PSIData { pub avg300: f64, } +#[derive(thiserror::Error, Debug)] +pub enum SupportedPageSizesError { + #[error("io error: {0}")] + Io(#[from] std::io::Error), + #[error("failed to parse value {value}: {err}")] + Parse { value: String, err: ParseIntError }, + #[error("failed to determine page size from {dir_name}")] + Failed { dir_name: String }, +} + /// Reports which hugepage sizes are supported by the system -pub fn supported_page_sizes() -> Result> { +pub fn supported_page_sizes() -> Result, SupportedPageSizesError> { let mut sizes = Vec::new(); for hugetlb_entry in fs::read_dir("/sys/kernel/mm/hugepages")? 
{ let hugetlb_entry = hugetlb_entry?; @@ -206,12 +224,15 @@ pub fn supported_page_sizes() -> Result> { Ok(sizes) } -fn extract_page_size(dir_name: &str) -> Result { +fn extract_page_size(dir_name: &str) -> Result { if let Some(size) = dir_name .strip_prefix("hugepages-") .and_then(|name_stripped| name_stripped.strip_suffix("kB")) { - let size: u64 = parse_value(size)?; + let size: u64 = size.parse().map_err(|err| SupportedPageSizesError::Parse { + value: size.into(), + err, + })?; let size_moniker = if size >= (1 << 20) { (size >> 20).to_string() + "GB" @@ -224,7 +245,9 @@ fn extract_page_size(dir_name: &str) -> Result { return Ok(size_moniker); } - bail!("failed to determine page size from {}", dir_name); + Err(SupportedPageSizesError::Failed { + dir_name: dir_name.into(), + }) } /// Parses this string slice into an u64 @@ -235,10 +258,8 @@ fn extract_page_size(dir_name: &str) -> Result { /// let value = parse_value("32").unwrap(); /// assert_eq!(value, 32); /// ``` -pub fn parse_value(value: &str) -> Result { - value - .parse() - .with_context(|| format!("failed to parse {}", value)) +pub fn parse_value(value: &str) -> Result { + value.parse() } /// Parses a single valued file to an u64 @@ -250,58 +271,82 @@ pub fn parse_value(value: &str) -> Result { /// let value = parse_single_value(&Path::new("memory.current")).unwrap(); /// assert_eq!(value, 32); /// ``` -pub fn parse_single_value(file_path: &Path) -> Result { +pub fn parse_single_value(file_path: &Path) -> Result { let value = common::read_cgroup_file(file_path)?; let value = value.trim(); if value == "max" { return Ok(u64::MAX); } - value.parse().with_context(|| { - format!( - "failed to parse value {} from {}", - value, - file_path.display() - ) - }) + value + .parse() + .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, err)) + .wrap_other(file_path) +} + +#[derive(thiserror::Error, Debug)] +pub enum ParseFlatKeyedDataError { + #[error("io error: {0}")] + WrappedIo(#[from] 
WrappedIoError), + #[error("flat keyed data at {path} contains entries that do not conform to 'key value'")] + DoesNotConform { path: PathBuf }, + #[error("failed to parse value {value} from {path}")] + FailedToParse { + value: String, + path: PathBuf, + err: ParseIntError, + }, } /// Parses a file that is structured according to the flat keyed format -pub fn parse_flat_keyed_data(file_path: &Path) -> Result> { +pub(crate) fn parse_flat_keyed_data( + file_path: &Path, +) -> Result, ParseFlatKeyedDataError> { let mut stats = HashMap::new(); let keyed_data = common::read_cgroup_file(file_path)?; for entry in keyed_data.lines() { let entry_fields: Vec<&str> = entry.split_ascii_whitespace().collect(); if entry_fields.len() != 2 { - bail!( - "flat keyed data at {} contains entries that do not conform to 'key value'", - &file_path.display() - ); + return Err(ParseFlatKeyedDataError::DoesNotConform { + path: file_path.to_path_buf(), + }); } stats.insert( entry_fields[0].to_owned(), - entry_fields[1].parse().with_context(|| { - format!( - "failed to parse value {} from {}", - entry_fields[0], - file_path.display() - ) - })?, + entry_fields[1] + .parse() + .map_err(|err| ParseFlatKeyedDataError::FailedToParse { + value: entry_fields[0].into(), + path: file_path.to_path_buf(), + err, + })?, ); } Ok(stats) } -/// Parses a file that is structed according to the nested keyed format -pub fn parse_nested_keyed_data(file_path: &Path) -> Result>> { +#[derive(thiserror::Error, Debug)] +pub enum ParseNestedKeyedDataError { + #[error("io error: {0}")] + WrappedIo(#[from] WrappedIoError), + #[error("nested keyed data at {path} contains entries that do not conform to key format")] + DoesNotConform { path: PathBuf }, +} + +/// Parses a file that is structured according to the nested keyed format +pub fn parse_nested_keyed_data( + file_path: &Path, +) -> Result>, ParseNestedKeyedDataError> { let mut stats: HashMap> = HashMap::new(); let keyed_data = common::read_cgroup_file(file_path)?; 
for entry in keyed_data.lines() { let entry_fields: Vec<&str> = entry.split_ascii_whitespace().collect(); if entry_fields.len() < 2 || !entry_fields[1..].iter().all(|p| p.contains('=')) { - bail!("nested key data at {} contains entries that do not conform to the nested key format", file_path.display()); + return Err(ParseNestedKeyedDataError::DoesNotConform { + path: file_path.to_path_buf(), + }); } stats.insert( @@ -317,50 +362,76 @@ pub fn parse_nested_keyed_data(file_path: &Path) -> Result Result<(u64, u64)> { +#[derive(thiserror::Error, Debug)] +pub enum ParseDeviceNumberError { + #[error("failed to parse device number from {device}: expected 2 parts, found {numbers}")] + TooManyNumbers { device: String, numbers: usize }, + #[error("failed to parse device number from {device}: {err}")] + MalformedNumber { device: String, err: ParseIntError }, +} + +pub(crate) fn parse_device_number(device: &str) -> Result<(u64, u64), ParseDeviceNumberError> { let numbers: Vec<&str> = device.split_terminator(':').collect(); if numbers.len() != 2 { - bail!("failed to parse device number {}", device); + return Err(ParseDeviceNumberError::TooManyNumbers { + device: device.into(), + numbers: numbers.len(), + }); } - Ok((numbers[0].parse()?, numbers[1].parse()?)) + Ok(( + numbers[0] + .parse() + .map_err(|err| ParseDeviceNumberError::MalformedNumber { + device: device.into(), + err, + })?, + numbers[1] + .parse() + .map_err(|err| ParseDeviceNumberError::MalformedNumber { + device: device.into(), + err, + })?, + )) +} + +#[derive(thiserror::Error, Debug)] +pub enum PidStatsError { + #[error("io error: {0}")] + WrappedIo(#[from] WrappedIoError), + #[error("failed to parse current pids: {0}")] + ParseCurrent(ParseIntError), + #[error("failed to parse pids limit: {0}")] + ParseLimit(ParseIntError), } /// Returns cgroup pid statistics -pub fn pid_stats(cgroup_path: &Path) -> Result { +pub fn pid_stats(cgroup_path: &Path) -> Result { let mut stats = PidStats::default(); let current = 
common::read_cgroup_file(cgroup_path.join("pids.current"))?; stats.current = current .trim() .parse() - .context("failed to parse current pids")?; + .map_err(PidStatsError::ParseCurrent)?; let limit = common::read_cgroup_file(cgroup_path.join("pids.max")).map(|l| l.trim().to_owned())?; if limit != "max" { - stats.limit = limit.parse().context("failed to parse pids limit")?; + stats.limit = limit.parse().map_err(PidStatsError::ParseLimit)?; } Ok(stats) } -pub fn psi_stats(psi_file: &Path) -> Result { +pub fn psi_stats(psi_file: &Path) -> Result { let mut stats = PSIStats::default(); let psi = common::read_cgroup_file(psi_file)?; for line in psi.lines() { match &line[0..4] { - "some" => stats.some = parse_psi(&line[4..])?, - "full" => stats.full = parse_psi(&line[4..])?, + "some" => stats.some = parse_psi(&line[4..], psi_file)?, + "full" => stats.full = parse_psi(&line[4..], psi_file)?, _ => continue, } } @@ -368,7 +439,9 @@ pub fn psi_stats(psi_file: &Path) -> Result { Ok(stats) } -fn parse_psi(stat_line: &str) -> Result { +fn parse_psi(stat_line: &str, path: &Path) -> Result { + use std::io::{Error, ErrorKind}; + let mut psi_data = PSIData::default(); for kv in stat_line.split_ascii_whitespace() { @@ -376,17 +449,20 @@ fn parse_psi(stat_line: &str) -> Result { Some(("avg10", v)) => { psi_data.avg10 = v .parse() - .with_context(|| format!("invalid psi value {v}"))? + .map_err(|err| Error::new(ErrorKind::InvalidData, err)) + .wrap_other(path)? } Some(("avg60", v)) => { psi_data.avg60 = v .parse() - .with_context(|| format!("invalid psi value {v}"))? + .map_err(|err| Error::new(ErrorKind::InvalidData, err)) + .wrap_other(path)? } Some(("avg300", v)) => { psi_data.avg300 = v .parse() - .with_context(|| format!("invalid psi value {v}"))? + .map_err(|err| Error::new(ErrorKind::InvalidData, err)) + .wrap_other(path)? 
} _ => continue, } @@ -397,7 +473,7 @@ fn parse_psi(stat_line: &str) -> Result { #[cfg(test)] mod tests { - use crate::test::{create_temp_dir, set_fixture}; + use crate::test::set_fixture; use super::*; @@ -421,8 +497,8 @@ mod tests { #[test] fn test_parse_single_value_valid() { - let tmp = create_temp_dir("test_parse_single_value_valid").unwrap(); - let file_path = set_fixture(&tmp, "single_valued_file", "1200\n").unwrap(); + let tmp = tempfile::tempdir().unwrap(); + let file_path = set_fixture(tmp.path(), "single_valued_file", "1200\n").unwrap(); let value = parse_single_value(&file_path).unwrap(); assert_eq!(value, 1200); @@ -430,8 +506,8 @@ mod tests { #[test] fn test_parse_single_value_invalid_number() { - let tmp = create_temp_dir("test_parse_single_value_invalid_number").unwrap(); - let file_path = set_fixture(&tmp, "single_invalid_file", "noop\n").unwrap(); + let tmp = tempfile::tempdir().unwrap(); + let file_path = set_fixture(tmp.path(), "single_invalid_file", "noop\n").unwrap(); let value = parse_single_value(&file_path); assert!(value.is_err()); @@ -439,8 +515,8 @@ mod tests { #[test] fn test_parse_single_value_multiple_entries() { - let tmp = create_temp_dir("test_parse_single_value_multiple_entries").unwrap(); - let file_path = set_fixture(&tmp, "multi_valued_file", "1200\n1400\n1600").unwrap(); + let tmp = tempfile::tempdir().unwrap(); + let file_path = set_fixture(tmp.path(), "multi_valued_file", "1200\n1400\n1600").unwrap(); let value = parse_single_value(&file_path); assert!(value.is_err()); @@ -448,9 +524,9 @@ mod tests { #[test] fn test_parse_flat_keyed_data() { - let tmp = create_temp_dir("test_parse_flat_keyed_data").unwrap(); + let tmp = tempfile::tempdir().unwrap(); let file_content = ["key1 1", "key2 2", "key3 3"].join("\n"); - let file_path = set_fixture(&tmp, "flat_keyed_data", &file_content).unwrap(); + let file_path = set_fixture(tmp.path(), "flat_keyed_data", &file_content).unwrap(); let actual = 
parse_flat_keyed_data(&file_path).unwrap(); let mut expected = HashMap::with_capacity(3); @@ -463,9 +539,9 @@ mod tests { #[test] fn test_parse_flat_keyed_data_with_characters() { - let tmp = create_temp_dir("test_parse_flat_keyed_data_with_characters").unwrap(); + let tmp = tempfile::tempdir().unwrap(); let file_content = ["key1 1", "key2 a", "key3 b"].join("\n"); - let file_path = set_fixture(&tmp, "flat_keyed_data", &file_content).unwrap(); + let file_path = set_fixture(tmp.path(), "flat_keyed_data", &file_content).unwrap(); let result = parse_flat_keyed_data(&file_path); assert!(result.is_err()); @@ -473,9 +549,9 @@ mod tests { #[test] fn test_parse_space_separated_as_flat_keyed_data() { - let tmp = create_temp_dir("test_parse_space_separated_as_flat_keyed_data").unwrap(); + let tmp = tempfile::tempdir().unwrap(); let file_content = ["key1", "key2", "key3", "key4"].join(" "); - let file_path = set_fixture(&tmp, "space_separated", &file_content).unwrap(); + let file_path = set_fixture(tmp.path(), "space_separated", &file_content).unwrap(); let result = parse_flat_keyed_data(&file_path); assert!(result.is_err()); @@ -483,9 +559,9 @@ mod tests { #[test] fn test_parse_newline_separated_as_flat_keyed_data() { - let tmp = create_temp_dir("test_parse_newline_separated_as_flat_keyed_data").unwrap(); + let tmp = tempfile::tempdir().unwrap(); let file_content = ["key1", "key2", "key3", "key4"].join("\n"); - let file_path = set_fixture(&tmp, "newline_separated", &file_content).unwrap(); + let file_path = set_fixture(tmp.path(), "newline_separated", &file_content).unwrap(); let result = parse_flat_keyed_data(&file_path); assert!(result.is_err()); @@ -493,14 +569,14 @@ mod tests { #[test] fn test_parse_nested_keyed_data_as_flat_keyed_data() { - let tmp = create_temp_dir("test_parse_nested_keyed_data_as_flat_keyed_data").unwrap(); + let tmp = tempfile::tempdir().unwrap(); let file_content = [ "key1 subkey1=value1 subkey2=value2 subkey3=value3", "key2 subkey1=value1 
subkey2=value2 subkey3=value3", "key3 subkey1=value1 subkey2=value2 subkey3=value3", ] .join("\n"); - let file_path = set_fixture(&tmp, "nested_keyed_data", &file_content).unwrap(); + let file_path = set_fixture(tmp.path(), "nested_keyed_data", &file_content).unwrap(); let result = parse_flat_keyed_data(&file_path); assert!(result.is_err()); @@ -508,14 +584,14 @@ mod tests { #[test] fn test_parse_nested_keyed_data() { - let tmp = create_temp_dir("test_parse_nested_keyed_data").unwrap(); + let tmp = tempfile::tempdir().unwrap(); let file_content = [ "key1 subkey1=value1 subkey2=value2 subkey3=value3", "key2 subkey1=value1 subkey2=value2 subkey3=value3", "key3 subkey1=value1 subkey2=value2 subkey3=value3", ] .join("\n"); - let file_path = set_fixture(&tmp, "nested_keyed_data", &file_content).unwrap(); + let file_path = set_fixture(tmp.path(), "nested_keyed_data", &file_content).unwrap(); let actual = parse_nested_keyed_data(&file_path).unwrap(); let mut expected = HashMap::with_capacity(3); @@ -549,9 +625,9 @@ mod tests { #[test] fn test_parse_space_separated_as_nested_keyed_data() { - let tmp = create_temp_dir("test_parse_space_separated_as_nested_keyed_data").unwrap(); + let tmp = tempfile::tempdir().unwrap(); let file_content = ["key1", "key2", "key3", "key4"].join(" "); - let file_path = set_fixture(&tmp, "space_separated", &file_content).unwrap(); + let file_path = set_fixture(tmp.path(), "space_separated", &file_content).unwrap(); let result = parse_nested_keyed_data(&file_path); assert!(result.is_err()); @@ -559,9 +635,9 @@ mod tests { #[test] fn test_parse_newline_separated_as_nested_keyed_data() { - let tmp = create_temp_dir("test_parse_newline_separated_as_nested_keyed_data").unwrap(); + let tmp = tempfile::tempdir().unwrap(); let file_content = ["key1", "key2", "key3", "key4"].join("\n"); - let file_path = set_fixture(&tmp, "newline_separated", &file_content).unwrap(); + let file_path = set_fixture(tmp.path(), "newline_separated", &file_content).unwrap(); 
let result = parse_nested_keyed_data(&file_path); assert!(result.is_err()); @@ -569,9 +645,9 @@ mod tests { #[test] fn test_parse_flat_keyed_as_nested_keyed_data() { - let tmp = create_temp_dir("test_parse_flat_keyed_as_nested_keyed_data").unwrap(); + let tmp = tempfile::tempdir().unwrap(); let file_content = ["key1 1", "key2 2", "key3 3"].join("\n"); - let file_path = set_fixture(&tmp, "newline_separated", &file_content).unwrap(); + let file_path = set_fixture(tmp.path(), "newline_separated", &file_content).unwrap(); let result = parse_nested_keyed_data(&file_path); assert!(result.is_err()); @@ -591,13 +667,13 @@ mod tests { #[test] fn test_parse_psi_full_stats() { - let tmp = create_temp_dir("test_parse_psi_full_stats").unwrap(); + let tmp = tempfile::tempdir().unwrap(); let file_content = [ "some avg10=80.00 avg60=50.00 avg300=90.00 total=0", "full avg10=10.00 avg60=30.00 avg300=50.00 total=0", ] .join("\n"); - let psi_file = set_fixture(&tmp, "psi.pressure", &file_content).unwrap(); + let psi_file = set_fixture(tmp.path(), "psi.pressure", &file_content).unwrap(); let result = psi_stats(&psi_file).unwrap(); assert_eq!( @@ -619,9 +695,9 @@ mod tests { #[test] fn test_parse_psi_only_some() { - let tmp = create_temp_dir("test_parse_psi_only_some").unwrap(); + let tmp = tempfile::tempdir().unwrap(); let file_content = ["some avg10=80.00 avg60=50.00 avg300=90.00 total=0"].join("\n"); - let psi_file = set_fixture(&tmp, "psi.pressure", &file_content).unwrap(); + let psi_file = set_fixture(tmp.path(), "psi.pressure", &file_content).unwrap(); let result = psi_stats(&psi_file).unwrap(); assert_eq!( diff --git a/crates/libcgroups/src/stub/systemd/manager.rs b/crates/libcgroups/src/stub/systemd/manager.rs new file mode 100644 index 000000000..d2f488870 --- /dev/null +++ b/crates/libcgroups/src/stub/systemd/manager.rs @@ -0,0 +1,43 @@ +use crate::common::{AnyCgroupManager, CgroupManager}; + +#[derive(thiserror::Error, Debug)] +pub enum SystemdManagerError { + 
#[error("systemd cgroup feature is required, but was not enabled during compile time")] + NotEnabled, +} + +pub struct Manager {} + +impl Manager { + pub fn any(self) -> AnyCgroupManager { + AnyCgroupManager::Systemd(self) + } +} + +impl CgroupManager for Manager { + type Error = SystemdManagerError; + + fn add_task(&self, _pid: nix::unistd::Pid) -> Result<(), Self::Error> { + Err(SystemdManagerError::NotEnabled) + } + + fn apply(&self, _controller_opt: &crate::common::ControllerOpt) -> Result<(), Self::Error> { + Err(SystemdManagerError::NotEnabled) + } + + fn remove(&self) -> Result<(), Self::Error> { + Err(SystemdManagerError::NotEnabled) + } + + fn freeze(&self, _state: crate::common::FreezerState) -> Result<(), Self::Error> { + Err(SystemdManagerError::NotEnabled) + } + + fn stats(&self) -> Result { + Err(SystemdManagerError::NotEnabled) + } + + fn get_all_pids(&self) -> Result, Self::Error> { + Err(SystemdManagerError::NotEnabled) + } +} diff --git a/crates/libcgroups/src/stub/systemd/mod.rs b/crates/libcgroups/src/stub/systemd/mod.rs new file mode 100644 index 000000000..ff8de9eb9 --- /dev/null +++ b/crates/libcgroups/src/stub/systemd/mod.rs @@ -0,0 +1 @@ +pub mod manager; diff --git a/crates/libcgroups/src/stub/v1/manager.rs b/crates/libcgroups/src/stub/v1/manager.rs new file mode 100644 index 000000000..d464ca53c --- /dev/null +++ b/crates/libcgroups/src/stub/v1/manager.rs @@ -0,0 +1,43 @@ +use crate::common::{AnyCgroupManager, CgroupManager}; + +#[derive(thiserror::Error, Debug)] +pub enum V1ManagerError { + #[error("v1 cgroup feature is required, but was not enabled during compile time")] + NotEnabled, +} + +pub struct Manager {} + +impl Manager { + pub fn any(self) -> AnyCgroupManager { + crate::common::AnyCgroupManager::V1(self) + } +} + +impl CgroupManager for Manager { + type Error = V1ManagerError; + + fn add_task(&self, _pid: nix::unistd::Pid) -> Result<(), Self::Error> { + Err(V1ManagerError::NotEnabled) + } + + fn apply(&self, _controller_opt: 
&crate::common::ControllerOpt) -> Result<(), Self::Error> { + Err(V1ManagerError::NotEnabled) + } + + fn remove(&self) -> Result<(), Self::Error> { + Err(V1ManagerError::NotEnabled) + } + + fn freeze(&self, _state: crate::common::FreezerState) -> Result<(), Self::Error> { + Err(V1ManagerError::NotEnabled) + } + + fn stats(&self) -> Result { + Err(V1ManagerError::NotEnabled) + } + + fn get_all_pids(&self) -> Result, Self::Error> { + Err(V1ManagerError::NotEnabled) + } +} diff --git a/crates/libcgroups/src/stub/v1/mod.rs b/crates/libcgroups/src/stub/v1/mod.rs new file mode 100644 index 000000000..ff8de9eb9 --- /dev/null +++ b/crates/libcgroups/src/stub/v1/mod.rs @@ -0,0 +1 @@ +pub mod manager; diff --git a/crates/libcgroups/src/stub/v2/manager.rs b/crates/libcgroups/src/stub/v2/manager.rs new file mode 100644 index 000000000..220184bf4 --- /dev/null +++ b/crates/libcgroups/src/stub/v2/manager.rs @@ -0,0 +1,43 @@ +use crate::common::{AnyCgroupManager, CgroupManager}; + +#[derive(thiserror::Error, Debug)] +pub enum V2ManagerError { + #[error("v2 cgroup feature is required, but was not enabled during compile time")] + NotEnabled, +} + +pub struct Manager {} + +impl Manager { + pub fn any(self) -> AnyCgroupManager { + crate::common::AnyCgroupManager::V2(self) + } +} + +impl CgroupManager for Manager { + type Error = V2ManagerError; + + fn add_task(&self, _pid: nix::unistd::Pid) -> Result<(), Self::Error> { + Err(V2ManagerError::NotEnabled) + } + + fn apply(&self, _controller_opt: &crate::common::ControllerOpt) -> Result<(), Self::Error> { + Err(V2ManagerError::NotEnabled) + } + + fn remove(&self) -> Result<(), Self::Error> { + Err(V2ManagerError::NotEnabled) + } + + fn freeze(&self, _state: crate::common::FreezerState) -> Result<(), Self::Error> { + Err(V2ManagerError::NotEnabled) + } + + fn stats(&self) -> Result { + Err(V2ManagerError::NotEnabled) + } + + fn get_all_pids(&self) -> Result, Self::Error> { + Err(V2ManagerError::NotEnabled) + } +} diff --git 
a/crates/libcgroups/src/stub/v2/mod.rs b/crates/libcgroups/src/stub/v2/mod.rs new file mode 100644 index 000000000..ff8de9eb9 --- /dev/null +++ b/crates/libcgroups/src/stub/v2/mod.rs @@ -0,0 +1 @@ +pub mod manager; diff --git a/crates/libcgroups/src/systemd/controller.rs b/crates/libcgroups/src/systemd/controller.rs index 5e2075a63..2e93003c5 100644 --- a/crates/libcgroups/src/systemd/controller.rs +++ b/crates/libcgroups/src/systemd/controller.rs @@ -1,14 +1,15 @@ use std::collections::HashMap; -use anyhow::Result; use dbus::arg::RefArg; use crate::common::ControllerOpt; -pub(crate) trait Controller { +pub(super) trait Controller { + type Error; + fn apply( options: &ControllerOpt, systemd_version: u32, properties: &mut HashMap<&str, Box>, - ) -> Result<()>; + ) -> Result<(), Self::Error>; } diff --git a/crates/libcgroups/src/systemd/controller_type.rs b/crates/libcgroups/src/systemd/controller_type.rs index e38d5d64f..2383a31ec 100644 --- a/crates/libcgroups/src/systemd/controller_type.rs +++ b/crates/libcgroups/src/systemd/controller_type.rs @@ -18,7 +18,7 @@ impl Display for ControllerType { ControllerType::Pids => "pids", }; - write!(f, "{}", print) + write!(f, "{print}") } } diff --git a/crates/libcgroups/src/systemd/cpu.rs b/crates/libcgroups/src/systemd/cpu.rs index 32aaec048..f9b104af4 100644 --- a/crates/libcgroups/src/systemd/cpu.rs +++ b/crates/libcgroups/src/systemd/cpu.rs @@ -1,6 +1,5 @@ use std::collections::HashMap; -use anyhow::{bail, Context, Result}; use dbus::arg::RefArg; use oci_spec::runtime::LinuxCpu; @@ -12,18 +11,25 @@ pub const CPU_QUOTA: &str = "CPUQuotaPerSecUSec"; pub const CPU_PERIOD: &str = "CPUQuotaPeriodUSec"; const MICROSECS_PER_SEC: u64 = 1_000_000; +#[derive(thiserror::Error, Debug)] +pub enum SystemdCpuError { + #[error("realtime is not supported on systemd v2 yet")] + RealtimeSystemd, +} + pub(crate) struct Cpu {} impl Controller for Cpu { + type Error = SystemdCpuError; + fn apply( options: &ControllerOpt, _: u32, properties: 
&mut HashMap<&str, Box>, - ) -> Result<()> { + ) -> Result<(), Self::Error> { if let Some(cpu) = options.resources.cpu() { - log::debug!("Applying cpu resource restrictions"); - return Self::apply(cpu, properties) - .context("could not apply cpu resource restrictions"); + tracing::debug!("Applying cpu resource restrictions"); + Self::apply(cpu, properties)?; } Ok(()) @@ -31,9 +37,12 @@ impl Controller for Cpu { } impl Cpu { - fn apply(cpu: &LinuxCpu, properties: &mut HashMap<&str, Box>) -> Result<()> { + fn apply( + cpu: &LinuxCpu, + properties: &mut HashMap<&str, Box>, + ) -> Result<(), SystemdCpuError> { if Self::is_realtime_requested(cpu) { - bail!("realtime is not supported on systemd v2 yet"); + return Err(SystemdCpuError::RealtimeSystemd); } if let Some(mut shares) = cpu.shares() { @@ -77,11 +86,12 @@ pub fn convert_shares_to_cgroup2(shares: u64) -> u64 { return 0; } - 1 + ((shares - 2) * 9999) / 262142 + 1 + ((shares.saturating_sub(2)) * 9999) / 262142 } #[cfg(test)] mod tests { + use anyhow::{Context, Result}; use dbus::arg::ArgType; use oci_spec::runtime::LinuxCpuBuilder; @@ -97,7 +107,7 @@ mod tests { let mut properties: HashMap<&str, Box> = HashMap::new(); // act - Cpu::apply(&cpu, &mut properties).context("apply cpu")?; + Cpu::apply(&cpu, &mut properties)?; // assert assert!(properties.contains_key(CPU_WEIGHT)); @@ -119,7 +129,7 @@ mod tests { let mut properties: HashMap<&str, Box> = HashMap::new(); // act - Cpu::apply(&cpu, &mut properties).context("apply cpu")?; + Cpu::apply(&cpu, &mut properties)?; // assert assert!(properties.contains_key(CPU_QUOTA)); @@ -143,7 +153,7 @@ mod tests { let mut properties: HashMap<&str, Box> = HashMap::new(); // act - Cpu::apply(&cpu, &mut properties).context("apply cpu")?; + Cpu::apply(&cpu, &mut properties)?; // assert assert!(properties.contains_key(CPU_PERIOD)); diff --git a/crates/libcgroups/src/systemd/cpuset.rs b/crates/libcgroups/src/systemd/cpuset.rs index e85fb7d6d..33b73b992 100644 --- 
a/crates/libcgroups/src/systemd/cpuset.rs +++ b/crates/libcgroups/src/systemd/cpuset.rs @@ -1,6 +1,5 @@ use std::collections::HashMap; -use anyhow::{bail, Context, Result}; use dbus::arg::RefArg; use fixedbitset::FixedBitSet; use oci_spec::runtime::LinuxCpu; @@ -12,18 +11,29 @@ use super::controller::Controller; pub const ALLOWED_CPUS: &str = "AllowedCPUs"; pub const ALLOWED_NODES: &str = "AllowedMemoryNodes"; +#[derive(thiserror::Error, Debug)] +pub enum SystemdCpuSetError { + #[error("setting cpuset restrictions requires systemd version greater than 243")] + OldSystemd, + #[error("could not create bitmask for cpus: {0}")] + CpusBitmask(BitmaskError), + #[error("could not create bitmask for memory nodes: {0}")] + MemoryNodesBitmask(BitmaskError), +} + pub struct CpuSet {} impl Controller for CpuSet { + type Error = SystemdCpuSetError; + fn apply( options: &ControllerOpt, systemd_version: u32, properties: &mut HashMap<&str, Box>, - ) -> Result<()> { + ) -> Result<(), Self::Error> { if let Some(cpu) = options.resources.cpu() { - log::debug!("Applying cpuset resource restrictions"); - return Self::apply(cpu, systemd_version, properties) - .context("could not apply cpuset resource restrictions"); + tracing::debug!("Applying cpuset resource restrictions"); + return Self::apply(cpu, systemd_version, properties); } Ok(()) @@ -35,19 +45,18 @@ impl CpuSet { cpu: &LinuxCpu, systemd_version: u32, properties: &mut HashMap<&str, Box>, - ) -> Result<()> { + ) -> Result<(), SystemdCpuSetError> { if systemd_version <= 243 { - bail!("setting cpuset restrictions requires systemd version greather than 243"); + return Err(SystemdCpuSetError::OldSystemd); } if let Some(cpus) = cpu.cpus() { - let cpu_mask = to_bitmask(cpus).context("could not create bitmask for cpus")?; + let cpu_mask = to_bitmask(cpus).map_err(SystemdCpuSetError::CpusBitmask)?; properties.insert(ALLOWED_CPUS, Box::new(cpu_mask)); } if let Some(mems) = cpu.mems() { - let mems_mask = - to_bitmask(mems).context("could 
not create bitmask for memory nodes")?; + let mems_mask = to_bitmask(mems).map_err(SystemdCpuSetError::MemoryNodesBitmask)?; properties.insert(ALLOWED_NODES, Box::new(mems_mask)); } @@ -55,7 +64,18 @@ impl CpuSet { } } -pub fn to_bitmask(range: &str) -> Result> { +#[derive(thiserror::Error, Debug)] +pub enum BitmaskError { + #[error("invalid index {index}: {err}")] + InvalidIndex { + err: std::num::ParseIntError, + index: String, + }, + #[error("invalid cpu range {0}")] + InvalidRange(String), +} + +pub fn to_bitmask(range: &str) -> Result, BitmaskError> { let mut bitset = FixedBitSet::with_capacity(8); for cpu_set in range.split_terminator(',') { @@ -66,16 +86,25 @@ pub fn to_bitmask(range: &str) -> Result> { let cpus: Vec<&str> = cpu_set.split('-').map(|s| s.trim()).collect(); if cpus.len() == 1 { - let cpu_index: usize = cpus[0].parse()?; + let cpu_index: usize = cpus[0].parse().map_err(|err| BitmaskError::InvalidIndex { + err, + index: cpus[0].into(), + })?; if cpu_index >= bitset.len() { bitset.grow(bitset.len() + 8); } bitset.set(cpu_index, true); } else { - let start_index = cpus[0].parse()?; - let end_index = cpus[1].parse()?; + let start_index = cpus[0].parse().map_err(|err| BitmaskError::InvalidIndex { + err, + index: cpus[0].into(), + })?; + let end_index = cpus[1].parse().map_err(|err| BitmaskError::InvalidIndex { + err, + index: cpus[1].into(), + })?; if start_index > end_index { - bail!("invalid cpu range {}", cpu_set); + return Err(BitmaskError::InvalidRange(cpu_set.into())); } if end_index >= bitset.len() { @@ -98,6 +127,7 @@ pub fn to_bitmask(range: &str) -> Result> { #[cfg(test)] mod tests { + use anyhow::{Context, Result}; use dbus::arg::{ArgType, RefArg}; use oci_spec::runtime::LinuxCpuBuilder; diff --git a/crates/libcgroups/src/systemd/dbus/client.rs b/crates/libcgroups/src/systemd/dbus/client.rs index bb489b9f7..b51b9a801 100644 --- a/crates/libcgroups/src/systemd/dbus/client.rs +++ b/crates/libcgroups/src/systemd/dbus/client.rs @@ -1,11 +1,29 
@@ use crate::systemd::dbus::systemd_api::OrgFreedesktopSystemd1Manager; -use anyhow::{Context, Result}; use dbus::arg::{RefArg, Variant}; use dbus::blocking::{Connection, Proxy}; use std::collections::HashMap; +use std::num::ParseIntError; use std::path::PathBuf; use std::time::Duration; +#[derive(thiserror::Error, Debug)] +pub enum SystemdClientError { + #[error("dbus error: {0}")] + DBus(#[from] dbus::Error), + #[error("failed to start transient unit {unit_name}, parent is {parent}: {err}")] + FailedTransient { + err: dbus::Error, + unit_name: String, + parent: String, + }, + #[error("failed to stop unit {unit_name}: {err}")] + FailedStop { err: dbus::Error, unit_name: String }, + #[error("failed to set properties for unit {unit_name}: {err}")] + FailedProperties { err: dbus::Error, unit_name: String }, + #[error("could not parse systemd version: {0}")] + SystemdVersion(ParseIntError), +} + pub trait SystemdClient { fn is_system(&self) -> bool; @@ -17,19 +35,19 @@ pub trait SystemdClient { pid: u32, parent: &str, unit_name: &str, - ) -> Result<()>; + ) -> Result<(), SystemdClientError>; - fn stop_transient_unit(&self, unit_name: &str) -> Result<()>; + fn stop_transient_unit(&self, unit_name: &str) -> Result<(), SystemdClientError>; fn set_unit_properties( &self, unit_name: &str, properties: &HashMap<&str, Box>, - ) -> Result<()>; + ) -> Result<(), SystemdClientError>; - fn systemd_version(&self) -> Result; + fn systemd_version(&self) -> Result; - fn control_cgroup_root(&self) -> Result; + fn control_cgroup_root(&self) -> Result; } /// Client is a wrapper providing higher level API and abatraction around dbus. 
@@ -41,13 +59,13 @@ pub struct Client { impl Client { /// Uses the system bus to communicate with systemd - pub fn new_system() -> Result { + pub fn new_system() -> Result { let conn = Connection::new_system()?; Ok(Client { conn, system: true }) } /// Uses the session bus to communicate with systemd - pub fn new_session() -> Result { + pub fn new_session() -> Result { let conn = Connection::new_session()?; Ok(Client { conn, @@ -83,7 +101,7 @@ impl SystemdClient for Client { pid: u32, parent: &str, unit_name: &str, - ) -> Result<()> { + ) -> Result<(), SystemdClientError> { // To view and introspect the methods under the 'org.freedesktop.systemd1' destination // and object path under it use the following command: // `gdbus introspect --system --dest org.freedesktop.systemd1 --object-path /org/freedesktop/systemd1` @@ -99,7 +117,7 @@ impl SystemdClient for Client { let mut properties: Vec<(&str, Variant>)> = Vec::with_capacity(6); properties.push(( "Description", - Variant(Box::new(format!("youki container {}", container_name))), + Variant(Box::new(format!("youki container {container_name}"))), )); // if we create a slice, the parent is defined via a Wants= @@ -119,24 +137,26 @@ impl SystemdClient for Client { properties.push(("DefaultDependencies", Variant(Box::new(false)))); properties.push(("PIDs", Variant(Box::new(vec![pid])))); - log::debug!("Starting transient unit: {:?}", properties); + tracing::debug!("Starting transient unit: {:?}", properties); proxy .start_transient_unit(unit_name, "replace", properties, vec![]) - .with_context(|| { - format!( - "failed to start transient unit {}, parent is {}", - unit_name, parent - ) + .map_err(|err| SystemdClientError::FailedTransient { + err, + unit_name: unit_name.into(), + parent: parent.into(), })?; Ok(()) } - fn stop_transient_unit(&self, unit_name: &str) -> Result<()> { + fn stop_transient_unit(&self, unit_name: &str) -> Result<(), SystemdClientError> { let proxy = self.create_proxy(); proxy .stop_unit(unit_name, 
"replace") - .with_context(|| format!("failed to stop unit {}", unit_name))?; + .map_err(|err| SystemdClientError::FailedStop { + err, + unit_name: unit_name.into(), + })?; Ok(()) } @@ -144,7 +164,7 @@ impl SystemdClient for Client { &self, unit_name: &str, properties: &HashMap<&str, Box>, - ) -> Result<()> { + ) -> Result<(), SystemdClientError> { let proxy = self.create_proxy(); let props = properties @@ -154,33 +174,32 @@ impl SystemdClient for Client { proxy .set_unit_properties(unit_name, true, props) - .with_context(|| format!("failed to set properties for unit {:?}", unit_name))?; + .map_err(|err| SystemdClientError::FailedProperties { + err, + unit_name: unit_name.into(), + })?; Ok(()) } - fn systemd_version(&self) -> Result { + fn systemd_version(&self) -> Result { let proxy = self.create_proxy(); let version = proxy - .version() - .context("dbus request failed")? + .version()? .chars() .skip_while(|c| c.is_alphabetic()) .take_while(|c| c.is_numeric()) .collect::() .parse::() - .context("could not parse systemd version")?; + .map_err(SystemdClientError::SystemdVersion)?; Ok(version) } - fn control_cgroup_root(&self) -> Result { + fn control_cgroup_root(&self) -> Result { let proxy = self.create_proxy(); - let cgroup_root = proxy - .control_group() - .context("failed to get systemd control group")?; - PathBuf::try_from(&cgroup_root) - .with_context(|| format!("parse systemd control cgroup {} into path", cgroup_root)) + let cgroup_root = proxy.control_group()?; + Ok(PathBuf::from(&cgroup_root)) } } diff --git a/crates/libcgroups/src/systemd/manager.rs b/crates/libcgroups/src/systemd/manager.rs index 4f38c1e52..286b2fe34 100644 --- a/crates/libcgroups/src/systemd/manager.rs +++ b/crates/libcgroups/src/systemd/manager.rs @@ -1,11 +1,11 @@ use std::{ collections::HashMap, + convert::Infallible, fmt::{Debug, Display}, fs::{self}, path::Component::RootDir, }; -use anyhow::{anyhow, bail, Context, Result}; use dbus::arg::RefArg; use nix::{unistd::Pid, NixPath}; use 
std::path::{Path, PathBuf}; @@ -15,13 +15,17 @@ use super::{ controller_type::{ControllerType, CONTROLLER_TYPES}, cpu::Cpu, cpuset::CpuSet, - dbus::client::{Client, SystemdClient}, + dbus::client::{Client, SystemdClient, SystemdClientError}, memory::Memory, pids::Pids, }; use crate::{ - common::{self, CgroupManager, ControllerOpt, FreezerState, PathBufExt}, + common::{ + self, AnyCgroupManager, CgroupManager, ControllerOpt, FreezerState, JoinSafelyError, + PathBufExt, WrapIoResult, WrappedIoError, + }, systemd::unified::Unified, + v2::manager::V2ManagerError, }; use crate::{stats::Stats, v2::manager::Manager as FsManager}; @@ -61,19 +65,29 @@ struct CgroupsPath { name: String, } +#[derive(thiserror::Error, Debug)] +pub enum CgroupsPathError { + #[error("no cgroups path has been provided")] + NoPath, + #[error("cgroups path does not contain valid utf8")] + InvalidUtf8(PathBuf), + #[error("cgroups path is malformed: {0}")] + MalformedPath(PathBuf), +} + impl TryFrom<&Path> for CgroupsPath { - type Error = anyhow::Error; + type Error = CgroupsPathError; fn try_from(cgroups_path: &Path) -> Result { // if cgroups_path was provided it should be of the form [slice]:[prefix]:[name], // for example: "system.slice:docker:1234". if cgroups_path.len() == 0 { - bail!("no cgroups path has been provided"); + return Err(CgroupsPathError::NoPath); } let parts = cgroups_path .to_str() - .ok_or_else(|| anyhow!("failed to parse cgroups path {:?}", cgroups_path))? + .ok_or_else(|| CgroupsPathError::InvalidUtf8(cgroups_path.to_path_buf()))? 
.split(':') .collect::<Vec<&str>>();
let client = match use_system { - true => Client::new_system().context("failed to create system dbus client")?, - false => Client::new_session().context("failed to create session dbus client")?, + true => Client::new_system()?, + false => Client::new_session()?, }; let (cgroups_path, delegation_boundary) = - Self::construct_cgroups_path(&destructured_path, &client) - .context("failed to construct cgroups path")?; + Self::construct_cgroups_path(&destructured_path, &client)?; let full_path = root_path.join_safely(&cgroups_path)?; let fs_manager = FsManager::new(root_path.clone(), cgroups_path.clone())?; @@ -179,7 +222,7 @@ impl Manager { fn construct_cgroups_path( cgroups_path: &CgroupsPath, client: &dyn SystemdClient, - ) -> Result<(PathBuf, PathBuf)> { + ) -> Result<(PathBuf, PathBuf), SystemdManagerError> { // if the user provided a '.slice' (as in a branch of a tree) // we need to convert it to a filesystem path. @@ -187,24 +230,20 @@ impl Manager { let systemd_root = client.control_cgroup_root()?; let unit_name = Self::get_unit_name(cgroups_path); - let cgroups_path = systemd_root - .join_safely(&parent) - .with_context(|| format!("failed to join {:?} with {:?}", systemd_root, parent))? - .join_safely(&unit_name) - .with_context(|| format!("failed to join {:?} with {:?}", parent, unit_name))?; + let cgroups_path = systemd_root.join_safely(parent)?.join_safely(unit_name)?; Ok((cgroups_path, systemd_root)) } // systemd represents slice hierarchy using `-`, so we need to follow suit when // generating the path of slice. For example, 'test-a-b.slice' becomes // '/test.slice/test-a.slice/test-a-b.slice'. 
- fn expand_slice(slice: &str) -> Result { + fn expand_slice(slice: &str) -> Result { let suffix = ".slice"; if slice.len() <= suffix.len() || !slice.ends_with(suffix) { - bail!("invalid slice name: {}", slice); + return Err(SystemdManagerError::InvalidSliceName(slice.into())); } if slice.contains('/') { - bail!("invalid slice name: {}", slice); + return Err(SystemdManagerError::InvalidSliceName(slice.into())); } let mut path = "".to_owned(); let mut prefix = "".to_owned(); @@ -215,18 +254,18 @@ impl Manager { } for component in slice_name.split('-') { if component.is_empty() { - bail!("invalid slice name: {}", slice); + return Err(SystemdManagerError::InvalidSliceName(slice.into())); } // Append the component to the path and to the prefix. - path = format!("{}/{}{}{}", path, prefix, component, suffix); - prefix = format!("{}{}-", prefix, component); + path = format!("{path}/{prefix}{component}{suffix}"); + prefix = format!("{prefix}{component}-"); } Ok(Path::new(&path).to_path_buf()) } /// ensures that each level in the downward path from the delegation boundary down to /// the scope or slice of the transient unit has all available controllers enabled - fn ensure_controllers_attached(&self) -> Result<()> { + fn ensure_controllers_attached(&self) -> Result<(), SystemdManagerError> { let full_boundary_path = self.root_path.join_safely(&self.delegation_boundary)?; let controllers: Vec = self @@ -240,7 +279,11 @@ impl Manager { let mut current_path = full_boundary_path; let mut components = self .cgroups_path - .strip_prefix(&self.delegation_boundary)? + .strip_prefix(&self.delegation_boundary) + .map_err(|_| SystemdManagerError::BadDelegationBoundary { + boundary: self.delegation_boundary.clone(), + cgroup: self.cgroups_path.clone(), + })? 
.components() .filter(|c| c.ne(&RootDir)) .peekable(); @@ -250,7 +293,7 @@ impl Manager { while let Some(component) = components.next() { current_path = current_path.join(component); if !current_path.exists() { - log::warn!( + tracing::warn!( "{:?} does not exist. Resource restrictions might not work correctly", current_path ); @@ -270,17 +313,17 @@ impl Manager { fn get_available_controllers>( &self, cgroups_path: P, - ) -> Result> { + ) -> Result, SystemdManagerError> { let controllers_path = self.root_path.join(cgroups_path).join(CGROUP_CONTROLLERS); if !controllers_path.exists() { - bail!( - "cannot get available controllers. {:?} does not exist", - controllers_path - ) + return Err(SystemdManagerError::FileNotFound(controllers_path)); } let mut controllers = Vec::new(); - for controller in fs::read_to_string(controllers_path)?.split_whitespace() { + for controller in fs::read_to_string(&controllers_path) + .wrap_read(controllers_path)? + .split_whitespace() + { match controller { "cpu" => controllers.push(ControllerType::Cpu), "memory" => controllers.push(ControllerType::Memory), @@ -292,110 +335,103 @@ impl Manager { Ok(controllers) } - fn write_controllers(path: &Path, controllers: &[String]) -> Result<()> { + fn write_controllers(path: &Path, controllers: &[String]) -> Result<(), SystemdManagerError> { for controller in controllers { common::write_cgroup_file_str(path.join(CGROUP_SUBTREE_CONTROL), controller)?; } Ok(()) } + + pub fn any(self) -> AnyCgroupManager { + AnyCgroupManager::Systemd(self) + } } impl CgroupManager for Manager { - fn add_task(&self, pid: Pid) -> Result<()> { + type Error = SystemdManagerError; + + fn add_task(&self, pid: Pid) -> Result<(), Self::Error> { // Dont attach any pid to the cgroup if -1 is specified as a pid if pid.as_raw() == -1 { return Ok(()); } - log::debug!("Starting {:?}", self.unit_name); - self.client - .start_transient_unit( - &self.container_name, - pid.as_raw() as u32, - &self.destructured_path.parent, - 
&self.unit_name, - ) - .with_context(|| { - format!( - "failed to create unit {} for container {}", - self.unit_name, self.container_name - ) - })?; + tracing::debug!("Starting {:?}", self.unit_name); + self.client.start_transient_unit( + &self.container_name, + pid.as_raw() as u32, + &self.destructured_path.parent, + &self.unit_name, + )?; Ok(()) } - fn apply(&self, controller_opt: &ControllerOpt) -> Result<()> { + fn apply(&self, controller_opt: &ControllerOpt) -> Result<(), Self::Error> { let mut properties: HashMap<&str, Box> = HashMap::new(); - let systemd_version = self - .client - .systemd_version() - .context("could not retrieve systemd version")?; + let systemd_version = self.client.systemd_version()?; for controller in CONTROLLER_TYPES { match controller { ControllerType::Cpu => { - Cpu::apply(controller_opt, systemd_version, &mut properties)? + Cpu::apply(controller_opt, systemd_version, &mut properties)?; } ControllerType::CpuSet => { - CpuSet::apply(controller_opt, systemd_version, &mut properties)? + CpuSet::apply(controller_opt, systemd_version, &mut properties)?; } ControllerType::Pids => { - Pids::apply(controller_opt, systemd_version, &mut properties)? + Pids::apply(controller_opt, systemd_version, &mut properties) + .map_err(SystemdManagerError::Pids)?; } ControllerType::Memory => { - Memory::apply(controller_opt, systemd_version, &mut properties)? 
+ Memory::apply(controller_opt, systemd_version, &mut properties)?; } _ => {} }; } Unified::apply(controller_opt, systemd_version, &mut properties)?; - log::debug!("{:?}", properties); + tracing::debug!("{:?}", properties); if !properties.is_empty() { - self.ensure_controllers_attached() - .context("failed to attach controllers")?; + self.ensure_controllers_attached()?; self.client - .set_unit_properties(&self.unit_name, &properties) - .context("could not apply resource restrictions")?; + .set_unit_properties(&self.unit_name, &properties)?; } Ok(()) } - fn remove(&self) -> Result<()> { - log::debug!("remove {}", self.unit_name); + fn remove(&self) -> Result<(), Self::Error> { + tracing::debug!("remove {}", self.unit_name); if self.client.transient_unit_exists(&self.unit_name) { - self.client - .stop_transient_unit(&self.unit_name) - .with_context(|| { - format!("could not remove control group {}", self.destructured_path) - })?; + self.client.stop_transient_unit(&self.unit_name)?; } Ok(()) } - fn freeze(&self, state: FreezerState) -> Result<()> { - self.fs_manager.freeze(state) + fn freeze(&self, state: FreezerState) -> Result<(), Self::Error> { + Ok(self.fs_manager.freeze(state)?) } - fn stats(&self) -> Result { - self.fs_manager.stats() + fn stats(&self) -> Result { + Ok(self.fs_manager.stats()?) } - fn get_all_pids(&self) -> Result> { - common::get_all_pids(&self.full_path) + fn get_all_pids(&self) -> Result, Self::Error> { + Ok(common::get_all_pids(&self.full_path)?) 
} } #[cfg(test)] mod tests { + use anyhow::{Context, Result}; + use crate::systemd::dbus::client::SystemdClient; use super::*; @@ -417,11 +453,11 @@ mod tests { _pid: u32, _parent: &str, _unit_name: &str, - ) -> Result<()> { + ) -> Result<(), SystemdClientError> { Ok(()) } - fn stop_transient_unit(&self, _unit_name: &str) -> Result<()> { + fn stop_transient_unit(&self, _unit_name: &str) -> Result<(), SystemdClientError> { Ok(()) } @@ -429,15 +465,15 @@ mod tests { &self, _unit_name: &str, _properties: &HashMap<&str, Box>, - ) -> Result<()> { + ) -> Result<(), SystemdClientError> { Ok(()) } - fn systemd_version(&self) -> Result { + fn systemd_version(&self) -> Result { Ok(245) } - fn control_cgroup_root(&self) -> Result { + fn control_cgroup_root(&self) -> Result { Ok(PathBuf::from("/")) } } diff --git a/crates/libcgroups/src/systemd/memory.rs b/crates/libcgroups/src/systemd/memory.rs index ed8e7f1f3..07c9799d1 100644 --- a/crates/libcgroups/src/systemd/memory.rs +++ b/crates/libcgroups/src/systemd/memory.rs @@ -1,6 +1,5 @@ use std::collections::HashMap; -use anyhow::{bail, Context, Result}; use dbus::arg::RefArg; use oci_spec::runtime::LinuxMemory; @@ -14,18 +13,29 @@ pub const MEMORY_HIGH: &str = "MemoryHigh"; pub const MEMORY_MAX: &str = "MemoryMax"; pub const MEMORY_SWAP: &str = "MemorySwapMax"; +#[derive(thiserror::Error, Debug)] +pub enum SystemdMemoryError { + #[error("invalid memory reservation value: {0}")] + ReservationValue(i64), + #[error("invalid memory limit value: {0}")] + MemoryLimit(i64), + #[error("cgroup v2 swap value cannot be calculated from swap of {swap} and limit of {limit}")] + SwapValue { swap: i64, limit: String }, +} + pub struct Memory {} impl Controller for Memory { + type Error = SystemdMemoryError; + fn apply( options: &ControllerOpt, _: u32, properties: &mut HashMap<&str, Box>, - ) -> Result<()> { + ) -> Result<(), Self::Error> { if let Some(memory) = options.resources.memory() { - log::debug!("applying memory resource 
restrictions"); - return Self::apply(memory, properties) - .context("could not apply memory resource restrictions"); + tracing::debug!("applying memory resource restrictions"); + return Self::apply(memory, properties); } Ok(()) @@ -33,7 +43,10 @@ impl Controller for Memory { } impl Memory { - fn apply(memory: &LinuxMemory, properties: &mut HashMap<&str, Box>) -> Result<()> { + fn apply( + memory: &LinuxMemory, + properties: &mut HashMap<&str, Box>, + ) -> Result<(), SystemdMemoryError> { if let Some(reservation) = memory.reservation() { match reservation { 1..=i64::MAX => { @@ -42,7 +55,7 @@ impl Memory { -1 => { properties.insert(MEMORY_LOW, Box::new(u64::MAX)); } - _ => bail!("invalid memory reservation value: {}", reservation), + _ => return Err(SystemdMemoryError::ReservationValue(reservation)), } } @@ -54,12 +67,11 @@ impl Memory { -1 => { properties.insert(MEMORY_MAX, Box::new(u64::MAX)); } - _ => bail!("invalid memory limit value: {}", limit), + _ => return Err(SystemdMemoryError::MemoryLimit(limit)), } } - Self::apply_swap(memory.swap(), memory.limit(), properties) - .context("could not apply swap")?; + Self::apply_swap(memory.swap(), memory.limit(), properties)?; Ok(()) } @@ -72,7 +84,7 @@ impl Memory { swap: Option, limit: Option, properties: &mut HashMap<&str, Box>, - ) -> Result<()> { + ) -> Result<(), SystemdMemoryError> { let value: Box = match (limit, swap) { // memory is unlimited and swap not specified -> assume swap unlimited (Some(-1), None) => Box::new(u64::MAX), @@ -84,11 +96,13 @@ impl Memory { // if swap is greater than zero and memory limit is unspecified swap cannot be // calculated. If memory limit is zero the container would have only swap. If // memory is unlimited it would be bigger than swap. 
- (_, Some(0)) | (None | Some(0) | Some(-1), Some(1..=i64::MAX)) => bail!( - "cgroup v2 swap value cannot be calculated from swap of {} and limit of {}", - swap.unwrap(), - limit.map_or("none".to_owned(), |v| v.to_string()) - ), + (_, Some(0)) | (None | Some(0) | Some(-1), Some(1..=i64::MAX)) => { + return Err(SystemdMemoryError::SwapValue { + swap: swap.unwrap(), + limit: limit.map_or("none".to_owned(), |v| v.to_string()), + }) + } + (Some(l), Some(s)) if l < s => Box::new((s - l) as u64), _ => return Ok(()), }; @@ -100,6 +114,7 @@ impl Memory { #[cfg(test)] mod tests { + use anyhow::{Context, Result}; use dbus::arg::ArgType; use oci_spec::runtime::LinuxMemoryBuilder; diff --git a/crates/libcgroups/src/systemd/pids.rs b/crates/libcgroups/src/systemd/pids.rs index e29cf2a23..c93dfb7e1 100644 --- a/crates/libcgroups/src/systemd/pids.rs +++ b/crates/libcgroups/src/systemd/pids.rs @@ -1,6 +1,5 @@ -use std::collections::HashMap; +use std::{collections::HashMap, convert::Infallible}; -use anyhow::{Context, Result}; use dbus::arg::RefArg; use oci_spec::runtime::LinuxPids; @@ -13,14 +12,16 @@ pub const TASKS_MAX: &str = "TasksMax"; pub struct Pids {} impl Controller for Pids { + type Error = Infallible; + fn apply( options: &ControllerOpt, _: u32, properties: &mut HashMap<&str, Box>, - ) -> Result<()> { + ) -> Result<(), Self::Error> { if let Some(pids) = options.resources.pids() { - log::debug!("Applying pids resource restrictions"); - return Self::apply(pids, properties).context(""); + tracing::debug!("Applying pids resource restrictions"); + Self::apply(pids, properties); } Ok(()) @@ -28,7 +29,7 @@ impl Controller for Pids { } impl Pids { - fn apply(pids: &LinuxPids, properties: &mut HashMap<&str, Box>) -> Result<()> { + fn apply(pids: &LinuxPids, properties: &mut HashMap<&str, Box>) { let limit = if pids.limit() > 0 { pids.limit() as u64 } else { @@ -36,13 +37,13 @@ impl Pids { }; properties.insert(TASKS_MAX, Box::new(limit)); - Ok(()) } } #[cfg(test)] mod tests { use 
super::*; + use anyhow::{anyhow, Context, Result}; use dbus::arg::ArgType; use oci_spec::runtime::{LinuxPidsBuilder, LinuxResources, LinuxResourcesBuilder}; @@ -65,7 +66,9 @@ mod tests { .build()?; let (options, mut properties) = setup(&resources); - ::apply(&options, 245, &mut properties).context("apply pids")?; + ::apply(&options, 245, &mut properties) + .map_err(|err| anyhow!(err)) + .context("apply pids")?; assert_eq!(properties.len(), 1); assert!(properties.contains_key(TASKS_MAX)); @@ -84,7 +87,9 @@ mod tests { .build()?; let (options, mut properties) = setup(&resources); - ::apply(&options, 245, &mut properties).context("apply pids")?; + ::apply(&options, 245, &mut properties) + .map_err(|err| anyhow!(err)) + .context("apply pids")?; assert_eq!(properties.len(), 1); assert!(properties.contains_key(TASKS_MAX)); @@ -103,7 +108,9 @@ mod tests { .build()?; let (options, mut properties) = setup(&resources); - ::apply(&options, 245, &mut properties).context("apply pids")?; + ::apply(&options, 245, &mut properties) + .map_err(|err| anyhow!(err)) + .context("apply pids")?; assert_eq!(properties.len(), 1); assert!(properties.contains_key(TASKS_MAX)); diff --git a/crates/libcgroups/src/systemd/unified.rs b/crates/libcgroups/src/systemd/unified.rs index 8081e79b1..79d6d71bb 100644 --- a/crates/libcgroups/src/systemd/unified.rs +++ b/crates/libcgroups/src/systemd/unified.rs @@ -1,27 +1,51 @@ -use anyhow::{bail, Context, Result}; use dbus::arg::RefArg; -use std::collections::HashMap; +use std::{collections::HashMap, num::ParseIntError}; use super::{ controller::Controller, cpu::{self, convert_shares_to_cgroup2}, - cpuset::{self, to_bitmask}, + cpuset::{self, to_bitmask, BitmaskError}, memory, pids, }; use crate::common::ControllerOpt; +#[derive(thiserror::Error, Debug)] +pub enum SystemdUnifiedError { + #[error("failed to parse cpu weight {value}: {err}")] + CpuWeight { err: ParseIntError, value: String }, + #[error("invalid format for cpu.max: {0}")] + CpuMax(String), + 
#[error("failed to to parse cpu quota {value}: {err}")] + CpuQuota { err: ParseIntError, value: String }, + #[error("failed to to parse cpu period {value}: {err}")] + CpuPeriod { err: ParseIntError, value: String }, + #[error("setting {0} requires systemd version greater than 243")] + OldSystemd(String), + #[error("invalid value for cpuset.cpus {0}")] + CpuSetCpu(BitmaskError), + #[error("failed to parse {name} {value}: {err}")] + Memory { + err: ParseIntError, + name: String, + value: String, + }, + #[error("failed to to parse pids.max {value}: {err}")] + PidsMax { err: ParseIntError, value: String }, +} + pub struct Unified {} impl Controller for Unified { + type Error = SystemdUnifiedError; + fn apply( options: &ControllerOpt, systemd_version: u32, properties: &mut HashMap<&str, Box>, - ) -> Result<()> { + ) -> Result<(), Self::Error> { if let Some(unified) = options.resources.unified() { - log::debug!("applying unified resource restrictions"); - Self::apply(unified, systemd_version, properties) - .context("failed to apply unified resource restrictions")?; + tracing::debug!("applying unified resource restrictions"); + Self::apply(unified, systemd_version, properties)?; } Ok(()) @@ -33,43 +57,50 @@ impl Unified { unified: &HashMap, systemd_version: u32, properties: &mut HashMap<&str, Box>, - ) -> Result<()> { + ) -> Result<(), SystemdUnifiedError> { for (key, value) in unified { match key.as_str() { "cpu.weight" => { - let shares = value - .parse::() - .with_context(|| format!("failed to parse cpu weight: {}", value))?; + let shares = + value + .parse::() + .map_err(|err| SystemdUnifiedError::CpuWeight { + err, + value: value.into(), + })?; properties.insert(cpu::CPU_WEIGHT, Box::new(convert_shares_to_cgroup2(shares))); } "cpu.max" => { let parts: Vec<&str> = value.split_whitespace().collect(); if parts.is_empty() || parts.len() > 2 { - bail!("invalid format for cpu.max: {}", value); + return Err(SystemdUnifiedError::CpuMax(value.into())); } - let quota = 
parts[0] - .parse::() - .with_context(|| format!("failed to parse cpu quota: {}", parts[0]))?; + let quota = + parts[0] + .parse::() + .map_err(|err| SystemdUnifiedError::CpuQuota { + err, + value: parts[0].into(), + })?; properties.insert(cpu::CPU_QUOTA, Box::new(quota)); if parts.len() == 2 { - let period = parts[1].parse::().with_context(|| { - format!("failed to to parse cpu period: {}", parts[1]) + let period = parts[1].parse::().map_err(|err| { + SystemdUnifiedError::CpuPeriod { + err, + value: parts[1].into(), + } })?; properties.insert(cpu::CPU_PERIOD, Box::new(period)); } } cpuset @ ("cpuset.cpus" | "cpuset.mems") => { if systemd_version <= 243 { - bail!( - "setting {} requires systemd version greater than 243", - cpuset - ); + return Err(SystemdUnifiedError::OldSystemd(cpuset.into())); } - let bitmask = to_bitmask(value) - .with_context(|| format!("invalid value for cpuset.cpus: {}", value))?; + let bitmask = to_bitmask(value).map_err(SystemdUnifiedError::CpuSetCpu)?; let systemd_cpuset = match cpuset { "cpuset.cpus" => cpuset::ALLOWED_CPUS, @@ -80,9 +111,14 @@ impl Unified { properties.insert(systemd_cpuset, Box::new(bitmask)); } memory @ ("memory.min" | "memory.low" | "memory.high" | "memory.max") => { - let value = value - .parse::() - .with_context(|| format!("failed to parse {}: {}", memory, value))?; + let value = + value + .parse::() + .map_err(|err| SystemdUnifiedError::Memory { + err, + name: memory.into(), + value: value.into(), + })?; let systemd_memory = match memory { "memory.min" => memory::MEMORY_MIN, "memory.low" => memory::MEMORY_LOW, @@ -93,11 +129,16 @@ impl Unified { properties.insert(systemd_memory, Box::new(value)); } "pids.max" => { - let pids = value.trim().parse::()?; + let pids = value.trim().parse::().map_err(|err| { + SystemdUnifiedError::PidsMax { + err, + value: value.into(), + } + })?; properties.insert(pids::TASKS_MAX, Box::new(pids as u64)); } - unknown => log::warn!("could not apply {}. 
Unknown property.", unknown), + unknown => tracing::warn!("could not apply {}. Unknown property.", unknown), } } @@ -107,6 +148,7 @@ impl Unified { #[cfg(test)] mod tests { + use anyhow::{bail, Context, Result}; use dbus::arg::ArgType; use super::*; @@ -145,10 +187,10 @@ mod tests { // assert for (setting, value) in expected { assert!(actual.contains_key(setting)); - assert_eq!(value.arg_type(), actual[setting].arg_type(), "{}", setting); + assert_eq!(value.arg_type(), actual[setting].arg_type(), "{setting}"); match value.arg_type() { ArgType::UInt64 => { - assert_eq!(value.as_u64(), actual[setting].as_u64(), "{}", setting) + assert_eq!(value.as_u64(), actual[setting].as_u64(), "{setting}") } ArgType::Array => assert_eq!( value.as_iter().unwrap().next().unwrap().as_u64(), diff --git a/crates/libcgroups/src/test.rs b/crates/libcgroups/src/test.rs index e2c54490c..84e6246be 100644 --- a/crates/libcgroups/src/test.rs +++ b/crates/libcgroups/src/test.rs @@ -2,66 +2,14 @@ use anyhow::{Context, Result}; use std::{ - fs, io::Write, - ops::Deref, path::{Path, PathBuf}, }; -pub struct TempDir { - path: Option, -} - -impl TempDir { - pub fn new>(path: P) -> Result { - let p = path.into(); - std::fs::create_dir_all(&p)?; - Ok(Self { path: Some(p) }) - } - - pub fn path(&self) -> &Path { - self.path - .as_ref() - .expect("temp dir has already been removed") - } - - pub fn remove(&mut self) { - if let Some(p) = &self.path { - let _ = fs::remove_dir_all(p); - self.path = None; - } - } -} - -impl Drop for TempDir { - fn drop(&mut self) { - self.remove(); - } -} - -impl AsRef for TempDir { - fn as_ref(&self) -> &Path { - self.path() - } -} - -impl Deref for TempDir { - type Target = Path; - - fn deref(&self) -> &Self::Target { - self.path() - } -} - -pub fn create_temp_dir(test_name: &str) -> Result { - let dir = TempDir::new(std::env::temp_dir().join(test_name))?; - Ok(dir) -} - -pub fn setup(testname: &str, cgroup_file: &str) -> (TempDir, PathBuf) { - let tmp = 
create_temp_dir(testname).expect("create temp directory for test"); - let cgroup_file = set_fixture(&tmp, cgroup_file, "") - .unwrap_or_else(|_| panic!("set test fixture for {}", cgroup_file)); +pub fn setup(cgroup_file: &str) -> (tempfile::TempDir, PathBuf) { + let tmp = tempfile::tempdir().expect("create temp directory for test"); + let cgroup_file = set_fixture(tmp.path(), cgroup_file, "") + .unwrap_or_else(|_| panic!("set test fixture for {cgroup_file}")); (tmp, cgroup_file) } @@ -74,9 +22,9 @@ pub fn set_fixture(temp_dir: &Path, filename: &str, val: &str) -> Result Result<()> { + type Error = Infallible; + + fn add_task(&self, pid: Pid) -> Result<(), Infallible> { self.add_task_args.borrow_mut().push(pid); Ok(()) } // NOTE: The argument cannot be stored due to lifetime. - fn apply(&self, _controller_opt: &ControllerOpt) -> Result<()> { + fn apply(&self, _controller_opt: &ControllerOpt) -> Result<(), Infallible> { *self.apply_called.borrow_mut() = true; Ok(()) } - fn remove(&self) -> Result<()> { + fn remove(&self) -> Result<(), Infallible> { unimplemented!() } - fn freeze(&self, _state: FreezerState) -> Result<()> { + fn freeze(&self, _state: FreezerState) -> Result<(), Infallible> { unimplemented!() } - fn stats(&self) -> anyhow::Result { + fn stats(&self) -> Result { unimplemented!() } - fn get_all_pids(&self) -> Result> { + fn get_all_pids(&self) -> Result, Infallible> { unimplemented!() } } diff --git a/crates/libcgroups/src/v1/blkio.rs b/crates/libcgroups/src/v1/blkio.rs index bb02bbf28..a66102a40 100644 --- a/crates/libcgroups/src/v1/blkio.rs +++ b/crates/libcgroups/src/v1/blkio.rs @@ -1,14 +1,17 @@ -use std::path::Path; +use std::{ + num::ParseIntError, + path::{Path, PathBuf}, +}; use crate::{ - common::{self, ControllerOpt}, - stats::{self, BlkioDeviceStat, BlkioStats, StatsProvider}, - v1::Controller, + common::{self, ControllerOpt, WrappedIoError}, + stats::{self, BlkioDeviceStat, BlkioStats, ParseDeviceNumberError, StatsProvider}, }; -use 
anyhow::{Context, Result}; use oci_spec::runtime::LinuxBlockIo; +use super::controller::Controller; + // Throttling/upper limit policy // --------------------------------------- // Upper limit on the number of read operations a device can perform specified in bytes @@ -76,10 +79,11 @@ const BLKIO_MERGED: &str = "blkio.io_merged_recursive"; pub struct Blkio {} impl Controller for Blkio { + type Error = WrappedIoError; type Resource = LinuxBlockIo; - fn apply(controller_opt: &ControllerOpt, cgroup_root: &Path) -> Result<()> { - log::debug!("Apply blkio cgroup config"); + fn apply(controller_opt: &ControllerOpt, cgroup_root: &Path) -> Result<(), Self::Error> { + tracing::debug!("Apply blkio cgroup config"); if let Some(blkio) = Self::needs_to_handle(controller_opt) { Self::apply(cgroup_root, blkio)?; @@ -93,10 +97,25 @@ impl Controller for Blkio { } } +#[derive(thiserror::Error, Debug)] +pub enum V1BlkioStatsError { + #[error("io error: {0}")] + WrappedIo(#[from] WrappedIoError), + #[error("failed to parse device value {value} in {path}: {err}")] + FailedParseValue { + value: String, + path: PathBuf, + err: ParseIntError, + }, + #[error("failed to parse device number: {0}")] + FailedParseNumber(#[from] ParseDeviceNumberError), +} + impl StatsProvider for Blkio { + type Error = V1BlkioStatsError; type Stats = BlkioStats; - fn stats(cgroup_path: &Path) -> Result { + fn stats(cgroup_path: &Path) -> Result { if cgroup_path.join(BLKIO_WEIGHT).exists() { return Self::get_weight_division_policy_stats(cgroup_path); } @@ -106,7 +125,7 @@ impl StatsProvider for Blkio { } impl Blkio { - fn apply(root_path: &Path, blkio: &LinuxBlockIo) -> Result<()> { + fn apply(root_path: &Path, blkio: &LinuxBlockIo) -> Result<(), WrappedIoError> { if let Some(blkio_weight) = blkio.weight() { // be aligned with what runc does // See also: https://github.com/opencontainers/runc/blob/81044ad7c902f3fc153cb8ffadaf4da62855193f/libcontainer/cgroups/fs/blkio.go#L28-L33 @@ -159,7 +178,7 @@ impl Blkio { 
Ok(()) } - fn get_throttling_policy_stats(cgroup_path: &Path) -> Result { + fn get_throttling_policy_stats(cgroup_path: &Path) -> Result { let stats = BlkioStats { service_bytes: Self::parse_blkio_file( &cgroup_path.join(BLKIO_THROTTLE_IO_SERVICE_BYTES), @@ -171,7 +190,9 @@ impl Blkio { Ok(stats) } - fn get_weight_division_policy_stats(cgroup_path: &Path) -> Result { + fn get_weight_division_policy_stats( + cgroup_path: &Path, + ) -> Result { let stats = BlkioStats { time: Self::parse_blkio_file(&cgroup_path.join(BLKIO_TIME))?, sectors: Self::parse_blkio_file(&cgroup_path.join(BLKIO_SECTORS))?, @@ -187,7 +208,7 @@ impl Blkio { Ok(stats) } - fn parse_blkio_file(blkio_file: &Path) -> Result> { + fn parse_blkio_file(blkio_file: &Path) -> Result, V1BlkioStatsError> { let content = common::read_cgroup_file(blkio_file)?; let mut stats = Vec::new(); for entry in content.lines() { @@ -203,21 +224,21 @@ impl Blkio { None }; let value = if entry_fields.len() == 3 { - entry_fields[2].parse().with_context(|| { - format!( - "failed to parse device value {} in {}", - entry_fields[2], - blkio_file.display() - ) - })? + entry_fields[2] + .parse() + .map_err(|err| V1BlkioStatsError::FailedParseValue { + value: entry_fields[2].into(), + path: blkio_file.to_path_buf(), + err, + })? } else { - entry_fields[1].parse().with_context(|| { - format!( - "failed to parse device value {} in {}", - entry_fields[1], - blkio_file.display() - ) - })? + entry_fields[1] + .parse() + .map_err(|err| V1BlkioStatsError::FailedParseValue { + value: entry_fields[1].into(), + path: blkio_file.to_path_buf(), + err, + })? 
}; let stat = BlkioDeviceStat { @@ -239,21 +260,20 @@ mod tests { use std::fs; use super::*; - use crate::test::{create_temp_dir, set_fixture, setup}; + use crate::test::{set_fixture, setup}; - use anyhow::Result; use oci_spec::runtime::{LinuxBlockIoBuilder, LinuxThrottleDeviceBuilder}; #[test] fn test_set_blkio_weight() { for cgroup_file in &[BLKIO_WEIGHT, BLKIO_BFQ_WEIGHT] { - let (tmp, weight_file) = setup("test_set_blkio_weight", cgroup_file); + let (tmp, weight_file) = setup(cgroup_file); let blkio = LinuxBlockIoBuilder::default() .weight(200_u16) .build() .unwrap(); - Blkio::apply(&tmp, &blkio).expect("apply blkio"); + Blkio::apply(tmp.path(), &blkio).expect("apply blkio"); let content = fs::read_to_string(weight_file).expect("read blkio weight"); assert_eq!("200", content); } @@ -261,7 +281,7 @@ mod tests { #[test] fn test_set_blkio_read_bps() { - let (tmp, throttle) = setup("test_set_blkio_read_bps", BLKIO_THROTTLE_READ_BPS); + let (tmp, throttle) = setup(BLKIO_THROTTLE_READ_BPS); let blkio = LinuxBlockIoBuilder::default() .throttle_read_bps_device(vec![LinuxThrottleDeviceBuilder::default() @@ -273,16 +293,16 @@ mod tests { .build() .unwrap(); - Blkio::apply(&tmp, &blkio).expect("apply blkio"); + Blkio::apply(tmp.path(), &blkio).expect("apply blkio"); let content = fs::read_to_string(throttle) - .unwrap_or_else(|_| panic!("read {} content", BLKIO_THROTTLE_READ_BPS)); + .unwrap_or_else(|_| panic!("read {BLKIO_THROTTLE_READ_BPS} content")); assert_eq!("8:0 102400", content); } #[test] fn test_set_blkio_write_bps() { - let (tmp, throttle) = setup("test_set_blkio_write_bps", BLKIO_THROTTLE_WRITE_BPS); + let (tmp, throttle) = setup(BLKIO_THROTTLE_WRITE_BPS); let blkio = LinuxBlockIoBuilder::default() .throttle_write_bps_device(vec![LinuxThrottleDeviceBuilder::default() @@ -294,16 +314,16 @@ mod tests { .build() .unwrap(); - Blkio::apply(&tmp, &blkio).expect("apply blkio"); + Blkio::apply(tmp.path(), &blkio).expect("apply blkio"); let content = 
fs::read_to_string(throttle) - .unwrap_or_else(|_| panic!("read {} content", BLKIO_THROTTLE_WRITE_BPS)); + .unwrap_or_else(|_| panic!("read {BLKIO_THROTTLE_WRITE_BPS} content")); assert_eq!("8:0 102400", content); } #[test] fn test_set_blkio_read_iops() { - let (tmp, throttle) = setup("test_set_blkio_read_iops", BLKIO_THROTTLE_READ_IOPS); + let (tmp, throttle) = setup(BLKIO_THROTTLE_READ_IOPS); let blkio = LinuxBlockIoBuilder::default() .throttle_read_iops_device(vec![LinuxThrottleDeviceBuilder::default() @@ -315,16 +335,16 @@ mod tests { .build() .unwrap(); - Blkio::apply(&tmp, &blkio).expect("apply blkio"); + Blkio::apply(tmp.path(), &blkio).expect("apply blkio"); let content = fs::read_to_string(throttle) - .unwrap_or_else(|_| panic!("read {} content", BLKIO_THROTTLE_READ_IOPS)); + .unwrap_or_else(|_| panic!("read {BLKIO_THROTTLE_READ_IOPS} content")); assert_eq!("8:0 102400", content); } #[test] fn test_set_blkio_write_iops() { - let (tmp, throttle) = setup("test_set_blkio_write_iops", BLKIO_THROTTLE_WRITE_IOPS); + let (tmp, throttle) = setup(BLKIO_THROTTLE_WRITE_IOPS); let blkio = LinuxBlockIoBuilder::default() .throttle_write_iops_device(vec![LinuxThrottleDeviceBuilder::default() @@ -336,16 +356,16 @@ mod tests { .build() .unwrap(); - Blkio::apply(&tmp, &blkio).expect("apply blkio"); + Blkio::apply(tmp.path(), &blkio).expect("apply blkio"); let content = fs::read_to_string(throttle) - .unwrap_or_else(|_| panic!("read {} content", BLKIO_THROTTLE_WRITE_IOPS)); + .unwrap_or_else(|_| panic!("read {BLKIO_THROTTLE_WRITE_IOPS} content")); assert_eq!("8:0 102400", content); } #[test] - fn test_stat_throttling_policy() -> Result<()> { - let tmp = create_temp_dir("test_stat_throttling_policy").expect("create test directory"); + fn test_stat_throttling_policy() -> Result<(), Box> { + let tmp = tempfile::tempdir().unwrap(); let content = &[ "8:0 Read 20", "8:0 Write 20", @@ -356,10 +376,10 @@ mod tests { "Total 0", ] .join("\n"); - set_fixture(&tmp, 
BLKIO_THROTTLE_IO_SERVICE_BYTES, content).unwrap(); - set_fixture(&tmp, BLKIO_THROTTLE_IO_SERVICED, content).unwrap(); + set_fixture(tmp.path(), BLKIO_THROTTLE_IO_SERVICE_BYTES, content).unwrap(); + set_fixture(tmp.path(), BLKIO_THROTTLE_IO_SERVICED, content).unwrap(); - let actual = Blkio::stats(&tmp).expect("get cgroup stats"); + let actual = Blkio::stats(tmp.path()).expect("get cgroup stats"); let mut expected = BlkioStats::default(); let devices: Vec = ["Read", "Write", "Sync", "Async", "Discard", "Total"] .iter() diff --git a/crates/libcgroups/src/v1/controller.rs b/crates/libcgroups/src/v1/controller.rs index 3966fb86b..86cd5a17c 100644 --- a/crates/libcgroups/src/v1/controller.rs +++ b/crates/libcgroups/src/v1/controller.rs @@ -1,22 +1,22 @@ use std::{fs, path::Path}; -use anyhow::Result; use nix::unistd::Pid; -use crate::common::{self, ControllerOpt, CGROUP_PROCS}; +use crate::common::{self, ControllerOpt, WrapIoResult, WrappedIoError, CGROUP_PROCS}; -pub trait Controller { +pub(super) trait Controller { + type Error: From; type Resource; /// Adds a new task specified by its pid to the cgroup - fn add_task(pid: Pid, cgroup_path: &Path) -> Result<()> { - fs::create_dir_all(cgroup_path)?; + fn add_task(pid: Pid, cgroup_path: &Path) -> Result<(), Self::Error> { + fs::create_dir_all(cgroup_path).wrap_create_dir(cgroup_path)?; common::write_cgroup_file(cgroup_path.join(CGROUP_PROCS), pid)?; Ok(()) } /// Applies resource restrictions to the cgroup - fn apply(controller_opt: &ControllerOpt, cgroup_root: &Path) -> Result<()>; + fn apply(controller_opt: &ControllerOpt, cgroup_root: &Path) -> Result<(), Self::Error>; /// Checks if the controller needs to handle this request fn needs_to_handle<'a>(controller_opt: &'a ControllerOpt) -> Option<&'a Self::Resource>; diff --git a/crates/libcgroups/src/v1/controller_type.rs b/crates/libcgroups/src/v1/controller_type.rs index c073811a8..dcc3f66e8 100644 --- a/crates/libcgroups/src/v1/controller_type.rs +++ 
b/crates/libcgroups/src/v1/controller_type.rs @@ -1,6 +1,6 @@ use std::fmt::Display; -#[derive(Hash, PartialEq, Eq, Debug, Clone)] +#[derive(Hash, PartialEq, Eq, Debug, Clone, Copy)] pub enum ControllerType { Cpu, CpuAcct, @@ -33,7 +33,7 @@ impl Display for ControllerType { Self::Freezer => "freezer", }; - write!(f, "{}", print) + write!(f, "{print}") } } diff --git a/crates/libcgroups/src/v1/cpu.rs b/crates/libcgroups/src/v1/cpu.rs index ad44ca696..73c9074f3 100644 --- a/crates/libcgroups/src/v1/cpu.rs +++ b/crates/libcgroups/src/v1/cpu.rs @@ -1,14 +1,13 @@ -use std::path::Path; +use std::path::{Path, PathBuf}; -use anyhow::{bail, Context, Result}; use oci_spec::runtime::LinuxCpu; use crate::{ - common::{self, ControllerOpt}, - stats::{CpuThrottling, StatsProvider}, + common::{self, ControllerOpt, WrappedIoError}, + stats::{parse_flat_keyed_data, CpuThrottling, ParseFlatKeyedDataError, StatsProvider}, }; -use super::Controller; +use super::controller::Controller; const CGROUP_CPU_SHARES: &str = "cpu.shares"; const CGROUP_CPU_QUOTA: &str = "cpu.cfs_quota_us"; @@ -22,13 +21,14 @@ const CGROUP_CPU_IDLE: &str = "cpu.idle"; pub struct Cpu {} impl Controller for Cpu { + type Error = WrappedIoError; type Resource = LinuxCpu; - fn apply(controller_opt: &ControllerOpt, cgroup_root: &Path) -> Result<()> { - log::debug!("Apply Cpu cgroup config"); + fn apply(controller_opt: &ControllerOpt, cgroup_root: &Path) -> Result<(), Self::Error> { + tracing::debug!("Apply Cpu cgroup config"); if let Some(cpu) = Self::needs_to_handle(controller_opt) { - Self::apply(cgroup_root, cpu).context("failed to apply cpu resource restrictions")?; + Self::apply(cgroup_root, cpu)?; } Ok(()) @@ -51,53 +51,46 @@ impl Controller for Cpu { } } +#[derive(thiserror::Error, Debug)] +pub enum V1CpuStatsError { + #[error("error parsing data: {0}")] + ParseData(#[from] ParseFlatKeyedDataError), + #[error("missing field {field} from {path}")] + MissingField { field: &'static str, path: PathBuf }, +} + impl 
StatsProvider for Cpu { + type Error = V1CpuStatsError; type Stats = CpuThrottling; - fn stats(cgroup_path: &Path) -> Result { + fn stats(cgroup_path: &Path) -> Result { let mut stats = CpuThrottling::default(); let stat_path = cgroup_path.join(CGROUP_CPU_STAT); - let stat_content = common::read_cgroup_file(&stat_path)?; - - let parts: Vec<&str> = stat_content.split_ascii_whitespace().collect(); - if parts.len() < 6 { - bail!( - "{} contains less than the expected number of entries", - stat_path.display() - ); - } - - if parts[0] != "nr_periods" { - bail!( - "{} does not contain the number of elapsed periods", - stat_path.display() - ); - } - - if parts[2] != "nr_throttled" { - bail!( - "{} does not contain the number of throttled periods", - stat_path.display() - ); - } - if parts[4] != "throttled_time" { - bail!( - "{} does not contain the total time tasks have spent throttled", - stat_path.display() - ); + let stat_table = parse_flat_keyed_data(&stat_path)?; + + macro_rules! get { + ($name: expr => $field: ident) => { + stats.$field = + *stat_table + .get($name) + .ok_or_else(|| V1CpuStatsError::MissingField { + field: $name, + path: stat_path.clone(), + })?; + }; } - stats.periods = parts[1].parse().context("failed to parse nr_periods")?; - stats.throttled_periods = parts[3].parse().context("failed to parse nr_throttled")?; - stats.throttled_time = parts[5].parse().context("failed to parse throttled time")?; + get!("nr_periods" => periods); + get!("nr_throttled" => throttled_periods); + get!("throttled_time" => throttled_time); Ok(stats) } } impl Cpu { - fn apply(root_path: &Path, cpu: &LinuxCpu) -> Result<()> { + fn apply(root_path: &Path, cpu: &LinuxCpu) -> Result<(), WrappedIoError> { if let Some(cpu_shares) = cpu.shares() { if cpu_shares != 0 { common::write_cgroup_file(root_path.join(CGROUP_CPU_SHARES), cpu_shares)?; @@ -143,24 +136,24 @@ impl Cpu { #[cfg(test)] mod tests { use super::*; - use crate::test::{create_temp_dir, set_fixture, setup}; + use 
crate::test::{set_fixture, setup}; use oci_spec::runtime::LinuxCpuBuilder; use std::fs; #[test] fn test_set_shares() { // arrange - let (tmp, shares) = setup("test_set_shares", CGROUP_CPU_SHARES); - let _ = set_fixture(&tmp, CGROUP_CPU_SHARES, "") - .unwrap_or_else(|_| panic!("set test fixture for {}", CGROUP_CPU_SHARES)); + let (tmp, shares) = setup(CGROUP_CPU_SHARES); + let _ = set_fixture(tmp.path(), CGROUP_CPU_SHARES, "") + .unwrap_or_else(|_| panic!("set test fixture for {CGROUP_CPU_SHARES}")); let cpu = LinuxCpuBuilder::default().shares(2048u64).build().unwrap(); // act - Cpu::apply(&tmp, &cpu).expect("apply cpu"); + Cpu::apply(tmp.path(), &cpu).expect("apply cpu"); // assert let content = fs::read_to_string(shares) - .unwrap_or_else(|_| panic!("read {} file content", CGROUP_CPU_SHARES)); + .unwrap_or_else(|_| panic!("read {CGROUP_CPU_SHARES} file content")); assert_eq!(content, 2048.to_string()); } @@ -168,15 +161,15 @@ mod tests { fn test_set_quota() { // arrange const QUOTA: i64 = 200000; - let (tmp, max) = setup("test_set_quota", CGROUP_CPU_QUOTA); + let (tmp, max) = setup(CGROUP_CPU_QUOTA); let cpu = LinuxCpuBuilder::default().quota(QUOTA).build().unwrap(); // act - Cpu::apply(&tmp, &cpu).expect("apply cpu"); + Cpu::apply(tmp.path(), &cpu).expect("apply cpu"); // assert let content = fs::read_to_string(max) - .unwrap_or_else(|_| panic!("read {} file content", CGROUP_CPU_QUOTA)); + .unwrap_or_else(|_| panic!("read {CGROUP_CPU_QUOTA} file content")); assert_eq!(content, QUOTA.to_string()); } @@ -184,15 +177,15 @@ mod tests { fn test_set_period() { // arrange const PERIOD: u64 = 100000; - let (tmp, max) = setup("test_set_period", CGROUP_CPU_PERIOD); + let (tmp, max) = setup(CGROUP_CPU_PERIOD); let cpu = LinuxCpuBuilder::default().period(PERIOD).build().unwrap(); // act - Cpu::apply(&tmp, &cpu).expect("apply cpu"); + Cpu::apply(tmp.path(), &cpu).expect("apply cpu"); // assert let content = fs::read_to_string(max) - .unwrap_or_else(|_| panic!("read {} file 
content", CGROUP_CPU_PERIOD)); + .unwrap_or_else(|_| panic!("read {CGROUP_CPU_PERIOD} file content")); assert_eq!(content, PERIOD.to_string()); } @@ -200,18 +193,18 @@ mod tests { fn test_set_rt_runtime() { // arrange const RUNTIME: i64 = 100000; - let (tmp, max) = setup("test_set_rt_runtime", CGROUP_CPU_RT_RUNTIME); + let (tmp, max) = setup(CGROUP_CPU_RT_RUNTIME); let cpu = LinuxCpuBuilder::default() .realtime_runtime(RUNTIME) .build() .unwrap(); // act - Cpu::apply(&tmp, &cpu).expect("apply cpu"); + Cpu::apply(tmp.path(), &cpu).expect("apply cpu"); // assert let content = fs::read_to_string(max) - .unwrap_or_else(|_| panic!("read {} file content", CGROUP_CPU_RT_RUNTIME)); + .unwrap_or_else(|_| panic!("read {CGROUP_CPU_RT_RUNTIME} file content")); assert_eq!(content, RUNTIME.to_string()); } @@ -230,15 +223,15 @@ mod tests { return; } - let (tmp, max) = setup("test_set_cpu_idle", CGROUP_CPU_IDLE); + let (tmp, max) = setup(CGROUP_CPU_IDLE); let cpu = LinuxCpuBuilder::default().idle(IDLE).build().unwrap(); // act - Cpu::apply(&tmp, &cpu).expect("apply cpu"); + Cpu::apply(tmp.path(), &cpu).expect("apply cpu"); // assert let content = fs::read_to_string(max) - .unwrap_or_else(|_| panic!("read {} file content", CGROUP_CPU_IDLE)); + .unwrap_or_else(|_| panic!("read {CGROUP_CPU_IDLE} file content")); assert_eq!(content, IDLE.to_string()); } @@ -246,33 +239,33 @@ mod tests { fn test_set_rt_period() { // arrange const PERIOD: u64 = 100000; - let (tmp, max) = setup("test_set_rt_period", CGROUP_CPU_RT_PERIOD); + let (tmp, max) = setup(CGROUP_CPU_RT_PERIOD); let cpu = LinuxCpuBuilder::default() .realtime_period(PERIOD) .build() .unwrap(); // act - Cpu::apply(&tmp, &cpu).expect("apply cpu"); + Cpu::apply(tmp.path(), &cpu).expect("apply cpu"); // assert let content = fs::read_to_string(max) - .unwrap_or_else(|_| panic!("read {} file content", CGROUP_CPU_RT_PERIOD)); + .unwrap_or_else(|_| panic!("read {CGROUP_CPU_RT_PERIOD} file content")); assert_eq!(content, 
PERIOD.to_string()); } #[test] fn test_stat_cpu_throttling() { - let tmp = create_temp_dir("test_stat_cpu_throttling").expect("create test directory"); + let tmp = tempfile::tempdir().unwrap(); let stat_content = &[ "nr_periods 165000", "nr_throttled 27", "throttled_time 1080", ] .join("\n"); - set_fixture(&tmp, CGROUP_CPU_STAT, stat_content).expect("create stat file"); + set_fixture(tmp.path(), CGROUP_CPU_STAT, stat_content).expect("create stat file"); - let actual = Cpu::stats(&tmp).expect("get cgroup stats"); + let actual = Cpu::stats(tmp.path()).expect("get cgroup stats"); let expected = CpuThrottling { periods: 165000, throttled_periods: 27, @@ -285,14 +278,14 @@ mod tests { fn test_set_burst() { // arrange let expected_burst: u64 = 100_000; - let (tmp, max) = setup("test_set_burst", CGROUP_CPU_BURST); + let (tmp, max) = setup(CGROUP_CPU_BURST); let cpu = LinuxCpuBuilder::default() .burst(expected_burst) .build() .unwrap(); // act - Cpu::apply(&tmp, &cpu).expect("apply cpu"); + Cpu::apply(tmp.path(), &cpu).expect("apply cpu"); // assert let actual_burst = fs::read_to_string(max).expect("read burst"); diff --git a/crates/libcgroups/src/v1/cpuacct.rs b/crates/libcgroups/src/v1/cpuacct.rs index 3dadf12ac..20ddb2f90 100644 --- a/crates/libcgroups/src/v1/cpuacct.rs +++ b/crates/libcgroups/src/v1/cpuacct.rs @@ -1,13 +1,14 @@ -use std::path::Path; - -use anyhow::{bail, Context, Result}; +use std::{ + num::ParseIntError, + path::{Path, PathBuf}, +}; use crate::{ - common::{self, ControllerOpt}, - stats::{CpuUsage, StatsProvider}, + common::{self, ControllerOpt, WrappedIoError}, + stats::{parse_flat_keyed_data, CpuUsage, ParseFlatKeyedDataError, StatsProvider}, }; -use super::Controller; +use super::controller::Controller; // Contains user mode and kernel mode cpu consumption const CGROUP_CPUACCT_STAT: &str = "cpuacct.stat"; @@ -21,9 +22,10 @@ const CGROUP_CPUACCT_PERCPU: &str = "cpuacct.usage_percpu"; pub struct CpuAcct {} impl Controller for CpuAcct { + type Error = 
WrappedIoError; type Resource = (); - fn apply(_controller_opt: &ControllerOpt, _cgroup_path: &Path) -> Result<()> { + fn apply(_controller_opt: &ControllerOpt, _cgroup_path: &Path) -> Result<(), Self::Error> { Ok(()) } @@ -32,10 +34,31 @@ impl Controller for CpuAcct { } } +#[derive(thiserror::Error, Debug)] +pub enum V1CpuAcctStatsError { + #[error("io error: {0}")] + WrappedIo(#[from] WrappedIoError), + #[error("error parsing data: {0}")] + ParseData(#[from] ParseFlatKeyedDataError), + #[error("missing field {field} from {path}")] + MissingField { field: &'static str, path: PathBuf }, + #[error("failed to parse total cpu usage: {0}")] + ParseTotalCpu(ParseIntError), + #[error("failed to parse per core {mode} mode cpu usage in {path}: {err}")] + FailedToParseField { + mode: &'static str, + path: PathBuf, + err: ParseIntError, + }, + #[error("failed to parse per core cpu usage: {0}")] + ParsePerCore(ParseIntError), +} + impl StatsProvider for CpuAcct { + type Error = V1CpuAcctStatsError; type Stats = CpuUsage; - fn stats(cgroup_path: &Path) -> Result { + fn stats(cgroup_path: &Path) -> Result { let mut stats = CpuUsage::default(); Self::get_total_cpu_usage(cgroup_path, &mut stats)?; Self::get_per_core_usage(cgroup_path, &mut stats)?; @@ -45,54 +68,43 @@ impl StatsProvider for CpuAcct { } impl CpuAcct { - fn get_total_cpu_usage(cgroup_path: &Path, stats: &mut CpuUsage) -> Result<()> { + fn get_total_cpu_usage( + cgroup_path: &Path, + stats: &mut CpuUsage, + ) -> Result<(), V1CpuAcctStatsError> { let stat_file_path = cgroup_path.join(CGROUP_CPUACCT_STAT); - let stat_file_content = common::read_cgroup_file(&stat_file_path)?; - - // the first two entries of the file should look like this - // user 746908 - // system 213896 - let parts: Vec<&str> = stat_file_content.split_whitespace().collect(); - - if parts.len() < 4 { - bail!( - "{} contains less than the expected number of entries", - stat_file_path.display() - ); + let stat_table = 
parse_flat_keyed_data(&stat_file_path)?; + + macro_rules! get { + ($name: expr => $field: ident) => { + stats.$field = + *stat_table + .get($name) + .ok_or_else(|| V1CpuAcctStatsError::MissingField { + field: $name, + path: stat_file_path.clone(), + })?; + }; } - if !parts[0].eq("user") { - bail!( - "{} does not contain user mode cpu usage", - stat_file_path.display() - ); - } - - if !parts[2].eq("system") { - bail!( - "{} does not contain kernel mode cpu usage", - stat_file_path.display() - ); - } - - stats.usage_user = parts[1] - .parse() - .context("failed to parse user mode cpu usage")?; - stats.usage_kernel = parts[3] - .parse() - .context("failed to parse kernel mode cpu usage")?; + get!("user" => usage_user); + get!("system" => usage_kernel); let total = common::read_cgroup_file(cgroup_path.join(CGROUP_CPUACCT_USAGE))?; stats.usage_total = total .trim() .parse() - .context("failed to parse total cpu usage")?; + .map_err(V1CpuAcctStatsError::ParseTotalCpu)?; Ok(()) } - fn get_per_core_usage(cgroup_path: &Path, stats: &mut CpuUsage) -> Result<()> { - let all_content = common::read_cgroup_file(cgroup_path.join(CGROUP_CPUACCT_USAGE_ALL))?; + fn get_per_core_usage( + cgroup_path: &Path, + stats: &mut CpuUsage, + ) -> Result<(), V1CpuAcctStatsError> { + let path = cgroup_path.join(CGROUP_CPUACCT_USAGE_ALL); + let all_content = common::read_cgroup_file(&path)?; // first line is header, skip it for entry in all_content.lines().skip(1) { let entry_parts: Vec<&str> = entry.split_ascii_whitespace().collect(); @@ -100,16 +112,24 @@ impl CpuAcct { continue; } - stats.per_core_usage_user.push( - entry_parts[1] - .parse() - .context("failed to parse per core user mode cpu usage")?, - ); - stats.per_core_usage_kernel.push( - entry_parts[2] - .parse() - .context("failed to parse per core kernel mode cpu usage")?, - ); + stats + .per_core_usage_user + .push(entry_parts[1].parse().map_err(|err| { + V1CpuAcctStatsError::FailedToParseField { + mode: "user", + path: path.clone(), 
+ err, + } + })?); + stats + .per_core_usage_kernel + .push(entry_parts[2].parse().map_err(|err| { + V1CpuAcctStatsError::FailedToParseField { + mode: "kernel", + path: path.clone(), + err, + } + })?); } let percpu_content = common::read_cgroup_file(cgroup_path.join(CGROUP_CPUACCT_PERCPU))?; @@ -117,7 +137,7 @@ impl CpuAcct { .split_ascii_whitespace() .map(|v| v.parse()) .collect::, _>>() - .context("failed to parse per core cpu usage")?; + .map_err(V1CpuAcctStatsError::ParsePerCore)?; Ok(()) } @@ -128,45 +148,45 @@ mod tests { use std::fs; use nix::unistd::Pid; + use tempfile::TempDir; use super::*; use crate::{ common::CGROUP_PROCS, - test::{create_temp_dir, TempDir}, test::{set_fixture, setup}, }; - fn setup_total_cpu(test_name: &str, stat_content: &str, usage_content: &str) -> TempDir { - let tmp = create_temp_dir(test_name).expect("create temp directory for test"); + fn setup_total_cpu(stat_content: &str, usage_content: &str) -> TempDir { + let tmp = tempfile::tempdir().unwrap(); - let _ = set_fixture(&tmp, CGROUP_CPUACCT_STAT, stat_content) - .unwrap_or_else(|_| panic!("create {} file", CGROUP_CPUACCT_STAT)); - let _ = set_fixture(&tmp, CGROUP_CPUACCT_USAGE, usage_content) - .unwrap_or_else(|_| panic!("create {} file", CGROUP_CPUACCT_USAGE)); + let _ = set_fixture(tmp.path(), CGROUP_CPUACCT_STAT, stat_content) + .unwrap_or_else(|_| panic!("create {CGROUP_CPUACCT_STAT} file")); + let _ = set_fixture(tmp.path(), CGROUP_CPUACCT_USAGE, usage_content) + .unwrap_or_else(|_| panic!("create {CGROUP_CPUACCT_USAGE} file")); tmp } - fn setup_per_core(test_name: &str, percpu_content: &str, usage_all_content: &str) -> TempDir { - let tmp = create_temp_dir(test_name).expect("create temp directory for test"); + fn setup_per_core(percpu_content: &str, usage_all_content: &str) -> TempDir { + let tmp = tempfile::tempdir().unwrap(); - let _ = set_fixture(&tmp, CGROUP_CPUACCT_PERCPU, percpu_content) - .unwrap_or_else(|_| panic!("create {} file", CGROUP_CPUACCT_PERCPU)); - let _ 
= set_fixture(&tmp, CGROUP_CPUACCT_USAGE_ALL, usage_all_content) - .unwrap_or_else(|_| panic!("create {} file", CGROUP_CPUACCT_USAGE_ALL)); + let _ = set_fixture(tmp.path(), CGROUP_CPUACCT_PERCPU, percpu_content) + .unwrap_or_else(|_| panic!("create {CGROUP_CPUACCT_PERCPU} file")); + let _ = set_fixture(tmp.path(), CGROUP_CPUACCT_USAGE_ALL, usage_all_content) + .unwrap_or_else(|_| panic!("create {CGROUP_CPUACCT_USAGE_ALL} file")); tmp } #[test] fn test_add_task() { - let (tmp, procs) = setup("test_cpuacct_apply", CGROUP_PROCS); + let (tmp, procs) = setup(CGROUP_PROCS); let pid = Pid::from_raw(1000); - CpuAcct::add_task(pid, &tmp).expect("apply cpuacct"); + CpuAcct::add_task(pid, tmp.path()).expect("apply cpuacct"); let content = fs::read_to_string(procs) - .unwrap_or_else(|_| panic!("read {} file content", CGROUP_PROCS)); + .unwrap_or_else(|_| panic!("read {CGROUP_PROCS} file content")); assert_eq!(content, "1000"); } @@ -174,7 +194,7 @@ mod tests { fn test_stat_total_cpu_usage() { let stat_content = &["user 1300888", "system 364592"].join("\n"); let usage_content = "18198092369681"; - let tmp = setup_total_cpu("test_get_total_cpu", stat_content, usage_content); + let tmp = setup_total_cpu(stat_content, usage_content); let mut stats = CpuUsage::default(); CpuAcct::get_total_cpu_usage(tmp.path(), &mut stats).expect("get cgroup stats"); @@ -195,11 +215,7 @@ mod tests { "3 4021385867300 304269989810", ] .join("\n"); - let tmp = setup_per_core( - "test_get_per_core_cpu_usage", - percpu_content, - usage_all_content, - ); + let tmp = setup_per_core(percpu_content, usage_all_content); let mut stats = CpuUsage::default(); CpuAcct::get_per_core_usage(tmp.path(), &mut stats).expect("get cgroup stats"); diff --git a/crates/libcgroups/src/v1/cpuset.rs b/crates/libcgroups/src/v1/cpuset.rs index ec9cec54c..a190ceaae 100644 --- a/crates/libcgroups/src/v1/cpuset.rs +++ b/crates/libcgroups/src/v1/cpuset.rs @@ -1,24 +1,46 @@ -use std::{fs, path::Path}; +use std::{ + fs, + 
path::{Path, PathBuf, StripPrefixError}, +}; -use anyhow::{bail, Context, Result}; use nix::unistd; use oci_spec::runtime::LinuxCpu; use unistd::Pid; -use crate::common::{self, ControllerOpt, CGROUP_PROCS}; +use crate::common::{self, ControllerOpt, WrapIoResult, WrappedIoError, CGROUP_PROCS}; -use super::{util, Controller, ControllerType}; +use super::{ + controller::Controller, + util::{self, V1MountPointError}, + ControllerType, +}; const CGROUP_CPUSET_CPUS: &str = "cpuset.cpus"; const CGROUP_CPUSET_MEMS: &str = "cpuset.mems"; +#[derive(thiserror::Error, Debug)] +pub enum V1CpuSetControllerError { + #[error("io error: {0}")] + WrappedIo(#[from] WrappedIoError), + #[error("bad cgroup path {path}: {err}")] + BadCgroupPath { + err: StripPrefixError, + path: PathBuf, + }, + #[error("cpuset parent value is empty")] + EmptyParent, + #[error("mount point error: {0}")] + MountPoint(#[from] V1MountPointError), +} + pub struct CpuSet {} impl Controller for CpuSet { + type Error = V1CpuSetControllerError; type Resource = LinuxCpu; - fn add_task(pid: Pid, cgroup_path: &Path) -> Result<()> { - fs::create_dir_all(cgroup_path)?; + fn add_task(pid: Pid, cgroup_path: &Path) -> Result<(), Self::Error> { + fs::create_dir_all(cgroup_path).wrap_create_dir(cgroup_path)?; Self::ensure_not_empty(cgroup_path, CGROUP_CPUSET_CPUS)?; Self::ensure_not_empty(cgroup_path, CGROUP_CPUSET_MEMS)?; @@ -27,12 +49,11 @@ impl Controller for CpuSet { Ok(()) } - fn apply(controller_opt: &ControllerOpt, cgroup_path: &Path) -> Result<()> { - log::debug!("Apply CpuSet cgroup config"); + fn apply(controller_opt: &ControllerOpt, cgroup_path: &Path) -> Result<(), Self::Error> { + tracing::debug!("Apply CpuSet cgroup config"); if let Some(cpuset) = Self::needs_to_handle(controller_opt) { - Self::apply(cgroup_path, cpuset) - .context("failed to apply cpuset resource restrictions")?; + Self::apply(cgroup_path, cpuset)?; } Ok(()) @@ -50,7 +71,7 @@ impl Controller for CpuSet { } impl CpuSet { - fn 
apply(cgroup_path: &Path, cpuset: &LinuxCpu) -> Result<()> { + fn apply(cgroup_path: &Path, cpuset: &LinuxCpu) -> Result<(), V1CpuSetControllerError> { if let Some(cpus) = &cpuset.cpus() { common::write_cgroup_file_str(cgroup_path.join(CGROUP_CPUSET_CPUS), cpus)?; } @@ -64,19 +85,28 @@ impl CpuSet { // if a task is moved into the cgroup and a value has not been set for cpus and mems // Errno 28 (no space left on device) will be returned. Therefore we set the value from the parent if required. - fn ensure_not_empty(cgroup_path: &Path, interface_file: &str) -> Result<()> { + fn ensure_not_empty( + cgroup_path: &Path, + interface_file: &str, + ) -> Result<(), V1CpuSetControllerError> { let mut current = util::get_subsystem_mount_point(&ControllerType::CpuSet)?; - let relative_cgroup_path = cgroup_path.strip_prefix(¤t)?; + let relative_cgroup_path = cgroup_path.strip_prefix(¤t).map_err(|err| { + V1CpuSetControllerError::BadCgroupPath { + err, + path: cgroup_path.to_path_buf(), + } + })?; for component in relative_cgroup_path.components() { - let parent_value = fs::read_to_string(current.join(interface_file))?; + let parent_value = + fs::read_to_string(current.join(interface_file)).wrap_read(cgroup_path)?; if parent_value.trim().is_empty() { - bail!("cpuset parent value is empty") + return Err(V1CpuSetControllerError::EmptyParent); } current.push(component); let child_path = current.join(interface_file); - let child_value = fs::read_to_string(&child_path)?; + let child_value = fs::read_to_string(&child_path).wrap_read(&child_path)?; // the file can contain a newline character. 
Need to trim it away, // otherwise it is not considered empty and value will not be written if child_value.trim().is_empty() { @@ -99,36 +129,36 @@ mod tests { #[test] fn test_set_cpus() { // arrange - let (tmp, cpus) = setup("test_set_cpus", CGROUP_CPUSET_CPUS); + let (tmp, cpus) = setup(CGROUP_CPUSET_CPUS); let cpuset = LinuxCpuBuilder::default() .cpus("1-3".to_owned()) .build() .unwrap(); // act - CpuSet::apply(&tmp, &cpuset).expect("apply cpuset"); + CpuSet::apply(tmp.path(), &cpuset).expect("apply cpuset"); // assert let content = fs::read_to_string(cpus) - .unwrap_or_else(|_| panic!("read {} file content", CGROUP_CPUSET_CPUS)); + .unwrap_or_else(|_| panic!("read {CGROUP_CPUSET_CPUS} file content")); assert_eq!(content, "1-3"); } #[test] fn test_set_mems() { // arrange - let (tmp, mems) = setup("test_set_mems", CGROUP_CPUSET_MEMS); + let (tmp, mems) = setup(CGROUP_CPUSET_MEMS); let cpuset = LinuxCpuBuilder::default() .mems("1-3".to_owned()) .build() .unwrap(); // act - CpuSet::apply(&tmp, &cpuset).expect("apply cpuset"); + CpuSet::apply(tmp.path(), &cpuset).expect("apply cpuset"); // assert let content = fs::read_to_string(mems) - .unwrap_or_else(|_| panic!("read {} file content", CGROUP_CPUSET_MEMS)); + .unwrap_or_else(|_| panic!("read {CGROUP_CPUSET_MEMS} file content")); assert_eq!(content, "1-3"); } } diff --git a/crates/libcgroups/src/v1/devices.rs b/crates/libcgroups/src/v1/devices.rs index 2d58992b8..fc6f392e5 100644 --- a/crates/libcgroups/src/v1/devices.rs +++ b/crates/libcgroups/src/v1/devices.rs @@ -1,18 +1,17 @@ use std::path::Path; -use anyhow::Result; - use super::controller::Controller; -use crate::common::{self, default_allow_devices, default_devices, ControllerOpt}; +use crate::common::{self, default_allow_devices, default_devices, ControllerOpt, WrappedIoError}; use oci_spec::runtime::LinuxDeviceCgroup; pub struct Devices {} impl Controller for Devices { + type Error = WrappedIoError; type Resource = (); - fn apply(controller_opt: 
&ControllerOpt, cgroup_root: &Path) -> Result<()> { - log::debug!("Apply Devices cgroup config"); + fn apply(controller_opt: &ControllerOpt, cgroup_root: &Path) -> Result<(), Self::Error> { + tracing::debug!("Apply Devices cgroup config"); if let Some(devices) = controller_opt.resources.devices().as_ref() { for d in devices { @@ -39,7 +38,7 @@ impl Controller for Devices { } impl Devices { - fn apply_device(device: &LinuxDeviceCgroup, cgroup_root: &Path) -> Result<()> { + fn apply_device(device: &LinuxDeviceCgroup, cgroup_root: &Path) -> Result<(), WrappedIoError> { let path = if device.allow() { cgroup_root.join("devices.allow") } else { @@ -54,33 +53,31 @@ impl Devices { #[cfg(test)] mod tests { use super::*; - use crate::test::create_temp_dir; use crate::test::set_fixture; use oci_spec::runtime::{LinuxDeviceCgroupBuilder, LinuxDeviceType}; use std::fs::read_to_string; #[test] fn test_set_default_devices() { - let tmp = - create_temp_dir("test_set_default_devices").expect("create temp directory for test"); + let tmp = tempfile::tempdir().unwrap(); default_allow_devices().iter().for_each(|d| { // NOTE: We reset the fixtures every iteration because files aren't appended // so what happens in the tests is you get strange overwrites which can contain // remaining bytes from the last iteration. Resetting the files more appropriately // mocks the behavior of cgroup files. 
- set_fixture(&tmp, "devices.allow", "").expect("create allowed devices list"); - set_fixture(&tmp, "devices.deny", "").expect("create denied devices list"); + set_fixture(tmp.path(), "devices.allow", "").expect("create allowed devices list"); + set_fixture(tmp.path(), "devices.deny", "").expect("create denied devices list"); - Devices::apply_device(d, &tmp).expect("Apply default device"); + Devices::apply_device(d, tmp.path()).expect("Apply default device"); println!("Device: {}", d.to_string()); if d.allow() { let allowed_content = - read_to_string(tmp.join("devices.allow")).expect("read to string"); + read_to_string(tmp.path().join("devices.allow")).expect("read to string"); assert_eq!(allowed_content, d.to_string()); } else { let denied_content = - read_to_string(tmp.join("devices.deny")).expect("read to string"); + read_to_string(tmp.path().join("devices.deny")).expect("read to string"); assert_eq!(denied_content, d.to_string()); } }); @@ -88,7 +85,7 @@ mod tests { #[test] fn test_set_mock_devices() { - let tmp = create_temp_dir("test_set_mock_devices").expect("create temp directory for test"); + let tmp = tempfile::tempdir().unwrap(); [ LinuxDeviceCgroupBuilder::default() .allow(true) @@ -121,18 +118,18 @@ mod tests { ] .iter() .for_each(|d| { - set_fixture(&tmp, "devices.allow", "").expect("create allowed devices list"); - set_fixture(&tmp, "devices.deny", "").expect("create denied devices list"); + set_fixture(tmp.path(), "devices.allow", "").expect("create allowed devices list"); + set_fixture(tmp.path(), "devices.deny", "").expect("create denied devices list"); - Devices::apply_device(d, &tmp).expect("Apply default device"); + Devices::apply_device(d, tmp.path()).expect("Apply default device"); println!("Device: {}", d.to_string()); if d.allow() { let allowed_content = - read_to_string(tmp.join("devices.allow")).expect("read to string"); + read_to_string(tmp.path().join("devices.allow")).expect("read to string"); assert_eq!(allowed_content, 
d.to_string()); } else { let denied_content = - read_to_string(tmp.join("devices.deny")).expect("read to string"); + read_to_string(tmp.path().join("devices.deny")).expect("read to string"); assert_eq!(denied_content, d.to_string()); } }); @@ -140,35 +137,35 @@ mod tests { quickcheck! { fn property_test_apply_device(device: LinuxDeviceCgroup) -> bool { - let tmp = create_temp_dir("property_test_apply_device").expect("create temp directory for test"); - set_fixture(&tmp, "devices.allow", "").expect("create allowed devices list"); - set_fixture(&tmp, "devices.deny", "").expect("create denied devices list"); - Devices::apply_device(&device, &tmp).expect("Apply default device"); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), "devices.allow", "").expect("create allowed devices list"); + set_fixture(tmp.path(), "devices.deny", "").expect("create denied devices list"); + Devices::apply_device(&device, tmp.path()).expect("Apply default device"); if device.allow() { let allowed_content = - read_to_string(tmp.join("devices.allow")).expect("read to string"); + read_to_string(tmp.path().join("devices.allow")).expect("read to string"); allowed_content == device.to_string() } else { let denied_content = - read_to_string(tmp.join("devices.deny")).expect("read to string"); + read_to_string(tmp.path().join("devices.deny")).expect("read to string"); denied_content == device.to_string() } } fn property_test_apply_multiple_devices(devices: Vec) -> bool { - let tmp = create_temp_dir("property_test_apply_multiple_devices").expect("create temp directory for test"); + let tmp = tempfile::tempdir().unwrap(); devices.iter() .map(|device| { - set_fixture(&tmp, "devices.allow", "").expect("create allowed devices list"); - set_fixture(&tmp, "devices.deny", "").expect("create denied devices list"); - Devices::apply_device(device, &tmp).expect("Apply default device"); + set_fixture(tmp.path(), "devices.allow", "").expect("create allowed devices list"); + 
set_fixture(tmp.path(), "devices.deny", "").expect("create denied devices list"); + Devices::apply_device(device, tmp.path()).expect("Apply default device"); if device.allow() { let allowed_content = - read_to_string(tmp.join("devices.allow")).expect("read to string"); + read_to_string(tmp.path().join("devices.allow")).expect("read to string"); allowed_content == device.to_string() } else { let denied_content = - read_to_string(tmp.join("devices.deny")).expect("read to string"); + read_to_string(tmp.path().join("devices.deny")).expect("read to string"); denied_content == device.to_string() } }) diff --git a/crates/libcgroups/src/v1/freezer.rs b/crates/libcgroups/src/v1/freezer.rs index cda198c51..66d44520f 100644 --- a/crates/libcgroups/src/v1/freezer.rs +++ b/crates/libcgroups/src/v1/freezer.rs @@ -1,32 +1,38 @@ -use std::io::prelude::*; -use std::{ - fs::{create_dir_all, OpenOptions}, - path::Path, - thread, time, -}; +use std::io::Read; +use std::{fs::OpenOptions, path::Path, thread, time}; -use anyhow::{Result, *}; - -use super::Controller; -use crate::common; +use crate::common::{self, WrapIoResult, WrappedIoError}; use crate::common::{ControllerOpt, FreezerState}; +use super::controller::Controller; + const CGROUP_FREEZER_STATE: &str = "freezer.state"; const FREEZER_STATE_THAWED: &str = "THAWED"; const FREEZER_STATE_FROZEN: &str = "FROZEN"; const FREEZER_STATE_FREEZING: &str = "FREEZING"; +#[derive(thiserror::Error, Debug)] +pub enum V1FreezerControllerError { + #[error("io error: {0}")] + WrappedIo(#[from] WrappedIoError), + #[error("unexpected state {state} while freezing")] + UnexpectedState { state: String }, + #[error("unable to freeze")] + UnableToFreeze, +} + pub struct Freezer {} impl Controller for Freezer { + type Error = V1FreezerControllerError; type Resource = FreezerState; - fn apply(controller_opt: &ControllerOpt, cgroup_root: &Path) -> Result<()> { - log::debug!("Apply Freezer cgroup config"); - create_dir_all(cgroup_root)?; + fn 
apply(controller_opt: &ControllerOpt, cgroup_root: &Path) -> Result<(), Self::Error> { + tracing::debug!("Apply Freezer cgroup config"); + std::fs::create_dir_all(cgroup_root).wrap_create_dir(cgroup_root)?; if let Some(freezer_state) = Self::needs_to_handle(controller_opt) { - Self::apply(freezer_state, cgroup_root).context("failed to appyl freezer")?; + Self::apply(freezer_state, cgroup_root)?; } Ok(()) @@ -38,7 +44,10 @@ impl Controller for Freezer { } impl Freezer { - fn apply(freezer_state: &FreezerState, cgroup_root: &Path) -> Result<()> { + fn apply( + freezer_state: &FreezerState, + cgroup_root: &Path, + ) -> Result<(), V1FreezerControllerError> { match freezer_state { FreezerState::Undefined => {} FreezerState::Thawed => { @@ -48,7 +57,7 @@ impl Freezer { )?; } FreezerState::Frozen => { - let r = || -> Result<()> { + let r = || -> Result<(), V1FreezerControllerError> { // We should do our best to retry if FREEZING is seen until it becomes FROZEN. // Add sleep between retries occasionally helped when system is extremely slow. // see: @@ -78,17 +87,17 @@ impl Freezer { } FREEZER_STATE_FROZEN => { if i > 1 { - log::debug!("frozen after {} retries", i) + tracing::debug!("frozen after {} retries", i) } return Ok(()); } _ => { // should not reach here. - bail!("unexpected state {} while freezing", r.trim()); + return Err(V1FreezerControllerError::UnexpectedState { state: r }); } } } - bail!("unbale to freeze"); + Err(V1FreezerControllerError::UnableToFreeze) }(); if r.is_err() { @@ -105,14 +114,16 @@ impl Freezer { Ok(()) } - fn read_freezer_state(cgroup_root: &Path) -> Result { + fn read_freezer_state(cgroup_root: &Path) -> Result { let path = cgroup_root.join(CGROUP_FREEZER_STATE); let mut content = String::new(); OpenOptions::new() .create(false) .read(true) - .open(path)? - .read_to_string(&mut content)?; + .open(path) + .wrap_open(cgroup_root)? 
+ .read_to_string(&mut content) + .wrap_read(cgroup_root)?; Ok(content) } } @@ -121,54 +132,53 @@ impl Freezer { mod tests { use super::*; use crate::common::{FreezerState, CGROUP_PROCS}; - use crate::test::{create_temp_dir, set_fixture}; + use crate::test::set_fixture; use nix::unistd::Pid; use oci_spec::runtime::LinuxResourcesBuilder; #[test] fn test_set_freezer_state() { - let tmp = - create_temp_dir("test_set_freezer_state").expect("create temp directory for test"); - set_fixture(&tmp, CGROUP_FREEZER_STATE, "").expect("Set fixure for freezer state"); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), CGROUP_FREEZER_STATE, "").expect("Set fixure for freezer state"); // set Frozen state. { let freezer_state = FreezerState::Frozen; - Freezer::apply(&freezer_state, &tmp).expect("Set freezer state"); + Freezer::apply(&freezer_state, tmp.path()).expect("Set freezer state"); - let state_content = - std::fs::read_to_string(tmp.join(CGROUP_FREEZER_STATE)).expect("Read to string"); + let state_content = std::fs::read_to_string(tmp.path().join(CGROUP_FREEZER_STATE)) + .expect("Read to string"); assert_eq!(FREEZER_STATE_FROZEN, state_content); } // set Thawed state. { let freezer_state = FreezerState::Thawed; - Freezer::apply(&freezer_state, &tmp).expect("Set freezer state"); + Freezer::apply(&freezer_state, tmp.path()).expect("Set freezer state"); - let state_content = - std::fs::read_to_string(tmp.join(CGROUP_FREEZER_STATE)).expect("Read to string"); + let state_content = std::fs::read_to_string(tmp.path().join(CGROUP_FREEZER_STATE)) + .expect("Read to string"); assert_eq!(FREEZER_STATE_THAWED, state_content); } // set Undefined state. 
{ - let old_state_content = - std::fs::read_to_string(tmp.join(CGROUP_FREEZER_STATE)).expect("Read to string"); + let old_state_content = std::fs::read_to_string(tmp.path().join(CGROUP_FREEZER_STATE)) + .expect("Read to string"); let freezer_state = FreezerState::Undefined; - Freezer::apply(&freezer_state, &tmp).expect("Set freezer state"); + Freezer::apply(&freezer_state, tmp.path()).expect("Set freezer state"); - let state_content = - std::fs::read_to_string(tmp.join(CGROUP_FREEZER_STATE)).expect("Read to string"); + let state_content = std::fs::read_to_string(tmp.path().join(CGROUP_FREEZER_STATE)) + .expect("Read to string"); assert_eq!(old_state_content, state_content); } } #[test] fn test_add_and_apply() { - let tmp = create_temp_dir("test_add_task").expect("create temp directory for test"); - set_fixture(&tmp, CGROUP_FREEZER_STATE, "").expect("set fixure for freezer state"); - set_fixture(&tmp, CGROUP_PROCS, "").expect("set fixture for proc file"); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), CGROUP_FREEZER_STATE, "").expect("set fixure for freezer state"); + set_fixture(tmp.path(), CGROUP_PROCS, "").expect("set fixture for proc file"); // set Thawed state. 
{ @@ -187,13 +197,13 @@ mod tests { }; let pid = Pid::from_raw(1000); - Freezer::add_task(pid, &tmp).expect("freezer add task"); - ::apply(&controller_opt, &tmp).expect("freezer apply"); - let state_content = - std::fs::read_to_string(tmp.join(CGROUP_FREEZER_STATE)).expect("read to string"); + Freezer::add_task(pid, tmp.path()).expect("freezer add task"); + ::apply(&controller_opt, tmp.path()).expect("freezer apply"); + let state_content = std::fs::read_to_string(tmp.path().join(CGROUP_FREEZER_STATE)) + .expect("read to string"); assert_eq!(FREEZER_STATE_THAWED, state_content); let pid_content = - std::fs::read_to_string(tmp.join(CGROUP_PROCS)).expect("read to string"); + std::fs::read_to_string(tmp.path().join(CGROUP_PROCS)).expect("read to string"); assert_eq!(pid_content, "1000"); } @@ -214,13 +224,13 @@ mod tests { }; let pid = Pid::from_raw(1001); - Freezer::add_task(pid, &tmp).expect("freezer add task"); - ::apply(&controller_opt, &tmp).expect("freezer apply"); - let state_content = - std::fs::read_to_string(tmp.join(CGROUP_FREEZER_STATE)).expect("read to string"); + Freezer::add_task(pid, tmp.path()).expect("freezer add task"); + ::apply(&controller_opt, tmp.path()).expect("freezer apply"); + let state_content = std::fs::read_to_string(tmp.path().join(CGROUP_FREEZER_STATE)) + .expect("read to string"); assert_eq!(FREEZER_STATE_FROZEN, state_content); let pid_content = - std::fs::read_to_string(tmp.join(CGROUP_PROCS)).expect("read to string"); + std::fs::read_to_string(tmp.path().join(CGROUP_PROCS)).expect("read to string"); assert_eq!(pid_content, "1001"); } @@ -242,15 +252,15 @@ mod tests { }; let pid = Pid::from_raw(1002); - let old_state_content = - std::fs::read_to_string(tmp.join(CGROUP_FREEZER_STATE)).expect("read to string"); - Freezer::add_task(pid, &tmp).expect("freezer add task"); - ::apply(&controller_opt, &tmp).expect("freezer apply"); - let state_content = - std::fs::read_to_string(tmp.join(CGROUP_FREEZER_STATE)).expect("read to string"); + let 
old_state_content = std::fs::read_to_string(tmp.path().join(CGROUP_FREEZER_STATE)) + .expect("read to string"); + Freezer::add_task(pid, tmp.path()).expect("freezer add task"); + ::apply(&controller_opt, tmp.path()).expect("freezer apply"); + let state_content = std::fs::read_to_string(tmp.path().join(CGROUP_FREEZER_STATE)) + .expect("read to string"); assert_eq!(old_state_content, state_content); let pid_content = - std::fs::read_to_string(tmp.join(CGROUP_PROCS)).expect("read to string"); + std::fs::read_to_string(tmp.path().join(CGROUP_PROCS)).expect("read to string"); assert_eq!(pid_content, "1002"); } } diff --git a/crates/libcgroups/src/v1/hugetlb.rs b/crates/libcgroups/src/v1/hugetlb.rs index 501cb3778..ae301d02f 100644 --- a/crates/libcgroups/src/v1/hugetlb.rs +++ b/crates/libcgroups/src/v1/hugetlb.rs @@ -1,27 +1,40 @@ -use std::{collections::HashMap, path::Path}; - -use anyhow::{bail, Context, Result}; +use std::{collections::HashMap, num::ParseIntError, path::Path}; use crate::{ - common::{self, ControllerOpt}, - stats::{supported_page_sizes, HugeTlbStats, StatsProvider}, + common::{self, ControllerOpt, EitherError, MustBePowerOfTwo, WrappedIoError}, + stats::{supported_page_sizes, HugeTlbStats, StatsProvider, SupportedPageSizesError}, }; -use super::Controller; use oci_spec::runtime::LinuxHugepageLimit; +use super::controller::Controller; + +#[derive(thiserror::Error, Debug)] +pub enum V1HugeTlbControllerError { + #[error("io error: {0}")] + WrappedIo(#[from] WrappedIoError), + #[error("malformed page size {page_size}: {err}")] + MalformedPageSize { + page_size: String, + err: EitherError, + }, +} + pub struct HugeTlb {} impl Controller for HugeTlb { + type Error = V1HugeTlbControllerError; type Resource = Vec; - fn apply(controller_opt: &ControllerOpt, cgroup_root: &std::path::Path) -> Result<()> { - log::debug!("Apply Hugetlb cgroup config"); + fn apply( + controller_opt: &ControllerOpt, + cgroup_root: &std::path::Path, + ) -> Result<(), Self::Error> { 
+ tracing::debug!("Apply Hugetlb cgroup config"); if let Some(hugepage_limits) = Self::needs_to_handle(controller_opt) { for hugetlb in hugepage_limits { - Self::apply(cgroup_root, hugetlb) - .context("failed to apply hugetlb resource restrictions")? + Self::apply(cgroup_root, hugetlb)? } } @@ -39,10 +52,21 @@ impl Controller for HugeTlb { } } +#[derive(thiserror::Error, Debug)] +pub enum V1HugeTlbStatsError { + #[error("io error: {0}")] + WrappedIo(#[from] WrappedIoError), + #[error("error getting supported page sizes: {0}")] + SupportedPageSizes(#[from] SupportedPageSizesError), + #[error("error parsing value: {0}")] + Parse(#[from] ParseIntError), +} + impl StatsProvider for HugeTlb { + type Error = V1HugeTlbStatsError; type Stats = HashMap; - fn stats(cgroup_path: &Path) -> Result { + fn stats(cgroup_path: &Path) -> Result { let page_sizes = supported_page_sizes()?; let mut hugetlb_stats = HashMap::with_capacity(page_sizes.len()); @@ -56,15 +80,29 @@ impl StatsProvider for HugeTlb { } impl HugeTlb { - fn apply(root_path: &Path, hugetlb: &LinuxHugepageLimit) -> Result<()> { - let page_size: String = hugetlb + fn apply( + root_path: &Path, + hugetlb: &LinuxHugepageLimit, + ) -> Result<(), V1HugeTlbControllerError> { + let raw_page_size: String = hugetlb .page_size() .chars() .take_while(|c| c.is_ascii_digit()) .collect(); - let page_size: u64 = page_size.parse()?; + let page_size: u64 = match raw_page_size.parse() { + Ok(page_size) => page_size, + Err(err) => { + return Err(V1HugeTlbControllerError::MalformedPageSize { + page_size: raw_page_size, + err: EitherError::Left(err), + }) + } + }; if !Self::is_power_of_two(page_size) { - bail!("page size must be in the format of 2^(integer)"); + return Err(V1HugeTlbControllerError::MalformedPageSize { + page_size: raw_page_size, + err: EitherError::Right(MustBePowerOfTwo), + }); } common::write_cgroup_file( @@ -75,21 +113,24 @@ impl HugeTlb { } fn is_power_of_two(number: u64) -> bool { - (number != 0) && (number & 
(number - 1)) == 0 + (number != 0) && (number & (number.saturating_sub(1))) == 0 } - fn stats_for_page_size(cgroup_path: &Path, page_size: &str) -> Result { + fn stats_for_page_size( + cgroup_path: &Path, + page_size: &str, + ) -> Result { let mut stats = HugeTlbStats::default(); - let usage_file = format!("hugetlb.{}.usage_in_bytes", page_size); + let usage_file = format!("hugetlb.{page_size}.usage_in_bytes"); let usage_content = common::read_cgroup_file(cgroup_path.join(usage_file))?; stats.usage = usage_content.trim().parse()?; - let max_file = format!("hugetlb.{}.max_usage_in_bytes", page_size); + let max_file = format!("hugetlb.{page_size}.max_usage_in_bytes"); let max_content = common::read_cgroup_file(cgroup_path.join(max_file))?; stats.max_usage = max_content.trim().parse()?; - let failcnt_file = format!("hugetlb.{}.failcnt", page_size); + let failcnt_file = format!("hugetlb.{page_size}.failcnt"); let failcnt_content = common::read_cgroup_file(cgroup_path.join(failcnt_file))?; stats.fail_count = failcnt_content.trim().parse()?; @@ -100,15 +141,15 @@ impl HugeTlb { #[cfg(test)] mod tests { use super::*; - use crate::test::{create_temp_dir, set_fixture}; + use crate::test::set_fixture; use oci_spec::runtime::LinuxHugepageLimitBuilder; use std::fs::read_to_string; #[test] fn test_set_hugetlb() { let page_file_name = "hugetlb.2MB.limit_in_bytes"; - let tmp = create_temp_dir("test_set_hugetlb").expect("create temp directory for test"); - set_fixture(&tmp, page_file_name, "0").expect("Set fixture for 2 MB page size"); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), page_file_name, "0").expect("Set fixture for 2 MB page size"); let hugetlb = LinuxHugepageLimitBuilder::default() .page_size("2MB") @@ -116,15 +157,15 @@ mod tests { .build() .unwrap(); - HugeTlb::apply(&tmp, &hugetlb).expect("apply hugetlb"); - let content = read_to_string(tmp.join(page_file_name)).expect("Read hugetlb file content"); + HugeTlb::apply(tmp.path(), 
&hugetlb).expect("apply hugetlb"); + let content = + read_to_string(tmp.path().join(page_file_name)).expect("Read hugetlb file content"); assert_eq!(hugetlb.limit().to_string(), content); } #[test] fn test_set_hugetlb_with_invalid_page_size() { - let tmp = create_temp_dir("test_set_hugetlb_with_invalid_page_size") - .expect("create temp directory for test"); + let tmp = tempfile::tempdir().unwrap(); let hugetlb = LinuxHugepageLimitBuilder::default() .page_size("3MB") @@ -132,7 +173,7 @@ mod tests { .build() .unwrap(); - let result = HugeTlb::apply(&tmp, &hugetlb); + let result = HugeTlb::apply(tmp.path(), &hugetlb); assert!( result.is_err(), "page size that is not a power of two should be an error" @@ -142,10 +183,10 @@ mod tests { quickcheck! { fn property_test_set_hugetlb(hugetlb: LinuxHugepageLimit) -> bool { let page_file_name = format!("hugetlb.{:?}.limit_in_bytes", hugetlb.page_size()); - let tmp = create_temp_dir("property_test_set_hugetlb").expect("create temp directory for test"); - set_fixture(&tmp, &page_file_name, "0").expect("Set fixture for page size"); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), &page_file_name, "0").expect("Set fixture for page size"); - let result = HugeTlb::apply(&tmp, &hugetlb); + let result = HugeTlb::apply(tmp.path(), &hugetlb); let page_size: String = hugetlb .page_size() @@ -156,7 +197,7 @@ mod tests { if HugeTlb::is_power_of_two(page_size) && page_size != 1 { let content = - read_to_string(tmp.join(page_file_name)).expect("Read hugetlb file content"); + read_to_string(tmp.path().join(page_file_name)).expect("Read hugetlb file content"); hugetlb.limit().to_string() == content } else { result.is_err() @@ -166,13 +207,13 @@ mod tests { #[test] fn test_stat_hugetlb() { - let tmp = create_temp_dir("test_stat_hugetlb").expect("create temp directory for test"); - set_fixture(&tmp, "hugetlb.2MB.usage_in_bytes", "1024\n").expect("set hugetlb usage"); - set_fixture(&tmp, "hugetlb.2MB.max_usage_in_bytes", 
"4096\n") + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), "hugetlb.2MB.usage_in_bytes", "1024\n").expect("set hugetlb usage"); + set_fixture(tmp.path(), "hugetlb.2MB.max_usage_in_bytes", "4096\n") .expect("set hugetlb max usage"); - set_fixture(&tmp, "hugetlb.2MB.failcnt", "5").expect("set hugetlb fail count"); + set_fixture(tmp.path(), "hugetlb.2MB.failcnt", "5").expect("set hugetlb fail count"); - let actual = HugeTlb::stats_for_page_size(&tmp, "2MB").expect("get cgroup stats"); + let actual = HugeTlb::stats_for_page_size(tmp.path(), "2MB").expect("get cgroup stats"); let expected = HugeTlbStats { usage: 1024, diff --git a/crates/libcgroups/src/v1/manager.rs b/crates/libcgroups/src/v1/manager.rs index 43ee7b524..022ffbbcb 100644 --- a/crates/libcgroups/src/v1/manager.rs +++ b/crates/libcgroups/src/v1/manager.rs @@ -3,44 +3,103 @@ use std::path::Path; use std::time::Duration; use std::{collections::HashMap, path::PathBuf}; -use anyhow::bail; -use anyhow::Result; use nix::unistd::Pid; use procfs::process::Process; +use procfs::ProcError; -use super::ControllerType as CtrlType; +use super::blkio::V1BlkioStatsError; +use super::cpu::V1CpuStatsError; +use super::cpuacct::V1CpuAcctStatsError; +use super::cpuset::V1CpuSetControllerError; +use super::freezer::V1FreezerControllerError; +use super::hugetlb::{V1HugeTlbControllerError, V1HugeTlbStatsError}; +use super::memory::{V1MemoryControllerError, V1MemoryStatsError}; +use super::util::V1MountPointError; use super::{ - blkio::Blkio, controller_type::CONTROLLERS, cpu::Cpu, cpuacct::CpuAcct, cpuset::CpuSet, - devices::Devices, freezer::Freezer, hugetlb::HugeTlb, memory::Memory, + blkio::Blkio, controller::Controller, controller_type::CONTROLLERS, cpu::Cpu, cpuacct::CpuAcct, + cpuset::CpuSet, devices::Devices, freezer::Freezer, hugetlb::HugeTlb, memory::Memory, network_classifier::NetworkClassifier, network_priority::NetworkPriority, - perf_event::PerfEvent, pids::Pids, util, Controller, + 
perf_event::PerfEvent, pids::Pids, util, ControllerType as CtrlType, }; -use crate::common::{self, CgroupManager, ControllerOpt, FreezerState, PathBufExt, CGROUP_PROCS}; -use crate::stats::{Stats, StatsProvider}; +use crate::common::{ + self, AnyCgroupManager, CgroupManager, ControllerOpt, FreezerState, JoinSafelyError, + PathBufExt, WrapIoResult, WrappedIoError, CGROUP_PROCS, +}; +use crate::stats::{PidStatsError, Stats, StatsProvider}; +use crate::v1::ControllerType; pub struct Manager { subsystems: HashMap, } +#[derive(thiserror::Error, Debug)] +pub enum V1ManagerError { + #[error("io error: {0}")] + WrappedIo(#[from] WrappedIoError), + #[error("mount point error: {0}")] + MountPoint(#[from] V1MountPointError), + #[error("proc error: {0}")] + Proc(#[from] ProcError), + #[error("while joining paths: {0}")] + JoinSafely(#[from] JoinSafelyError), + #[error("cgroup {0} is required to fulfill the request, but is not supported by this system")] + CGroupRequired(ControllerType), + #[error("subsystem does not exist")] + SubsystemDoesNotExist, + + #[error(transparent)] + BlkioController(WrappedIoError), + #[error(transparent)] + CpuController(WrappedIoError), + #[error(transparent)] + CpuAcctController(WrappedIoError), + #[error(transparent)] + CpuSetController(#[from] V1CpuSetControllerError), + #[error(transparent)] + FreezerController(#[from] V1FreezerControllerError), + #[error(transparent)] + HugeTlbController(#[from] V1HugeTlbControllerError), + #[error(transparent)] + MemoryController(#[from] V1MemoryControllerError), + #[error(transparent)] + PidsController(WrappedIoError), + + #[error(transparent)] + BlkioStats(#[from] V1BlkioStatsError), + #[error(transparent)] + CpuStats(#[from] V1CpuStatsError), + #[error(transparent)] + CpuAcctStats(#[from] V1CpuAcctStatsError), + #[error(transparent)] + PidsStats(PidStatsError), + #[error(transparent)] + HugeTlbStats(#[from] V1HugeTlbStatsError), + #[error(transparent)] + MemoryStats(#[from] V1MemoryStatsError), +} + impl 
Manager { /// Constructs a new cgroup manager with cgroups_path being relative to the root of the subsystem - pub fn new(cgroup_path: PathBuf) -> Result { + pub fn new(cgroup_path: &Path) -> Result { let mut subsystems = HashMap::::new(); for subsystem in CONTROLLERS { - if let Ok(subsystem_path) = Self::get_subsystem_path(&cgroup_path, subsystem) { - subsystems.insert(subsystem.clone(), subsystem_path); + if let Ok(subsystem_path) = Self::get_subsystem_path(cgroup_path, subsystem) { + subsystems.insert(*subsystem, subsystem_path); } else { - log::warn!("cgroup {} not supported on this system", subsystem); + tracing::warn!("cgroup {} not supported on this system", subsystem); } } Ok(Manager { subsystems }) } - fn get_subsystem_path(cgroup_path: &Path, subsystem: &CtrlType) -> Result { - log::debug!("Get path for subsystem: {}", subsystem); + fn get_subsystem_path( + cgroup_path: &Path, + subsystem: &CtrlType, + ) -> Result { + tracing::debug!("Get path for subsystem: {}", subsystem); let mount_point = util::get_subsystem_mount_point(subsystem)?; let cgroup = Process::myself()? 
@@ -61,7 +120,7 @@ impl Manager { fn get_required_controllers( &self, controller_opt: &ControllerOpt, - ) -> Result> { + ) -> Result, V1ManagerError> { let mut required_controllers = HashMap::new(); for controller in CONTROLLERS { @@ -88,25 +147,31 @@ impl Manager { if let Some(subsystem_path) = self.subsystems.get(controller) { required_controllers.insert(controller, subsystem_path); } else { - bail!("cgroup {} is required to fulfill the request, but is not supported by this system", controller); + return Err(V1ManagerError::CGroupRequired(*controller)); } } } Ok(required_controllers) } + + pub fn any(self) -> AnyCgroupManager { + AnyCgroupManager::V1(self) + } } impl CgroupManager for Manager { - fn get_all_pids(&self) -> Result> { + type Error = V1ManagerError; + + fn get_all_pids(&self) -> Result, Self::Error> { let devices = self.subsystems.get(&CtrlType::Devices); if let Some(p) = devices { - common::get_all_pids(p) + Ok(common::get_all_pids(p)?) } else { - bail!("subsystem does not exist") + Err(V1ManagerError::SubsystemDoesNotExist) } } - fn add_task(&self, pid: Pid) -> Result<()> { + fn add_task(&self, pid: Pid) -> Result<(), Self::Error> { for subsys in &self.subsystems { match subsys.0 { CtrlType::Cpu => Cpu::add_task(pid, subsys.1)?, @@ -117,7 +182,9 @@ impl CgroupManager for Manager { CtrlType::Memory => Memory::add_task(pid, subsys.1)?, CtrlType::Pids => Pids::add_task(pid, subsys.1)?, CtrlType::PerfEvent => PerfEvent::add_task(pid, subsys.1)?, - CtrlType::Blkio => Blkio::add_task(pid, subsys.1)?, + CtrlType::Blkio => { + Blkio::add_task(pid, subsys.1).map_err(V1ManagerError::BlkioController)? 
+ } CtrlType::NetworkPriority => NetworkPriority::add_task(pid, subsys.1)?, CtrlType::NetworkClassifier => NetworkClassifier::add_task(pid, subsys.1)?, CtrlType::Freezer => Freezer::add_task(pid, subsys.1)?, @@ -127,7 +194,7 @@ impl CgroupManager for Manager { Ok(()) } - fn apply(&self, controller_opt: &ControllerOpt) -> Result<()> { + fn apply(&self, controller_opt: &ControllerOpt) -> Result<(), Self::Error> { for subsys in self.get_required_controllers(controller_opt)? { match subsys.0 { CtrlType::Cpu => Cpu::apply(controller_opt, subsys.1)?, @@ -138,7 +205,8 @@ impl CgroupManager for Manager { CtrlType::Memory => Memory::apply(controller_opt, subsys.1)?, CtrlType::Pids => Pids::apply(controller_opt, subsys.1)?, CtrlType::PerfEvent => PerfEvent::apply(controller_opt, subsys.1)?, - CtrlType::Blkio => Blkio::apply(controller_opt, subsys.1)?, + CtrlType::Blkio => Blkio::apply(controller_opt, subsys.1) + .map_err(V1ManagerError::BlkioController)?, CtrlType::NetworkPriority => NetworkPriority::apply(controller_opt, subsys.1)?, CtrlType::NetworkClassifier => NetworkClassifier::apply(controller_opt, subsys.1)?, CtrlType::Freezer => Freezer::apply(controller_opt, subsys.1)?, @@ -148,15 +216,18 @@ impl CgroupManager for Manager { Ok(()) } - fn remove(&self) -> Result<()> { + fn remove(&self) -> Result<(), Self::Error> { for cgroup_path in &self.subsystems { if cgroup_path.1.exists() { - log::debug!("remove cgroup {:?}", cgroup_path.1); + tracing::debug!("remove cgroup {:?}", cgroup_path.1); let procs_path = cgroup_path.1.join(CGROUP_PROCS); - let procs = fs::read_to_string(procs_path)?; + let procs = fs::read_to_string(&procs_path).wrap_read(&procs_path)?; for line in procs.lines() { - let pid: i32 = line.parse()?; + let pid: i32 = line + .parse() + .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, err)) + .wrap_other(&procs_path)?; let _ = nix::sys::signal::kill(Pid::from_raw(pid), nix::sys::signal::SIGKILL); } @@ -167,27 +238,29 @@ impl CgroupManager 
for Manager { Ok(()) } - fn freeze(&self, state: FreezerState) -> Result<()> { + fn freeze(&self, state: FreezerState) -> Result<(), Self::Error> { let controller_opt = ControllerOpt { resources: &Default::default(), freezer_state: Some(state), oom_score_adj: None, disable_oom_killer: false, }; - Freezer::apply( + Ok(Freezer::apply( &controller_opt, self.subsystems.get(&CtrlType::Freezer).unwrap(), - ) + )?) } - fn stats(&self) -> Result { + fn stats(&self) -> Result { let mut stats = Stats::default(); for subsystem in &self.subsystems { match subsystem.0 { CtrlType::Cpu => stats.cpu.throttling = Cpu::stats(subsystem.1)?, CtrlType::CpuAcct => stats.cpu.usage = CpuAcct::stats(subsystem.1)?, - CtrlType::Pids => stats.pids = Pids::stats(subsystem.1)?, + CtrlType::Pids => { + stats.pids = Pids::stats(subsystem.1).map_err(V1ManagerError::PidsStats)? + } CtrlType::HugeTlb => stats.hugetlb = HugeTlb::stats(subsystem.1)?, CtrlType::Blkio => stats.blkio = Blkio::stats(subsystem.1)?, CtrlType::Memory => stats.memory = Memory::stats(subsystem.1)?, diff --git a/crates/libcgroups/src/v1/memory.rs b/crates/libcgroups/src/v1/memory.rs index c13145c2a..98fa6b1e3 100644 --- a/crates/libcgroups/src/v1/memory.rs +++ b/crates/libcgroups/src/v1/memory.rs @@ -1,16 +1,21 @@ use std::collections::HashMap; +use std::fmt::Display; use std::io::{prelude::*, Write}; +use std::num::ParseIntError; +use std::path::PathBuf; use std::{fs::OpenOptions, path::Path}; -use anyhow::{anyhow, bail, Result}; use nix::errno::Errno; -use super::Controller; -use crate::common::{self, ControllerOpt}; -use crate::stats::{self, parse_single_value, MemoryData, MemoryStats, StatsProvider}; +use crate::common::{self, ControllerOpt, WrapIoResult, WrappedIoError}; +use crate::stats::{ + self, parse_single_value, MemoryData, MemoryStats, ParseFlatKeyedDataError, StatsProvider, +}; use oci_spec::runtime::LinuxMemory; +use super::controller::Controller; + const CGROUP_MEMORY_SWAP_LIMIT: &str = 
"memory.memsw.limit_in_bytes"; const CGROUP_MEMORY_LIMIT: &str = "memory.limit_in_bytes"; const CGROUP_MEMORY_USAGE: &str = "memory.usage_in_bytes"; @@ -43,13 +48,57 @@ const MEMORY_LIMIT_IN_BYTES: &str = ".limit_in_bytes"; // Number of times memory usage hit limits const MEMORY_FAIL_COUNT: &str = ".failcnt"; +#[derive(Debug)] +pub enum MalformedThing { + Limit, + Usage, + MaxUsage, +} + +impl Display for MalformedThing { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + MalformedThing::Limit => f.write_str("memory limit"), + MalformedThing::Usage => f.write_str("memory usage"), + MalformedThing::MaxUsage => f.write_str("memory max usage"), + } + } +} + +#[derive(thiserror::Error, Debug)] +pub enum V1MemoryControllerError { + #[error("io error: {0}")] + WrappedIo(#[from] WrappedIoError), + #[error("invalid swappiness value: {supplied}. valid range is 0-100")] + SwappinessOutOfRange { supplied: u64 }, + #[error("read malformed {thing} {limit} from {path}: {err}")] + MalformedValue { + thing: MalformedThing, + limit: String, + path: PathBuf, + err: ParseIntError, + }, + #[error( + "unable to set memory limit to {target} (current usage: {current}, peak usage: {peak})" + )] + UnableToSet { + target: i64, + current: u64, + peak: u64, + }, +} + pub struct Memory {} impl Controller for Memory { + type Error = V1MemoryControllerError; type Resource = LinuxMemory; - fn apply(controller_opt: &ControllerOpt, cgroup_root: &Path) -> Result<()> { - log::debug!("Apply Memory cgroup config"); + fn apply( + controller_opt: &ControllerOpt, + cgroup_root: &Path, + ) -> Result<(), V1MemoryControllerError> { + tracing::debug!("Apply Memory cgroup config"); if let Some(memory) = &controller_opt.resources.memory() { let reservation = memory.reservation().unwrap_or(0); @@ -77,10 +126,9 @@ impl Controller for Memory { )?; } else { // invalid swappiness value - return Err(anyhow!( - "Invalid swappiness value: {}. 
Valid range is 0-100", - swappiness - )); + return Err(V1MemoryControllerError::SwappinessOutOfRange { + supplied: swappiness, + }); } } @@ -105,11 +153,19 @@ impl Controller for Memory { controller_opt.resources.memory().as_ref() } } +#[derive(thiserror::Error, Debug)] +pub enum V1MemoryStatsError { + #[error("io error: {0}")] + WrappedIo(#[from] WrappedIoError), + #[error("error parsing stat data: {0}")] + Parse(#[from] ParseFlatKeyedDataError), +} impl StatsProvider for Memory { + type Error = V1MemoryStatsError; type Stats = MemoryStats; - fn stats(cgroup_path: &Path) -> Result { + fn stats(cgroup_path: &Path) -> Result { let memory = Self::get_memory_data(cgroup_path, MEMORY_PREFIX)?; let memswap = Self::get_memory_data(cgroup_path, MEMORY_AND_SWAP_PREFIX)?; let kernel = Self::get_memory_data(cgroup_path, MEMORY_KERNEL_PREFIX)?; @@ -131,26 +187,29 @@ impl StatsProvider for Memory { } impl Memory { - fn get_memory_data(cgroup_path: &Path, file_prefix: &str) -> Result { + fn get_memory_data( + cgroup_path: &Path, + file_prefix: &str, + ) -> Result { let memory_data = MemoryData { usage: parse_single_value( - &cgroup_path.join(format!("{}{}", file_prefix, MEMORY_USAGE_IN_BYTES)), + &cgroup_path.join(format!("{file_prefix}{MEMORY_USAGE_IN_BYTES}")), )?, max_usage: parse_single_value( - &cgroup_path.join(format!("{}{}", file_prefix, MEMORY_MAX_USAGE_IN_BYTES)), + &cgroup_path.join(format!("{file_prefix}{MEMORY_MAX_USAGE_IN_BYTES}")), )?, limit: parse_single_value( - &cgroup_path.join(format!("{}{}", file_prefix, MEMORY_LIMIT_IN_BYTES)), + &cgroup_path.join(format!("{file_prefix}{MEMORY_LIMIT_IN_BYTES}")), )?, fail_count: parse_single_value( - &cgroup_path.join(format!("{}{}", file_prefix, MEMORY_FAIL_COUNT)), + &cgroup_path.join(format!("{file_prefix}{MEMORY_FAIL_COUNT}")), )?, }; Ok(memory_data) } - fn hierarchy_enabled(cgroup_path: &Path) -> Result { + fn hierarchy_enabled(cgroup_path: &Path) -> Result { let hierarchy_path = 
cgroup_path.join(MEMORY_USE_HIERARCHY); let hierarchy = common::read_cgroup_file(hierarchy_path)?; let enabled = matches!(hierarchy.trim(), "1"); @@ -158,18 +217,20 @@ impl Memory { Ok(enabled) } - fn get_stat_data(cgroup_path: &Path) -> Result> { + fn get_stat_data(cgroup_path: &Path) -> Result, ParseFlatKeyedDataError> { stats::parse_flat_keyed_data(&cgroup_path.join(MEMORY_STAT)) } - fn get_memory_usage(cgroup_root: &Path) -> Result { + fn get_memory_usage(cgroup_root: &Path) -> Result { let path = cgroup_root.join(CGROUP_MEMORY_USAGE); let mut contents = String::new(); OpenOptions::new() .create(false) .read(true) - .open(path)? - .read_to_string(&mut contents)?; + .open(&path) + .wrap_open(&path)? + .read_to_string(&mut contents) + .wrap_read(&path)?; contents = contents.trim().to_string(); @@ -177,18 +238,28 @@ impl Memory { return Ok(u64::MAX); } - let val = contents.parse::()?; + let val = + contents + .parse::() + .map_err(|err| V1MemoryControllerError::MalformedValue { + thing: MalformedThing::Usage, + limit: contents, + path, + err, + })?; Ok(val) } - fn get_memory_max_usage(cgroup_root: &Path) -> Result { + fn get_memory_max_usage(cgroup_root: &Path) -> Result { let path = cgroup_root.join(CGROUP_MEMORY_MAX_USAGE); let mut contents = String::new(); OpenOptions::new() .create(false) .read(true) - .open(path)? - .read_to_string(&mut contents)?; + .open(&path) + .wrap_open(&path)? 
+ .read_to_string(&mut contents) + .wrap_read(&path)?; contents = contents.trim().to_string(); @@ -196,18 +267,28 @@ impl Memory { return Ok(u64::MAX); } - let val = contents.parse::()?; + let val = + contents + .parse::() + .map_err(|err| V1MemoryControllerError::MalformedValue { + thing: MalformedThing::MaxUsage, + limit: contents, + path, + err, + })?; Ok(val) } - fn get_memory_limit(cgroup_root: &Path) -> Result { + fn get_memory_limit(cgroup_root: &Path) -> Result { let path = cgroup_root.join(CGROUP_MEMORY_LIMIT); let mut contents = String::new(); OpenOptions::new() .create(false) .read(true) - .open(path)? - .read_to_string(&mut contents)?; + .open(&path) + .wrap_open(&path)? + .read_to_string(&mut contents) + .wrap_read(&path)?; contents = contents.trim().to_string(); @@ -215,21 +296,32 @@ impl Memory { return Ok(i64::MAX); } - let val = contents.parse::()?; + let val = + contents + .parse::() + .map_err(|err| V1MemoryControllerError::MalformedValue { + thing: MalformedThing::Limit, + limit: contents, + path, + err, + })?; Ok(val) } - fn set(val: T, path: &Path) -> std::io::Result<()> { + fn set(val: T, path: &Path) -> Result<(), WrappedIoError> { + let data = val.to_string(); OpenOptions::new() .create(false) .write(true) .truncate(true) - .open(path)? - .write_all(val.to_string().as_bytes())?; + .open(path) + .wrap_open(path)? 
+ .write_all(data.as_bytes()) + .wrap_write(path, data)?; Ok(()) } - fn set_memory(val: i64, cgroup_root: &Path) -> Result<()> { + fn set_memory(val: i64, cgroup_root: &Path) -> Result<(), V1MemoryControllerError> { if val == 0 { return Ok(()); } @@ -239,27 +331,26 @@ impl Memory { Ok(_) => Ok(()), Err(e) => { // we need to look into the raw OS error for an EBUSY status - match e.raw_os_error() { + match e.inner().raw_os_error() { Some(code) => match Errno::from_i32(code) { Errno::EBUSY => { let usage = Self::get_memory_usage(cgroup_root)?; let max_usage = Self::get_memory_max_usage(cgroup_root)?; - bail!( - "unable to set memory limit to {} (current usage: {}, peak usage: {})", - val, - usage, - max_usage, - ) + Err(V1MemoryControllerError::UnableToSet { + target: val, + current: usage, + peak: max_usage, + }) } - _ => bail!(e), + _ => Err(e)?, }, - None => bail!(e), + None => Err(e)?, } } } } - fn set_swap(swap: i64, cgroup_root: &Path) -> Result<()> { + fn set_swap(swap: i64, cgroup_root: &Path) -> Result<(), V1MemoryControllerError> { if swap == 0 { return Ok(()); } @@ -273,7 +364,7 @@ impl Memory { swap: i64, is_updated: bool, cgroup_root: &Path, - ) -> Result<()> { + ) -> Result<(), V1MemoryControllerError> { // According to runc we need to change the write sequence of // limit and swap so it won't fail, because the new and old // values don't fit the kernel's validation @@ -288,7 +379,7 @@ impl Memory { Ok(()) } - fn apply(resource: &LinuxMemory, cgroup_root: &Path) -> Result<()> { + fn apply(resource: &LinuxMemory, cgroup_root: &Path) -> Result<(), V1MemoryControllerError> { match resource.limit() { Some(limit) => { let current_limit = Self::get_memory_limit(cgroup_root)?; @@ -320,19 +411,20 @@ impl Memory { mod tests { use super::*; use crate::common::CGROUP_PROCS; - use crate::test::{create_temp_dir, set_fixture}; + use crate::test::set_fixture; use oci_spec::runtime::{LinuxMemoryBuilder, LinuxResourcesBuilder}; #[test] fn test_set_memory() { let limit = 
1024; - let tmp = create_temp_dir("test_set_memory").expect("create temp directory for test"); - set_fixture(&tmp, CGROUP_MEMORY_USAGE, "0").expect("Set fixure for memory usage"); - set_fixture(&tmp, CGROUP_MEMORY_MAX_USAGE, "0").expect("Set fixure for max memory usage"); - set_fixture(&tmp, CGROUP_MEMORY_LIMIT, "0").expect("Set fixure for memory limit"); - Memory::set_memory(limit, &tmp).expect("Set memory limit"); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), CGROUP_MEMORY_USAGE, "0").expect("Set fixure for memory usage"); + set_fixture(tmp.path(), CGROUP_MEMORY_MAX_USAGE, "0") + .expect("Set fixure for max memory usage"); + set_fixture(tmp.path(), CGROUP_MEMORY_LIMIT, "0").expect("Set fixure for memory limit"); + Memory::set_memory(limit, tmp.path()).expect("Set memory limit"); let content = - std::fs::read_to_string(tmp.join(CGROUP_MEMORY_LIMIT)).expect("Read to string"); + std::fs::read_to_string(tmp.path().join(CGROUP_MEMORY_LIMIT)).expect("Read to string"); assert_eq!(limit.to_string(), content) } @@ -340,46 +432,46 @@ mod tests { fn pass_set_memory_if_limit_is_zero() { let sample_val = "1024"; let limit = 0; - let tmp = create_temp_dir("pass_set_memory_if_limit_is_zero") - .expect("create temp directory for test"); - set_fixture(&tmp, CGROUP_MEMORY_LIMIT, sample_val).expect("Set fixure for memory limit"); - Memory::set_memory(limit, &tmp).expect("Set memory limit"); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), CGROUP_MEMORY_LIMIT, sample_val) + .expect("Set fixure for memory limit"); + Memory::set_memory(limit, tmp.path()).expect("Set memory limit"); let content = - std::fs::read_to_string(tmp.join(CGROUP_MEMORY_LIMIT)).expect("Read to string"); + std::fs::read_to_string(tmp.path().join(CGROUP_MEMORY_LIMIT)).expect("Read to string"); assert_eq!(content, sample_val) } #[test] fn test_set_swap() { let limit = 512; - let tmp = create_temp_dir("test_set_swap").expect("create temp directory for test"); - 
set_fixture(&tmp, CGROUP_MEMORY_SWAP_LIMIT, "0").expect("Set fixure for swap limit"); - Memory::set_swap(limit, &tmp).expect("Set swap limit"); - let content = - std::fs::read_to_string(tmp.join(CGROUP_MEMORY_SWAP_LIMIT)).expect("Read to string"); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), CGROUP_MEMORY_SWAP_LIMIT, "0").expect("Set fixure for swap limit"); + Memory::set_swap(limit, tmp.path()).expect("Set swap limit"); + let content = std::fs::read_to_string(tmp.path().join(CGROUP_MEMORY_SWAP_LIMIT)) + .expect("Read to string"); assert_eq!(limit.to_string(), content) } #[test] fn test_set_memory_and_swap() { - let tmp = - create_temp_dir("test_set_memory_and_swap").expect("create temp directory for test"); - set_fixture(&tmp, CGROUP_MEMORY_USAGE, "0").expect("Set fixure for memory usage"); - set_fixture(&tmp, CGROUP_MEMORY_MAX_USAGE, "0").expect("Set fixure for max memory usage"); - set_fixture(&tmp, CGROUP_MEMORY_LIMIT, "0").expect("Set fixure for memory limit"); - set_fixture(&tmp, CGROUP_MEMORY_SWAP_LIMIT, "0").expect("Set fixure for swap limit"); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), CGROUP_MEMORY_USAGE, "0").expect("Set fixure for memory usage"); + set_fixture(tmp.path(), CGROUP_MEMORY_MAX_USAGE, "0") + .expect("Set fixure for max memory usage"); + set_fixture(tmp.path(), CGROUP_MEMORY_LIMIT, "0").expect("Set fixure for memory limit"); + set_fixture(tmp.path(), CGROUP_MEMORY_SWAP_LIMIT, "0").expect("Set fixure for swap limit"); // test unlimited memory with no set swap { let limit = -1; let linux_memory = LinuxMemoryBuilder::default().limit(limit).build().unwrap(); - Memory::apply(&linux_memory, &tmp).expect("Set memory and swap"); + Memory::apply(&linux_memory, tmp.path()).expect("Set memory and swap"); - let limit_content = - std::fs::read_to_string(tmp.join(CGROUP_MEMORY_LIMIT)).expect("Read to string"); + let limit_content = std::fs::read_to_string(tmp.path().join(CGROUP_MEMORY_LIMIT)) + .expect("Read 
to string"); assert_eq!(limit.to_string(), limit_content); - let swap_content = std::fs::read_to_string(tmp.join(CGROUP_MEMORY_SWAP_LIMIT)) + let swap_content = std::fs::read_to_string(tmp.path().join(CGROUP_MEMORY_SWAP_LIMIT)) .expect("Read to string"); // swap should be set to -1 also assert_eq!(limit.to_string(), swap_content); @@ -394,13 +486,13 @@ mod tests { .swap(swap) .build() .unwrap(); - Memory::apply(&linux_memory, &tmp).expect("Set memory and swap"); + Memory::apply(&linux_memory, tmp.path()).expect("Set memory and swap"); - let limit_content = - std::fs::read_to_string(tmp.join(CGROUP_MEMORY_LIMIT)).expect("Read to string"); + let limit_content = std::fs::read_to_string(tmp.path().join(CGROUP_MEMORY_LIMIT)) + .expect("Read to string"); assert_eq!(limit.to_string(), limit_content); - let swap_content = std::fs::read_to_string(tmp.join(CGROUP_MEMORY_SWAP_LIMIT)) + let swap_content = std::fs::read_to_string(tmp.path().join(CGROUP_MEMORY_SWAP_LIMIT)) .expect("Read to string"); assert_eq!(swap.to_string(), swap_content); } @@ -408,18 +500,17 @@ mod tests { quickcheck! 
{ fn property_test_set_memory(linux_memory: LinuxMemory, disable_oom_killer: bool) -> bool { - let tmp = - create_temp_dir("property_test_set_memory").expect("create temp directory for test"); - set_fixture(&tmp, CGROUP_MEMORY_USAGE, "0").expect("Set fixure for memory usage"); - set_fixture(&tmp, CGROUP_MEMORY_MAX_USAGE, "0").expect("Set fixure for max memory usage"); - set_fixture(&tmp, CGROUP_MEMORY_LIMIT, "0").expect("Set fixure for memory limit"); - set_fixture(&tmp, CGROUP_MEMORY_SWAP_LIMIT, "0").expect("Set fixure for swap limit"); - set_fixture(&tmp, CGROUP_MEMORY_SWAPPINESS, "0").expect("Set fixure for swappiness"); - set_fixture(&tmp, CGROUP_MEMORY_RESERVATION, "0").expect("Set fixture for memory reservation"); - set_fixture(&tmp, CGROUP_MEMORY_OOM_CONTROL, "0").expect("Set fixture for oom control"); - set_fixture(&tmp, CGROUP_KERNEL_MEMORY_LIMIT, "0").expect("Set fixture for kernel memory limit"); - set_fixture(&tmp, CGROUP_KERNEL_TCP_MEMORY_LIMIT, "0").expect("Set fixture for kernel tcp memory limit"); - set_fixture(&tmp, CGROUP_PROCS, "").expect("set fixture for proc file"); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), CGROUP_MEMORY_USAGE, "0").expect("Set fixure for memory usage"); + set_fixture(tmp.path(), CGROUP_MEMORY_MAX_USAGE, "0").expect("Set fixure for max memory usage"); + set_fixture(tmp.path(), CGROUP_MEMORY_LIMIT, "0").expect("Set fixure for memory limit"); + set_fixture(tmp.path(), CGROUP_MEMORY_SWAP_LIMIT, "0").expect("Set fixure for swap limit"); + set_fixture(tmp.path(), CGROUP_MEMORY_SWAPPINESS, "0").expect("Set fixure for swappiness"); + set_fixture(tmp.path(), CGROUP_MEMORY_RESERVATION, "0").expect("Set fixture for memory reservation"); + set_fixture(tmp.path(), CGROUP_MEMORY_OOM_CONTROL, "0").expect("Set fixture for oom control"); + set_fixture(tmp.path(), CGROUP_KERNEL_MEMORY_LIMIT, "0").expect("Set fixture for kernel memory limit"); + set_fixture(tmp.path(), CGROUP_KERNEL_TCP_MEMORY_LIMIT, "0").expect("Set 
fixture for kernel tcp memory limit"); + set_fixture(tmp.path(), CGROUP_PROCS, "").expect("set fixture for proc file"); // clone to avoid use of moved value later on @@ -434,7 +525,7 @@ mod tests { freezer_state: None, }; - let result = ::apply(&controller_opt, &tmp); + let result = ::apply(&controller_opt, tmp.path()); if result.is_err() { @@ -452,7 +543,7 @@ mod tests { } // check memory reservation - let reservation_content = std::fs::read_to_string(tmp.join(CGROUP_MEMORY_RESERVATION)).expect("read memory reservation"); + let reservation_content = std::fs::read_to_string(tmp.path().join(CGROUP_MEMORY_RESERVATION)).expect("read memory reservation"); let reservation_check = match memory_limits.reservation() { Some(reservation) => { reservation_content == reservation.to_string() @@ -461,7 +552,7 @@ mod tests { }; // check kernel memory limit - let kernel_content = std::fs::read_to_string(tmp.join(CGROUP_KERNEL_MEMORY_LIMIT)).expect("read kernel memory limit"); + let kernel_content = std::fs::read_to_string(tmp.path().join(CGROUP_KERNEL_MEMORY_LIMIT)).expect("read kernel memory limit"); let kernel_check = match memory_limits.kernel() { Some(kernel) => { kernel_content == kernel.to_string() @@ -470,7 +561,7 @@ mod tests { }; // check kernel tcp memory limit - let kernel_tcp_content = std::fs::read_to_string(tmp.join(CGROUP_KERNEL_TCP_MEMORY_LIMIT)).expect("read kernel tcp memory limit"); + let kernel_tcp_content = std::fs::read_to_string(tmp.path().join(CGROUP_KERNEL_TCP_MEMORY_LIMIT)).expect("read kernel tcp memory limit"); let kernel_tcp_check = match memory_limits.kernel_tcp() { Some(kernel_tcp) => { kernel_tcp_content == kernel_tcp.to_string() @@ -479,7 +570,7 @@ mod tests { }; // check swappiness - let swappiness_content = std::fs::read_to_string(tmp.join(CGROUP_MEMORY_SWAPPINESS)).expect("read swappiness"); + let swappiness_content = std::fs::read_to_string(tmp.path().join(CGROUP_MEMORY_SWAPPINESS)).expect("read swappiness"); let swappiness_check = match 
memory_limits.swappiness() { Some(swappiness) if swappiness <= 100 => { swappiness_content == swappiness.to_string() @@ -490,8 +581,8 @@ mod tests { }; // check limit and swap - let limit_content = std::fs::read_to_string(tmp.join(CGROUP_MEMORY_LIMIT)).expect("read memory limit"); - let swap_content = std::fs::read_to_string(tmp.join(CGROUP_MEMORY_SWAP_LIMIT)).expect("read swap memory limit"); + let limit_content = std::fs::read_to_string(tmp.path().join(CGROUP_MEMORY_LIMIT)).expect("read memory limit"); + let swap_content = std::fs::read_to_string(tmp.path().join(CGROUP_MEMORY_SWAP_LIMIT)).expect("read swap memory limit"); let limit_swap_check = match memory_limits.limit() { Some(limit) => { match memory_limits.swap() { @@ -522,11 +613,11 @@ mod tests { }; // useful for debugging - println!("reservation_check: {:?}", reservation_check); - println!("kernel_check: {:?}", kernel_check); - println!("kernel_tcp_check: {:?}", kernel_tcp_check); - println!("swappiness_check: {:?}", swappiness_check); - println!("limit_swap_check: {:?}", limit_swap_check); + println!("reservation_check: {reservation_check:?}"); + println!("kernel_check: {kernel_check:?}"); + println!("kernel_tcp_check: {kernel_tcp_check:?}"); + println!("swappiness_check: {swappiness_check:?}"); + println!("limit_swap_check: {limit_swap_check:?}"); // combine all the checks reservation_check && kernel_check && kernel_tcp_check && swappiness_check && limit_swap_check @@ -535,33 +626,33 @@ mod tests { #[test] fn test_stat_memory_data() { - let tmp = create_temp_dir("test_stat_memory_data").expect("create test directory"); + let tmp = tempfile::tempdir().unwrap(); set_fixture( - &tmp, - &format!("{}{}", MEMORY_PREFIX, MEMORY_USAGE_IN_BYTES), + tmp.path(), + &format!("{MEMORY_PREFIX}{MEMORY_USAGE_IN_BYTES}"), "1024\n", ) .unwrap(); set_fixture( - &tmp, - &format!("{}{}", MEMORY_PREFIX, MEMORY_MAX_USAGE_IN_BYTES), + tmp.path(), + &format!("{MEMORY_PREFIX}{MEMORY_MAX_USAGE_IN_BYTES}"), "2048\n", ) .unwrap(); 
set_fixture( - &tmp, - &format!("{}{}", MEMORY_PREFIX, MEMORY_LIMIT_IN_BYTES), + tmp.path(), + &format!("{MEMORY_PREFIX}{MEMORY_LIMIT_IN_BYTES}"), "4096\n", ) .unwrap(); set_fixture( - &tmp, - &format!("{}{}", MEMORY_PREFIX, MEMORY_FAIL_COUNT), + tmp.path(), + &format!("{MEMORY_PREFIX}{MEMORY_FAIL_COUNT}"), "5\n", ) .unwrap(); - let actual = Memory::get_memory_data(&tmp, MEMORY_PREFIX).expect("get cgroup stats"); + let actual = Memory::get_memory_data(tmp.path(), MEMORY_PREFIX).expect("get cgroup stats"); let expected = MemoryData { usage: 1024, max_usage: 2048, @@ -574,25 +665,25 @@ mod tests { #[test] fn test_stat_hierarchy_enabled() { - let tmp = create_temp_dir("test_stat_hierarchy_enabled").expect("create test directory"); - set_fixture(&tmp, MEMORY_USE_HIERARCHY, "1").unwrap(); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), MEMORY_USE_HIERARCHY, "1").unwrap(); - let enabled = Memory::hierarchy_enabled(&tmp).expect("get cgroup stats"); + let enabled = Memory::hierarchy_enabled(tmp.path()).expect("get cgroup stats"); assert!(enabled) } #[test] fn test_stat_hierarchy_disabled() { - let tmp = create_temp_dir("test_stat_hierarchy_disabled").expect("create test directory"); - set_fixture(&tmp, MEMORY_USE_HIERARCHY, "0").unwrap(); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), MEMORY_USE_HIERARCHY, "0").unwrap(); - let enabled = Memory::hierarchy_enabled(&tmp).expect("get cgroup stats"); + let enabled = Memory::hierarchy_enabled(tmp.path()).expect("get cgroup stats"); assert!(!enabled) } #[test] fn test_stat_memory_stats() { - let tmp = create_temp_dir("test_stat_memory_stats").expect("create test directory"); + let tmp = tempfile::tempdir().unwrap(); let content = [ "cache 0", "rss 0", @@ -604,9 +695,9 @@ mod tests { "hierarchical_memsw_limit 9223372036854771712", ] .join("\n"); - set_fixture(&tmp, MEMORY_STAT, &content).unwrap(); + set_fixture(tmp.path(), MEMORY_STAT, &content).unwrap(); - let actual = 
Memory::get_stat_data(&tmp).expect("get cgroup data"); + let actual = Memory::get_stat_data(tmp.path()).expect("get cgroup data"); let expected: HashMap = [ ("cache".to_owned(), 0), ("rss".to_owned(), 0), diff --git a/crates/libcgroups/src/v1/mod.rs b/crates/libcgroups/src/v1/mod.rs index e80e9a01e..59405acd2 100644 --- a/crates/libcgroups/src/v1/mod.rs +++ b/crates/libcgroups/src/v1/mod.rs @@ -14,6 +14,5 @@ mod network_priority; pub mod perf_event; mod pids; pub mod util; -pub use controller::Controller; pub use controller_type::ControllerType; pub use manager::Manager; diff --git a/crates/libcgroups/src/v1/network_classifier.rs b/crates/libcgroups/src/v1/network_classifier.rs index 6c27d2abd..c9a272103 100644 --- a/crates/libcgroups/src/v1/network_classifier.rs +++ b/crates/libcgroups/src/v1/network_classifier.rs @@ -1,22 +1,21 @@ use std::path::Path; -use anyhow::{Context, Result}; - -use super::Controller; -use crate::common::{self, ControllerOpt}; +use crate::common::{self, ControllerOpt, WrappedIoError}; use oci_spec::runtime::LinuxNetwork; +use super::controller::Controller; + pub struct NetworkClassifier {} impl Controller for NetworkClassifier { + type Error = WrappedIoError; type Resource = LinuxNetwork; - fn apply(controller_opt: &ControllerOpt, cgroup_root: &Path) -> Result<()> { - log::debug!("Apply NetworkClassifier cgroup config"); + fn apply(controller_opt: &ControllerOpt, cgroup_root: &Path) -> Result<(), Self::Error> { + tracing::debug!("Apply NetworkClassifier cgroup config"); if let Some(network) = Self::needs_to_handle(controller_opt) { - Self::apply(cgroup_root, network) - .context("failed to apply network classifier resource restrictions")?; + Self::apply(cgroup_root, network)?; } Ok(()) @@ -28,7 +27,7 @@ impl Controller for NetworkClassifier { } impl NetworkClassifier { - fn apply(root_path: &Path, network: &LinuxNetwork) -> Result<()> { + fn apply(root_path: &Path, network: &LinuxNetwork) -> Result<(), WrappedIoError> { if let 
Some(class_id) = network.class_id() { common::write_cgroup_file(root_path.join("net_cls.classid"), class_id)?; } @@ -40,14 +39,13 @@ impl NetworkClassifier { #[cfg(test)] mod tests { use super::*; - use crate::test::{create_temp_dir, set_fixture}; + use crate::test::set_fixture; use oci_spec::runtime::LinuxNetworkBuilder; #[test] fn test_apply_network_classifier() { - let tmp = create_temp_dir("test_apply_network_classifier") - .expect("create temp directory for test"); - set_fixture(&tmp, "net_cls.classid", "0").expect("set fixture for classID"); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), "net_cls.classid", "0").expect("set fixture for classID"); let id = 0x100001u32; let network = LinuxNetworkBuilder::default() @@ -56,10 +54,10 @@ mod tests { .build() .unwrap(); - NetworkClassifier::apply(&tmp, &network).expect("apply network classID"); + NetworkClassifier::apply(tmp.path(), &network).expect("apply network classID"); - let content = - std::fs::read_to_string(tmp.join("net_cls.classid")).expect("Read classID contents"); + let content = std::fs::read_to_string(tmp.path().join("net_cls.classid")) + .expect("Read classID contents"); assert_eq!(id.to_string(), content); } } diff --git a/crates/libcgroups/src/v1/network_priority.rs b/crates/libcgroups/src/v1/network_priority.rs index c55533378..fd5a3bcd1 100644 --- a/crates/libcgroups/src/v1/network_priority.rs +++ b/crates/libcgroups/src/v1/network_priority.rs @@ -1,22 +1,21 @@ use std::path::Path; -use anyhow::{Context, Result}; - -use super::Controller; -use crate::common::{self, ControllerOpt}; +use crate::common::{self, ControllerOpt, WrappedIoError}; use oci_spec::runtime::LinuxNetwork; +use super::controller::Controller; + pub struct NetworkPriority {} impl Controller for NetworkPriority { + type Error = WrappedIoError; type Resource = LinuxNetwork; - fn apply(controller_opt: &ControllerOpt, cgroup_root: &Path) -> Result<()> { - log::debug!("Apply NetworkPriority cgroup config"); + fn 
apply(controller_opt: &ControllerOpt, cgroup_root: &Path) -> Result<(), Self::Error> { + tracing::debug!("Apply NetworkPriority cgroup config"); if let Some(network) = Self::needs_to_handle(controller_opt) { - Self::apply(cgroup_root, network) - .context("failed to apply network priority resource restrictions")?; + Self::apply(cgroup_root, network)?; } Ok(()) @@ -28,7 +27,7 @@ impl Controller for NetworkPriority { } impl NetworkPriority { - fn apply(root_path: &Path, network: &LinuxNetwork) -> Result<()> { + fn apply(root_path: &Path, network: &LinuxNetwork) -> Result<(), WrappedIoError> { if let Some(ni_priorities) = network.priorities() { let priorities: String = ni_priorities.iter().map(|p| p.to_string()).collect(); common::write_cgroup_file_str(root_path.join("net_prio.ifpriomap"), priorities.trim())?; @@ -41,14 +40,13 @@ impl NetworkPriority { #[cfg(test)] mod tests { use super::*; - use crate::test::{create_temp_dir, set_fixture}; + use crate::test::set_fixture; use oci_spec::runtime::{LinuxInterfacePriorityBuilder, LinuxNetworkBuilder}; #[test] fn test_apply_network_priorites() { - let tmp = create_temp_dir("test_apply_network_priorites") - .expect("create temp directory for test"); - set_fixture(&tmp, "net_prio.ifpriomap", "").expect("set fixture for priority map"); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), "net_prio.ifpriomap", "").expect("set fixture for priority map"); let priorities = vec![ LinuxInterfacePriorityBuilder::default() .name("a") @@ -67,10 +65,10 @@ mod tests { .build() .unwrap(); - NetworkPriority::apply(&tmp, &network).expect("apply network priorities"); + NetworkPriority::apply(tmp.path(), &network).expect("apply network priorities"); - let content = - std::fs::read_to_string(tmp.join("net_prio.ifpriomap")).expect("Read classID contents"); + let content = std::fs::read_to_string(tmp.path().join("net_prio.ifpriomap")) + .expect("Read classID contents"); assert_eq!(priorities_string.trim(), content); } } diff --git 
a/crates/libcgroups/src/v1/perf_event.rs b/crates/libcgroups/src/v1/perf_event.rs index f9654d473..cd220d763 100644 --- a/crates/libcgroups/src/v1/perf_event.rs +++ b/crates/libcgroups/src/v1/perf_event.rs @@ -1,14 +1,15 @@ -use super::Controller; -use crate::common::ControllerOpt; -use anyhow::Result; +use crate::common::{ControllerOpt, WrappedIoError}; use std::path::Path; +use super::controller::Controller; + pub struct PerfEvent {} impl Controller for PerfEvent { + type Error = WrappedIoError; type Resource = (); - fn apply(_controller_opt: &ControllerOpt, _cgroup_root: &Path) -> Result<()> { + fn apply(_controller_opt: &ControllerOpt, _cgroup_root: &Path) -> Result<(), Self::Error> { Ok(()) } //no need to handle any case @@ -28,13 +29,13 @@ mod tests { #[test] fn test_add_task() { - let (tmp, procs) = setup("test_perf_event_add_task", CGROUP_PROCS); + let (tmp, procs) = setup(CGROUP_PROCS); let pid = Pid::from_raw(1000); - PerfEvent::add_task(pid, &tmp).expect("apply perf_event"); + PerfEvent::add_task(pid, tmp.path()).expect("apply perf_event"); let content = fs::read_to_string(procs) - .unwrap_or_else(|_| panic!("read {} file content", CGROUP_PROCS)); + .unwrap_or_else(|_| panic!("read {CGROUP_PROCS} file content")); assert_eq!(content, "1000"); } } diff --git a/crates/libcgroups/src/v1/pids.rs b/crates/libcgroups/src/v1/pids.rs index cd7ffb18c..de040760a 100644 --- a/crates/libcgroups/src/v1/pids.rs +++ b/crates/libcgroups/src/v1/pids.rs @@ -1,27 +1,27 @@ use std::path::Path; -use anyhow::{Context, Result}; - -use super::Controller; use crate::{ - common::{self, ControllerOpt}, - stats::{self, PidStats, StatsProvider}, + common::{self, ControllerOpt, WrappedIoError}, + stats::{self, PidStats, PidStatsError, StatsProvider}, }; use oci_spec::runtime::LinuxPids; +use super::controller::Controller; + // Contains the maximum allowed number of active pids const CGROUP_PIDS_MAX: &str = "pids.max"; pub struct Pids {} impl Controller for Pids { + type Error = 
WrappedIoError; type Resource = LinuxPids; - fn apply(controller_opt: &ControllerOpt, cgroup_root: &Path) -> Result<()> { - log::debug!("Apply pids cgroup config"); + fn apply(controller_opt: &ControllerOpt, cgroup_root: &Path) -> Result<(), Self::Error> { + tracing::debug!("Apply pids cgroup config"); if let Some(pids) = &controller_opt.resources.pids() { - Self::apply(cgroup_root, pids).context("failed to apply pids resource restrictions")?; + Self::apply(cgroup_root, pids)?; } Ok(()) @@ -33,15 +33,16 @@ impl Controller for Pids { } impl StatsProvider for Pids { + type Error = PidStatsError; type Stats = PidStats; - fn stats(cgroup_path: &Path) -> Result { + fn stats(cgroup_path: &Path) -> Result { stats::pid_stats(cgroup_path) } } impl Pids { - fn apply(root_path: &Path, pids: &LinuxPids) -> Result<()> { + fn apply(root_path: &Path, pids: &LinuxPids) -> Result<(), WrappedIoError> { let limit = if pids.limit() > 0 { pids.limit().to_string() } else { @@ -56,7 +57,7 @@ impl Pids { #[cfg(test)] mod tests { use super::*; - use crate::test::{create_temp_dir, set_fixture}; + use crate::test::set_fixture; use oci_spec::runtime::LinuxPidsBuilder; // Contains the current number of active pids @@ -64,38 +65,38 @@ mod tests { #[test] fn test_set_pids() { - let tmp = create_temp_dir("test_set_pids").expect("create temp directory for test"); - set_fixture(&tmp, CGROUP_PIDS_MAX, "1000").expect("Set fixture for 1000 pids"); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), CGROUP_PIDS_MAX, "1000").expect("Set fixture for 1000 pids"); let pids = LinuxPidsBuilder::default().limit(1000).build().unwrap(); - Pids::apply(&tmp, &pids).expect("apply pids"); + Pids::apply(tmp.path(), &pids).expect("apply pids"); let content = - std::fs::read_to_string(tmp.join(CGROUP_PIDS_MAX)).expect("Read pids contents"); + std::fs::read_to_string(tmp.path().join(CGROUP_PIDS_MAX)).expect("Read pids contents"); assert_eq!(pids.limit().to_string(), content); } #[test] fn 
test_set_pids_max() { - let tmp = create_temp_dir("test_set_pids_max").expect("create temp directory for test"); - set_fixture(&tmp, CGROUP_PIDS_MAX, "0").expect("set fixture for 0 pids"); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), CGROUP_PIDS_MAX, "0").expect("set fixture for 0 pids"); let pids = LinuxPidsBuilder::default().limit(0).build().unwrap(); - Pids::apply(&tmp, &pids).expect("apply pids"); + Pids::apply(tmp.path(), &pids).expect("apply pids"); let content = - std::fs::read_to_string(tmp.join(CGROUP_PIDS_MAX)).expect("Read pids contents"); + std::fs::read_to_string(tmp.path().join(CGROUP_PIDS_MAX)).expect("Read pids contents"); assert_eq!("max".to_string(), content); } #[test] fn test_stat_pids() { - let tmp = create_temp_dir("test_stat_pids").expect("create temp dir for test"); - set_fixture(&tmp, CGROUP_PIDS_CURRENT, "5\n").unwrap(); - set_fixture(&tmp, CGROUP_PIDS_MAX, "30\n").unwrap(); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), CGROUP_PIDS_CURRENT, "5\n").unwrap(); + set_fixture(tmp.path(), CGROUP_PIDS_MAX, "30\n").unwrap(); - let stats = Pids::stats(&tmp).expect("get cgroup stats"); + let stats = Pids::stats(tmp.path()).expect("get cgroup stats"); assert_eq!(stats.current, 5); assert_eq!(stats.limit, 30); @@ -103,11 +104,11 @@ mod tests { #[test] fn test_stat_pids_max() { - let tmp = create_temp_dir("test_stat_pids_max").expect("create temp dir for test"); - set_fixture(&tmp, CGROUP_PIDS_CURRENT, "5\n").unwrap(); - set_fixture(&tmp, CGROUP_PIDS_MAX, "max\n").unwrap(); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), CGROUP_PIDS_CURRENT, "5\n").unwrap(); + set_fixture(tmp.path(), CGROUP_PIDS_MAX, "max\n").unwrap(); - let stats = Pids::stats(&tmp).expect("get cgroup stats"); + let stats = Pids::stats(tmp.path()).expect("get cgroup stats"); assert_eq!(stats.current, 5); assert_eq!(stats.limit, 0); diff --git a/crates/libcgroups/src/v1/util.rs b/crates/libcgroups/src/v1/util.rs index 
acc7bb3f1..050967a7a 100644 --- a/crates/libcgroups/src/v1/util.rs +++ b/crates/libcgroups/src/v1/util.rs @@ -1,16 +1,26 @@ use std::{collections::HashMap, path::PathBuf}; -use anyhow::{anyhow, Context, Result}; -use procfs::process::Process; +use procfs::{process::Process, ProcError}; use super::{controller_type::CONTROLLERS, ControllerType}; +#[derive(thiserror::Error, Debug)] +pub enum V1MountPointError { + #[error("failed to read process info from /proc/self: {0}")] + ReadSelf(ProcError), + #[error("failed to get mountinfo: {0}")] + MountInfo(ProcError), + #[error("could not find mountpoint for {subsystem}")] + NotFound { subsystem: ControllerType }, +} + /// List all cgroup v1 subsystem mount points on the system. This can include unsupported /// subsystems, comounted controllers and named hierarchies. -pub fn list_subsystem_mount_points() -> Result> { - Ok(Process::myself()? +pub fn list_subsystem_mount_points() -> Result, V1MountPointError> { + Ok(Process::myself() + .map_err(V1MountPointError::ReadSelf)? .mountinfo() - .context("failed to get mountinfo")? + .map_err(V1MountPointError::MountInfo)? .into_iter() .filter(|m| m.fs_type == "cgroup") .map(|m| m.mount_point) @@ -18,7 +28,8 @@ pub fn list_subsystem_mount_points() -> Result> { } /// List the mount points of all currently supported cgroup subsystems. -pub fn list_supported_mount_points() -> Result> { +pub fn list_supported_mount_points() -> Result, V1MountPointError> +{ let mut mount_paths = HashMap::with_capacity(CONTROLLERS.len()); for controller in CONTROLLERS { @@ -30,38 +41,41 @@ pub fn list_supported_mount_points() -> Result> Ok(mount_paths) } -pub fn get_subsystem_mount_point(subsystem: &ControllerType) -> Result { - let subsystem = subsystem.to_string(); - Process::myself()? +pub fn get_subsystem_mount_point(subsystem: &ControllerType) -> Result { + let subsystem_name = subsystem.to_string(); + Process::myself() + .map_err(V1MountPointError::ReadSelf)? 
.mountinfo() - .context("failed to get mountinfo")? + .map_err(V1MountPointError::MountInfo)? .into_iter() .find(|m| { if m.fs_type == "cgroup" { // Some systems mount net_prio and net_cls in the same directory - // other systems mount them in their own diretories. This + // other systems mount them in their own directories. This // should handle both cases. - if subsystem == "net_cls" { + if subsystem_name == "net_cls" { return m.mount_point.ends_with("net_cls,net_prio") || m.mount_point.ends_with("net_prio,net_cls") || m.mount_point.ends_with("net_cls"); - } else if subsystem == "net_prio" { + } else if subsystem_name == "net_prio" { return m.mount_point.ends_with("net_cls,net_prio") || m.mount_point.ends_with("net_prio,net_cls") || m.mount_point.ends_with("net_prio"); } - if subsystem == "cpu" { + if subsystem_name == "cpu" { return m.mount_point.ends_with("cpu,cpuacct") || m.mount_point.ends_with("cpu"); } - if subsystem == "cpuacct" { + if subsystem_name == "cpuacct" { return m.mount_point.ends_with("cpu,cpuacct") || m.mount_point.ends_with("cpuacct"); } } - m.mount_point.ends_with(&subsystem) + m.mount_point.ends_with(&subsystem_name) }) .map(|m| m.mount_point) - .ok_or_else(|| anyhow!("could not find mountpoint for {}", subsystem)) + .ok_or(V1MountPointError::NotFound { + subsystem: *subsystem, + }) } diff --git a/crates/libcgroups/src/v2/controller.rs b/crates/libcgroups/src/v2/controller.rs index 7287c1eb3..e88b45887 100644 --- a/crates/libcgroups/src/v2/controller.rs +++ b/crates/libcgroups/src/v2/controller.rs @@ -1,8 +1,9 @@ -use anyhow::Result; use std::path::Path; use crate::common::ControllerOpt; -pub trait Controller { - fn apply(controller_opt: &ControllerOpt, cgroup_path: &Path) -> Result<()>; +pub(super) trait Controller { + type Error; + + fn apply(controller_opt: &ControllerOpt, cgroup_path: &Path) -> Result<(), Self::Error>; } diff --git a/crates/libcgroups/src/v2/controller_type.rs b/crates/libcgroups/src/v2/controller_type.rs index 
33672b915..87830de84 100644 --- a/crates/libcgroups/src/v2/controller_type.rs +++ b/crates/libcgroups/src/v2/controller_type.rs @@ -21,7 +21,7 @@ impl Display for ControllerType { Self::Pids => "pids", }; - write!(f, "{}", print) + write!(f, "{print}") } } @@ -49,7 +49,7 @@ impl Display for PseudoControllerType { Self::Unified => "unified", }; - write!(f, "{}", print) + write!(f, "{print}") } } diff --git a/crates/libcgroups/src/v2/cpu.rs b/crates/libcgroups/src/v2/cpu.rs index a94ef487f..bf0f444ba 100644 --- a/crates/libcgroups/src/v2/cpu.rs +++ b/crates/libcgroups/src/v2/cpu.rs @@ -1,8 +1,11 @@ -use anyhow::{bail, Context, Result}; -use std::{borrow::Cow, path::Path}; +use std::{ + borrow::Cow, + num::ParseIntError, + path::{Path, PathBuf}, +}; use crate::{ - common::{self, ControllerOpt}, + common::{self, ControllerOpt, WrappedIoError}, stats::{self, CpuStats, StatsProvider}, }; @@ -20,32 +23,64 @@ const MAX_CPU_WEIGHT: u64 = 10000; const CPU_STAT: &str = "cpu.stat"; const CPU_PSI: &str = "cpu.pressure"; +#[derive(thiserror::Error, Debug)] +pub enum V2CpuControllerError { + #[error("io error: {0}")] + WrappedIo(#[from] WrappedIoError), + #[error("realtime is not supported on v2 yet")] + RealtimeV2, +} + pub struct Cpu {} impl Controller for Cpu { - fn apply(controller_opt: &ControllerOpt, path: &Path) -> Result<()> { + type Error = V2CpuControllerError; + + fn apply(controller_opt: &ControllerOpt, path: &Path) -> Result<(), Self::Error> { if let Some(cpu) = &controller_opt.resources.cpu() { - Self::apply(path, cpu).context("failed to apply cpu resource restrictions")?; + Self::apply(path, cpu)?; } Ok(()) } } +#[derive(thiserror::Error, Debug)] +pub enum V2CpuStatsError { + #[error("io error: {0}")] + WrappedIo(#[from] WrappedIoError), + #[error("failed parsing value {value} for field {field} in {path}: {err}")] + ParseField { + value: String, + field: String, + path: PathBuf, + err: ParseIntError, + }, +} + impl StatsProvider for Cpu { + type Error = 
V2CpuStatsError; type Stats = CpuStats; - fn stats(cgroup_path: &Path) -> Result { + fn stats(cgroup_path: &Path) -> Result { let mut stats = CpuStats::default(); + let stats_path = cgroup_path.join(CPU_STAT); - let stat_content = common::read_cgroup_file(cgroup_path.join(CPU_STAT))?; + let stat_content = common::read_cgroup_file(&stats_path)?; for entry in stat_content.lines() { let parts: Vec<&str> = entry.split_ascii_whitespace().collect(); if parts.len() != 2 { continue; } - let value = parts[1].parse()?; + let value = parts[1] + .parse() + .map_err(|err| V2CpuStatsError::ParseField { + value: parts[1].into(), + field: parts[0].into(), + path: stats_path.clone(), + err, + })?; match parts[0] { "usage_usec" => stats.usage.usage_total = value, "user_usec" => stats.usage.usage_user = value, @@ -54,16 +89,15 @@ impl StatsProvider for Cpu { } } - stats.psi = - stats::psi_stats(&cgroup_path.join(CPU_PSI)).context("could not read cpu psi")?; + stats.psi = stats::psi_stats(&cgroup_path.join(CPU_PSI))?; Ok(stats) } } impl Cpu { - fn apply(path: &Path, cpu: &LinuxCpu) -> Result<()> { + fn apply(path: &Path, cpu: &LinuxCpu) -> Result<(), V2CpuControllerError> { if Self::is_realtime_requested(cpu) { - bail!("realtime is not supported on cgroup v2 yet"); + return Err(V2CpuControllerError::RealtimeV2); } if let Some(mut shares) = cpu.shares() { @@ -79,11 +113,9 @@ impl Cpu { (None, Some(period)) => Self::create_period_only_value(&cpu_max_file, period)?, (Some(quota), None) if quota > 0 => Some(quota.to_string().into()), (Some(quota), None) if quota <= 0 => Some(UNRESTRICTED_QUOTA.into()), - (Some(quota), Some(period)) if quota > 0 => { - Some(format!("{} {}", quota, period).into()) - } + (Some(quota), Some(period)) if quota > 0 => Some(format!("{quota} {period}").into()), (Some(quota), Some(period)) if quota <= 0 => { - Some(format!("{} {}", UNRESTRICTED_QUOTA, period).into()) + Some(format!("{UNRESTRICTED_QUOTA} {period}").into()) } _ => None, }; @@ -112,7 +144,7 @@ impl 
Cpu { return 0; } - let weight = 1 + ((shares - 2) * 9999) / 262142; + let weight = 1 + ((shares.saturating_sub(2)) * 9999) / 262142; weight.min(MAX_CPU_WEIGHT) } @@ -128,10 +160,13 @@ impl Cpu { false } - fn create_period_only_value(cpu_max_file: &Path, period: u64) -> Result>> { + fn create_period_only_value( + cpu_max_file: &Path, + period: u64, + ) -> Result>, V2CpuControllerError> { let old_cpu_max = common::read_cgroup_file(cpu_max_file)?; if let Some(old_quota) = old_cpu_max.split_whitespace().next() { - return Ok(Some(format!("{} {}", old_quota, period).into())); + return Ok(Some(format!("{old_quota} {period}").into())); } Ok(None) } @@ -142,7 +177,7 @@ mod tests { use super::*; use crate::{ stats::CpuUsage, - test::{create_temp_dir, set_fixture, setup}, + test::{set_fixture, setup}, }; use oci_spec::runtime::LinuxCpuBuilder; use std::fs; @@ -150,17 +185,17 @@ mod tests { #[test] fn test_set_valid_shares() { // arrange - let (tmp, weight) = setup("test_set_shares", CGROUP_CPU_WEIGHT); - let _ = set_fixture(&tmp, CGROUP_CPU_MAX, "") - .unwrap_or_else(|_| panic!("set test fixture for {}", CGROUP_CPU_MAX)); + let (tmp, weight) = setup(CGROUP_CPU_WEIGHT); + let _ = set_fixture(tmp.path(), CGROUP_CPU_MAX, "") + .unwrap_or_else(|_| panic!("set test fixture for {CGROUP_CPU_MAX}")); let cpu = LinuxCpuBuilder::default().shares(22000u64).build().unwrap(); // act - Cpu::apply(&tmp, &cpu).expect("apply cpu"); + Cpu::apply(tmp.path(), &cpu).expect("apply cpu"); // assert let content = fs::read_to_string(weight) - .unwrap_or_else(|_| panic!("read {} file content", CGROUP_CPU_WEIGHT)); + .unwrap_or_else(|_| panic!("read {CGROUP_CPU_WEIGHT} file content")); assert_eq!(content, 840.to_string()); } @@ -179,46 +214,46 @@ mod tests { return; } - let (tmp, max) = setup("test_set_cpu_idle", CGROUP_CPU_IDLE); + let (tmp, max) = setup(CGROUP_CPU_IDLE); let cpu = LinuxCpuBuilder::default().idle(IDLE).build().unwrap(); // act - Cpu::apply(&tmp, &cpu).expect("apply cpu"); + 
Cpu::apply(tmp.path(), &cpu).expect("apply cpu"); // assert let content = fs::read_to_string(max) - .unwrap_or_else(|_| panic!("read {} file content", CGROUP_CPU_IDLE)); - assert_eq!(content, format!("{}", IDLE)) + .unwrap_or_else(|_| panic!("read {CGROUP_CPU_IDLE} file content")); + assert_eq!(content, format!("{IDLE}")) } #[test] fn test_set_positive_quota() { // arrange const QUOTA: i64 = 200000; - let (tmp, max) = setup("test_set_positive_quota", CGROUP_CPU_MAX); + let (tmp, max) = setup(CGROUP_CPU_MAX); let cpu = LinuxCpuBuilder::default().quota(QUOTA).build().unwrap(); // act - Cpu::apply(&tmp, &cpu).expect("apply cpu"); + Cpu::apply(tmp.path(), &cpu).expect("apply cpu"); // assert let content = fs::read_to_string(max) - .unwrap_or_else(|_| panic!("read {} file content", CGROUP_CPU_MAX)); - assert_eq!(content, format!("{}", QUOTA)) + .unwrap_or_else(|_| panic!("read {CGROUP_CPU_MAX} file content")); + assert_eq!(content, format!("{QUOTA}")) } #[test] fn test_set_negative_quota() { // arrange - let (tmp, max) = setup("test_set_negative_quota", CGROUP_CPU_MAX); + let (tmp, max) = setup(CGROUP_CPU_MAX); let cpu = LinuxCpuBuilder::default().quota(-500).build().unwrap(); // act - Cpu::apply(&tmp, &cpu).expect("apply cpu"); + Cpu::apply(tmp.path(), &cpu).expect("apply cpu"); // assert let content = fs::read_to_string(max) - .unwrap_or_else(|_| panic!("read {} file content", CGROUP_CPU_MAX)); + .unwrap_or_else(|_| panic!("read {CGROUP_CPU_MAX} file content")); assert_eq!(content, UNRESTRICTED_QUOTA) } @@ -227,17 +262,17 @@ mod tests { // arrange const QUOTA: u64 = 50000; const PERIOD: u64 = 100000; - let (tmp, max) = setup("test_set_positive_period", CGROUP_CPU_MAX); + let (tmp, max) = setup(CGROUP_CPU_MAX); common::write_cgroup_file(&max, QUOTA).unwrap(); let cpu = LinuxCpuBuilder::default().period(PERIOD).build().unwrap(); // act - Cpu::apply(&tmp, &cpu).expect("apply cpu"); + Cpu::apply(tmp.path(), &cpu).expect("apply cpu"); // assert let content = 
fs::read_to_string(max) - .unwrap_or_else(|_| panic!("read {} file content", CGROUP_CPU_MAX)); - assert_eq!(content, format!("{} {}", QUOTA, PERIOD)) + .unwrap_or_else(|_| panic!("read {CGROUP_CPU_MAX} file content")); + assert_eq!(content, format!("{QUOTA} {PERIOD}")) } #[test] @@ -245,7 +280,7 @@ mod tests { // arrange const QUOTA: i64 = 200000; const PERIOD: u64 = 100000; - let (tmp, max) = setup("test_set_quota_and_period", CGROUP_CPU_MAX); + let (tmp, max) = setup(CGROUP_CPU_MAX); let cpu = LinuxCpuBuilder::default() .quota(QUOTA) .period(PERIOD) @@ -253,26 +288,25 @@ mod tests { .unwrap(); // act - Cpu::apply(&tmp, &cpu).expect("apply cpu"); + Cpu::apply(tmp.path(), &cpu).expect("apply cpu"); // assert let content = fs::read_to_string(max) - .unwrap_or_else(|_| panic!("read {} file content", CGROUP_CPU_MAX)); - assert_eq!(content, format!("{} {}", QUOTA, PERIOD)); + .unwrap_or_else(|_| panic!("read {CGROUP_CPU_MAX} file content")); + assert_eq!(content, format!("{QUOTA} {PERIOD}")); } #[test] fn test_realtime_runtime_not_supported() { // arrange - let tmp = create_temp_dir("test_realtime_runtime_not_supported") - .expect("create temp directory for test"); + let tmp = tempfile::tempdir().unwrap(); let cpu = LinuxCpuBuilder::default() .realtime_runtime(5) .build() .unwrap(); // act - let result = Cpu::apply(&tmp, &cpu); + let result = Cpu::apply(tmp.path(), &cpu); // assert assert!( @@ -284,15 +318,14 @@ mod tests { #[test] fn test_realtime_period_not_supported() { // arrange - let tmp = create_temp_dir("test_realtime_period_not_supported") - .expect("create temp directory for test"); + let tmp = tempfile::tempdir().unwrap(); let cpu = LinuxCpuBuilder::default() .realtime_period(5u64) .build() .unwrap(); // act - let result = Cpu::apply(&tmp, &cpu); + let result = Cpu::apply(tmp.path(), &cpu); // assert assert!( @@ -303,12 +336,12 @@ mod tests { #[test] fn test_stat_usage() { - let tmp = create_temp_dir("test_stat_usage").expect("create temp directory for 
test"); + let tmp = tempfile::tempdir().unwrap(); let content = ["usage_usec 7730", "user_usec 4387", "system_usec 3498"].join("\n"); - set_fixture(&tmp, CPU_STAT, &content).expect("create stat file"); - set_fixture(&tmp, CPU_PSI, "").expect("create psi file"); + set_fixture(tmp.path(), CPU_STAT, &content).expect("create stat file"); + set_fixture(tmp.path(), CPU_PSI, "").expect("create psi file"); - let actual = Cpu::stats(&tmp).expect("get cgroup stats"); + let actual = Cpu::stats(tmp.path()).expect("get cgroup stats"); let expected = CpuUsage { usage_total: 7730, usage_user: 4387, @@ -322,10 +355,10 @@ mod tests { #[test] fn test_burst() { let expected = 100000u64; - let (tmp, burst_file) = setup("test_burst", CGROUP_CPU_BURST); + let (tmp, burst_file) = setup(CGROUP_CPU_BURST); let cpu = LinuxCpuBuilder::default().burst(expected).build().unwrap(); - Cpu::apply(&tmp, &cpu).expect("apply cpu"); + Cpu::apply(tmp.path(), &cpu).expect("apply cpu"); let actual = fs::read_to_string(burst_file).expect("read burst file"); assert_eq!(actual, expected.to_string()); diff --git a/crates/libcgroups/src/v2/cpuset.rs b/crates/libcgroups/src/v2/cpuset.rs index 4e6755d66..ba93789b5 100644 --- a/crates/libcgroups/src/v2/cpuset.rs +++ b/crates/libcgroups/src/v2/cpuset.rs @@ -1,7 +1,6 @@ -use anyhow::{Context, Result}; use std::path::Path; -use crate::common::{self, ControllerOpt}; +use crate::common::{self, ControllerOpt, WrappedIoError}; use oci_spec::runtime::LinuxCpu; use super::controller::Controller; @@ -12,10 +11,11 @@ const CGROUP_CPUSET_MEMS: &str = "cpuset.mems"; pub struct CpuSet {} impl Controller for CpuSet { - fn apply(controller_opt: &ControllerOpt, cgroup_path: &Path) -> Result<()> { + type Error = WrappedIoError; + + fn apply(controller_opt: &ControllerOpt, cgroup_path: &Path) -> Result<(), Self::Error> { if let Some(cpuset) = &controller_opt.resources.cpu() { - Self::apply(cgroup_path, cpuset) - .context("failed to apply cpuset resource restrictions")?; + 
Self::apply(cgroup_path, cpuset)?; } Ok(()) @@ -23,7 +23,7 @@ impl Controller for CpuSet { } impl CpuSet { - fn apply(path: &Path, cpuset: &LinuxCpu) -> Result<()> { + fn apply(path: &Path, cpuset: &LinuxCpu) -> Result<(), WrappedIoError> { if let Some(cpus) = &cpuset.cpus() { common::write_cgroup_file_str(path.join(CGROUP_CPUSET_CPUS), cpus)?; } @@ -47,36 +47,36 @@ mod tests { #[test] fn test_set_cpus() { // arrange - let (tmp, cpus) = setup("test_set_cpus", CGROUP_CPUSET_CPUS); + let (tmp, cpus) = setup(CGROUP_CPUSET_CPUS); let cpuset = LinuxCpuBuilder::default() .cpus("1-3".to_owned()) .build() .unwrap(); // act - CpuSet::apply(&tmp, &cpuset).expect("apply cpuset"); + CpuSet::apply(tmp.path(), &cpuset).expect("apply cpuset"); // assert let content = fs::read_to_string(cpus) - .unwrap_or_else(|_| panic!("read {} file content", CGROUP_CPUSET_CPUS)); + .unwrap_or_else(|_| panic!("read {CGROUP_CPUSET_CPUS} file content")); assert_eq!(content, "1-3"); } #[test] fn test_set_mems() { // arrange - let (tmp, mems) = setup("test_set_mems", CGROUP_CPUSET_MEMS); + let (tmp, mems) = setup(CGROUP_CPUSET_MEMS); let cpuset = LinuxCpuBuilder::default() .mems("1-3".to_owned()) .build() .unwrap(); // act - CpuSet::apply(&tmp, &cpuset).expect("apply cpuset"); + CpuSet::apply(tmp.path(), &cpuset).expect("apply cpuset"); // assert let content = fs::read_to_string(mems) - .unwrap_or_else(|_| panic!("read {} file content", CGROUP_CPUSET_MEMS)); + .unwrap_or_else(|_| panic!("read {CGROUP_CPUSET_MEMS} file content")); assert_eq!(content, "1-3"); } } diff --git a/crates/libcgroups/src/v2/devices/bpf.rs b/crates/libcgroups/src/v2/devices/bpf.rs index ab9330ae9..db53a886f 100644 --- a/crates/libcgroups/src/v2/devices/bpf.rs +++ b/crates/libcgroups/src/v2/devices/bpf.rs @@ -4,10 +4,17 @@ pub struct ProgramInfo { pub fd: i32, } +#[derive(thiserror::Error, Debug)] +pub enum BpfError { + #[error(transparent)] + Errno(#[from] errno::Errno), + #[error("Failed to increase rlimit")] + 
FailedToIncreaseRLimit, +} + #[cfg_attr(test, automock)] pub mod prog { use super::ProgramInfo; - use anyhow::{bail, Result}; use std::os::unix::io::RawFd; use std::ptr; @@ -31,12 +38,12 @@ pub mod prog { bpf_prog_attach, bpf_prog_detach2, bpf_prog_get_fd_by_id, bpf_prog_load, bpf_prog_query, }; - pub fn load(license: &str, insns: &[u8]) -> Result { + pub fn load(license: &str, insns: &[u8]) -> Result { let insns_cnt = insns.len() / std::mem::size_of::(); let insns = insns as *const _ as *const bpf_insn; - let opts = libbpf_sys::bpf_prog_load_opts { + let mut opts = libbpf_sys::bpf_prog_load_opts { kern_version: 0, - log_buf: ptr::null_mut::(), + log_buf: ptr::null_mut::<::std::os::raw::c_char>(), log_size: 0, ..Default::default() }; @@ -44,11 +51,11 @@ pub mod prog { let prog_fd = unsafe { bpf_prog_load( BPF_PROG_TYPE_CGROUP_DEVICE, - ptr::null::(), - license as *const _ as *const i8, + ptr::null::<::std::os::raw::c_char>(), + license as *const _ as *const ::std::os::raw::c_char, insns, insns_cnt as u64, - &opts, + &mut opts as *mut libbpf_sys::bpf_prog_load_opts, ) }; @@ -59,7 +66,7 @@ pub mod prog { } /// Given a fd for a cgroup, collect the programs associated with it - pub fn query(cgroup_fd: RawFd) -> Result> { + pub fn query(cgroup_fd: RawFd) -> Result, super::BpfError> { let mut prog_ids: Vec = vec![0_u32; 64]; let mut attach_flags = 0_u32; for _ in 0..10 { @@ -99,7 +106,7 @@ pub mod prog { #[allow(unused_unsafe)] let prog_fd = unsafe { bpf_prog_get_fd_by_id(*prog_id) }; if prog_fd < 0 { - log::debug!("bpf_prog_get_fd_by_id failed: {}", errno::errno()); + tracing::debug!("bpf_prog_get_fd_by_id failed: {}", errno::errno()); continue; } prog_fds.push(ProgramInfo { @@ -110,7 +117,7 @@ pub mod prog { Ok(prog_fds) } - pub fn detach2(prog_fd: RawFd, cgroup_fd: RawFd) -> Result<()> { + pub fn detach2(prog_fd: RawFd, cgroup_fd: RawFd) -> Result<(), super::BpfError> { #[allow(unused_unsafe)] let ret = unsafe { bpf_prog_detach2(prog_fd, cgroup_fd, BPF_CGROUP_DEVICE) 
}; if ret != 0 { @@ -119,7 +126,7 @@ pub mod prog { Ok(()) } - pub fn attach(prog_fd: RawFd, cgroup_fd: RawFd) -> Result<()> { + pub fn attach(prog_fd: RawFd, cgroup_fd: RawFd) -> Result<(), super::BpfError> { #[allow(unused_unsafe)] let ret = unsafe { bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_DEVICE, BPF_F_ALLOW_MULTI) }; @@ -130,7 +137,7 @@ pub mod prog { Ok(()) } - pub fn bump_memlock_rlimit() -> Result<()> { + pub fn bump_memlock_rlimit() -> Result<(), super::BpfError> { let rlimit = rlimit { rlim_cur: 128 << 20, rlim_max: 128 << 20, @@ -138,7 +145,7 @@ pub mod prog { #[allow(unused_unsafe)] if unsafe { setrlimit(RLIMIT_MEMLOCK, &rlimit) } != 0 { - bail!("Failed to increase rlimit"); + return Err(super::BpfError::FailedToIncreaseRLimit); } Ok(()) diff --git a/crates/libcgroups/src/v2/devices/controller.rs b/crates/libcgroups/src/v2/devices/controller.rs index f3592a883..a7e5b9755 100644 --- a/crates/libcgroups/src/v2/devices/controller.rs +++ b/crates/libcgroups/src/v2/devices/controller.rs @@ -1,8 +1,8 @@ use std::os::unix::io::AsRawFd; use std::path::Path; -use anyhow::Result; - +use super::bpf::BpfError; +use super::program::ProgramError; use super::*; use nix::fcntl::OFlag; use nix::sys::stat::Mode; @@ -21,8 +21,23 @@ const LICENSE: &str = "Apache"; pub struct Devices {} +#[derive(thiserror::Error, Debug)] +pub enum DevicesControllerError { + #[error("bpf error: {0}")] + Bpf(#[from] BpfError), + #[error("nix error: {0}")] + Nix(#[from] nix::Error), + #[error("program error: {0}")] + Program(#[from] ProgramError), +} + impl Controller for Devices { - fn apply(controller_opt: &ControllerOpt, cgroup_root: &Path) -> Result<()> { + type Error = DevicesControllerError; + + fn apply( + controller_opt: &ControllerOpt, + cgroup_root: &Path, + ) -> Result<(), DevicesControllerError> { #[cfg(not(feature = "cgroupsv2_devices"))] return Ok(()); @@ -35,8 +50,8 @@ impl Devices { pub fn apply_devices( cgroup_root: &Path, linux_devices: &Option>, - ) -> Result<()> { - 
log::debug!("Apply Devices cgroup config"); + ) -> Result<(), DevicesControllerError> { + tracing::debug!("Apply Devices cgroup config"); // FIXME: should we start as "deny all"? let mut emulator = emulator::Emulator::with_default_allow(false); @@ -44,8 +59,8 @@ impl Devices { // FIXME: apply user-defined and default rules in which order? if let Some(devices) = linux_devices { for d in devices { - log::debug!("apply user defined rule: {:?}", d); - emulator.add_rule(d)?; + tracing::debug!("apply user defined rule: {:?}", d); + emulator.add_rule(d); } } @@ -55,8 +70,8 @@ impl Devices { ] .concat() { - log::debug!("apply default rule: {:?}", d); - emulator.add_rule(&d)?; + tracing::debug!("apply default rule: {:?}", d); + emulator.add_rule(&d); } let prog = program::Program::from_rules(&emulator.rules, emulator.default_allow)?; @@ -71,7 +86,7 @@ impl Devices { // 2. attach this program (not use BPF_F_REPLACE, see below) // 3. detach all programs of 1 // - // runc will use BPF_F_REPLACE to replace currently attached progam if: + // runc will use BPF_F_REPLACE to replace currently attached program if: // 1. BPF_F_REPLACE is supported by kernel // 2. 
there is exactly one attached program // https://github.com/opencontainers/runc/blob/8e6871a3b14bb74e0ef358aca3b9f8f9cb80f041/libcontainer/cgroups/ebpf/ebpf_linux.go#L165 @@ -114,7 +129,7 @@ mod tests { #[serial(bpf)] // mock contexts are shared fn test_apply_devices() { // arrange - let (tmp, _) = setup("test_apply_devices", "some.value"); + let (tmp, _) = setup("some.value"); let a_type = LinuxDeviceCgroupBuilder::default() .typ(LinuxDeviceType::A) .build() @@ -136,14 +151,14 @@ mod tests { detach2.expect().never(); // act - Devices::apply_devices(&tmp, &Some(vec![a_type])).expect("Could not apply devices"); + Devices::apply_devices(tmp.path(), &Some(vec![a_type])).expect("Could not apply devices"); } #[test] #[serial(bpf)] // mock contexts are shared fn test_existing_programs() { // arrange - let (tmp, _) = setup("test_existing_programs", "some.value"); + let (tmp, _) = setup("some.value"); let a_type = LinuxDeviceCgroupBuilder::default() .typ(LinuxDeviceType::A) .build() @@ -172,6 +187,6 @@ mod tests { detach2.expect().once().returning(|_, _| Ok(())); // act - Devices::apply_devices(&tmp, &Some(vec![a_type])).expect("Could not apply devices"); + Devices::apply_devices(tmp.path(), &Some(vec![a_type])).expect("Could not apply devices"); } } diff --git a/crates/libcgroups/src/v2/devices/emulator.rs b/crates/libcgroups/src/v2/devices/emulator.rs index 16757126c..b99ab9a25 100644 --- a/crates/libcgroups/src/v2/devices/emulator.rs +++ b/crates/libcgroups/src/v2/devices/emulator.rs @@ -1,7 +1,6 @@ -use anyhow::Result; use oci_spec::runtime::{LinuxDeviceCgroup, LinuxDeviceType}; -// For cgroup v1 compatibility, runc implements a device emulator to caculate the final rules given +// For cgroup v1 compatibility, runc implements a device emulator to calculate the final rules given // a list of user-defined rules. 
// https://github.com/opencontainers/runc/commit/2353ffec2bb670a200009dc7a54a56b93145f141 // @@ -28,29 +27,27 @@ impl Emulator { } } - pub fn add_rules(&mut self, rules: &[LinuxDeviceCgroup]) -> Result<()> { + pub fn add_rules(&mut self, rules: &[LinuxDeviceCgroup]) { for rule in rules { - self.add_rule(rule)?; + self.add_rule(rule); } - Ok(()) } - pub fn add_rule(&mut self, rule: &LinuxDeviceCgroup) -> Result<()> { + pub fn add_rule(&mut self, rule: &LinuxDeviceCgroup) { // special case, switch to blacklist or whitelist and clear all existing rules // NOTE: we ignore other fields when type='a', this is same as cgroup v1 and runc if rule.typ().unwrap_or_default() == LinuxDeviceType::A { self.default_allow = rule.allow(); self.rules.clear(); - return Ok(()); + return; } // empty access match nothing, just discard this rule if rule.access().is_none() { - return Ok(()); + return; } self.rules.push(rule.clone()); - Ok(()) } } @@ -79,7 +76,7 @@ mod tests { .unwrap(); // act - emulator.add_rule(&cgroup).expect("add type A rule"); + emulator.add_rule(&cgroup); // assert assert_eq!(emulator.rules.len(), 0); @@ -93,7 +90,7 @@ mod tests { let cgroup = LinuxDeviceCgroupBuilder::default().build().unwrap(); // act - emulator.add_rule(&cgroup).expect("add empty rule"); + emulator.add_rule(&cgroup); // assert assert_eq!(emulator.rules.len(), 0); @@ -112,7 +109,7 @@ mod tests { .unwrap(); // act - emulator.add_rule(&cgroup).expect("add permission rule"); + emulator.add_rule(&cgroup); // assert let top_rule = emulator.rules.first().unwrap(); diff --git a/crates/libcgroups/src/v2/devices/mocks.rs b/crates/libcgroups/src/v2/devices/mocks.rs index 3a8629406..5d7ed9104 100644 --- a/crates/libcgroups/src/v2/devices/mocks.rs +++ b/crates/libcgroups/src/v2/devices/mocks.rs @@ -16,7 +16,7 @@ pub mod libc { pub mod libbpf_sys { pub fn bpf_prog_load( _type_: libbpf_sys::bpf_prog_type, - _name: *const i8, + _name: *const ::std::os::raw::c_char, _license: *const ::std::os::raw::c_char, _insns: 
*const libbpf_sys::bpf_insn, _insns_cnt: libbpf_sys::size_t, diff --git a/crates/libcgroups/src/v2/devices/program.rs b/crates/libcgroups/src/v2/devices/program.rs index e64b872a5..c091ac8a4 100644 --- a/crates/libcgroups/src/v2/devices/program.rs +++ b/crates/libcgroups/src/v2/devices/program.rs @@ -1,4 +1,3 @@ -use anyhow::{bail, Result}; use oci_spec::runtime::*; use rbpf::disassembler::disassemble; @@ -9,8 +8,23 @@ pub struct Program { prog: BpfCode, } +#[derive(thiserror::Error, Debug)] +pub enum ProgramError { + #[error("io error: {0}")] + Io(#[from] std::io::Error), + #[error("invalid access: {0}")] + InvalidAccess(char), + #[error("{0} device not supported")] + DeviceNotSupported(&'static str), + #[error("wildcard device type should be removed when cleaning rules")] + WildcardDevice, +} + impl Program { - pub fn from_rules(rules: &[LinuxDeviceCgroup], default_allow: bool) -> Result { + pub fn from_rules( + rules: &[LinuxDeviceCgroup], + default_allow: bool, + ) -> Result { let mut prog = Program { prog: BpfCode::new(), }; @@ -89,7 +103,7 @@ impl Program { .push(); } - fn add_rule(&mut self, rule: &LinuxDeviceCgroup) -> Result<()> { + fn add_rule(&mut self, rule: &LinuxDeviceCgroup) -> Result<(), ProgramError> { let dev_type = bpf_dev_type(rule.typ().unwrap_or_default())?; let access = bpf_access(rule.access().clone().unwrap_or_default())?; let has_access = access @@ -188,7 +202,7 @@ impl Program { major: u32, minor: u32, access: String, - ) -> Result { + ) -> Result { let mut mem = bpf_cgroup_dev_ctx(typ, major, minor, access)?; let vm = rbpf::EbpfVmRaw::new(Some(self.prog.into_bytes()))?; let result = vm.execute_program(&mut mem[..])?; @@ -196,27 +210,25 @@ impl Program { } } -fn bpf_dev_type(typ: LinuxDeviceType) -> Result { +fn bpf_dev_type(typ: LinuxDeviceType) -> Result { let dev_type: u32 = match typ { LinuxDeviceType::C => libbpf_sys::BPF_DEVCG_DEV_CHAR, - LinuxDeviceType::U => bail!("unbuffered char device not supported"), + LinuxDeviceType::U => 
return Err(ProgramError::DeviceNotSupported("unbuffered char")), LinuxDeviceType::B => libbpf_sys::BPF_DEVCG_DEV_BLOCK, - LinuxDeviceType::P => bail!("pipe device not supported"), - LinuxDeviceType::A => { - bail!("wildcard device type should be removed when cleaning rules") - } + LinuxDeviceType::P => return Err(ProgramError::DeviceNotSupported("pipe device")), + LinuxDeviceType::A => return Err(ProgramError::WildcardDevice), }; Ok(dev_type) } -fn bpf_access(access: String) -> Result { +fn bpf_access(access: String) -> Result { let mut v = 0_u32; for c in access.chars() { let cur_access = match c { 'r' => libbpf_sys::BPF_DEVCG_ACC_READ, 'w' => libbpf_sys::BPF_DEVCG_ACC_WRITE, 'm' => libbpf_sys::BPF_DEVCG_ACC_MKNOD, - _ => bail!("invalid access: {}", c), + _ => return Err(ProgramError::InvalidAccess(c)), }; v |= cur_access; } @@ -228,7 +240,7 @@ fn bpf_cgroup_dev_ctx( major: u32, minor: u32, access: String, -) -> Result> { +) -> Result, ProgramError> { let mut mem = Vec::with_capacity(12); let mut type_access = 0_u32; @@ -248,15 +260,16 @@ fn bpf_cgroup_dev_ctx( #[cfg(test)] mod tests { use super::*; + use anyhow::Result; use oci_spec::runtime::LinuxDeviceCgroupBuilder; fn build_bpf_program(rules: &Option>) -> Result { let mut em = crate::v2::devices::emulator::Emulator::with_default_allow(false); if let Some(rules) = rules { - em.add_rules(rules)?; + em.add_rules(rules); } - Program::from_rules(&em.rules, em.default_allow) + Ok(Program::from_rules(&em.rules, em.default_allow)?) 
} #[test] @@ -287,10 +300,7 @@ mod tests { let ret = prog.execute(*ty, *major, *minor, access.to_string()); assert!(ret.is_ok()); - println!( - "execute {:?} {} {} {} -> {:?}", - ty, major, minor, access, ret - ); + println!("execute {ty:?} {major} {minor} {access} -> {ret:?}"); if *ty == LinuxDeviceType::C // only this is allowed && *major == 10 && *minor == 20 @@ -358,10 +368,7 @@ mod tests { let ret = prog.execute(*ty, *major, *minor, access.to_string()); assert!(ret.is_ok()); - println!( - "execute {:?} {} {} {} -> {:?}", - ty, major, minor, access, ret - ); + println!("execute {ty:?} {major} {minor} {access} -> {ret:?}"); assert_eq!(ret.unwrap(), 1); } } @@ -396,10 +403,7 @@ mod tests { let ret = prog.execute(*ty, *major, *minor, access.to_string()); assert!(ret.is_ok()); - println!( - "execute {:?} {} {} {} -> {:?}", - ty, major, minor, access, ret - ); + println!("execute {ty:?} {major} {minor} {access} -> {ret:?}"); if *ty == LinuxDeviceType::C && *minor == 20 && access.eq(&"r") { assert_eq!(ret.unwrap(), 1); } else { @@ -447,10 +451,7 @@ mod tests { let ret = prog.execute(*ty, *major, *minor, access.to_string()); assert!(ret.is_ok()); - println!( - "execute {:?} {} {} {} -> {:?}", - ty, major, minor, access, ret - ); + println!("execute {ty:?} {major} {minor} {access} -> {ret:?}"); if *ty == LinuxDeviceType::C && *major == 10 && access.eq(&"r") { assert_eq!(ret.unwrap(), 0); } else if *ty == LinuxDeviceType::C diff --git a/crates/libcgroups/src/v2/freezer.rs b/crates/libcgroups/src/v2/freezer.rs index a5a464291..6f372e7ca 100644 --- a/crates/libcgroups/src/v2/freezer.rs +++ b/crates/libcgroups/src/v2/freezer.rs @@ -1,25 +1,46 @@ -use anyhow::{bail, Context, Result}; use std::{ fs::OpenOptions, - io::{BufRead, BufReader, Read, Seek, SeekFrom, Write}, + io::{BufRead, BufReader, Read, Seek, Write}, path::Path, - str, thread, + str::{self, Utf8Error}, + thread, time::Duration, }; -use crate::common::{ControllerOpt, FreezerState}; +use 
crate::common::{ControllerOpt, FreezerState, WrapIoResult, WrappedIoError}; use super::controller::Controller; const CGROUP_FREEZE: &str = "cgroup.freeze"; const CGROUP_EVENTS: &str = "cgroup.events"; +#[derive(thiserror::Error, Debug)] +pub enum V2FreezerError { + #[error("io error: {0}")] + WrappedIo(#[from] WrappedIoError), + #[error("freezer not supported: {0}")] + NotSupported(WrappedIoError), + #[error("expected \"cgroup.freeze\" to be in state {expected:?} but was in {actual:?}")] + ExpectedToBe { + expected: FreezerState, + actual: FreezerState, + }, + #[error("unexpected \"cgroup.freeze\" state: {state}")] + UnknownState { state: String }, + #[error("timeout of {0} ms reached waiting for the cgroup to freeze")] + Timeout(u128), + #[error("invalid utf8: {0}")] + InvalidUtf8(#[from] Utf8Error), +} + pub struct Freezer {} impl Controller for Freezer { - fn apply(controller_opt: &ControllerOpt, cgroup_path: &Path) -> Result<()> { + type Error = V2FreezerError; + + fn apply(controller_opt: &ControllerOpt, cgroup_path: &Path) -> Result<(), Self::Error> { if let Some(freezer_state) = controller_opt.freezer_state { - Self::apply(freezer_state, cgroup_path).context("failed to apply freezer")?; + Self::apply(freezer_state, cgroup_path)?; } Ok(()) @@ -27,62 +48,70 @@ impl Controller for Freezer { } impl Freezer { - fn apply(freezer_state: FreezerState, path: &Path) -> Result<()> { + fn apply(freezer_state: FreezerState, path: &Path) -> Result<(), V2FreezerError> { let state_str = match freezer_state { FreezerState::Undefined => return Ok(()), FreezerState::Frozen => "1", FreezerState::Thawed => "0", }; - match OpenOptions::new() - .create(false) - .write(true) - .open(path.join(CGROUP_FREEZE)) - { - Err(e) => { - if let FreezerState::Frozen = freezer_state { - bail!("freezer not supported {}", e); + let target = path.join(CGROUP_FREEZE); + match OpenOptions::new().create(false).write(true).open(&target) { + Err(err) => { + if freezer_state == FreezerState::Frozen { + 
return Err(V2FreezerError::NotSupported(WrappedIoError::Open { + err, + path: target, + })); } return Ok(()); } - Ok(mut file) => file.write_all(state_str.as_bytes())?, + Ok(mut file) => file + .write_all(state_str.as_bytes()) + .wrap_write(target, state_str)?, }; // confirm that the cgroup did actually change states. let actual_state = Self::read_freezer_state(path)?; if !actual_state.eq(&freezer_state) { - bail!( - "expected \"cgroup.freeze\" to be in state {:?} but was in {:?}", - freezer_state, - actual_state - ); + return Err(V2FreezerError::ExpectedToBe { + expected: freezer_state, + actual: actual_state, + }); } Ok(()) } - fn read_freezer_state(path: &Path) -> Result { + fn read_freezer_state(path: &Path) -> Result { + let target = path.join(CGROUP_FREEZE); let mut buf = [0; 1]; OpenOptions::new() .create(false) .read(true) - .open(path.join(CGROUP_FREEZE))? - .read_exact(&mut buf)?; + .open(&target) + .wrap_open(&target)? + .read_exact(&mut buf) + .wrap_read(&target)?; let state = str::from_utf8(&buf)?; match state { "0" => Ok(FreezerState::Thawed), "1" => Self::wait_frozen(path), - _ => bail!("unknown \"cgroup.freeze\" state: {}", state), + _ => Err(V2FreezerError::UnknownState { + state: state.into(), + }), } } // wait_frozen polls cgroup.events until it sees "frozen 1" in it. 
- fn wait_frozen(path: &Path) -> Result { + fn wait_frozen(path: &Path) -> Result { + let path = path.join(CGROUP_EVENTS); let f = OpenOptions::new() .create(false) .read(true) - .open(path.join(CGROUP_EVENTS))?; + .open(&path) + .wrap_open(&path)?; let mut f = BufReader::new(f); let wait_time = Duration::from_millis(10); @@ -92,26 +121,23 @@ impl Freezer { loop { if iter == max_iter { - bail!( - "timeout of {} ms reached waiting for the cgroup to freeze", - wait_time.as_millis() * max_iter - ); + return Err(V2FreezerError::Timeout(wait_time.as_millis() * max_iter)); } line.clear(); - let num_bytes = f.read_line(&mut line)?; + let num_bytes = f.read_line(&mut line).wrap_read(&path)?; if num_bytes == 0 { break; } if line.starts_with("frozen ") { if line.starts_with("frozen 1") { if iter > 1 { - log::debug!("frozen after {} retries", iter) + tracing::debug!("frozen after {} retries", iter) } return Ok(FreezerState::Frozen); } iter += 1; thread::sleep(wait_time); - f.seek(SeekFrom::Start(0))?; + f.rewind().wrap_other(&path)?; } } @@ -123,16 +149,14 @@ impl Freezer { mod tests { use super::*; use crate::common::FreezerState; - use crate::test::{create_temp_dir, set_fixture}; + use crate::test::set_fixture; use std::sync::Arc; #[test] fn test_set_freezer_state() { - let tmp = Arc::new( - create_temp_dir("test_set_freezer_state").expect("create temp directory for test"), - ); - set_fixture(&tmp, CGROUP_FREEZE, "").expect("Set fixure for freezer state"); - set_fixture(&tmp, CGROUP_EVENTS, "populated 0\nfrozen 0") + let tmp = Arc::new(tempfile::tempdir().unwrap()); + set_fixture(tmp.path(), CGROUP_FREEZE, "").expect("Set fixure for freezer state"); + set_fixture(tmp.path(), CGROUP_EVENTS, "populated 0\nfrozen 0") .expect("Set fixure for freezer state"); // set Frozen state. 
@@ -141,51 +165,50 @@ mod tests { let p = Arc::clone(&tmp); thread::spawn(move || { thread::sleep(Duration::from_millis(100)); - set_fixture(&p, CGROUP_EVENTS, "populated 0\nfrozen 1") + set_fixture(p.path(), CGROUP_EVENTS, "populated 0\nfrozen 1") .expect("Set fixure for freezer state"); }); let freezer_state = FreezerState::Frozen; - Freezer::apply(freezer_state, &tmp).expect("Set freezer state"); + Freezer::apply(freezer_state, tmp.path()).expect("Set freezer state"); let state_content = - std::fs::read_to_string(tmp.join(CGROUP_FREEZE)).expect("Read to string"); + std::fs::read_to_string(tmp.path().join(CGROUP_FREEZE)).expect("Read to string"); assert_eq!("1", state_content); } // set Thawed state. { let freezer_state = FreezerState::Thawed; - Freezer::apply(freezer_state, &tmp).expect("Set freezer state"); + Freezer::apply(freezer_state, tmp.path()).expect("Set freezer state"); let state_content = - std::fs::read_to_string(tmp.join(CGROUP_FREEZE)).expect("Read to string"); + std::fs::read_to_string(tmp.path().join(CGROUP_FREEZE)).expect("Read to string"); assert_eq!("0", state_content); } // set Undefined state. 
{ let old_state_content = - std::fs::read_to_string(tmp.join(CGROUP_FREEZE)).expect("Read to string"); + std::fs::read_to_string(tmp.path().join(CGROUP_FREEZE)).expect("Read to string"); let freezer_state = FreezerState::Undefined; - Freezer::apply(freezer_state, &tmp).expect("Set freezer state"); + Freezer::apply(freezer_state, tmp.path()).expect("Set freezer state"); let state_content = - std::fs::read_to_string(tmp.join(CGROUP_FREEZE)).expect("Read to string"); + std::fs::read_to_string(tmp.path().join(CGROUP_FREEZE)).expect("Read to string"); assert_eq!(old_state_content, state_content); } } #[test] fn test_set_freezer_state_error() { - let tmp = create_temp_dir("test_set_freezer_state_error") - .expect("create temp directory for test"); - set_fixture(&tmp, CGROUP_FREEZE, "").expect("Set fixure for freezer state"); - set_fixture(&tmp, CGROUP_EVENTS, "").expect("Set fixure for freezer state"); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), CGROUP_FREEZE, "").expect("Set fixure for freezer state"); + set_fixture(tmp.path(), CGROUP_EVENTS, "").expect("Set fixure for freezer state"); // events file does not contain "frozen 1" { let freezer_state = FreezerState::Frozen; - let r = Freezer::apply(freezer_state, &tmp); + let r = Freezer::apply(freezer_state, tmp.path()); assert!(r.is_err()); } } diff --git a/crates/libcgroups/src/v2/hugetlb.rs b/crates/libcgroups/src/v2/hugetlb.rs index f147dbfd3..188cfbbc4 100644 --- a/crates/libcgroups/src/v2/hugetlb.rs +++ b/crates/libcgroups/src/v2/hugetlb.rs @@ -1,33 +1,65 @@ -use anyhow::{bail, Context, Result}; -use std::{collections::HashMap, path::Path}; +use std::{ + collections::HashMap, + num::ParseIntError, + path::{Path, PathBuf}, +}; use super::controller::Controller; use crate::{ - common::{self, ControllerOpt}, - stats::{parse_single_value, supported_page_sizes, HugeTlbStats, StatsProvider}, + common::{self, ControllerOpt, EitherError, MustBePowerOfTwo, WrappedIoError}, + stats::{ + 
parse_single_value, supported_page_sizes, HugeTlbStats, StatsProvider, + SupportedPageSizesError, + }, }; use oci_spec::runtime::LinuxHugepageLimit; +#[derive(thiserror::Error, Debug)] +pub enum V2HugeTlbControllerError { + #[error("io error: {0}")] + WrappedIo(#[from] WrappedIoError), + #[error("malformed page size {page_size}: {err}")] + MalformedPageSize { + page_size: String, + err: EitherError, + }, +} + pub struct HugeTlb {} impl Controller for HugeTlb { - fn apply(controller_opt: &ControllerOpt, cgroup_root: &std::path::Path) -> Result<()> { - log::debug!("Apply hugetlb cgroup v2 config"); + type Error = V2HugeTlbControllerError; + + fn apply( + controller_opt: &ControllerOpt, + cgroup_root: &std::path::Path, + ) -> Result<(), Self::Error> { + tracing::debug!("Apply hugetlb cgroup v2 config"); if let Some(hugepage_limits) = controller_opt.resources.hugepage_limits() { for hugetlb in hugepage_limits { - Self::apply(cgroup_root, hugetlb) - .context("failed to apply hugetlb resource restrictions")? + Self::apply(cgroup_root, hugetlb)? 
} } Ok(()) } } +#[derive(thiserror::Error, Debug)] +pub enum V2HugeTlbStatsError { + #[error("io error: {0}")] + WrappedIo(#[from] WrappedIoError), + #[error("getting supported huge page sizes: {0}")] + SupportedPageSizes(#[from] SupportedPageSizesError), + #[error("failed to parse max value for {path}: {err}")] + ParseMax { path: PathBuf, err: ParseIntError }, +} + impl StatsProvider for HugeTlb { + type Error = V2HugeTlbStatsError; type Stats = HashMap; - fn stats(cgroup_path: &Path) -> Result { + fn stats(cgroup_path: &Path) -> Result { let page_sizes = supported_page_sizes()?; let mut hugetlb_stats = HashMap::with_capacity(page_sizes.len()); @@ -43,15 +75,29 @@ impl StatsProvider for HugeTlb { } impl HugeTlb { - fn apply(root_path: &Path, hugetlb: &LinuxHugepageLimit) -> Result<()> { - let page_size: String = hugetlb + fn apply( + root_path: &Path, + hugetlb: &LinuxHugepageLimit, + ) -> Result<(), V2HugeTlbControllerError> { + let page_size_raw: String = hugetlb .page_size() .chars() .take_while(|c| c.is_ascii_digit()) .collect(); - let page_size: u64 = page_size.parse()?; + let page_size: u64 = match page_size_raw.parse() { + Ok(page_size) => page_size, + Err(err) => { + return Err(V2HugeTlbControllerError::MalformedPageSize { + page_size: page_size_raw, + err: EitherError::Left(err), + }) + } + }; if !Self::is_power_of_two(page_size) { - bail!("page size must be in the format of 2^(integer)"); + return Err(V2HugeTlbControllerError::MalformedPageSize { + page_size: page_size_raw, + err: EitherError::Right(MustBePowerOfTwo), + }); } common::write_cgroup_file( @@ -62,22 +108,29 @@ impl HugeTlb { } fn is_power_of_two(number: u64) -> bool { - (number != 0) && (number & (number - 1)) == 0 + (number != 0) && (number & (number.saturating_sub(1))) == 0 } - fn stats_for_page_size(cgroup_path: &Path, page_size: &str) -> Result { - let events_file = format!("hugetlb.{}.events", page_size); - let events = common::read_cgroup_file(cgroup_path.join(&events_file))?; + fn 
stats_for_page_size( + cgroup_path: &Path, + page_size: &str, + ) -> Result { + let events_file = format!("hugetlb.{page_size}.events"); + let path = cgroup_path.join(events_file); + let events = common::read_cgroup_file(&path)?; let fail_count: u64 = events .lines() .find(|l| l.starts_with("max")) .map(|l| l[3..].trim().parse()) .transpose() - .with_context(|| format!("failed to parse max value for {}", events_file))? + .map_err(|err| V2HugeTlbStatsError::ParseMax { + path: path.clone(), + err, + })? .unwrap_or_default(); Ok(HugeTlbStats { - usage: parse_single_value(&cgroup_path.join(format!("hugetlb.{}.current", page_size)))?, + usage: parse_single_value(&cgroup_path.join(format!("hugetlb.{page_size}.current")))?, fail_count, ..Default::default() }) @@ -87,30 +140,30 @@ impl HugeTlb { #[cfg(test)] mod tests { use super::*; - use crate::test::{create_temp_dir, set_fixture}; + use crate::test::set_fixture; use oci_spec::runtime::LinuxHugepageLimitBuilder; use std::fs::read_to_string; #[test] fn test_set_hugetlb() { let page_file_name = "hugetlb.2MB.max"; - let tmp = create_temp_dir("test_set_hugetlbv2").expect("create temp directory for test"); - set_fixture(&tmp, page_file_name, "0").expect("Set fixture for 2 MB page size"); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), page_file_name, "0").expect("Set fixture for 2 MB page size"); let hugetlb = LinuxHugepageLimitBuilder::default() .page_size("2MB") .limit(16384) .build() .unwrap(); - HugeTlb::apply(&tmp, &hugetlb).expect("apply hugetlb"); - let content = read_to_string(tmp.join(page_file_name)).expect("Read hugetlb file content"); + HugeTlb::apply(tmp.path(), &hugetlb).expect("apply hugetlb"); + let content = + read_to_string(tmp.path().join(page_file_name)).expect("Read hugetlb file content"); assert_eq!(hugetlb.limit().to_string(), content); } #[test] fn test_set_hugetlb_with_invalid_page_size() { - let tmp = create_temp_dir("test_set_hugetlbv2_with_invalid_page_size") - .expect("create 
temp directory for test"); + let tmp = tempfile::tempdir().unwrap(); let hugetlb = LinuxHugepageLimitBuilder::default() .page_size("3MB") @@ -118,7 +171,7 @@ mod tests { .build() .unwrap(); - let result = HugeTlb::apply(&tmp, &hugetlb); + let result = HugeTlb::apply(tmp.path(), &hugetlb); assert!( result.is_err(), "page size that is not a power of two should be an error" @@ -128,9 +181,9 @@ mod tests { quickcheck! { fn property_test_set_hugetlb(hugetlb: LinuxHugepageLimit) -> bool { let page_file_name = format!("hugetlb.{:?}.max", hugetlb.page_size()); - let tmp = create_temp_dir("property_test_set_hugetlbv2").expect("create temp directory for test"); - set_fixture(&tmp, &page_file_name, "0").expect("Set fixture for page size"); - let result = HugeTlb::apply(&tmp, &hugetlb); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), &page_file_name, "0").expect("Set fixture for page size"); + let result = HugeTlb::apply(tmp.path(), &hugetlb); let page_size: String = hugetlb .page_size() @@ -141,7 +194,7 @@ mod tests { if HugeTlb::is_power_of_two(page_size) && page_size != 1 { let content = - read_to_string(tmp.join(page_file_name)).expect("Read hugetlb file content"); + read_to_string(tmp.path().join(page_file_name)).expect("Read hugetlb file content"); hugetlb.limit().to_string() == content } else { result.is_err() @@ -151,11 +204,11 @@ mod tests { #[test] fn test_stat_hugetbl() { - let tmp = create_temp_dir("test_stat_hugetlb").expect("create temp directory for test"); - set_fixture(&tmp, "hugetlb.2MB.current", "1024\n").expect("set hugetlb current"); - set_fixture(&tmp, "hugetlb.2MB.events", "max 5\n").expect("set hugetlb events"); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), "hugetlb.2MB.current", "1024\n").expect("set hugetlb current"); + set_fixture(tmp.path(), "hugetlb.2MB.events", "max 5\n").expect("set hugetlb events"); - let actual = HugeTlb::stats_for_page_size(&tmp, "2MB").expect("get cgroup stats"); + let actual = 
HugeTlb::stats_for_page_size(tmp.path(), "2MB").expect("get cgroup stats"); let expected = HugeTlbStats { usage: 1024, diff --git a/crates/libcgroups/src/v2/io.rs b/crates/libcgroups/src/v2/io.rs index caa99d5cf..8266af62a 100644 --- a/crates/libcgroups/src/v2/io.rs +++ b/crates/libcgroups/src/v2/io.rs @@ -1,10 +1,14 @@ -use std::path::{Path, PathBuf}; - -use anyhow::{bail, Context, Result}; +use std::{ + num::ParseIntError, + path::{Path, PathBuf}, +}; use crate::{ - common::{self, ControllerOpt}, - stats::{self, psi_stats, BlkioDeviceStat, BlkioStats, StatsProvider}, + common::{self, ControllerOpt, WrappedIoError}, + stats::{ + self, psi_stats, BlkioDeviceStat, BlkioStats, ParseDeviceNumberError, + ParseNestedKeyedDataError, StatsProvider, + }, }; use super::controller::Controller; @@ -15,28 +19,51 @@ const CGROUP_IO_WEIGHT: &str = "io.weight"; const CGROUP_IO_STAT: &str = "io.stat"; const CGROUP_IO_PSI: &str = "io.pressure"; +#[derive(thiserror::Error, Debug)] +pub enum V2IoControllerError { + #[error("io error: {0}")] + WrappedIo(#[from] WrappedIoError), + #[error("cannot set leaf_weight with cgroupv2")] + LeafWeight, +} + pub struct Io {} impl Controller for Io { - fn apply(controller_opt: &ControllerOpt, cgroup_root: &Path) -> Result<()> { - log::debug!("Apply io cgroup v2 config"); + type Error = V2IoControllerError; + + fn apply(controller_opt: &ControllerOpt, cgroup_root: &Path) -> Result<(), Self::Error> { + tracing::debug!("Apply io cgroup v2 config"); if let Some(io) = &controller_opt.resources.block_io() { - Self::apply(cgroup_root, io).context("failed to apply io resource restrictions")?; + Self::apply(cgroup_root, io)?; } Ok(()) } } +#[derive(thiserror::Error, Debug)] +pub enum V2IoStatsError { + #[error("io error: {0}")] + WrappedIo(#[from] WrappedIoError), + #[error("while parsing stat table: {0}")] + ParseNestedKeyedData(#[from] ParseNestedKeyedDataError), + #[error("while parsing device number: {0}")] + ParseDeviceNumber(#[from] 
ParseDeviceNumberError), + #[error("while parsing table value: {0}")] + ParseInt(#[from] ParseIntError), +} + impl StatsProvider for Io { + type Error = V2IoStatsError; type Stats = BlkioStats; - fn stats(cgroup_path: &Path) -> Result { + fn stats(cgroup_path: &Path) -> Result { let keyed_data = stats::parse_nested_keyed_data(&cgroup_path.join(CGROUP_IO_STAT))?; let mut service_bytes = Vec::with_capacity(keyed_data.len()); let mut serviced = Vec::with_capacity(keyed_data.len()); for entry in keyed_data { let (major, minor) = stats::parse_device_number(&entry.0)?; - for value in &entry.1 { + for value in entry.1 { if value.starts_with("rbytes") { service_bytes.push(BlkioDeviceStat { major, @@ -72,7 +99,7 @@ impl StatsProvider for Io { let stats = BlkioStats { service_bytes, serviced, - psi: psi_stats(&cgroup_path.join(CGROUP_IO_PSI)).context("could not read io psi")?, + psi: psi_stats(&cgroup_path.join(CGROUP_IO_PSI))?, ..Default::default() }; @@ -89,7 +116,7 @@ impl Io { if v == 0 { return 0; } - 1 + (v - 10) * 9999 / 990 + 1 + (v.saturating_sub(10)) * 9999 / 990 } fn io_max_path(path: &Path) -> PathBuf { @@ -97,7 +124,7 @@ impl Io { } // linux kernel doc: https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html#io - fn apply(root_path: &Path, blkio: &LinuxBlockIo) -> Result<()> { + fn apply(root_path: &Path, blkio: &LinuxBlockIo) -> Result<(), V2IoControllerError> { if let Some(weight_device) = blkio.weight_device() { for wd in weight_device { common::write_cgroup_file( @@ -108,7 +135,7 @@ impl Io { } if let Some(leaf_weight) = blkio.leaf_weight() { if leaf_weight > 0 { - bail!("cannot set leaf_weight with cgroupv2"); + return Err(V2IoControllerError::LeafWeight); } } if let Some(io_weight) = blkio.weight() { @@ -169,7 +196,7 @@ impl Io { #[cfg(test)] mod test { use super::*; - use crate::test::{create_temp_dir, set_fixture, setup}; + use crate::test::{set_fixture, setup}; use oci_spec::runtime::{ LinuxBlockIoBuilder, LinuxThrottleDeviceBuilder, 
LinuxWeightDeviceBuilder, @@ -178,7 +205,7 @@ mod test { #[test] fn test_set_io_read_bps() { - let (tmp, throttle) = setup("test_set_io_read_bps", "io.max"); + let (tmp, throttle) = setup("io.max"); let blkio = LinuxBlockIoBuilder::default() .throttle_read_bps_device(vec![LinuxThrottleDeviceBuilder::default() @@ -190,7 +217,7 @@ mod test { .build() .unwrap(); - Io::apply(&tmp, &blkio).expect("apply blkio"); + Io::apply(tmp.path(), &blkio).expect("apply blkio"); let content = fs::read_to_string(throttle).unwrap_or_else(|_| panic!("read rbps content")); assert_eq!("8:0 rbps=102400", content); @@ -198,7 +225,7 @@ mod test { #[test] fn test_set_io_write_bps() { - let (tmp, throttle) = setup("test_set_io_write_bps", "io.max"); + let (tmp, throttle) = setup("io.max"); let blkio = LinuxBlockIoBuilder::default() .throttle_write_bps_device(vec![LinuxThrottleDeviceBuilder::default() @@ -210,7 +237,7 @@ mod test { .build() .unwrap(); - Io::apply(&tmp, &blkio).expect("apply blkio"); + Io::apply(tmp.path(), &blkio).expect("apply blkio"); let content = fs::read_to_string(throttle).unwrap_or_else(|_| panic!("read rbps content")); assert_eq!("8:0 wbps=102400", content); @@ -218,7 +245,7 @@ mod test { #[test] fn test_set_io_read_iops() { - let (tmp, throttle) = setup("test_set_io_read_iops", "io.max"); + let (tmp, throttle) = setup("io.max"); let blkio = LinuxBlockIoBuilder::default() .throttle_read_iops_device(vec![LinuxThrottleDeviceBuilder::default() @@ -230,7 +257,7 @@ mod test { .build() .unwrap(); - Io::apply(&tmp, &blkio).expect("apply blkio"); + Io::apply(tmp.path(), &blkio).expect("apply blkio"); let content = fs::read_to_string(throttle).unwrap_or_else(|_| panic!("read riops content")); assert_eq!("8:0 riops=102400", content); @@ -238,7 +265,7 @@ mod test { #[test] fn test_set_io_write_iops() { - let (tmp, throttle) = setup("test_set_io_write_iops", "io.max"); + let (tmp, throttle) = setup("io.max"); let blkio = LinuxBlockIoBuilder::default() 
.throttle_write_iops_device(vec![LinuxThrottleDeviceBuilder::default() @@ -250,7 +277,7 @@ mod test { .build() .unwrap(); - Io::apply(&tmp, &blkio).expect("apply blkio"); + Io::apply(tmp.path(), &blkio).expect("apply blkio"); let content = fs::read_to_string(throttle).unwrap_or_else(|_| panic!("read wiops content")); assert_eq!("8:0 wiops=102400", content); @@ -258,7 +285,7 @@ mod test { #[test] fn test_set_ioweight_device() { - let (tmp, throttle) = setup("test_set_io_weight_device", CGROUP_BFQ_IO_WEIGHT); + let (tmp, throttle) = setup(CGROUP_BFQ_IO_WEIGHT); let blkio = LinuxBlockIoBuilder::default() .weight_device(vec![LinuxWeightDeviceBuilder::default() .major(8) @@ -270,7 +297,7 @@ mod test { .build() .unwrap(); - Io::apply(&tmp, &blkio).expect("apply blkio"); + Io::apply(tmp.path(), &blkio).expect("apply blkio"); let content = fs::read_to_string(throttle).unwrap_or_else(|_| panic!("read bfq_io_weight content")); @@ -296,13 +323,13 @@ mod test { expected_weight: String::from("1"), }, ] { - let (tmp, weight_file) = setup("test_set_io_weight", case.cgroup_file); + let (tmp, weight_file) = setup(case.cgroup_file); let blkio = LinuxBlockIoBuilder::default() .weight(case.weight) .build() .unwrap(); - Io::apply(&tmp, &blkio).expect("apply blkio"); + Io::apply(tmp.path(), &blkio).expect("apply blkio"); let content = fs::read_to_string(weight_file).expect("read blkio weight"); assert_eq!(case.expected_weight, content); } @@ -310,16 +337,16 @@ mod test { #[test] fn test_stat_io() { - let tmp = create_temp_dir("test_stat_io").expect("create test directory"); + let tmp = tempfile::tempdir().unwrap(); let stat_content = [ "7:10 rbytes=18432 wbytes=16842 rios=12 wios=0 dbytes=0 dios=0", "7:9 rbytes=34629632 wbytes=274965 rios=1066 wios=319 dbytes=0 dios=0", ] .join("\n"); - set_fixture(&tmp, "io.stat", &stat_content).unwrap(); - set_fixture(&tmp, CGROUP_IO_PSI, "").expect("create psi file"); + set_fixture(tmp.path(), "io.stat", &stat_content).unwrap(); + 
set_fixture(tmp.path(), CGROUP_IO_PSI, "").expect("create psi file"); - let mut actual = Io::stats(&tmp).expect("get cgroup stats"); + let mut actual = Io::stats(tmp.path()).expect("get cgroup stats"); let expected = BlkioStats { service_bytes: vec![ BlkioDeviceStat { diff --git a/crates/libcgroups/src/v2/manager.rs b/crates/libcgroups/src/v2/manager.rs index e4c654669..9aa5b8d56 100644 --- a/crates/libcgroups/src/v2/manager.rs +++ b/crates/libcgroups/src/v2/manager.rs @@ -5,8 +5,6 @@ use std::{ time::Duration, }; -use anyhow::{Context, Result}; - use nix::unistd::Pid; #[cfg(feature = "cgroupsv2_devices")] @@ -16,23 +14,67 @@ use super::{ controller_type::{ ControllerType, PseudoControllerType, CONTROLLER_TYPES, PSEUDO_CONTROLLER_TYPES, }, - cpu::Cpu, + cpu::{Cpu, V2CpuControllerError, V2CpuStatsError}, cpuset::CpuSet, - freezer::Freezer, - hugetlb::HugeTlb, - io::Io, - memory::Memory, + freezer::{Freezer, V2FreezerError}, + hugetlb::{HugeTlb, V2HugeTlbControllerError, V2HugeTlbStatsError}, + io::{Io, V2IoControllerError, V2IoStatsError}, + memory::{Memory, V2MemoryControllerError, V2MemoryStatsError}, pids::Pids, - unified::Unified, - util::{self, CGROUP_SUBTREE_CONTROL}, + unified::{Unified, V2UnifiedError}, + util::{self, V2UtilError, CGROUP_SUBTREE_CONTROL}, }; use crate::{ - common::{self, CgroupManager, ControllerOpt, FreezerState, PathBufExt, CGROUP_PROCS}, - stats::{Stats, StatsProvider}, + common::{ + self, AnyCgroupManager, CgroupManager, ControllerOpt, FreezerState, JoinSafelyError, + PathBufExt, WrapIoResult, WrappedIoError, CGROUP_PROCS, + }, + stats::{PidStatsError, Stats, StatsProvider}, }; pub const CGROUP_KILL: &str = "cgroup.kill"; +#[derive(thiserror::Error, Debug)] +pub enum V2ManagerError { + #[error("io error: {0}")] + WrappedIo(#[from] WrappedIoError), + #[error("while joining paths: {0}")] + JoinSafely(#[from] JoinSafelyError), + #[error(transparent)] + Util(#[from] V2UtilError), + + #[error(transparent)] + CpuController(#[from] 
V2CpuControllerError), + #[error(transparent)] + CpuSetController(WrappedIoError), + #[error(transparent)] + HugeTlbController(#[from] V2HugeTlbControllerError), + #[error(transparent)] + IoController(#[from] V2IoControllerError), + #[error(transparent)] + MemoryController(#[from] V2MemoryControllerError), + #[error(transparent)] + PidsController(WrappedIoError), + #[error(transparent)] + UnifiedController(#[from] V2UnifiedError), + #[error(transparent)] + FreezerController(#[from] V2FreezerError), + #[cfg(feature = "cgroupsv2_devices")] + #[error(transparent)] + DevicesController(#[from] super::devices::controller::DevicesControllerError), + + #[error(transparent)] + CpuStats(#[from] V2CpuStatsError), + #[error(transparent)] + HugeTlbStats(#[from] V2HugeTlbStatsError), + #[error(transparent)] + PidsStats(PidStatsError), + #[error(transparent)] + MemoryStats(#[from] V2MemoryStatsError), + #[error(transparent)] + IoStats(#[from] V2IoStatsError), +} + pub struct Manager { root_path: PathBuf, cgroup_path: PathBuf, @@ -42,7 +84,7 @@ pub struct Manager { impl Manager { /// Constructs a new cgroup manager with root path being the mount point /// of a cgroup v2 fs and cgroup path being a relative path from the root - pub fn new(root_path: PathBuf, cgroup_path: PathBuf) -> Result { + pub fn new(root_path: PathBuf, cgroup_path: PathBuf) -> Result { let full_path = root_path.join_safely(&cgroup_path)?; Ok(Self { @@ -52,7 +94,7 @@ impl Manager { }) } - fn create_unified_cgroup(&self, pid: Pid) -> Result<()> { + fn create_unified_cgroup(&self, pid: Pid) -> Result<(), V2ManagerError> { let controllers: Vec = util::get_available_controllers(&self.root_path)? 
.iter() .map(|c| format!("{}{}", "+", c)) @@ -69,8 +111,11 @@ impl Manager { while let Some(component) = components.next() { current_path = current_path.join(component); if !current_path.exists() { - fs::create_dir(¤t_path)?; - fs::metadata(¤t_path)?.permissions().set_mode(0o755); + fs::create_dir(¤t_path).wrap_create_dir(¤t_path)?; + fs::metadata(¤t_path) + .wrap_other(¤t_path)? + .permissions() + .set_mode(0o755); } // last component cannot have subtree_control enabled due to internal process constraint @@ -84,22 +129,28 @@ impl Manager { Ok(()) } - fn write_controllers(path: &Path, controllers: &[String]) -> Result<()> { + fn write_controllers(path: &Path, controllers: &[String]) -> Result<(), WrappedIoError> { for controller in controllers { common::write_cgroup_file_str(path.join(CGROUP_SUBTREE_CONTROL), controller)?; } Ok(()) } + + pub fn any(self) -> AnyCgroupManager { + AnyCgroupManager::V2(self) + } } impl CgroupManager for Manager { - fn add_task(&self, pid: Pid) -> Result<()> { + type Error = V2ManagerError; + + fn add_task(&self, pid: Pid) -> Result<(), Self::Error> { self.create_unified_cgroup(pid)?; Ok(()) } - fn apply(&self, controller_opt: &ControllerOpt) -> Result<()> { + fn apply(&self, controller_opt: &ControllerOpt) -> Result<(), Self::Error> { for controller in CONTROLLER_TYPES { match controller { ControllerType::Cpu => Cpu::apply(controller_opt, &self.full_path)?, @@ -127,18 +178,21 @@ impl CgroupManager for Manager { Ok(()) } - fn remove(&self) -> Result<()> { + fn remove(&self) -> Result<(), Self::Error> { if self.full_path.exists() { - log::debug!("remove cgroup {:?}", self.full_path); + tracing::debug!("remove cgroup {:?}", self.full_path); let kill_file = self.full_path.join(CGROUP_KILL); if kill_file.exists() { - fs::write(kill_file, "1").context("failed to kill cgroup")?; + fs::write(&kill_file, "1").wrap_write(&kill_file, "1")?; } else { let procs_path = self.full_path.join(CGROUP_PROCS); - let procs = fs::read_to_string(procs_path)?; 
+ let procs = fs::read_to_string(&procs_path).wrap_read(&procs_path)?; for line in procs.lines() { - let pid: i32 = line.parse()?; + let pid: i32 = line + .parse() + .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, err)) + .wrap_other(&procs_path)?; let _ = nix::sys::signal::kill(Pid::from_raw(pid), nix::sys::signal::SIGKILL); } } @@ -149,24 +203,26 @@ impl CgroupManager for Manager { Ok(()) } - fn freeze(&self, state: FreezerState) -> Result<()> { + fn freeze(&self, state: FreezerState) -> Result<(), Self::Error> { let controller_opt = ControllerOpt { resources: &Default::default(), freezer_state: Some(state), oom_score_adj: None, disable_oom_killer: false, }; - Freezer::apply(&controller_opt, &self.full_path) + Ok(Freezer::apply(&controller_opt, &self.full_path)?) } - fn stats(&self) -> Result { + fn stats(&self) -> Result { let mut stats = Stats::default(); for subsystem in CONTROLLER_TYPES { match subsystem { ControllerType::Cpu => stats.cpu = Cpu::stats(&self.full_path)?, ControllerType::HugeTlb => stats.hugetlb = HugeTlb::stats(&self.full_path)?, - ControllerType::Pids => stats.pids = Pids::stats(&self.full_path)?, + ControllerType::Pids => { + stats.pids = Pids::stats(&self.full_path).map_err(V2ManagerError::PidsStats)? + } ControllerType::Memory => stats.memory = Memory::stats(&self.full_path)?, ControllerType::Io => stats.blkio = Io::stats(&self.full_path)?, _ => continue, @@ -176,7 +232,7 @@ impl CgroupManager for Manager { Ok(stats) } - fn get_all_pids(&self) -> Result> { - common::get_all_pids(&self.full_path) + fn get_all_pids(&self) -> Result, Self::Error> { + Ok(common::get_all_pids(&self.full_path)?) 
} } diff --git a/crates/libcgroups/src/v2/memory.rs b/crates/libcgroups/src/v2/memory.rs index f9082f22c..05ed7d149 100644 --- a/crates/libcgroups/src/v2/memory.rs +++ b/crates/libcgroups/src/v2/memory.rs @@ -1,11 +1,10 @@ -use anyhow::{bail, Context, Result}; use std::path::Path; use oci_spec::runtime::LinuxMemory; use crate::{ - common::{self, ControllerOpt}, - stats::{self, MemoryData, MemoryStats, StatsProvider}, + common::{self, ControllerOpt, WrappedIoError}, + stats::{self, MemoryData, MemoryStats, ParseFlatKeyedDataError, StatsProvider}, }; use super::controller::Controller; @@ -16,30 +15,54 @@ const CGROUP_MEMORY_LOW: &str = "memory.low"; const MEMORY_STAT: &str = "memory.stat"; const MEMORY_PSI: &str = "memory.pressure"; +#[derive(thiserror::Error, Debug)] +pub enum V2MemoryControllerError { + #[error("io error: {0}")] + WrappedIo(#[from] WrappedIoError), + #[error("invalid memory value {0}")] + MemoryValue(i64), + #[error("invalid swap value {0}")] + SwapValue(i64), + #[error("swap memory ({swap}) should be bigger than memory limit ({limit})")] + SwapTooSmall { swap: i64, limit: i64 }, + #[error("unable to set swap limit without memory limit")] + SwapWithoutLimit, + #[error("invalid memory reservation value: {0}")] + MemoryReservation(i64), +} + pub struct Memory {} impl Controller for Memory { - fn apply(controller_opt: &ControllerOpt, cgroup_path: &Path) -> Result<()> { + type Error = V2MemoryControllerError; + + fn apply(controller_opt: &ControllerOpt, cgroup_path: &Path) -> Result<(), Self::Error> { if let Some(memory) = &controller_opt.resources.memory() { - Self::apply(cgroup_path, memory) - .context("failed to apply memory resource restrictions")?; + Self::apply(cgroup_path, memory)?; } Ok(()) } } +#[derive(thiserror::Error, Debug)] +pub enum V2MemoryStatsError { + #[error("io error: {0}")] + WrappedIo(#[from] WrappedIoError), + #[error("while parsing stat table: {0}")] + ParseNestedKeyedData(#[from] ParseFlatKeyedDataError), +} impl StatsProvider 
for Memory { + type Error = V2MemoryStatsError; type Stats = MemoryStats; - fn stats(cgroup_path: &Path) -> Result { + fn stats(cgroup_path: &Path) -> Result { let stats = MemoryStats { memory: Self::get_memory_data(cgroup_path, "memory", "oom")?, memswap: Self::get_memory_data(cgroup_path, "memory.swap", "fail")?, hierarchy: true, stats: stats::parse_flat_keyed_data(&cgroup_path.join(MEMORY_STAT))?, - psi: stats::psi_stats(&cgroup_path.join(MEMORY_PSI)) - .context("could not read memory psi")?, + psi: stats::psi_stats(&cgroup_path.join(MEMORY_PSI))?, ..Default::default() }; @@ -52,7 +75,7 @@ impl Memory { cgroup_path: &Path, file_prefix: &str, fail_event: &str, - ) -> Result { + ) -> Result { let usage = stats::parse_single_value(&cgroup_path.join(format!("{}.{}", file_prefix, "current")))?; let limit = @@ -75,17 +98,17 @@ impl Memory { }) } - fn set>(path: P, val: i64) -> Result<()> { + fn set>(path: P, val: i64) -> Result<(), WrappedIoError> { if val == 0 { Ok(()) } else if val == -1 { - common::write_cgroup_file_str(path, "max") + Ok(common::write_cgroup_file_str(path, "max")?) } else { - common::write_cgroup_file(path, val) + Ok(common::write_cgroup_file(path, val)?) 
} } - fn apply(path: &Path, memory: &LinuxMemory) -> Result<()> { + fn apply(path: &Path, memory: &LinuxMemory) -> Result<(), V2MemoryControllerError> { // if nothing is set just exit right away if memory.reservation().is_none() && memory.limit().is_none() && memory.swap().is_none() { return Ok(()); @@ -93,11 +116,11 @@ impl Memory { match memory.limit() { Some(limit) if limit < -1 => { - bail!("invalid memory value: {}", limit); + return Err(V2MemoryControllerError::MemoryValue(limit)); } Some(limit) => match memory.swap() { Some(swap) if swap < -1 => { - bail!("invalid swap value: {}", swap); + return Err(V2MemoryControllerError::SwapValue(swap)); } Some(swap) => { // -1 means max @@ -105,11 +128,7 @@ impl Memory { Memory::set(path.join(CGROUP_MEMORY_SWAP), swap)?; } else { if swap < limit { - bail!( - "swap memory ({}) should be bigger than memory limit ({})", - swap, - limit - ); + return Err(V2MemoryControllerError::SwapTooSmall { swap, limit }); } // In cgroup v1 swap is memory+swap, but in cgroup v2 swap is @@ -129,14 +148,14 @@ impl Memory { }, None => { if memory.swap().is_some() { - bail!("unable to set swap limit without memory limit"); + return Err(V2MemoryControllerError::SwapWithoutLimit); } } }; if let Some(reservation) = memory.reservation() { if reservation < -1 { - bail!("invalid memory reservation value: {}", reservation); + return Err(V2MemoryControllerError::MemoryReservation(reservation)); } Memory::set(path.join(CGROUP_MEMORY_LOW), reservation)?; } @@ -148,16 +167,17 @@ impl Memory { #[cfg(test)] mod tests { use super::*; - use crate::test::{create_temp_dir, set_fixture}; + use crate::test::set_fixture; use oci_spec::runtime::LinuxMemoryBuilder; use std::fs::read_to_string; #[test] fn test_set_memory() { - let tmp = create_temp_dir("test_set_memory_v2").expect("create temp directory for test"); - set_fixture(&tmp, CGROUP_MEMORY_MAX, "0").expect("set fixture for memory limit"); - set_fixture(&tmp, CGROUP_MEMORY_LOW, "0").expect("set fixture 
for memory reservation"); - set_fixture(&tmp, CGROUP_MEMORY_SWAP, "0").expect("set fixture for swap limit"); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), CGROUP_MEMORY_MAX, "0").expect("set fixture for memory limit"); + set_fixture(tmp.path(), CGROUP_MEMORY_LOW, "0") + .expect("set fixture for memory reservation"); + set_fixture(tmp.path(), CGROUP_MEMORY_SWAP, "0").expect("set fixture for swap limit"); let limit = 1024; let reservation = 512; @@ -170,73 +190,79 @@ mod tests { .build() .unwrap(); - Memory::apply(&tmp, &memory_limits).expect("apply memory limits"); + Memory::apply(tmp.path(), &memory_limits).expect("apply memory limits"); - let limit_content = read_to_string(tmp.join(CGROUP_MEMORY_MAX)).expect("read memory limit"); + let limit_content = + read_to_string(tmp.path().join(CGROUP_MEMORY_MAX)).expect("read memory limit"); assert_eq!(limit_content, limit.to_string()); - let swap_content = read_to_string(tmp.join(CGROUP_MEMORY_SWAP)).expect("read swap limit"); + let swap_content = + read_to_string(tmp.path().join(CGROUP_MEMORY_SWAP)).expect("read swap limit"); assert_eq!(swap_content, (swap - limit).to_string()); let reservation_content = - read_to_string(tmp.join(CGROUP_MEMORY_LOW)).expect("read memory reservation"); + read_to_string(tmp.path().join(CGROUP_MEMORY_LOW)).expect("read memory reservation"); assert_eq!(reservation_content, reservation.to_string()); } #[test] fn test_set_memory_unlimited() { - let tmp = create_temp_dir("test_set_memory_unlimited_v2") - .expect("create temp directory for test"); - set_fixture(&tmp, CGROUP_MEMORY_MAX, "0").expect("set fixture for memory limit"); - set_fixture(&tmp, CGROUP_MEMORY_LOW, "0").expect("set fixture for memory reservation"); - set_fixture(&tmp, CGROUP_MEMORY_SWAP, "0").expect("set fixture for swap limit"); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), CGROUP_MEMORY_MAX, "0").expect("set fixture for memory limit"); + set_fixture(tmp.path(), CGROUP_MEMORY_LOW, 
"0") + .expect("set fixture for memory reservation"); + set_fixture(tmp.path(), CGROUP_MEMORY_SWAP, "0").expect("set fixture for swap limit"); let memory_limits = LinuxMemoryBuilder::default().limit(-1).build().unwrap(); - Memory::apply(&tmp, &memory_limits).expect("apply memory limits"); + Memory::apply(tmp.path(), &memory_limits).expect("apply memory limits"); - let limit_content = read_to_string(tmp.join(CGROUP_MEMORY_MAX)).expect("read memory limit"); + let limit_content = + read_to_string(tmp.path().join(CGROUP_MEMORY_MAX)).expect("read memory limit"); assert_eq!(limit_content, "max"); - let swap_content = read_to_string(tmp.join(CGROUP_MEMORY_SWAP)).expect("read swap limit"); + let swap_content = + read_to_string(tmp.path().join(CGROUP_MEMORY_SWAP)).expect("read swap limit"); assert_eq!(swap_content, "max"); } #[test] fn test_err_swap_no_memory() { - let tmp = - create_temp_dir("test_err_swap_no_memory_v2").expect("create temp directory for test"); - set_fixture(&tmp, CGROUP_MEMORY_MAX, "0").expect("set fixture for memory limit"); - set_fixture(&tmp, CGROUP_MEMORY_LOW, "0").expect("set fixture for memory reservation"); - set_fixture(&tmp, CGROUP_MEMORY_SWAP, "0").expect("set fixture for swap limit"); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), CGROUP_MEMORY_MAX, "0").expect("set fixture for memory limit"); + set_fixture(tmp.path(), CGROUP_MEMORY_LOW, "0") + .expect("set fixture for memory reservation"); + set_fixture(tmp.path(), CGROUP_MEMORY_SWAP, "0").expect("set fixture for swap limit"); let memory_limits = LinuxMemoryBuilder::default().swap(512).build().unwrap(); - let result = Memory::apply(&tmp, &memory_limits); + let result = Memory::apply(tmp.path(), &memory_limits); assert!(result.is_err()); } #[test] fn test_err_bad_limit() { - let tmp = create_temp_dir("test_err_bad_limit_v2").expect("create temp directory for test"); - set_fixture(&tmp, CGROUP_MEMORY_MAX, "0").expect("set fixture for memory limit"); - set_fixture(&tmp, 
CGROUP_MEMORY_LOW, "0").expect("set fixture for memory reservation"); - set_fixture(&tmp, CGROUP_MEMORY_SWAP, "0").expect("set fixture for swap limit"); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), CGROUP_MEMORY_MAX, "0").expect("set fixture for memory limit"); + set_fixture(tmp.path(), CGROUP_MEMORY_LOW, "0") + .expect("set fixture for memory reservation"); + set_fixture(tmp.path(), CGROUP_MEMORY_SWAP, "0").expect("set fixture for swap limit"); let memory_limits = LinuxMemoryBuilder::default().limit(-2).build().unwrap(); - let result = Memory::apply(&tmp, &memory_limits); + let result = Memory::apply(tmp.path(), &memory_limits); assert!(result.is_err()); } #[test] fn test_err_bad_swap() { - let tmp = create_temp_dir("test_err_bad_swap_v2").expect("create temp directory for test"); - set_fixture(&tmp, CGROUP_MEMORY_MAX, "0").expect("set fixture for memory limit"); - set_fixture(&tmp, CGROUP_MEMORY_LOW, "0").expect("set fixture for memory reservation"); - set_fixture(&tmp, CGROUP_MEMORY_SWAP, "0").expect("set fixture for swap limit"); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), CGROUP_MEMORY_MAX, "0").expect("set fixture for memory limit"); + set_fixture(tmp.path(), CGROUP_MEMORY_LOW, "0") + .expect("set fixture for memory reservation"); + set_fixture(tmp.path(), CGROUP_MEMORY_SWAP, "0").expect("set fixture for swap limit"); let memory_limits = LinuxMemoryBuilder::default() .limit(512) @@ -244,19 +270,19 @@ mod tests { .build() .unwrap(); - let result = Memory::apply(&tmp, &memory_limits); + let result = Memory::apply(tmp.path(), &memory_limits); assert!(result.is_err()); } quickcheck! 
{ fn property_test_set_memory(linux_memory: LinuxMemory) -> bool { - let tmp = create_temp_dir("property_test_set_memory_v2").expect("create temp directory for test"); - set_fixture(&tmp, CGROUP_MEMORY_MAX, "0").expect("set fixture for memory limit"); - set_fixture(&tmp, CGROUP_MEMORY_LOW, "0").expect("set fixture for memory reservation"); - set_fixture(&tmp, CGROUP_MEMORY_SWAP, "0").expect("set fixture for swap limit"); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), CGROUP_MEMORY_MAX, "0").expect("set fixture for memory limit"); + set_fixture(tmp.path(), CGROUP_MEMORY_LOW, "0").expect("set fixture for memory reservation"); + set_fixture(tmp.path(), CGROUP_MEMORY_SWAP, "0").expect("set fixture for swap limit"); - let result = Memory::apply(&tmp, &linux_memory); + let result = Memory::apply(tmp.path(), &linux_memory); // we need to check for expected errors first and foremost or we'll get false negatives // later @@ -287,7 +313,7 @@ mod tests { } // check the limit file is set as expected - let limit_content = read_to_string(tmp.join(CGROUP_MEMORY_MAX)).expect("read memory limit to string"); + let limit_content = read_to_string(tmp.path().join(CGROUP_MEMORY_MAX)).expect("read memory limit to string"); let limit_check = match linux_memory.limit() { Some(limit) if limit == -1 => limit_content == "max", Some(limit) => limit_content == limit.to_string(), @@ -295,7 +321,7 @@ mod tests { }; // check the swap file is set as expected - let swap_content = read_to_string(tmp.join(CGROUP_MEMORY_SWAP)).expect("read swap limit to string"); + let swap_content = read_to_string(tmp.path().join(CGROUP_MEMORY_SWAP)).expect("read swap limit to string"); let swap_check = match linux_memory.swap() { Some(swap) if swap == -1 => swap_content == "max", Some(swap) => { @@ -319,29 +345,30 @@ mod tests { // check the resevation file is set as expected - let reservation_content = read_to_string(tmp.join(CGROUP_MEMORY_LOW)).expect("read memory reservation to string"); + let 
reservation_content = read_to_string(tmp.path().join(CGROUP_MEMORY_LOW)).expect("read memory reservation to string"); let reservation_check = match linux_memory.reservation() { Some(reservation) if reservation == -1 => reservation_content == "max", Some(reservation) => reservation_content == reservation.to_string(), None => reservation_content == "0", }; - println!("limit_check: {}", limit_check); - println!("swap_check: {}", swap_check); - println!("reservation_check: {}", reservation_check); + println!("limit_check: {limit_check}"); + println!("swap_check: {swap_check}"); + println!("reservation_check: {reservation_check}"); limit_check && swap_check && reservation_check } } #[test] fn test_get_memory_data() { - let tmp = create_temp_dir("test_stat_memory").expect("create test directory"); - set_fixture(&tmp, "memory.current", "12500\n").unwrap(); - set_fixture(&tmp, "memory.max", "25000\n").unwrap(); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), "memory.current", "12500\n").unwrap(); + set_fixture(tmp.path(), "memory.max", "25000\n").unwrap(); let events = ["slab 5", "anon 13", "oom 3"].join("\n"); - set_fixture(&tmp, "memory.events", &events).unwrap(); + set_fixture(tmp.path(), "memory.events", &events).unwrap(); - let actual = Memory::get_memory_data(&tmp, "memory", "oom").expect("get cgroup stats"); + let actual = + Memory::get_memory_data(tmp.path(), "memory", "oom").expect("get cgroup stats"); let expected = MemoryData { usage: 12500, limit: 25000, diff --git a/crates/libcgroups/src/v2/pids.rs b/crates/libcgroups/src/v2/pids.rs index 38d90df3b..a488671be 100644 --- a/crates/libcgroups/src/v2/pids.rs +++ b/crates/libcgroups/src/v2/pids.rs @@ -1,10 +1,8 @@ use std::path::Path; -use anyhow::{Context, Result}; - use crate::{ - common::{self, ControllerOpt}, - stats::{self, PidStats, StatsProvider}, + common::{self, ControllerOpt, WrappedIoError}, + stats::{self, PidStats, PidStatsError, StatsProvider}, }; use super::controller::Controller; 
@@ -13,25 +11,31 @@ use oci_spec::runtime::LinuxPids; pub struct Pids {} impl Controller for Pids { - fn apply(controller_opt: &ControllerOpt, cgroup_root: &std::path::Path) -> Result<()> { - log::debug!("Apply pids cgroup v2 config"); + type Error = WrappedIoError; + + fn apply( + controller_opt: &ControllerOpt, + cgroup_root: &std::path::Path, + ) -> Result<(), Self::Error> { + tracing::debug!("Apply pids cgroup v2 config"); if let Some(pids) = &controller_opt.resources.pids() { - Self::apply(cgroup_root, pids).context("failed to apply pids resource restrictions")?; + Self::apply(cgroup_root, pids)?; } Ok(()) } } impl StatsProvider for Pids { + type Error = PidStatsError; type Stats = PidStats; - fn stats(cgroup_path: &Path) -> Result { + fn stats(cgroup_path: &Path) -> Result { stats::pid_stats(cgroup_path) } } impl Pids { - fn apply(root_path: &Path, pids: &LinuxPids) -> Result<()> { + fn apply(root_path: &Path, pids: &LinuxPids) -> Result<(), WrappedIoError> { let limit = if pids.limit() > 0 { pids.limit().to_string() } else { @@ -44,35 +48,35 @@ impl Pids { #[cfg(test)] mod tests { use super::*; - use crate::test::{create_temp_dir, set_fixture}; + use crate::test::set_fixture; use oci_spec::runtime::LinuxPidsBuilder; #[test] fn test_set_pids() { let pids_file_name = "pids.max"; - let tmp = create_temp_dir("v2_test_set_pids").expect("create temp directory for test"); - set_fixture(&tmp, pids_file_name, "1000").expect("Set fixture for 1000 pids"); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), pids_file_name, "1000").expect("Set fixture for 1000 pids"); let pids = LinuxPidsBuilder::default().limit(1000).build().unwrap(); - Pids::apply(&tmp, &pids).expect("apply pids"); + Pids::apply(tmp.path(), &pids).expect("apply pids"); let content = - std::fs::read_to_string(tmp.join(pids_file_name)).expect("Read pids contents"); + std::fs::read_to_string(tmp.path().join(pids_file_name)).expect("Read pids contents"); assert_eq!(pids.limit().to_string(), 
content); } #[test] fn test_set_pids_max() { let pids_file_name = "pids.max"; - let tmp = create_temp_dir("v2_test_set_pids_max").expect("create temp directory for test"); - set_fixture(&tmp, pids_file_name, "0").expect("set fixture for 0 pids"); + let tmp = tempfile::tempdir().unwrap(); + set_fixture(tmp.path(), pids_file_name, "0").expect("set fixture for 0 pids"); let pids = LinuxPidsBuilder::default().limit(0).build().unwrap(); - Pids::apply(&tmp, &pids).expect("apply pids"); + Pids::apply(tmp.path(), &pids).expect("apply pids"); let content = - std::fs::read_to_string(tmp.join(pids_file_name)).expect("Read pids contents"); + std::fs::read_to_string(tmp.path().join(pids_file_name)).expect("Read pids contents"); assert_eq!("max".to_string(), content); } } diff --git a/crates/libcgroups/src/v2/unified.rs b/crates/libcgroups/src/v2/unified.rs index 22e541688..89aca1509 100644 --- a/crates/libcgroups/src/v2/unified.rs +++ b/crates/libcgroups/src/v2/unified.rs @@ -1,9 +1,18 @@ use std::{collections::HashMap, path::Path}; -use anyhow::{Context, Result}; - use super::controller_type::ControllerType; -use crate::common::{self, ControllerOpt}; +use crate::common::{self, ControllerOpt, WrappedIoError}; + +#[derive(thiserror::Error, Debug)] +pub enum V2UnifiedError { + #[error("io error: {0}")] + WrappedIo(#[from] WrappedIoError), + #[error("subsystem {subsystem} is not available: {err}")] + SubsystemNotAvailable { + subsystem: String, + err: WrappedIoError, + }, +} pub struct Unified {} @@ -12,10 +21,9 @@ impl Unified { controller_opt: &ControllerOpt, cgroup_path: &Path, controllers: Vec, - ) -> Result<()> { + ) -> Result<(), V2UnifiedError> { if let Some(unified) = &controller_opt.resources.unified() { - Self::apply_impl(unified, cgroup_path, &controllers) - .context("failed to apply unified resource restrictions")?; + Self::apply_impl(unified, cgroup_path, &controllers)?; } Ok(()) @@ -25,30 +33,20 @@ impl Unified { unified: &HashMap, cgroup_path: &Path, controllers: 
&[ControllerType], - ) -> Result<()> { - { - log::debug!("Apply unified cgroup config"); - for (cgroup_file, value) in unified { - common::write_cgroup_file_str(cgroup_path.join(cgroup_file), value).map_err( - |e| { - let (subsystem, _) = cgroup_file - .split_once('.') - .with_context(|| { - format!("failed to split {} with {}", cgroup_file, ".") - }) - .unwrap(); - let context = if !controllers.iter().any(|c| c.to_string() == subsystem) { - format!( - "failed to set {} to {}: subsystem {} is not available", - cgroup_file, value, subsystem - ) - } else { - format!("failed to set {} to {}: {}", cgroup_file, value, e) - }; - - e.context(context) - }, - )?; + ) -> Result<(), V2UnifiedError> { + tracing::debug!("Apply unified cgroup config"); + for (cgroup_file, value) in unified { + if let Err(err) = common::write_cgroup_file_str(cgroup_path.join(cgroup_file), value) { + let (subsystem, _) = cgroup_file.split_once('.').unwrap_or((cgroup_file, "")); + + if controllers.iter().any(|c| c.to_string() == subsystem) { + Err(err)?; + } else { + return Err(V2UnifiedError::SubsystemNotAvailable { + subsystem: subsystem.into(), + err, + }); + } } } @@ -63,7 +61,7 @@ mod tests { use oci_spec::runtime::LinuxResourcesBuilder; - use crate::test::{create_temp_dir, set_fixture}; + use crate::test::set_fixture; use crate::v2::controller_type::ControllerType; use super::*; @@ -71,9 +69,9 @@ mod tests { #[test] fn test_set_unified() { // arrange - let tmp = create_temp_dir("test_set_unified").unwrap(); - let hugetlb_limit_path = set_fixture(&tmp, "hugetlb.1GB.limit_in_bytes", "").unwrap(); - let cpu_weight_path = set_fixture(&tmp, "cpu.weight", "").unwrap(); + let tmp = tempfile::tempdir().unwrap(); + let hugetlb_limit_path = set_fixture(tmp.path(), "hugetlb.1GB.limit_in_bytes", "").unwrap(); + let cpu_weight_path = set_fixture(tmp.path(), "cpu.weight", "").unwrap(); let unified = { let mut u = HashMap::new(); @@ -98,7 +96,7 @@ mod tests { }; // act - Unified::apply(&controller_opt, 
&tmp, vec![]).expect("apply unified"); + Unified::apply(&controller_opt, tmp.path(), vec![]).expect("apply unified"); // assert let hugetlb_limit = fs::read_to_string(hugetlb_limit_path).expect("read hugetlb limit"); @@ -110,8 +108,7 @@ mod tests { #[test] fn test_set_unified_failed_to_write_subsystem_not_enabled() { // arrange - let tmp = - create_temp_dir("test_set_unified_failed_to_write_subsystem_not_enabled").unwrap(); + let tmp = tempfile::tempdir().unwrap(); let unified = { let mut u = HashMap::new(); @@ -136,7 +133,7 @@ mod tests { }; // act - let result = Unified::apply(&controller_opt, &tmp, vec![]); + let result = Unified::apply(&controller_opt, tmp.path(), vec![]); // assert assert!(result.is_err()); @@ -145,7 +142,7 @@ mod tests { #[test] fn test_set_unified_failed_to_write_subsystem_enabled() { // arrange - let tmp = create_temp_dir("test_set_unified_failed_to_write_subsystem_enabled").unwrap(); + let tmp = tempfile::tempdir().unwrap(); let unified = { let mut u = HashMap::new(); @@ -172,7 +169,7 @@ mod tests { // act let result = Unified::apply( &controller_opt, - &tmp, + tmp.path(), vec![ControllerType::HugeTlb, ControllerType::Cpu], ); diff --git a/crates/libcgroups/src/v2/util.rs b/crates/libcgroups/src/v2/util.rs index 9bed000ce..57746bc3a 100644 --- a/crates/libcgroups/src/v2/util.rs +++ b/crates/libcgroups/src/v2/util.rs @@ -1,32 +1,42 @@ use std::path::{Path, PathBuf}; -use anyhow::{anyhow, bail, Result}; -use procfs::process::Process; +use procfs::{process::Process, ProcError}; -use crate::common; +use crate::common::{self, WrappedIoError}; use super::controller_type::ControllerType; pub const CGROUP_CONTROLLERS: &str = "cgroup.controllers"; pub const CGROUP_SUBTREE_CONTROL: &str = "cgroup.subtree_control"; -pub fn get_unified_mount_point() -> Result { +#[derive(thiserror::Error, Debug)] +pub enum V2UtilError { + #[error("io error: {0}")] + WrappedIo(#[from] WrappedIoError), + #[error("proc error: {0}")] + Proc(#[from] ProcError), + 
#[error("could not find mountpoint for unified")] + CouldNotFind, + #[error("cannot get available controllers. {0} does not exist")] + DoesNotExist(PathBuf), +} + +pub fn get_unified_mount_point() -> Result { Process::myself()? .mountinfo()? .into_iter() .find(|m| m.fs_type == "cgroup2") .map(|m| m.mount_point) - .ok_or_else(|| anyhow!("could not find mountpoint for unified")) + .ok_or(V2UtilError::CouldNotFind) } -pub fn get_available_controllers>(root_path: P) -> Result> { +pub fn get_available_controllers>( + root_path: P, +) -> Result, V2UtilError> { let root_path = root_path.as_ref(); let controllers_path = root_path.join(CGROUP_CONTROLLERS); if !controllers_path.exists() { - bail!( - "cannot get available controllers. {:?} does not exist", - controllers_path - ) + return Err(V2UtilError::DoesNotExist(controllers_path)); } let mut controllers = Vec::new(); @@ -38,7 +48,7 @@ pub fn get_available_controllers>(root_path: P) -> Result controllers.push(ControllerType::Io), "memory" => controllers.push(ControllerType::Memory), "pids" => controllers.push(ControllerType::Pids), - tpe => log::warn!("Controller {} is not yet implemented.", tpe), + tpe => tracing::warn!("Controller {} is not yet implemented.", tpe), } } diff --git a/crates/libcontainer/Cargo.toml b/crates/libcontainer/Cargo.toml index 6fe7c428d..280050704 100644 --- a/crates/libcontainer/Cargo.toml +++ b/crates/libcontainer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libcontainer" -version = "0.0.4" +version = "0.1.0" description = "Library for container control" license-file = "../../LICENSE" repository = "https://github.com/containers/youki" @@ -12,45 +12,40 @@ rust-version = "1.58.1" keywords = ["youki", "container", "cgroups"] [features] -default = ["systemd", "v2", "v1"] -wasm-wasmer = ["wasmer", "wasmer-wasi"] -wasm-wasmedge = ["wasmedge-sdk/standalone"] -wasm-wasmtime = ["wasmtime", "wasmtime-wasi"] +default = ["systemd", "v2", "v1", "libseccomp"] +libseccomp = ["dep:libseccomp"] systemd = 
["libcgroups/systemd", "v2"] v2 = ["libcgroups/v2"] v1 = ["libcgroups/v1"] cgroupsv2_devices = ["libcgroups/cgroupsv2_devices"] [dependencies] -anyhow = "1.0" -bitflags = "1.3.2" +bitflags = "2.3.3" caps = "0.5.5" -chrono = { version = "0.4", features = ["serde"] } -crossbeam-channel = "0.5" -fastrand = "^1.7.0" +chrono = { version = "0.4", default-features = false, features = ["clock", "serde"] } +fastrand = "^2.0.0" futures = { version = "0.3", features = ["thread-pool"] } -libc = "0.2.139" -log = "0.4" -mio = { version = "0.8.5", features = ["os-ext", "os-poll"] } -nix = "0.25.0" -oci-spec = { version = "^0.5.5", features = ["runtime"] } -path-clean = "0.1.0" -procfs = "0.14.2" +libc = "0.2.147" +nix = "0.26.2" +oci-spec = { version = "~0.6.2", features = ["runtime"] } +once_cell = "1.18.0" +procfs = "0.15.1" prctl = "1.0.0" -libcgroups = { version = "0.0.4", path = "../libcgroups", default-features = false } -libseccomp = { version = "0.3.0" } +libcgroups = { version = "0.1.0", path = "../libcgroups", default-features = false } +libseccomp = { version = "0.3.0", optional=true } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" -syscalls = "0.6.7" rust-criu = "0.4.0" -wasmer = { version = "2.2.0", optional = true } -wasmer-wasi = { version = "2.3.0", optional = true } -wasmedge-sdk = { version = "0.7.1", optional = true } -wasmtime = {version = "4.0.0", optional = true } -wasmtime-wasi = {version = "4.0.0", optional = true } +clone3 = "0.2.3" +regex = "1.9.1" +thiserror = "1.0.44" +tracing = { version = "0.1.37", features = ["attributes"]} +safe-path = "0.1.0" [dev-dependencies] -oci-spec = { version = "^0.5.5", features = ["proptests", "runtime"] } +oci-spec = { version = "~0.6.2", features = ["proptests", "runtime"] } quickcheck = "1" -serial_test = "1.0.0" -rand = "0.8.5" +serial_test = "2.0.0" +tempfile = "3" +anyhow = "1.0" +rand = { version = "0.8.5" } diff --git a/crates/libcontainer/README.md b/crates/libcontainer/README.md index 
ec79d7057..b5ac248ed 100644 --- a/crates/libcontainer/README.md +++ b/crates/libcontainer/README.md @@ -1 +1,22 @@ # libcontainer + +### Building with musl + +In order to build with musl you must first remove the libseccomp dependency as it will reference shared libraries (`libdbus` and `libseccomp`) which cannot be built with musl. + +Do this by adding flags to Cargo. Use the `--no-default-features` flag followed by `-F` and whatever features you intend to build with such as `v2` as defined in Cargo.toml under the features section. + +Next you will also need the `+nightly` flags when building with `rustup` and `cargo`. + +```bash +# Add rustup +nightly musl to toolchain +rustup +nightly target add $(uname -m)-unknown-linux-musl + +# Build rustup +nightly stdlib with musl +rustup +nightly toolchain install nightly-$(uname -m)-unknown-linux-musl + +# Build musl standard library +cargo +nightly build -Zbuild-std --target $(uname -m)-unknown-linux-musl --no-default-features -F v2 + +cargo +nightly build --target $(uname -m)-unknown-linux-musl --no-default-features -F v2 +``` diff --git a/crates/libcontainer/src/apparmor.rs b/crates/libcontainer/src/apparmor.rs index 9bf44199e..86480608f 100644 --- a/crates/libcontainer/src/apparmor.rs +++ b/crates/libcontainer/src/apparmor.rs @@ -1,17 +1,28 @@ -use anyhow::{Context, Result}; +use crate::utils; use std::{ fs::{self}, path::Path, }; -use crate::utils; +#[derive(Debug, thiserror::Error)] +pub enum AppArmorError { + #[error("failed to apply AppArmor profile")] + ActivateProfile { + path: std::path::PathBuf, + profile: String, + source: std::io::Error, + }, + #[error(transparent)] + EnsureProcfs(#[from] utils::EnsureProcfsError), +} + +type Result = std::result::Result; const ENABLED_PARAMETER_PATH: &str = "/sys/module/apparmor/parameters/enabled"; /// Checks if AppArmor has been enabled on the system. 
-pub fn is_enabled() -> Result { - let aa_enabled = fs::read_to_string(ENABLED_PARAMETER_PATH) - .with_context(|| format!("could not read {}", ENABLED_PARAMETER_PATH))?; +pub fn is_enabled() -> std::result::Result { + let aa_enabled = fs::read_to_string(ENABLED_PARAMETER_PATH)?; Ok(aa_enabled.starts_with('Y')) } @@ -32,6 +43,10 @@ pub fn apply_profile(profile: &str) -> Result<()> { } fn activate_profile(path: &Path, profile: &str) -> Result<()> { - utils::ensure_procfs(path)?; - utils::write_file(path, format!("exec {}", profile)) + utils::ensure_procfs(path).map_err(AppArmorError::EnsureProcfs)?; + fs::write(path, format!("exec {profile}")).map_err(|err| AppArmorError::ActivateProfile { + path: path.to_owned(), + profile: profile.to_owned(), + source: err, + }) } diff --git a/crates/libcontainer/src/capabilities.rs b/crates/libcontainer/src/capabilities.rs index b289ec1bb..d3962877f 100644 --- a/crates/libcontainer/src/capabilities.rs +++ b/crates/libcontainer/src/capabilities.rs @@ -1,9 +1,8 @@ //! 
Handles Management of Capabilities -use crate::syscall::Syscall; +use crate::syscall::{Syscall, SyscallError}; use caps::Capability as CapsCapability; use caps::*; -use anyhow::Result; use oci_spec::runtime::{Capabilities, Capability as SpecCapability, LinuxCapabilities}; /// Converts a list of capability types to capabilities has set @@ -124,15 +123,20 @@ impl CapabilityExt for SpecCapability { /// reset capabilities of process calling this to effective capabilities /// effective capability set is set of capabilities used by kernel to perform checks /// see for more information -pub fn reset_effective(syscall: &S) -> Result<()> { - log::debug!("reset all caps"); - syscall.set_capability(CapSet::Effective, &caps::all())?; +pub fn reset_effective(syscall: &S) -> Result<(), SyscallError> { + tracing::debug!("reset all caps"); + // permitted capabilities are all the capabilities that we are allowed to acquire + let permitted = caps::read(None, CapSet::Permitted)?; + syscall.set_capability(CapSet::Effective, &permitted)?; Ok(()) } /// Drop any extra granted capabilities, and reset to defaults which are in oci specification -pub fn drop_privileges(cs: &LinuxCapabilities, syscall: &S) -> Result<()> { - log::debug!("dropping bounding capabilities to {:?}", cs.bounding()); +pub fn drop_privileges( + cs: &LinuxCapabilities, + syscall: &S, +) -> Result<(), SyscallError> { + tracing::debug!("dropping bounding capabilities to {:?}", cs.bounding()); if let Some(bounding) = cs.bounding() { syscall.set_capability(CapSet::Bounding, &to_set(bounding))?; } @@ -152,7 +156,7 @@ pub fn drop_privileges(cs: &LinuxCapabilities, syscall: &S) if let Some(ambient) = cs.ambient() { // check specifically for ambient, as those might not always be available if let Err(e) = syscall.set_capability(CapSet::Ambient, &to_set(ambient)) { - log::error!("failed to set ambient capabilities: {}", e); + tracing::error!("failed to set ambient capabilities: {}", e); } } @@ -170,13 +174,14 @@ mod tests { 
#[test] fn test_reset_effective() { let test_command = TestHelperSyscall::default(); + let permitted_caps = caps::read(None, CapSet::Permitted).unwrap(); assert!(reset_effective(&test_command).is_ok()); let set_capability_args: Vec<_> = test_command .get_set_capability_args() .into_iter() .map(|(_capset, caps)| caps) .collect(); - assert_eq!(set_capability_args, vec![caps::all()]); + assert_eq!(set_capability_args, vec![permitted_caps]); } #[test] @@ -557,7 +562,7 @@ mod tests { let tests = vec![ Testcase { - name: format!("all LinuxCapabilities fields with caps: {:?}", cps), + name: format!("all LinuxCapabilities fields with caps: {cps:?}"), input: LinuxCapabilitiesBuilder::default() .bounding(cps.clone().into_iter().collect::()) .effective(cps.clone().into_iter().collect::()) @@ -575,7 +580,7 @@ mod tests { ], }, Testcase { - name: format!("partial LinuxCapabilities fields with caps: {:?}", cps), + name: format!("partial LinuxCapabilities fields with caps: {cps:?}"), input: LinuxCapabilitiesBuilder::default() .bounding(cps.clone().into_iter().collect::()) .effective(cps.clone().into_iter().collect::()) @@ -591,7 +596,7 @@ mod tests { ], }, Testcase { - name: format!("empty LinuxCapabilities fields with caps: {:?}", cps), + name: format!("empty LinuxCapabilities fields with caps: {cps:?}"), input: LinuxCapabilitiesBuilder::default() .bounding(HashSet::new()) .effective(HashSet::new()) diff --git a/crates/libcontainer/src/channel.rs b/crates/libcontainer/src/channel.rs new file mode 100644 index 000000000..a772a10d3 --- /dev/null +++ b/crates/libcontainer/src/channel.rs @@ -0,0 +1,219 @@ +use nix::{ + sys::socket::{self, UnixAddr}, + unistd::{self}, +}; +use serde::{Deserialize, Serialize}; +use std::{ + io::{IoSlice, IoSliceMut}, + marker::PhantomData, + os::unix::prelude::RawFd, +}; + +#[derive(Debug, thiserror::Error)] +pub enum ChannelError { + #[error("failed unix syscalls")] + Nix(#[from] nix::Error), + #[error("failed serde serialization")] + Serde(#[from] 
serde_json::Error), + #[error("channel connection broken")] + BrokenChannel, +} +#[derive(Clone)] +pub struct Receiver { + receiver: RawFd, + phantom: PhantomData, +} + +#[derive(Clone)] +pub struct Sender { + sender: RawFd, + phantom: PhantomData, +} + +impl Sender +where + T: Serialize, +{ + fn send_iovec( + &mut self, + iov: &[IoSlice], + fds: Option<&[RawFd]>, + ) -> Result { + let cmsgs = if let Some(fds) = fds { + vec![socket::ControlMessage::ScmRights(fds)] + } else { + vec![] + }; + socket::sendmsg::(self.sender, iov, &cmsgs, socket::MsgFlags::empty(), None) + .map_err(|e| e.into()) + } + + fn send_slice_with_len( + &mut self, + data: &[u8], + fds: Option<&[RawFd]>, + ) -> Result { + let len = data.len() as u64; + // Here we prefix the length of the data onto the serialized data. + let iov = [ + IoSlice::new(unsafe { + std::slice::from_raw_parts( + (&len as *const u64) as *const u8, + std::mem::size_of::(), + ) + }), + IoSlice::new(data), + ]; + self.send_iovec(&iov[..], fds) + } + + pub fn send(&mut self, object: T) -> Result<(), ChannelError> { + let payload = serde_json::to_vec(&object)?; + self.send_slice_with_len(&payload, None)?; + + Ok(()) + } + + pub fn send_fds(&mut self, object: T, fds: &[RawFd]) -> Result<(), ChannelError> { + let payload = serde_json::to_vec(&object)?; + self.send_slice_with_len(&payload, Some(fds))?; + + Ok(()) + } + + pub fn close(&self) -> Result<(), ChannelError> { + Ok(unistd::close(self.sender)?) 
+ } +} + +impl Receiver +where + T: serde::de::DeserializeOwned, +{ + fn peek_size_iovec(&mut self) -> Result { + let mut len: u64 = 0; + let mut iov = [IoSliceMut::new(unsafe { + std::slice::from_raw_parts_mut( + (&mut len as *mut u64) as *mut u8, + std::mem::size_of::(), + ) + })]; + let _ = + socket::recvmsg::(self.receiver, &mut iov, None, socket::MsgFlags::MSG_PEEK)?; + match len { + 0 => Err(ChannelError::BrokenChannel), + _ => Ok(len), + } + } + + fn recv_into_iovec( + &mut self, + iov: &mut [IoSliceMut], + ) -> Result<(usize, Option), ChannelError> + where + F: Default + AsMut<[RawFd]>, + { + let mut cmsgspace = nix::cmsg_space!(F); + let msg = socket::recvmsg::( + self.receiver, + iov, + Some(&mut cmsgspace), + socket::MsgFlags::MSG_CMSG_CLOEXEC, + )?; + + // Sending multiple SCM_RIGHTS messages will lead to platform-dependent + // behavior, with some systems choosing to return EINVAL when sending, or + // silently processing only the first msg or sending all of it. Here we assume + // there is only one SCM_RIGHTS message and will only process the first + // message. + let fds: Option = msg + .cmsgs() + .find_map(|cmsg| { + if let socket::ControlMessageOwned::ScmRights(fds) = cmsg { + Some(fds) + } else { + None + } + }) + .map(|fds| { + let mut fds_array: F = Default::default(); + >::as_mut(&mut fds_array).clone_from_slice(&fds); + fds_array + }); + + Ok((msg.bytes, fds)) + } + + fn recv_into_buf_with_len(&mut self) -> Result<(Vec, Option), ChannelError> + where + F: Default + AsMut<[RawFd]>, + { + let msg_len = self.peek_size_iovec()?; + let mut len: u64 = 0; + let mut buf = vec![0u8; msg_len as usize]; + let (bytes, fds) = { + let mut iov = [ + IoSliceMut::new(unsafe { + std::slice::from_raw_parts_mut( + (&mut len as *mut u64) as *mut u8, + std::mem::size_of::(), + ) + }), + IoSliceMut::new(&mut buf), + ]; + self.recv_into_iovec(&mut iov)? 
+ }; + + match bytes { + 0 => Err(ChannelError::BrokenChannel), + _ => Ok((buf, fds)), + } + } + + // Recv the next message of type T. + pub fn recv(&mut self) -> Result { + let (buf, _) = self.recv_into_buf_with_len::<[RawFd; 0]>()?; + Ok(serde_json::from_slice(&buf[..])?) + } + + // Works similar to `recv`, but will look for fds sent by SCM_RIGHTS + // message. We use F as as `[RawFd; n]`, where `n` is the number of + // descriptors you want to receive. + pub fn recv_with_fds(&mut self) -> Result<(T, Option), ChannelError> + where + F: Default + AsMut<[RawFd]>, + { + let (buf, fds) = self.recv_into_buf_with_len::()?; + Ok((serde_json::from_slice(&buf[..])?, fds)) + } + + pub fn close(&self) -> Result<(), ChannelError> { + Ok(unistd::close(self.receiver)?) + } +} + +pub fn channel() -> Result<(Sender, Receiver), ChannelError> +where + T: for<'de> Deserialize<'de> + Serialize, +{ + let (os_sender, os_receiver) = unix_channel()?; + let receiver = Receiver { + receiver: os_receiver, + phantom: PhantomData, + }; + let sender = Sender { + sender: os_sender, + phantom: PhantomData, + }; + Ok((sender, receiver)) +} + +// Use socketpair as the underlying pipe. +fn unix_channel() -> Result<(RawFd, RawFd), ChannelError> { + Ok(socket::socketpair( + socket::AddressFamily::Unix, + socket::SockType::SeqPacket, + None, + socket::SockFlag::SOCK_CLOEXEC, + )?) 
+} diff --git a/crates/libcontainer/src/config.rs b/crates/libcontainer/src/config.rs index 657653c6d..b2db7d920 100644 --- a/crates/libcontainer/src/config.rs +++ b/crates/libcontainer/src/config.rs @@ -1,14 +1,39 @@ +use crate::utils; +use oci_spec::runtime::{Hooks, Spec}; +use serde::{Deserialize, Serialize}; use std::{ fs, + io::{BufReader, BufWriter, Write}, path::{Path, PathBuf}, }; -use anyhow::{Context, Result}; -use serde::{Deserialize, Serialize}; - -use oci_spec::runtime::{Hooks, Spec}; +#[derive(Debug, thiserror::Error)] +pub enum ConfigError { + #[error("failed to save config")] + SaveIO { + source: std::io::Error, + path: PathBuf, + }, + #[error("failed to save config")] + SaveEncode { + source: serde_json::Error, + path: PathBuf, + }, + #[error("failed to parse config")] + LoadIO { + source: std::io::Error, + path: PathBuf, + }, + #[error("failed to parse config")] + LoadParse { + source: serde_json::Error, + path: PathBuf, + }, + #[error("missing linux in spec")] + MissingLinux, +} -use crate::utils; +type Result = std::result::Result; const YOUKI_CONFIG_NAME: &str = "youki_config.json"; @@ -28,7 +53,7 @@ impl<'a> YoukiConfig { cgroup_path: utils::get_cgroup_path( spec.linux() .as_ref() - .context("no linux in spec")? + .ok_or(ConfigError::MissingLinux)? 
.cgroups_path(), container_id, rootless, @@ -37,24 +62,43 @@ impl<'a> YoukiConfig { } pub fn save>(&self, path: P) -> Result<()> { - let file = fs::File::create(path.as_ref().join(YOUKI_CONFIG_NAME))?; - serde_json::to_writer(&file, self)?; + let file = fs::File::create(path.as_ref().join(YOUKI_CONFIG_NAME)).map_err(|err| { + ConfigError::SaveIO { + source: err, + path: path.as_ref().to_owned(), + } + })?; + let mut writer = BufWriter::new(file); + serde_json::to_writer(&mut writer, self).map_err(|err| ConfigError::SaveEncode { + source: err, + path: path.as_ref().to_owned(), + })?; + writer.flush().map_err(|err| ConfigError::SaveIO { + source: err, + path: path.as_ref().to_owned(), + })?; + Ok(()) } pub fn load>(path: P) -> Result { let path = path.as_ref(); - let file = fs::File::open(path.join(YOUKI_CONFIG_NAME))?; - let config = serde_json::from_reader(&file) - .with_context(|| format!("failed to load config from {:?}", path))?; + let file = + fs::File::open(path.join(YOUKI_CONFIG_NAME)).map_err(|err| ConfigError::LoadIO { + source: err, + path: path.to_owned(), + })?; + let reader = BufReader::new(file); + let config = serde_json::from_reader(reader).map_err(|err| ConfigError::LoadParse { + source: err, + path: path.to_owned(), + })?; Ok(config) } } #[cfg(test)] mod tests { - use crate::utils::create_temp_dir; - use super::*; use anyhow::Result; @@ -72,7 +116,7 @@ mod tests { #[test] fn test_config_save_and_load() -> Result<()> { let container_id = "sample"; - let tmp = create_temp_dir("test_config_save_and_load").expect("create test directory"); + let tmp = tempfile::tempdir().expect("create temp dir"); let spec = Spec::default(); let config = YoukiConfig::from_spec(&spec, container_id, false)?; config.save(&tmp)?; diff --git a/crates/libcontainer/src/container/builder.rs b/crates/libcontainer/src/container/builder.rs index 95a3989ba..e74641984 100644 --- a/crates/libcontainer/src/container/builder.rs +++ b/crates/libcontainer/src/container/builder.rs @@ 
-1,16 +1,18 @@ -use crate::{syscall::Syscall, utils::PathBufExt}; -use anyhow::{Context, Result}; +use crate::error::{ErrInvalidID, LibcontainerError}; +use crate::syscall::syscall::SyscallType; +use crate::utils::PathBufExt; +use crate::workload::{self, Executor}; use std::path::PathBuf; use super::{init_builder::InitContainerBuilder, tenant_builder::TenantContainerBuilder}; -pub struct ContainerBuilder<'a> { +pub struct ContainerBuilder { /// Id of the container pub(super) container_id: String, /// Root directory for container state pub(super) root_path: PathBuf, /// Interface to operating system primitives - pub(super) syscall: &'a dyn Syscall, + pub(super) syscall: SyscallType, /// File which will be used to communicate the pid of the /// container process to the higher level runtime pub(super) pid_file: Option, @@ -18,6 +20,9 @@ pub struct ContainerBuilder<'a> { pub(super) console_socket: Option, /// File descriptors to be passed into the container process pub(super) preserve_fds: i32, + /// The function that actually runs on the container init process. Default + /// is to execute the specified command in the oci spec. 
+ pub(super) executor: Executor, } /// Builder that can be used to configure the common properties of @@ -27,16 +32,19 @@ pub struct ContainerBuilder<'a> { /// /// ```no_run /// use libcontainer::container::builder::ContainerBuilder; -/// use libcontainer::syscall::syscall::create_syscall; +/// use libcontainer::syscall::syscall::SyscallType; /// -/// ContainerBuilder::new("74f1a4cb3801".to_owned(), create_syscall().as_ref()) +/// ContainerBuilder::new( +/// "74f1a4cb3801".to_owned(), +/// SyscallType::default(), +/// ) /// .with_root_path("/run/containers/youki").expect("invalid root path") /// .with_pid_file(Some("/var/run/docker.pid")).expect("invalid pid file") /// .with_console_socket(Some("/var/run/docker/sock.tty")) /// .as_init("/var/run/docker/bundle") /// .build(); /// ``` -impl<'a> ContainerBuilder<'a> { +impl ContainerBuilder { /// Generates the base configuration for a container which can be /// transformed into either a init container or a tenant container /// @@ -44,13 +52,15 @@ impl<'a> ContainerBuilder<'a> { /// /// ```no_run /// use libcontainer::container::builder::ContainerBuilder; - /// use libcontainer::syscall::syscall::create_syscall; + /// use libcontainer::syscall::syscall::SyscallType; /// - /// let builder = ContainerBuilder::new("74f1a4cb3801".to_owned(), create_syscall().as_ref()); + /// let builder = ContainerBuilder::new( + /// "74f1a4cb3801".to_owned(), + /// SyscallType::default(), + /// ); /// ``` - pub fn new(container_id: String, syscall: &'a dyn Syscall) -> Self { + pub fn new(container_id: String, syscall: SyscallType) -> Self { let root_path = PathBuf::from("/run/youki"); - Self { container_id, root_path, @@ -58,7 +68,44 @@ impl<'a> ContainerBuilder<'a> { pid_file: None, console_socket: None, preserve_fds: 0, + executor: workload::default::get_executor(), + } + } + + /// validate_id checks if the supplied container ID is valid, returning + /// the ErrInvalidID in case it is not. 
+ /// + /// The format of valid ID was never formally defined, instead the code + /// was modified to allow or disallow specific characters. + /// + /// Currently, a valid ID is a non-empty string consisting only of + /// the following characters: + /// - uppercase (A-Z) and lowercase (a-z) Latin letters; + /// - digits (0-9); + /// - underscore (_); + /// - plus sign (+); + /// - minus sign (-); + /// - period (.). + /// + /// In addition, IDs that can't be used to represent a file name + /// (such as . or ..) are rejected. + pub fn validate_id(self) -> Result { + let container_id = self.container_id.clone(); + if container_id.is_empty() { + Err(ErrInvalidID::Empty)?; + } + + if container_id == "." || container_id == ".." { + Err(ErrInvalidID::FileName)?; + } + + for c in container_id.chars() { + match c { + 'a'..='z' | 'A'..='Z' | '0'..='9' | '_' | '+' | '-' | '.' => (), + _ => Err(ErrInvalidID::InvalidChars(c))?, + } } + Ok(self) } /// Transforms this builder into a tenant builder @@ -66,15 +113,18 @@ impl<'a> ContainerBuilder<'a> { /// /// ```no_run /// # use libcontainer::container::builder::ContainerBuilder; - /// # use libcontainer::syscall::syscall::create_syscall; + /// # use libcontainer::syscall::syscall::SyscallType; /// - /// ContainerBuilder::new("74f1a4cb3801".to_owned(), create_syscall().as_ref()) + /// ContainerBuilder::new( + /// "74f1a4cb3801".to_owned(), + /// SyscallType::default(), + /// ) /// .as_tenant() /// .with_container_args(vec!["sleep".to_owned(), "9001".to_owned()]) /// .build(); /// ``` #[allow(clippy::wrong_self_convention)] - pub fn as_tenant(self) -> TenantContainerBuilder<'a> { + pub fn as_tenant(self) -> TenantContainerBuilder { TenantContainerBuilder::new(self) } @@ -83,15 +133,18 @@ impl<'a> ContainerBuilder<'a> { /// /// ```no_run /// # use libcontainer::container::builder::ContainerBuilder; - /// # use libcontainer::syscall::syscall::create_syscall; + /// # use libcontainer::syscall::syscall::SyscallType; /// - /// 
ContainerBuilder::new("74f1a4cb3801".to_owned(), create_syscall().as_ref()) + /// ContainerBuilder::new( + /// "74f1a4cb3801".to_owned(), + /// SyscallType::default(), + /// ) /// .as_init("/var/run/docker/bundle") /// .with_systemd(false) /// .build(); /// ``` #[allow(clippy::wrong_self_convention)] - pub fn as_init>(self, bundle: P) -> InitContainerBuilder<'a> { + pub fn as_init>(self, bundle: P) -> InitContainerBuilder { InitContainerBuilder::new(self, bundle.into()) } @@ -100,16 +153,20 @@ impl<'a> ContainerBuilder<'a> { /// /// ```no_run /// # use libcontainer::container::builder::ContainerBuilder; - /// # use libcontainer::syscall::syscall::create_syscall; + /// # use libcontainer::syscall::syscall::SyscallType; /// - /// ContainerBuilder::new("74f1a4cb3801".to_owned(), create_syscall().as_ref()) + /// ContainerBuilder::new( + /// "74f1a4cb3801".to_owned(), + /// SyscallType::default(), + /// ) /// .with_root_path("/run/containers/youki").expect("invalid root path"); /// ``` - pub fn with_root_path>(mut self, path: P) -> Result { + pub fn with_root_path>(mut self, path: P) -> Result { let path = path.into(); - self.root_path = path - .canonicalize_safely() - .with_context(|| format!("failed to canonicalize root path {path:?}"))?; + self.root_path = path.canonicalize_safely().map_err(|err| { + tracing::error!(?path, ?err, "failed to canonicalize root path"); + LibcontainerError::InvalidInput(format!("invalid root path {path:?}: {err:?}")) + })?; Ok(self) } @@ -120,20 +177,23 @@ impl<'a> ContainerBuilder<'a> { /// /// ```no_run /// # use libcontainer::container::builder::ContainerBuilder; - /// # use libcontainer::syscall::syscall::create_syscall; + /// # use libcontainer::syscall::syscall::SyscallType; /// - /// ContainerBuilder::new("74f1a4cb3801".to_owned(), create_syscall().as_ref()) + /// ContainerBuilder::new( + /// "74f1a4cb3801".to_owned(), + /// SyscallType::default(), + /// ) /// .with_pid_file(Some("/var/run/docker.pid")).expect("invalid pid file"); 
/// ``` - pub fn with_pid_file>(mut self, path: Option

) -> Result { - self.pid_file = match path { - Some(path) => { - let p = path.into(); - Some( - p.canonicalize_safely() - .with_context(|| format!("failed to canonicalize pid file {p:?}"))?, - ) - } + pub fn with_pid_file>( + mut self, + path: Option

, + ) -> Result { + self.pid_file = match path.map(|p| p.into()) { + Some(path) => Some(path.canonicalize_safely().map_err(|err| { + tracing::error!(?path, ?err, "failed to canonicalize pid file"); + LibcontainerError::InvalidInput(format!("invalid pid file path {path:?}: {err:?}")) + })?), None => None, }; @@ -146,9 +206,12 @@ impl<'a> ContainerBuilder<'a> { /// /// ```no_run /// # use libcontainer::container::builder::ContainerBuilder; - /// # use libcontainer::syscall::syscall::create_syscall; + /// # use libcontainer::syscall::syscall::SyscallType; /// - /// ContainerBuilder::new("74f1a4cb3801".to_owned(), create_syscall().as_ref()) + /// ContainerBuilder::new( + /// "74f1a4cb3801".to_owned(), + /// SyscallType::default(), + /// ) /// .with_console_socket(Some("/var/run/docker/sock.tty")); /// ``` pub fn with_console_socket>(mut self, path: Option

) -> Self { @@ -162,69 +225,109 @@ impl<'a> ContainerBuilder<'a> { /// /// ```no_run /// # use libcontainer::container::builder::ContainerBuilder; - /// # use libcontainer::syscall::syscall::create_syscall; + /// # use libcontainer::syscall::syscall::SyscallType; /// - /// ContainerBuilder::new("74f1a4cb3801".to_owned(), create_syscall().as_ref()) + /// ContainerBuilder::new( + /// "74f1a4cb3801".to_owned(), + /// SyscallType::default(), + /// ) /// .with_preserved_fds(5); /// ``` pub fn with_preserved_fds(mut self, preserved_fds: i32) -> Self { self.preserve_fds = preserved_fds; self } + /// Sets the executor that will be used to run the workload inside + /// the container process. + /// # Example + /// + /// ```no_run + /// # use libcontainer::container::builder::ContainerBuilder; + /// # use libcontainer::syscall::syscall::SyscallType; + /// # use libcontainer::workload::default::get_executor; + /// + /// ContainerBuilder::new( + /// "74f1a4cb3801".to_owned(), + /// SyscallType::default(), + /// ) + /// .with_executor(get_executor()); + /// ``` + pub fn with_executor(mut self, executor: Executor) -> Self { + self.executor = executor; + self + } } #[cfg(test)] mod tests { - use crate::container::builder::ContainerBuilder; - use crate::syscall::syscall::create_syscall; - use crate::utils::TempDir; + use crate::{container::builder::ContainerBuilder, syscall::syscall::SyscallType}; use anyhow::{Context, Result}; use std::path::PathBuf; #[test] fn test_failable_functions() -> Result<()> { - let root_path_temp_dir = TempDir::new("root_path").context("failed to create temp dir")?; - let pid_file_temp_dir = TempDir::new("pid_file").context("failed to create temp dir")?; - let syscall = create_syscall(); + let root_path_temp_dir = tempfile::tempdir().context("failed to create temp dir")?; + let pid_file_temp_dir = tempfile::tempdir().context("failed to create temp dir")?; + let syscall = SyscallType::default(); - ContainerBuilder::new("74f1a4cb3801".to_owned(), 
syscall.as_ref()) + ContainerBuilder::new("74f1a4cb3801".to_owned(), syscall) .with_root_path(root_path_temp_dir.path())? - .with_pid_file(Some(pid_file_temp_dir.path()))? + .with_pid_file(Some(pid_file_temp_dir.path().join("fake.pid")))? .with_console_socket(Some("/var/run/docker/sock.tty")) .as_init("/var/run/docker/bundle"); // accept None pid file. - ContainerBuilder::new("74f1a4cb3801".to_owned(), syscall.as_ref()) - .with_pid_file::(None)?; + ContainerBuilder::new("74f1a4cb3801".to_owned(), syscall).with_pid_file::(None)?; // accept absolute root path which does not exist let abs_root_path = PathBuf::from("/not/existing/path"); - let path_builder = ContainerBuilder::new("74f1a4cb3801".to_owned(), syscall.as_ref()) + let path_builder = ContainerBuilder::new("74f1a4cb3801".to_owned(), syscall) .with_root_path(&abs_root_path) .context("build container")?; assert_eq!(path_builder.root_path, abs_root_path); // accept relative root path which does not exist let cwd = std::env::current_dir().context("get current dir")?; - let path_builder = ContainerBuilder::new("74f1a4cb3801".to_owned(), syscall.as_ref()) + let path_builder = ContainerBuilder::new("74f1a4cb3801".to_owned(), syscall) .with_root_path("./not/existing/path") .context("build container")?; assert_eq!(path_builder.root_path, cwd.join("not/existing/path")); // accept absolute pid path which does not exist let abs_pid_path = PathBuf::from("/not/existing/path"); - let path_builder = ContainerBuilder::new("74f1a4cb3801".to_owned(), syscall.as_ref()) + let path_builder = ContainerBuilder::new("74f1a4cb3801".to_owned(), syscall) .with_pid_file(Some(&abs_pid_path)) .context("build container")?; assert_eq!(path_builder.pid_file, Some(abs_pid_path)); // accept relative pid path which does not exist let cwd = std::env::current_dir().context("get current dir")?; - let path_builder = ContainerBuilder::new("74f1a4cb3801".to_owned(), syscall.as_ref()) + let path_builder = 
ContainerBuilder::new("74f1a4cb3801".to_owned(), syscall) .with_pid_file(Some("./not/existing/path")) .context("build container")?; assert_eq!(path_builder.pid_file, Some(cwd.join("not/existing/path"))); Ok(()) } + + #[test] + fn test_validate_id() -> Result<()> { + let syscall = SyscallType::default(); + // validate container_id + let result = ContainerBuilder::new("$#".to_owned(), syscall).validate_id(); + assert!(result.is_err()); + + let result = ContainerBuilder::new(".".to_owned(), syscall).validate_id(); + assert!(result.is_err()); + + let result = ContainerBuilder::new("..".to_owned(), syscall).validate_id(); + assert!(result.is_err()); + + let result = ContainerBuilder::new("...".to_owned(), syscall).validate_id(); + assert!(result.is_ok()); + + let result = ContainerBuilder::new("74f1a4cb3801".to_owned(), syscall).validate_id(); + assert!(result.is_ok()); + Ok(()) + } } diff --git a/crates/libcontainer/src/container/builder_impl.rs b/crates/libcontainer/src/container/builder_impl.rs index c17558a55..67724d31b 100644 --- a/crates/libcontainer/src/container/builder_impl.rs +++ b/crates/libcontainer/src/container/builder_impl.rs @@ -1,31 +1,34 @@ use super::{Container, ContainerStatus}; use crate::{ + error::{LibcontainerError, MissingSpecError}, hooks, notify_socket::NotifyListener, process::{ self, args::{ContainerArgs, ContainerType}, + intel_rdt::delete_resctrl_subdirectory, }, rootless::Rootless, - syscall::Syscall, + syscall::syscall::SyscallType, utils, + workload::Executor, }; -use anyhow::{bail, Context, Result}; +use libcgroups::common::CgroupManager; use nix::unistd::Pid; use oci_spec::runtime::Spec; -use std::{fs, io::Write, os::unix::prelude::RawFd, path::PathBuf}; +use std::{fs, io::Write, os::unix::prelude::RawFd, path::PathBuf, rc::Rc}; -pub(super) struct ContainerBuilderImpl<'a> { +pub(super) struct ContainerBuilderImpl { /// Flag indicating if an init or a tenant container should be created pub container_type: ContainerType, /// Interface 
to operating system primitives - pub syscall: &'a dyn Syscall, + pub syscall: SyscallType, /// Flag indicating if systemd should be used for cgroup management pub use_systemd: bool, /// Id of the container pub container_id: String, - /// OCI complient runtime spec - pub spec: &'a Spec, + /// OCI compliant runtime spec + pub spec: Rc, /// Root filesystem of the container pub rootfs: PathBuf, /// File which will be used to communicate the pid of the @@ -34,7 +37,7 @@ pub(super) struct ContainerBuilderImpl<'a> { /// Socket to communicate the file descriptor of the ptty pub console_socket: Option, /// Options for rootless containers - pub rootless: Option>, + pub rootless: Option, /// Path to the Unix Domain Socket to communicate container start pub notify_path: PathBuf, /// Container state @@ -43,34 +46,43 @@ pub(super) struct ContainerBuilderImpl<'a> { pub preserve_fds: i32, /// If the container is to be run in detached mode pub detached: bool, + /// Default executes the specified execution of a generic command + pub executor: Executor, } -impl<'a> ContainerBuilderImpl<'a> { - pub(super) fn create(&mut self) -> Result { - match self.run_container().context("failed to create container") { +impl ContainerBuilderImpl { + pub(super) fn create(&mut self) -> Result { + match self.run_container() { Ok(pid) => Ok(pid), Err(outer) => { - if let Err(inner) = self.cleanup_container() { - return Err(outer.context(inner)); + // Only the init container should be cleaned up in the case of + // an error. 
+ if matches!(self.container_type, ContainerType::InitContainer) { + self.cleanup_container()?; } + Err(outer) } } } - fn run_container(&mut self) -> Result { - let linux = self.spec.linux().as_ref().context("no linux in spec")?; + fn run_container(&mut self) -> Result { + let linux = self.spec.linux().as_ref().ok_or(MissingSpecError::Linux)?; let cgroups_path = utils::get_cgroup_path( linux.cgroups_path(), &self.container_id, self.rootless.is_some(), ); - let cmanager = libcgroups::common::create_cgroup_manager( - &cgroups_path, - self.use_systemd || self.rootless.is_some(), - &self.container_id, - )?; - let process = self.spec.process().as_ref().context("No process in spec")?; + let cgroup_config = libcgroups::common::CgroupConfig { + cgroup_path: cgroups_path, + systemd_cgroup: self.use_systemd || self.rootless.is_some(), + container_name: self.container_id.to_owned(), + }; + let process = self + .spec + .process() + .as_ref() + .ok_or(MissingSpecError::Process)?; if matches!(self.container_type, ContainerType::InitContainer) { if let Some(hooks) = self.spec.hooks() { @@ -81,8 +93,10 @@ impl<'a> ContainerBuilderImpl<'a> { // Need to create the notify socket before we pivot root, since the unix // domain socket used here is outside of the rootfs of container. During // exec, need to create the socket before we enter into existing mount - // namespace. - let notify_socket: NotifyListener = NotifyListener::new(&self.notify_path)?; + // namespace. We also need to create to socket before entering into the + // user namespace in the case that the path is located in paths only + // root can access. + let notify_listener = NotifyListener::new(&self.notify_path)?; // If Out-of-memory score adjustment is set in specification. set the score // value for the current process check @@ -94,9 +108,16 @@ impl<'a> ContainerBuilderImpl<'a> { // set). All children inherit their parent's oom_score_adj value on // fork(2) so this will always be propagated properly. 
if let Some(oom_score_adj) = process.oom_score_adj() { - log::debug!("Set OOM score to {}", oom_score_adj); - let mut f = fs::File::create("/proc/self/oom_score_adj")?; - f.write_all(oom_score_adj.to_string().as_bytes())?; + tracing::debug!("Set OOM score to {}", oom_score_adj); + let mut f = fs::File::create("/proc/self/oom_score_adj").map_err(|err| { + tracing::error!("failed to open /proc/self/oom_score_adj: {}", err); + LibcontainerError::OtherIO(err) + })?; + f.write_all(oom_score_adj.to_string().as_bytes()) + .map_err(|err| { + tracing::error!("failed to write to /proc/self/oom_score_adj: {}", err); + LibcontainerError::OtherIO(err) + })?; } // Make the process non-dumpable, to avoid various race conditions that @@ -117,23 +138,32 @@ impl<'a> ContainerBuilderImpl<'a> { let container_args = ContainerArgs { container_type: self.container_type, syscall: self.syscall, - spec: self.spec, - rootfs: &self.rootfs, + spec: Rc::clone(&self.spec), + rootfs: self.rootfs.to_owned(), console_socket: self.console_socket, - notify_socket, + notify_listener, preserve_fds: self.preserve_fds, - container: &self.container, - rootless: &self.rootless, - cgroup_manager: cmanager, + container: self.container.to_owned(), + rootless: self.rootless.to_owned(), + cgroup_config, detached: self.detached, + executor: self.executor.clone(), }; - let (intermediate, init_pid) = - process::container_main_process::container_main_process(&container_args)?; + let (init_pid, need_to_clean_up_intel_rdt_dir) = + process::container_main_process::container_main_process(&container_args).map_err( + |err| { + tracing::error!(?err, "failed to run container process"); + LibcontainerError::MainProcess(err) + }, + )?; // if file to write the pid to is specified, write pid of the child if let Some(pid_file) = &self.pid_file { - fs::write(pid_file, format!("{}", init_pid)).context("failed to write pid file")?; + fs::write(pid_file, format!("{init_pid}")).map_err(|err| { + tracing::error!("failed to write pid 
to file: {}", err); + LibcontainerError::OtherIO(err) + })?; } if let Some(container) = &mut self.container { @@ -142,43 +172,55 @@ impl<'a> ContainerBuilderImpl<'a> { .set_status(ContainerStatus::Created) .set_creator(nix::unistd::geteuid().as_raw()) .set_pid(init_pid.as_raw()) - .save() - .context("Failed to save container state")?; + .set_clean_up_intel_rdt_directory(need_to_clean_up_intel_rdt_dir) + .save()?; } - Ok(intermediate) + Ok(init_pid) } - fn cleanup_container(&self) -> Result<()> { - let linux = self.spec.linux().as_ref().context("no linux in spec")?; + fn cleanup_container(&self) -> Result<(), LibcontainerError> { + let linux = self.spec.linux().as_ref().ok_or(MissingSpecError::Linux)?; let cgroups_path = utils::get_cgroup_path( linux.cgroups_path(), &self.container_id, self.rootless.is_some(), ); - let cmanager = libcgroups::common::create_cgroup_manager( - &cgroups_path, - self.use_systemd || self.rootless.is_some(), - &self.container_id, - )?; + let cmanager = + libcgroups::common::create_cgroup_manager(libcgroups::common::CgroupConfig { + cgroup_path: cgroups_path, + systemd_cgroup: self.use_systemd || self.rootless.is_some(), + container_name: self.container_id.to_string(), + })?; let mut errors = Vec::new(); - if let Err(e) = cmanager.remove().context("failed to remove cgroup") { + + if let Err(e) = cmanager.remove() { + tracing::error!(error = ?e, "failed to remove cgroup manager"); errors.push(e.to_string()); } if let Some(container) = &self.container { + if let Some(true) = container.clean_up_intel_rdt_subdirectory() { + if let Err(e) = delete_resctrl_subdirectory(container.id()) { + tracing::error!(id = ?container.id(), error = ?e, "failed to delete resctrl subdirectory"); + errors.push(e.to_string()); + } + } + if container.root.exists() { - if let Err(e) = fs::remove_dir_all(&container.root) - .with_context(|| format!("could not delete {:?}", container.root)) - { + if let Err(e) = fs::remove_dir_all(&container.root) { + 
tracing::error!(container_root = ?container.root, error = ?e, "failed to delete container root"); errors.push(e.to_string()); } } } if !errors.is_empty() { - bail!("failed to cleanup container: {}", errors.join(";")); + return Err(LibcontainerError::Other(format!( + "failed to cleanup container: {}", + errors.join(";") + ))); } Ok(()) diff --git a/crates/libcontainer/src/container/container.rs b/crates/libcontainer/src/container/container.rs index 598277a8b..241114653 100644 --- a/crates/libcontainer/src/container/container.rs +++ b/crates/libcontainer/src/container/container.rs @@ -1,19 +1,17 @@ -use std::collections::HashMap; -use std::ffi::OsString; -use std::fs; -use std::path::{Path, PathBuf}; +use crate::config::YoukiConfig; +use crate::container::{ContainerStatus, State}; +use crate::error::LibcontainerError; +use crate::syscall::syscall::create_syscall; -use anyhow::Result; use chrono::DateTime; -use nix::unistd::Pid; - use chrono::Utc; +use nix::unistd::Pid; use procfs::process::Process; -use crate::config::YoukiConfig; -use crate::syscall::syscall::create_syscall; - -use crate::container::{ContainerStatus, State}; +use std::collections::HashMap; +use std::ffi::OsString; +use std::fs; +use std::path::{Path, PathBuf}; /// Structure representing the container data #[derive(Debug, Clone)] @@ -40,10 +38,17 @@ impl Container { pid: Option, bundle: &Path, container_root: &Path, - ) -> Result { - let container_root = fs::canonicalize(container_root)?; + ) -> Result { + let container_root = fs::canonicalize(container_root).map_err(|err| { + LibcontainerError::InvalidInput(format!( + "invalid container root {container_root:?}: {err:?}" + )) + })?; + let bundle = fs::canonicalize(bundle).map_err(|err| { + LibcontainerError::InvalidInput(format!("invalid bundle {bundle:?}: {err:?}")) + })?; + let state = State::new(container_id, status, pid, bundle); - let state = State::new(container_id, status, pid, fs::canonicalize(bundle)?); Ok(Self { state, root: container_root, 
@@ -117,15 +122,24 @@ impl Container { self } - pub fn systemd(&self) -> Option { + pub fn systemd(&self) -> bool { self.state.use_systemd } pub fn set_systemd(&mut self, should_use: bool) -> &mut Self { - self.state.use_systemd = Some(should_use); + self.state.use_systemd = should_use; + self + } + + pub fn set_clean_up_intel_rdt_directory(&mut self, clean_up: bool) -> &mut Self { + self.state.clean_up_intel_rdt_subdirectory = Some(clean_up); self } + pub fn clean_up_intel_rdt_subdirectory(&self) -> Option { + self.state.clean_up_intel_rdt_subdirectory + } + pub fn status(&self) -> ContainerStatus { self.state.status } @@ -142,7 +156,7 @@ impl Container { self } - pub fn refresh_status(&mut self) -> Result<()> { + pub fn refresh_status(&mut self) -> Result<(), LibcontainerError> { let new_status = match self.pid() { Some(pid) => { // Note that Process::new does not spawn a new process @@ -171,14 +185,14 @@ impl Container { Ok(()) } - pub fn refresh_state(&mut self) -> Result<&mut Self> { + pub fn refresh_state(&mut self) -> Result<&mut Self, LibcontainerError> { let state = State::load(&self.root)?; self.state = state; Ok(self) } - pub fn load(container_root: PathBuf) -> Result { + pub fn load(container_root: PathBuf) -> Result { let state = State::load(&container_root)?; let mut container = Self { state, @@ -188,12 +202,14 @@ impl Container { Ok(container) } - pub fn save(&self) -> Result<()> { - log::debug!("Save container status: {:?} in {:?}", self, self.root); - self.state.save(&self.root) + pub fn save(&self) -> Result<(), LibcontainerError> { + tracing::debug!("Save container status: {:?} in {:?}", self, self.root); + self.state.save(&self.root)?; + + Ok(()) } - pub fn spec(&self) -> Result { + pub fn spec(&self) -> Result { let spec = YoukiConfig::load(&self.root)?; Ok(spec) } @@ -213,8 +229,8 @@ pub struct CheckpointOptions { #[cfg(test)] mod tests { use super::*; - use crate::utils::create_temp_dir; use anyhow::Context; + use anyhow::Result; use 
serial_test::serial; #[test] @@ -270,11 +286,11 @@ mod tests { #[test] fn test_get_set_systemd() { let mut container = Container::default(); - assert_eq!(container.systemd(), None); + assert!(!container.systemd()); container.set_systemd(true); - assert_eq!(container.systemd(), Some(true)); + assert!(container.systemd()); container.set_systemd(false); - assert_eq!(container.systemd(), Some(false)); + assert!(!container.systemd()); } #[test] @@ -288,7 +304,7 @@ mod tests { #[test] #[serial] fn test_refresh_load_save_state() -> Result<()> { - let tmp_dir = create_temp_dir("test_refresh_load_save_state")?; + let tmp_dir = tempfile::tempdir().unwrap(); let mut container_1 = Container::new( "container_id_1", ContainerStatus::Created, @@ -313,7 +329,7 @@ mod tests { #[test] #[serial] fn test_get_spec() -> Result<()> { - let tmp_dir = create_temp_dir("test_get_spec")?; + let tmp_dir = tempfile::tempdir().unwrap(); use oci_spec::runtime::Spec; let spec = Spec::default(); let config = diff --git a/crates/libcontainer/src/container/container_checkpoint.rs b/crates/libcontainer/src/container/container_checkpoint.rs index c54d97b76..a60547348 100644 --- a/crates/libcontainer/src/container/container_checkpoint.rs +++ b/crates/libcontainer/src/container/container_checkpoint.rs @@ -1,6 +1,6 @@ use super::{Container, ContainerStatus}; use crate::container::container::CheckpointOptions; -use anyhow::{bail, Context, Result}; +use crate::error::LibcontainerError; use libcgroups::common::CgroupSetup::{Hybrid, Legacy}; #[cfg(feature = "v1")] @@ -14,19 +14,15 @@ const CRIU_CHECKPOINT_LOG_FILE: &str = "dump.log"; const DESCRIPTORS_JSON: &str = "descriptors.json"; impl Container { - pub fn checkpoint(&mut self, opts: &CheckpointOptions) -> Result<()> { - self.refresh_status() - .context("failed to refresh container status")?; + pub fn checkpoint(&mut self, opts: &CheckpointOptions) -> Result<(), LibcontainerError> { + self.refresh_status()?; // can_pause() checks if the container is 
running. That also works for // checkpoitning. is_running() would make more sense here, but let's // just reuse existing functions. if !self.can_pause() { - bail!( - "{} could not be checkpointed because it was {:?}", - self.id(), - self.status() - ); + tracing::error!(status = ?self.status(), id = ?self.id(), "cannot checkpoint container because it is not running"); + return Err(LibcontainerError::IncorrectStatus); } let mut criu = rust_criu::Criu::new().unwrap(); @@ -51,17 +47,18 @@ impl Container { criu.set_external_mount(dest.clone(), dest); } Some("cgroup") => { - match libcgroups::common::get_cgroup_setup() - .context("failed to determine cgroup setup")? - { + match libcgroups::common::get_cgroup_setup()? { // For v1 it is necessary to list all cgroup mounts as external mounts Legacy | Hybrid => { #[cfg(not(feature = "v1"))] panic!("libcontainer can't run in a Legacy or Hybrid cgroup setup without the v1 feature"); #[cfg(feature = "v1")] - for mp in libcgroups::v1::util::list_subsystem_mount_points() - .context("failed to get subsystem mount points")? - { + for mp in libcgroups::v1::util::list_subsystem_mount_points().map_err( + |err| { + tracing::error!(?err, "failed to get subsystem mount points"); + LibcontainerError::OtherCgroup(err.to_string()) + }, + )? { let cgroup_mount = mp .clone() .into_os_string() @@ -79,15 +76,17 @@ impl Container { } } - let directory = std::fs::File::open(&opts.image_path) - .with_context(|| format!("failed to open {:?}", opts.image_path))?; + let directory = std::fs::File::open(&opts.image_path).map_err(|err| { + tracing::error!(path = ?opts.image_path, ?err, "failed to open criu image directory"); + LibcontainerError::OtherIO(err) + })?; criu.set_images_dir_fd(directory.as_raw_fd()); // It seems to be necessary to be defined outside of 'if' to // keep the FD open until CRIU uses it. 
let work_dir: std::fs::File; if let Some(wp) = &opts.work_path { - work_dir = std::fs::File::open(wp)?; + work_dir = std::fs::File::open(wp).map_err(LibcontainerError::OtherIO)?; criu.set_work_dir_fd(work_dir.as_raw_fd()); } @@ -96,15 +95,21 @@ impl Container { // Remember original stdin, stdout, stderr for container restore. let mut descriptors = Vec::new(); for n in 0..3 { - let link_path = match fs::read_link(format!("/proc/{}/fd/{}", pid, n)) { + let link_path = match fs::read_link(format!("/proc/{pid}/fd/{n}")) { Ok(lp) => lp.into_os_string().into_string().unwrap(), Err(..) => "/dev/null".to_string(), }; descriptors.push(link_path); } let descriptors_json_path = opts.image_path.join(DESCRIPTORS_JSON); - let mut descriptors_json = File::create(descriptors_json_path)?; - write!(descriptors_json, "{}", serde_json::to_string(&descriptors)?)?; + let mut descriptors_json = + File::create(descriptors_json_path).map_err(LibcontainerError::OtherIO)?; + write!( + descriptors_json, + "{}", + serde_json::to_string(&descriptors).map_err(LibcontainerError::OtherSerialization)? + ) + .map_err(LibcontainerError::OtherIO)?; criu.set_log_file(CRIU_CHECKPOINT_LOG_FILE.to_string()); criu.set_log_level(4); @@ -123,24 +128,17 @@ impl Container { .into_string() .unwrap(), ); - if let Err(e) = criu.dump() { - bail!( - "checkpointing container {} failed with {:?}. 
Please check CRIU logfile {:}/{}", - self.id(), - e, - opts.work_path - .as_ref() - .unwrap_or(&opts.image_path) - .display(), - CRIU_CHECKPOINT_LOG_FILE - ); - } + + criu.dump().map_err(|err| { + tracing::error!(?err, id = ?self.id(), logfile = ?opts.image_path.join(CRIU_CHECKPOINT_LOG_FILE), "checkpointing container failed"); + LibcontainerError::Other(err.to_string()) + })?; if !opts.leave_running { self.set_status(ContainerStatus::Stopped).save()?; } - log::debug!("container {} checkpointed", self.id()); + tracing::debug!("container {} checkpointed", self.id()); Ok(()) } } diff --git a/crates/libcontainer/src/container/container_delete.rs b/crates/libcontainer/src/container/container_delete.rs index 2dc19d477..0322fd392 100644 --- a/crates/libcontainer/src/container/container_delete.rs +++ b/crates/libcontainer/src/container/container_delete.rs @@ -1,8 +1,8 @@ use super::{Container, ContainerStatus}; -use crate::config::YoukiConfig; use crate::hooks; -use anyhow::{bail, Context, Result}; -use libcgroups; +use crate::process::intel_rdt::delete_resctrl_subdirectory; +use crate::{config::YoukiConfig, error::LibcontainerError}; +use libcgroups::{self, common::CgroupManager}; use nix::sys::signal; use std::fs; @@ -13,10 +13,13 @@ impl Container { /// /// ```no_run /// use libcontainer::container::builder::ContainerBuilder; - /// use libcontainer::syscall::syscall::create_syscall; + /// use libcontainer::syscall::syscall::SyscallType; /// /// # fn main() -> anyhow::Result<()> { - /// let mut container = ContainerBuilder::new("74f1a4cb3801".to_owned(), create_syscall().as_ref()) + /// let mut container = ContainerBuilder::new( + /// "74f1a4cb3801".to_owned(), + /// SyscallType::default(), + /// ) /// .as_init("/var/run/docker/bundle") /// .build()?; /// @@ -24,55 +27,98 @@ impl Container { /// # Ok(()) /// # } /// ``` - pub fn delete(&mut self, force: bool) -> Result<()> { - self.refresh_status() - .context("failed to refresh container status")?; - if self.can_kill() 
&& force { - self.do_kill(signal::Signal::SIGKILL, true)?; - self.set_status(ContainerStatus::Stopped).save()?; + pub fn delete(&mut self, force: bool) -> Result<(), LibcontainerError> { + self.refresh_status()?; + + tracing::debug!("container status: {:?}", self.status()); + + // Check if container is allowed to be deleted based on container status. + match self.status() { + ContainerStatus::Stopped => {} + ContainerStatus::Created => { + // Here, we differ from the OCI spec, but matches the same + // behavior as `runc` and `crun`. The OCI spec does not allow + // deletion of status `created` without `force` flag. But both + // `runc` and `crun` allows deleting `created`. Therefore we + // decided to follow `runc` and `crun`. + self.do_kill(signal::Signal::SIGKILL, true)?; + self.set_status(ContainerStatus::Stopped).save()?; + } + ContainerStatus::Creating | ContainerStatus::Running | ContainerStatus::Paused => { + // Containers can't be deleted while in these status, unless + // force flag is set. In the force case, we need to clean up any + // processes associated with containers. + if force { + self.do_kill(signal::Signal::SIGKILL, true)?; + self.set_status(ContainerStatus::Stopped).save()?; + } else { + tracing::error!( + id = ?self.id(), + status = ?self.status(), + "delete requires the container state to be stopped or created", + ); + return Err(LibcontainerError::IncorrectStatus); + } + } + } + + // Once reached here, the container is verified that it can be deleted. 
+ debug_assert!(self.status().can_delete()); + + if let Some(true) = &self.clean_up_intel_rdt_subdirectory() { + if let Err(err) = delete_resctrl_subdirectory(self.id()) { + tracing::warn!( + "failed to delete resctrl subdirectory due to: {err:?}, continue to delete" + ); + } } - log::debug!("container status: {:?}", self.status()); - if self.can_delete() { - if self.root.exists() { - let config = YoukiConfig::load(&self.root).with_context(|| { - format!("failed to load runtime spec for container {}", self.id()) - })?; - log::debug!("config: {:?}", config); - // remove the directory storing container state - log::debug!("remove dir {:?}", self.root); - fs::remove_dir_all(&self.root).with_context(|| { - format!("failed to remove container dir {}", self.root.display()) - })?; + if self.root.exists() { + match YoukiConfig::load(&self.root) { + Ok(config) => { + tracing::debug!("config: {:?}", config); - // remove the cgroup created for the container - // check https://man7.org/linux/man-pages/man7/cgroups.7.html - // creating and removing cgroups section for more information on cgroups - let use_systemd = self - .systemd() - .context("container state does not contain cgroup manager")?; - let cmanager = libcgroups::common::create_cgroup_manager( - &config.cgroup_path, - use_systemd, - self.id(), - ) - .context("failed to create cgroup manager")?; - cmanager.remove().with_context(|| { - format!("failed to remove cgroup {}", config.cgroup_path.display()) - })?; + // remove the cgroup created for the container + // check https://man7.org/linux/man-pages/man7/cgroups.7.html + // creating and removing cgroups section for more information on cgroups + let cmanager = libcgroups::common::create_cgroup_manager( + libcgroups::common::CgroupConfig { + cgroup_path: config.cgroup_path.to_owned(), + systemd_cgroup: self.systemd(), + container_name: self.id().to_string(), + }, + )?; + cmanager.remove().map_err(|err| { + tracing::error!(cgroup_path = ?config.cgroup_path, "failed to 
remove cgroup due to: {err:?}"); + err + })?; - if let Some(hooks) = config.hooks.as_ref() { - hooks::run_hooks(hooks.poststop().as_ref(), Some(self)) - .with_context(|| "failed to run post stop hooks")?; + if let Some(hooks) = config.hooks.as_ref() { + hooks::run_hooks(hooks.poststop().as_ref(), Some(self)).map_err(|err| { + tracing::error!(err = ?err, "failed to run post stop hooks"); + err + })?; + } + } + Err(err) => { + // There is a brief window where the container state is + // created, but the container config is not yet generated + // from the OCI spec. In this case, we assume as if we + // successfully deleted the config and moving on. + tracing::warn!( + "skipping loading youki config due to: {err:?}, continue to delete" + ); } } - Ok(()) - } else { - bail!( - "{} could not be deleted because it was {:?}", - self.id(), - self.status() - ) + + // remove the directory storing container state + tracing::debug!("remove dir {:?}", self.root); + fs::remove_dir_all(&self.root).map_err(|err| { + tracing::error!(?err, path = ?self.root, "failed to remove container dir"); + LibcontainerError::OtherIO(err) + })?; } + + Ok(()) } } diff --git a/crates/libcontainer/src/container/container_events.rs b/crates/libcontainer/src/container/container_events.rs index c736b834a..ed253be3a 100644 --- a/crates/libcontainer/src/container/container_events.rs +++ b/crates/libcontainer/src/container/container_events.rs @@ -1,7 +1,9 @@ use std::{thread, time::Duration}; +use crate::error::LibcontainerError; + use super::{Container, ContainerStatus}; -use anyhow::{bail, Context, Result}; +use libcgroups::common::CgroupManager; impl Container { /// Displays container events @@ -10,10 +12,13 @@ impl Container { /// /// ```no_run /// use libcontainer::container::builder::ContainerBuilder; - /// use libcontainer::syscall::syscall::create_syscall; + /// use libcontainer::syscall::syscall::SyscallType; /// /// # fn main() -> anyhow::Result<()> { - /// let mut container = 
ContainerBuilder::new("74f1a4cb3801".to_owned(), create_syscall().as_ref()) + /// let mut container = ContainerBuilder::new( + /// "74f1a4cb3801".to_owned(), + /// SyscallType::default(), + /// ) /// .as_init("/var/run/docker/bundle") /// .build()?; /// @@ -21,28 +26,35 @@ impl Container { /// # Ok(()) /// # } /// ``` - pub fn events(&mut self, interval: u32, stats: bool) -> Result<()> { - self.refresh_status() - .context("failed to refresh container status")?; + pub fn events(&mut self, interval: u32, stats: bool) -> Result<(), LibcontainerError> { + self.refresh_status()?; if !self.state.status.eq(&ContainerStatus::Running) { - bail!("{} is not in running state", self.id()); + tracing::error!(id = ?self.id(), status = ?self.state.status, "container is not running"); + return Err(LibcontainerError::IncorrectStatus); } - let cgroups_path = self.spec()?.cgroup_path; - let use_systemd = self - .systemd() - .context("could not determine cgroup manager")?; - let cgroup_manager = - libcgroups::common::create_cgroup_manager(cgroups_path, use_systemd, self.id())?; + libcgroups::common::create_cgroup_manager(libcgroups::common::CgroupConfig { + cgroup_path: self.spec()?.cgroup_path, + systemd_cgroup: self.systemd(), + container_name: self.id().to_string(), + })?; match stats { true => { let stats = cgroup_manager.stats()?; - println!("{}", serde_json::to_string_pretty(&stats)?); + println!( + "{}", + serde_json::to_string_pretty(&stats) + .map_err(LibcontainerError::OtherSerialization)? + ); } false => loop { let stats = cgroup_manager.stats()?; - println!("{}", serde_json::to_string_pretty(&stats)?); + println!( + "{}", + serde_json::to_string_pretty(&stats) + .map_err(LibcontainerError::OtherSerialization)? 
+ ); thread::sleep(Duration::from_secs(interval as u64)); }, } diff --git a/crates/libcontainer/src/container/container_kill.rs b/crates/libcontainer/src/container/container_kill.rs index a4e1e5a9e..6b5d08540 100644 --- a/crates/libcontainer/src/container/container_kill.rs +++ b/crates/libcontainer/src/container/container_kill.rs @@ -1,7 +1,6 @@ use super::{Container, ContainerStatus}; -use crate::signal::Signal; -use anyhow::{bail, Context, Result}; -use libcgroups::common::{create_cgroup_manager, get_cgroup_setup}; +use crate::{error::LibcontainerError, signal::Signal}; +use libcgroups::common::{get_cgroup_setup, CgroupManager}; use nix::sys::signal::{self}; impl Container { @@ -11,11 +10,14 @@ impl Container { /// /// ```no_run /// use libcontainer::container::builder::ContainerBuilder; - /// use libcontainer::syscall::syscall::create_syscall; + /// use libcontainer::syscall::syscall::SyscallType; /// use nix::sys::signal::Signal; /// /// # fn main() -> anyhow::Result<()> { - /// let mut container = ContainerBuilder::new("74f1a4cb3801".to_owned(), create_syscall().as_ref()) + /// let mut container = ContainerBuilder::new( + /// "74f1a4cb3801".to_owned(), + /// SyscallType::default(), + /// ) /// .as_init("/var/run/docker/bundle") /// .build()?; /// @@ -23,28 +25,29 @@ impl Container { /// # Ok(()) /// # } /// ``` - pub fn kill>(&mut self, signal: S, all: bool) -> Result<()> { - self.refresh_status() - .context("failed to refresh container status")?; - if self.can_kill() { - self.do_kill(signal, all)?; - } else { - // just like runc, allow kill --all even if the container is stopped - if all && self.status() == ContainerStatus::Stopped { + pub fn kill>(&mut self, signal: S, all: bool) -> Result<(), LibcontainerError> { + self.refresh_status()?; + match self.can_kill() { + true => { + self.do_kill(signal, all)?; + } + false if all && self.status() == ContainerStatus::Stopped => { self.do_kill(signal, all)?; - } else { - bail!( - "{} could not be killed because it 
was {:?}", - self.id(), - self.status() - ) + } + false => { + tracing::error!(id = ?self.id(), status = ?self.status(), "cannot kill container due to incorrect state"); + return Err(LibcontainerError::IncorrectStatus); } } self.set_status(ContainerStatus::Stopped).save()?; Ok(()) } - pub(crate) fn do_kill>(&self, signal: S, all: bool) -> Result<()> { + pub(crate) fn do_kill>( + &self, + signal: S, + all: bool, + ) -> Result<(), LibcontainerError> { if all { self.kill_all_processes(signal) } else { @@ -52,18 +55,21 @@ impl Container { } } - fn kill_one_process>(&self, signal: S) -> Result<()> { + fn kill_one_process>(&self, signal: S) -> Result<(), LibcontainerError> { let signal = signal.into().into_raw(); let pid = self.pid().unwrap(); - log::debug!("kill signal {} to {}", signal, pid); - let res = signal::kill(pid, signal); + tracing::debug!("kill signal {} to {}", signal, pid); - match res { + match signal::kill(pid, signal) { + Ok(_) => {} Err(nix::errno::Errno::ESRCH) => { - /* the process does not exist, which is what we want */ + // the process does not exist, which is what we want + } + Err(err) => { + tracing::error!(id = ?self.id(), err = ?err, ?pid, ?signal, "failed to kill process"); + return Err(LibcontainerError::OtherSyscall(err)); } - _ => res?, } // For cgroup V1, a frozon process cannot respond to signals, @@ -72,12 +78,14 @@ impl Container { match get_cgroup_setup()? 
{ libcgroups::common::CgroupSetup::Legacy | libcgroups::common::CgroupSetup::Hybrid => { - let cgroups_path = self.spec()?.cgroup_path; - let use_systemd = self - .systemd() - .context("container state does not contain cgroup manager")?; - let cmanger = create_cgroup_manager(&cgroups_path, use_systemd, self.id())?; - cmanger.freeze(libcgroups::common::FreezerState::Thawed)?; + let cmanager = libcgroups::common::create_cgroup_manager( + libcgroups::common::CgroupConfig { + cgroup_path: self.spec()?.cgroup_path, + systemd_cgroup: self.systemd(), + container_name: self.id().to_string(), + }, + )?; + cmanager.freeze(libcgroups::common::FreezerState::Thawed)?; } libcgroups::common::CgroupSetup::Unified => {} } @@ -85,41 +93,45 @@ impl Container { Ok(()) } - fn kill_all_processes>(&self, signal: S) -> Result<()> { + fn kill_all_processes>(&self, signal: S) -> Result<(), LibcontainerError> { let signal = signal.into().into_raw(); - let cgroups_path = self.spec()?.cgroup_path; - let use_systemd = self - .systemd() - .context("container state does not contain cgroup manager")?; - let cmanger = create_cgroup_manager(&cgroups_path, use_systemd, self.id())?; - let ret = cmanger.freeze(libcgroups::common::FreezerState::Frozen); - if ret.is_err() { - log::warn!( - "failed to freeze container {}, error: {}", - self.id(), - ret.unwrap_err() + let cmanager = + libcgroups::common::create_cgroup_manager(libcgroups::common::CgroupConfig { + cgroup_path: self.spec()?.cgroup_path, + systemd_cgroup: self.systemd(), + container_name: self.id().to_string(), + })?; + + if let Err(e) = cmanager.freeze(libcgroups::common::FreezerState::Frozen) { + tracing::warn!( + err = ?e, + id = ?self.id(), + "failed to freeze container", ); } - let pids = cmanger.get_all_pids()?; - pids.iter().try_for_each(|&pid| { - log::debug!("kill signal {} to {}", signal, pid); - let res = signal::kill(pid, signal); - match res { - Err(nix::errno::Errno::ESRCH) => { - /* the process does not exist, which is what we 
want */ - Ok(()) + + let pids = cmanager.get_all_pids()?; + pids.iter() + .try_for_each(|&pid| { + tracing::debug!("kill signal {} to {}", signal, pid); + let res = signal::kill(pid, signal); + match res { + Err(nix::errno::Errno::ESRCH) => { + // the process does not exist, which is what we want + Ok(()) + } + _ => res, } - _ => res, - } - })?; - let ret = cmanger.freeze(libcgroups::common::FreezerState::Thawed); - if ret.is_err() { - log::warn!( - "failed to thaw container {}, error: {}", - self.id(), - ret.unwrap_err() + }) + .map_err(LibcontainerError::OtherSyscall)?; + if let Err(err) = cmanager.freeze(libcgroups::common::FreezerState::Thawed) { + tracing::warn!( + err = ?err, + id = ?self.id(), + "failed to thaw container", ); } + Ok(()) } } diff --git a/crates/libcontainer/src/container/container_pause.rs b/crates/libcontainer/src/container/container_pause.rs index 9200c735e..8e7248e46 100644 --- a/crates/libcontainer/src/container/container_pause.rs +++ b/crates/libcontainer/src/container/container_pause.rs @@ -1,6 +1,7 @@ +use crate::error::LibcontainerError; + use super::{Container, ContainerStatus}; -use anyhow::{bail, Context, Result}; -use libcgroups::common::FreezerState; +use libcgroups::common::{CgroupManager, FreezerState}; impl Container { /// Suspends all processes within the container @@ -9,10 +10,13 @@ impl Container { /// /// ```no_run /// use libcontainer::container::builder::ContainerBuilder; - /// use libcontainer::syscall::syscall::create_syscall; + /// use libcontainer::syscall::syscall::SyscallType; /// /// # fn main() -> anyhow::Result<()> { - /// let mut container = ContainerBuilder::new("74f1a4cb3801".to_owned(), create_syscall().as_ref()) + /// let mut container = ContainerBuilder::new( + /// "74f1a4cb3801".to_owned(), + /// SyscallType::default(), + /// ) /// .as_init("/var/run/docker/bundle") /// .build()?; /// @@ -20,30 +24,26 @@ impl Container { /// # Ok(()) /// # } /// ``` - pub fn pause(&mut self) -> Result<()> { - 
self.refresh_status() - .context("failed to refresh container status")?; + pub fn pause(&mut self) -> Result<(), LibcontainerError> { + self.refresh_status()?; if !self.can_pause() { - bail!( - "{} could not be paused because it was {:?}", - self.id(), - self.status() - ); + tracing::error!(status = ?self.status(), id = ?self.id(), "cannot pause container"); + return Err(LibcontainerError::IncorrectStatus); } - let cgroups_path = self.spec()?.cgroup_path; - let use_systemd = self - .systemd() - .context("container state does not contain cgroup manager")?; let cmanager = - libcgroups::common::create_cgroup_manager(cgroups_path, use_systemd, self.id())?; + libcgroups::common::create_cgroup_manager(libcgroups::common::CgroupConfig { + cgroup_path: self.spec()?.cgroup_path, + systemd_cgroup: self.systemd(), + container_name: self.id().to_string(), + })?; cmanager.freeze(FreezerState::Frozen)?; - log::debug!("saving paused status"); + tracing::debug!("saving paused status"); self.set_status(ContainerStatus::Paused).save()?; - log::debug!("container {} paused", self.id()); + tracing::debug!("container {} paused", self.id()); Ok(()) } } diff --git a/crates/libcontainer/src/container/container_resume.rs b/crates/libcontainer/src/container/container_resume.rs index 2e20b7f9b..544b8e8e5 100644 --- a/crates/libcontainer/src/container/container_resume.rs +++ b/crates/libcontainer/src/container/container_resume.rs @@ -1,7 +1,8 @@ +use crate::error::LibcontainerError; + use super::{Container, ContainerStatus}; -use anyhow::{bail, Context, Result}; -use libcgroups::common::FreezerState; +use libcgroups::common::{CgroupManager, FreezerState}; impl Container { /// Resumes all processes within the container @@ -10,10 +11,13 @@ impl Container { /// /// ```no_run /// use libcontainer::container::builder::ContainerBuilder; - /// use libcontainer::syscall::syscall::create_syscall; + /// use libcontainer::syscall::syscall::SyscallType; /// /// # fn main() -> anyhow::Result<()> { - /// 
let mut container = ContainerBuilder::new("74f1a4cb3801".to_owned(), create_syscall().as_ref()) + /// let mut container = ContainerBuilder::new( + /// "74f1a4cb3801".to_owned(), + /// SyscallType::default(), + /// ) /// .as_init("/var/run/docker/bundle") /// .build()?; /// @@ -21,32 +25,28 @@ impl Container { /// # Ok(()) /// # } /// ``` - pub fn resume(&mut self) -> Result<()> { - self.refresh_status() - .context("failed to refresh container status")?; + pub fn resume(&mut self) -> Result<(), LibcontainerError> { + self.refresh_status()?; // check if container can be resumed : // for example, a running process cannot be resumed if !self.can_resume() { - bail!( - "{} could not be resumed because it was {:?}", - self.id(), - self.status() - ); + tracing::error!(status = ?self.status(), id = ?self.id(), "cannot resume container"); + return Err(LibcontainerError::IncorrectStatus); } - let cgroups_path = self.spec()?.cgroup_path; - let use_systemd = self - .systemd() - .context("container state does not contain cgroup manager")?; let cmanager = - libcgroups::common::create_cgroup_manager(cgroups_path, use_systemd, self.id())?; + libcgroups::common::create_cgroup_manager(libcgroups::common::CgroupConfig { + cgroup_path: self.spec()?.cgroup_path, + systemd_cgroup: self.systemd(), + container_name: self.id().to_string(), + })?; // resume the frozen container cmanager.freeze(FreezerState::Thawed)?; - log::debug!("saving running status"); + tracing::debug!("saving running status"); self.set_status(ContainerStatus::Running).save()?; - log::debug!("container {} resumed", self.id()); + tracing::debug!("container {} resumed", self.id()); Ok(()) } } diff --git a/crates/libcontainer/src/container/container_start.rs b/crates/libcontainer/src/container/container_start.rs index b05243e35..c6805511c 100644 --- a/crates/libcontainer/src/container/container_start.rs +++ b/crates/libcontainer/src/container/container_start.rs @@ -1,12 +1,12 @@ use crate::{ config::YoukiConfig, + 
error::LibcontainerError, hooks, notify_socket::{NotifySocket, NOTIFY_FILE}, }; use super::{Container, ContainerStatus}; -use anyhow::{bail, Context, Result}; -use nix::unistd; +use nix::{sys::signal, unistd}; impl Container { /// Starts a previously created container @@ -15,10 +15,13 @@ impl Container { /// /// ```no_run /// use libcontainer::container::builder::ContainerBuilder; - /// use libcontainer::syscall::syscall::create_syscall; + /// use libcontainer::syscall::syscall::SyscallType; /// /// # fn main() -> anyhow::Result<()> { - /// let mut container = ContainerBuilder::new("74f1a4cb3801".to_owned(), create_syscall().as_ref()) + /// let mut container = ContainerBuilder::new( + /// "74f1a4cb3801".to_owned(), + /// SyscallType::default(), + /// ) /// .as_init("/var/run/docker/bundle") /// .build()?; /// @@ -26,43 +29,57 @@ impl Container { /// # Ok(()) /// # } /// ``` - pub fn start(&mut self) -> Result<()> { - self.refresh_status() - .context("failed to refresh container status")?; + pub fn start(&mut self) -> Result<(), LibcontainerError> { + self.refresh_status()?; if !self.can_start() { - let err_msg = format!( - "{} could not be started because it was {:?}", - self.id(), - self.status() - ); - log::error!("{}", err_msg); - bail!(err_msg); + tracing::error!(status = ?self.status(), id = ?self.id(), "cannot start container due to incorrect state"); + return Err(LibcontainerError::IncorrectStatus); } - let config = YoukiConfig::load(&self.root) - .with_context(|| format!("failed to load runtime spec for container {}", self.id()))?; + let config = YoukiConfig::load(&self.root).map_err(|err| { + tracing::error!( + "failed to load runtime spec for container {}: {}", + self.id(), + err + ); + err + })?; if let Some(hooks) = config.hooks.as_ref() { // While prestart is marked as deprecated in the OCI spec, the docker and integration test still // uses it. 
#[allow(deprecated)] - hooks::run_hooks(hooks.prestart().as_ref(), Some(self)) - .with_context(|| "failed to run pre start hooks")?; + hooks::run_hooks(hooks.prestart().as_ref(), Some(self)).map_err(|err| { + tracing::error!("failed to run pre start hooks: {}", err); + // In the case where prestart hook fails, the runtime must + // stop the container before generating an error and exiting. + let _ = self.kill(signal::Signal::SIGKILL, true); + + err + })?; } - unistd::chdir(self.root.as_os_str())?; + unistd::chdir(self.root.as_os_str()).map_err(|err| { + tracing::error!("failed to change directory to container root: {}", err); + LibcontainerError::OtherSyscall(err) + })?; let mut notify_socket = NotifySocket::new(self.root.join(NOTIFY_FILE)); notify_socket.notify_container_start()?; self.set_status(ContainerStatus::Running) .save() - .with_context(|| format!("could not save state for container {}", self.id()))?; + .map_err(|err| { + tracing::error!(id = ?self.id(), ?err, "failed to save state for container"); + err + })?; // Run post start hooks. It runs after the container process is started. // It is called in the runtime namespace. 
if let Some(hooks) = config.hooks.as_ref() { - hooks::run_hooks(hooks.poststart().as_ref(), Some(self)) - .with_context(|| "failed to run post start hooks")?; + hooks::run_hooks(hooks.poststart().as_ref(), Some(self)).map_err(|err| { + tracing::error!("failed to run post start hooks: {}", err); + err + })?; } Ok(()) diff --git a/crates/libcontainer/src/container/init_builder.rs b/crates/libcontainer/src/container/init_builder.rs index 27e510043..38da5c9b2 100644 --- a/crates/libcontainer/src/container/init_builder.rs +++ b/crates/libcontainer/src/container/init_builder.rs @@ -1,15 +1,19 @@ -use anyhow::{bail, Context, Result}; use nix::unistd; use oci_spec::runtime::Spec; use rootless::Rootless; use std::{ fs, path::{Path, PathBuf}, + rc::Rc, }; use crate::{ - apparmor, config::YoukiConfig, notify_socket::NOTIFY_FILE, process::args::ContainerType, - rootless, tty, utils, + apparmor, + config::YoukiConfig, + error::{ErrInvalidSpec, LibcontainerError, MissingSpecError}, + notify_socket::NOTIFY_FILE, + process::args::ContainerType, + rootless, tty, }; use super::{ @@ -17,20 +21,22 @@ use super::{ }; // Builder that can be used to configure the properties of a new container -pub struct InitContainerBuilder<'a> { - base: ContainerBuilder<'a>, +pub struct InitContainerBuilder { + base: ContainerBuilder, bundle: PathBuf, use_systemd: bool, + detached: bool, } -impl<'a> InitContainerBuilder<'a> { +impl InitContainerBuilder { /// Generates the base configuration for a new container from which /// configuration methods can be chained - pub(super) fn new(builder: ContainerBuilder<'a>, bundle: PathBuf) -> Self { + pub(super) fn new(builder: ContainerBuilder, bundle: PathBuf) -> Self { Self { base: builder, bundle, use_systemd: true, + detached: true, } } @@ -40,24 +46,33 @@ impl<'a> InitContainerBuilder<'a> { self } + pub fn with_detach(mut self, detached: bool) -> Self { + self.detached = detached; + self + } + /// Creates a new container - pub fn build(self) -> Result { - 
let spec = self.load_spec().context("failed to load spec")?; - let container_dir = self - .create_container_dir() - .context("failed to create container dir")?; - - let mut container = self - .create_container_state(&container_dir) - .context("failed to create container state")?; + pub fn build(self) -> Result { + let spec = self.load_spec()?; + let container_dir = self.create_container_dir()?; + + let mut container = self.create_container_state(&container_dir)?; container .set_systemd(self.use_systemd) .set_annotations(spec.annotations().clone()); - unistd::chdir(&container_dir)?; + unistd::chdir(&container_dir).map_err(|err| { + tracing::error!( + ?container_dir, + ?err, + "failed to chdir into the container directory" + ); + LibcontainerError::OtherSyscall(err) + })?; let notify_path = container_dir.join(NOTIFY_FILE); // convert path of root file system of the container to absolute path - let rootfs = fs::canonicalize(spec.root().as_ref().context("no root in spec")?.path())?; + let rootfs = fs::canonicalize(spec.root().as_ref().ok_or(MissingSpecError::Root)?.path()) + .map_err(LibcontainerError::OtherIO)?; // if socket file path is given in commandline options, // get file descriptors of console socket @@ -73,9 +88,10 @@ impl<'a> InitContainerBuilder<'a> { let rootless = Rootless::new(&spec)?; let config = YoukiConfig::from_spec(&spec, container.id(), rootless.is_some())?; - config - .save(&container_dir) - .context("failed to save config")?; + config.save(&container_dir).map_err(|err| { + tracing::error!(?container_dir, "failed to save config: {}", err); + err + })?; let mut builder_impl = ContainerBuilderImpl { container_type: ContainerType::InitContainer, @@ -84,60 +100,94 @@ impl<'a> InitContainerBuilder<'a> { pid_file: self.base.pid_file, console_socket: csocketfd, use_systemd: self.use_systemd, - spec: &spec, + spec: Rc::new(spec), rootfs, rootless, notify_path, container: Some(container.clone()), preserve_fds: self.base.preserve_fds, - detached: false, // 
TODO this should be set properly based on how the command is given + detached: self.detached, + executor: self.base.executor, }; builder_impl.create()?; + container.refresh_state()?; Ok(container) } - fn create_container_dir(&self) -> Result { + fn create_container_dir(&self) -> Result { let container_dir = self.base.root_path.join(&self.base.container_id); - log::debug!("container directory will be {:?}", container_dir); + tracing::debug!("container directory will be {:?}", container_dir); if container_dir.exists() { - bail!("container {} already exists", self.base.container_id); + tracing::error!(id = self.base.container_id, dir = ?container_dir, "container already exists"); + return Err(LibcontainerError::Exist); } - utils::create_dir_all(&container_dir).context("failed to create container dir")?; + std::fs::create_dir_all(&container_dir).map_err(|err| { + tracing::error!( + ?container_dir, + "failed to create container directory: {}", + err + ); + LibcontainerError::OtherIO(err) + })?; Ok(container_dir) } - fn load_spec(&self) -> Result { + fn load_spec(&self) -> Result { let source_spec_path = self.bundle.join("config.json"); let mut spec = Spec::load(source_spec_path)?; - Self::validate_spec(&spec).context("failed to validate runtime spec")?; + Self::validate_spec(&spec)?; + + spec.canonicalize_rootfs(&self.bundle).map_err(|err| { + tracing::error!(bundle = ?self.bundle, "failed to canonicalize rootfs: {}", err); + err + })?; - spec.canonicalize_rootfs(&self.bundle) - .context("failed to canonicalize rootfs")?; Ok(spec) } - fn validate_spec(spec: &Spec) -> Result<()> { - if !spec.version().starts_with("1.0") { - bail!( - "runtime spec has incompatible version '{}'. Only 1.0.X is supported", + fn validate_spec(spec: &Spec) -> Result<(), LibcontainerError> { + let version = spec.version(); + if !version.starts_with("1.") { + tracing::error!( + "runtime spec has incompatible version '{}'. 
Only 1.X.Y is supported", spec.version() ); + Err(ErrInvalidSpec::UnsupportedVersion)?; } if let Some(process) = spec.process() { if let Some(profile) = process.apparmor_profile() { - if !apparmor::is_enabled()? { - bail!( - "apparmor profile {} is specified in runtime spec, \ - but apparmor is not activated on this system", - profile - ); + let apparmor_is_enabled = apparmor::is_enabled().map_err(|err| { + tracing::error!(?err, "failed to check if apparmor is enabled"); + LibcontainerError::OtherIO(err) + })?; + if !apparmor_is_enabled { + tracing::error!(?profile, + "apparmor profile exists in the spec, but apparmor is not activated on this system"); + Err(ErrInvalidSpec::AppArmorNotEnabled)?; + } + } + + if let Some(io_priority) = process.io_priority() { + let priority = io_priority.priority(); + let iop_class_res = serde_json::to_string(&io_priority.class()); + match iop_class_res { + Ok(iop_class) => { + if !(0..=7).contains(&priority) { + tracing::error!(?priority, "io priority '{}' not between 0 and 7 (inclusive), class '{}' not in (IO_PRIO_CLASS_RT,IO_PRIO_CLASS_BE,IO_PRIO_CLASS_IDLE)",priority, iop_class); + Err(ErrInvalidSpec::IoPriority)?; + } + } + Err(e) => { + tracing::error!(?priority, ?e, "failed to parse io priority class"); + Err(ErrInvalidSpec::IoPriority)?; + } } } } @@ -145,7 +195,7 @@ impl<'a> InitContainerBuilder<'a> { Ok(()) } - fn create_container_state(&self, container_dir: &Path) -> Result { + fn create_container_state(&self, container_dir: &Path) -> Result { let container = Container::new( &self.base.container_id, ContainerStatus::Creating, diff --git a/crates/libcontainer/src/container/state.rs b/crates/libcontainer/src/container/state.rs index c9949b0f9..5b6bd6f18 100644 --- a/crates/libcontainer/src/container/state.rs +++ b/crates/libcontainer/src/container/state.rs @@ -2,13 +2,13 @@ use std::collections::HashMap; use std::fmt::Display; use std::fs; -use std::io::BufReader; +use std::io::{BufReader, BufWriter, Write}; use 
std::path::PathBuf; use std::{fs::File, path::Path}; -use anyhow::{Context, Result}; use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; +use tracing::instrument; /// Indicates status of the container #[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq, Eq)] @@ -68,10 +68,31 @@ impl Display for ContainerStatus { Self::Paused => "Paused", }; - write!(f, "{}", print) + write!(f, "{print}") } } +#[derive(Debug, thiserror::Error)] +pub enum StateError { + #[error("failed to open container state file {state_file_path:?}")] + OpenStateFile { + state_file_path: PathBuf, + source: std::io::Error, + }, + #[error("failed to parse container state file {state_file_path:?}")] + ParseStateFile { + state_file_path: PathBuf, + source: serde_json::Error, + }, + #[error("failed to write container state file {state_file_path:?}")] + WriteStateFile { + state_file_path: PathBuf, + source: std::io::Error, + }, +} + +type Result = std::result::Result; + /// Stores the state information of the container #[derive(Serialize, Deserialize, Debug, Clone, Default)] #[serde(rename_all = "camelCase")] @@ -97,7 +118,9 @@ pub struct State { #[serde(skip_serializing_if = "Option::is_none")] pub creator: Option, // Specifies if systemd should be used to manage cgroups - pub use_systemd: Option, + pub use_systemd: bool, + // Specifies if the Intel RDT subdirectory needs be cleaned up. 
+ pub clean_up_intel_rdt_subdirectory: Option, } impl State { @@ -118,10 +141,12 @@ impl State { annotations: Some(HashMap::default()), created: None, creator: None, - use_systemd: None, + use_systemd: false, + clean_up_intel_rdt_subdirectory: None, } } + #[instrument(level = "trace")] pub fn save(&self, container_root: &Path) -> Result<()> { let state_file_path = Self::file_path(container_root); let file = fs::OpenOptions::new() @@ -131,18 +156,70 @@ impl State { .create(true) .truncate(true) .open(&state_file_path) - .with_context(|| format!("failed to open {}", state_file_path.display()))?; - serde_json::to_writer(&file, self)?; + .map_err(|err| { + tracing::error!( + state_file_path = ?state_file_path, + err = %err, + "failed to open container state file", + ); + StateError::OpenStateFile { + state_file_path: state_file_path.to_owned(), + source: err, + } + })?; + let mut writer = BufWriter::new(file); + serde_json::to_writer(&mut writer, self).map_err(|err| { + tracing::error!( + ?state_file_path, + %err, + "failed to parse container state file", + ); + StateError::ParseStateFile { + state_file_path: state_file_path.to_owned(), + source: err, + } + })?; + writer.flush().map_err(|err| { + tracing::error!( + ?state_file_path, + %err, + "failed to write container state file", + ); + StateError::WriteStateFile { + state_file_path: state_file_path.to_owned(), + source: err, + } + })?; + Ok(()) } pub fn load(container_root: &Path) -> Result { let state_file_path = Self::file_path(container_root); - let state_file = File::open(&state_file_path).with_context(|| { - format!("failed to open container state file {:?}", state_file_path) + let state_file = File::open(&state_file_path).map_err(|err| { + tracing::error!( + ?state_file_path, + %err, + "failed to open container state file", + ); + StateError::OpenStateFile { + state_file_path: state_file_path.to_owned(), + source: err, + } + })?; + + let state: Self = 
serde_json::from_reader(BufReader::new(state_file)).map_err(|err| { + tracing::error!( + ?state_file_path, + %err, + "failed to parse container state file", + ); + StateError::ParseStateFile { + state_file_path: state_file_path.to_owned(), + source: err, + } })?; - let state: Self = serde_json::from_reader(BufReader::new(state_file))?; Ok(state) } diff --git a/crates/libcontainer/src/container/tenant_builder.rs b/crates/libcontainer/src/container/tenant_builder.rs index 95972d40c..c6b8bbc97 100644 --- a/crates/libcontainer/src/container/tenant_builder.rs +++ b/crates/libcontainer/src/container/tenant_builder.rs @@ -1,4 +1,3 @@ -use anyhow::{bail, Context, Result}; use caps::Capability; use nix::fcntl::OFlag; use nix::unistd::{self, close, pipe2, read, Pid}; @@ -9,16 +8,19 @@ use oci_spec::runtime::{ }; use procfs::process::Namespace; +use std::rc::Rc; use std::{ collections::HashMap, convert::TryFrom, ffi::{OsStr, OsString}, fs, + io::BufReader, os::unix::prelude::RawFd, path::{Path, PathBuf}, str::FromStr, }; +use crate::error::{ErrInvalidSpec, LibcontainerError, MissingSpecError}; use crate::process::args::ContainerType; use crate::{capabilities::CapabilityExt, container::builder_impl::ContainerBuilderImpl}; use crate::{notify_socket::NotifySocket, rootless::Rootless, tty, utils}; @@ -31,8 +33,8 @@ const TENANT_TTY: &str = "tenant-tty-"; /// Builder that can be used to configure the properties of a process /// that will join an existing container sandbox -pub struct TenantContainerBuilder<'a> { - base: ContainerBuilder<'a>, +pub struct TenantContainerBuilder { + base: ContainerBuilder, env: HashMap, cwd: Option, args: Vec, @@ -42,11 +44,11 @@ pub struct TenantContainerBuilder<'a> { detached: bool, } -impl<'a> TenantContainerBuilder<'a> { +impl TenantContainerBuilder { /// Generates the base configuration for a process that will join /// an existing container sandbox from which configuration methods /// can be chained - pub(super) fn new(builder: 
ContainerBuilder<'a>) -> Self { + pub(super) fn new(builder: ContainerBuilder) -> Self { Self { base: builder, env: HashMap::new(), @@ -98,25 +100,19 @@ impl<'a> TenantContainerBuilder<'a> { } /// Joins an existing container - pub fn build(self) -> Result { - let container_dir = self - .lookup_container_dir() - .context("failed to look up container dir")?; - let container = self - .load_container_state(container_dir.clone()) - .context("failed to load container state")?; - let mut spec = self - .load_init_spec(&container) - .context("failed to load init spec")?; - self.adapt_spec_for_tenant(&mut spec, &container) - .context("failed to adapt spec for tenant")?; - - log::debug!("{:#?}", spec); - - unistd::chdir(&container_dir)?; + pub fn build(self) -> Result { + let container_dir = self.lookup_container_dir()?; + let container = self.load_container_state(container_dir.clone())?; + let mut spec = self.load_init_spec(&container)?; + self.adapt_spec_for_tenant(&mut spec, &container)?; + + tracing::debug!("{:#?}", spec); + + unistd::chdir(&container_dir).map_err(LibcontainerError::OtherSyscall)?; let notify_path = Self::setup_notify_listener(&container_dir)?; // convert path of root file system of the container to absolute path - let rootfs = fs::canonicalize(spec.root().as_ref().context("no root in spec")?.path())?; + let rootfs = fs::canonicalize(spec.root().as_ref().ok_or(MissingSpecError::Root)?.path()) + .map_err(LibcontainerError::OtherIO)?; // if socket file path is given in commandline options, // get file descriptors of console socket @@ -125,7 +121,8 @@ impl<'a> TenantContainerBuilder<'a> { let use_systemd = self.should_use_systemd(&container); let rootless = Rootless::new(&spec)?; - let (read_end, write_end) = pipe2(OFlag::O_CLOEXEC)?; + let (read_end, write_end) = + pipe2(OFlag::O_CLOEXEC).map_err(LibcontainerError::OtherSyscall)?; let mut builder_impl = ContainerBuilderImpl { container_type: ContainerType::TenantContainer { @@ -136,13 +133,14 @@ impl<'a> 
TenantContainerBuilder<'a> { pid_file: self.base.pid_file, console_socket: csocketfd, use_systemd, - spec: &spec, + spec: Rc::new(spec), rootfs, rootless, notify_path: notify_path.clone(), container: None, preserve_fds: self.base.preserve_fds, detached: self.detached, + executor: self.base.executor, }; let pid = builder_impl.create()?; @@ -150,18 +148,20 @@ impl<'a> TenantContainerBuilder<'a> { let mut notify_socket = NotifySocket::new(notify_path); notify_socket.notify_container_start()?; - close(write_end)?; + close(write_end).map_err(LibcontainerError::OtherSyscall)?; let mut err_str_buf = Vec::new(); loop { let mut buf = [0; 3]; - match read(read_end, &mut buf)? { + match read(read_end, &mut buf).map_err(LibcontainerError::OtherSyscall)? { 0 => { if err_str_buf.is_empty() { return Ok(pid); } else { - bail!(String::from_utf8_lossy(&err_str_buf).to_string()); + return Err(LibcontainerError::Other( + String::from_utf8_lossy(&err_str_buf).to_string(), + )); } } _ => { @@ -171,45 +171,83 @@ impl<'a> TenantContainerBuilder<'a> { } } - fn lookup_container_dir(&self) -> Result { + fn lookup_container_dir(&self) -> Result { let container_dir = self.base.root_path.join(&self.base.container_id); if !container_dir.exists() { - bail!("container {} does not exist", self.base.container_id); + tracing::error!(?container_dir, ?self.base.container_id, "container dir does not exist"); + return Err(LibcontainerError::NoDirectory); } Ok(container_dir) } - fn load_init_spec(&self, container: &Container) -> Result { + fn load_init_spec(&self, container: &Container) -> Result { let spec_path = container.bundle().join("config.json"); - let mut spec = Spec::load(&spec_path) - .with_context(|| format!("failed to load spec from {:?}", spec_path))?; + let mut spec = Spec::load(&spec_path).map_err(|err| { + tracing::error!(path = ?spec_path, ?err, "failed to load spec"); + err + })?; + + Self::validate_spec(&spec)?; - spec.canonicalize_rootfs(container.bundle()) - .context("failed to 
canonicalize rootfs")?; + spec.canonicalize_rootfs(container.bundle())?; Ok(spec) } - fn load_container_state(&self, container_dir: PathBuf) -> Result { + fn validate_spec(spec: &Spec) -> Result<(), LibcontainerError> { + let version = spec.version(); + if !version.starts_with("1.") { + tracing::error!( + "runtime spec has incompatible version '{}'. Only 1.X.Y is supported", + spec.version() + ); + Err(ErrInvalidSpec::UnsupportedVersion)?; + } + + if let Some(process) = spec.process() { + if let Some(io_priority) = process.io_priority() { + let priority = io_priority.priority(); + let iop_class_res = serde_json::to_string(&io_priority.class()); + match iop_class_res { + Ok(iop_class) => { + if !(0..=7).contains(&priority) { + tracing::error!(?priority, "io priority '{}' not between 0 and 7 (inclusive), class '{}' not in (IO_PRIO_CLASS_RT,IO_PRIO_CLASS_BE,IO_PRIO_CLASS_IDLE)",priority, iop_class); + Err(ErrInvalidSpec::IoPriority)?; + } + } + Err(e) => { + tracing::error!(?priority, ?e, "failed to parse io priority class"); + Err(ErrInvalidSpec::IoPriority)?; + } + } + } + } + + Ok(()) + } + + fn load_container_state(&self, container_dir: PathBuf) -> Result { let container = Container::load(container_dir)?; if !container.can_exec() { - bail!( - "Cannot exec as container is in state {}", - container.status() - ); + tracing::error!(status = ?container.status(), "cannot exec as container"); + return Err(LibcontainerError::IncorrectStatus); } Ok(container) } - fn adapt_spec_for_tenant(&self, spec: &mut Spec, container: &Container) -> Result<()> { + fn adapt_spec_for_tenant( + &self, + spec: &mut Spec, + container: &Container, + ) -> Result<(), LibcontainerError> { let process = if let Some(process) = &self.process { self.get_process(process)? } else { let mut process_builder = ProcessBuilder::default() .args(self.get_args()?) - .env(self.get_environment()?); + .env(self.get_environment()); if let Some(cwd) = self.get_working_dir()? 
{ process_builder = process_builder.cwd(cwd); } @@ -226,7 +264,9 @@ impl<'a> TenantContainerBuilder<'a> { }; if container.pid().is_none() { - bail!("could not retrieve container init pid"); + return Err(LibcontainerError::Other( + "could not retrieve container init pid".into(), + )); } let init_process = procfs::process::Process::new(container.pid().unwrap().as_raw())?; @@ -237,53 +277,54 @@ impl<'a> TenantContainerBuilder<'a> { Ok(()) } - fn get_process(&self, process: &Path) -> Result { + fn get_process(&self, process: &Path) -> Result { if !process.exists() { - bail!( - "Process.json file does not exist at specified path {}", - process.display() - ) + tracing::error!(?process, "process.json file does not exist"); + return Err(LibcontainerError::Other( + "process.json file does not exist".into(), + )); } - let process = utils::open(process)?; - let process_spec = serde_json::from_reader(process)?; + let process = utils::open(process).map_err(LibcontainerError::OtherIO)?; + let reader = BufReader::new(process); + let process_spec = + serde_json::from_reader(reader).map_err(LibcontainerError::OtherSerialization)?; Ok(process_spec) } - fn get_working_dir(&self) -> Result> { + fn get_working_dir(&self) -> Result, LibcontainerError> { if let Some(cwd) = &self.cwd { if cwd.is_relative() { - bail!( - "current working directory must be an absolute path, but is {:?}", - cwd - ); + tracing::error!(?cwd, "current working directory must be an absolute path"); + return Err(LibcontainerError::Other( + "current working directory must be an absolute path".into(), + )); } return Ok(Some(cwd.into())); } Ok(None) } - fn get_args(&self) -> Result> { + fn get_args(&self) -> Result, LibcontainerError> { if self.args.is_empty() { - bail!("container command was not specified") + Err(MissingSpecError::Args)?; } Ok(self.args.clone()) } - fn get_environment(&self) -> Result> { - Ok(self - .env - .iter() - .map(|(k, v)| format!("{}={}", k, v)) - .collect()) + fn get_environment(&self) -> 
Vec { + self.env.iter().map(|(k, v)| format!("{k}={v}")).collect() } fn get_no_new_privileges(&self) -> Option { self.no_new_privs } - fn get_capabilities(&self, spec: &Spec) -> Result> { + fn get_capabilities( + &self, + spec: &Spec, + ) -> Result, LibcontainerError> { if !self.capabilities.is_empty() { let mut caps: Vec = Vec::with_capacity(self.capabilities.len()); for cap in &self.capabilities { @@ -296,7 +337,7 @@ impl<'a> TenantContainerBuilder<'a> { if let Some(spec_caps) = spec .process() .as_ref() - .context("no process in spec")? + .ok_or(MissingSpecError::Process)? .capabilities() { let mut capabilities_builder = LinuxCapabilitiesBuilder::default(); @@ -358,7 +399,7 @@ impl<'a> TenantContainerBuilder<'a> { fn get_namespaces( &self, init_namespaces: HashMap, - ) -> Result> { + ) -> Result, LibcontainerError> { let mut tenant_namespaces = Vec::with_capacity(init_namespaces.len()); for &ns_type in NAMESPACE_TYPES { @@ -377,21 +418,17 @@ impl<'a> TenantContainerBuilder<'a> { } fn should_use_systemd(&self, container: &Container) -> bool { - if let Some(use_systemd) = container.systemd() { - return use_systemd; - } - - false + container.systemd() } - fn setup_notify_listener(container_dir: &Path) -> Result { + fn setup_notify_listener(container_dir: &Path) -> Result { let notify_name = Self::generate_name(container_dir, TENANT_NOTIFY); let socket_path = container_dir.join(notify_name); Ok(socket_path) } - fn setup_tty_socket(&self, container_dir: &Path) -> Result> { + fn setup_tty_socket(&self, container_dir: &Path) -> Result, LibcontainerError> { let tty_name = Self::generate_name(container_dir, TENANT_TTY); let csocketfd = if let Some(console_socket) = &self.base.console_socket { Some(tty::setup_console_socket( @@ -409,7 +446,7 @@ impl<'a> TenantContainerBuilder<'a> { fn generate_name(dir: &Path, prefix: &str) -> String { loop { let rand = fastrand::i32(..); - let name = format!("{}{:x}.sock", prefix, rand); + let name = format!("{prefix}{rand:x}.sock"); if 
!dir.join(&name).exists() { return name; } diff --git a/crates/libcontainer/src/error.rs b/crates/libcontainer/src/error.rs new file mode 100644 index 000000000..6e1dfeff1 --- /dev/null +++ b/crates/libcontainer/src/error.rs @@ -0,0 +1,93 @@ +#[derive(Debug, thiserror::Error)] +pub enum MissingSpecError { + #[error("missing process in spec")] + Process, + #[error("missing linux in spec")] + Linux, + #[error("missing args in the process spec")] + Args, + #[error("missing root in the spec")] + Root, +} + +#[derive(Debug, thiserror::Error)] +pub enum LibcontainerError { + #[error("failed to perform operation due to incorrect container status")] + IncorrectStatus, + #[error("container already exists")] + Exist, + #[error("container state directory does not exist")] + NoDirectory, + #[error("invalid input")] + InvalidInput(String), + #[error("requires at least one executors")] + NoExecutors, + + // Invalid inputs + #[error(transparent)] + InvalidID(#[from] ErrInvalidID), + #[error(transparent)] + MissingSpec(#[from] MissingSpecError), + #[error("invalid runtime spec")] + InvalidSpec(#[from] ErrInvalidSpec), + + // Errors from submodules and other errors + #[error(transparent)] + Tty(#[from] crate::tty::TTYError), + #[error(transparent)] + Rootless(#[from] crate::rootless::RootlessError), + #[error(transparent)] + NotifyListener(#[from] crate::notify_socket::NotifyListenerError), + #[error(transparent)] + Config(#[from] crate::config::ConfigError), + #[error(transparent)] + Hook(#[from] crate::hooks::HookError), + #[error(transparent)] + State(#[from] crate::container::state::StateError), + #[error("oci spec error")] + Spec(#[from] oci_spec::OciSpecError), + #[error(transparent)] + MainProcess(#[from] crate::process::container_main_process::ProcessError), + #[error(transparent)] + Procfs(#[from] procfs::ProcError), + #[error(transparent)] + Capabilities(#[from] caps::errors::CapsError), + #[error(transparent)] + CgroupManager(#[from] libcgroups::common::AnyManagerError), 
+ #[error(transparent)] + CgroupCreate(#[from] libcgroups::common::CreateCgroupSetupError), + #[error(transparent)] + CgroupGet(#[from] libcgroups::common::GetCgroupSetupError), + + // Catch all errors that are not covered by the above + #[error("syscall error")] + OtherSyscall(#[source] nix::Error), + #[error("io error")] + OtherIO(#[source] std::io::Error), + #[error("serialization error")] + OtherSerialization(#[source] serde_json::Error), + #[error("{0}")] + OtherCgroup(String), + #[error("{0}")] + Other(String), +} + +#[derive(Debug, thiserror::Error)] +pub enum ErrInvalidID { + #[error("container id can't be empty")] + Empty, + #[error("container id contains invalid characters: {0}")] + InvalidChars(char), + #[error("container id can't be used to represent a file name (such as . or ..)")] + FileName, +} + +#[derive(Debug, thiserror::Error)] +pub enum ErrInvalidSpec { + #[error("runtime spec has incompatible version. Only 1.X.Y is supported")] + UnsupportedVersion, + #[error("apparmor is specified but not enabled on this system")] + AppArmorNotEnabled, + #[error("invalid io priority or class.")] + IoPriority, +} diff --git a/crates/libcontainer/src/hooks.rs b/crates/libcontainer/src/hooks.rs index 817459535..f12cc6039 100644 --- a/crates/libcontainer/src/hooks.rs +++ b/crates/libcontainer/src/hooks.rs @@ -1,29 +1,38 @@ -use anyhow::{bail, Context, Result}; use nix::{sys::signal, unistd::Pid}; use oci_spec::runtime::Hook; use std::{ - collections::HashMap, fmt, io::ErrorKind, io::Write, os::unix::prelude::CommandExt, process, + collections::HashMap, + io::ErrorKind, + io::Write, + os::unix::prelude::CommandExt, + process::{self}, thread, time, }; use crate::{container::Container, utils}; -// A special error used to signal a timeout. We want to differentiate between a -// timeout vs. other error. 
-#[derive(Debug)] -pub struct HookTimeoutError; -impl std::error::Error for HookTimeoutError {} -impl fmt::Display for HookTimeoutError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - "hook command timeout".fmt(f) - } + +#[derive(Debug, thiserror::Error)] +pub enum HookError { + #[error("failed to execute hook command")] + CommandExecute(#[source] std::io::Error), + #[error("failed to encode container state")] + EncodeContainerState(#[source] serde_json::Error), + #[error("hook command exited with non-zero exit code: {0}")] + NonZeroExitCode(i32), + #[error("hook command was killed by a signal")] + Killed, + #[error("failed to execute hook command due to a timeout")] + Timeout, + #[error("container state is required to run hook")] + MissingContainerState, + #[error("failed to write container state to stdin")] + WriteContainerState(#[source] std::io::Error), } -pub fn run_hooks(hooks: Option<&Vec>, container: Option<&Container>) -> Result<()> { - if container.is_none() { - bail!("container state is required to run hook"); - } +type Result = std::result::Result; - let state = &container.unwrap().state; +pub fn run_hooks(hooks: Option<&Vec>, container: Option<&Container>) -> Result<()> { + let state = &(container.ok_or(HookError::MissingContainerState)?.state); if let Some(hooks) = hooks { for hook in hooks { @@ -31,11 +40,11 @@ pub fn run_hooks(hooks: Option<&Vec>, container: Option<&Container>) -> Re // Based on OCI spec, the first argument of the args vector is the // arg0, which can be different from the path. For example, path // may be "/usr/bin/true" and arg0 is set to "true". However, rust - // command differenciates arg0 from args, where rust command arg + // command differentiates arg0 from args, where rust command arg // doesn't include arg0. So we have to make the split arg0 from the // rest of args. 
if let Some((arg0, args)) = hook.args().as_ref().and_then(|a| a.split_first()) { - log::debug!("run_hooks arg0: {:?}, args: {:?}", arg0, args); + tracing::debug!("run_hooks arg0: {:?}, args: {:?}", arg0, args); hook_command.arg0(arg0).args(args) } else { hook_command.arg0(&hook.path().display().to_string()) @@ -46,14 +55,14 @@ pub fn run_hooks(hooks: Option<&Vec>, container: Option<&Container>) -> Re } else { HashMap::new() }; - log::debug!("run_hooks envs: {:?}", envs); + tracing::debug!("run_hooks envs: {:?}", envs); let mut hook_process = hook_command .env_clear() .envs(envs) .stdin(process::Stdio::piped()) .spawn() - .with_context(|| "Failed to execute hook")?; + .map_err(HookError::CommandExecute)?; let hook_process_pid = Pid::from_raw(hook_process.id() as i32); // Based on the OCI spec, we need to pipe the container state into // the hook command through stdin. @@ -67,13 +76,13 @@ pub fn run_hooks(hooks: Option<&Vec>, container: Option<&Container>) -> Re // error, in the case that the hook command is waiting for us to // write to stdin. let encoded_state = - serde_json::to_string(state).context("failed to encode container state")?; + serde_json::to_string(state).map_err(HookError::EncodeContainerState)?; if let Err(e) = stdin.write_all(encoded_state.as_bytes()) { if e.kind() != ErrorKind::BrokenPipe { // Not a broken pipe. The hook command may be waiting // for us. let _ = signal::kill(hook_process_pid, signal::Signal::SIGKILL); - bail!("failed to write container state to stdin: {:?}", e); + return Err(HookError::WriteContainerState(e)); } } } @@ -89,18 +98,18 @@ pub fn run_hooks(hooks: Option<&Vec>, container: Option<&Container>) -> Re // use pid to identify the process and send a kill signal. This // is what the Command.kill() does under the hood anyway. When // timeout, we have to kill the process and clean up properly. 
- let (s, r) = crossbeam_channel::unbounded(); + let (s, r) = std::sync::mpsc::channel(); thread::spawn(move || { let res = hook_process.wait(); let _ = s.send(res); }); match r.recv_timeout(time::Duration::from_secs(timeout_sec as u64)) { Ok(res) => res, - Err(crossbeam_channel::RecvTimeoutError::Timeout) => { + Err(std::sync::mpsc::RecvTimeoutError::Timeout) => { // Kill the process. There is no need to further clean // up because we will be error out. let _ = signal::kill(hook_process_pid, signal::Signal::SIGKILL); - return Err(HookTimeoutError.into()); + return Err(HookError::Timeout); } Err(_) => { unreachable!(); @@ -112,21 +121,12 @@ pub fn run_hooks(hooks: Option<&Vec>, container: Option<&Container>) -> Re match res { Ok(exit_status) => match exit_status.code() { - Some(0) => {} - Some(exit_code) => { - bail!( - "Failed to execute hook command. Non-zero return code. {:?}", - exit_code - ); - } - None => { - bail!("Process is killed by signal"); - } + Some(0) => Ok(()), + Some(exit_code) => Err(HookError::NonZeroExitCode(exit_code)), + None => Err(HookError::Killed), }, - Err(e) => { - bail!("Failed to execute hook command: {:?}", e); - } - } + Err(e) => Err(HookError::CommandExecute(e)), + }?; } } @@ -136,7 +136,7 @@ pub fn run_hooks(hooks: Option<&Vec>, container: Option<&Container>) -> Re #[cfg(test)] mod test { use super::*; - use anyhow::{bail, Result}; + use anyhow::{bail, Context, Result}; use oci_spec::runtime::HookBuilder; use serial_test::serial; use std::{env, fs}; @@ -144,7 +144,7 @@ mod test { fn is_command_in_path(program: &str) -> bool { if let Ok(path) = env::var("PATH") { for p in path.split(':') { - let p_str = format!("{}/{}", p, program); + let p_str = format!("{p}/{program}"); if fs::metadata(p_str).is_ok() { return true; } @@ -221,14 +221,14 @@ mod test { Ok(_) => { bail!("The test expects the hook to error out with timeout. 
Should not execute cleanly"); } + Err(HookError::Timeout) => {} Err(err) => { - // We want to make sure the error returned is indeed timeout - // error. All other errors are considered failure. - if !err.is::() { - bail!("Failed to execute hook: {:?}", err); - } + bail!( + "The test expects the hook to error out with timeout. Got error: {}", + err + ); } - } + }; Ok(()) } diff --git a/crates/libcontainer/src/lib.rs b/crates/libcontainer/src/lib.rs index 4d28afaab..face7251e 100644 --- a/crates/libcontainer/src/lib.rs +++ b/crates/libcontainer/src/lib.rs @@ -1,16 +1,26 @@ pub mod apparmor; pub mod capabilities; +pub mod channel; pub mod config; pub mod container; +pub mod error; pub mod hooks; pub mod namespaces; pub mod notify_socket; pub mod process; pub mod rootfs; pub mod rootless; +#[cfg(feature = "libseccomp")] pub mod seccomp; pub mod signal; pub mod syscall; +pub mod test_utils; pub mod tty; pub mod utils; pub mod workload; + +// Because the `libcontainer` api uses the oci_spec who resides in a different +// crate, we re-export the version of oci_spec this crate uses. +// Ref: https://github.com/containers/youki/issues/2066 +// Ref: https://github.com/rust-lang/api-guidelines/discussions/176 +pub use oci_spec; diff --git a/crates/libcontainer/src/namespaces.rs b/crates/libcontainer/src/namespaces.rs index 5135cdcca..22c941301 100644 --- a/crates/libcontainer/src/namespaces.rs +++ b/crates/libcontainer/src/namespaces.rs @@ -8,11 +8,24 @@ //! Cgroup (Resource limits, execution priority etc.) 
use crate::syscall::{syscall::create_syscall, Syscall}; -use anyhow::{Context, Result}; use nix::{fcntl, sched::CloneFlags, sys::stat, unistd}; use oci_spec::runtime::{LinuxNamespace, LinuxNamespaceType}; use std::collections; +type Result = std::result::Result; + +#[derive(Debug, thiserror::Error)] +pub enum NamespaceError { + #[error(transparent)] + Nix(#[from] nix::Error), + #[error(transparent)] + IO(#[from] std::io::Error), + #[error(transparent)] + Syscall(#[from] crate::syscall::SyscallError), + #[error("Namespace type not supported: {0}")] + NotSupported(String), +} + static ORDERED_NAMESPACES: &[CloneFlags] = &[ CloneFlags::CLONE_NEWUSER, CloneFlags::CLONE_NEWPID, @@ -29,8 +42,8 @@ pub struct Namespaces { namespace_map: collections::HashMap, } -fn get_clone_flag(namespace_type: LinuxNamespaceType) -> CloneFlags { - match namespace_type { +fn get_clone_flag(namespace_type: LinuxNamespaceType) -> Result { + let flag = match namespace_type { LinuxNamespaceType::User => CloneFlags::CLONE_NEWUSER, LinuxNamespaceType::Pid => CloneFlags::CLONE_NEWPID, LinuxNamespaceType::Uts => CloneFlags::CLONE_NEWUTS, @@ -38,22 +51,32 @@ fn get_clone_flag(namespace_type: LinuxNamespaceType) -> CloneFlags { LinuxNamespaceType::Network => CloneFlags::CLONE_NEWNET, LinuxNamespaceType::Cgroup => CloneFlags::CLONE_NEWCGROUP, LinuxNamespaceType::Mount => CloneFlags::CLONE_NEWNS, - } + LinuxNamespaceType::Time => return Err(NamespaceError::NotSupported("time".to_string())), + }; + + Ok(flag) } -impl From>> for Namespaces { - fn from(namespaces: Option<&Vec>) -> Self { +impl TryFrom>> for Namespaces { + type Error = NamespaceError; + + fn try_from(namespaces: Option<&Vec>) -> Result { let command: Box = create_syscall(); let namespace_map: collections::HashMap = namespaces .unwrap_or(&vec![]) .iter() - .map(|ns| (get_clone_flag(ns.typ()), ns.clone())) + .map(|ns| match get_clone_flag(ns.typ()) { + Ok(flag) => Ok((flag, ns.clone())), + Err(err) => Err(err), + }) + .collect::>>()? 
+ .into_iter() .collect(); - Namespaces { + Ok(Namespaces { command, namespace_map, - } + }) } } @@ -65,32 +88,48 @@ impl Namespaces { .filter_map(|c| self.namespace_map.get_key_value(c)) .collect(); - for (ns_type, ns) in to_enter { - self.unshare_or_setns(ns) - .with_context(|| format!("failed to enter {:?} namespace: {:?}", ns_type, ns))?; + for (_, ns) in to_enter { + self.unshare_or_setns(ns)?; } Ok(()) } pub fn unshare_or_setns(&self, namespace: &LinuxNamespace) -> Result<()> { - log::debug!("unshare or setns: {:?}", namespace); - if namespace.path().is_none() { - self.command.unshare(get_clone_flag(namespace.typ()))?; - } else { - let ns_path = namespace.path().as_ref().unwrap(); - let fd = fcntl::open(ns_path, fcntl::OFlag::empty(), stat::Mode::empty()) - .with_context(|| format!("failed to open namespace fd: {:?}", ns_path))?; - self.command - .set_ns(fd, get_clone_flag(namespace.typ())) - .with_context(|| "failed to set namespace")?; - unistd::close(fd).with_context(|| "failed to close namespace fd")?; + tracing::debug!("unshare or setns: {:?}", namespace); + match namespace.path() { + Some(path) => { + let fd = fcntl::open(path, fcntl::OFlag::empty(), stat::Mode::empty()).map_err( + |err| { + tracing::error!(?err, ?namespace, "failed to open namespace file"); + err + }, + )?; + self.command + .set_ns(fd, get_clone_flag(namespace.typ())?) + .map_err(|err| { + tracing::error!(?err, ?namespace, "failed to set namespace"); + err + })?; + unistd::close(fd).map_err(|err| { + tracing::error!(?err, ?namespace, "failed to close namespace file"); + err + })?; + } + None => { + self.command + .unshare(get_clone_flag(namespace.typ())?) 
+ .map_err(|err| { + tracing::error!(?err, ?namespace, "failed to unshare namespace"); + err + })?; + } } Ok(()) } - pub fn get(&self, k: LinuxNamespaceType) -> Option<&LinuxNamespace> { - self.namespace_map.get(&get_clone_flag(k)) + pub fn get(&self, k: LinuxNamespaceType) -> Result> { + Ok(self.namespace_map.get(&get_clone_flag(k)?)) } } @@ -132,7 +171,8 @@ mod tests { #[serial] fn test_apply_namespaces() { let sample_linux_namespaces = gen_sample_linux_namespaces(); - let namespaces = Namespaces::from(Some(&sample_linux_namespaces)); + let namespaces = Namespaces::try_from(Some(&sample_linux_namespaces)) + .expect("create namespace struct should be good"); let test_command: &TestHelperSyscall = namespaces.command.as_any().downcast_ref().unwrap(); assert!(namespaces .apply_namespaces(|ns_type| { ns_type != CloneFlags::CLONE_NEWIPC }) diff --git a/crates/libcontainer/src/notify_socket.rs b/crates/libcontainer/src/notify_socket.rs index b95285914..64837ca87 100644 --- a/crates/libcontainer/src/notify_socket.rs +++ b/crates/libcontainer/src/notify_socket.rs @@ -1,36 +1,76 @@ -use anyhow::{bail, Context, Result}; use nix::unistd::{self, close}; use std::env; use std::io::prelude::*; +use std::os::fd::FromRawFd; use std::os::unix::io::AsRawFd; use std::os::unix::net::{UnixListener, UnixStream}; use std::path::{Path, PathBuf}; pub const NOTIFY_FILE: &str = "notify.sock"; +#[derive(Debug, thiserror::Error)] +pub enum NotifyListenerError { + #[error("failed to chdir {path} while creating notify socket: {source}")] + Chdir { source: nix::Error, path: PathBuf }, + #[error("invalid path: {0}")] + InvalidPath(PathBuf), + #[error("failed to bind notify socket: {name}")] + Bind { + source: std::io::Error, + name: String, + }, + #[error("failed to connect to notify socket: {name}")] + Connect { + source: std::io::Error, + name: String, + }, + #[error("failed to get cwd")] + GetCwd(#[source] std::io::Error), + #[error("failed to accept notify listener")] + Accept(#[source] 
std::io::Error), + #[error("failed to close notify listener")] + Close(#[source] nix::errno::Errno), + #[error("failed to read notify listener")] + Read(#[source] std::io::Error), + #[error("failed to send start container")] + SendStartContainer(#[source] std::io::Error), +} + +type Result = std::result::Result; + pub struct NotifyListener { socket: UnixListener, } impl NotifyListener { pub fn new(socket_path: &Path) -> Result { - // unix domain socket has a maximum length of 108, different from - // normal path length of 255. due to how docker create the path name + tracing::debug!(?socket_path, "create notify listener"); + // Unix domain socket has a maximum length of 108, different from + // normal path length of 255. Due to how docker create the path name // to the container working directory, there is a high chance that - // the full absolute path is over the limit. to work around this + // the full absolute path is over the limit. To work around this // limitation, we chdir first into the workdir where the socket is, // and chdir back after the socket is created. 
- let workdir = socket_path.parent().unwrap(); - let socket_name = socket_path.file_name().unwrap(); - let cwd = unistd::getcwd().context("Failed to get cwd")?; - unistd::chdir(workdir).context(format!( - "Failed to chdir into {}", - workdir.to_str().unwrap() - ))?; - let stream = UnixListener::bind(socket_name) - .context(format!("Failed to bind {}", socket_name.to_str().unwrap()))?; - unistd::chdir(&cwd) - .context(format!("Failed to chdir back to {}", cwd.to_str().unwrap()))?; + let workdir = socket_path + .parent() + .ok_or_else(|| NotifyListenerError::InvalidPath(socket_path.to_owned()))?; + let socket_name = socket_path + .file_name() + .ok_or_else(|| NotifyListenerError::InvalidPath(socket_path.to_owned()))?; + let cwd = env::current_dir().map_err(NotifyListenerError::GetCwd)?; + tracing::debug!(?cwd, "the cwd to create the notify socket"); + unistd::chdir(workdir).map_err(|e| NotifyListenerError::Chdir { + source: e, + path: workdir.to_owned(), + })?; + let stream = UnixListener::bind(socket_name).map_err(|e| NotifyListenerError::Bind { + source: e, + name: socket_name.to_str().unwrap().to_owned(), + })?; + unistd::chdir(&cwd).map_err(|e| NotifyListenerError::Chdir { + source: e, + path: cwd, + })?; Ok(Self { socket: stream }) } @@ -39,21 +79,40 @@ impl NotifyListener { match self.socket.accept() { Ok((mut socket, _)) => { let mut response = String::new(); - socket.read_to_string(&mut response)?; - log::debug!("received: {}", response); + socket + .read_to_string(&mut response) + .map_err(NotifyListenerError::Read)?; + tracing::debug!("received: {}", response); } - Err(e) => bail!("accept function failed: {:?}", e), + Err(e) => Err(NotifyListenerError::Accept(e))?, } Ok(()) } pub fn close(&self) -> Result<()> { - close(self.socket.as_raw_fd())?; + close(self.socket.as_raw_fd()).map_err(NotifyListenerError::Close)?; Ok(()) } } +impl Clone for NotifyListener { + fn clone(&self) -> Self { + let fd = self.socket.as_raw_fd(); + // This is safe because we just 
duplicate a valid fd. Theoretically, to + // truly clone a unix listener, we have to use dup(2) to duplicate the + // fd, and then use from_raw_fd to create a new UnixListener. However, + // for our purposes, fd is just an integer to pass around for the same + // socket. Our main usage is to pass the notify_listener across process + // boundary. Since fd tables are cloned during clone/fork calls, this + // should be safe to use, as long as we be careful with not closing the + // same fd in different places. If we observe an issue, we will switch + // to `dup`. + let socket = unsafe { UnixListener::from_raw_fd(fd) }; + Self { socket } + } +} + pub struct NotifySocket { path: PathBuf, } @@ -66,13 +125,66 @@ impl NotifySocket { } pub fn notify_container_start(&mut self) -> Result<()> { - log::debug!("notify container start"); - let cwd = env::current_dir()?; - unistd::chdir(self.path.parent().unwrap())?; - let mut stream = UnixStream::connect(self.path.file_name().unwrap())?; - stream.write_all(b"start container")?; - log::debug!("notify finished"); - unistd::chdir(&cwd)?; + tracing::debug!("notify container start"); + let cwd = env::current_dir().map_err(NotifyListenerError::GetCwd)?; + let workdir = self + .path + .parent() + .ok_or_else(|| NotifyListenerError::InvalidPath(self.path.to_owned()))?; + unistd::chdir(workdir).map_err(|e| NotifyListenerError::Chdir { + source: e, + path: workdir.to_owned(), + })?; + let socket_name = self + .path + .file_name() + .ok_or_else(|| NotifyListenerError::InvalidPath(self.path.to_owned()))?; + let mut stream = + UnixStream::connect(socket_name).map_err(|e| NotifyListenerError::Connect { + source: e, + name: socket_name.to_str().unwrap().to_owned(), + })?; + stream + .write_all(b"start container") + .map_err(NotifyListenerError::SendStartContainer)?; + tracing::debug!("notify finished"); + unistd::chdir(&cwd).map_err(|e| NotifyListenerError::Chdir { + source: e, + path: cwd, + })?; Ok(()) } } + +#[cfg(test)] +mod test { + use 
tempfile::tempdir; + + use super::*; + + #[test] + /// Test that the listener can be cloned and function correctly. This test + /// also serves as a test for the normal case. + fn test_notify_listener_clone() { + let tempdir = tempdir().unwrap(); + let socket_path = tempdir.path().join("notify.sock"); + // listener needs to be created first because it will create the socket. + let listener = NotifyListener::new(&socket_path).unwrap(); + let mut socket = NotifySocket::new(socket_path.clone()); + // This is safe without race because the unix domain socket is already + // created. It is OK for the socket to send the start notification + // before the listener wait is called. + let thread_handle = std::thread::spawn({ + move || { + // We clone the listener and listen on the cloned listener to + // make sure the cloned fd functions correctly. + let cloned_listener = listener.clone(); + cloned_listener.wait_for_container_start().unwrap(); + cloned_listener.close().unwrap(); + } + }); + + socket.notify_container_start().unwrap(); + thread_handle.join().unwrap(); + } +} diff --git a/crates/libcontainer/src/process/args.rs b/crates/libcontainer/src/process/args.rs index 6b36c3396..148fb2af0 100644 --- a/crates/libcontainer/src/process/args.rs +++ b/crates/libcontainer/src/process/args.rs @@ -1,38 +1,44 @@ -use libcgroups::common::CgroupManager; +use libcgroups::common::CgroupConfig; use oci_spec::runtime::Spec; use std::os::unix::prelude::RawFd; use std::path::PathBuf; +use std::rc::Rc; +use crate::container::Container; +use crate::notify_socket::NotifyListener; use crate::rootless::Rootless; -use crate::{container::Container, notify_socket::NotifyListener, syscall::Syscall}; - +use crate::syscall::syscall::SyscallType; +use crate::workload::Executor; #[derive(Debug, Copy, Clone)] pub enum ContainerType { InitContainer, TenantContainer { exec_notify_fd: RawFd }, } -pub struct ContainerArgs<'a> { +#[derive(Clone)] +pub struct ContainerArgs { /// Indicates if an init or a 
tenant container should be created pub container_type: ContainerType, /// Interface to operating system primitives - pub syscall: &'a dyn Syscall, - /// OCI complient runtime spec - pub spec: &'a Spec, + pub syscall: SyscallType, + /// OCI compliant runtime spec + pub spec: Rc, /// Root filesystem of the container - pub rootfs: &'a PathBuf, + pub rootfs: PathBuf, /// Socket to communicate the file descriptor of the ptty pub console_socket: Option, /// The Unix Domain Socket to communicate container start - pub notify_socket: NotifyListener, - /// File descriptos preserved/passed to the container init process. + pub notify_listener: NotifyListener, + /// File descriptors preserved/passed to the container init process. pub preserve_fds: i32, /// Container state - pub container: &'a Option, + pub container: Option, /// Options for rootless containers - pub rootless: &'a Option>, - /// Cgroup Manager - pub cgroup_manager: Box, + pub rootless: Option, + /// Cgroup Manager Config + pub cgroup_config: CgroupConfig, /// If the container is to be run in detached mode pub detached: bool, + /// Manage the functions that actually run on the container + pub executor: Executor, } diff --git a/crates/libcontainer/src/process/channel.rs b/crates/libcontainer/src/process/channel.rs index d049e6d7d..5a03c4840 100644 --- a/crates/libcontainer/src/process/channel.rs +++ b/crates/libcontainer/src/process/channel.rs @@ -1,15 +1,28 @@ +use crate::channel::{channel, Receiver, Sender}; use crate::process::message::Message; -use anyhow::{bail, Context, Result}; -use nix::{ - sys::socket::{self, UnixAddr}, - unistd::{self, Pid}, -}; -use serde::{Deserialize, Serialize}; -use std::{ - io::{IoSlice, IoSliceMut}, - marker::PhantomData, - os::unix::prelude::{AsRawFd, RawFd}, -}; +use nix::unistd::Pid; +use std::os::unix::prelude::{AsRawFd, RawFd}; + +#[derive(Debug, thiserror::Error)] +pub enum ChannelError { + #[error("received unexpected message: {received:?}, expected: {expected:?}")] + 
UnexpectedMessage { + expected: Message, + received: Message, + }, + #[error("failed to receive. {msg:?}. {source:?}")] + ReceiveError { + msg: String, + #[source] + source: crate::channel::ChannelError, + }, + #[error(transparent)] + BaseChannelError(#[from] crate::channel::ChannelError), + #[error("missing fds from seccomp request")] + MissingSeccompFds, + #[error("exec process failed with error {0}")] + ExecError(String), +} /// Channel Design /// @@ -22,11 +35,12 @@ use std::{ /// processes will share the main_sender and use it to send message to the main /// process. -pub fn main_channel() -> Result<(MainSender, MainReceiver)> { +pub fn main_channel() -> Result<(MainSender, MainReceiver), ChannelError> { let (sender, receiver) = channel::()?; Ok((MainSender { sender }, MainReceiver { receiver })) } +#[derive(Clone)] pub struct MainSender { sender: Sender, } @@ -34,44 +48,47 @@ pub struct MainSender { impl MainSender { // requests the Main to write the id mappings for the intermediate process // this needs to be done from the parent see https://man7.org/linux/man-pages/man7/user_namespaces.7.html - pub fn identifier_mapping_request(&mut self) -> Result<()> { - log::debug!("send identifier mapping request"); + pub fn identifier_mapping_request(&mut self) -> Result<(), ChannelError> { + tracing::debug!("send identifier mapping request"); self.sender.send(Message::WriteMapping)?; Ok(()) } - pub fn seccomp_notify_request(&mut self, fd: RawFd) -> Result<()> { + pub fn seccomp_notify_request(&mut self, fd: RawFd) -> Result<(), ChannelError> { self.sender .send_fds(Message::SeccompNotify, &[fd.as_raw_fd()])?; Ok(()) } - pub fn intermediate_ready(&mut self, pid: Pid) -> Result<()> { + pub fn intermediate_ready(&mut self, pid: Pid) -> Result<(), ChannelError> { // Send over the IntermediateReady follow by the pid. 
- log::debug!("sending init pid ({:?})", pid); + tracing::debug!("sending init pid ({:?})", pid); self.sender.send(Message::IntermediateReady(pid.as_raw()))?; Ok(()) } - pub fn init_ready(&mut self) -> Result<()> { + pub fn init_ready(&mut self) -> Result<(), ChannelError> { self.sender.send(Message::InitReady)?; Ok(()) } - pub fn exec_failed(&mut self, err: String) -> Result<()> { + pub fn exec_failed(&mut self, err: String) -> Result<(), ChannelError> { self.sender.send(Message::ExecFailed(err))?; Ok(()) } - pub fn close(&self) -> Result<()> { - self.sender.close() + pub fn close(&self) -> Result<(), ChannelError> { + self.sender.close()?; + + Ok(()) } } +#[derive(Clone)] pub struct MainReceiver { receiver: Receiver, } @@ -79,79 +96,98 @@ pub struct MainReceiver { impl MainReceiver { /// Waits for associated intermediate process to send ready message /// and return the pid of init process which is forked by intermediate process - pub fn wait_for_intermediate_ready(&mut self) -> Result { + pub fn wait_for_intermediate_ready(&mut self) -> Result { let msg = self .receiver .recv() - .context("failed to receive a message from the intermediate process")?; + .map_err(|err| ChannelError::ReceiveError { + msg: "waiting for intermediate process".to_string(), + source: err, + })?; match msg { Message::IntermediateReady(pid) => Ok(Pid::from_raw(pid)), - Message::ExecFailed(err) => bail!("exec process failed with error {}", err), - _ => bail!( - "receive unexpected message {:?} waiting for intermediate ready", - msg - ), + Message::ExecFailed(err) => Err(ChannelError::ExecError(err)), + msg => Err(ChannelError::UnexpectedMessage { + expected: Message::IntermediateReady(0), + received: msg, + }), } } - pub fn wait_for_mapping_request(&mut self) -> Result<()> { + pub fn wait_for_mapping_request(&mut self) -> Result<(), ChannelError> { let msg = self .receiver .recv() - .context("failed to wait for mapping request")?; + .map_err(|err| ChannelError::ReceiveError { + msg: 
"waiting for mapping request".to_string(), + source: err, + })?; match msg { Message::WriteMapping => Ok(()), - msg => bail!( - "receive unexpected message {:?} waiting for mapping request", - msg - ), + msg => Err(ChannelError::UnexpectedMessage { + expected: Message::WriteMapping, + received: msg, + }), } } - pub fn wait_for_seccomp_request(&mut self) -> Result { - let (msg, fds) = self - .receiver - .recv_with_fds::<[RawFd; 1]>() - .context("failed to wait for seccomp request")?; + pub fn wait_for_seccomp_request(&mut self) -> Result { + let (msg, fds) = self.receiver.recv_with_fds::<[RawFd; 1]>().map_err(|err| { + ChannelError::ReceiveError { + msg: "waiting for seccomp request".to_string(), + source: err, + } + })?; match msg { Message::SeccompNotify => { let fd = match fds { - Some(fds) => fds[0], - None => bail!("expecting fds from seccomp request"), - }; + Some(fds) => { + if fds.is_empty() { + Err(ChannelError::MissingSeccompFds) + } else { + Ok(fds[0]) + } + } + None => Err(ChannelError::MissingSeccompFds), + }?; Ok(fd) } - msg => bail!( - "receive unexpected message {:?} waiting for seccomp request", - msg - ), + msg => Err(ChannelError::UnexpectedMessage { + expected: Message::SeccompNotify, + received: msg, + }), } } /// Waits for associated init process to send ready message /// and return the pid of init process which is forked by init process - pub fn wait_for_init_ready(&mut self) -> Result<()> { + pub fn wait_for_init_ready(&mut self) -> Result<(), ChannelError> { let msg = self .receiver .recv() - .context("failed to wait for init ready")?; + .map_err(|err| ChannelError::ReceiveError { + msg: "waiting for init ready".to_string(), + source: err, + })?; match msg { Message::InitReady => Ok(()), - msg => bail!( - "receive unexpected message {:?} waiting for init ready", - msg - ), + msg => Err(ChannelError::UnexpectedMessage { + expected: Message::InitReady, + received: msg, + }), } } - pub fn close(&self) -> Result<()> { - self.receiver.close() + 
pub fn close(&self) -> Result<(), ChannelError> { + self.receiver.close()?; + + Ok(()) } } -pub fn intermediate_channel() -> Result<(IntermediateSender, IntermediateReceiver)> { +pub fn intermediate_channel() -> Result<(IntermediateSender, IntermediateReceiver), ChannelError> { let (sender, receiver) = channel::()?; Ok(( IntermediateSender { sender }, @@ -159,286 +195,117 @@ pub fn intermediate_channel() -> Result<(IntermediateSender, IntermediateReceive )) } +#[derive(Clone)] pub struct IntermediateSender { sender: Sender, } impl IntermediateSender { - pub fn mapping_written(&mut self) -> Result<()> { - log::debug!("identifier mapping written"); + pub fn mapping_written(&mut self) -> Result<(), ChannelError> { + tracing::debug!("identifier mapping written"); self.sender.send(Message::MappingWritten)?; Ok(()) } - pub fn close(&self) -> Result<()> { - self.sender.close() + pub fn close(&self) -> Result<(), ChannelError> { + self.sender.close()?; + + Ok(()) } } +#[derive(Clone)] pub struct IntermediateReceiver { receiver: Receiver, } impl IntermediateReceiver { // wait until the parent process has finished writing the id mappings - pub fn wait_for_mapping_ack(&mut self) -> Result<()> { - log::debug!("waiting for mapping ack"); + pub fn wait_for_mapping_ack(&mut self) -> Result<(), ChannelError> { + tracing::debug!("waiting for mapping ack"); let msg = self .receiver .recv() - .context("failed to wait for init ready")?; + .map_err(|err| ChannelError::ReceiveError { + msg: "waiting for mapping ack".to_string(), + source: err, + })?; match msg { Message::MappingWritten => Ok(()), - msg => bail!( - "receive unexpected message {:?} waiting for init ready", - msg - ), + msg => Err(ChannelError::UnexpectedMessage { + expected: Message::MappingWritten, + received: msg, + }), } } - pub fn close(&self) -> Result<()> { - self.receiver.close() + pub fn close(&self) -> Result<(), ChannelError> { + self.receiver.close()?; + + Ok(()) } } -pub fn init_channel() -> 
Result<(InitSender, InitReceiver)> { +pub fn init_channel() -> Result<(InitSender, InitReceiver), ChannelError> { let (sender, receiver) = channel::()?; Ok((InitSender { sender }, InitReceiver { receiver })) } +#[derive(Clone)] pub struct InitSender { sender: Sender, } impl InitSender { - pub fn seccomp_notify_done(&mut self) -> Result<()> { + pub fn seccomp_notify_done(&mut self) -> Result<(), ChannelError> { self.sender.send(Message::SeccompNotifyDone)?; Ok(()) } - pub fn close(&self) -> Result<()> { - self.sender.close() + pub fn close(&self) -> Result<(), ChannelError> { + self.sender.close()?; + + Ok(()) } } +#[derive(Clone)] pub struct InitReceiver { receiver: Receiver, } impl InitReceiver { - pub fn wait_for_seccomp_request_done(&mut self) -> Result<()> { + pub fn wait_for_seccomp_request_done(&mut self) -> Result<(), ChannelError> { let msg = self .receiver .recv() - .context("failed to wait for seccomp request")?; + .map_err(|err| ChannelError::ReceiveError { + msg: "waiting for seccomp request".to_string(), + source: err, + })?; match msg { Message::SeccompNotifyDone => Ok(()), - msg => bail!( - "receive unexpected message {:?} waiting for seccomp done request", - msg - ), - } - } - - pub fn close(&self) -> Result<()> { - self.receiver.close() - } -} - -pub struct Receiver { - receiver: RawFd, - phantom: PhantomData, -} - -pub struct Sender { - sender: RawFd, - phantom: PhantomData, -} - -impl Sender -where - T: Serialize, -{ - fn send_iovec(&mut self, iov: &[IoSlice], fds: Option<&[RawFd]>) -> Result { - let cmsgs = if let Some(fds) = fds { - vec![socket::ControlMessage::ScmRights(fds)] - } else { - vec![] - }; - socket::sendmsg::(self.sender, iov, &cmsgs, socket::MsgFlags::empty(), None) - .map_err(|e| e.into()) - } - - fn send_slice_with_len(&mut self, data: &[u8], fds: Option<&[RawFd]>) -> Result { - let len = data.len() as u64; - // Here we prefix the length of the data onto the serialized data. 
- let iov = [ - IoSlice::new(unsafe { - std::slice::from_raw_parts( - (&len as *const u64) as *const u8, - std::mem::size_of::(), - ) + msg => Err(ChannelError::UnexpectedMessage { + expected: Message::SeccompNotifyDone, + received: msg, }), - IoSlice::new(data), - ]; - self.send_iovec(&iov[..], fds) - } - - pub fn send(&mut self, object: T) -> Result<()> { - let payload = serde_json::to_vec(&object)?; - self.send_slice_with_len(&payload, None)?; - - Ok(()) - } - - pub fn send_fds(&mut self, object: T, fds: &[RawFd]) -> Result<()> { - let payload = serde_json::to_vec(&object)?; - self.send_slice_with_len(&payload, Some(fds))?; - - Ok(()) - } - - pub fn close(&self) -> Result<()> { - Ok(unistd::close(self.sender)?) - } -} - -impl Receiver -where - T: serde::de::DeserializeOwned, -{ - fn peek_size_iovec(&mut self) -> Result { - let mut len: u64 = 0; - let mut iov = [IoSliceMut::new(unsafe { - std::slice::from_raw_parts_mut( - (&mut len as *mut u64) as *mut u8, - std::mem::size_of::(), - ) - })]; - let _ = - socket::recvmsg::(self.receiver, &mut iov, None, socket::MsgFlags::MSG_PEEK)?; - match len { - 0 => bail!("channel connection broken"), - _ => Ok(len), } } - fn recv_into_iovec(&mut self, iov: &mut [IoSliceMut]) -> Result<(usize, Option)> - where - F: Default + AsMut<[RawFd]>, - { - let mut cmsgspace = nix::cmsg_space!(F); - let msg = socket::recvmsg::( - self.receiver, - iov, - Some(&mut cmsgspace), - socket::MsgFlags::MSG_CMSG_CLOEXEC, - )?; - - // Sending multiple SCM_RIGHTS message will led to platform dependent - // behavior, with some system choose to return EINVAL when sending or - // silently only process the first msg or send all of it. Here we assume - // there is only one SCM_RIGHTS message and will only process the first - // message. 
- let fds: Option = msg - .cmsgs() - .find_map(|cmsg| { - if let socket::ControlMessageOwned::ScmRights(fds) = cmsg { - Some(fds) - } else { - None - } - }) - .map(|fds| { - let mut fds_array: F = Default::default(); - >::as_mut(&mut fds_array).clone_from_slice(&fds); - fds_array - }); - - Ok((msg.bytes, fds)) - } + pub fn close(&self) -> Result<(), ChannelError> { + self.receiver.close()?; - fn recv_into_buf_with_len(&mut self) -> Result<(Vec, Option)> - where - F: Default + AsMut<[RawFd]>, - { - let msg_len = self.peek_size_iovec()?; - let mut len: u64 = 0; - let mut buf = vec![0u8; msg_len as usize]; - let (bytes, fds) = { - let mut iov = [ - IoSliceMut::new(unsafe { - std::slice::from_raw_parts_mut( - (&mut len as *mut u64) as *mut u8, - std::mem::size_of::(), - ) - }), - IoSliceMut::new(&mut buf), - ]; - self.recv_into_iovec(&mut iov)? - }; - - match bytes { - 0 => bail!("channel connection broken"), - _ => Ok((buf, fds)), - } - } - - // Recv the next message of type T. - pub fn recv(&mut self) -> Result { - let (buf, _) = self.recv_into_buf_with_len::<[RawFd; 0]>()?; - Ok(serde_json::from_slice(&buf[..])?) - } - - // Works similar to `recv`, but will look for fds sent by SCM_RIGHTS - // message. We use F as as `[RawFd; n]`, where `n` is the number of - // descriptors you want to receive. - pub fn recv_with_fds(&mut self) -> Result<(T, Option)> - where - F: Default + AsMut<[RawFd]>, - { - let (buf, fds) = self.recv_into_buf_with_len::()?; - Ok((serde_json::from_slice(&buf[..])?, fds)) - } - - pub fn close(&self) -> Result<()> { - Ok(unistd::close(self.receiver)?) + Ok(()) } } -pub fn channel() -> Result<(Sender, Receiver)> -where - T: for<'de> Deserialize<'de> + Serialize, -{ - let (os_sender, os_receiver) = unix_channel()?; - let receiver = Receiver { - receiver: os_receiver, - phantom: PhantomData, - }; - let sender = Sender { - sender: os_sender, - phantom: PhantomData, - }; - Ok((sender, receiver)) -} - -// Use socketpair as the underlying pipe. 
-fn unix_channel() -> Result<(RawFd, RawFd)> { - Ok(socket::socketpair( - socket::AddressFamily::Unix, - socket::SockType::SeqPacket, - None, - socket::SockFlag::SOCK_CLOEXEC, - )?) -} - #[cfg(test)] mod tests { use super::*; - use anyhow::Context; + use anyhow::{Context, Result}; use nix::sys::wait; use nix::unistd; use serial_test::serial; diff --git a/crates/libcontainer/src/process/container_init_process.rs b/crates/libcontainer/src/process/container_init_process.rs index 97dd21ce0..aee50bac4 100644 --- a/crates/libcontainer/src/process/container_init_process.rs +++ b/crates/libcontainer/src/process/container_init_process.rs @@ -1,19 +1,18 @@ use super::args::{ContainerArgs, ContainerType}; -use crate::apparmor; -use crate::syscall::Syscall; -use crate::workload::ExecutorManager; +use crate::error::MissingSpecError; +use crate::namespaces::NamespaceError; +use crate::syscall::{Syscall, SyscallError}; +use crate::{apparmor, notify_socket, rootfs, workload}; use crate::{ capabilities, hooks, namespaces::Namespaces, process::channel, rootfs::RootFS, - rootless::Rootless, seccomp, tty, utils, + rootless::Rootless, tty, utils, }; -use anyhow::{bail, Context, Ok, Result}; use nix::mount::MsFlags; use nix::sched::CloneFlags; use nix::sys::stat::Mode; use nix::unistd::setsid; - use nix::unistd::{self, Gid, Uid}; -use oci_spec::runtime::{LinuxNamespaceType, Spec, User}; +use oci_spec::runtime::{IOPriorityClass, LinuxIOPriority, LinuxNamespaceType, Spec, User}; use std::collections::HashMap; use std::os::unix::io::AsRawFd; use std::{ @@ -21,17 +20,141 @@ use std::{ path::{Path, PathBuf}, }; +#[cfg(feature = "libseccomp")] +use crate::seccomp; + +#[derive(Debug, thiserror::Error)] +pub enum InitProcessError { + #[error("failed to set sysctl")] + Sysctl(#[source] std::io::Error), + #[error("failed to mount path as readonly")] + MountPathReadonly(#[source] SyscallError), + #[error("failed to mount path as masked")] + MountPathMasked(#[source] SyscallError), + 
#[error(transparent)] + Namespaces(#[from] NamespaceError), + #[error("failed to set hostname")] + SetHostname(#[source] SyscallError), + #[error("failed to set domainname")] + SetDomainname(#[source] SyscallError), + #[error("failed to reopen /dev/null")] + ReopenDevNull(#[source] std::io::Error), + #[error("failed to unix syscall")] + NixOther(#[source] nix::Error), + #[error(transparent)] + MissingSpec(#[from] crate::error::MissingSpecError), + #[error("failed to setup tty")] + Tty(#[source] tty::TTYError), + #[error("failed to run hooks")] + Hooks(#[from] hooks::HookError), + #[error("failed to prepare rootfs")] + RootFS(#[source] rootfs::RootfsError), + #[error("failed syscall")] + SyscallOther(#[source] SyscallError), + #[error("failed apparmor")] + AppArmor(#[source] apparmor::AppArmorError), + #[error("invalid umask")] + InvalidUmask(u32), + #[error(transparent)] + #[cfg(feature = "libseccomp")] + Seccomp(#[from] seccomp::SeccompError), + #[error("invalid executable: {0}")] + InvalidExecutable(String), + #[error("io error")] + Io(#[source] std::io::Error), + #[error(transparent)] + Channel(#[from] channel::ChannelError), + #[error("setgroup is disabled")] + SetGroupDisabled, + #[error(transparent)] + NotifyListener(#[from] notify_socket::NotifyListenerError), + #[error(transparent)] + Workload(#[from] workload::ExecutorError), + #[error("invalid io priority class: {0}")] + IoPriorityClass(String), +} + +type Result = std::result::Result; + +fn get_executable_path(name: &str, path_var: &str) -> Option { + // if path has / in it, we have to assume absolute path, as per runc impl + if name.contains('/') && PathBuf::from(name).exists() { + return Some(PathBuf::from(name)); + } + for path in path_var.split(':') { + let potential_path = PathBuf::from(path).join(name); + if potential_path.exists() { + return Some(potential_path); + } + } + None +} + +fn is_executable(path: &Path) -> std::result::Result { + use std::os::unix::fs::PermissionsExt; + let metadata = 
path.metadata()?; + let permissions = metadata.permissions(); + // we have to check if the path is file and the execute bit + // is set. In case of directories, the execute bit is also set, + // so have to check if this is a file or not + Ok(metadata.is_file() && permissions.mode() & 0o001 != 0) +} + +// this checks if the binary to run actually exists and if we have +// permissions to run it. Taken from +// https://github.com/opencontainers/runc/blob/25c9e888686773e7e06429133578038a9abc091d/libcontainer/standard_init_linux.go#L195-L206 +fn verify_binary(args: &[String], envs: &[String]) -> Result<()> { + let path_vars: Vec<&String> = envs.iter().filter(|&e| e.starts_with("PATH=")).collect(); + if path_vars.is_empty() { + tracing::error!("PATH environment variable is not set"); + return Err(InitProcessError::InvalidExecutable(args[0].clone())); + } + let path_var = path_vars[0].trim_start_matches("PATH="); + match get_executable_path(&args[0], path_var) { + None => { + tracing::error!( + "executable {} for container process not found in PATH", + args[0] + ); + return Err(InitProcessError::InvalidExecutable(args[0].clone())); + } + Some(path) => match is_executable(&path) { + Ok(true) => { + tracing::debug!("found executable {:?}", path); + } + Ok(false) => { + tracing::error!( + "executable {:?} does not have the correct permission set", + path + ); + return Err(InitProcessError::InvalidExecutable(args[0].clone())); + } + Err(err) => { + tracing::error!( + "failed to check permissions for executable {:?}: {}", + path, + err + ); + return Err(InitProcessError::Io(err)); + } + }, + } + Ok(()) +} + fn sysctl(kernel_params: &HashMap) -> Result<()> { let sys = PathBuf::from("/proc/sys"); for (kernel_param, value) in kernel_params { let path = sys.join(kernel_param.replace('.', "/")); - log::debug!( + tracing::debug!( "apply value {} to kernel parameter {}.", value, kernel_param ); - fs::write(path, value.as_bytes()) - .with_context(|| format!("failed to set sysctl 
{}={}", kernel_param, value))?; + fs::write(path, value.as_bytes()).map_err(|err| { + tracing::error!("failed to set sysctl {kernel_param}={value}: {err}"); + InitProcessError::Sysctl(err) + })?; } Ok(()) @@ -49,62 +172,82 @@ fn readonly_path(path: &Path, syscall: &dyn Syscall) -> Result<()> { MsFlags::MS_BIND | MsFlags::MS_REC, None, ) { - if let Some(errno) = err.downcast_ref() { + if let SyscallError::Nix(errno) = err { // ignore error if path is not exist. if matches!(errno, nix::errno::Errno::ENOENT) { return Ok(()); } } - bail!(err) + + tracing::error!(?path, ?err, "failed to mount path as readonly"); + return Err(InitProcessError::MountPathReadonly(err)); } - syscall.mount( - Some(path), - path, - None, - MsFlags::MS_NOSUID - | MsFlags::MS_NODEV - | MsFlags::MS_NOEXEC - | MsFlags::MS_BIND - | MsFlags::MS_REMOUNT - | MsFlags::MS_RDONLY, - None, - )?; + syscall + .mount( + Some(path), + path, + None, + MsFlags::MS_NOSUID + | MsFlags::MS_NODEV + | MsFlags::MS_NOEXEC + | MsFlags::MS_BIND + | MsFlags::MS_REMOUNT + | MsFlags::MS_RDONLY, + None, + ) + .map_err(|err| { + tracing::error!(?path, ?err, "failed to remount path as readonly"); + InitProcessError::MountPathReadonly(err) + })?; - log::debug!("readonly path {:?} mounted", path); + tracing::debug!("readonly path {:?} mounted", path); Ok(()) } // For files, bind mounts /dev/null over the top of the specified path. // For directories, mounts read-only tmpfs over the top of the specified path. 
fn masked_path(path: &Path, mount_label: &Option, syscall: &dyn Syscall) -> Result<()> { - if let Err(e) = syscall.mount( + if let Err(err) = syscall.mount( Some(Path::new("/dev/null")), path, None, MsFlags::MS_BIND, None, ) { - if let Some(errno) = e.downcast_ref() { - if matches!(errno, nix::errno::Errno::ENOENT) { - log::warn!("masked path {:?} not exist", path); - } else if matches!(errno, nix::errno::Errno::ENOTDIR) { + match err { + SyscallError::Nix(nix::errno::Errno::ENOENT) => { + // ignore error if path is not exist. + } + SyscallError::Nix(nix::errno::Errno::ENOTDIR) => { let label = match mount_label { - Some(l) => format!("context=\"{}\"", l), + Some(l) => format!("context=\"{l}\""), None => "".to_string(), }; - syscall.mount( - Some(Path::new("tmpfs")), - path, - Some("tmpfs"), - MsFlags::MS_RDONLY, - Some(label.as_str()), - )?; + syscall + .mount( + Some(Path::new("tmpfs")), + path, + Some("tmpfs"), + MsFlags::MS_RDONLY, + Some(label.as_str()), + ) + .map_err(|err| { + tracing::error!(?path, ?err, "failed to mount path as masked using tempfs"); + InitProcessError::MountPathMasked(err) + })?; + } + _ => { + tracing::error!( + ?path, + ?err, + "failed to mount path as masked using /dev/null" + ); + return Err(InitProcessError::MountPathMasked(err)); } - } else { - bail!(e) } - }; + } + Ok(()) } @@ -121,17 +264,29 @@ fn apply_rest_namespaces( .apply_namespaces(|ns_type| -> bool { ns_type != CloneFlags::CLONE_NEWUSER && ns_type != CloneFlags::CLONE_NEWPID }) - .with_context(|| "failed to apply namespaces")?; + .map_err(|err| { + tracing::error!( + ?err, + "failed to apply rest of the namespaces (exclude user and pid)" + ); + InitProcessError::Namespaces(err) + })?; // Only set the host name if entering into a new uts namespace - if let Some(uts_namespace) = namespaces.get(LinuxNamespaceType::Uts) { + if let Some(uts_namespace) = namespaces.get(LinuxNamespaceType::Uts)? 
{ if uts_namespace.path().is_none() { if let Some(hostname) = spec.hostname() { - syscall.set_hostname(hostname)?; + syscall.set_hostname(hostname).map_err(|err| { + tracing::error!(?err, ?hostname, "failed to set hostname"); + InitProcessError::SetHostname(err) + })?; } if let Some(domainname) = spec.domainname() { - syscall.set_domainname(domainname)?; + syscall.set_domainname(domainname).map_err(|err| { + tracing::error!(?err, ?domainname, "failed to set domainname"); + InitProcessError::SetDomainname(err) + })?; } } } @@ -143,44 +298,69 @@ fn reopen_dev_null() -> Result<()> { // we can re-open /dev/null if it is in use to the /dev/null // in the container. - let dev_null = fs::File::open("/dev/null")?; - let dev_null_fstat_info = nix::sys::stat::fstat(dev_null.as_raw_fd())?; + let dev_null = fs::File::open("/dev/null").map_err(|err| { + tracing::error!(?err, "failed to open /dev/null inside the container"); + InitProcessError::ReopenDevNull(err) + })?; + let dev_null_fstat_info = nix::sys::stat::fstat(dev_null.as_raw_fd()).map_err(|err| { + tracing::error!(?err, "failed to fstat /dev/null inside the container"); + InitProcessError::NixOther(err) + })?; // Check if stdin, stdout or stderr point to /dev/null for fd in 0..3 { - let fstat_info = nix::sys::stat::fstat(fd)?; + let fstat_info = nix::sys::stat::fstat(fd).map_err(|err| { + tracing::error!(?err, "failed to fstat stdio fd {}", fd); + InitProcessError::NixOther(err) + })?; if dev_null_fstat_info.st_rdev == fstat_info.st_rdev { // This FD points to /dev/null outside of the container. // Let's point to /dev/null inside of the container. - nix::unistd::dup2(dev_null.as_raw_fd(), fd)?; + nix::unistd::dup2(dev_null.as_raw_fd(), fd).map_err(|err| { + tracing::error!(?err, "failed to dup2 fd {} to /dev/null", fd); + InitProcessError::NixOther(err) + })?; } } + Ok(()) } +// Some variables are unused in the case where libseccomp feature is not enabled. 
+#[allow(unused_variables)] pub fn container_init_process( args: &ContainerArgs, main_sender: &mut channel::MainSender, init_receiver: &mut channel::InitReceiver, ) -> Result<()> { - let syscall = args.syscall; - let spec = args.spec; - let linux = spec.linux().as_ref().context("no linux in spec")?; - let proc = spec.process().as_ref().context("no process in spec")?; + let syscall = args.syscall.create_syscall(); + let spec = &args.spec; + let linux = spec.linux().as_ref().ok_or(MissingSpecError::Linux)?; + let proc = spec.process().as_ref().ok_or(MissingSpecError::Process)?; let mut envs: Vec = proc.env().as_ref().unwrap_or(&vec![]).clone(); - let rootfs_path = args.rootfs; + let rootfs_path = &args.rootfs; let hooks = spec.hooks().as_ref(); let container = args.container.as_ref(); - let namespaces = Namespaces::from(linux.namespaces().as_ref()); + let namespaces = Namespaces::try_from(linux.namespaces().as_ref())?; + let notify_listener = &args.notify_listener; + + setsid().map_err(|err| { + tracing::error!(?err, "failed to setsid to create a session"); + InitProcessError::NixOther(err) + })?; + + set_io_priority(syscall.as_ref(), proc.io_priority())?; - setsid().context("failed to create session")?; // set up tty if specified if let Some(csocketfd) = args.console_socket { - tty::setup_console(&csocketfd).with_context(|| "failed to set up tty")?; + tty::setup_console(&csocketfd).map_err(|err| { + tracing::error!(?err, "failed to set up tty"); + InitProcessError::Tty(err) + })?; } - apply_rest_namespaces(&namespaces, spec, syscall)?; + apply_rest_namespaces(&namespaces, spec, syscall.as_ref())?; if let Some(true) = proc.no_new_privileges() { let _ = prctl::set_no_new_privileges(true); @@ -190,84 +370,108 @@ pub fn container_init_process( // create_container hook needs to be called after the namespace setup, but // before pivot_root is called. This runs in the container namespaces. 
if let Some(hooks) = hooks { - hooks::run_hooks(hooks.create_container().as_ref(), container) - .context("Failed to run create container hooks")?; + hooks::run_hooks(hooks.create_container().as_ref(), container).map_err(|err| { + tracing::error!(?err, "failed to run create container hooks"); + InitProcessError::Hooks(err) + })?; } - let bind_service = namespaces.get(LinuxNamespaceType::User).is_some(); + let bind_service = namespaces.get(LinuxNamespaceType::User)?.is_some(); let rootfs = RootFS::new(); rootfs .prepare_rootfs( spec, rootfs_path, bind_service, - namespaces.get(LinuxNamespaceType::Cgroup).is_some(), + namespaces.get(LinuxNamespaceType::Cgroup)?.is_some(), ) - .with_context(|| "Failed to prepare rootfs")?; + .map_err(|err| { + tracing::error!(?err, "failed to prepare rootfs"); + InitProcessError::RootFS(err) + })?; // Entering into the rootfs jail. If mount namespace is specified, then // we use pivot_root, but if we are on the host mount namespace, we will // use simple chroot. Scary things will happen if you try to pivot_root // in the host mount namespace... 
- if namespaces.get(LinuxNamespaceType::Mount).is_some() { + if namespaces.get(LinuxNamespaceType::Mount)?.is_some() { // change the root of filesystem of the process to the rootfs - syscall - .pivot_rootfs(rootfs_path) - .with_context(|| format!("failed to pivot root to {:?}", rootfs_path))?; + syscall.pivot_rootfs(rootfs_path).map_err(|err| { + tracing::error!(?err, ?rootfs_path, "failed to pivot root"); + InitProcessError::SyscallOther(err) + })?; } else { - syscall - .chroot(rootfs_path) - .with_context(|| format!("failed to chroot to {:?}", rootfs_path))?; + syscall.chroot(rootfs_path).map_err(|err| { + tracing::error!(?err, ?rootfs_path, "failed to chroot"); + InitProcessError::SyscallOther(err) + })?; } - rootfs - .adjust_root_mount_propagation(linux) - .context("failed to set propagation type of root mount")?; + rootfs.adjust_root_mount_propagation(linux).map_err(|err| { + tracing::error!(?err, "failed to adjust root mount propagation"); + InitProcessError::RootFS(err) + })?; - reopen_dev_null()?; + reopen_dev_null().map_err(|err| { + tracing::error!(?err, "failed to reopen /dev/null"); + err + })?; if let Some(kernel_params) = linux.sysctl() { - sysctl(kernel_params) - .with_context(|| format!("failed to sysctl: {:?}", kernel_params))?; + sysctl(kernel_params)?; } } if let Some(profile) = proc.apparmor_profile() { - apparmor::apply_profile(profile) - .with_context(|| format!("failed to apply apparmor profile {}", profile))?; + apparmor::apply_profile(profile).map_err(|err| { + tracing::error!(?err, "failed to apply apparmor profile"); + InitProcessError::AppArmor(err) + })?; } if let Some(true) = spec.root().as_ref().map(|r| r.readonly().unwrap_or(false)) { - syscall.mount( - None, - Path::new("/"), - None, - MsFlags::MS_RDONLY | MsFlags::MS_REMOUNT | MsFlags::MS_BIND, - None, - )? 
+ syscall + .mount( + None, + Path::new("/"), + None, + MsFlags::MS_RDONLY | MsFlags::MS_REMOUNT | MsFlags::MS_BIND, + None, + ) + .map_err(|err| { + tracing::error!(?err, "failed to remount root `/` as readonly"); + InitProcessError::SyscallOther(err) + })?; } if let Some(umask) = proc.user().umask() { - if let Some(mode) = Mode::from_bits(umask) { - nix::sys::stat::umask(mode); - } else { - bail!("invalid umask {}", umask); + match Mode::from_bits(umask) { + Some(mode) => { + nix::sys::stat::umask(mode); + } + None => { + return Err(InitProcessError::InvalidUmask(umask)); + } } } if let Some(paths) = linux.readonly_paths() { // mount readonly path for path in paths { - readonly_path(Path::new(path), syscall) - .with_context(|| format!("failed to set read only path {:?}", path))?; + readonly_path(Path::new(path), syscall.as_ref()).map_err(|err| { + tracing::error!(?err, ?path, "failed to set readonly path"); + err + })?; } } if let Some(paths) = linux.masked_paths() { // mount masked path for path in paths { - masked_path(Path::new(path), linux.mount_label(), syscall) - .with_context(|| format!("failed to set masked path {:?}", path))?; + masked_path(Path::new(path), linux.mount_label(), syscall.as_ref()).map_err(|err| { + tracing::error!(?err, ?path, "failed to set masked path"); + err + })?; } } @@ -281,19 +485,29 @@ pub fn container_init_process( match unistd::chdir(proc.cwd()) { std::result::Result::Ok(_) => false, Err(nix::Error::EPERM) => true, - Err(e) => bail!("failed to chdir: {}", e), + Err(e) => { + tracing::error!(?e, "failed to chdir"); + return Err(InitProcessError::NixOther(e)); + } } }; - set_supplementary_gids(proc.user(), args.rootless, syscall) - .context("failed to set supplementary gids")?; + set_supplementary_gids(proc.user(), &args.rootless, syscall.as_ref()).map_err(|err| { + tracing::error!(?err, "failed to set supplementary gids"); + err + })?; syscall .set_id( Uid::from_raw(proc.user().uid()), Gid::from_raw(proc.user().gid()), ) - 
.context("failed to configure uid and gid")?; + .map_err(|err| { + let uid = proc.user().uid(); + let gid = proc.user().gid(); + tracing::error!(?err, ?uid, ?gid, "failed to set uid and gid"); + InitProcessError::SyscallOther(err) + })?; // Take care of LISTEN_FDS used for systemd-active-socket. If the value is // not 0, then we have to preserve those fds as well, and set up the correct @@ -303,7 +517,7 @@ pub fn container_init_process( let listen_fds = match listen_fds_str.parse::() { std::result::Result::Ok(v) => v, Err(error) => { - log::warn!( + tracing::warn!( "LISTEN_FDS entered is not a fd. Ignore the value. {:?}", error ); @@ -318,7 +532,7 @@ pub fn container_init_process( // it here, if it is 0. if listen_fds > 0 { envs.append(&mut vec![ - format!("LISTEN_FDS={}", listen_fds), + format!("LISTEN_FDS={listen_fds}"), "LISTEN_PID=1".to_string(), ]); } @@ -327,7 +541,7 @@ pub fn container_init_process( } Err(env::VarError::NotPresent) => args.preserve_fds, Err(env::VarError::NotUnicode(value)) => { - log::warn!( + tracing::warn!( "LISTEN_FDS entered is malformed: {:?}. Ignore the value.", &value ); @@ -342,30 +556,50 @@ pub fn container_init_process( // will be closed after execve into the container payload. We can't close the // fds immediately since we at least still need it for the pipe used to wait on // starting the container. - syscall - .close_range(preserve_fds) - .with_context(|| "failed to clean up extra fds")?; + syscall.close_range(preserve_fds).map_err(|err| { + tracing::error!(?err, "failed to cleanup extra fds"); + InitProcessError::SyscallOther(err) + })?; // Without no new privileges, seccomp is a privileged operation. We have to // do this before dropping capabilities. Otherwise, we should do it later, // as close to exec as possible. 
+ #[cfg(feature = "libseccomp")] if let Some(seccomp) = linux.seccomp() { if proc.no_new_privileges().is_none() { - let notify_fd = - seccomp::initialize_seccomp(seccomp).context("failed to execute seccomp")?; - sync_seccomp(notify_fd, main_sender, init_receiver) - .context("failed to sync seccomp")?; + let notify_fd = seccomp::initialize_seccomp(seccomp).map_err(|err| { + tracing::error!(?err, "failed to initialize seccomp"); + err + })?; + sync_seccomp(notify_fd, main_sender, init_receiver).map_err(|err| { + tracing::error!(?err, "failed to sync seccomp"); + err + })?; } } + #[cfg(not(feature = "libseccomp"))] + if proc.no_new_privileges().is_none() { + tracing::warn!("seccomp not available, unable to enforce no_new_privileges!") + } - capabilities::reset_effective(syscall).context("Failed to reset effective capabilities")?; + capabilities::reset_effective(syscall.as_ref()).map_err(|err| { + tracing::error!(?err, "failed to reset effective capabilities"); + InitProcessError::SyscallOther(err) + })?; if let Some(caps) = proc.capabilities() { - capabilities::drop_privileges(caps, syscall).context("Failed to drop capabilities")?; + capabilities::drop_privileges(caps, syscall.as_ref()).map_err(|err| { + tracing::error!(?err, "failed to drop capabilities"); + InitProcessError::SyscallOther(err) + })?; } // Change directory to process.cwd if process.cwd is not empty if do_chdir { - unistd::chdir(proc.cwd()).with_context(|| format!("failed to chdir {:?}", proc.cwd()))?; + unistd::chdir(proc.cwd()).map_err(|err| { + let cwd = proc.cwd(); + tracing::error!(?err, ?cwd, "failed to chdir to cwd"); + InitProcessError::NixOther(err) + })?; } // add HOME into envs if not exists @@ -385,67 +619,75 @@ pub fn container_init_process( // Initialize seccomp profile right before we are ready to execute the // payload so as few syscalls will happen between here and payload exec. The // notify socket will still need network related syscalls. 
+ #[cfg(feature = "libseccomp")] if let Some(seccomp) = linux.seccomp() { if proc.no_new_privileges().is_some() { - let notify_fd = - seccomp::initialize_seccomp(seccomp).context("failed to execute seccomp")?; - sync_seccomp(notify_fd, main_sender, init_receiver) - .context("failed to sync seccomp")?; + let notify_fd = seccomp::initialize_seccomp(seccomp).map_err(|err| { + tracing::error!(?err, "failed to initialize seccomp"); + err + })?; + sync_seccomp(notify_fd, main_sender, init_receiver).map_err(|err| { + tracing::error!(?err, "failed to sync seccomp"); + err + })?; } } + #[cfg(not(feature = "libseccomp"))] + if proc.no_new_privileges().is_some() { + tracing::warn!("seccomp not available, unable to set seccomp privileges!") + } - // this checks if the binary to run actually exists and if we have permissions to run it. - // Taken from https://github.com/opencontainers/runc/blob/25c9e888686773e7e06429133578038a9abc091d/libcontainer/standard_init_linux.go#L195-L206 if let Some(args) = proc.args() { - let path_var = { - let mut ret: &str = ""; - for var in &envs { - if var.starts_with("PATH=") { - ret = var; - } - } - ret - }; - let executable_path = utils::get_executable_path(&args[0], path_var); - match executable_path { - None => bail!( - "executable '{}' for container process does not exist", - args[0] - ), - Some(path) => { - if !utils::is_executable(&path)? { - bail!("file {:?} does not have executable permission set", path); - } - } - } + verify_binary(args, &envs)?; } // Notify main process that the init process is ready to execute the // payload. Note, because we are already inside the pid namespace, the pid // outside the pid namespace should be recorded by the intermediate process // already. 
- main_sender.init_ready()?; - main_sender - .close() - .context("failed to close down main sender in init process")?; + main_sender.init_ready().map_err(|err| { + tracing::error!( + ?err, + "failed to notify main process that init process is ready" + ); + InitProcessError::Channel(err) + })?; + main_sender.close().map_err(|err| { + tracing::error!(?err, "failed to close down main sender in init process"); + InitProcessError::Channel(err) + })?; // listing on the notify socket for container start command - args.notify_socket.wait_for_container_start()?; - args.notify_socket.close()?; + notify_listener.wait_for_container_start().map_err(|err| { + tracing::error!(?err, "failed to wait for container start"); + err + })?; + notify_listener.close().map_err(|err| { + tracing::error!(?err, "failed to close notify socket"); + err + })?; // create_container hook needs to be called after the namespace setup, but // before pivot_root is called. This runs in the container namespaces. if matches!(args.container_type, ContainerType::InitContainer) { if let Some(hooks) = hooks { - hooks::run_hooks(hooks.start_container().as_ref(), container)? + hooks::run_hooks(hooks.start_container().as_ref(), container).map_err(|err| { + tracing::error!(?err, "failed to run start container hooks"); + err + })?; } } if proc.args().is_some() { - ExecutorManager::exec(spec) + (args.executor)(spec).map_err(|err| { + tracing::error!(?err, "failed to execute payload"); + err + })?; + unreachable!("should not be back here"); } else { - bail!("on non-Windows, at least one process arg entry is required") - } + tracing::error!("on non-Windows, at least one process arg entry is required"); + Err(MissingSpecError::Args) + }? 
} // Before 3.19 it was possible for an unprivileged user to enter an user namespace, @@ -482,10 +724,13 @@ fn set_supplementary_gids( return Ok(()); } - let setgroups = - fs::read_to_string("/proc/self/setgroups").context("failed to read setgroups")?; + let setgroups = fs::read_to_string("/proc/self/setgroups").map_err(|err| { + tracing::error!(?err, "failed to read setgroups"); + InitProcessError::Io(err) + })?; if setgroups.trim() == "deny" { - bail!("cannot set supplementary gids, setgroup is disabled"); + tracing::error!("cannot set supplementary gids, setgroup is disabled"); + return Err(InitProcessError::SetGroupDisabled); } let gids: Vec = additional_gids @@ -495,13 +740,15 @@ fn set_supplementary_gids( match rootless { Some(r) if r.privileged => { - syscall.set_groups(&gids).with_context(|| { - format!("failed to set privileged supplementary gids: {:?}", gids) + syscall.set_groups(&gids).map_err(|err| { + tracing::error!(?err, ?gids, "failed to set privileged supplementary gids"); + InitProcessError::SyscallOther(err) })?; } None => { - syscall.set_groups(&gids).with_context(|| { - format!("failed to set unprivileged supplementary gids: {:?}", gids) + syscall.set_groups(&gids).map_err(|err| { + tracing::error!(?err, ?gids, "failed to set unprivileged supplementary gids"); + InitProcessError::SyscallOther(err) })?; } // this should have been detected during validation @@ -514,15 +761,65 @@ fn set_supplementary_gids( Ok(()) } +/// set_io_priority set io priority +fn set_io_priority(syscall: &dyn Syscall, io_priority_op: &Option) -> Result<()> { + match io_priority_op { + Some(io_priority) => { + let io_prio_class_mapping: HashMap<_, _> = [ + (IOPriorityClass::IoprioClassRt, 1i64), + (IOPriorityClass::IoprioClassBe, 2i64), + (IOPriorityClass::IoprioClassIdle, 3i64), + ] + .iter() + .filter_map(|(class, num)| match serde_json::to_string(&class) { + Ok(class_str) => Some((class_str, *num)), + Err(err) => { + tracing::error!(?err, "failed to parse io priority 
class"); + None + } + }) + .collect(); + + let iop_class = serde_json::to_string(&io_priority.class()) + .map_err(|err| InitProcessError::IoPriorityClass(err.to_string()))?; + + match io_prio_class_mapping.get(&iop_class) { + Some(value) => { + syscall + .set_io_priority(*value, io_priority.priority()) + .map_err(|err| { + tracing::error!(?err, ?io_priority, "failed to set io_priority"); + InitProcessError::SyscallOther(err) + })?; + } + None => { + return Err(InitProcessError::IoPriorityClass(iop_class)); + } + } + } + None => {} + } + Ok(()) +} + +#[cfg(feature = "libseccomp")] fn sync_seccomp( fd: Option, main_sender: &mut channel::MainSender, init_receiver: &mut channel::InitReceiver, ) -> Result<()> { if let Some(fd) = fd { - log::debug!("init process sync seccomp, notify fd: {}", fd); - main_sender.seccomp_notify_request(fd)?; - init_receiver.wait_for_seccomp_request_done()?; + tracing::debug!("init process sync seccomp, notify fd: {}", fd); + main_sender.seccomp_notify_request(fd).map_err(|err| { + tracing::error!(?err, "failed to send seccomp notify request"); + InitProcessError::Channel(err) + })?; + init_receiver + .wait_for_seccomp_request_done() + .map_err(|err| { + tracing::error!(?err, "failed to wait for seccomp request done"); + InitProcessError::Channel(err) + })?; // Once we are sure the seccomp notify fd is sent, we can safely close // it. The fd is now duplicated to the main process and sent to seccomp // listener. 
@@ -537,10 +834,13 @@ mod tests { use super::*; use crate::syscall::{ syscall::create_syscall, - test::{ArgName, MountArgs, TestHelperSyscall}, + test::{ArgName, IoPriorityArgs, MountArgs, TestHelperSyscall}, }; + use anyhow::Result; + #[cfg(feature = "libseccomp")] use nix::unistd; use oci_spec::runtime::{LinuxNamespaceBuilder, SpecBuilder, UserBuilder}; + #[cfg(feature = "libseccomp")] use serial_test::serial; use std::fs; @@ -593,7 +893,7 @@ mod tests { .typ(LinuxNamespaceType::Pid) .build()?, ]; - let namespaces = Namespaces::from(Some(&linux_spaces)); + let namespaces = Namespaces::try_from(Some(&linux_spaces))?; apply_rest_namespaces(&namespaces, &spec, syscall.as_ref())?; @@ -647,6 +947,7 @@ mod tests { newuidmap: None, uid_mappings: None, user_namespace: None, + ..Default::default() }), vec![vec![Gid::from_raw(37), Gid::from_raw(38)]], ), @@ -675,17 +976,12 @@ mod tests { #[test] #[serial] + #[cfg(feature = "libseccomp")] fn test_sync_seccomp() -> Result<()> { use std::os::unix::io::IntoRawFd; use std::thread; - use utils::create_temp_dir; - let tmp_dir = create_temp_dir("test_sync_seccomp")?; - let tmp_file = std::fs::OpenOptions::new() - .write(true) - .create(true) - .open(tmp_dir.path().join("temp_file")) - .expect("create temp file failed"); + let tmp_file = tempfile::tempfile()?; let (mut main_sender, mut main_receiver) = channel::main_channel()?; let (mut init_sender, mut init_receiver) = channel::init_channel()?; @@ -711,7 +1007,9 @@ mod tests { .as_any() .downcast_ref::() .unwrap(); - mocks.set_ret_err(ArgName::Mount, || bail!(nix::errno::Errno::ENOENT)); + mocks.set_ret_err(ArgName::Mount, || { + Err(SyscallError::Nix(nix::errno::Errno::ENOENT)) + }); assert!(masked_path(Path::new("/proc/self"), &None, syscall.as_ref()).is_ok()); let got = mocks.get_mount_args(); @@ -725,7 +1023,9 @@ mod tests { .as_any() .downcast_ref::() .unwrap(); - mocks.set_ret_err(ArgName::Mount, || bail!(nix::errno::Errno::ENOTDIR)); + mocks.set_ret_err(ArgName::Mount, || { 
+ Err(SyscallError::Nix(nix::errno::Errno::ENOTDIR)) + }); assert!(masked_path(Path::new("/proc/self"), &None, syscall.as_ref()).is_ok()); @@ -748,7 +1048,9 @@ mod tests { .as_any() .downcast_ref::() .unwrap(); - mocks.set_ret_err(ArgName::Mount, || bail!(nix::errno::Errno::ENOTDIR)); + mocks.set_ret_err(ArgName::Mount, || { + Err(SyscallError::Nix(nix::errno::Errno::ENOTDIR)) + }); assert!(masked_path( Path::new("/proc/self"), @@ -776,10 +1078,69 @@ mod tests { .as_any() .downcast_ref::() .unwrap(); - mocks.set_ret_err(ArgName::Mount, || bail!("unknown error")); + mocks.set_ret_err(ArgName::Mount, || { + Err(SyscallError::Nix(nix::errno::Errno::UnknownErrno)) + }); assert!(masked_path(Path::new("/proc/self"), &None, syscall.as_ref()).is_err()); let got = mocks.get_mount_args(); assert_eq!(0, got.len()); } + + #[test] + fn test_get_executable_path() { + let non_existing_abs_path = "/some/non/existent/absolute/path"; + let existing_abs_path = "/usr/bin/sh"; + let existing_binary = "sh"; + let non_existing_binary = "non-existent"; + let path_value = "/usr/bin:/bin"; + + assert_eq!( + get_executable_path(existing_abs_path, path_value), + Some(PathBuf::from(existing_abs_path)) + ); + assert_eq!(get_executable_path(non_existing_abs_path, path_value), None); + + assert_eq!( + get_executable_path(existing_binary, path_value), + Some(PathBuf::from("/usr/bin/sh")) + ); + + assert_eq!(get_executable_path(non_existing_binary, path_value), None); + } + + #[test] + fn test_is_executable() { + let tmp = tempfile::tempdir().expect("create temp directory for test"); + let executable_path = PathBuf::from("/bin/sh"); + let directory_path = tmp.path(); + let non_executable_path = directory_path.join("non_executable_file"); + let non_existent_path = PathBuf::from("/some/non/existent/path"); + + std::fs::File::create(non_executable_path.as_path()).unwrap(); + + assert!(is_executable(&non_existent_path).is_err()); + assert!(is_executable(&executable_path).unwrap()); + 
assert!(!is_executable(&non_executable_path).unwrap()); + assert!(!is_executable(directory_path).unwrap()); + } + + #[test] + fn test_set_io_priority() { + let test_command = TestHelperSyscall::default(); + let io_priority_op = None; + assert!(set_io_priority(&test_command, &io_priority_op).is_ok()); + + let data = "{\"class\":\"IOPRIO_CLASS_RT\",\"priority\":1}"; + let iop: LinuxIOPriority = serde_json::from_str(data).unwrap(); + let io_priority_op = Some(iop); + assert!(set_io_priority(&test_command, &io_priority_op).is_ok()); + + let want_io_priority = IoPriorityArgs { + class: 1, + priority: 1, + }; + let set_io_prioritys = test_command.get_io_priority_args(); + assert_eq!(set_io_prioritys[0], want_io_priority); + } } diff --git a/crates/libcontainer/src/process/container_intermediate_process.rs b/crates/libcontainer/src/process/container_intermediate_process.rs index 1aa0486cb..1d8b8918d 100644 --- a/crates/libcontainer/src/process/container_intermediate_process.rs +++ b/crates/libcontainer/src/process/container_intermediate_process.rs @@ -1,15 +1,36 @@ +use crate::error::MissingSpecError; use crate::{namespaces::Namespaces, process::channel, process::fork}; -use anyhow::{Context, Error, Result}; use libcgroups::common::CgroupManager; use nix::unistd::{close, write}; use nix::unistd::{Gid, Pid, Uid}; use oci_spec::runtime::{LinuxNamespaceType, LinuxResources}; use procfs::process::Process; -use std::convert::From; use super::args::{ContainerArgs, ContainerType}; use super::container_init_process::container_init_process; +#[derive(Debug, thiserror::Error)] +pub enum IntermediateProcessError { + #[error(transparent)] + Channel(#[from] channel::ChannelError), + #[error(transparent)] + Namespace(#[from] crate::namespaces::NamespaceError), + #[error(transparent)] + Syscall(#[from] crate::syscall::SyscallError), + #[error("failed to launch init process")] + InitProcess(#[source] fork::CloneError), + #[error("cgroup error: {0}")] + Cgroup(String), + 
#[error(transparent)] + Procfs(#[from] procfs::ProcError), + #[error("exec notify failed")] + ExecNotify(#[source] nix::Error), + #[error(transparent)] + MissingSpec(#[from] crate::error::MissingSpecError), +} + +type Result = std::result::Result; + pub fn container_intermediate_process( args: &ContainerArgs, intermediate_chan: &mut (channel::IntermediateSender, channel::IntermediateReceiver), @@ -18,10 +39,12 @@ pub fn container_intermediate_process( ) -> Result { let (inter_sender, inter_receiver) = intermediate_chan; let (init_sender, init_receiver) = init_chan; - let command = &args.syscall; + let command = args.syscall.create_syscall(); let spec = &args.spec; - let linux = spec.linux().as_ref().context("no linux in spec")?; - let namespaces = Namespaces::from(linux.namespaces().as_ref()); + let linux = spec.linux().as_ref().ok_or(MissingSpecError::Linux)?; + let namespaces = Namespaces::try_from(linux.namespaces().as_ref())?; + let cgroup_manager = + libcgroups::common::create_cgroup_manager(args.cgroup_config.to_owned()).unwrap(); // this needs to be done before we create the init process, so that the init // process will already be captured by the cgroup. It also needs to be done @@ -34,27 +57,30 @@ pub fn container_intermediate_process( // the cgroup of the process will form the root of the cgroup hierarchy in // the cgroup namespace. 
apply_cgroups( - args.cgroup_manager.as_ref(), + &cgroup_manager, linux.resources().as_ref(), matches!(args.container_type, ContainerType::InitContainer), - ) - .context("failed to apply cgroups")?; + )?; // if new user is specified in specification, this will be true and new // namespace will be created, check // https://man7.org/linux/man-pages/man7/user_namespaces.7.html for more // information - if let Some(user_namespace) = namespaces.get(LinuxNamespaceType::User) { - namespaces - .unshare_or_setns(user_namespace) - .with_context(|| format!("failed to enter user namespace: {:?}", user_namespace))?; + if let Some(user_namespace) = namespaces.get(LinuxNamespaceType::User)? { + namespaces.unshare_or_setns(user_namespace)?; if user_namespace.path().is_none() { - log::debug!("creating new user namespace"); + tracing::debug!("creating new user namespace"); // child needs to be dumpable, otherwise the non root parent is not // allowed to write the uid/gid maps prctl::set_dumpable(true).unwrap(); - main_sender.identifier_mapping_request()?; - inter_receiver.wait_for_mapping_ack()?; + main_sender.identifier_mapping_request().map_err(|err| { + tracing::error!("failed to send id mapping request: {}", err); + err + })?; + inter_receiver.wait_for_mapping_ack().map_err(|err| { + tracing::error!("failed to receive id mapping ack: {}", err); + err + })?; prctl::set_dumpable(false).unwrap(); } @@ -64,82 +90,117 @@ pub fn container_intermediate_process( // configuring the container process will require root, even though the // root in the user namespace likely is mapped to an non-privileged user // on the parent user namespace. 
- command.set_id(Uid::from_raw(0), Gid::from_raw(0)).context( - "failed to configure uid and gid root in the beginning of a new user namespace", - )?; + command.set_id(Uid::from_raw(0), Gid::from_raw(0))?; } // set limits and namespaces to the process - let proc = spec.process().as_ref().context("no process in spec")?; + let proc = spec.process().as_ref().ok_or(MissingSpecError::Process)?; if let Some(rlimits) = proc.rlimits() { for rlimit in rlimits { - command.set_rlimit(rlimit).context("failed to set rlimit")?; + command.set_rlimit(rlimit).map_err(|err| { + tracing::error!(?err, ?rlimit, "failed to set rlimit"); + err + })?; } } // Pid namespace requires an extra fork to enter, so we enter pid namespace now. - if let Some(pid_namespace) = namespaces.get(LinuxNamespaceType::Pid) { - namespaces - .unshare_or_setns(pid_namespace) - .with_context(|| format!("failed to enter pid namespace: {:?}", pid_namespace))?; + if let Some(pid_namespace) = namespaces.get(LinuxNamespaceType::Pid)? { + namespaces.unshare_or_setns(pid_namespace)?; } - // We have to record the pid of the child (container init process), since - // the child will be inside the pid namespace. We can't rely on child_ready - // to send us the correct pid. - let pid = fork::container_fork(|| { - // We are inside the forked process here. The first thing we have to do is to close - // any unused senders, since fork will make a dup for all the socket. - init_sender - .close() - .context("failed to close receiver in init process")?; - inter_sender - .close() - .context("failed to close sender in the intermediate process")?; + // We have to record the pid of the init process. The init process will be + // inside the pid namespace, so we can't rely on the init process to send us + // the correct pid. We also want to clone the init process as a sibling + // process to the intermediate process. The intermediate process is only + // used as a jumping board to set the init process to the correct + // configuration. 
The youki main process can decide what to do with the init + // process and the intermediate process can just exit safely after the job + // is done. + let pid = fork::container_clone_sibling("youki:[2:INIT]", || { + // We are inside the forked process here. The first thing we have to do + // is to close any unused senders, since fork will make a dup for all + // the socket. + init_sender.close().map_err(|err| { + tracing::error!("failed to close receiver in init process: {}", err); + IntermediateProcessError::Channel(err) + })?; + inter_sender.close().map_err(|err| { + tracing::error!( + "failed to close sender in the intermediate process: {}", + err + ); + IntermediateProcessError::Channel(err) + })?; match container_init_process(args, main_sender, init_receiver) { Ok(_) => Ok(0), Err(e) => { if let ContainerType::TenantContainer { exec_notify_fd } = args.container_type { - let buf = format!("{}", e); - write(exec_notify_fd, buf.as_bytes())?; - close(exec_notify_fd)?; + let buf = format!("{e}"); + write(exec_notify_fd, buf.as_bytes()).map_err(|err| { + tracing::error!("failed to write to exec notify fd: {}", err); + IntermediateProcessError::ExecNotify(err) + })?; + close(exec_notify_fd).map_err(|err| { + tracing::error!("failed to close exec notify fd: {}", err); + IntermediateProcessError::ExecNotify(err) + })?; } - Err(e) + tracing::error!("failed to initialize container process: {e}"); + Err(e.into()) } } + }) + .map_err(|err| { + tracing::error!("failed to fork init process: {}", err); + IntermediateProcessError::InitProcess(err) })?; - // close the exec_notify_fd in this process + // Close the exec_notify_fd in this process if let ContainerType::TenantContainer { exec_notify_fd } = args.container_type { - close(exec_notify_fd)?; + close(exec_notify_fd).map_err(|err| { + tracing::error!("failed to close exec notify fd: {}", err); + IntermediateProcessError::ExecNotify(err) + })?; } - main_sender - .intermediate_ready(pid) - .context("failed to send child 
ready from intermediate process")?; + main_sender.intermediate_ready(pid).map_err(|err| { + tracing::error!("failed to wait on intermediate process: {}", err); + err + })?; // Close unused senders here so we don't have lingering socket around. - main_sender - .close() - .context("failed to close unused main sender")?; - inter_sender - .close() - .context("failed to close sender in the intermediate process")?; - init_sender - .close() - .context("failed to close unused init sender")?; + main_sender.close().map_err(|err| { + tracing::error!("failed to close unused main sender: {}", err); + err + })?; + inter_sender.close().map_err(|err| { + tracing::error!( + "failed to close sender in the intermediate process: {}", + err + ); + err + })?; + init_sender.close().map_err(|err| { + tracing::error!("failed to close unused init sender: {}", err); + err + })?; Ok(pid) } -fn apply_cgroups( +fn apply_cgroups< + C: CgroupManager + ?Sized, + E: std::error::Error + Send + Sync + 'static, +>( cmanager: &C, resources: Option<&LinuxResources>, init: bool, -) -> Result<(), Error> { +) -> Result<()> { let pid = Pid::from_raw(Process::myself()?.pid()); - cmanager - .add_task(pid) - .with_context(|| format!("failed to add task {} to cgroup manager", pid))?; + cmanager.add_task(pid).map_err(|err| { + tracing::error!(?pid, ?err, ?init, "failed to add task to cgroup"); + IntermediateProcessError::Cgroup(err.to_string()) + })?; if let Some(resources) = resources { if init { @@ -150,9 +211,10 @@ fn apply_cgroups( disable_oom_killer: false, }; - cmanager - .apply(&controller_opt) - .context("failed to apply resource limits to cgroup")?; + cmanager.apply(&controller_opt).map_err(|err| { + tracing::error!(?pid, ?err, ?init, "failed to apply cgroup"); + IntermediateProcessError::Cgroup(err.to_string()) + })?; } } diff --git a/crates/libcontainer/src/process/container_main_process.rs b/crates/libcontainer/src/process/container_main_process.rs index 0a9670ae5..f3024caf6 100644 --- 
a/crates/libcontainer/src/process/container_main_process.rs +++ b/crates/libcontainer/src/process/container_main_process.rs @@ -1,63 +1,70 @@ use crate::{ - container::ContainerProcessState, process::{ - args::{ContainerArgs, ContainerType}, - channel, container_intermediate_process, fork, + args::ContainerArgs, channel, container_intermediate_process, fork, + intel_rdt::setup_intel_rdt, }, rootless::Rootless, - seccomp, utils, }; -use anyhow::{Context, Result}; -use nix::{ - sys::{ - socket::{self, UnixAddr}, - wait::{waitpid, WaitStatus}, - }, - unistd::{self, Pid}, -}; -use oci_spec::runtime; -use std::{io::IoSlice, path::Path}; +use nix::sys::wait::{waitpid, WaitStatus}; +use nix::unistd::Pid; + +#[derive(Debug, thiserror::Error)] +pub enum ProcessError { + #[error(transparent)] + Channel(#[from] channel::ChannelError), + #[error("failed to write deny to setgroups")] + SetGroupsDeny(#[source] std::io::Error), + #[error(transparent)] + Rootless(#[from] crate::rootless::RootlessError), + #[error("container state is required")] + ContainerStateRequired, + #[error("failed to wait for intermediate process")] + WaitIntermediateProcess(#[source] nix::Error), + #[error(transparent)] + IntelRdt(#[from] crate::process::intel_rdt::IntelRdtError), + #[error("failed to create intermediate process")] + IntermediateProcessFailed(#[source] fork::CloneError), + #[error("failed seccomp listener")] + #[cfg(feature = "libseccomp")] + SeccompListener(#[from] crate::process::seccomp_listener::SeccompListenerError), +} -pub fn container_main_process(container_args: &ContainerArgs) -> Result<(Pid, Pid)> { +type Result = std::result::Result; + +pub fn container_main_process(container_args: &ContainerArgs) -> Result<(Pid, bool)> { // We use a set of channels to communicate between parent and child process. // Each channel is uni-directional. Because we will pass these channel to - // forked process, we have to be deligent about closing any unused channel. 
+ // cloned process, we have to be diligent about closing any unused channel. // At minimum, we have to close down any unused senders. The corresponding // receivers will be cleaned up once the senders are closed down. let (main_sender, main_receiver) = &mut channel::main_channel()?; let inter_chan = &mut channel::intermediate_channel()?; let init_chan = &mut channel::init_channel()?; - let intermediate_pid = fork::container_fork(|| { - let container_pid = container_intermediate_process::container_intermediate_process( + let intermediate_pid = fork::container_fork("youki:[1:INTER]", || { + container_intermediate_process::container_intermediate_process( container_args, inter_chan, init_chan, main_sender, )?; - if matches!( - container_args.container_type, - ContainerType::TenantContainer { exec_notify_fd: _ } - ) && !container_args.detached - { - match waitpid(container_pid, None)? { - WaitStatus::Exited(_, s) => Ok(s), - WaitStatus::Signaled(_, sig, _) => Ok(sig as i32), - _ => Ok(0), - } - } else { - Ok(0) - } + Ok(0) + }) + .map_err(|err| { + tracing::error!("failed to fork intermediate process: {}", err); + ProcessError::IntermediateProcessFailed(err) })?; + // Close down unused fds. The corresponding fds are duplicated to the // child process during fork. 
- main_sender - .close() - .context("failed to close unused sender")?; + main_sender.close().map_err(|err| { + tracing::error!("failed to close unused sender: {}", err); + err + })?; - let (inter_sender, _) = inter_chan; - let (init_sender, _) = init_chan; + let (inter_sender, inter_receiver) = inter_chan; + let (init_sender, init_receiver) = init_chan; // If creating a rootless container, the intermediate process will ask // the main process to set up uid and gid mapping, once the intermediate @@ -70,17 +77,20 @@ pub fn container_main_process(container_args: &ContainerArgs) -> Result<(Pid, Pi // At this point, we don't need to send any message to intermediate process anymore, // so we want to close this sender at the earliest point. - inter_sender - .close() - .context("failed to close unused intermediate sender")?; + inter_sender.close().map_err(|err| { + tracing::error!("failed to close unused intermediate sender: {}", err); + err + })?; // The intermediate process will send the init pid once it forks the init // process. The intermediate process should exit after this point. let init_pid = main_receiver.wait_for_intermediate_ready()?; + let mut need_to_clean_up_intel_rdt_subdirectory = false; if let Some(linux) = container_args.spec.linux() { + #[cfg(feature = "libseccomp")] if let Some(seccomp) = linux.seccomp() { - let state = ContainerProcessState { + let state = crate::container::ContainerProcessState { oci_version: container_args.spec.version().to_string(), // runc hardcode the `seccompFd` name for fds. fds: vec![String::from("seccompFd")], @@ -89,110 +99,100 @@ pub fn container_main_process(container_args: &ContainerArgs) -> Result<(Pid, Pi state: container_args .container .as_ref() - .context("container state is required")? + .ok_or(ProcessError::ContainerStateRequired)? 
.state .clone(), }; - sync_seccomp(seccomp, &state, init_sender, main_receiver) - .context("failed to sync seccomp with init")?; + crate::process::seccomp_listener::sync_seccomp( + seccomp, + &state, + init_sender, + main_receiver, + )?; + } + + if let Some(intel_rdt) = linux.intel_rdt() { + let container_id = container_args + .container + .as_ref() + .map(|container| container.id()); + need_to_clean_up_intel_rdt_subdirectory = + setup_intel_rdt(container_id, &init_pid, intel_rdt)?; } } // We don't need to send anything to the init process after this point, so // close the sender. - init_sender - .close() - .context("failed to close unused init sender")?; + init_sender.close().map_err(|err| { + tracing::error!("failed to close unused init sender: {}", err); + err + })?; - main_receiver - .wait_for_init_ready() - .context("failed to wait for init ready")?; + main_receiver.wait_for_init_ready().map_err(|err| { + tracing::error!("failed to wait for init ready: {}", err); + err + })?; - log::debug!("init pid is {:?}", init_pid); + tracing::debug!("init pid is {:?}", init_pid); - // here we send both intermediate and init pid, because : - // init pid is required for writing it to pid_file (if) given by the high-level runtime - // intermediate pid is required in the case when we call exec, as we nned to wait for the - // intermediate process to exit, which itself waits for child process (the exec process) to exit - // in order to get the proper exit code. We cannot simply wait for the init_pid , that is the actual container - // process, as it is not (direect) child of our process - Ok((intermediate_pid, init_pid)) -} + // Close the receiver ends to avoid leaking file descriptors. 
-fn sync_seccomp( - seccomp: &runtime::LinuxSeccomp, - state: &ContainerProcessState, - init_sender: &mut channel::InitSender, - main_receiver: &mut channel::MainReceiver, -) -> Result<()> { - if seccomp::is_notify(seccomp) { - log::debug!("main process waiting for sync seccomp"); - let seccomp_fd = main_receiver.wait_for_seccomp_request()?; - let listener_path = seccomp - .listener_path() - .as_ref() - .context("notify will require seccomp listener path to be set")?; - let encoded_state = - serde_json::to_vec(state).context("failed to encode container process state")?; - sync_seccomp_send_msg(listener_path, &encoded_state, seccomp_fd) - .context("failed to send msg to seccomp listener")?; - init_sender.seccomp_notify_done()?; - // Once we sent the seccomp notify fd to the seccomp listener, we can - // safely close the fd. The SCM_RIGHTS msg will duplicate the fd to the - // process on the other end of the listener. - let _ = unistd::close(seccomp_fd); - } + inter_receiver.close().map_err(|err| { + tracing::error!("failed to close intermediate process receiver: {}", err); + err + })?; - Ok(()) -} + init_receiver.close().map_err(|err| { + tracing::error!("failed to close init process receiver: {}", err); + err + })?; -fn sync_seccomp_send_msg(listener_path: &Path, msg: &[u8], fd: i32) -> Result<()> { - // The seccomp listener has specific instructions on how to transmit the - // information through seccomp listener. Therefore, we have to use - // libc/nix APIs instead of Rust std lib APIs to maintain flexibility. 
- let socket = socket::socket( - socket::AddressFamily::Unix, - socket::SockType::Stream, - socket::SockFlag::empty(), - None, - ) - .context("failed to create unix domain socket for seccomp listener")?; - let unix_addr = socket::UnixAddr::new(listener_path).context("failed to create unix addr")?; - socket::connect(socket, &unix_addr).with_context(|| { - format!( - "failed to connect to seccomp notify listerner path: {:?}", - listener_path - ) + main_receiver.close().map_err(|err| { + tracing::error!("failed to close main process receiver: {}", err); + err })?; - // We have to use sendmsg here because the spec requires us to send seccomp notify fds through - // SCM_RIGHTS message. - // Ref: https://man7.org/linux/man-pages/man3/sendmsg.3p.html - // Ref: https://man7.org/linux/man-pages/man3/cmsg.3.html - let iov = [IoSlice::new(msg)]; - let fds = [fd]; - let cmsgs = socket::ControlMessage::ScmRights(&fds); - socket::sendmsg::(socket, &iov, &[cmsgs], socket::MsgFlags::empty(), None) - .context("failed to write container state to seccomp listener")?; - // The spec requires the listener socket to be closed immediately after sending. - let _ = unistd::close(socket); - Ok(()) + // Before the main process returns, we want to make sure the intermediate + // process has exited and been reaped. By this point, the intermediate process + // should have already exited successfully. If the intermediate process errors out, + // the `init_ready` will not be sent. + match waitpid(intermediate_pid, None) { + Ok(WaitStatus::Exited(_, 0)) => (), + Ok(WaitStatus::Exited(_, s)) => { + tracing::warn!("intermediate process failed with exit status: {s}"); + } + Ok(WaitStatus::Signaled(_, sig, _)) => { + tracing::warn!("intermediate process killed with signal: {sig}") + } + Ok(_) => (), + Err(nix::errno::Errno::ECHILD) => { + // This is safe because intermediate_process and main_process check if the process is + // finished by piping instead of exit code. 
+ tracing::warn!("intermediate process already reaped"); + } + Err(err) => return Err(ProcessError::WaitIntermediateProcess(err)), + }; + + Ok((init_pid, need_to_clean_up_intel_rdt_subdirectory)) } fn setup_mapping(rootless: &Rootless, pid: Pid) -> Result<()> { - log::debug!("write mapping for pid {:?}", pid); + tracing::debug!("write mapping for pid {:?}", pid); if !rootless.privileged { // The main process is running as an unprivileged user and cannot write the mapping // until "deny" has been written to setgroups. See CVE-2014-8989. - utils::write_file(format!("/proc/{}/setgroups", pid), "deny")?; + std::fs::write(format!("/proc/{pid}/setgroups"), "deny") + .map_err(ProcessError::SetGroupsDeny)?; } - rootless - .write_uid_mapping(pid) - .context(format!("failed to map uid of pid {}", pid))?; - rootless - .write_gid_mapping(pid) - .context(format!("failed to map gid of pid {}", pid))?; + rootless.write_uid_mapping(pid).map_err(|err| { + tracing::error!("failed to write uid mapping for pid {:?}: {}", pid, err); + err + })?; + rootless.write_gid_mapping(pid).map_err(|err| { + tracing::error!("failed to write gid mapping for pid {:?}: {}", pid, err); + err + })?; Ok(()) } @@ -200,19 +200,16 @@ fn setup_mapping(rootless: &Rootless, pid: Pid) -> Result<()> { mod tests { use super::*; use crate::process::channel::{intermediate_channel, main_channel}; - use crate::rootless::{get_gid_path, get_uid_path}; + use crate::rootless::RootlessIDMapper; + use anyhow::Result; use nix::{ sched::{unshare, CloneFlags}, unistd::{self, getgid, getuid}, }; - use oci_spec::runtime::{ - LinuxIdMappingBuilder, LinuxSeccompAction, LinuxSeccompBuilder, LinuxSyscallBuilder, - }; + use oci_spec::runtime::LinuxIdMappingBuilder; use serial_test::serial; use std::fs; - use crate::utils::TempDir; - #[test] #[serial] fn setup_uid_mapping_should_succeed() -> Result<()> { @@ -222,9 +219,12 @@ mod tests { .size(1u32) .build()?; let uid_mappings = vec![uid_mapping]; + let tmp = tempfile::tempdir()?; + 
let id_mapper = RootlessIDMapper::new_test(tmp.path().to_path_buf()); let rootless = Rootless { - uid_mappings: Some(&uid_mappings), + uid_mappings: Some(uid_mappings), privileged: true, + rootless_id_mapper: id_mapper.clone(), ..Default::default() }; let (mut parent_sender, mut parent_receiver) = main_channel()?; @@ -234,16 +234,13 @@ mod tests { parent_receiver.wait_for_mapping_request()?; parent_receiver.close()?; - let tempdir = TempDir::new(get_uid_path(&child).parent().unwrap())?; - let uid_map_path = tempdir.join("uid_map"); - let _ = fs::File::create(&uid_map_path)?; - - let tempdir = TempDir::new(get_gid_path(&child).parent().unwrap())?; - let gid_map_path = tempdir.join("gid_map"); - let _ = fs::File::create(gid_map_path)?; - + // In test, we fake the uid path in /proc/{pid}/uid_map, so we + // need to ensure the path exists before we write the mapping. + // The path requires the pid we use, so we can only do so after + // obtaining the child pid here. + id_mapper.ensure_uid_path(&child)?; setup_mapping(&rootless, child)?; - let line = fs::read_to_string(uid_map_path)?; + let line = fs::read_to_string(id_mapper.get_uid_path(&child))?; let line_splited = line.split_whitespace(); for (act, expect) in line_splited.zip([ uid_mapping.container_id().to_string(), @@ -270,15 +267,18 @@ #[test] #[serial] - fn setup_gid_mapping_should_successed() -> Result<()> { + fn setup_gid_mapping_should_succeed() -> Result<()> { let gid_mapping = LinuxIdMappingBuilder::default() .host_id(getgid()) .container_id(0u32) .size(1u32) .build()?; let gid_mappings = vec![gid_mapping]; + let tmp = tempfile::tempdir()?; + let id_mapper = RootlessIDMapper::new_test(tmp.path().to_path_buf()); let rootless = Rootless { - gid_mappings: Some(&gid_mappings), + gid_mappings: Some(gid_mappings), + rootless_id_mapper: id_mapper.clone(), ..Default::default() }; let (mut parent_sender, mut parent_receiver) = main_channel()?; @@ -288,16 +288,13 @@ 
parent_receiver.wait_for_mapping_request()?; parent_receiver.close()?; - let tempdir = TempDir::new(get_uid_path(&child).parent().unwrap())?; - let uid_map_path = tempdir.join("uid_map"); - let _ = fs::File::create(uid_map_path)?; - - let tempdir = TempDir::new(get_gid_path(&child).parent().unwrap())?; - let gid_map_path = tempdir.join("gid_map"); - let _ = fs::File::create(&gid_map_path)?; - + // In test, we fake the gid path in /proc/{pid}/gid_map, so we + // need to ensure the path exists before we write the mapping. + // The path requires the pid we use, so we can only do so after + // obtaining the child pid here. + id_mapper.ensure_gid_path(&child)?; setup_mapping(&rootless, child)?; - let line = fs::read_to_string(gid_map_path)?; + let line = fs::read_to_string(id_mapper.get_gid_path(&child))?; let line_splited = line.split_whitespace(); for (act, expect) in line_splited.zip([ gid_mapping.container_id().to_string(), @@ -325,63 +322,4 @@ } Ok(()) } - - #[test] - #[serial] - fn test_sync_seccomp() -> Result<()> { - use std::io::Read; - use std::os::unix::io::IntoRawFd; - use std::os::unix::net::UnixListener; - use std::thread; - use utils::create_temp_dir; - - let tmp_dir = create_temp_dir("test_sync_seccomp")?; - let scmp_file = std::fs::OpenOptions::new() - .write(true) - .create(true) - .open(tmp_dir.path().join("scmp_file"))?; - - std::fs::OpenOptions::new() - .write(true) - .create(true) - .open(tmp_dir.path().join("socket_file.sock"))?; - - let (mut main_sender, mut main_receiver) = channel::main_channel()?; - let (mut init_sender, mut init_receiver) = channel::init_channel()?; - let socket_path = tmp_dir.path().join("socket_file.sock"); - let socket_path_seccomp_th = socket_path.clone(); - - let state = ContainerProcessState::default(); - let want = serde_json::to_string(&state)?; - let th = thread::spawn(move || { - sync_seccomp( - &LinuxSeccompBuilder::default() - .listener_path(socket_path_seccomp_th) - 
.syscalls(vec![LinuxSyscallBuilder::default() - .action(LinuxSeccompAction::ScmpActNotify) - .build() - .unwrap()]) - .build() - .unwrap(), - &state, - &mut init_sender, - &mut main_receiver, - ) - .unwrap(); - }); - - let fd = scmp_file.into_raw_fd(); - assert!(main_sender.seccomp_notify_request(fd).is_ok()); - - fs::remove_file(socket_path.clone())?; - let lis = UnixListener::bind(socket_path)?; - let (mut socket, _) = lis.accept()?; - let mut got = String::new(); - socket.read_to_string(&mut got)?; - assert!(init_receiver.wait_for_seccomp_request_done().is_ok()); - - assert_eq!(want, got); - assert!(th.join().is_ok()); - Ok(()) - } } diff --git a/crates/libcontainer/src/process/fork.rs b/crates/libcontainer/src/process/fork.rs index 12ba2dfcf..a3557a531 100644 --- a/crates/libcontainer/src/process/fork.rs +++ b/crates/libcontainer/src/process/fork.rs @@ -1,41 +1,112 @@ -use anyhow::Result; -use nix::unistd; +use libc::SIGCHLD; use nix::unistd::Pid; +use prctl; -// Execute the cb in another process. Make the fork works more like thread_spawn -// or clone, so it is easier to reason. Compared to clone call, fork is easier -// to use since fork will magically take care of all the variable copying. If -// using clone, we would have to manually make sure all the variables are -// correctly send to the new process, especially Rust borrow checker will be a -// lot of hassel to deal with every details. -pub fn container_fork Result>(cb: F) -> Result { - // here we return the child's pid in case of parent, the i32 in return signature, - // and for child, we run the callback function, and exit with the same exit code - // given by it. If there was any error when trying to run callback, exit with -1 - match unsafe { unistd::fork()? 
} { - unistd::ForkResult::Parent { child } => Ok(child), - unistd::ForkResult::Child => { +#[derive(Debug, thiserror::Error)] +pub enum CloneError { + #[error("failed to clone process using clone3")] + Clone(#[source] nix::Error), +} + +type Result = std::result::Result; + +#[derive(Debug, thiserror::Error)] +pub enum CallbackError { + #[error(transparent)] + IntermediateProcess( + #[from] crate::process::container_intermediate_process::IntermediateProcessError, + ), + #[error(transparent)] + InitProcess(#[from] crate::process::container_init_process::InitProcessError), + // Need a fake error for testing + #[error("unknown")] + #[cfg(test)] + Test, +} + +type CallbackResult = std::result::Result; + +// Fork/Clone a sibling process that shares the same parent as the calling +// process. This is used to launch the container init process so the parent +// process of the calling process can receive ownership of the process. If we +// clone a child process as the init process, the calling process (likely the +// youki main process) will exit and the init process will be re-parented to the +// process 1 (system init process), which is not the right behavior of what we +// look for. +pub fn container_clone_sibling CallbackResult>( + child_name: &str, + cb: F, +) -> Result { + let mut clone = clone3::Clone3::default(); + // Note: normally, an exit signal is required, but when using + // `CLONE_PARENT`, the `clone3` will return EINVAL if an exit signal is set. + // The older `clone` will not return EINVAL in this case. Instead it ignores + // the exit signal bits in the glibc wrapper. + clone.flag_parent(); + + container_clone(child_name, cb, clone) +} + +// A simple clone wrapper to clone3 so we can share this logic in different +// fork/clone situations. We decided to minimally support kernel version >= 5.4, +// and `clone3` requires only kernel version >= 5.3. Therefore, we don't need to +// fall back to `clone` or `fork`. 
+fn container_clone CallbackResult>( + child_name: &str, + cb: F, + mut clone_cmd: clone3::Clone3, +) -> Result { + // Return the child's pid in case of parent/calling process, and for the + // cloned process, run the callback function, and exit with the same exit + // code returned by the callback. If there was any error when trying to run + // callback, exit with -1 + match unsafe { + clone_cmd + .call() + .map_err(|err| CloneError::Clone(nix::errno::from_i32(err.0)))? + } { + 0 => { + prctl::set_name(child_name).expect("failed to set name"); + // Inside the cloned process let ret = match cb() { Err(error) => { - log::debug!("failed to run fork: {:?}", error); + tracing::debug!("failed to run child process in clone: {:?}", error); -1 } Ok(exit_code) => exit_code, }; std::process::exit(ret); } + pid => Ok(Pid::from_raw(pid)), } } +// Execute the cb in another process. Make the fork work more like thread_spawn +// or clone, so it is easier to reason. Compared to clone call, fork is easier +// to use since fork will magically take care of all the variable copying. If +// using clone, we would have to manually make sure all the variables are +// correctly sent to the new process, especially Rust borrow checker will be a +// lot of hassle to deal with every detail. +pub fn container_fork CallbackResult>(child_name: &str, cb: F) -> Result { + // Using `clone3` to mimic the effect of `fork`. 
+ let mut clone = clone3::Clone3::default(); + clone.exit_signal(SIGCHLD as u64); + + container_clone(child_name, cb, clone) +} + #[cfg(test)] mod test { + use crate::channel::channel; + use super::*; - use anyhow::{bail, Result}; + use anyhow::{bail, Context, Result}; use nix::sys::wait::{waitpid, WaitStatus}; + use nix::unistd; #[test] fn test_container_fork() -> Result<()> { - let pid = container_fork(|| Ok(0))?; + let pid = container_fork("test:child", || Ok(0))?; match waitpid(pid, None).expect("wait pid failed.") { WaitStatus::Exited(p, status) => { assert_eq!(pid, p); @@ -48,7 +119,7 @@ mod test { #[test] fn test_container_err_fork() -> Result<()> { - let pid = container_fork(|| bail!(""))?; + let pid = container_fork("test:child", || Err(CallbackError::Test))?; match waitpid(pid, None).expect("wait pid failed.") { WaitStatus::Exited(p, status) => { assert_eq!(pid, p); @@ -58,4 +129,55 @@ mod test { _ => bail!("test failed"), } } + + #[test] + fn test_container_clone_sibling() -> Result<()> { + // The `container_clone_sibling` will create a sibling process (share + // the same parent) of the calling process. In Unix, a process can only + // wait on the immediate children process and can't wait on the sibling + // process. Therefore, to test the logic, we will have to fork a process + // first and then let the forked process call `container_clone_sibling`. + // Then the testing process (the process where test is called), who are + // the parent to this forked process and the sibling process cloned by + // the `container_clone_sibling`, can wait on both processes. + + // We need to use a channel so that the forked process can pass the pid + // of the sibling process to the testing process. + let (sender, receiver) = &mut channel::()?; + + match unsafe { unistd::fork()? 
} { + unistd::ForkResult::Parent { child } => { + let sibling_process_pid = + Pid::from_raw(receiver.recv().with_context(|| { + "failed to receive the sibling pid from forked process" + })?); + receiver.close()?; + match waitpid(sibling_process_pid, None).expect("wait pid failed.") { + WaitStatus::Exited(p, status) => { + assert_eq!(sibling_process_pid, p); + assert_eq!(status, 0); + } + _ => bail!("failed to wait on the sibling process"), + } + // After sibling process exits, we can wait on the forked process. + match waitpid(child, None).expect("wait pid failed.") { + WaitStatus::Exited(p, status) => { + assert_eq!(child, p); + assert_eq!(status, 0); + } + _ => bail!("failed to wait on the forked process"), + } + } + unistd::ForkResult::Child => { + // Inside the forked process. We call `container_clone` and pass + // the pid to the parent process. + let pid = container_clone_sibling("test:child", || Ok(0))?; + sender.send(pid.as_raw())?; + sender.close()?; + std::process::exit(0); + } + }; + + Ok(()) + } } diff --git a/crates/libcontainer/src/process/intel_rdt.rs b/crates/libcontainer/src/process/intel_rdt.rs new file mode 100644 index 000000000..e1b33cd2c --- /dev/null +++ b/crates/libcontainer/src/process/intel_rdt.rs @@ -0,0 +1,613 @@ +use once_cell::sync::Lazy; +use regex::Regex; +use std::collections::HashMap; +use std::io::Write; +use std::{ + fs::{self, OpenOptions}, + path::{Path, PathBuf}, +}; + +use nix::unistd::Pid; +use oci_spec::runtime::LinuxIntelRdt; +use procfs::process::Process; + +#[derive(Debug, thiserror::Error)] +pub enum IntelRdtError { + #[error(transparent)] + ProcError(#[from] procfs::ProcError), + #[error("failed to find resctrl mount point")] + ResctrlMountPointNotFound, + #[error("failed to find ID for resctrl")] + ResctrlIdNotFound, + #[error("existing schemata found but data did not match")] + ExistingSchemataMismatch, + #[error("failed to read existing schemata")] + ReadSchemata(#[source] std::io::Error), + #[error("failed to write 
schemata")] + WriteSchemata(#[source] std::io::Error), + #[error("failed to open schemata file")] + OpenSchemata(#[source] std::io::Error), + #[error(transparent)] + ParseLine(#[from] ParseLineError), + #[error("no resctrl subdirectory found for container id")] + NoResctrlSubdirectory, + #[error("failed to remove subdirectory")] + RemoveSubdirectory(#[source] std::io::Error), + #[error("no parent for resctrl subdirectory")] + NoResctrlSubdirectoryParent, + #[error("invalid resctrl directory")] + InvalidResctrlDirectory, + #[error("resctrl closID directory didn't exist")] + NoClosIDDirectory, + #[error("failed to write to resctrl closID directory")] + WriteClosIDDirectory(#[source] std::io::Error), + #[error("failed to open resctrl closID directory")] + OpenClosIDDirectory(#[source] std::io::Error), + #[error("failed to create resctrl closID directory")] + CreateClosIDDirectory(#[source] std::io::Error), + #[error("failed to canonicalize path")] + Canonicalize(#[source] std::io::Error), +} + +#[derive(Debug, thiserror::Error)] +pub enum ParseLineError { + #[error("MB line doesn't match validation")] + MBLine, + #[error("MB token has wrong number of fields")] + MBToken, + #[error("L3 line doesn't match validation")] + L3Line, + #[error("L3 token has wrong number of fields")] + L3Token, +} + +type Result = std::result::Result; + +pub fn delete_resctrl_subdirectory(id: &str) -> Result<()> { + let dir = find_resctrl_mount_point().map_err(|err| { + tracing::error!("failed to find resctrl mount point: {}", err); + err + })?; + let container_resctrl_path = dir.join(id).canonicalize().map_err(|err| { + tracing::error!(?dir, ?id, "failed to canonicalize path: {}", err); + IntelRdtError::Canonicalize(err) + })?; + match container_resctrl_path.parent() { + // Make sure the container_id really exists and the directory + // is inside the resctrl fs. 
+ Some(parent) => { + if parent == dir && container_resctrl_path.exists() { + fs::remove_dir(&container_resctrl_path).map_err(|err| { + tracing::error!(path = ?container_resctrl_path, "failed to remove resctrl subdirectory: {}", err); + IntelRdtError::RemoveSubdirectory(err) + })?; + } else { + return Err(IntelRdtError::NoResctrlSubdirectory); + } + } + None => return Err(IntelRdtError::NoResctrlSubdirectoryParent), + } + Ok(()) +} + +/// Finds the resctrl mount path by looking at the process mountinfo data. +pub fn find_resctrl_mount_point() -> Result { + let process = Process::myself()?; + let mount_infos = process.mountinfo()?; + + for mount_info in mount_infos.iter() { + // "resctrl" type fs can be mounted only once. + if mount_info.fs_type == "resctrl" { + let path = mount_info.mount_point.clone().canonicalize().map_err(|err| { + tracing::error!(path = ?mount_info.mount_point, "failed to canonicalize path: {}", err); + IntelRdtError::Canonicalize(err) + })?; + return Ok(path); + } + } + + Err(IntelRdtError::ResctrlMountPointNotFound) +} + +/// Adds container PID to the tasks file in the correct resctrl +/// pseudo-filesystem subdirectory. Creates the directory if needed based on +/// the rules in Linux OCI runtime config spec. +fn write_container_pid_to_resctrl_tasks( + path: &Path, + id: &str, + init_pid: Pid, + only_clos_id_set: bool, +) -> Result { + let tasks = path.to_owned().join(id).join("tasks"); + let dir = tasks.parent(); + match dir { + None => Err(IntelRdtError::InvalidResctrlDirectory), + Some(resctrl_container_dir) => { + let mut created_dir = false; + if !resctrl_container_dir.exists() { + if only_clos_id_set { + // Directory doesn't exist and only clos_id is set: error out. 
+ return Err(IntelRdtError::NoClosIDDirectory); + } + fs::create_dir_all(resctrl_container_dir).map_err(|err| { + tracing::error!("failed to create resctrl subdirectory: {}", err); + IntelRdtError::CreateClosIDDirectory(err) + })?; + created_dir = true; + } + // TODO(ipuustin): File doesn't need to be created, but it's easier + // to test this way. Fix the tests so that the fake resctrl + // filesystem is pre-populated. + let mut file = OpenOptions::new() + .create(true) + .append(true) + .open(tasks) + .map_err(|err| { + tracing::error!("failed to open resctrl tasks file: {}", err); + IntelRdtError::OpenClosIDDirectory(err) + })?; + write!(file, "{init_pid}").map_err(|err| { + tracing::error!("failed to write to resctrl tasks file: {}", err); + IntelRdtError::WriteClosIDDirectory(err) + })?; + Ok(created_dir) + } + } +} + +/// Merges the two schemas together, removing lines starting with "MB:" from +/// l3_cache_schema if mem_bw_schema is also specified. +fn combine_l3_cache_and_mem_bw_schemas( + l3_cache_schema: &Option, + mem_bw_schema: &Option, +) -> Option { + if l3_cache_schema.is_some() && mem_bw_schema.is_some() { + // Combine the results. Filter out "MB:"-lines from l3_cache_schema + let real_l3_cache_schema = l3_cache_schema.as_ref().unwrap(); + let real_mem_bw_schema = mem_bw_schema.as_ref().unwrap(); + let mut output: Vec<&str> = vec![]; + + for line in real_l3_cache_schema.lines() { + if line.starts_with("MB:") { + continue; + } + output.push(line); + } + output.push(real_mem_bw_schema); + return Some(output.join("\n")); + } else if l3_cache_schema.is_some() { + // Apparently the "MB:"-lines don't need to be removed in this case? 
+ return l3_cache_schema.to_owned(); + } else if mem_bw_schema.is_some() { + return mem_bw_schema.to_owned(); + } + None +} + +#[derive(PartialEq)] +enum LineType { + L3Line, + L3DataLine, + L3CodeLine, + MbLine, + Unknown, +} + +#[derive(PartialEq)] +struct ParsedLine { + line_type: LineType, + tokens: HashMap, +} + +/// Parse tokens ("1=7000") from a "MB:" line. +fn parse_mb_line(line: &str) -> std::result::Result, ParseLineError> { + let mut token_map = HashMap::new(); + + static MB_VALIDATE_RE: Lazy = Lazy::new(|| { + Regex::new(r"^MB:(?:\s|;)*(?:\w+\s*=\s*\w+)?(?:(?:\s*;+\s*)+\w+\s*=\s*\w+)*(?:\s|;)*$") + .unwrap() + }); + static MB_CAPTURE_RE: Lazy = Lazy::new(|| Regex::new(r"(\w+)\s*=\s*(\w+)").unwrap()); + + if !MB_VALIDATE_RE.is_match(line) { + return Err(ParseLineError::MBLine); + } + + for token in MB_CAPTURE_RE.captures_iter(line) { + match (token.get(1), token.get(2)) { + (Some(key), Some(value)) => { + token_map.insert(key.as_str().to_string(), value.as_str().to_string()); + } + _ => return Err(ParseLineError::MBToken), + } + } + + Ok(token_map) +} + +/// Parse tokens ("0=ffff") from a L3{,CODE,DATA} line. +fn parse_l3_line(line: &str) -> std::result::Result, ParseLineError> { + let mut token_map = HashMap::new(); + + static L3_VALIDATE_RE: Lazy = Lazy::new(|| { + Regex::new(r"^(?:L3|L3DATA|L3CODE):(?:\s|;)*(?:\w+\s*=\s*[[:xdigit:]]+)?(?:(?:\s*;+\s*)+\w+\s*=\s*[[:xdigit:]]+)*(?:\s|;)*$").unwrap() + }); + static L3_CAPTURE_RE: Lazy = + Lazy::new(|| Regex::new(r"(\w+)\s*=\s*0*([[:xdigit:]]+)").unwrap()); + // ^ + // +-------------+ + // | + // The capture regexp also removes leading zeros from mask values. 
+ + if !L3_VALIDATE_RE.is_match(line) { + return Err(ParseLineError::L3Line); + } + + for token in L3_CAPTURE_RE.captures_iter(line) { + match (token.get(1), token.get(2)) { + (Some(key), Some(value)) => { + token_map.insert(key.as_str().to_string(), value.as_str().to_string()); + } + _ => return Err(ParseLineError::L3Token), + } + } + + Ok(token_map) +} + +/// Get the resctrl line type. We only support L3{,CODE,DATA} and MB. +fn get_line_type(line: &str) -> LineType { + if line.starts_with("L3:") { + return LineType::L3Line; + } + if line.starts_with("L3CODE:") { + return LineType::L3CodeLine; + } + if line.starts_with("L3DATA:") { + return LineType::L3DataLine; + } + if line.starts_with("MB:") { + return LineType::MbLine; + } + + // Empty or unknown line. + LineType::Unknown +} + +/// Parse a resctrl line. +fn parse_line(line: &str) -> Option> { + let line_type = get_line_type(line); + + let maybe_tokens = match line_type { + LineType::L3Line => parse_l3_line(line).map(Some), + LineType::L3DataLine => parse_l3_line(line).map(Some), + LineType::L3CodeLine => parse_l3_line(line).map(Some), + LineType::MbLine => parse_mb_line(line).map(Some), + LineType::Unknown => Ok(None), + }; + + match maybe_tokens { + Err(err) => Some(Err(err)), + Ok(None) => None, + Ok(Some(tokens)) => Some(Ok(ParsedLine { line_type, tokens })), + } +} + +/// Compare two sets of parsed lines. Do this both ways because of possible +/// duplicate lines, meaning that the vector lengths may be different. +fn compare_lines(first_lines: &[ParsedLine], second_lines: &[ParsedLine]) -> bool { + first_lines.iter().all(|line| second_lines.contains(line)) + && second_lines.iter().all(|line| first_lines.contains(line)) +} + +/// Compares that two strings have the same set of lines (even if the lines are +/// in different order). +fn is_same_schema(combined_schema: &str, existing_schema: &str) -> Result { + // Parse the strings first to lines and then to structs. 
Also filter + // out lines that are non-L3{DATA,CODE} and non-MB. + let combined = combined_schema + .lines() + .filter_map(parse_line) + .collect::, _>>()?; + let existing = existing_schema + .lines() + .filter_map(parse_line) + .collect::, _>>()?; + + // Compare the two sets of parsed lines. + Ok(compare_lines(&combined, &existing)) +} + +/// Combines the l3_cache_schema and mem_bw_schema values together with the +/// rules given in Linux OCI runtime config spec. If clos_id_was_set parameter +/// is true and the directory wasn't created, the rules say that the schemas +/// need to be compared with the existing value and an error must be generated +/// if they don't match. +fn write_resctrl_schemata( + path: &Path, + id: &str, + l3_cache_schema: &Option, + mem_bw_schema: &Option, + clos_id_was_set: bool, + created_dir: bool, +) -> Result<()> { + let schemata = path.to_owned().join(id).join("schemata"); + let maybe_combined_schema = combine_l3_cache_and_mem_bw_schemas(l3_cache_schema, mem_bw_schema); + + if let Some(combined_schema) = maybe_combined_schema { + if clos_id_was_set && !created_dir { + // Compare existing schema and error out if no match. + let data = fs::read_to_string(&schemata).map_err(IntelRdtError::ReadSchemata)?; + if !is_same_schema(&combined_schema, &data)? { + Err(IntelRdtError::ExistingSchemataMismatch)?; + } + } else { + // Write the combined schema to the schemata file. + // TODO(ipuustin): File doesn't need to be created, but it's easier + // to test this way. Fix the tests so that the fake resctrl + // filesystem is pre-populated. + let mut file = OpenOptions::new() + .create(true) + .write(true) + .open(schemata) + .map_err(IntelRdtError::OpenSchemata)?; + // Prevent write!() from writing the newline with a separate call. 
+ let schema_with_newline = combined_schema + "\n"; + write!(file, "{schema_with_newline}").map_err(IntelRdtError::WriteSchemata)?; + } + } + + Ok(()) +} + +/// Sets up Intel RDT configuration for the container process based on the +/// OCI config. The result bool tells whether or not we need to clean up +/// the created subdirectory. +pub fn setup_intel_rdt( + maybe_container_id: Option<&str>, + init_pid: &Pid, + intel_rdt: &LinuxIntelRdt, +) -> Result { + // Find mounted resctrl filesystem, error out if it can't be found. + let path = find_resctrl_mount_point().map_err(|err| { + tracing::error!("failed to find a mounted resctrl file system"); + err + })?; + let clos_id_set = intel_rdt.clos_id().is_some(); + let only_clos_id_set = + clos_id_set && intel_rdt.l3_cache_schema().is_none() && intel_rdt.mem_bw_schema().is_none(); + let id = match (intel_rdt.clos_id(), maybe_container_id) { + (Some(clos_id), _) => clos_id, + (None, Some(container_id)) => container_id, + (None, None) => Err(IntelRdtError::ResctrlIdNotFound)?, + }; + + let created_dir = write_container_pid_to_resctrl_tasks(&path, id, *init_pid, only_clos_id_set) + .map_err(|err| { + tracing::error!("failed to write container pid to resctrl tasks file"); + err + })?; + write_resctrl_schemata( + &path, + id, + intel_rdt.l3_cache_schema(), + intel_rdt.mem_bw_schema(), + clos_id_set, + created_dir, + ) + .map_err(|err| { + tracing::error!("failed to write schemata to resctrl schemata file"); + err + })?; + + // If closID is not set and the runtime has created the sub-directory, + // the runtime MUST remove the sub-directory when the container is deleted. 
+ let need_to_delete_directory = !clos_id_set && created_dir; + + Ok(need_to_delete_directory) +} + +#[cfg(test)] +mod test { + use super::*; + use anyhow::Result; + + #[test] + fn test_combine_schemas() -> Result<()> { + let res = combine_l3_cache_and_mem_bw_schemas(&None, &None); + assert!(res.is_none()); + + let l3_1 = "L3:0=f;1=f0"; + let bw_1 = "MB:0=70;1=20"; + + let res = combine_l3_cache_and_mem_bw_schemas(&Some(l3_1.to_owned()), &None); + assert!(res.is_some()); + assert!(res.unwrap() == "L3:0=f;1=f0"); + + let res = combine_l3_cache_and_mem_bw_schemas(&None, &Some(bw_1.to_owned())); + assert!(res.is_some()); + assert!(res.unwrap() == "MB:0=70;1=20"); + + let res = + combine_l3_cache_and_mem_bw_schemas(&Some(l3_1.to_owned()), &Some(bw_1.to_owned())); + assert!(res.is_some()); + let val = res.unwrap(); + assert!(val.lines().any(|line| line == "MB:0=70;1=20")); + assert!(val.lines().any(|line| line == "L3:0=f;1=f0")); + + let l3_2 = "L3:0=f;1=f0\nL3:2=f\n;MB:0=20;1=70"; + let res = + combine_l3_cache_and_mem_bw_schemas(&Some(l3_2.to_owned()), &Some(bw_1.to_owned())); + assert!(res.is_some()); + let val = res.unwrap(); + assert!(val.lines().any(|line| line == "MB:0=70;1=20")); + assert!(val.lines().any(|line| line == "L3:0=f;1=f0")); + assert!(val.lines().any(|line| line == "L3:2=f")); + assert!(!val.lines().any(|line| line == "MB:0=20;1=70")); + + Ok(()) + } + + #[test] + fn test_is_same_schema() -> Result<()> { + // Exact same schemas. + assert!(is_same_schema("L3:0=f;1=f0", "L3:0=f;1=f0")?); + assert!(is_same_schema("L3DATA:0=f;1=f0", "L3DATA:0=f;1=f0")?); + assert!(is_same_schema("L3CODE:0=f;1=f0", "L3CODE:0=f;1=f0")?); + assert!(is_same_schema("MB:0=bar;1=f0", "MB:0=bar;1=f0")?); + assert!(is_same_schema("L3:", "L3:")?); + assert!(is_same_schema("MB:", "MB:")?); + + // Different schemas. 
+ assert!(!is_same_schema("L3:0=f;1=f0", "L3:2=f")?); + assert!(!is_same_schema("MB:0=bar;1=f0", "MB:0=foo;1=f0")?); + assert!(!is_same_schema("L3DATA:0=f;1=f0", "L3CODE:2=f")?); + assert!(!is_same_schema("L3DATA:0=f;1=f0", "L3CODE:2=f")?); + assert!(!is_same_schema("L3DATA:0=f", "L3CODE:0=f")?); + assert!(!is_same_schema("L3:0=f", "L3DATA:0=f")?); + assert!(!is_same_schema("L3CODE:0=f", "L3:0=f")?); + assert!(!is_same_schema("MB:0=f", "L3:0=f")?); + + // Exact same multi-line schema. + assert!(is_same_schema( + "L3:0=f;1=f0\nL3:2=f", + "L3:0=f;1=f0\nL3:2=f" + )?); + + // Unknown line type is ignored. + assert!(is_same_schema( + "L3:0=f;1=f0\nL3:2=f\nBAR:foo", + "L3:0=f;1=f0\nL3:2=f" + )?); + + // Different multi-line schema. + assert!(!is_same_schema( + "L3:0=f;1=f0\nL3:2=f\nL3:3=f", + "L3:0=f;1=f0\nL3:2=f" + )?); + + // Different lines (two ways). + assert!(!is_same_schema( + "L3:0=f;1=f0\nL3:2=f\nL3:3=f", + "L3:0=f;1=f0\nL3:2=f" + )?); + assert!(!is_same_schema( + "L3:0=f;1=f0\nL3:2=f", + "L3:0=f;1=f0\nL3:2=f\nL3:3=f" + )?); + + // Same schema, different token order. + assert!(is_same_schema("L3:1=f0;0=0", "L3:0=0;1=f0")?); + + // Same schema, different whitespace and semicolons. + assert!(is_same_schema("L3:;; 0 = f; ; 1=f0", "L3:0=f;1 = f0;;")?); + + // Same schema, different leading zeros in masks. + assert!(is_same_schema("L3:0=000f", "L3:0=0f")?); + assert!(is_same_schema("L3:0=000f", "L3:0=0f")?); + assert!(is_same_schema("L3:0=f", "L3:0=0f")?); + assert!(is_same_schema("L3:0=0", "L3:0=0000")?); + + // Invalid schemas. 
+ assert!(is_same_schema("L3:1=;0=f", "L3:1=;0=f").is_err()); + assert!(is_same_schema("L3:=0;0=f", "L3:=0;0=f").is_err()); + assert!(is_same_schema("L3:1=0=3;0=f", "L3:1=0=3;0=f").is_err()); + assert!(is_same_schema("L3:1=bar", "L3:1=bar").is_err()); + assert!(is_same_schema("MB:1=;0=f", "MB:1=;0=f").is_err()); + assert!(is_same_schema("MB:=0;0=f", "MB:=0;0=f").is_err()); + assert!(is_same_schema("MB:1=0=3;0=f", "MB:1=0=3;0=f").is_err()); + + Ok(()) + } + + #[test] + fn test_write_pid_to_resctrl_tasks() -> Result<()> { + let tmp = tempfile::tempdir().unwrap(); + + // Create the directory for id "foo". + let res = + write_container_pid_to_resctrl_tasks(tmp.path(), "foo", Pid::from_raw(1000), false); + assert!(res.unwrap()); // new directory created + let res = fs::read_to_string(tmp.path().join("foo").join("tasks")); + assert!(res.unwrap() == "1000"); + + // Create the same directory the second time. + let res = + write_container_pid_to_resctrl_tasks(tmp.path(), "foo", Pid::from_raw(1500), false); + assert!(!res.unwrap()); // no new directory created + + // If just clos_id then throw an error. + let res = + write_container_pid_to_resctrl_tasks(tmp.path(), "foobar", Pid::from_raw(2000), true); + assert!(res.is_err()); + + // If the directory already exists then it's fine to have just clos_id. + let res = + write_container_pid_to_resctrl_tasks(tmp.path(), "foo", Pid::from_raw(2500), true); + assert!(!res.unwrap()); // no new directory created + + Ok(()) + } + + #[test] + fn test_write_resctrl_schemata() -> Result<()> { + let tmp = tempfile::tempdir().unwrap(); + + let res = + write_container_pid_to_resctrl_tasks(tmp.path(), "foobar", Pid::from_raw(1000), false); + assert!(res.unwrap()); // new directory created + + // No schemes, clos_id was not set, directory created (with container id). 
+ let res = write_resctrl_schemata(tmp.path(), "foobar", &None, &None, false, true); + assert!(res.is_ok()); + let res = fs::read_to_string(tmp.path().join("foobar").join("schemata")); + assert!(res.is_err()); // File not found because no schemes. + + let l3_1 = "L3:0=f;1=f0\nL3:2=f\nMB:0=20;1=70"; + let bw_1 = "MB:0=70;1=20"; + let res = write_resctrl_schemata( + tmp.path(), + "foobar", + &Some(l3_1.to_owned()), + &Some(bw_1.to_owned()), + false, + true, + ); + assert!(res.is_ok()); + + let res = fs::read_to_string(tmp.path().join("foobar").join("schemata")); + assert!(res.is_ok()); + assert!(is_same_schema( + "L3:0=f;1=f0\nL3:2=f\nMB:0=70;1=20\n", + &res.unwrap() + )?); + + // Try the verification case. If the directory existed (was not created + // by us) and the clos_id was set, it needs to contain the same data as + // we are trying to set. This is the same data: + let res = write_resctrl_schemata( + tmp.path(), + "foobar", + &Some(l3_1.to_owned()), + &Some(bw_1.to_owned()), + true, + false, + ); + assert!(res.is_ok()); + + // And this different data: + let l3_2 = "L3:0=f;1=f0\nMB:0=20;1=70"; + let bw_2 = "MB:0=70;1=20"; + let res = write_resctrl_schemata( + tmp.path(), + "foobar", + &Some(l3_2.to_owned()), + &Some(bw_2.to_owned()), + true, + false, + ); + assert!(res.is_err()); + + Ok(()) + } +} diff --git a/crates/libcontainer/src/process/message.rs b/crates/libcontainer/src/process/message.rs index 8cdbaf8a8..9b04723f4 100644 --- a/crates/libcontainer/src/process/message.rs +++ b/crates/libcontainer/src/process/message.rs @@ -1,7 +1,8 @@ -/// Used as a wrapper for messages to be sent between child and parent processes +use core::fmt; use serde::{Deserialize, Serialize}; -#[derive(Debug, Serialize, Deserialize)] +/// Used as a wrapper for messages to be sent between child and parent processes +#[derive(Debug, Serialize, Deserialize, Clone)] pub enum Message { IntermediateReady(i32), InitReady, @@ -11,3 +12,17 @@ pub enum Message { SeccompNotifyDone, 
ExecFailed(String), } + +impl fmt::Display for Message { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Message::IntermediateReady(pid) => write!(f, "IntermediateReady({})", pid), + Message::InitReady => write!(f, "InitReady"), + Message::WriteMapping => write!(f, "WriteMapping"), + Message::MappingWritten => write!(f, "MappingWritten"), + Message::SeccompNotify => write!(f, "SeccompNotify"), + Message::SeccompNotifyDone => write!(f, "SeccompNotifyDone"), + Message::ExecFailed(s) => write!(f, "ExecFailed({})", s), + } + } +} diff --git a/crates/libcontainer/src/process/mod.rs b/crates/libcontainer/src/process/mod.rs index 288993d72..6a8d98624 100644 --- a/crates/libcontainer/src/process/mod.rs +++ b/crates/libcontainer/src/process/mod.rs @@ -6,5 +6,8 @@ pub mod channel; pub mod container_init_process; pub mod container_intermediate_process; pub mod container_main_process; -pub mod fork; -pub mod message; +mod fork; +pub mod intel_rdt; +mod message; +#[cfg(feature = "libseccomp")] +mod seccomp_listener; diff --git a/crates/libcontainer/src/process/seccomp_listener.rs b/crates/libcontainer/src/process/seccomp_listener.rs new file mode 100644 index 000000000..608ff725b --- /dev/null +++ b/crates/libcontainer/src/process/seccomp_listener.rs @@ -0,0 +1,172 @@ +use crate::container::ContainerProcessState; +use crate::seccomp; +use nix::{ + sys::socket::{self, UnixAddr}, + unistd, +}; +use oci_spec::runtime; +use std::{io::IoSlice, path::Path}; + +use super::channel; + +#[derive(Debug, thiserror::Error)] +pub enum SeccompListenerError { + #[error("notify will require seccomp listener path to be set")] + MissingListenerPath, + #[error("failed to encode container process state")] + EncodeState(#[source] serde_json::Error), + #[error(transparent)] + ChannelError(#[from] channel::ChannelError), + #[error("unix syscall fails")] + UnixOther(#[source] nix::Error), +} + +type Result = std::result::Result; + +pub fn sync_seccomp( + seccomp: 
&runtime::LinuxSeccomp, + state: &ContainerProcessState, + init_sender: &mut channel::InitSender, + main_receiver: &mut channel::MainReceiver, +) -> Result<()> { + if seccomp::is_notify(seccomp) { + tracing::debug!("main process waiting for sync seccomp"); + let seccomp_fd = main_receiver.wait_for_seccomp_request()?; + let listener_path = seccomp + .listener_path() + .as_ref() + .ok_or(SeccompListenerError::MissingListenerPath)?; + let encoded_state = serde_json::to_vec(state).map_err(SeccompListenerError::EncodeState)?; + sync_seccomp_send_msg(listener_path, &encoded_state, seccomp_fd).map_err(|err| { + tracing::error!("failed to send msg to seccomp listener: {}", err); + err + })?; + init_sender.seccomp_notify_done()?; + // Once we sent the seccomp notify fd to the seccomp listener, we can + // safely close the fd. The SCM_RIGHTS msg will duplicate the fd to the + // process on the other end of the listener. + let _ = unistd::close(seccomp_fd); + } + + Ok(()) +} + +fn sync_seccomp_send_msg(listener_path: &Path, msg: &[u8], fd: i32) -> Result<()> { + // The seccomp listener has specific instructions on how to transmit the + // information through seccomp listener. Therefore, we have to use + // libc/nix APIs instead of Rust std lib APIs to maintain flexibility. 
+ let socket = socket::socket( + socket::AddressFamily::Unix, + socket::SockType::Stream, + socket::SockFlag::empty(), + None, + ) + .map_err(|err| { + tracing::error!( + ?err, + "failed to create unix domain socket for seccomp listener" + ); + SeccompListenerError::UnixOther(err) + })?; + let unix_addr = socket::UnixAddr::new(listener_path).map_err(|err| { + tracing::error!( + ?err, + ?listener_path, + "failed to create unix domain socket address" + ); + SeccompListenerError::UnixOther(err) + })?; + socket::connect(socket, &unix_addr).map_err(|err| { + tracing::error!( + ?err, + ?listener_path, + "failed to connect to seccomp notify listener path" + ); + SeccompListenerError::UnixOther(err) + })?; + // We have to use sendmsg here because the spec requires us to send seccomp notify fds through + // SCM_RIGHTS message. + // Ref: https://man7.org/linux/man-pages/man3/sendmsg.3p.html + // Ref: https://man7.org/linux/man-pages/man3/cmsg.3.html + let iov = [IoSlice::new(msg)]; + let fds = [fd]; + let cmsgs = socket::ControlMessage::ScmRights(&fds); + socket::sendmsg::(socket, &iov, &[cmsgs], socket::MsgFlags::empty(), None).map_err( + |err| { + tracing::error!(?err, "failed to write container state to seccomp listener"); + SeccompListenerError::UnixOther(err) + }, + )?; + // The spec requires the listener socket to be closed immediately after sending. 
+ let _ = unistd::close(socket); + + Ok(()) +} + +#[cfg(test)] +mod tests { + use crate::{container::ContainerProcessState, process::channel}; + + use super::*; + use anyhow::Result; + use oci_spec::runtime::{LinuxSeccompAction, LinuxSeccompBuilder, LinuxSyscallBuilder}; + use serial_test::serial; + + #[test] + #[serial] + fn test_sync_seccomp() -> Result<()> { + use std::io::Read; + use std::os::unix::io::IntoRawFd; + use std::os::unix::net::UnixListener; + use std::thread; + + let tmp_dir = tempfile::tempdir()?; + let scmp_file = std::fs::OpenOptions::new() + .write(true) + .create(true) + .open(tmp_dir.path().join("scmp_file"))?; + + std::fs::OpenOptions::new() + .write(true) + .create(true) + .open(tmp_dir.path().join("socket_file.sock"))?; + + let (mut main_sender, mut main_receiver) = channel::main_channel()?; + let (mut init_sender, mut init_receiver) = channel::init_channel()?; + let socket_path = tmp_dir.path().join("socket_file.sock"); + let socket_path_seccomp_th = socket_path.clone(); + + let state = ContainerProcessState::default(); + let want = serde_json::to_string(&state)?; + let th = thread::spawn(move || { + sync_seccomp( + &LinuxSeccompBuilder::default() + .listener_path(socket_path_seccomp_th) + .syscalls(vec![LinuxSyscallBuilder::default() + .action(LinuxSeccompAction::ScmpActNotify) + .build() + .unwrap()]) + .build() + .unwrap(), + &state, + &mut init_sender, + &mut main_receiver, + ) + .unwrap(); + }); + + let fd = scmp_file.into_raw_fd(); + assert!(main_sender.seccomp_notify_request(fd).is_ok()); + + std::fs::remove_file(socket_path.clone())?; + let lis = UnixListener::bind(socket_path)?; + let (mut socket, _) = lis.accept()?; + let mut got = String::new(); + socket.read_to_string(&mut got)?; + assert!(init_receiver.wait_for_seccomp_request_done().is_ok()); + + assert_eq!(want, got); + assert!(th.join().is_ok()); + Ok(()) + } +} diff --git a/crates/libcontainer/src/rootfs/device.rs b/crates/libcontainer/src/rootfs/device.rs index 
ba79785da..31b0db9af 100644 --- a/crates/libcontainer/src/rootfs/device.rs +++ b/crates/libcontainer/src/rootfs/device.rs @@ -1,7 +1,6 @@ use super::utils::to_sflag; use crate::syscall::{syscall::create_syscall, Syscall}; -use crate::utils::{self, PathBufExt}; -use anyhow::{bail, Context, Result}; +use crate::utils::PathBufExt; use nix::{ fcntl::{open, OFlag}, mount::MsFlags, @@ -11,6 +10,22 @@ use nix::{ use oci_spec::runtime::LinuxDevice; use std::path::{Path, PathBuf}; +#[derive(Debug, thiserror::Error)] +pub enum DeviceError { + #[error("{0:?} is not a valid device path")] + InvalidDevicePath(std::path::PathBuf), + #[error("failed syscall to create device")] + Syscall(#[from] crate::syscall::SyscallError), + #[error(transparent)] + Nix(#[from] nix::Error), + #[error(transparent)] + Other(Box), + #[error("{0}")] + Custom(String), +} + +type Result = std::result::Result; + pub struct Device { syscall: Box, } @@ -28,6 +43,10 @@ impl Device { } } + pub fn new_with_syscall(syscall: Box) -> Device { + Device { syscall } + } + pub fn create_devices<'a, I>(&self, rootfs: &Path, devices: I, bind: bool) -> Result<()> where I: IntoIterator, @@ -37,7 +56,11 @@ impl Device { .into_iter() .map(|dev| { if !dev.path().starts_with("/dev") { - bail!("{} is not a valid device path", dev.path().display()); + tracing::error!( + "{:?} is not a valid device path starting with /dev", + dev.path() + ); + return Err(DeviceError::InvalidDevicePath(dev.path().to_path_buf())); } if bind { @@ -53,22 +76,38 @@ impl Device { } fn bind_dev(&self, rootfs: &Path, dev: &LinuxDevice) -> Result<()> { - let full_container_path = create_container_dev_path(rootfs, dev) - .with_context(|| format!("could not create container path for device {:?}", dev))?; + let full_container_path = create_container_dev_path(rootfs, dev)?; + tracing::debug!( + "bind_dev with full container path {:?}", + full_container_path + ); let fd = open( &full_container_path, OFlag::O_RDWR | OFlag::O_CREAT, 
Mode::from_bits_truncate(0o644), - )?; + ) + .map_err(|err| { + tracing::error!("failed to open bind dev {:?}: {}", full_container_path, err); + err + })?; close(fd)?; - self.syscall.mount( - Some(dev.path()), - &full_container_path, - Some("bind"), - MsFlags::MS_BIND, - None, - )?; + self.syscall + .mount( + Some(dev.path()), + &full_container_path, + Some("bind"), + MsFlags::MS_BIND, + None, + ) + .map_err(|err| { + tracing::error!( + ?err, + path = ?full_container_path, + "failed to mount bind dev", + ); + err + })?; Ok(()) } @@ -81,38 +120,74 @@ impl Device { | ((major & !0xfff) << 32)) as u64 } - let full_container_path = create_container_dev_path(rootfs, dev) - .with_context(|| format!("could not create container path for device {:?}", dev))?; + let full_container_path = create_container_dev_path(rootfs, dev)?; - self.syscall.mknod( - &full_container_path, - to_sflag(dev.typ()), - Mode::from_bits_truncate(dev.file_mode().unwrap_or(0)), - makedev(dev.major(), dev.minor()), - )?; - self.syscall.chown( - &full_container_path, - dev.uid().map(Uid::from_raw), - dev.gid().map(Gid::from_raw), - )?; + self.syscall + .mknod( + &full_container_path, + to_sflag(dev.typ()), + Mode::from_bits_truncate(dev.file_mode().unwrap_or(0)), + makedev(dev.major(), dev.minor()), + ) + .map_err(|err| { + tracing::error!( + ?err, + path = ?full_container_path, + major = ?dev.major(), + minor = ?dev.minor(), + "failed to mknod device" + ); + + err + })?; + self.syscall + .chown( + &full_container_path, + dev.uid().map(Uid::from_raw), + dev.gid().map(Gid::from_raw), + ) + .map_err(|err| { + tracing::error!( + path = ?full_container_path, + ?err, + uid = ?dev.uid(), + gid = ?dev.gid(), + "failed to chown device" + ); + + err + })?; Ok(()) } } fn create_container_dev_path(rootfs: &Path, dev: &LinuxDevice) -> Result { - let relative_dev_path = dev - .path() - .as_relative() - .with_context(|| format!("could not convert {:?} to relative path", dev.path()))?; - let full_container_path = 
utils::secure_join(rootfs, relative_dev_path) - .with_context(|| format!("could not join {:?} with {:?}", rootfs, dev.path()))?; - - crate::utils::create_dir_all( + let relative_dev_path = dev.path().as_relative().map_err(|err| { + tracing::error!( + "failed to convert {:?} to relative path: {}", + dev.path(), + err + ); + DeviceError::Other(err.into()) + })?; + let full_container_path = safe_path::scoped_join(rootfs, relative_dev_path).map_err(|err| { + tracing::error!("failed to join {rootfs:?} with {:?}: {err}", dev.path()); + DeviceError::Other(err.into()) + })?; + std::fs::create_dir_all( full_container_path .parent() .unwrap_or_else(|| Path::new("")), - )?; + ) + .map_err(|err| { + tracing::error!( + "failed to create parent dir of {:?}: {}", + full_container_path, + err + ); + DeviceError::Other(err.into()) + })?; Ok(full_container_path) } @@ -121,7 +196,7 @@ fn create_container_dev_path(rootfs: &Path, dev: &LinuxDevice) -> Result Result<()> { + let tmp_dir = tempfile::tempdir()?; + let device = Device::new_with_syscall(Box::::default()); assert!(device .bind_dev( tmp_dir.path(), @@ -157,12 +232,13 @@ mod tests { .unwrap() .get_mount_args()[0]; assert_eq!(want, *got); + Ok(()) } #[test] - fn test_mknod_dev() { - let tmp_dir = TempDir::new("/tmp/test_mknod_dev").unwrap(); - let device = Device::new(); + fn test_mknod_dev() -> Result<()> { + let tmp_dir = tempfile::tempdir()?; + let device = Device::new_with_syscall(Box::::default()); assert!(device .mknod_dev( tmp_dir.path(), @@ -205,12 +281,15 @@ mod tests { .unwrap() .get_chown_args()[0]; assert_eq!(want_chown, *got_chown); + + Ok(()) } #[test] - fn test_create_devices() { - let tmp_dir = TempDir::new("/tmp/test_create_devices").unwrap(); - let device = Device::new(); + fn test_create_devices() -> Result<()> { + let tmp_dir = tempfile::tempdir()?; + let device = Device::new_with_syscall(Box::::default()); + let devices = vec![LinuxDeviceBuilder::default() .path(PathBuf::from("/dev/null")) .major(1) @@ 
-258,5 +337,7 @@ mod tests { .unwrap() .get_mknod_args()[0]; assert_eq!(want, *got); + + Ok(()) } } diff --git a/crates/libcontainer/src/rootfs/mod.rs b/crates/libcontainer/src/rootfs/mod.rs index aa7137e7b..33d8e8217 100644 --- a/crates/libcontainer/src/rootfs/mod.rs +++ b/crates/libcontainer/src/rootfs/mod.rs @@ -1,11 +1,32 @@ -//! During kernel initialization, a minimal replica of the ramfs filesystem is loaded, called rootfs. -//! Most systems mount another filesystem over it +//! During kernel initialization, a minimal replica of the ramfs filesystem is +//! loaded, called rootfs. Most systems mount another filesystem over it #[allow(clippy::module_inception)] pub(crate) mod rootfs; pub use rootfs::RootFS; -pub(super) mod device; +pub mod device; +pub use device::Device; + pub(super) mod mount; pub(super) mod symlink; -pub(super) mod utils; + +pub mod utils; + +#[derive(Debug, thiserror::Error)] +pub enum RootfsError { + #[error("failed syscall")] + Syscall(#[from] crate::syscall::SyscallError), + #[error(transparent)] + MissingSpec(#[from] crate::error::MissingSpecError), + #[error("unknown rootfs propagation")] + UnknownRootfsPropagation(String), + #[error(transparent)] + Symlink(#[from] symlink::SymlinkError), + #[error(transparent)] + Mount(#[from] mount::MountError), + #[error(transparent)] + Device(#[from] device::DeviceError), +} + +type Result = std::result::Result; diff --git a/crates/libcontainer/src/rootfs/mount.rs b/crates/libcontainer/src/rootfs/mount.rs index 1dc0cc533..e86e05efe 100644 --- a/crates/libcontainer/src/rootfs/mount.rs +++ b/crates/libcontainer/src/rootfs/mount.rs @@ -1,28 +1,51 @@ #[cfg(feature = "v1")] use super::symlink::Symlink; -use super::utils::{find_parent_mount, parse_mount, MountOptionConfig}; +use super::{ + symlink::SymlinkError, + utils::{parse_mount, MountOptionConfig}, +}; use crate::{ - syscall::{linux, syscall::create_syscall, Syscall}, - utils, + syscall::{linux, syscall::create_syscall, Syscall, SyscallError}, 
utils::PathBufExt, }; -#[cfg(feature = "v2")] -use anyhow::anyhow; -use anyhow::{bail, Context, Result}; use libcgroups::common::CgroupSetup::{Hybrid, Legacy, Unified}; #[cfg(feature = "v1")] use libcgroups::common::DEFAULT_CGROUP_ROOT; -use nix::{dir::Dir, errno::Errno, fcntl::OFlag, mount::MsFlags, sys::stat::Mode}; +use nix::{dir::Dir, errno::Errno, fcntl::OFlag, mount::MsFlags, sys::stat::Mode, NixPath}; use oci_spec::runtime::{Mount as SpecMount, MountBuilder as SpecMountBuilder}; use procfs::process::{MountInfo, MountOptFields, Process}; +use safe_path; use std::fs::{canonicalize, create_dir_all, OpenOptions}; use std::mem; use std::os::unix::io::AsRawFd; use std::path::{Path, PathBuf}; - #[cfg(feature = "v1")] use std::{borrow::Cow, collections::HashMap}; +#[derive(Debug, thiserror::Error)] +pub enum MountError { + #[error("no source in mount spec")] + NoSource, + #[error("io error")] + Io(#[from] std::io::Error), + #[error("syscall")] + Syscall(#[from] crate::syscall::SyscallError), + #[error("nix error")] + Nix(#[from] nix::Error), + #[error("failed to build oci spec")] + SpecBuild(#[from] oci_spec::OciSpecError), + #[error(transparent)] + Other(Box), + #[error("{0}")] + Custom(String), + #[error("symlink")] + Symlink(#[from] SymlinkError), + #[error("procfs failed")] + Procfs(#[from] procfs::ProcError), +} + +type Result = std::result::Result; + #[derive(Debug)] pub struct MountOptions<'a> { pub root: &'a Path, @@ -48,27 +71,33 @@ impl Mount { } pub fn setup_mount(&self, mount: &SpecMount, options: &MountOptions) -> Result<()> { - log::debug!("Mounting {:?}", mount); + tracing::debug!("mounting {:?}", mount); let mut mount_option_config = parse_mount(mount); match mount.typ().as_deref() { Some("cgroup") => { - match libcgroups::common::get_cgroup_setup() - .context("failed to determine cgroup setup")? 
- { + match libcgroups::common::get_cgroup_setup().map_err(|err| { + tracing::error!("failed to determine cgroup setup: {}", err); + MountError::Other(err.into()) + })? { Legacy | Hybrid => { #[cfg(not(feature = "v1"))] panic!("libcontainer can't run in a Legacy or Hybrid cgroup setup without the v1 feature"); #[cfg(feature = "v1")] - self.mount_cgroup_v1(mount, options) - .context("failed to mount cgroup v1")? + self.mount_cgroup_v1(mount, options).map_err(|err| { + tracing::error!("failed to mount cgroup v2: {}", err); + err + })? } Unified => { #[cfg(not(feature = "v2"))] panic!("libcontainer can't run in a Unified cgroup setup without the v2 feature"); #[cfg(feature = "v2")] self.mount_cgroup_v2(mount, options, &mount_option_config) - .context("failed to mount cgroup v2")? + .map_err(|err| { + tracing::error!("failed to mount cgroup v2: {}", err); + err + })? } } } @@ -81,7 +110,10 @@ impl Mount { &mount_option_config, options.label, ) - .with_context(|| format!("failed to mount /dev: {:?}", mount))?; + .map_err(|err| { + tracing::error!("failed to mount /dev: {}", err); + err + })?; } else { self.mount_into_container( mount, @@ -89,7 +121,10 @@ impl Mount { &mount_option_config, options.label, ) - .with_context(|| format!("failed to mount: {:?}", mount))?; + .map_err(|err| { + tracing::error!("failed to mount {:?}: {}", mount, err); + err + })?; } } } @@ -99,7 +134,7 @@ impl Mount { #[cfg(feature = "v1")] fn mount_cgroup_v1(&self, cgroup_mount: &SpecMount, options: &MountOptions) -> Result<()> { - log::debug!("Mounting cgroup v1 filesystem"); + tracing::debug!("mounting cgroup v1 filesystem"); // create tmpfs into which the cgroup subsystems will be mounted let tmpfs = SpecMountBuilder::default() .source("tmpfs") @@ -112,33 +147,46 @@ impl Mount { .collect::>(), ) .build() - .context("failed to build tmpfs for cgroup")?; + .map_err(|err| { + tracing::error!("failed to build tmpfs for cgroup: {}", err); + err + })?; - self.setup_mount(&tmpfs, options) - 
.context("failed to mount tmpfs for cgroup")?; + self.setup_mount(&tmpfs, options).map_err(|err| { + tracing::error!("failed to mount tmpfs for cgroup: {}", err); + err + })?; // get all cgroup mounts on the host system let host_mounts: Vec = libcgroups::v1::util::list_subsystem_mount_points() - .context("failed to get subsystem mount points")? + .map_err(|err| { + tracing::error!("failed to get subsystem mount points: {}", err); + MountError::Other(err.into()) + })? .into_iter() .filter(|p| p.as_path().starts_with(DEFAULT_CGROUP_ROOT)) .collect(); - log::debug!("cgroup mounts: {:?}", host_mounts); + tracing::debug!("cgroup mounts: {:?}", host_mounts); // get process cgroups let process_cgroups: HashMap = Process::myself()? - .cgroups() - .context("failed to get process cgroups")? + .cgroups()? .into_iter() .map(|c| (c.controllers.join(","), c.pathname)) .collect(); - log::debug!("Process cgroups: {:?}", process_cgroups); + tracing::debug!("Process cgroups: {:?}", process_cgroups); let cgroup_root = options .root .join_safely(cgroup_mount.destination()) - .context("could not join rootfs path with cgroup mount destination")?; - log::debug!("cgroup root: {:?}", cgroup_root); + .map_err(|err| { + tracing::error!( + "could not join rootfs path with cgroup mount destination: {}", + err + ); + MountError::Other(err.into()) + })?; + tracing::debug!("cgroup root: {:?}", cgroup_root); let symlink = Symlink::new(); @@ -165,7 +213,7 @@ impl Mount { symlink.setup_comount_symlinks(&cgroup_root, subsystem_name)?; } else { - log::warn!("could not get subsystem name from {:?}", host_mount); + tracing::warn!("could not get subsystem name from {:?}", host_mount); } } @@ -182,7 +230,7 @@ impl Mount { subsystem_name: &str, named: bool, ) -> Result<()> { - log::debug!( + tracing::debug!( "Mounting (namespaced) {:?} cgroup subsystem", subsystem_name ); @@ -197,10 +245,13 @@ impl Mount { .collect::>(), ) .build() - .with_context(|| format!("failed to build {}", subsystem_name))?; + 
.map_err(|err| { + tracing::error!("failed to build {subsystem_name} mount: {err}"); + err + })?; let data: Cow = if named { - format!("name={}", subsystem_name).into() + format!("name={subsystem_name}").into() } else { subsystem_name.into() }; @@ -217,7 +268,10 @@ impl Mount { &mount_options_config, options.label, ) - .with_context(|| format!("failed to mount {:?}", subsystem_mount)) + .map_err(|err| { + tracing::error!("failed to mount {subsystem_mount:?}: {err}"); + err + }) } #[cfg(feature = "v1")] @@ -230,9 +284,9 @@ impl Mount { host_mount: &Path, process_cgroups: &HashMap, ) -> Result<()> { - log::debug!("Mounting (emulated) {:?} cgroup subsystem", subsystem_name); + tracing::debug!("Mounting (emulated) {:?} cgroup subsystem", subsystem_name); let named_hierarchy: Cow = if named { - format!("name={}", subsystem_name).into() + format!("name={subsystem_name}").into() } else { subsystem_name.into() }; @@ -242,22 +296,24 @@ impl Mount { .source( host_mount .join_safely(proc_path.as_str()) - .with_context(|| { - format!( - "failed to join mount source for {} subsystem", - subsystem_name - ) + .map_err(|err| { + tracing::error!( + "failed to join mount source for {subsystem_name} subsystem: {}", + err + ); + MountError::Other(err.into()) })?, ) .destination( cgroup_mount .destination() .join_safely(subsystem_name) - .with_context(|| { - format!( - "failed to join mount destination for {} subsystem", - subsystem_name - ) + .map_err(|err| { + tracing::error!( + "failed to join mount destination for {subsystem_name} subsystem: {}", + err + ); + MountError::Other(err.into()) })?, ) .typ("bind") @@ -268,12 +324,14 @@ impl Mount { .collect::>(), ) .build()?; - log::debug!("Mounting emulated cgroup subsystem: {:?}", emulated); + tracing::debug!("Mounting emulated cgroup subsystem: {:?}", emulated); - self.setup_mount(&emulated, options) - .with_context(|| format!("failed to mount {} cgroup hierarchy", subsystem_name))?; + self.setup_mount(&emulated, 
options).map_err(|err| { + tracing::error!("failed to mount {subsystem_name} cgroup hierarchy: {}", err); + err + })?; } else { - log::warn!("Could not mount {:?} cgroup subsystem", subsystem_name); + tracing::warn!("Could not mount {:?} cgroup subsystem", subsystem_name); } Ok(()) @@ -286,7 +344,7 @@ impl Mount { options: &MountOptions, mount_option_config: &MountOptionConfig, ) -> Result<()> { - log::debug!("Mounting cgroup v2 filesystem"); + tracing::debug!("Mounting cgroup v2 filesystem"); let cgroup_mount = SpecMountBuilder::default() .typ("cgroup2") @@ -294,7 +352,7 @@ impl Mount { .destination(cgroup_mount.destination()) .options(Vec::new()) .build()?; - log::debug!("{:?}", cgroup_mount); + tracing::debug!("{:?}", cgroup_mount); if self .mount_into_container( @@ -303,28 +361,43 @@ impl Mount { mount_option_config, options.label, ) - .context("failed to mount into container") .is_err() { - let host_mount = libcgroups::v2::util::get_unified_mount_point() - .context("failed to get unified mount point")?; - - let process_cgroup = Process::myself()? + let host_mount = libcgroups::v2::util::get_unified_mount_point().map_err(|err| { + tracing::error!("failed to get unified mount point: {}", err); + MountError::Other(err.into()) + })?; + + let process_cgroup = Process::myself() + .map_err(|err| { + tracing::error!("failed to get /proc/self: {}", err); + MountError::Other(err.into()) + })? .cgroups() - .context("failed to get process cgroups")? + .map_err(|err| { + tracing::error!("failed to get process cgroups: {}", err); + MountError::Other(err.into()) + })? .into_iter() .find(|c| c.hierarchy == 0) .map(|c| PathBuf::from(c.pathname)) - .ok_or_else(|| anyhow!("failed to find unified process cgroup"))?; - + .ok_or_else(|| { + MountError::Custom("failed to find unified process cgroup".into()) + })?; let bind_mount = SpecMountBuilder::default() .typ("bind") - .source(host_mount.join_safely(process_cgroup)?) 
+ .source(host_mount.join_safely(process_cgroup).map_err(|err| { + tracing::error!("failed to join host mount for cgroup hierarchy: {}", err); + MountError::Other(err.into()) + })?) .destination(cgroup_mount.destination()) .options(Vec::new()) .build() - .context("failed to build cgroup bind mount")?; - log::debug!("{:?}", bind_mount); + .map_err(|err| { + tracing::error!("failed to build cgroup bind mount: {}", err); + err + })?; + tracing::debug!("{:?}", bind_mount); let mut mount_option_config = (*mount_option_config).clone(); mount_option_config.flags |= MsFlags::MS_BIND; @@ -334,7 +407,10 @@ impl Mount { &mount_option_config, options.label, ) - .context("failed to bind mount cgroup hierarchy")?; + .map_err(|err| { + tracing::error!("failed to bind mount cgroup hierarchy: {}", err); + err + })?; } Ok(()) @@ -343,7 +419,16 @@ impl Mount { /// Make parent mount of rootfs private if it was shared, which is required by pivot_root. /// It also makes sure following bind mount does not propagate in other namespaces. pub fn make_parent_mount_private(&self, rootfs: &Path) -> Result> { - let mount_infos = Process::myself()?.mountinfo()?; + let mount_infos = Process::myself() + .map_err(|err| { + tracing::error!("failed to get /proc/self: {}", err); + MountError::Other(err.into()) + })? 
+ .mountinfo() + .map_err(|err| { + tracing::error!("failed to get mount info: {}", err); + MountError::Other(err.into()) + })?; let parent_mount = find_parent_mount(rootfs, mount_infos)?; // check parent mount has 'shared' propagation type @@ -378,43 +463,57 @@ impl Mount { if let Some(l) = label { if typ != Some("proc") && typ != Some("sysfs") { match mount_option_config.data.is_empty() { - true => d = format!("context=\"{}\"", l), + true => d = format!("context=\"{l}\""), false => d = format!("{},context=\"{}\"", mount_option_config.data, l), } } } - let dest_for_host = utils::secure_join(rootfs, m.destination()) - .with_context(|| format!("failed to join {:?} with {:?}", rootfs, m.destination()))?; + let dest_for_host = safe_path::scoped_join(rootfs, m.destination()).map_err(|err| { + tracing::error!( + "failed to join rootfs {:?} with mount destination {:?}: {}", + rootfs, + m.destination(), + err + ); + MountError::Other(err.into()) + })?; let dest = Path::new(&dest_for_host); - let source = m - .source() - .as_ref() - .with_context(|| "no source in mount spec".to_string())?; + let source = m.source().as_ref().ok_or(MountError::NoSource)?; let src = if typ == Some("bind") { - let src = canonicalize(source) - .with_context(|| format!("failed to canonicalize: {:?}", source))?; + let src = canonicalize(source).map_err(|err| { + tracing::error!("failed to canonicalize {:?}: {}", source, err); + err + })?; let dir = if src.is_file() { Path::new(&dest).parent().unwrap() } else { Path::new(&dest) }; - create_dir_all(dir) - .with_context(|| format!("failed to create dir for bind mount: {:?}", dir))?; + create_dir_all(dir).map_err(|err| { + tracing::error!("failed to create dir for bind mount {:?}: {}", dir, err); + err + })?; - if src.is_file() { + if src.is_file() && !dest.exists() { OpenOptions::new() .create(true) .write(true) .open(dest) - .with_context(|| format!("failed to create file for bind mount: {:?}", src))?; + .map_err(|err| { + tracing::error!("failed 
to create file for bind mount {:?}: {}", src, err); + err + })?; } src } else { - create_dir_all(dest).with_context(|| format!("Failed to create device: {:?}", dest))?; + create_dir_all(dest).map_err(|err| { + tracing::error!("failed to create device: {:?}", dest); + err + })?; PathBuf::from(source) }; @@ -423,9 +522,10 @@ impl Mount { self.syscall .mount(Some(&*src), dest, typ, mount_option_config.flags, Some(&*d)) { - if let Some(errno) = err.downcast_ref() { + if let SyscallError::Nix(errno) = err { if !matches!(errno, Errno::EINVAL) { - bail!("mount of {:?} failed. {}", m.destination(), errno); + tracing::error!("mount of {:?} failed. {}", m.destination(), errno); + return Err(err.into()); } } @@ -437,7 +537,10 @@ impl Mount { mount_option_config.flags, Some(&mount_option_config.data), ) - .with_context(|| format!("failed to mount {:?} to {:?}", src, dest))?; + .map_err(|err| { + tracing::error!("failed to mount {src:?} to {dest:?}"); + err + })?; } if typ == Some("bind") @@ -458,7 +561,10 @@ impl Mount { mount_option_config.flags | MsFlags::MS_REMOUNT, None, ) - .with_context(|| format!("Failed to remount: {:?}", dest))?; + .map_err(|err| { + tracing::error!("failed to remount {:?}: {}", dest, err); + err + })?; } if let Some(mount_attr) = &mount_option_config.rec_attr { @@ -477,19 +583,33 @@ impl Mount { } } +/// Find parent mount of rootfs in given mount infos +pub fn find_parent_mount( + rootfs: &Path, + mount_infos: Vec, +) -> std::result::Result { + // find the longest mount point + let parent_mount_info = mount_infos + .into_iter() + .filter(|mi| rootfs.starts_with(&mi.mount_point)) + .max_by(|mi1, mi2| mi1.mount_point.len().cmp(&mi2.mount_point.len())) + .ok_or_else(|| { + MountError::Custom(format!("can't find the parent mount of {:?}", rootfs)) + })?; + Ok(parent_mount_info) +} + #[cfg(test)] mod tests { - #[cfg(feature = "v1")] - use std::fs; - use super::*; use crate::syscall::test::{MountArgs, TestHelperSyscall}; - use 
crate::utils::create_temp_dir; - use anyhow::Result; + use anyhow::{Context, Result}; + #[cfg(feature = "v1")] + use std::fs; #[test] fn test_mount_to_container() { - let tmp_dir = create_temp_dir("test_mount_to_container").unwrap(); + let tmp_dir = tempfile::tempdir().unwrap(); { let m = Mount::new(); let mount = &SpecMountBuilder::default() @@ -585,7 +705,7 @@ mod tests { #[test] fn test_make_parent_mount_private() { - let tmp_dir = create_temp_dir("test_make_parent_mount_private").unwrap(); + let tmp_dir = tempfile::tempdir().unwrap(); let m = Mount::new(); let result = m.make_parent_mount_private(tmp_dir.path()); assert!(result.is_ok()); @@ -615,7 +735,7 @@ mod tests { #[test] #[cfg(feature = "v1")] fn test_namespaced_subsystem_success() -> Result<()> { - let tmp = create_temp_dir("test_namespaced_subsystem_success")?; + let tmp = tempfile::tempdir().unwrap(); let container_cgroup = Path::new("/container_cgroup"); let mounter = Mount::new(); @@ -641,7 +761,10 @@ mod tests { let expected = MountArgs { source: Some(PathBuf::from("cgroup")), - target: tmp.join_safely(container_cgroup)?.join(subsystem_name), + target: tmp + .path() + .join_safely(container_cgroup)? + .join(subsystem_name), fstype: Some("cgroup".to_owned()), flags: MsFlags::MS_NOEXEC | MsFlags::MS_NOSUID | MsFlags::MS_NODEV, data: Some("cpu".to_owned()), @@ -664,8 +787,8 @@ mod tests { #[cfg(feature = "v1")] fn test_emulated_subsystem_success() -> Result<()> { // arrange - let tmp = create_temp_dir("test_emulated_subsystem")?; - let host_cgroup_mount = tmp.join("host_cgroup"); + let tmp = tempfile::tempdir().unwrap(); + let host_cgroup_mount = tmp.path().join("host_cgroup"); let host_cgroup = host_cgroup_mount.join("cpu/container1"); fs::create_dir_all(&host_cgroup)?; @@ -704,7 +827,10 @@ mod tests { // assert let expected = MountArgs { source: Some(host_cgroup), - target: tmp.join_safely(container_cgroup)?.join(subsystem_name), + target: tmp + .path() + .join_safely(container_cgroup)? 
+ .join(subsystem_name), fstype: Some("bind".to_owned()), flags: MsFlags::MS_BIND | MsFlags::MS_REC, data: Some("".to_owned()), @@ -727,7 +853,7 @@ mod tests { #[cfg(feature = "v1")] fn test_mount_cgroup_v1() -> Result<()> { // arrange - let tmp = create_temp_dir("test_mount_cgroup_v1")?; + let tmp = tempfile::tempdir().unwrap(); let container_cgroup = PathBuf::from("/sys/fs/cgroup"); let spec_cgroup_mount = SpecMountBuilder::default() @@ -764,7 +890,7 @@ mod tests { let expected = MountArgs { source: Some(PathBuf::from("tmpfs".to_owned())), - target: tmp.join_safely(&container_cgroup)?, + target: tmp.path().join_safely(&container_cgroup)?, fstype: Some("tmpfs".to_owned()), flags: MsFlags::MS_NOEXEC | MsFlags::MS_NOSUID | MsFlags::MS_NODEV, data: Some("mode=755".to_owned()), @@ -775,12 +901,15 @@ mod tests { let subsystem_name = host_mount.file_name().and_then(|f| f.to_str()).unwrap(); let expected = MountArgs { source: Some(PathBuf::from("cgroup".to_owned())), - target: tmp.join_safely(&container_cgroup)?.join(subsystem_name), + target: tmp + .path() + .join_safely(&container_cgroup)? 
+ .join(subsystem_name), fstype: Some("cgroup".to_owned()), flags: MsFlags::MS_NOEXEC | MsFlags::MS_NOSUID | MsFlags::MS_NODEV, data: Some( if subsystem_name == "systemd" { - format!("name={}", subsystem_name) + format!("name={subsystem_name}") } else { subsystem_name.to_string() } @@ -797,7 +926,7 @@ mod tests { #[cfg(feature = "v2")] fn test_mount_cgroup_v2() -> Result<()> { // arrange - let tmp = create_temp_dir("test_mount_cgroup_v2")?; + let tmp = tempfile::tempdir().unwrap(); let container_cgroup = PathBuf::from("/sys/fs/cgroup"); let spec_cgroup_mount = SpecMountBuilder::default() @@ -829,7 +958,7 @@ mod tests { // assert let expected = MountArgs { source: Some(PathBuf::from("cgroup".to_owned())), - target: tmp.join_safely(container_cgroup)?, + target: tmp.path().join_safely(container_cgroup)?, fstype: Some("cgroup2".to_owned()), flags: MsFlags::MS_NOEXEC | MsFlags::MS_NOSUID | MsFlags::MS_NODEV, data: Some("".to_owned()), @@ -847,4 +976,46 @@ mod tests { Ok(()) } + + #[test] + fn test_find_parent_mount() -> anyhow::Result<()> { + let mount_infos = vec![ + MountInfo { + mnt_id: 11, + pid: 10, + majmin: "".to_string(), + root: "/".to_string(), + mount_point: PathBuf::from("/"), + mount_options: Default::default(), + opt_fields: vec![], + fs_type: "ext4".to_string(), + mount_source: Some("/dev/sda1".to_string()), + super_options: Default::default(), + }, + MountInfo { + mnt_id: 12, + pid: 11, + majmin: "".to_string(), + root: "/".to_string(), + mount_point: PathBuf::from("/proc"), + mount_options: Default::default(), + opt_fields: vec![], + fs_type: "proc".to_string(), + mount_source: Some("proc".to_string()), + super_options: Default::default(), + }, + ]; + + let res = find_parent_mount(Path::new("/path/to/rootfs"), mount_infos) + .context("failed to get parent mount")?; + assert_eq!(res.mnt_id, 11); + Ok(()) + } + + #[test] + fn test_find_parent_mount_with_empty_mount_infos() { + let mount_infos = vec![]; + let res = 
find_parent_mount(Path::new("/path/to/rootfs"), mount_infos); + assert!(res.is_err()); + } } diff --git a/crates/libcontainer/src/rootfs/rootfs.rs b/crates/libcontainer/src/rootfs/rootfs.rs index acc13eb97..edd36fe49 100644 --- a/crates/libcontainer/src/rootfs/rootfs.rs +++ b/crates/libcontainer/src/rootfs/rootfs.rs @@ -3,9 +3,12 @@ use super::{ mount::{Mount, MountOptions}, symlink::Symlink, utils::default_devices, + Result, RootfsError, +}; +use crate::{ + error::MissingSpecError, + syscall::{syscall::create_syscall, Syscall}, }; -use crate::syscall::{syscall::create_syscall, Syscall}; -use anyhow::{bail, Context, Result}; use nix::mount::MsFlags; use oci_spec::runtime::{Linux, Spec}; use std::path::Path; @@ -35,35 +38,48 @@ impl RootFS { bind_devices: bool, cgroup_ns: bool, ) -> Result<()> { - log::debug!("Prepare rootfs: {:?}", rootfs); + tracing::debug!(?rootfs, "prepare rootfs"); let mut flags = MsFlags::MS_REC; - let linux = spec.linux().as_ref().context("no linux in spec")?; + let linux = spec.linux().as_ref().ok_or(MissingSpecError::Linux)?; match linux.rootfs_propagation().as_deref() { Some("shared") => flags |= MsFlags::MS_SHARED, Some("private") => flags |= MsFlags::MS_PRIVATE, Some("slave" | "unbindable") | None => flags |= MsFlags::MS_SLAVE, - Some(uknown) => bail!("unknown rootfs_propagation: {}", uknown), + Some(unknown) => { + return Err(RootfsError::UnknownRootfsPropagation(unknown.to_string())); + } } self.syscall .mount(None, Path::new("/"), None, flags, None) - .context("failed to mount rootfs")?; + .map_err(|err| { + tracing::error!( + ?err, + ?flags, + "failed to change the mount propagation type of the root" + ); + + err + })?; let mounter = Mount::new(); - mounter - .make_parent_mount_private(rootfs) - .context("failed to change parent mount of rootfs private")?; + mounter.make_parent_mount_private(rootfs)?; - log::debug!("mount root fs {:?}", rootfs); - self.syscall.mount( - Some(rootfs), - rootfs, - None, - MsFlags::MS_BIND | 
MsFlags::MS_REC, - None, - )?; + tracing::debug!("mount root fs {:?}", rootfs); + self.syscall + .mount( + Some(rootfs), + rootfs, + None, + MsFlags::MS_BIND | MsFlags::MS_REC, + None, + ) + .map_err(|err| { + tracing::error!(?rootfs, ?err, "failed to bind mount rootfs"); + err + })?; let global_options = MountOptions { root: rootfs, @@ -73,19 +89,13 @@ impl RootFS { if let Some(mounts) = spec.mounts() { for mount in mounts { - mounter - .setup_mount(mount, &global_options) - .with_context(|| format!("failed to setup mount {:#?}", mount))?; + mounter.setup_mount(mount, &global_options)?; } } let symlinker = Symlink::new(); - symlinker - .setup_kcore_symlink(rootfs) - .context("failed to setup kcore symlink")?; - symlinker - .setup_default_symlinks(rootfs) - .context("failed to setup default symlinks")?; + symlinker.setup_kcore_symlink(rootfs)?; + symlinker.setup_default_symlinks(rootfs)?; let devicer = Device::new(); if let Some(added_devices) = linux.devices() { @@ -112,9 +122,16 @@ impl RootFS { }; if let Some(flags) = flags { - log::debug!("make root mount {:?}", flags); self.syscall - .mount(None, Path::new("/"), None, flags, None)?; + .mount(None, Path::new("/"), None, flags, None) + .map_err(|err| { + tracing::error!( + ?err, + ?flags, + "failed to adjust the mount propagation type of the root" + ); + err + })?; } Ok(()) diff --git a/crates/libcontainer/src/rootfs/symlink.rs b/crates/libcontainer/src/rootfs/symlink.rs index 0d552a93e..af6d11efb 100644 --- a/crates/libcontainer/src/rootfs/symlink.rs +++ b/crates/libcontainer/src/rootfs/symlink.rs @@ -1,8 +1,19 @@ use crate::syscall::{syscall::create_syscall, Syscall}; -use anyhow::{bail, Context, Result}; use std::fs::remove_file; use std::path::Path; +#[derive(Debug, thiserror::Error)] +pub enum SymlinkError { + #[error("syscall failed")] + Syscall { + source: crate::syscall::SyscallError, + }, + #[error("failed symlink: {msg}")] + Other { msg: String }, +} + +type Result = std::result::Result; + pub struct 
Symlink { syscall: Box, } @@ -33,7 +44,10 @@ impl Symlink { let link = cgroup_root.join(comount); self.syscall .symlink(Path::new(subsystem_name), &link) - .with_context(|| format!("failed to symlink {:?} to {:?}", link, subsystem_name))?; + .map_err(|err| { + tracing::error!("failed to symlink {link:?} to {subsystem_name:?}"); + SymlinkError::Syscall { source: err } + })?; } Ok(()) @@ -43,13 +57,18 @@ impl Symlink { let ptmx = rootfs.join("dev/ptmx"); if let Err(e) = remove_file(&ptmx) { if e.kind() != ::std::io::ErrorKind::NotFound { - bail!("could not delete /dev/ptmx") + return Err(SymlinkError::Other { + msg: "could not delete /dev/ptmx".into(), + }); } } self.syscall .symlink(Path::new("pts/ptmx"), &ptmx) - .context("failed to symlink ptmx")?; + .map_err(|err| { + tracing::error!("failed to symlink ptmx"); + SymlinkError::Syscall { source: err } + })?; Ok(()) } @@ -59,7 +78,10 @@ impl Symlink { if Path::new("/proc/kcore").exists() { self.syscall .symlink(Path::new("/proc/kcore"), &rootfs.join("dev/kcore")) - .context("Failed to symlink kcore")?; + .map_err(|err| { + tracing::error!("failed to symlink kcore"); + SymlinkError::Syscall { source: err } + })?; } Ok(()) } @@ -74,7 +96,10 @@ impl Symlink { for (src, dst) in defaults { self.syscall .symlink(Path::new(src), &rootfs.join(dst)) - .context("failed to symlink defaults")?; + .map_err(|err| { + tracing::error!("failed to symlink defaults"); + SymlinkError::Syscall { source: err } + })?; } Ok(()) @@ -88,8 +113,7 @@ mod tests { use crate::syscall::linux::LinuxSyscall; use crate::syscall::test::TestHelperSyscall; #[cfg(feature = "v1")] - use crate::utils::create_temp_dir; - use crate::utils::TempDir; + use anyhow::{Context, Result}; use nix::{ fcntl::{open, OFlag}, sys::stat::Mode, @@ -101,7 +125,7 @@ mod tests { #[test] fn test_setup_ptmx() { { - let tmp_dir = TempDir::new("/tmp/test_setup_ptmx").unwrap(); + let tmp_dir = tempfile::tempdir().unwrap(); let symlink = Symlink::new(); 
assert!(symlink.setup_ptmx(tmp_dir.path()).is_ok()); let want = (PathBuf::from("pts/ptmx"), tmp_dir.path().join("dev/ptmx")); @@ -115,7 +139,7 @@ mod tests { } // make remove_file goes into the bail! path { - let tmp_dir = TempDir::new("/tmp/test_setup_ptmx").unwrap(); + let tmp_dir = tempfile::tempdir().unwrap(); open( &tmp_dir.path().join("dev"), OFlag::O_RDWR | OFlag::O_CREAT, @@ -140,7 +164,7 @@ mod tests { #[test] fn test_setup_default_symlinks() { - let tmp_dir = TempDir::new("/tmp/test_setup_default_symlinks").unwrap(); + let tmp_dir = tempfile::tempdir().unwrap(); let symlink = Symlink::new(); assert!(symlink.setup_default_symlinks(tmp_dir.path()).is_ok()); let want = vec![ @@ -174,16 +198,16 @@ mod tests { #[cfg(feature = "v1")] fn setup_comounted_symlinks_success() -> Result<()> { // arrange - let tmp = create_temp_dir("setup_comounted_symlinks_success")?; - let cpu = tmp.join("cpu"); - let cpuacct = tmp.join("cpuacct"); - let cpu_cpuacct = tmp.join("cpu,cpuacct"); + let tmp = tempfile::tempdir().unwrap(); + let cpu = tmp.path().join("cpu"); + let cpuacct = tmp.path().join("cpuacct"); + let cpu_cpuacct = tmp.path().join("cpu,cpuacct"); fs::create_dir_all(cpu_cpuacct)?; let symlink = Symlink::with_syscall(Box::new(LinuxSyscall)); // act symlink - .setup_comount_symlinks(&tmp, "cpu,cpuacct") + .setup_comount_symlinks(tmp.path(), "cpu,cpuacct") .context("failed to setup symlinks")?; // assert @@ -217,12 +241,12 @@ mod tests { #[cfg(feature = "v1")] fn setup_comounted_symlinks_no_comounts() -> Result<()> { // arrange - let tmp = create_temp_dir("setup_comounted_symlinks_no_comounts")?; + let tmp = tempfile::tempdir().unwrap(); let symlink = Symlink::with_syscall(Box::new(LinuxSyscall)); // act let result = symlink - .setup_comount_symlinks(&tmp, "memory,task") + .setup_comount_symlinks(tmp.path(), "memory,task") .context("failed to setup symlinks"); // assert diff --git a/crates/libcontainer/src/rootfs/utils.rs b/crates/libcontainer/src/rootfs/utils.rs index 
6adff2c4f..f1aaa8844 100644 --- a/crates/libcontainer/src/rootfs/utils.rs +++ b/crates/libcontainer/src/rootfs/utils.rs @@ -1,13 +1,7 @@ -use anyhow::{anyhow, Result}; -use nix::{mount::MsFlags, sys::stat::SFlag, NixPath}; -use oci_spec::runtime::{LinuxDevice, LinuxDeviceBuilder, LinuxDeviceType, Mount}; -use procfs::process::MountInfo; -use std::{ - path::{Path, PathBuf}, - str::FromStr, -}; - use crate::syscall::linux::{self, MountAttrOption}; +use nix::{mount::MsFlags, sys::stat::SFlag}; +use oci_spec::runtime::{LinuxDevice, LinuxDeviceBuilder, LinuxDeviceType, Mount}; +use std::{path::PathBuf, str::FromStr}; #[derive(Debug, Clone, PartialEq, Eq)] pub struct MountOptionConfig { @@ -17,7 +11,7 @@ pub struct MountOptionConfig { /// Mount data applied to the mount. pub data: String, - /// RecAttr represents mount properties to be applied recrusively. + /// RecAttr represents mount properties to be applied recursively. pub rec_attr: Option, } @@ -183,66 +177,13 @@ pub fn parse_mount(m: &Mount) -> MountOptionConfig { } } -/// Find parent mount of rootfs in given mount infos -pub fn find_parent_mount(rootfs: &Path, mount_infos: Vec) -> Result { - // find the longest mount point - let parent_mount_info = mount_infos - .into_iter() - .filter(|mi| rootfs.starts_with(&mi.mount_point)) - .max_by(|mi1, mi2| mi1.mount_point.len().cmp(&mi2.mount_point.len())) - .ok_or_else(|| anyhow!("couldn't find parent mount of {}", rootfs.display()))?; - Ok(parent_mount_info) -} - #[cfg(test)] mod tests { use crate::syscall::linux::MountAttr; use super::*; - use anyhow::Context; - use oci_spec::runtime::MountBuilder; - #[test] - fn test_find_parent_mount() -> anyhow::Result<()> { - let mount_infos = vec![ - MountInfo { - mnt_id: 11, - pid: 10, - majmin: "".to_string(), - root: "/".to_string(), - mount_point: PathBuf::from("/"), - mount_options: Default::default(), - opt_fields: vec![], - fs_type: "ext4".to_string(), - mount_source: Some("/dev/sda1".to_string()), - super_options: 
Default::default(), - }, - MountInfo { - mnt_id: 12, - pid: 11, - majmin: "".to_string(), - root: "/".to_string(), - mount_point: PathBuf::from("/proc"), - mount_options: Default::default(), - opt_fields: vec![], - fs_type: "proc".to_string(), - mount_source: Some("proc".to_string()), - super_options: Default::default(), - }, - ]; - - let res = find_parent_mount(Path::new("/path/to/rootfs"), mount_infos) - .context("Failed to get parent mount")?; - assert_eq!(res.mnt_id, 11); - Ok(()) - } - - #[test] - fn test_find_parent_mount_with_empty_mount_infos() { - let mount_infos = vec![]; - let res = find_parent_mount(Path::new("/path/to/rootfs"), mount_infos); - assert!(res.is_err()); - } + use oci_spec::runtime::MountBuilder; #[test] fn test_to_sflag() { diff --git a/crates/libcontainer/src/rootless.rs b/crates/libcontainer/src/rootless.rs index bfd38d6a6..87eff2495 100644 --- a/crates/libcontainer/src/rootless.rs +++ b/crates/libcontainer/src/rootless.rs @@ -1,5 +1,5 @@ -use crate::{namespaces::Namespaces, utils}; -use anyhow::{bail, Context, Result}; +use crate::error::MissingSpecError; +use crate::namespaces::{NamespaceError, Namespaces}; use nix::unistd::Pid; use oci_spec::runtime::{Linux, LinuxIdMapping, LinuxNamespace, LinuxNamespaceType, Mount, Spec}; use std::fs; @@ -7,40 +7,161 @@ use std::path::Path; use std::process::Command; use std::{env, path::PathBuf}; +// Wrap the uid/gid path function into a struct for dependency injection. This +// allows us to mock the id mapping logic in unit tests by using a different +// base path other than `/proc`. +#[derive(Debug, Clone)] +pub struct RootlessIDMapper { + base_path: PathBuf, +} + +impl Default for RootlessIDMapper { + fn default() -> Self { + Self { + // By default, the `uid_map` and `gid_map` files are located in the + // `/proc` directory. In the production code, we can use the + // default. 
+ base_path: PathBuf::from("/proc"), + } + } +} + +impl RootlessIDMapper { + // In production code, we can directly use the `new` function without the + need to worry about the default. + pub fn new() -> Self { + Default::default() + } + + pub fn get_uid_path(&self, pid: &Pid) -> PathBuf { + self.base_path.join(pid.to_string()).join("uid_map") + } + pub fn get_gid_path(&self, pid: &Pid) -> PathBuf { + self.base_path.join(pid.to_string()).join("gid_map") + } + + #[cfg(test)] + pub fn ensure_uid_path(&self, pid: &Pid) -> std::result::Result<(), std::io::Error> { + std::fs::create_dir_all(self.get_uid_path(pid).parent().unwrap())?; + + Ok(()) + } + + #[cfg(test)] + pub fn ensure_gid_path(&self, pid: &Pid) -> std::result::Result<(), std::io::Error> { + std::fs::create_dir_all(self.get_gid_path(pid).parent().unwrap())?; + + Ok(()) + } + + #[cfg(test)] + // In test, we need to fake the base path to a temporary directory. + pub fn new_test(path: PathBuf) -> Self { + Self { base_path: path } + } +} + +#[derive(Debug, thiserror::Error)] +pub enum RootlessError { + #[error(transparent)] + MissingSpec(#[from] crate::error::MissingSpecError), + #[error("rootless container requires valid user namespace definition")] + NoUserNamespace, + #[error("invalid spec for rootless container")] + InvalidSpec(#[from] ValidateSpecError), + #[error("failed to read unprivileged userns clone")] + ReadUnprivilegedUsernsClone(#[source] std::io::Error), + #[error("failed to parse unprivileged userns clone")] + ParseUnprivilegedUsernsClone(#[source] std::num::ParseIntError), + #[error("unknown userns clone value")] + UnknownUnprivilegedUsernsClone(u8), + #[error(transparent)] + IDMapping(#[from] MappingError), +} + +type Result = std::result::Result; + +#[derive(Debug, thiserror::Error)] +pub enum ValidateSpecError { + #[error(transparent)] + MissingSpec(#[from] crate::error::MissingSpecError), + #[error("rootless container requires valid user namespace definition")] + NoUserNamespace, + 
#[error("rootless container requires valid uid mappings")] + NoUIDMappings, + #[error("rootless container requires valid gid mappings")] + NoGIDMapping, + #[error("no mount in spec")] + NoMountSpec, + #[error("unprivileged user can't set supplementary groups")] + UnprivilegedUser, + #[error("supplementary group needs to be mapped in the gid mappings")] + GidNotMapped(u32), + #[error("failed to parse ID")] + ParseID(#[source] std::num::ParseIntError), + #[error("mount options require mapping uid inside the rootless container")] + MountGidMapping(u32), + #[error("mount options require mapping gid inside the rootless container")] + MountUidMapping(u32), + #[error(transparent)] + Namespaces(#[from] NamespaceError), +} + +#[derive(Debug, thiserror::Error)] +pub enum MappingError { + #[error("newuidmap/newgidmap binaries could not be found in path")] + BinaryNotFound, + #[error("could not find PATH")] + NoPathEnv, + #[error("failed to execute newuidmap/newgidmap")] + Execute(#[source] std::io::Error), + #[error("at least one id mapping needs to be defined")] + NoIDMapping, + #[error("failed to write id mapping")] + WriteIDMapping(#[source] std::io::Error), +} + #[derive(Debug, Clone, Default)] -pub struct Rootless<'a> { +pub struct Rootless { /// Location of the newuidmap binary pub newuidmap: Option, /// Location of the newgidmap binary pub newgidmap: Option, /// Mappings for user ids - pub(crate) uid_mappings: Option<&'a Vec>, + pub(crate) uid_mappings: Option>, /// Mappings for group ids - pub(crate) gid_mappings: Option<&'a Vec>, + pub(crate) gid_mappings: Option>, /// Info on the user namespaces pub user_namespace: Option, /// Is rootless container requested by a privileged user pub privileged: bool, + /// Path to the id mappings + pub rootless_id_mapper: RootlessIDMapper, } -impl<'a> Rootless<'a> { - pub fn new(spec: &'a Spec) -> Result>> { - let linux = spec.linux().as_ref().context("no linux in spec")?; - let namespaces = 
Namespaces::from(linux.namespaces().as_ref()); - let user_namespace = namespaces.get(LinuxNamespaceType::User); +impl Rootless { + pub fn new(spec: &Spec) -> Result> { + let linux = spec.linux().as_ref().ok_or(MissingSpecError::Linux)?; + let namespaces = Namespaces::try_from(linux.namespaces().as_ref()) + .map_err(ValidateSpecError::Namespaces)?; + let user_namespace = namespaces + .get(LinuxNamespaceType::User) + .map_err(ValidateSpecError::Namespaces)?; // If conditions requires us to use rootless, we must either create a new // user namespace or enter an existing. if rootless_required() && user_namespace.is_none() { - bail!("rootless container requires valid user namespace definition"); + return Err(RootlessError::NoUserNamespace); } if user_namespace.is_some() && user_namespace.unwrap().path().is_none() { - log::debug!("rootless container should be created"); + tracing::debug!("rootless container should be created"); - validate_spec_for_rootless(spec) - .context("The spec failed to comply to rootless requirement")?; - let mut rootless = Rootless::from(linux); + validate_spec_for_rootless(spec).map_err(|err| { + tracing::error!("failed to validate spec for rootless container: {}", err); + err + })?; + let mut rootless = Rootless::try_from(linux)?; if let Some((uid_binary, gid_binary)) = lookup_map_binaries(linux)? 
{ rootless.newuidmap = Some(uid_binary); rootless.newgidmap = Some(gid_binary); @@ -48,75 +169,63 @@ impl<'a> Rootless<'a> { Ok(Some(rootless)) } else { - log::debug!("This is NOT a rootless container"); + tracing::debug!("this is NOT a rootless container"); Ok(None) } } pub fn write_uid_mapping(&self, target_pid: Pid) -> Result<()> { - log::debug!("Write UID mapping for {:?}", target_pid); - if let Some(uid_mappings) = self.uid_mappings { + tracing::debug!("write UID mapping for {:?}", target_pid); + if let Some(uid_mappings) = self.uid_mappings.as_ref() { write_id_mapping( target_pid, - get_uid_path(&target_pid).as_path(), + self.rootless_id_mapper.get_uid_path(&target_pid).as_path(), uid_mappings, self.newuidmap.as_deref(), - ) - } else { - Ok(()) + )?; } + Ok(()) } pub fn write_gid_mapping(&self, target_pid: Pid) -> Result<()> { - log::debug!("Write GID mapping for {:?}", target_pid); - if let Some(gid_mappings) = self.gid_mappings { - return write_id_mapping( + tracing::debug!("write GID mapping for {:?}", target_pid); + if let Some(gid_mappings) = self.gid_mappings.as_ref() { + write_id_mapping( target_pid, - get_gid_path(&target_pid).as_path(), + self.rootless_id_mapper.get_gid_path(&target_pid).as_path(), gid_mappings, self.newgidmap.as_deref(), - ); - } else { - Ok(()) + )?; } + Ok(()) + } + + pub fn with_id_mapper(&mut self, mapper: RootlessIDMapper) { + self.rootless_id_mapper = mapper } } -impl<'a> From<&'a Linux> for Rootless<'a> { - fn from(linux: &'a Linux) -> Self { - let namespaces = Namespaces::from(linux.namespaces().as_ref()); - let user_namespace = namespaces.get(LinuxNamespaceType::User); - Self { +impl TryFrom<&Linux> for Rootless { + type Error = RootlessError; + + fn try_from(linux: &Linux) -> Result { + let namespaces = Namespaces::try_from(linux.namespaces().as_ref()) + .map_err(ValidateSpecError::Namespaces)?; + let user_namespace = namespaces + .get(LinuxNamespaceType::User) + .map_err(ValidateSpecError::Namespaces)?; + Ok(Self { 
newuidmap: None, newgidmap: None, - uid_mappings: linux.uid_mappings().as_ref(), - gid_mappings: linux.gid_mappings().as_ref(), + uid_mappings: linux.uid_mappings().to_owned(), + gid_mappings: linux.gid_mappings().to_owned(), user_namespace: user_namespace.cloned(), privileged: nix::unistd::geteuid().is_root(), - } + rootless_id_mapper: RootlessIDMapper::new(), + }) } } -#[cfg(not(test))] -fn get_uid_path(pid: &Pid) -> PathBuf { - PathBuf::from(format!("/proc/{pid}/uid_map")) -} - -#[cfg(test)] -pub fn get_uid_path(pid: &Pid) -> PathBuf { - utils::get_temp_dir_path(format!("{pid}_mapping_path").as_str()).join("uid_map") -} - -#[cfg(not(test))] -fn get_gid_path(pid: &Pid) -> PathBuf { - PathBuf::from(format!("/proc/{pid}/gid_map")) -} - -#[cfg(test)] -pub fn get_gid_path(pid: &Pid) -> PathBuf { - utils::get_temp_dir_path(format!("{pid}_mapping_path").as_str()).join("gid_map") -} - /// Checks if rootless mode should be used pub fn rootless_required() -> bool { if !nix::unistd::geteuid().is_root() { @@ -133,42 +242,49 @@ pub fn unprivileged_user_ns_enabled() -> Result { } let content = - fs::read_to_string(user_ns_sysctl).context("failed to read unprivileged userns clone")?; + fs::read_to_string(user_ns_sysctl).map_err(RootlessError::ReadUnprivilegedUsernsClone)?; - match content.trim().parse::()? { + match content + .trim() + .parse::() + .map_err(RootlessError::ParseUnprivilegedUsernsClone)? 
+ { 0 => Ok(false), 1 => Ok(true), - v => bail!("failed to parse unprivileged userns value: {}", v), + v => Err(RootlessError::UnknownUnprivilegedUsernsClone(v)), } } /// Validates that the spec contains the required information for /// running in rootless mode -fn validate_spec_for_rootless(spec: &Spec) -> Result<()> { - let linux = spec.linux().as_ref().context("no linux in spec")?; - let namespaces = Namespaces::from(linux.namespaces().as_ref()); - if namespaces.get(LinuxNamespaceType::User).is_none() { - bail!("rootless containers require the specification of a user namespace"); +fn validate_spec_for_rootless(spec: &Spec) -> std::result::Result<(), ValidateSpecError> { + tracing::debug!(?spec, "validating spec for rootless container"); + let linux = spec.linux().as_ref().ok_or(MissingSpecError::Linux)?; + let namespaces = Namespaces::try_from(linux.namespaces().as_ref())?; + if namespaces.get(LinuxNamespaceType::User)?.is_none() { + return Err(ValidateSpecError::NoUserNamespace); } let gid_mappings = linux .gid_mappings() .as_ref() - .context("rootless containers require gidMappings in spec")?; + .ok_or(ValidateSpecError::NoGIDMapping)?; let uid_mappings = linux .uid_mappings() .as_ref() - .context("rootless containers require uidMappings in spec")?; + .ok_or(ValidateSpecError::NoUIDMappings)?; if uid_mappings.is_empty() { - bail!("rootless containers require at least one uid mapping"); + return Err(ValidateSpecError::NoUIDMappings); } if gid_mappings.is_empty() { - bail!("rootless containers require at least one gid mapping") + return Err(ValidateSpecError::NoGIDMapping); } validate_mounts_for_rootless( - spec.mounts().as_ref().context("no mounts in spec")?, + spec.mounts() + .as_ref() + .ok_or(ValidateSpecError::NoMountSpec)?, uid_mappings, gid_mappings, )?; @@ -184,16 +300,18 @@ fn validate_spec_for_rootless(spec: &Spec) -> Result<()> { (true, false) => { for gid in additional_gids { if !is_id_mapped(*gid, gid_mappings) { - bail!("gid {} is specified as 
supplementary group, but is not mapped in the user namespace", gid); + tracing::error!(?gid,"gid is specified as supplementary group, but is not mapped in the user namespace"); + return Err(ValidateSpecError::GidNotMapped(*gid)); } } } (false, false) => { - bail!( - "user is {} (unprivileged). Supplementary groups cannot be set in \ + tracing::error!( + user = ?nix::unistd::geteuid(), + "user is unprivileged. Supplementary groups cannot be set in \ a rootless container for this user due to CVE-2014-8989", - nix::unistd::geteuid() - ) + ); + return Err(ValidateSpecError::UnprivilegedUser); } _ => {} } @@ -206,16 +324,40 @@ fn validate_mounts_for_rootless( mounts: &[Mount], uid_mappings: &[LinuxIdMapping], gid_mappings: &[LinuxIdMapping], -) -> Result<()> { +) -> std::result::Result<(), ValidateSpecError> { for mount in mounts { if let Some(options) = mount.options() { for opt in options { - if opt.starts_with("uid=") && !is_id_mapped(opt[4..].parse()?, uid_mappings) { - bail!("Mount {:?} specifies option {} which is not mapped inside the rootless container", mount, opt); + if opt.starts_with("uid=") + && !is_id_mapped( + opt[4..].parse().map_err(ValidateSpecError::ParseID)?, + uid_mappings, + ) + { + tracing::error!( + ?mount, + ?opt, + "mount specifies option which is not mapped inside the rootless container" + ); + return Err(ValidateSpecError::MountUidMapping( + opt[4..].parse().map_err(ValidateSpecError::ParseID)?, + )); } - if opt.starts_with("gid=") && !is_id_mapped(opt[4..].parse()?, gid_mappings) { - bail!("Mount {:?} specifies option {} which is not mapped inside the rootless container", mount, opt); + if opt.starts_with("gid=") + && !is_id_mapped( + opt[4..].parse().map_err(ValidateSpecError::ParseID)?, + gid_mappings, + ) + { + tracing::error!( + ?mount, + ?opt, + "mount specifies option which is not mapped inside the rootless container" + ); + return Err(ValidateSpecError::MountGidMapping( + opt[4..].parse().map_err(ValidateSpecError::ParseID)?, + )); } 
} } @@ -232,7 +374,9 @@ fn is_id_mapped(id: u32, mappings: &[LinuxIdMapping]) -> bool { /// Looks up the location of the newuidmap and newgidmap binaries which /// are required to write multiple user/group mappings -pub fn lookup_map_binaries(spec: &Linux) -> Result> { +pub fn lookup_map_binaries( + spec: &Linux, +) -> std::result::Result, MappingError> { if let Some(uid_mappings) = spec.uid_mappings() { if uid_mappings.len() == 1 && uid_mappings.len() == 1 { return Ok(None); @@ -243,15 +387,15 @@ pub fn lookup_map_binaries(spec: &Linux) -> Result> { match (uidmap, gidmap) { (Some(newuidmap), Some(newgidmap)) => Ok(Some((newuidmap, newgidmap))), - _ => bail!("newuidmap/newgidmap binaries could not be found in path. This is required if multiple id mappings are specified"), + _ => Err(MappingError::BinaryNotFound), } } else { Ok(None) } } -fn lookup_map_binary(binary: &str) -> Result> { - let paths = env::var("PATH").context("could not find PATH")?; +fn lookup_map_binary(binary: &str) -> std::result::Result, MappingError> { + let paths = env::var("PATH").map_err(|_| MappingError::NoPathEnv)?; Ok(paths .split_terminator(':') .map(|p| Path::new(p).join(binary)) @@ -263,17 +407,20 @@ fn write_id_mapping( map_file: &Path, mappings: &[LinuxIdMapping], map_binary: Option<&Path>, -) -> Result<()> { - log::debug!("Write ID mapping: {:?}", mappings); +) -> std::result::Result<(), MappingError> { + tracing::debug!("Write ID mapping: {:?}", mappings); match mappings.len() { - 0 => bail!("at least one id mapping needs to be defined"), + 0 => return Err(MappingError::NoIDMapping), 1 => { let mapping = mappings .first() .and_then(|m| format!("{} {} {}", m.container_id(), m.host_id(), m.size()).into()) .unwrap(); - utils::write_file(map_file, mapping)?; + std::fs::write(map_file, &mapping).map_err(|err| { + tracing::error!(?err, ?map_file, ?mapping, "failed to write uid/gid mapping"); + MappingError::WriteIDMapping(err) + })?; } _ => { let args: Vec = mappings @@ -291,7 +438,10 @@ 
fn write_id_mapping( .arg(pid.to_string()) .args(args) .output() - .with_context(|| format!("failed to execute {:?}", map_binary))?; + .map_err(|err| { + tracing::error!(?err, ?map_binary, "failed to execute newuidmap/newgidmap"); + MappingError::Execute(err) + })?; } } @@ -302,15 +452,18 @@ fn write_id_mapping( mod tests { use std::fs; + use super::*; + use anyhow::Result; use nix::unistd::getpid; use oci_spec::runtime::{ LinuxBuilder, LinuxIdMappingBuilder, LinuxNamespaceBuilder, SpecBuilder, }; + use rand::Rng; use serial_test::serial; - use crate::utils::{test_utils::gen_u32, TempDir}; - - use super::*; + fn gen_u32() -> u32 { + rand::thread_rng().gen() + } #[test] fn test_validate_ok() -> Result<()> { @@ -445,15 +598,20 @@ mod tests { .gid_mappings(gid_mappings) .build()?; let spec = SpecBuilder::default().linux(linux).build()?; - let rootless = Rootless::new(&spec)?.unwrap(); + let pid = getpid(); - let tempdir = TempDir::new(get_uid_path(&pid).parent().unwrap())?; - let uid_map_path = tempdir.join("uid_map"); - let _ = fs::File::create(&uid_map_path)?; + let tmp = tempfile::tempdir()?; + let id_mapper = RootlessIDMapper { + base_path: tmp.path().to_path_buf(), + }; + id_mapper.ensure_uid_path(&pid)?; + + let mut rootless = Rootless::new(&spec)?.unwrap(); + rootless.with_id_mapper(id_mapper.clone()); rootless.write_uid_mapping(pid)?; assert_eq!( format!("{container_id} {host_uid} {size}"), - fs::read_to_string(uid_map_path)? + fs::read_to_string(id_mapper.get_uid_path(&pid))? 
); rootless.write_gid_mapping(pid)?; Ok(()) @@ -485,15 +643,20 @@ mod tests { .gid_mappings(gid_mappings) .build()?; let spec = SpecBuilder::default().linux(linux).build()?; - let rootless = Rootless::new(&spec)?.unwrap(); + let pid = getpid(); - let tempdir = TempDir::new(get_gid_path(&pid).parent().unwrap())?; - let gid_map_path = tempdir.join("gid_map"); - let _ = fs::File::create(&gid_map_path)?; + let tmp = tempfile::tempdir()?; + let id_mapper = RootlessIDMapper { + base_path: tmp.path().to_path_buf(), + }; + id_mapper.ensure_gid_path(&pid)?; + + let mut rootless = Rootless::new(&spec)?.unwrap(); + rootless.with_id_mapper(id_mapper.clone()); rootless.write_gid_mapping(pid)?; assert_eq!( format!("{container_id} {host_gid} {size}"), - fs::read_to_string(gid_map_path)? + fs::read_to_string(id_mapper.get_gid_path(&pid))? ); Ok(()) } diff --git a/crates/libcontainer/src/seccomp/mod.rs b/crates/libcontainer/src/seccomp/mod.rs index d6bb4e166..2bfce4cb0 100644 --- a/crates/libcontainer/src/seccomp/mod.rs +++ b/crates/libcontainer/src/seccomp/mod.rs @@ -1,6 +1,3 @@ -use anyhow::bail; -use anyhow::Context; -use anyhow::Result; use libseccomp::ScmpAction; use libseccomp::ScmpArch; use libseccomp::ScmpArgCompare; @@ -12,8 +9,52 @@ use oci_spec::runtime::LinuxSeccomp; use oci_spec::runtime::LinuxSeccompAction; use oci_spec::runtime::LinuxSeccompFilterFlag; use oci_spec::runtime::LinuxSeccompOperator; +use std::num::TryFromIntError; use std::os::unix::io; +#[derive(Debug, thiserror::Error)] +pub enum SeccompError { + #[error("failed to translate trace action due to failed to convert errno {errno} into i16")] + TraceAction { source: TryFromIntError, errno: i32 }, + #[error("SCMP_ACT_NOTIFY cannot be used as default action")] + NotifyAsDefaultAction, + #[error("SCMP_ACT_NOTIFY cannot be used for the write syscall")] + NotifyWriteSyscall, + #[error("failed to add arch to seccomp")] + AddArch { + source: libseccomp::error::SeccompError, + arch: Arch, + }, + #[error("failed to 
load seccomp context")] + LoadContext { + source: libseccomp::error::SeccompError, + }, + #[error("failed to get seccomp notify id")] + GetNotifyId { + source: libseccomp::error::SeccompError, + }, + #[error("failed to add rule to seccomp")] + AddRule { + source: libseccomp::error::SeccompError, + }, + #[error("failed to create new seccomp filter")] + NewFilter { + source: libseccomp::error::SeccompError, + default: LinuxSeccompAction, + }, + #[error("failed to set filter flag")] + SetFilterFlag { + source: libseccomp::error::SeccompError, + flag: LinuxSeccompFilterFlag, + }, + #[error("failed to set SCMP_FLTATR_CTL_NNP")] + SetCtlNnp { + source: libseccomp::error::SeccompError, + }, +} + +type Result = std::result::Result; + fn translate_arch(arch: Arch) -> ScmpArch { match arch { Arch::ScmpArchNative => ScmpArch::Native, @@ -37,18 +78,24 @@ fn translate_arch(arch: Arch) -> ScmpArch { } fn translate_action(action: LinuxSeccompAction, errno: Option) -> Result { + tracing::trace!(?action, ?errno, "translating action"); let errno = errno.map(|e| e as i32).unwrap_or(libc::EPERM); let action = match action { LinuxSeccompAction::ScmpActKill => ScmpAction::KillThread, LinuxSeccompAction::ScmpActTrap => ScmpAction::Trap, LinuxSeccompAction::ScmpActErrno => ScmpAction::Errno(errno), - LinuxSeccompAction::ScmpActTrace => ScmpAction::Trace(errno.try_into()?), + LinuxSeccompAction::ScmpActTrace => ScmpAction::Trace( + errno + .try_into() + .map_err(|err| SeccompError::TraceAction { source: err, errno })?, + ), LinuxSeccompAction::ScmpActAllow => ScmpAction::Allow, LinuxSeccompAction::ScmpActKillProcess => ScmpAction::KillProcess, LinuxSeccompAction::ScmpActNotify => ScmpAction::Notify, LinuxSeccompAction::ScmpActLog => ScmpAction::Log, }; + tracing::trace!(?action, "translated action"); Ok(action) } @@ -76,7 +123,7 @@ fn check_seccomp(seccomp: &LinuxSeccomp) -> Result<()> { // handle read/close syscall and allow read and close to proceed as // expected. 
if seccomp.default_action() == LinuxSeccompAction::ScmpActNotify { - bail!("SCMP_ACT_NOTIFY cannot be used as default action"); + return Err(SeccompError::NotifyAsDefaultAction); } if let Some(syscalls) = seccomp.syscalls() { @@ -84,7 +131,7 @@ fn check_seccomp(seccomp: &LinuxSeccomp) -> Result<()> { if syscall.action() == LinuxSeccompAction::ScmpActNotify { for name in syscall.names() { if name == "write" { - bail!("SCMP_ACT_NOTIFY cannot be used for the write syscall"); + return Err(SeccompError::NotifyWriteSyscall); } } } @@ -94,29 +141,37 @@ fn check_seccomp(seccomp: &LinuxSeccomp) -> Result<()> { Ok(()) } +#[tracing::instrument(level = "trace", skip(seccomp))] pub fn initialize_seccomp(seccomp: &LinuxSeccomp) -> Result> { check_seccomp(seccomp)?; + tracing::trace!(default_action = ?seccomp.default_action(), errno = ?seccomp.default_errno_ret(), "initializing seccomp"); let default_action = translate_action(seccomp.default_action(), seccomp.default_errno_ret())?; - let mut ctx = ScmpFilterContext::new_filter(translate_action( - seccomp.default_action(), - seccomp.default_errno_ret(), - )?)?; + let mut ctx = + ScmpFilterContext::new_filter(default_action).map_err(|err| SeccompError::NewFilter { + source: err, + default: seccomp.default_action(), + })?; if let Some(flags) = seccomp.flags() { for flag in flags { match flag { - LinuxSeccompFilterFlag::SeccompFilterFlagLog => ctx.set_ctl_log(true)?, - LinuxSeccompFilterFlag::SeccompFilterFlagTsync => ctx.set_ctl_tsync(true)?, - LinuxSeccompFilterFlag::SeccompFilterFlagSpecAllow => ctx.set_ctl_ssb(true)?, + LinuxSeccompFilterFlag::SeccompFilterFlagLog => ctx.set_ctl_log(true), + LinuxSeccompFilterFlag::SeccompFilterFlagTsync => ctx.set_ctl_tsync(true), + LinuxSeccompFilterFlag::SeccompFilterFlagSpecAllow => ctx.set_ctl_ssb(true), } + .map_err(|err| SeccompError::SetFilterFlag { + source: err, + flag: *flag, + })?; } } if let Some(architectures) = seccomp.architectures() { for &arch in architectures { + 
tracing::trace!(?arch, "adding architecture"); ctx.add_arch(translate_arch(arch)) - .context("failed to add arch to seccomp")?; + .map_err(|err| SeccompError::AddArch { source: err, arch })?; } } @@ -127,7 +182,8 @@ pub fn initialize_seccomp(seccomp: &LinuxSeccomp) -> Result> { // set it here. If the seccomp load operation fails without enough // privilege, so be it. To prevent this automatic behavior, we unset the // value here. - ctx.set_ctl_nnp(false)?; + ctx.set_ctl_nnp(false) + .map_err(|err| SeccompError::SetCtlNnp { source: err })?; if let Some(syscalls) = seccomp.syscalls() { for syscall in syscalls { @@ -135,8 +191,8 @@ pub fn initialize_seccomp(seccomp: &LinuxSeccomp) -> Result> { if action == default_action { // When the action is the same as the default action, the rule is redundant. We can // skip this here to avoid failing when we add the rules. - log::warn!( - "Detect a seccomp action that is the same as the default action: {:?}", + tracing::warn!( + "detect a seccomp action that is the same as the default action: {:?}", syscall ); continue; @@ -148,7 +204,7 @@ pub fn initialize_seccomp(seccomp: &LinuxSeccomp) -> Result> { Err(_) => { // If we failed to resolve the syscall by name, likely the kernel // doeesn't support this syscall. So it is safe to skip... - log::warn!( + tracing::warn!( "failed to resolve syscall, likely kernel doesn't support this. {:?}", name ); @@ -175,18 +231,26 @@ pub fn initialize_seccomp(seccomp: &LinuxSeccomp) -> Result> { translate_op(arg.op(), arg.value_two()), arg.value(), ); + tracing::trace!(?name, ?action, ?arg, "add seccomp conditional rule"); ctx.add_rule_conditional(action, sc, &[cmp]) - .with_context(|| { - format!( - "failed to add seccomp action: {:?}. Cmp: {:?} Syscall: {name}", - &action, cmp, - ) + .map_err(|err| { + tracing::error!( + "failed to add seccomp action: {:?}. 
Cmp: {:?} Syscall: {name}", &action, cmp, + ); + SeccompError::AddRule { + source: err, + } })?; } } None => { - ctx.add_rule(action, sc).with_context(|| { - format!("failed to add seccomp rule: {:?}. Syscall: {name}", &sc) + tracing::trace!(?name, ?action, "add seccomp rule"); + ctx.add_rule(action, sc).map_err(|err| { + tracing::error!( + "failed to add seccomp rule: {:?}. Syscall: {name}", + &sc + ); + SeccompError::AddRule { source: err } })?; } } @@ -198,12 +262,13 @@ pub fn initialize_seccomp(seccomp: &LinuxSeccomp) -> Result> { // thread must have the CAP_SYS_ADMIN capability in its user namespace, or // the thread must already have the no_new_privs bit set. // Ref: https://man7.org/linux/man-pages/man2/seccomp.2.html - ctx.load().context("failed to load seccomp context")?; + ctx.load() + .map_err(|err| SeccompError::LoadContext { source: err })?; let fd = if is_notify(seccomp) { Some( ctx.get_notify_fd() - .context("failed to get seccomp notify fd")?, + .map_err(|err| SeccompError::GetNotifyId { source: err })?, ) } else { None @@ -223,8 +288,8 @@ pub fn is_notify(seccomp: &LinuxSeccomp) -> bool { #[cfg(test)] mod tests { use super::*; - use crate::utils::test_utils; - use anyhow::Result; + use crate::test_utils::{self, TestCallbackError}; + use anyhow::{Context, Result}; use oci_spec::runtime::Arch; use oci_spec::runtime::{LinuxSeccompBuilder, LinuxSyscallBuilder}; use serial_test::serial; @@ -258,17 +323,20 @@ mod tests { test_utils::test_in_child_process(|| { let _ = prctl::set_no_new_privileges(true); - initialize_seccomp(&seccomp_profile)?; + initialize_seccomp(&seccomp_profile).expect("failed to initialize seccomp"); let ret = nix::unistd::getcwd(); if ret.is_ok() { - bail!("getcwd didn't error out as seccomp profile specified"); + Err(TestCallbackError::Custom( + "getcwd didn't error out as seccomp profile specified".to_string(), + ))?; } if let Some(errno) = ret.err() { if errno != nix::errno::from_i32(expect_error) { - bail!( - "getcwd failed but 
we didn't get the expected error from seccomp profile: {}", errno - ); + Err(TestCallbackError::Custom(format!( + "getcwd failed but we didn't get the expected error from seccomp profile: {}", + errno + )))?; } } @@ -290,7 +358,7 @@ mod tests { let seccomp_profile = spec.linux().as_ref().unwrap().seccomp().as_ref().unwrap(); test_utils::test_in_child_process(|| { let _ = prctl::set_no_new_privileges(true); - initialize_seccomp(seccomp_profile)?; + initialize_seccomp(seccomp_profile).expect("failed to initialize seccomp"); Ok(()) })?; @@ -312,9 +380,12 @@ mod tests { .build()?; test_utils::test_in_child_process(|| { let _ = prctl::set_no_new_privileges(true); - let fd = initialize_seccomp(&seccomp_profile)?; + let fd = + initialize_seccomp(&seccomp_profile).expect("failed to initialize seccomp profile"); if fd.is_none() { - bail!("failed to get a seccomp notify fd with notify seccomp profile"); + Err(TestCallbackError::Custom( + "failed to get a seccomp notify fd with notify seccomp profile".to_string(), + ))?; } Ok(()) diff --git a/crates/libcontainer/src/signal.rs b/crates/libcontainer/src/signal.rs index 6afd971c0..547098600 100644 --- a/crates/libcontainer/src/signal.rs +++ b/crates/libcontainer/src/signal.rs @@ -1,6 +1,5 @@ //! 
Returns *nix signal enum value from passed string -use anyhow::{bail, Context, Result}; use nix::sys::signal::Signal as NixSignal; use std::convert::TryFrom; @@ -8,11 +7,18 @@ use std::convert::TryFrom; #[derive(Debug)] pub struct Signal(NixSignal); +#[derive(Debug, thiserror::Error)] +pub enum SignalError { + #[error("invalid signal: {0}")] + InvalidSignal(T), +} + impl TryFrom<&str> for Signal { - type Error = anyhow::Error; + type Error = SignalError; fn try_from(s: &str) -> Result { use NixSignal::*; + Ok(match s.to_ascii_uppercase().as_str() { "1" | "HUP" | "SIGHUP" => SIGHUP, "2" | "INT" | "SIGINT" => SIGINT, @@ -45,18 +51,18 @@ impl TryFrom<&str> for Signal { "29" | "IO" | "SIGIO" => SIGIO, "30" | "PWR" | "SIGPWR" => SIGPWR, "31" | "SYS" | "SIGSYS" => SIGSYS, - _ => bail! {"{} is not a valid signal", s}, + _ => return Err(SignalError::InvalidSignal(s.to_string())), }) .map(Signal) } } impl TryFrom for Signal { - type Error = anyhow::Error; + type Error = SignalError; fn try_from(value: i32) -> Result { NixSignal::try_from(value) - .with_context(|| format!("{} is not a valid signal", value)) + .map_err(|_| SignalError::InvalidSignal(value)) .map(Signal) } } diff --git a/crates/libcontainer/src/syscall/linux.rs b/crates/libcontainer/src/syscall/linux.rs index dd27c8e22..f5a6164bb 100644 --- a/crates/libcontainer/src/syscall/linux.rs +++ b/crates/libcontainer/src/syscall/linux.rs @@ -1,19 +1,8 @@ //! 
Implements Command trait for Linux systems -use std::ffi::{CStr, CString, OsStr}; -use std::fs; -use std::os::unix::ffi::OsStrExt; -use std::os::unix::fs::symlink; -use std::os::unix::io::RawFd; -use std::str::FromStr; -use std::sync::Arc; -use std::{any::Any, mem, path::Path, ptr}; - -use anyhow::{anyhow, bail, Context, Error, Result}; use caps::{CapSet, CapsHashSet}; use libc::{c_char, setdomainname, uid_t}; use nix::fcntl; use nix::{ - errno::Errno, fcntl::{open, OFlag}, mount::{mount, umount2, MntFlags, MsFlags}, sched::{unshare, CloneFlags}, @@ -21,12 +10,17 @@ use nix::{ unistd, unistd::{chown, fchdir, pivot_root, setgroups, sethostname, Gid, Uid}, }; -use syscalls::{syscall, Sysno, Sysno::close_range}; - use oci_spec::runtime::LinuxRlimit; +use std::ffi::{CStr, CString, OsStr}; +use std::fs; +use std::os::unix::ffi::OsStrExt; +use std::os::unix::fs::symlink; +use std::os::unix::io::RawFd; +use std::str::FromStr; +use std::sync::Arc; +use std::{any::Any, mem, path::Path, ptr}; -use super::Syscall; -use crate::syscall::syscall::CloseRange; +use super::{Result, Syscall, SyscallError}; use crate::{capabilities, utils}; // Flags used in mount_setattr(2). 
@@ -77,9 +71,9 @@ pub enum MountAttrOption { } impl FromStr for MountAttrOption { - type Err = Error; + type Err = SyscallError; - fn from_str(option: &str) -> Result { + fn from_str(option: &str) -> std::result::Result { match option { "rro" => Ok(MountAttrOption::MountArrtRdonly(false, MOUNT_ATTR_RDONLY)), "rrw" => Ok(MountAttrOption::MountArrtRdonly(true, MOUNT_ATTR_RDONLY)), @@ -124,7 +118,7 @@ impl FromStr for MountAttrOption { MOUNT_ATTR_NOSYMFOLLOW, )), // No support for MOUNT_ATTR_IDMAP yet (needs UserNS FD) - _ => Err(anyhow!("Unexpected option.")), + _ => Err(SyscallError::UnexpectedMountAttrOption(option.to_string())), } } } @@ -195,7 +189,7 @@ impl LinuxSyscall { } fn emulate_close_range(preserve_fds: i32) -> Result<()> { - let open_fds = Self::get_open_fds().with_context(|| "failed to obtain opened fds")?; + let open_fds = Self::get_open_fds()?; // Include stdin, stdout, and stderr for fd 0, 1, and 2 respectively. let min_fd = preserve_fds + 3; let to_be_cleaned_up_fds: Vec = open_fds @@ -215,10 +209,19 @@ impl LinuxSyscall { // Get a list of open fds for the calling process. fn get_open_fds() -> Result> { const PROCFS_FD_PATH: &str = "/proc/self/fd"; - utils::ensure_procfs(Path::new(PROCFS_FD_PATH)) - .with_context(|| format!("{} is not the actual procfs", PROCFS_FD_PATH))?; + utils::ensure_procfs(Path::new(PROCFS_FD_PATH)).map_err(|err| { + tracing::error!(?err, "failed to ensure /proc is mounted"); + match err { + utils::EnsureProcfsError::Nix(err) => SyscallError::Nix(err), + utils::EnsureProcfsError::IO(err) => SyscallError::IO(err), + } + })?; - let fds: Vec = fs::read_dir(PROCFS_FD_PATH)? + let fds: Vec = fs::read_dir(PROCFS_FD_PATH) + .map_err(|err| { + tracing::error!(?err, "failed to read /proc/self/fd"); + err + })? 
.filter_map(|entry| match entry { Ok(entry) => Some(entry.path()), Err(_) => None, @@ -250,7 +253,11 @@ impl Syscall for LinuxSyscall { /// Function to set given path as root path inside process fn pivot_rootfs(&self, path: &Path) -> Result<()> { // open the path as directory and read only - let newroot = open(path, OFlag::O_DIRECTORY | OFlag::O_RDONLY, Mode::empty())?; + let newroot = + open(path, OFlag::O_DIRECTORY | OFlag::O_RDONLY, Mode::empty()).map_err(|errno| { + tracing::error!(?errno, ?path, "failed to open the new root for pivot root"); + errno + })?; // make the given path as the root directory for the container // see https://man7.org/linux/man-pages/man2/pivot_root.2.html, specially the notes @@ -260,7 +267,10 @@ impl Syscall for LinuxSyscall { // this path. This is done, as otherwise, we will need to create a separate temporary directory under the new root path // so we can move the original root there, and then unmount that. This way saves the creation of the temporary // directory to put original root directory. - pivot_root(path, path)?; + pivot_root(path, path).map_err(|errno| { + tracing::error!(?errno, ?path, "failed to pivot root to"); + errno + })?; // Make the original root directory rslave to avoid propagating unmount event to the host mount namespace. // We should use MS_SLAVE not MS_PRIVATE according to https://github.com/opencontainers/runc/pull/1500. 
@@ -270,15 +280,26 @@ impl Syscall for LinuxSyscall { None::<&str>, MsFlags::MS_SLAVE | MsFlags::MS_REC, None::<&str>, - )?; + ) + .map_err(|errno| { + tracing::error!(?errno, "failed to make original root directory rslave"); + errno + })?; // Unmount the original root directory which was stacked on top of new root directory // MNT_DETACH makes the mount point unavailable to new accesses, but waits till the original mount point // to be free of activity to actually unmount // see https://man7.org/linux/man-pages/man2/umount2.2.html for more information - umount2("/", MntFlags::MNT_DETACH)?; - // Change directory to root - fchdir(newroot)?; + umount2("/", MntFlags::MNT_DETACH).map_err(|errno| { + tracing::error!(?errno, "failed to unmount old root directory"); + errno + })?; + // Change directory to the new root + fchdir(newroot).map_err(|errno| { + tracing::error!(?errno, ?newroot, "failed to change directory to new root"); + errno + })?; + Ok(()) } @@ -290,12 +311,27 @@ impl Syscall for LinuxSyscall { /// set uid and gid for process fn set_id(&self, uid: Uid, gid: Gid) -> Result<()> { - if let Err(e) = prctl::set_keep_capabilities(true) { - bail!("set keep capabilities returned {}", e); - }; + prctl::set_keep_capabilities(true).map_err(|errno| { + tracing::error!(?errno, "failed to set keep capabilities to true"); + nix::errno::from_i32(errno) + })?; // args : real *id, effective *id, saved set *id respectively - unistd::setresgid(gid, gid, gid)?; - unistd::setresuid(uid, uid, uid)?; + unistd::setresgid(gid, gid, gid).map_err(|err| { + tracing::error!( + ?err, + ?gid, + "failed to set real, effective and saved set gid" + ); + err + })?; + unistd::setresuid(uid, uid, uid).map_err(|err| { + tracing::error!( + ?err, + ?uid, + "failed to set real, effective and saved set uid" + ); + err + })?; // if not the root user, reset capabilities to effective capabilities, // which are used by kernel to perform checks @@ -303,9 +339,10 @@ impl Syscall for LinuxSyscall { if uid 
!= Uid::from_raw(0) { capabilities::reset_effective(self)?; } - if let Err(e) = prctl::set_keep_capabilities(false) { - bail!("set keep capabilities returned {}", e); - }; + prctl::set_keep_capabilities(false).map_err(|errno| { + tracing::error!(?errno, "failed to set keep capabilities to false"); + nix::errno::from_i32(errno) + })?; Ok(()) } @@ -313,9 +350,9 @@ impl Syscall for LinuxSyscall { // see https://man7.org/linux/man-pages/man2/unshare.2.html for more information fn unshare(&self, flags: CloneFlags) -> Result<()> { unshare(flags)?; + Ok(()) } - /// Set capabilities for container process fn set_capability(&self, cset: CapSet, value: &CapsHashSet) -> Result<()> { match cset { @@ -341,9 +378,7 @@ impl Syscall for LinuxSyscall { /// Sets hostname for process fn set_hostname(&self, hostname: &str) -> Result<()> { - if let Err(e) = sethostname(hostname) { - bail!("Failed to set {} as hostname. {:?}", hostname, e) - } + sethostname(hostname)?; Ok(()) } @@ -352,20 +387,14 @@ impl Syscall for LinuxSyscall { fn set_domainname(&self, domainname: &str) -> Result<()> { let ptr = domainname.as_bytes().as_ptr() as *const c_char; let len = domainname.len(); - let res = unsafe { setdomainname(ptr, len) }; - - match res { + match unsafe { setdomainname(ptr, len) } { 0 => Ok(()), - -1 => bail!( - "Failed to set {} as domainname. {}", - domainname, - std::io::Error::last_os_error() - ), - _ => bail!( - "Failed to set {} as domainname. unexpected error occor.", - domainname - ), - } + -1 => Err(nix::Error::last()), + + _ => Err(nix::Error::UnknownErrno), + }?; + + Ok(()) } /// Sets resource limit for process @@ -374,10 +403,19 @@ impl Syscall for LinuxSyscall { rlim_cur: rlimit.soft(), rlim_max: rlimit.hard(), }; + + // Change for musl libc based on seccomp needs + #[cfg(not(target_env = "musl"))] let res = unsafe { libc::setrlimit(rlimit.typ() as u32, rlim) }; - if let Err(e) = Errno::result(res).map(drop) { - bail!("Failed to set {:?}. 
{:?}", rlimit.typ(), e) - } + #[cfg(target_env = "musl")] + let res = unsafe { libc::setrlimit(rlimit.typ() as i32, rlim) }; + + match res { + 0 => Ok(()), + -1 => Err(SyscallError::Nix(nix::Error::last())), + _ => Err(SyscallError::Nix(nix::Error::UnknownErrno)), + }?; + Ok(()) } @@ -429,59 +467,59 @@ impl Syscall for LinuxSyscall { flags: MsFlags, data: Option<&str>, ) -> Result<()> { - match mount(source, target, fstype, flags, data) { - Ok(_) => Ok(()), - Err(e) => Err(anyhow!(e)), - } + mount(source, target, fstype, flags, data)?; + Ok(()) } fn symlink(&self, original: &Path, link: &Path) -> Result<()> { - match symlink(original, link) { - Ok(_) => Ok(()), - Err(e) => Err(anyhow!(e)), - } + symlink(original, link)?; + + Ok(()) } fn mknod(&self, path: &Path, kind: SFlag, perm: Mode, dev: u64) -> Result<()> { - match mknod(path, kind, perm, dev) { - Ok(_) => Ok(()), - Err(e) => Err(anyhow!(e)), - } + mknod(path, kind, perm, dev)?; + + Ok(()) } fn chown(&self, path: &Path, owner: Option, group: Option) -> Result<()> { - match chown(path, owner, group) { - Ok(_) => Ok(()), - Err(e) => Err(anyhow!(e)), - } + chown(path, owner, group)?; + + Ok(()) } fn set_groups(&self, groups: &[Gid]) -> Result<()> { - match setgroups(groups) { - Ok(_) => Ok(()), - Err(e) => Err(anyhow!(e)), - } + setgroups(groups)?; + + Ok(()) } + #[tracing::instrument(skip(self))] fn close_range(&self, preserve_fds: i32) -> Result<()> { - let result = unsafe { - syscall!( - close_range, - 3 + preserve_fds as usize, - usize::MAX, - CloseRange::CLOEXEC.bits() + match unsafe { + libc::syscall( + libc::SYS_close_range, + 3 + preserve_fds, + libc::c_int::MAX, + libc::CLOSE_RANGE_CLOEXEC, ) - }; - - match result { - Ok(_) => Ok(()), - Err(e) if e == syscalls::Errno::ENOSYS || e == syscalls::Errno::EINVAL => { - // close_range was introduced in kernel 5.9 and CLOSEEXEC was introduced in - // kernel 5.11. If the kernel is older we emulate close_range in userspace. 
- Self::emulate_close_range(preserve_fds) + } { + 0 => Ok(()), + -1 => { + match nix::errno::Errno::last() { + nix::errno::Errno::ENOSYS | nix::errno::Errno::EINVAL => { + // close_range was introduced in kernel 5.9 and CLOSEEXEC was introduced in + // kernel 5.11. If the kernel is older we emulate close_range in userspace. + Self::emulate_close_range(preserve_fds) + } + e => Err(SyscallError::Nix(e)), + } } - Err(e) => bail!(e), - } + _ => Err(SyscallError::Nix(nix::errno::Errno::UnknownErrno)), + }?; + + Ok(()) } fn mount_setattr( @@ -492,28 +530,53 @@ impl Syscall for LinuxSyscall { mount_attr: &MountAttr, size: libc::size_t, ) -> Result<()> { - let path_pathbuf = pathname.to_path_buf(); - let path_str = path_pathbuf.to_str(); - let path_c_string = match path_str { - Some(path_str) => CString::new(path_str)?, - None => bail!("Invalid filename"), - }; - let result = unsafe { - // TODO: nix/libc crate hasn't supported mount_setattr system call yet. - syscall!( - Sysno::mount_setattr, + let path_c_string = pathname + .to_path_buf() + .to_str() + .map(CString::new) + .ok_or_else(|| { + tracing::error!(path = ?pathname, "failed to convert path to string"); + nix::Error::EINVAL + })? 
+ .map_err(|err| { + tracing::error!(path = ?pathname, ?err, "failed to convert path to string"); + nix::Error::EINVAL + })?; + + match unsafe { + libc::syscall( + libc::SYS_mount_setattr, dirfd, path_c_string.as_ptr(), flags, mount_attr as *const MountAttr, - size + size, ) - }; + } { + 0 => Ok(()), + -1 => Err(nix::Error::last()), + _ => Err(nix::Error::UnknownErrno), + }?; + Ok(()) + } - match result { - Ok(_) => Ok(()), - Err(e) => bail!(e), - } + fn set_io_priority(&self, class: i64, priority: i64) -> Result<()> { + let ioprio_who_progress: libc::c_int = 1; + let ioprio_who_pid = 0; + let iop = (class << 13) | priority; + match unsafe { + libc::syscall( + libc::SYS_ioprio_set, + ioprio_who_progress, + ioprio_who_pid, + iop as libc::c_ulong, + ) + } { + 0 => Ok(()), + -1 => Err(nix::Error::last()), + _ => Err(nix::Error::UnknownErrno), + }?; + Ok(()) } } diff --git a/crates/libcontainer/src/syscall/mod.rs b/crates/libcontainer/src/syscall/mod.rs index 543997e8e..d27bf411a 100644 --- a/crates/libcontainer/src/syscall/mod.rs +++ b/crates/libcontainer/src/syscall/mod.rs @@ -8,3 +8,16 @@ pub mod syscall; pub mod test; pub use syscall::Syscall; +#[derive(Debug, thiserror::Error)] +pub enum SyscallError { + #[error("unexpected mount attr option: {0}")] + UnexpectedMountAttrOption(String), + #[error(transparent)] + Nix(#[from] nix::Error), + #[error(transparent)] + IO(#[from] std::io::Error), + #[error("failed to set capabilities: {0}")] + SetCaps(#[from] caps::errors::CapsError), +} + +type Result = std::result::Result; diff --git a/crates/libcontainer/src/syscall/syscall.rs b/crates/libcontainer/src/syscall/syscall.rs index 14ba9620a..0f85fd5f1 100644 --- a/crates/libcontainer/src/syscall/syscall.rs +++ b/crates/libcontainer/src/syscall/syscall.rs @@ -1,10 +1,6 @@ //! An interface trait so that rest of Youki can call //! necessary functions without having to worry about their //! 
implementation details -use std::{any::Any, ffi::OsStr, path::Path, sync::Arc}; - -use anyhow::Result; -use bitflags::bitflags; use caps::{CapSet, CapsHashSet}; use libc; use nix::{ @@ -13,12 +9,14 @@ use nix::{ sys::stat::{Mode, SFlag}, unistd::{Gid, Uid}, }; +use std::{any::Any, ffi::OsStr, path::Path, sync::Arc}; use oci_spec::runtime::LinuxRlimit; use crate::syscall::{ linux::{LinuxSyscall, MountAttr}, test::TestHelperSyscall, + Result, }; /// This specifies various kernel/other functionalities required for @@ -56,19 +54,34 @@ pub trait Syscall { mount_attr: &MountAttr, size: libc::size_t, ) -> Result<()>; + fn set_io_priority(&self, class: i64, priority: i64) -> Result<()>; } -pub fn create_syscall() -> Box { - if cfg!(test) { - Box::::default() - } else { - Box::new(LinuxSyscall) +#[derive(Clone, Copy)] +pub enum SyscallType { + Linux, + Test, +} + +impl Default for SyscallType { + fn default() -> Self { + if cfg!(test) { + SyscallType::Test + } else { + SyscallType::Linux + } } } -bitflags! 
{ -pub struct CloseRange : usize { - const NONE = 0b00000000; - const UNSHARE = 0b00000010; - const CLOEXEC = 0b00000100; -}} +impl SyscallType { + pub fn create_syscall(&self) -> Box { + match self { + SyscallType::Linux => Box::new(LinuxSyscall), + SyscallType::Test => Box::::default(), + } + } +} + +pub fn create_syscall() -> Box { + SyscallType::default().create_syscall() +} diff --git a/crates/libcontainer/src/syscall/test.rs b/crates/libcontainer/src/syscall/test.rs index 2bf7f034f..56fa9a7cf 100644 --- a/crates/libcontainer/src/syscall/test.rs +++ b/crates/libcontainer/src/syscall/test.rs @@ -17,7 +17,7 @@ use nix::{ use oci_spec::runtime::LinuxRlimit; -use super::{linux, Syscall}; +use super::{linux, Result, Syscall}; #[derive(Clone, PartialEq, Eq, Debug)] pub struct MountArgs { @@ -43,10 +43,16 @@ pub struct ChownArgs { pub group: Option, } +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct IoPriorityArgs { + pub class: i64, + pub priority: i64, +} + #[derive(Default)] struct Mock { values: Vec>, - ret_err: Option anyhow::Result<()>>, + ret_err: Option Result<()>>, ret_err_times: usize, } @@ -62,6 +68,7 @@ pub enum ArgName { Domainname, Groups, Capability, + IoPriority, } impl ArgName { @@ -77,6 +84,7 @@ impl ArgName { ArgName::Domainname, ArgName::Groups, ArgName::Capability, + ArgName::IoPriority, ] .iter() .copied() @@ -102,7 +110,7 @@ impl Default for MockCalls { } impl MockCalls { - fn act(&self, name: ArgName, value: Box) -> anyhow::Result<()> { + fn act(&self, name: ArgName, value: Box) -> Result<()> { if self.args.get(&name).unwrap().borrow().ret_err_times > 0 { self.args.get(&name).unwrap().borrow_mut().ret_err_times -= 1; if let Some(e) = &self.args.get(&name).unwrap().borrow().ret_err { @@ -138,39 +146,39 @@ impl Syscall for TestHelperSyscall { self } - fn pivot_rootfs(&self, _path: &Path) -> anyhow::Result<()> { + fn pivot_rootfs(&self, _path: &Path) -> Result<()> { unimplemented!() } - fn set_ns(&self, rawfd: i32, nstype: CloneFlags) -> 
anyhow::Result<()> { + fn set_ns(&self, rawfd: i32, nstype: CloneFlags) -> Result<()> { self.mocks .act(ArgName::Namespace, Box::new((rawfd, nstype))) } - fn set_id(&self, _uid: Uid, _gid: Gid) -> anyhow::Result<()> { + fn set_id(&self, _uid: Uid, _gid: Gid) -> Result<()> { unimplemented!() } - fn unshare(&self, flags: CloneFlags) -> anyhow::Result<()> { + fn unshare(&self, flags: CloneFlags) -> Result<()> { self.mocks.act(ArgName::Unshare, Box::new(flags)) } - fn set_capability(&self, cset: CapSet, value: &CapsHashSet) -> anyhow::Result<()> { + fn set_capability(&self, cset: CapSet, value: &CapsHashSet) -> Result<()> { self.mocks .act(ArgName::Capability, Box::new((cset, value.clone()))) } - fn set_hostname(&self, hostname: &str) -> anyhow::Result<()> { + fn set_hostname(&self, hostname: &str) -> Result<()> { self.mocks .act(ArgName::Hostname, Box::new(hostname.to_owned())) } - fn set_domainname(&self, domainname: &str) -> anyhow::Result<()> { + fn set_domainname(&self, domainname: &str) -> Result<()> { self.mocks .act(ArgName::Domainname, Box::new(domainname.to_owned())) } - fn set_rlimit(&self, _rlimit: &LinuxRlimit) -> anyhow::Result<()> { + fn set_rlimit(&self, _rlimit: &LinuxRlimit) -> Result<()> { todo!() } @@ -178,7 +186,7 @@ impl Syscall for TestHelperSyscall { Some(OsString::from("youki").into()) } - fn chroot(&self, _: &Path) -> anyhow::Result<()> { + fn chroot(&self, _: &Path) -> Result<()> { todo!() } @@ -189,7 +197,7 @@ impl Syscall for TestHelperSyscall { fstype: Option<&str>, flags: MsFlags, data: Option<&str>, - ) -> anyhow::Result<()> { + ) -> Result<()> { self.mocks.act( ArgName::Mount, Box::new(MountArgs { @@ -202,14 +210,14 @@ impl Syscall for TestHelperSyscall { ) } - fn symlink(&self, original: &Path, link: &Path) -> anyhow::Result<()> { + fn symlink(&self, original: &Path, link: &Path) -> Result<()> { self.mocks.act( ArgName::Symlink, Box::new((original.to_path_buf(), link.to_path_buf())), ) } - fn mknod(&self, path: &Path, kind: SFlag, 
perm: Mode, dev: u64) -> anyhow::Result<()> { + fn mknod(&self, path: &Path, kind: SFlag, perm: Mode, dev: u64) -> Result<()> { self.mocks.act( ArgName::Mknod, Box::new(MknodArgs { @@ -220,7 +228,7 @@ impl Syscall for TestHelperSyscall { }), ) } - fn chown(&self, path: &Path, owner: Option, group: Option) -> anyhow::Result<()> { + fn chown(&self, path: &Path, owner: Option, group: Option) -> Result<()> { self.mocks.act( ArgName::Chown, Box::new(ChownArgs { @@ -231,11 +239,11 @@ impl Syscall for TestHelperSyscall { ) } - fn set_groups(&self, groups: &[Gid]) -> anyhow::Result<()> { + fn set_groups(&self, groups: &[Gid]) -> Result<()> { self.mocks.act(ArgName::Groups, Box::new(groups.to_vec())) } - fn close_range(&self, _: i32) -> anyhow::Result<()> { + fn close_range(&self, _: i32) -> Result<()> { todo!() } @@ -246,13 +254,20 @@ impl Syscall for TestHelperSyscall { _: u32, _: &linux::MountAttr, _: libc::size_t, - ) -> anyhow::Result<()> { + ) -> Result<()> { todo!() } + + fn set_io_priority(&self, class: i64, priority: i64) -> Result<()> { + self.mocks.act( + ArgName::IoPriority, + Box::new(IoPriorityArgs { class, priority }), + ) + } } impl TestHelperSyscall { - pub fn set_ret_err(&self, name: ArgName, err: fn() -> anyhow::Result<()>) { + pub fn set_ret_err(&self, name: ArgName, err: fn() -> Result<()>) { self.mocks.fetch_mut(name).ret_err = Some(err); self.set_ret_err_times(name, 1); } @@ -350,4 +365,13 @@ impl TestHelperSyscall { .map(|x| x.downcast_ref::>().unwrap().clone()) .collect::>>() } + + pub fn get_io_priority_args(&self) -> Vec { + self.mocks + .fetch(ArgName::IoPriority) + .values + .iter() + .map(|x| x.downcast_ref::().unwrap().clone()) + .collect::>() + } } diff --git a/crates/libcontainer/src/test_utils.rs b/crates/libcontainer/src/test_utils.rs new file mode 100644 index 000000000..74dc27419 --- /dev/null +++ b/crates/libcontainer/src/test_utils.rs @@ -0,0 +1,139 @@ +use nix::sys::wait; +use serde::{Deserialize, Serialize}; + +// Normally, error 
types are not implemented as serialize/deserialize, but to +// pass the error from the child process to the parent process, we need to +// implement an error type that can be serialized and deserialized. +#[derive(Debug, Serialize, Deserialize)] +struct ErrorEnclosure { + source: Option>, + description: String, +} + +impl ErrorEnclosure { + pub fn new(e: &T) -> ErrorEnclosure + where + T: ?Sized + std::error::Error, + { + ErrorEnclosure { + description: e.to_string(), + source: e.source().map(|s| Box::new(ErrorEnclosure::new(s))), + } + } +} + +impl std::fmt::Display for ErrorEnclosure { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.description) + } +} + +impl std::error::Error for ErrorEnclosure { + fn source(&self) -> Option<&(dyn 'static + std::error::Error)> { + self.source + .as_ref() + .map(|source| &**source as &(dyn 'static + std::error::Error)) + } + + fn description(&self) -> &str { + &self.description + } +} + +type ClosureResult = Result<(), ErrorEnclosure>; + +#[derive(Debug, thiserror::Error)] +pub enum TestError { + #[error("failed to create channel")] + Channel(#[from] crate::channel::ChannelError), + #[error("failed to fork")] + Fork(#[source] nix::Error), + #[error("failed to wait for child process")] + Wait(#[source] nix::Error), + #[error("failed to run function in child process")] + Execution(#[source] Box), + #[error("the closure caused the child process to panic")] + Panic, +} + +#[derive(Debug, thiserror::Error)] +pub enum TestCallbackError { + #[error("{0}")] + Custom(String), + #[error("{0:?}")] + Other(#[from] Box), +} + +impl From<&str> for TestCallbackError { + fn from(s: &str) -> Self { + TestCallbackError::Custom(s.to_string()) + } +} + +impl From for TestCallbackError { + fn from(s: String) -> Self { + TestCallbackError::Custom(s) + } +} + +pub fn test_in_child_process(cb: F) -> Result<(), TestError> +where + F: FnOnce() -> Result<(), TestCallbackError> + std::panic::UnwindSafe, +{ + 
let (mut sender, mut receiver) = crate::channel::channel::()?; + match unsafe { nix::unistd::fork().map_err(TestError::Fork)? } { + nix::unistd::ForkResult::Parent { child } => { + // Close unused senders + sender.close().map_err(TestError::Channel)?; + let res = receiver.recv().map_err(TestError::Channel)?; + wait::waitpid(child, None).map_err(TestError::Wait)?; + res.map_err(|err| TestError::Execution(Box::new(err)))?; + } + nix::unistd::ForkResult::Child => { + // Close unused receiver in the child + receiver.close().map_err(TestError::Channel)?; + let test_result = match std::panic::catch_unwind(cb) { + Ok(ret) => ret.map_err(|err| ErrorEnclosure::new(&err)), + Err(_) => Err(ErrorEnclosure::new(&TestError::Panic)), + }; + + // If we can't send the error to the parent process, there is + // nothing we can do other than exit properly. + let _ = sender.send(test_result); + std::process::exit(0); + } + }; + + Ok(()) +} + +#[cfg(test)] +mod tests { + use core::panic; + + use super::*; + use anyhow::{bail, Result}; + + #[test] + fn test_child_process() -> Result<()> { + if test_in_child_process(|| Err(TestCallbackError::Custom("test error".to_string()))) + .is_ok() + { + bail!("expecting the child process to return an error") + } + + Ok(()) + } + + #[test] + fn test_panic_child_process() -> Result<()> { + let ret = test_in_child_process(|| { + panic!("test panic"); + }); + if ret.is_ok() { + bail!("expecting the child process to panic") + } + + Ok(()) + } +} diff --git a/crates/libcontainer/src/tty.rs b/crates/libcontainer/src/tty.rs index 37ee944f3..e43ecae58 100644 --- a/crates/libcontainer/src/tty.rs +++ b/crates/libcontainer/src/tty.rs @@ -1,21 +1,73 @@ //! 
tty (teletype) for user-system interaction +use nix::errno::Errno; +use nix::sys::socket::{self, UnixAddr}; +use nix::unistd::close; +use nix::unistd::dup2; use std::io::IoSlice; use std::os::unix::fs::symlink; use std::os::unix::io::AsRawFd; use std::os::unix::prelude::RawFd; -use std::path::Path; +use std::path::{Path, PathBuf}; -use anyhow::Context; -use anyhow::{bail, Result}; -use nix::errno::Errno; -use nix::sys::socket::{self, UnixAddr}; -use nix::unistd::close; -use nix::unistd::dup2; +#[derive(Debug)] +pub enum StdIO { + Stdin = 0, + Stdout = 1, + Stderr = 2, +} -const STDIN: i32 = 0; -const STDOUT: i32 = 1; -const STDERR: i32 = 2; +impl From for i32 { + fn from(value: StdIO) -> Self { + match value { + StdIO::Stdin => 0, + StdIO::Stdout => 1, + StdIO::Stderr => 2, + } + } +} + +impl std::fmt::Display for StdIO { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + StdIO::Stdin => write!(f, "stdin"), + StdIO::Stdout => write!(f, "stdout"), + StdIO::Stderr => write!(f, "stderr"), + } + } +} + +#[derive(Debug, thiserror::Error)] +pub enum TTYError { + #[error("failed to connect/duplicate {stdio}")] + ConnectStdIO { source: nix::Error, stdio: StdIO }, + #[error("failed to create console socket")] + CreateConsoleSocket { + source: nix::Error, + socket_name: String, + }, + #[error("failed to symlink console socket into container_dir")] + Symlink { + source: std::io::Error, + linked: Box, + console_socket_path: Box, + }, + #[error("invalid socker name: {socket_name:?}")] + InvalidSocketName { + socket_name: String, + source: nix::Error, + }, + #[error("failed to create console socket fd")] + CreateConsoleSocketFd { source: nix::Error }, + #[error("could not create pseudo terminal")] + CreatePseudoTerminal { source: nix::Error }, + #[error("failed to send pty master")] + SendPtyMaster { source: nix::Error }, + #[error("could not close console socket")] + CloseConsoleSocket { source: nix::Error }, +} + +type Result = 
std::result::Result; // TODO: Handling when there isn't console-socket. pub fn setup_console_socket( @@ -24,21 +76,31 @@ pub fn setup_console_socket( socket_name: &str, ) -> Result { let linked = container_dir.join(socket_name); - symlink(console_socket_path, linked)?; + symlink(console_socket_path, &linked).map_err(|err| TTYError::Symlink { + source: err, + linked: linked.to_path_buf().into(), + console_socket_path: console_socket_path.to_path_buf().into(), + })?; let mut csocketfd = socket::socket( socket::AddressFamily::Unix, socket::SockType::Stream, socket::SockFlag::empty(), None, - )?; - csocketfd = match socket::connect(csocketfd, &socket::UnixAddr::new(socket_name)?) { - Err(errno) => { - if !matches!(errno, Errno::ENOENT) { - bail!("failed to open {}", socket_name); - } - -1 - } + ) + .map_err(|err| TTYError::CreateConsoleSocketFd { source: err })?; + csocketfd = match socket::connect( + csocketfd, + &socket::UnixAddr::new(socket_name).map_err(|err| TTYError::InvalidSocketName { + source: err, + socket_name: socket_name.to_string(), + })?, + ) { + Err(Errno::ENOENT) => -1, + Err(errno) => Err(TTYError::CreateConsoleSocket { + source: errno, + socket_name: socket_name.to_string(), + })?, Ok(()) => csocketfd, }; Ok(csocketfd) @@ -47,8 +109,8 @@ pub fn setup_console_socket( pub fn setup_console(console_fd: &RawFd) -> Result<()> { // You can also access pty master, but it is better to use the API. // ref. 
https://github.com/containerd/containerd/blob/261c107ffc4ff681bc73988f64e3f60c32233b37/vendor/github.com/containerd/go-runc/console.go#L139-L154 - let openpty_result = - nix::pty::openpty(None, None).context("could not create pseudo terminal")?; + let openpty_result = nix::pty::openpty(None, None) + .map_err(|err| TTYError::CreatePseudoTerminal { source: err })?; let pty_name: &[u8] = b"/dev/ptmx"; let iov = [IoSlice::new(pty_name)]; let fds = [openpty_result.master]; @@ -60,23 +122,34 @@ pub fn setup_console(console_fd: &RawFd) -> Result<()> { socket::MsgFlags::empty(), None, ) - .context("failed to send pty master")?; + .map_err(|err| TTYError::SendPtyMaster { source: err })?; if unsafe { libc::ioctl(openpty_result.slave, libc::TIOCSCTTY) } < 0 { - log::warn!("could not TIOCSCTTY"); + tracing::warn!("could not TIOCSCTTY"); }; let slave = openpty_result.slave; - connect_stdio(&slave, &slave, &slave).context("could not dup tty to stderr")?; - close(console_fd.as_raw_fd()).context("could not close console socket")?; + connect_stdio(&slave, &slave, &slave)?; + close(console_fd.as_raw_fd()).map_err(|err| TTYError::CloseConsoleSocket { source: err })?; + Ok(()) } fn connect_stdio(stdin: &RawFd, stdout: &RawFd, stderr: &RawFd) -> Result<()> { - dup2(stdin.as_raw_fd(), STDIN)?; - dup2(stdout.as_raw_fd(), STDOUT)?; + dup2(stdin.as_raw_fd(), StdIO::Stdin.into()).map_err(|err| TTYError::ConnectStdIO { + source: err, + stdio: StdIO::Stdin, + })?; + dup2(stdout.as_raw_fd(), StdIO::Stdout.into()).map_err(|err| TTYError::ConnectStdIO { + source: err, + stdio: StdIO::Stdout, + })?; // FIXME: Rarely does it fail. 
// error message: `Error: Resource temporarily unavailable (os error 11)` - dup2(stderr.as_raw_fd(), STDERR)?; + dup2(stderr.as_raw_fd(), StdIO::Stderr.into()).map_err(|err| TTYError::ConnectStdIO { + source: err, + stdio: StdIO::Stderr, + })?; + Ok(()) } @@ -84,20 +157,18 @@ fn connect_stdio(stdin: &RawFd, stdout: &RawFd, stderr: &RawFd) -> Result<()> { mod tests { use super::*; + use anyhow::Result; + use serial_test::serial; use std::env; use std::fs::{self, File}; use std::os::unix::net::UnixListener; use std::path::PathBuf; - use serial_test::serial; - - use crate::utils::{create_temp_dir, TempDir}; - const CONSOLE_SOCKET: &str = "console-socket"; - fn setup(testname: &str) -> Result<(TempDir, PathBuf, PathBuf)> { - let testdir = create_temp_dir(testname)?; - let rundir_path = Path::join(&testdir, "run"); + fn setup() -> Result<(tempfile::TempDir, PathBuf, PathBuf)> { + let testdir = tempfile::tempdir()?; + let rundir_path = Path::join(testdir.path(), "run"); fs::create_dir(&rundir_path)?; let socket_path = Path::new(&rundir_path).join("socket"); let _ = File::create(&socket_path); @@ -108,10 +179,10 @@ mod tests { #[test] #[serial] fn test_setup_console_socket() { - let init = setup("test_setup_console_socket"); + let init = setup(); assert!(init.is_ok()); let (testdir, rundir_path, socket_path) = init.unwrap(); - let lis = UnixListener::bind(Path::join(&testdir, "console-socket")); + let lis = UnixListener::bind(Path::join(testdir.path(), "console-socket")); assert!(lis.is_ok()); let fd = setup_console_socket(&rundir_path, &socket_path, CONSOLE_SOCKET); assert!(fd.is_ok()); @@ -121,7 +192,7 @@ mod tests { #[test] #[serial] fn test_setup_console_socket_empty() { - let init = setup("test_setup_console_socket_empty"); + let init = setup(); assert!(init.is_ok()); let (_testdir, rundir_path, socket_path) = init.unwrap(); let fd = setup_console_socket(&rundir_path, &socket_path, CONSOLE_SOCKET); @@ -132,10 +203,10 @@ mod tests { #[test] #[serial] fn 
test_setup_console_socket_invalid() { - let init = setup("test_setup_console_socket_invalid"); + let init = setup(); assert!(init.is_ok()); let (testdir, rundir_path, socket_path) = init.unwrap(); - let _socket = File::create(Path::join(&testdir, "console-socket")); + let _socket = File::create(Path::join(testdir.path(), "console-socket")); assert!(_socket.is_ok()); let fd = setup_console_socket(&rundir_path, &socket_path, CONSOLE_SOCKET); assert!(fd.is_err()); @@ -144,10 +215,10 @@ mod tests { #[test] #[serial] fn test_setup_console() { - let init = setup("test_setup_console"); + let init = setup(); assert!(init.is_ok()); let (testdir, rundir_path, socket_path) = init.unwrap(); - let lis = UnixListener::bind(Path::join(&testdir, "console-socket")); + let lis = UnixListener::bind(Path::join(testdir.path(), "console-socket")); assert!(lis.is_ok()); let fd = setup_console_socket(&rundir_path, &socket_path, CONSOLE_SOCKET); let status = setup_console(&fd.unwrap()); diff --git a/crates/libcontainer/src/utils.rs b/crates/libcontainer/src/utils.rs index 781316302..27f735dfa 100644 --- a/crates/libcontainer/src/utils.rs +++ b/crates/libcontainer/src/utils.rs @@ -1,39 +1,55 @@ //! 
Utility functionality -use anyhow::Context; -use anyhow::{bail, Result}; -use nix::sys::stat::Mode; -use nix::sys::statfs; -use nix::unistd; -use nix::unistd::{Uid, User}; use std::collections::HashMap; -use std::ffi::CString; use std::fs::{self, DirBuilder, File}; -use std::io::ErrorKind; -use std::ops::Deref; use std::os::linux::fs::MetadataExt; use std::os::unix::fs::DirBuilderExt; -use std::os::unix::prelude::{AsRawFd, OsStrExt}; +use std::os::unix::prelude::AsRawFd; use std::path::{Component, Path, PathBuf}; +use nix::sys::stat::Mode; +use nix::sys::statfs; +use nix::unistd::{Uid, User}; + +#[derive(Debug, thiserror::Error)] +pub enum PathBufExtError { + #[error("relative path cannot be converted to the path in the container")] + RelativePath, + #[error("failed to strip prefix from {path:?}")] + StripPrefix { + path: PathBuf, + source: std::path::StripPrefixError, + }, + #[error("failed to canonicalize path {path:?}")] + Canonicalize { + path: PathBuf, + source: std::io::Error, + }, + #[error("failed to get current directory")] + CurrentDir { source: std::io::Error }, +} + pub trait PathBufExt { - fn as_relative(&self) -> Result<&Path>; - fn join_safely>(&self, p: P) -> Result; - fn canonicalize_safely(&self) -> Result; + fn as_relative(&self) -> Result<&Path, PathBufExtError>; + fn join_safely>(&self, p: P) -> Result; + fn canonicalize_safely(&self) -> Result; fn normalize(&self) -> PathBuf; } impl PathBufExt for Path { - fn as_relative(&self) -> Result<&Path> { - if self.is_relative() { - bail!("relative path cannot be converted to the path in the container.") - } else { - self.strip_prefix("/") - .with_context(|| format!("failed to strip prefix from {:?}", self)) + fn as_relative(&self) -> Result<&Path, PathBufExtError> { + match self.is_relative() { + true => Err(PathBufExtError::RelativePath), + false => Ok(self + .strip_prefix("/") + .map_err(|e| PathBufExtError::StripPrefix { + path: self.to_path_buf(), + source: e, + })?), } } - fn join_safely>(&self, 
path: P) -> Result { + fn join_safely>(&self, path: P) -> Result { let path = path.as_ref(); if path.is_relative() { return Ok(self.join(path)); @@ -41,19 +57,25 @@ impl PathBufExt for Path { let stripped = path .strip_prefix("/") - .with_context(|| format!("failed to strip prefix from {}", path.display()))?; + .map_err(|e| PathBufExtError::StripPrefix { + path: self.to_path_buf(), + source: e, + })?; Ok(self.join(stripped)) } /// Canonicalizes existing and not existing paths - fn canonicalize_safely(&self) -> Result { + fn canonicalize_safely(&self) -> Result { if self.exists() { self.canonicalize() - .with_context(|| format!("failed to canonicalize path {:?}", self)) + .map_err(|e| PathBufExtError::Canonicalize { + path: self.to_path_buf(), + source: e, + }) } else { if self.is_relative() { let p = std::env::current_dir() - .context("could not get current directory")? + .map_err(|e| PathBufExtError::CurrentDir { source: e })? .join(self); return Ok(p.normalize()); } @@ -121,17 +143,6 @@ pub fn get_user_home(uid: u32) -> Option { } } -pub fn do_exec(path: impl AsRef, args: &[String]) -> Result<()> { - let p = CString::new(path.as_ref().as_os_str().as_bytes()) - .with_context(|| format!("failed to convert path {:?} to cstring", path.as_ref()))?; - let a: Vec = args - .iter() - .map(|s| CString::new(s.as_bytes()).unwrap_or_default()) - .collect(); - unistd::execvp(&p, &a)?; - Ok(()) -} - /// If None, it will generate a default path for cgroups. 
pub fn get_cgroup_path( cgroups_path: &Option, @@ -142,25 +153,44 @@ pub fn get_cgroup_path( Some(cpath) => cpath.clone(), None => match rootless { false => PathBuf::from(container_id), - true => PathBuf::from(format!(":youki:{}", container_id)), + true => PathBuf::from(format!(":youki:{container_id}")), }, } } -pub fn write_file, C: AsRef<[u8]>>(path: P, contents: C) -> Result<()> { - let path = path.as_ref(); - fs::write(path, contents).with_context(|| format!("failed to write to {:?}", path))?; +pub fn write_file, C: AsRef<[u8]>>( + path: P, + contents: C, +) -> Result<(), std::io::Error> { + fs::write(path.as_ref(), contents).map_err(|err| { + tracing::error!(path = ?path.as_ref(), ?err, "failed to write file"); + err + })?; + Ok(()) } -pub fn create_dir_all>(path: P) -> Result<()> { - let path = path.as_ref(); - fs::create_dir_all(path).with_context(|| format!("failed to create directory {:?}", path)) +pub fn create_dir_all>(path: P) -> Result<(), std::io::Error> { + fs::create_dir_all(path.as_ref()).map_err(|err| { + tracing::error!(path = ?path.as_ref(), ?err, "failed to create directory"); + err + })?; + Ok(()) } -pub fn open>(path: P) -> Result { - let path = path.as_ref(); - File::open(path).with_context(|| format!("failed to open {:?}", path)) +pub fn open>(path: P) -> Result { + File::open(path.as_ref()).map_err(|err| { + tracing::error!(path = ?path.as_ref(), ?err, "failed to open file"); + err + }) +} + +#[derive(Debug, thiserror::Error)] +pub enum MkdirWithModeError { + #[error("IO error")] + Io(#[from] std::io::Error), + #[error("metadata doesn't match the expected attributes")] + MetadataMismatch, } /// Creates the specified directory and all parent directories with the specified mode. 
Ensures @@ -176,241 +206,62 @@ pub fn open>(path: P) -> Result { /// create_dir_all_with_mode(&path, 1000, Mode::S_IRWXU).unwrap(); /// assert!(path.exists()) /// ``` -pub fn create_dir_all_with_mode>(path: P, owner: u32, mode: Mode) -> Result<()> { +pub fn create_dir_all_with_mode>( + path: P, + owner: u32, + mode: Mode, +) -> Result<(), MkdirWithModeError> { let path = path.as_ref(); if !path.exists() { DirBuilder::new() .recursive(true) .mode(mode.bits()) - .create(path) - .with_context(|| format!("failed to create directory {}", path.display()))?; + .create(path)?; } - let metadata = path - .metadata() - .with_context(|| format!("failed to get metadata for {}", path.display()))?; - + let metadata = path.metadata()?; if metadata.is_dir() && metadata.st_uid() == owner && metadata.st_mode() & mode.bits() == mode.bits() { Ok(()) } else { - bail!( - "metadata for {} does not possess the expected attributes", - path.display() - ); + Err(MkdirWithModeError::MetadataMismatch) } } +#[derive(Debug, thiserror::Error)] +pub enum EnsureProcfsError { + #[error(transparent)] + Nix(#[from] nix::Error), + #[error(transparent)] + IO(#[from] std::io::Error), +} + // Make sure a given path is on procfs. This is to avoid the security risk that // /proc path is mounted over. 
Ref: CVE-2019-16884 -pub fn ensure_procfs(path: &Path) -> Result<()> { - let procfs_fd = fs::File::open(path)?; - let fstat_info = statfs::fstatfs(&procfs_fd.as_raw_fd())?; +pub fn ensure_procfs(path: &Path) -> Result<(), EnsureProcfsError> { + let procfs_fd = fs::File::open(path).map_err(|err| { + tracing::error!(?err, ?path, "failed to open procfs file"); + err + })?; + let fstat_info = statfs::fstatfs(&procfs_fd.as_raw_fd()).map_err(|err| { + tracing::error!(?err, ?path, "failed to fstatfs the procfs"); + err + })?; if fstat_info.filesystem_type() != statfs::PROC_SUPER_MAGIC { - bail!(format!("{:?} is not on the procfs", path)); + tracing::error!(?path, "given path is not on the procfs"); + Err(nix::Error::EINVAL)?; } Ok(()) } -pub fn secure_join>(rootfs: P, unsafe_path: P) -> Result { - let mut rootfs = rootfs.into(); - let mut path = unsafe_path.into(); - let mut clean_path = PathBuf::new(); - - let mut part = path.iter(); - let mut i = 0; - - loop { - if i > 255 { - bail!("dereference too many symlinks, may be infinite loop"); - } - - let part_path = match part.next() { - None => break, - Some(part) => PathBuf::from(part), - }; - - if !part_path.is_absolute() { - if part_path.starts_with("..") { - clean_path.pop(); - } else { - // check if symlink then dereference - let curr_path = PathBuf::from(&rootfs).join(&clean_path).join(&part_path); - let metadata = match curr_path.symlink_metadata() { - Ok(metadata) => Some(metadata), - Err(error) => match error.kind() { - // if file does not exists, treat it as normal path - ErrorKind::NotFound => None, - other_error => { - bail!( - "unable to obtain symlink metadata for file {:?}: {:?}", - curr_path, - other_error - ); - } - }, - }; - - if let Some(metadata) = metadata { - if metadata.file_type().is_symlink() { - let link_path = fs::read_link(curr_path)?; - path = link_path.join(part.as_path()); - part = path.iter(); - - // increase after dereference symlink - i += 1; - continue; - } - } - - 
clean_path.push(&part_path); - } - } - } - - rootfs.push(clean_path); - Ok(rootfs) -} - -pub struct TempDir { - path: Option, -} - -impl TempDir { - pub fn new>(path: P) -> Result { - let p = path.into(); - std::fs::create_dir_all(&p) - .with_context(|| format!("failed to create directory {}", p.display()))?; - Ok(Self { path: Some(p) }) - } - - pub fn path(&self) -> &Path { - self.path - .as_ref() - .expect("temp dir has already been removed") - } - - pub fn remove(&mut self) { - if let Some(p) = &self.path { - let _ = fs::remove_dir_all(p); - self.path = None; - } - } -} - -impl Drop for TempDir { - fn drop(&mut self) { - self.remove(); - } -} - -impl AsRef for TempDir { - fn as_ref(&self) -> &Path { - self.path() - } -} - -impl Deref for TempDir { - type Target = Path; - - fn deref(&self) -> &Self::Target { - self.path() - } -} - -pub fn create_temp_dir(test_name: &str) -> Result { - let dir = TempDir::new(get_temp_dir_path(test_name))?; - Ok(dir) -} - -pub fn get_temp_dir_path(test_name: &str) -> PathBuf { - std::env::temp_dir().join(test_name) -} - -pub fn get_executable_path(name: &str, path_var: &str) -> Option { - let paths = path_var.trim_start_matches("PATH="); - // if path has / in it, we have to assume absolute path, as per runc impl - if name.contains('/') && PathBuf::from(name).exists() { - return Some(PathBuf::from(name)); - } - for path in paths.split(':') { - let potential_path = PathBuf::from(path).join(name); - if potential_path.exists() { - return Some(potential_path); - } - } - None -} - -pub fn is_executable(path: &Path) -> Result { - use std::os::unix::fs::PermissionsExt; - let metadata = path.metadata()?; - let permissions = metadata.permissions(); - // we have to check if the path is file and the execute bit - // is set. 
In case of directories, the execute bit is also set, - // so have to check if this is a file or not - Ok(metadata.is_file() && permissions.mode() & 0o001 != 0) -} - -#[cfg(test)] -pub(crate) mod test_utils { - use crate::process::channel; - use anyhow::Context; - use anyhow::{bail, Result}; - use nix::sys::wait; - use rand::Rng; - use serde::{Deserialize, Serialize}; - - #[derive(Debug, Serialize, Deserialize)] - struct TestResult { - success: bool, - message: String, - } - - pub fn test_in_child_process Result<()>>(cb: F) -> Result<()> { - let (mut sender, mut receiver) = channel::channel::()?; - match unsafe { nix::unistd::fork()? } { - nix::unistd::ForkResult::Parent { child } => { - let res = receiver.recv()?; - wait::waitpid(child, None)?; - - if !res.success { - bail!("child process failed: {}", res.message); - } - } - nix::unistd::ForkResult::Child => { - let test_result = match cb() { - Ok(_) => TestResult { - success: true, - message: String::new(), - }, - Err(err) => TestResult { - success: false, - message: err.to_string(), - }, - }; - sender - .send(test_result) - .context("failed to send from the child process")?; - std::process::exit(0); - } - }; - - Ok(()) - } - - pub fn gen_u32() -> u32 { - rand::thread_rng().gen() - } -} - #[cfg(test)] mod tests { use super::*; + use anyhow::{bail, Result}; #[test] pub fn test_get_unix_user() { @@ -444,11 +295,12 @@ mod tests { PathBuf::from("/youki") ); } + #[test] fn test_parse_env() -> Result<()> { let key = "key".to_string(); let value = "value".to_string(); - let env_input = vec![format!("{}={}", key, value)]; + let env_input = vec![format!("{key}={value}")]; let env_output = parse_env(&env_input); assert_eq!( env_output.len(), @@ -459,111 +311,57 @@ mod tests { Ok(()) } - #[test] - fn test_secure_join() { - assert_eq!( - secure_join(Path::new("/tmp/rootfs"), Path::new("path")).unwrap(), - PathBuf::from("/tmp/rootfs/path") - ); - assert_eq!( - secure_join(Path::new("/tmp/rootfs"), 
Path::new("more/path")).unwrap(), - PathBuf::from("/tmp/rootfs/more/path") - ); - assert_eq!( - secure_join(Path::new("/tmp/rootfs"), Path::new("/absolute/path")).unwrap(), - PathBuf::from("/tmp/rootfs/absolute/path") - ); - assert_eq!( - secure_join( - Path::new("/tmp/rootfs"), - Path::new("/path/with/../parent/./sample") - ) - .unwrap(), - PathBuf::from("/tmp/rootfs/path/parent/sample") - ); - assert_eq!( - secure_join(Path::new("/tmp/rootfs"), Path::new("/../../../../tmp")).unwrap(), - PathBuf::from("/tmp/rootfs/tmp") - ); - assert_eq!( - secure_join(Path::new("/tmp/rootfs"), Path::new("./../../../../var/log")).unwrap(), - PathBuf::from("/tmp/rootfs/var/log") - ); - assert_eq!( - secure_join( - Path::new("/tmp/rootfs"), - Path::new("../../../../etc/passwd") - ) - .unwrap(), - PathBuf::from("/tmp/rootfs/etc/passwd") - ); - } - #[test] - fn test_secure_join_symlink() { - use std::os::unix::fs::symlink; - - let tmp = create_temp_dir("root").unwrap(); - let test_root_dir = tmp.path(); - - symlink("somepath", PathBuf::from(&test_root_dir).join("etc")).unwrap(); - symlink( - "../../../../../../../../../../../../../etc", - PathBuf::from(&test_root_dir).join("longbacklink"), - ) - .unwrap(); - symlink( - "/../../../../../../../../../../../../../etc/passwd", - PathBuf::from(&test_root_dir).join("absolutelink"), - ) - .unwrap(); - - assert_eq!( - secure_join(test_root_dir, PathBuf::from("etc").as_path()).unwrap(), - PathBuf::from(&test_root_dir).join("somepath") - ); - assert_eq!( - secure_join(test_root_dir, PathBuf::from("longbacklink").as_path()).unwrap(), - PathBuf::from(&test_root_dir).join("somepath") - ); - assert_eq!( - secure_join(test_root_dir, PathBuf::from("absolutelink").as_path()).unwrap(), - PathBuf::from(&test_root_dir).join("somepath/passwd") - ); - } #[test] - fn test_get_executable_path() { - let non_existing_abs_path = "/some/non/existent/absolute/path"; - let existing_abs_path = "/usr/bin/sh"; - let existing_binary = "sh"; - let non_existing_binary = 
"non-existent"; - let path_value = "PATH=/usr/bin:/bin"; - - assert_eq!( - get_executable_path(existing_abs_path, path_value), - Some(PathBuf::from(existing_abs_path)) - ); - assert_eq!(get_executable_path(non_existing_abs_path, path_value), None); - - assert_eq!( - get_executable_path(existing_binary, path_value), - Some(PathBuf::from("/usr/bin/sh")) - ); - - assert_eq!(get_executable_path(non_existing_binary, path_value), None); + fn test_create_dir_all_with_mode() -> Result<()> { + { + let temdir = tempfile::tempdir()?; + let path = temdir.path().join("test"); + let uid = nix::unistd::getuid().as_raw(); + let mode = Mode::S_IRWXU; + create_dir_all_with_mode(&path, uid, mode)?; + let metadata = path.metadata()?; + assert!(path.is_dir()); + assert_eq!(metadata.st_uid(), uid); + assert_eq!(metadata.st_mode() & mode.bits(), mode.bits()); + } + { + let temdir = tempfile::tempdir()?; + let path = temdir.path().join("test"); + let mode = Mode::S_IRWXU; + std::fs::create_dir(&path)?; + assert!(path.is_dir()); + match create_dir_all_with_mode(&path, 8899, mode) { + Err(MkdirWithModeError::MetadataMismatch) => {} + _ => bail!("should return MetadataMismatch"), + } + } + Ok(()) } #[test] - fn test_is_executable() { - let executable_path = PathBuf::from("/bin/sh"); - let directory_path = PathBuf::from("/tmp"); - // a file guaranteed to be on linux and not executable - let non_executable_path = PathBuf::from("/boot/initrd.img"); - let non_existent_path = PathBuf::from("/some/non/existent/path"); - - assert!(is_executable(&non_existent_path).is_err()); - assert!(is_executable(&executable_path).unwrap()); - assert!(!is_executable(&non_executable_path).unwrap()); - assert!(!is_executable(&directory_path).unwrap()); + fn test_io() -> Result<()> { + { + let tempdir = tempfile::tempdir()?; + let path = tempdir.path().join("test"); + write_file(&path, "test".as_bytes())?; + open(&path)?; + assert!(create_dir_all(path).is_err()); + } + { + let tempdir = tempfile::tempdir()?; + let 
path = tempdir.path().join("test"); + create_dir_all(&path)?; + assert!(write_file(&path, "test".as_bytes()).is_err()); + } + { + let tempdir = tempfile::tempdir()?; + let path = tempdir.path().join("test"); + assert!(open(&path).is_err()); + create_dir_all(&path)?; + assert!(path.is_dir()) + } + + Ok(()) } } diff --git a/crates/libcontainer/src/workload/default.rs b/crates/libcontainer/src/workload/default.rs index 0ab08ce0a..2beae5ce9 100644 --- a/crates/libcontainer/src/workload/default.rs +++ b/crates/libcontainer/src/workload/default.rs @@ -1,18 +1,15 @@ use std::ffi::CString; -use anyhow::{bail, Context, Result}; use nix::unistd; use oci_spec::runtime::Spec; -use super::{Executor, EMPTY}; +use super::{Executor, ExecutorError, EMPTY}; -const EXECUTOR_NAME: &str = "default"; - -pub struct DefaultExecutor {} - -impl Executor for DefaultExecutor { - fn exec(spec: &Spec) -> Result<()> { - log::debug!("Executing workload with default handler"); +/// Return the default executor. The default executor will execute the command +/// specified in the oci spec. 
+pub fn get_executor() -> Executor { + Box::new(|spec: &Spec| -> Result<(), ExecutorError> { + tracing::debug!("executing workload with default handler"); let args = spec .process() .as_ref() @@ -20,28 +17,26 @@ impl Executor for DefaultExecutor { .unwrap_or(&EMPTY); if args.is_empty() { - bail!("at least one process arg must be specified") + tracing::error!("no arguments provided to execute"); + Err(ExecutorError::InvalidArg)?; } let executable = args[0].as_str(); - let p = CString::new(executable.as_bytes()) - .with_context(|| format!("failed to convert path {:?} to cstring", executable))?; + let cstring_path = CString::new(executable.as_bytes()).map_err(|err| { + tracing::error!("failed to convert path {executable:?} to cstring: {}", err,); + ExecutorError::InvalidArg + })?; let a: Vec = args .iter() .map(|s| CString::new(s.as_bytes()).unwrap_or_default()) .collect(); - unistd::execvp(&p, &a)?; + unistd::execvp(&cstring_path, &a).map_err(|err| { + tracing::error!(?err, filename = ?cstring_path, args = ?a, "failed to execvp"); + ExecutorError::Execution(err.into()) + })?; - // After do_exec is called, the process is replaced with the container + // After execvp is called, the process is replaced with the container // payload through execvp, so it should never reach here. 
unreachable!(); - } - - fn can_handle(_: &Spec) -> Result { - Ok(true) - } - - fn name() -> &'static str { - EXECUTOR_NAME - } + }) } diff --git a/crates/libcontainer/src/workload/mod.rs b/crates/libcontainer/src/workload/mod.rs index 358bc93ac..bae41e04d 100644 --- a/crates/libcontainer/src/workload/mod.rs +++ b/crates/libcontainer/src/workload/mod.rs @@ -1,51 +1,19 @@ -use anyhow::{Context, Result}; use oci_spec::runtime::Spec; -use self::default::DefaultExecutor; -#[cfg(feature = "wasm-wasmedge")] -use self::wasmedge::WasmEdgeExecutor; -#[cfg(feature = "wasm-wasmer")] -use self::wasmer::WasmerExecutor; -#[cfg(feature = "wasm-wasmtime")] -use self::wasmtime::WasmtimeExecutor; - pub mod default; -#[cfg(feature = "wasm-wasmedge")] -pub mod wasmedge; -#[cfg(feature = "wasm-wasmer")] -pub mod wasmer; -#[cfg(feature = "wasm-wasmtime")] -pub mod wasmtime; - -static EMPTY: Vec = Vec::new(); -pub trait Executor { - /// Executes the workload - fn exec(spec: &Spec) -> Result<()>; - /// Checks if the handler is able to handle the workload - fn can_handle(spec: &Spec) -> Result; - /// The name of the handler - fn name() -> &'static str; +pub static EMPTY: Vec = Vec::new(); + +#[derive(Debug, thiserror::Error)] +pub enum ExecutorError { + #[error("invalid argument")] + InvalidArg, + #[error("failed to execute workload")] + Execution(#[from] Box), + #[error("{0}")] + Other(String), + #[error("{0} executor can't handle spec")] + CantHandle(&'static str), } -pub struct ExecutorManager {} - -impl ExecutorManager { - pub fn exec(spec: &Spec) -> Result<()> { - #[cfg(feature = "wasm-wasmer")] - if WasmerExecutor::can_handle(spec)? { - return WasmerExecutor::exec(spec).context("wasmer execution failed"); - } - #[cfg(feature = "wasm-wasmedge")] - if WasmEdgeExecutor::can_handle(spec)? { - return WasmEdgeExecutor::exec(spec).context("wasmedge execution failed"); - } - - #[cfg(feature = "wasm-wasmtime")] - if WasmtimeExecutor::can_handle(spec)? 
{ - return WasmtimeExecutor::exec(spec).context("wasmtime execution failed"); - } - - DefaultExecutor::exec(spec).context("default execution failed") - } -} +pub type Executor = Box Result<(), ExecutorError>>; diff --git a/crates/libcontainer/src/workload/wasmedge.rs b/crates/libcontainer/src/workload/wasmedge.rs deleted file mode 100644 index 03957a6b6..000000000 --- a/crates/libcontainer/src/workload/wasmedge.rs +++ /dev/null @@ -1,90 +0,0 @@ -use anyhow::Result; -use oci_spec::runtime::Spec; -use wasmedge_sdk::{ - config::{CommonConfigOptions, ConfigBuilder, HostRegistrationConfigOptions}, - params, Vm, -}; - -use super::Executor; - -const EXECUTOR_NAME: &str = "wasmedge"; - -pub struct WasmEdgeExecutor {} -impl Executor for WasmEdgeExecutor { - fn exec(spec: &Spec) -> Result<()> { - // parse wasi parameters - let args = get_args(spec); - let mut cmd = args[0].clone(); - if let Some(stripped) = args[0].strip_prefix(std::path::MAIN_SEPARATOR) { - cmd = stripped.to_string(); - } - let envs = env_to_wasi(spec); - - // create configuration with `wasi` option enabled - let config = ConfigBuilder::new(CommonConfigOptions::default()) - .with_host_registration_config(HostRegistrationConfigOptions::default().wasi(true)) - .build()?; - - // create a vm with the config settings - let mut vm = Vm::new(Some(config))?; - - // initialize the wasi module with the parsed parameters - let mut wasi_instance = vm.wasi_module()?; - wasi_instance.initialize( - Some(args.iter().map(|s| s as &str).collect()), - Some(envs.iter().map(|s| s as &str).collect()), - None, - ); - - let mut vm = vm.register_module_from_file("main", cmd)?; - - let ins = vm.named_module("main")?; - ins.func("_start") - .expect("Not found '_start' func in the 'main' module instance") - .call(&mut vm, params!())?; - - Ok(()) - } - - fn can_handle(spec: &Spec) -> Result { - if let Some(annotations) = spec.annotations() { - if let Some(handler) = annotations.get("run.oci.handler") { - return Ok(handler == "wasm"); - 
} - - if let Some(variant) = annotations.get("module.wasm.image/variant") { - return Ok(variant == "compat"); - } - } - - Ok(false) - } - - fn name() -> &'static str { - EXECUTOR_NAME - } -} - -fn get_args(spec: &Spec) -> &[String] { - let p = match spec.process() { - None => return &[], - Some(p) => p, - }; - - match p.args() { - None => &[], - Some(args) => args.as_slice(), - } -} - -fn env_to_wasi(spec: &Spec) -> Vec { - let default = vec![]; - let env = spec - .process() - .as_ref() - .unwrap() - .env() - .as_ref() - .unwrap_or(&default); - env.to_vec() -} diff --git a/crates/libcontainer/src/workload/wasmer.rs b/crates/libcontainer/src/workload/wasmer.rs deleted file mode 100644 index 5901f45f9..000000000 --- a/crates/libcontainer/src/workload/wasmer.rs +++ /dev/null @@ -1,126 +0,0 @@ -use anyhow::{bail, Context, Result}; -use oci_spec::runtime::Spec; -use wasmer::{Instance, Module, Store}; -use wasmer_wasi::WasiState; - -use super::{Executor, EMPTY}; - -const EXECUTOR_NAME: &str = "wasmer"; - -pub struct WasmerExecutor {} - -impl Executor for WasmerExecutor { - fn exec(spec: &Spec) -> Result<()> { - log::debug!("Executing workload with wasmer handler"); - let process = spec.process().as_ref(); - - let args = process.and_then(|p| p.args().as_ref()).unwrap_or(&EMPTY); - let env = process - .and_then(|p| p.env().as_ref()) - .unwrap_or(&EMPTY) - .iter() - .filter_map(|e| { - e.split_once('=') - .filter(|kv| !kv.0.contains('\u{0}') && !kv.1.contains('\u{0}')) - .map(|kv| (kv.0.trim(), kv.1.trim())) - }); - - if args.is_empty() { - bail!("at least one process arg must be specified") - } - - if !args[0].ends_with(".wasm") && !args[0].ends_with(".wat") { - bail!( - "first argument must be a wasm or wat module, but was {}", - args[0] - ) - } - - let mut wasm_env = WasiState::new("youki_wasm_app") - .args(args.iter().skip(1)) - .envs(env) - .finalize()?; - - let store = Store::default(); - let module = Module::from_file(&store, &args[0]) - .with_context(|| 
format!("could not load wasm module from {}", &args[0]))?; - - let imports = wasm_env - .import_object(&module) - .context("could not retrieve wasm imports")?; - let instance = - Instance::new(&module, &imports).context("wasm module could not be instantiated")?; - - let start = instance - .exports - .get_function("_start") - .context("could not retrieve wasm module main function")?; - start - .call(&[]) - .context("wasm module was not executed successfully")?; - - Ok(()) - } - - fn can_handle(spec: &Spec) -> Result { - if let Some(annotations) = spec.annotations() { - if let Some(handler) = annotations.get("run.oci.handler") { - return Ok(handler == "wasm"); - } - - if let Some(variant) = annotations.get("module.wasm.image/variant") { - return Ok(variant == "compat"); - } - } - - Ok(false) - } - - fn name() -> &'static str { - EXECUTOR_NAME - } -} - -#[cfg(test)] -mod tests { - use super::*; - use oci_spec::runtime::SpecBuilder; - use std::collections::HashMap; - - #[test] - fn test_can_handle_oci_handler() -> Result<()> { - let mut annotations = HashMap::with_capacity(1); - annotations.insert("run.oci.handler".to_owned(), "wasm".to_owned()); - let spec = SpecBuilder::default() - .annotations(annotations) - .build() - .context("build spec")?; - - assert!(WasmerExecutor::can_handle(&spec).context("can handle")?); - - Ok(()) - } - - #[test] - fn test_can_handle_compat_wasm_spec() -> Result<()> { - let mut annotations = HashMap::with_capacity(1); - annotations.insert("module.wasm.image/variant".to_owned(), "compat".to_owned()); - let spec = SpecBuilder::default() - .annotations(annotations) - .build() - .context("build spec")?; - - assert!(WasmerExecutor::can_handle(&spec).context("can handle")?); - - Ok(()) - } - - #[test] - fn test_can_handle_no_execute() -> Result<()> { - let spec = SpecBuilder::default().build().context("build spec")?; - - assert!(!WasmerExecutor::can_handle(&spec).context("can handle")?); - - Ok(()) - } -} diff --git 
a/crates/libcontainer/src/workload/wasmtime.rs b/crates/libcontainer/src/workload/wasmtime.rs deleted file mode 100644 index e454f8ca4..000000000 --- a/crates/libcontainer/src/workload/wasmtime.rs +++ /dev/null @@ -1,96 +0,0 @@ -use anyhow::{anyhow, bail, Context, Result}; -use oci_spec::runtime::Spec; -use wasmtime::*; -use wasmtime_wasi::WasiCtxBuilder; - -use super::{Executor, EMPTY}; - -const EXECUTOR_NAME: &str = "wasmtime"; - -pub struct WasmtimeExecutor {} - -impl Executor for WasmtimeExecutor { - fn exec(spec: &Spec) -> Result<()> { - log::info!("Executing workload with wasmtime handler"); - let process = spec.process().as_ref(); - - let args = spec - .process() - .as_ref() - .and_then(|p| p.args().as_ref()) - .unwrap_or(&EMPTY); - if args.is_empty() { - bail!("at least one process arg must be specified") - } - - if !args[0].ends_with(".wasm") && !args[0].ends_with(".wat") { - bail!( - "first argument must be a wasm or wat module, but was {}", - args[0] - ) - } - - let mut cmd = args[0].clone(); - let stripped = args[0].strip_prefix(std::path::MAIN_SEPARATOR); - if let Some(cmd_stripped) = stripped { - cmd = cmd_stripped.to_string(); - } - - let envs: Vec<(String, String)> = process - .and_then(|p| p.env().as_ref()) - .unwrap_or(&EMPTY) - .iter() - .filter_map(|e| { - e.split_once('=') - .map(|kv| (kv.0.trim().to_string(), kv.1.trim().to_string())) - }) - .collect(); - - let engine = Engine::default(); - let module = Module::from_file(&engine, &cmd) - .with_context(|| format!("could not load wasm module from {}", &cmd))?; - - let mut linker = Linker::new(&engine); - wasmtime_wasi::add_to_linker(&mut linker, |s| s) - .context("cannot add wasi context to linker")?; - - let wasi = WasiCtxBuilder::new() - .inherit_stdio() - .args(args) - .context("cannot add args to wasi context")? - .envs(&envs) - .context("cannot add environment variables to wasi context")? 
- .build(); - - let mut store = Store::new(&engine, wasi); - - let instance = linker - .instantiate(&mut store, &module) - .context("wasm module could not be instantiated")?; - let start = instance - .get_func(&mut store, "_start") - .ok_or_else(|| anyhow!("could not retrieve wasm module main function"))?; - - start - .call(&mut store, &[], &mut []) - .context("wasm module was not executed successfully") - } - - fn can_handle(spec: &Spec) -> Result { - if let Some(annotations) = spec.annotations() { - if let Some(handler) = annotations.get("run.oci.handler") { - return Ok(handler == "wasm"); - } - - if let Some(variant) = annotations.get("module.wasm.image/variant") { - return Ok(variant == "compat"); - } - } - - Ok(false) - } - - fn name() -> &'static str { - EXECUTOR_NAME - } -} diff --git a/crates/liboci-cli/Cargo.toml b/crates/liboci-cli/Cargo.toml index 4ae88068b..3d5d3a240 100644 --- a/crates/liboci-cli/Cargo.toml +++ b/crates/liboci-cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "liboci-cli" -version = "0.0.4" +version = "0.1.0" description = "Parse command line arguments for OCI container runtimes" license-file = "../../LICENSE" repository = "https://github.com/containers/youki" @@ -11,6 +11,6 @@ edition = "2021" keywords = ["youki", "container", "oci"] [dependencies.clap] -version = "4.0.32" +version = "4.1.6" default-features = false features = ["std", "suggestions", "derive", "cargo", "help", "usage", "error-context"] diff --git a/crates/liboci-cli/README.md b/crates/liboci-cli/README.md index ea5809ae9..bd0b5c2f6 100644 --- a/crates/liboci-cli/README.md +++ b/crates/liboci-cli/README.md @@ -16,6 +16,7 @@ Interface](https://github.com/opencontainers/runtime-tools/blob/master/docs/comm | checkpoint | | | โœ… | โœ… | | | events | โœ… | | โœ… | | โœ… | | exec | โœ… | | โœ… | โœ… | โœ… | +| features | โœ… | | โœ… | | | | list | โœ… | | โœ… | โœ… | โœ… | | pause | โœ… | | โœ… | โœ… | โœ… | | ps | โœ… | | โœ… | โœ… | โœ… | diff --git 
a/crates/liboci-cli/src/checkpoint.rs b/crates/liboci-cli/src/checkpoint.rs index 3e8f72309..3ba62932e 100644 --- a/crates/liboci-cli/src/checkpoint.rs +++ b/crates/liboci-cli/src/checkpoint.rs @@ -2,29 +2,55 @@ use clap::Parser; use std::path::PathBuf; /// Checkpoint a running container +/// Reference: https://github.com/opencontainers/runc/blob/main/man/runc-checkpoint.8.md #[derive(Parser, Debug)] pub struct Checkpoint { - #[clap(value_parser = clap::builder::NonEmptyStringValueParser::new(), required = true)] - pub container_id: String, - /// Allow external unix sockets - #[clap(long)] - pub ext_unix_sk: bool, - /// Allow file locks - #[clap(long)] - pub file_locks: bool, /// Path for saving criu image files #[clap(long, default_value = "checkpoint")] pub image_path: PathBuf, + /// Path for saving work files and logs + #[clap(long)] + pub work_path: Option, + /// Path for previous criu image file in pre-dump + #[clap(long)] + pub parent_path: Option, /// Leave the process running after checkpointing #[clap(long)] pub leave_running: bool, + /// Allow open tcp connections + #[clap(long)] + pub tcp_established: bool, + /// Allow external unix sockets + #[clap(long)] + pub ext_unix_sk: bool, /// Allow shell jobs #[clap(long)] pub shell_job: bool, - /// Allow open tcp connections + /// Use lazy migration mechanism #[clap(long)] - pub tcp_established: bool, - /// Path for saving work files and logs + pub lazy_pages: bool, + /// Pass a file descriptor fd to criu #[clap(long)] - pub work_path: Option, + pub status_fd: Option, // TODO: Is u32 the right type? 
+ /// Start a page server at the given URL + #[clap(long)] + pub page_server: Option, + /// Allow file locks + #[clap(long)] + pub file_locks: bool, + /// Do a pre-dump + #[clap(long)] + pub pre_dump: bool, + /// Cgroups mode + #[clap(long)] + pub manage_cgroups_mode: Option, + /// Checkpoint a namespace, but don't save its properties + #[clap(long)] + pub empty_ns: bool, + /// Enable auto-deduplication + #[clap(long)] + pub auto_dedup: bool, + + #[clap(value_parser = clap::builder::NonEmptyStringValueParser::new(), required = true)] + pub container_id: String, } diff --git a/crates/liboci-cli/src/create.rs b/crates/liboci-cli/src/create.rs index c83d56960..a67879220 100644 --- a/crates/liboci-cli/src/create.rs +++ b/crates/liboci-cli/src/create.rs @@ -3,22 +3,30 @@ use clap::Parser; use std::path::PathBuf; /// Create a container +/// Reference: https://github.com/opencontainers/runc/blob/main/man/runc-create.8.md #[derive(Parser, Debug)] pub struct Create { - /// File to write pid of the container created - // note that in the end, container is just another process - #[clap(short, long)] - pub pid_file: Option, - /// path to the bundle directory, containing config.json and root filesystem + /// Path to the bundle directory, containing config.json and root filesystem #[clap(short, long, default_value = ".")] pub bundle: PathBuf, /// Unix socket (file) path , which will receive file descriptor of the writing end of the pseudoterminal #[clap(short, long)] pub console_socket: Option, + /// File to write pid of the container created + // note that in the end, container is just another process + #[clap(short, long)] + pub pid_file: Option, + /// Do not use pivot root to jail process inside rootfs + #[clap(long)] + pub no_pivot: bool, + /// Do not create a new session keyring for the container.
+ #[clap(long)] + pub no_new_keyring: bool, /// Pass N additional file descriptors to the container (stdio + $LISTEN_FDS + N in total) #[clap(long, default_value = "0")] pub preserve_fds: i32, - /// name of the container instance to be started + + /// Name of the container instance to be started #[clap(value_parser = clap::builder::NonEmptyStringValueParser::new(), required = true)] pub container_id: String, } diff --git a/crates/liboci-cli/src/exec.rs b/crates/liboci-cli/src/exec.rs index f6b283e60..8212d2196 100644 --- a/crates/liboci-cli/src/exec.rs +++ b/crates/liboci-cli/src/exec.rs @@ -4,40 +4,67 @@ use std::path::PathBuf; use clap::Parser; /// Execute a process within an existing container +/// Reference: https://github.com/opencontainers/runc/blob/main/man/runc-exec.8.md #[derive(Parser, Debug)] pub struct Exec { /// Unix socket (file) path , which will receive file descriptor of the writing end of the pseudoterminal #[clap(long)] pub console_socket: Option, - #[clap(short, long)] - pub tty: bool, #[clap(long)] /// Current working directory of the container pub cwd: Option, - #[clap(long)] - /// The file to which the pid of the container process should be written to - pub pid_file: Option, /// Environment variables that should be set in the container - #[clap(short, long, value_parser = parse_key_val::, number_of_values = 1)] + #[clap(short, long, value_parser = parse_env::, number_of_values = 1)] pub env: Vec<(String, String)>, - /// Prevent the process from gaining additional privileges - #[clap(long)] - pub no_new_privs: bool, + #[clap(short, long)] + pub tty: bool, + /// Run the command as a user + #[clap(short, long, value_parser = parse_user::)] + pub user: Option<(u32, Option)>, + /// Add additional group IDs. 
Can be specified multiple times + #[clap(long, short = 'g', number_of_values = 1)] + pub additional_gids: Vec, /// Path to process.json #[clap(short, long)] pub process: Option, /// Detach from the container process #[clap(short, long)] pub detach: bool, + #[clap(long)] + /// The file to which the pid of the container process should be written to + pub pid_file: Option, + /// Set the asm process label for the process commonly used with selinux + #[clap(long)] + pub process_label: Option, + /// Set the apparmor profile for the process + #[clap(long)] + pub apparmor: Option, + /// Prevent the process from gaining additional privileges + #[clap(long)] + pub no_new_privs: bool, + /// Add a capability to the bounding set for the process + #[clap(long, number_of_values = 1)] + pub cap: Vec, + /// Pass N additional file descriptors to the container + #[clap(long, default_value = "0")] + pub preserve_fds: i32, + /// Allow exec in a paused container + #[clap(long)] + pub ignore_paused: bool, + /// Execute a process in a sub-cgroup + #[clap(long)] + pub cgroup: Option, + /// Identifier of the container #[clap(value_parser = clap::builder::NonEmptyStringValueParser::new(), required = true)] pub container_id: String, + /// Command that should be executed in the container #[clap(required = false)] pub command: Vec, } -fn parse_key_val(s: &str) -> Result<(T, U), Box> +fn parse_env(s: &str) -> Result<(T, U), Box> where T: std::str::FromStr, T::Err: Error + Send + Sync + 'static, @@ -46,6 +73,20 @@ where { let pos = s .find('=') - .ok_or_else(|| format!("invalid KEY=value: no `=` found in `{}`", s))?; + .ok_or_else(|| format!("invalid VAR=value: no `=` found in `{s}`"))?; Ok((s[..pos].parse()?, s[pos + 1..].parse()?)) } + +fn parse_user(s: &str) -> Result<(T, Option), Box> +where + T: std::str::FromStr, + T::Err: Error + Send + Sync + 'static, + U: std::str::FromStr, + U::Err: Error + Send + Sync + 'static, +{ + if let Some(pos) = s.find(':') { + Ok((s[..pos].parse()?, Some(s[pos 
+ 1..].parse()?))) + } else { + Ok((s.parse()?, None)) + } +} diff --git a/crates/liboci-cli/src/features.rs b/crates/liboci-cli/src/features.rs new file mode 100644 index 000000000..384a2953d --- /dev/null +++ b/crates/liboci-cli/src/features.rs @@ -0,0 +1,9 @@ +use clap::Parser; + +/// Return the features list for a container +/// This subcommand was introduced in runc by +/// https://github.com/opencontainers/runc/pull/3296 +/// It is documented here: +/// https://github.com/opencontainers/runtime-spec/blob/main/features-linux.md +#[derive(Parser, Debug)] +pub struct Features {} diff --git a/crates/liboci-cli/src/lib.rs b/crates/liboci-cli/src/lib.rs index 48fb9d46d..89c48a6d4 100644 --- a/crates/liboci-cli/src/lib.rs +++ b/crates/liboci-cli/src/lib.rs @@ -17,6 +17,7 @@ pub use {create::Create, delete::Delete, kill::Kill, start::Start, state::State} mod checkpoint; mod events; mod exec; +mod features; mod list; mod pause; mod ps; @@ -26,8 +27,8 @@ mod spec; mod update; pub use { - checkpoint::Checkpoint, events::Events, exec::Exec, list::List, pause::Pause, ps::Ps, - resume::Resume, run::Run, spec::Spec, update::Update, + checkpoint::Checkpoint, events::Events, exec::Exec, features::Features, list::List, + pause::Pause, ps::Ps, resume::Resume, run::Run, spec::Spec, update::Update, }; // Subcommands parsed by liboci-cli, based on the [OCI @@ -52,6 +53,7 @@ pub enum CommonCmd { Checkpointt(Checkpoint), Events(Events), Exec(Exec), + Features(Features), List(List), Pause(Pause), #[clap(allow_hyphen_values = true)] @@ -66,12 +68,13 @@ pub enum CommonCmd { // flags, but these are commonly accepted by runtimes #[derive(Parser, Debug)] pub struct GlobalOpts { - /// change log level to debug. - // Example in future : '--debug change log level to debug. 
(default: "warn")' + /// set the log file to write youki logs to (default is '/dev/stderr') + #[clap(short, long, overrides_with("log"))] + pub log: Option, + /// change log level to debug, but the `log-level` flag takes precedence #[clap(long)] pub debug: bool, - #[clap(short, long)] - pub log: Option, + /// set the log format ('text' (default), or 'json') (default: "text") #[clap(long)] pub log_format: Option, /// root directory to store container state diff --git a/crates/liboci-cli/src/list.rs b/crates/liboci-cli/src/list.rs index 44a2ad2d1..acb49eb8a 100644 --- a/crates/liboci-cli/src/list.rs +++ b/crates/liboci-cli/src/list.rs @@ -2,4 +2,12 @@ use clap::Parser; /// List created containers #[derive(Parser, Debug)] -pub struct List {} +pub struct List { + /// Specify the format (default or table) + #[clap(long, default_value = "table")] + pub format: String, + + /// Only display container IDs + #[clap(long, short)] + pub quiet: bool, +} diff --git a/crates/liboci-cli/src/run.rs b/crates/liboci-cli/src/run.rs index b9d9c8de1..8b7281b8e 100644 --- a/crates/liboci-cli/src/run.rs +++ b/crates/liboci-cli/src/run.rs @@ -4,20 +4,35 @@ use std::path::PathBuf; /// Create a container and immediately start it #[derive(Parser, Debug)] pub struct Run { - /// File to write pid of the container created - // note that in the end, container is just another process - #[clap(short, long)] - pub pid_file: Option, - /// path to the bundle directory, containing config.json and root filesystem + /// Path to the bundle directory, containing config.json and root filesystem #[clap(short, long, default_value = ".")] pub bundle: PathBuf, /// Unix socket (file) path , which will receive file descriptor of the writing end of the pseudoterminal #[clap(short, long)] pub console_socket: Option, + /// File to write pid of the container created + // note that in the end, container is just another process + #[clap(short, long)] + pub pid_file: Option, + /// Disable the use of the subreaper used 
to reap reparented processes + #[clap(long)] + pub no_subreaper: bool, + /// Do not use pivot root to jail process inside rootfs + #[clap(long)] + pub no_pivot: bool, + /// Do not create a new session keyring for the container. This will cause the container to inherit the calling processes session key. + #[clap(long)] + pub no_new_keyring: bool, /// Pass N additional file descriptors to the container (stdio + $LISTEN_FDS + N in total) #[clap(long, default_value = "0")] pub preserve_fds: i32, + // Keep container's state directory and cgroup + #[clap(long)] + pub keep: bool, /// name of the container instance to be started #[clap(value_parser = clap::builder::NonEmptyStringValueParser::new(), required = true)] pub container_id: String, + /// Detach from the container process + #[clap(short, long)] + pub detach: bool, } diff --git a/crates/liboci-cli/src/spec.rs b/crates/liboci-cli/src/spec.rs index 6f685fcdd..d2f034f09 100644 --- a/crates/liboci-cli/src/spec.rs +++ b/crates/liboci-cli/src/spec.rs @@ -1,8 +1,13 @@ use clap::Parser; +use std::path::PathBuf; /// Command generates a config.json #[derive(Parser, Debug)] pub struct Spec { + /// Set path to the root of the bundle directory + #[clap(long, short)] + pub bundle: Option, + /// Generate a configuration for a rootless container #[clap(long)] pub rootless: bool, diff --git a/crates/liboci-cli/src/update.rs b/crates/liboci-cli/src/update.rs index b05f7b24a..eee356cb3 100644 --- a/crates/liboci-cli/src/update.rs +++ b/crates/liboci-cli/src/update.rs @@ -4,15 +4,67 @@ use std::path::PathBuf; /// Update running container resource constraints #[derive(Parser, Debug)] pub struct Update { - #[clap(value_parser = clap::builder::NonEmptyStringValueParser::new(), required = true)] - pub container_id: String, - /// Read the new resource limits from the given json file. Use - to read from stdin. /// If this option is used, all other options are ignored. 
#[clap(short, long)] pub resources: Option, + /// Set a new I/O weight + #[clap(long)] + pub blkio_weight: Option, + + /// Set CPU CFS period to be used for hardcapping (in microseconds) + #[clap(long)] + pub cpu_period: Option, + + /// Set CPU usage limit within a given period (in microseconds) + #[clap(long)] + pub cpu_quota: Option, + + /// Set CPU realtime period to be used for hardcapping (in microseconds) + #[clap(long)] + pub cpu_rt_period: Option, + + /// Set CPU realtime hardcap limit (in microseconds) + #[clap(long)] + pub cpu_rt_runtime: Option, + + /// Set CPU shares (relative weight vs. other containers) + #[clap(long)] + pub cpu_share: Option, + + /// Set CPU(s) to use. The list can contain commas and ranges. For example: 0-3,7 + #[clap(long)] + pub cpuset_cpus: Option, + + /// Set memory node(s) to use. The list format is the same as for --cpuset-cpus. + #[clap(long)] + pub cpuset_mems: Option, + + /// Set memory limit to num bytes. + #[clap(long)] + pub memory: Option, + + /// Set memory reservation (or soft limit) to num bytes. + #[clap(long)] + pub memory_reservation: Option, + + /// Set total memory + swap usage to num bytes. Use -1 to unset the limit (i.e. use unlimited swap). + #[clap(long)] + pub memory_swap: Option, + /// Set the maximum number of processes allowed in the container #[clap(long)] pub pids_limit: Option, + + /// Set the value for Intel RDT/CAT L3 cache schema. + #[clap(long)] + pub l3_cache_schema: Option, + + /// Set the Intel RDT/MBA memory bandwidth schema. 
+ #[clap(long)] + pub mem_bw_schema: Option, + + #[clap(value_parser = clap::builder::NonEmptyStringValueParser::new(), required = true)] + pub container_id: String, } diff --git a/crates/youki/Cargo.toml b/crates/youki/Cargo.toml index 60c296dbb..fc4f6a4db 100644 --- a/crates/youki/Cargo.toml +++ b/crates/youki/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "youki" -version = "0.0.4" +version = "0.1.0" description = "A container runtime written in Rust" license-file = "../../LICENSE" repository = "https://github.com/containers/youki" @@ -16,36 +16,44 @@ systemd = ["libcgroups/systemd", "libcontainer/systemd", "v2"] v2 = ["libcgroups/v2", "libcontainer/v2"] v1 = ["libcgroups/v1", "libcontainer/v1"] cgroupsv2_devices = ["libcgroups/cgroupsv2_devices", "libcontainer/cgroupsv2_devices"] -wasm-wasmer = ["libcontainer/wasm-wasmer"] -wasm-wasmedge = ["libcontainer/wasm-wasmedge"] -wasm-wasmtime = ["libcontainer/wasm-wasmtime"] +wasm-wasmer = ["wasmer", "wasmer-wasix"] +wasm-wasmedge = ["wasmedge-sdk/standalone"] +wasm-wasmtime = ["wasmtime", "wasmtime-wasi"] [dependencies.clap] -version = "4.0.32" +version = "4.1.6" default-features = false features = ["std", "suggestions", "derive", "cargo", "help", "usage", "error-context"] [dependencies] -anyhow = "1.0.68" -chrono = { version = "0.4", features = ["serde"] } -libcgroups = { version = "0.0.4", path = "../libcgroups", default-features = false } -libcontainer = { version = "0.0.4", path = "../libcontainer", default-features = false } -liboci-cli = { version = "0.0.4", path = "../liboci-cli" } -log = { version = "0.4", features = ["std"] } -nix = "0.25.0" -oci-spec = { version = "^0.5.5", features = ["runtime"] } -once_cell = "1.17.0" +anyhow = "1.0.72" +chrono = { version = "0.4", default-features = false, features = ["clock", "serde"] } +libcgroups = { version = "0.1.0", path = "../libcgroups", default-features = false } +libcontainer = { version = "0.1.0", path = "../libcontainer", default-features = false } +liboci-cli = 
{ version = "0.1.0", path = "../liboci-cli" } +nix = "0.26.2" +once_cell = "1.18.0" pentacle = "1.0.0" -procfs = "0.14.2" +procfs = "0.15.1" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" tabwriter = "1" -clap_complete = "4.0.7" +clap_complete = "4.1.3" caps = "0.5.5" +wasmer = { version = "4.0.0", optional = true } +wasmer-wasix = { version = "0.9.0", optional = true } +wasmedge-sdk = { version = "0.9.0", optional = true } +wasmtime = {version = "10.0.1", optional = true } +wasmtime-wasi = {version = "10.0.1", optional = true } +tracing = { version = "0.1.37", features = ["attributes"]} +tracing-subscriber = { version = "0.3.16", features = ["json", "env-filter"] } +tracing-journald = "0.3.0" [dev-dependencies] -serial_test = "1.0.0" +serial_test = "2.0.0" +tempfile = "3" +scopeguard = "1.2.0" [build-dependencies] -anyhow = "1.0.68" -vergen = "7.5.0" +anyhow = "1.0.72" +vergen = {version ="8.2.4", features =["git","gitcl"]} diff --git a/crates/youki/build.rs b/crates/youki/build.rs index 48120b25a..f29fc540b 100644 --- a/crates/youki/build.rs +++ b/crates/youki/build.rs @@ -1,10 +1,17 @@ use anyhow::Result; -use vergen::{vergen, Config, ShaKind}; +use vergen::EmitBuilder; fn main() -> Result<()> { - let mut config = Config::default(); - *config.git_mut().sha_kind_mut() = ShaKind::Short; - *config.git_mut().skip_if_error_mut() = true; - println!("cargo:rustc-env=VERGEN_GIT_SHA_SHORT=unknown"); - vergen(config) + if EmitBuilder::builder() + .fail_on_error() + .git_sha(true) + .emit() + .is_err() + { + // currently we only inject git sha, so just this + // else we will need to think of more elegant way to check + // what failed, and what needs to be added + println!("cargo:rustc-env=VERGEN_GIT_SHA=unknown"); + } + Ok(()) } diff --git a/crates/youki/src/commands/checkpoint.rs b/crates/youki/src/commands/checkpoint.rs index 9b12ca239..eebe643bc 100644 --- a/crates/youki/src/commands/checkpoint.rs +++ b/crates/youki/src/commands/checkpoint.rs @@ 
-7,7 +7,7 @@ use anyhow::{Context, Result}; use liboci_cli::Checkpoint; pub fn checkpoint(args: Checkpoint, root_path: PathBuf) -> Result<()> { - log::debug!("start checkpointing container {}", args.container_id); + tracing::debug!("start checkpointing container {}", args.container_id); let mut container = load_container(root_path, &args.container_id)?; let opts = libcontainer::container::CheckpointOptions { ext_unix_sk: args.ext_unix_sk, diff --git a/crates/youki/src/commands/create.rs b/crates/youki/src/commands/create.rs index 7d6d37672..009a7854d 100644 --- a/crates/youki/src/commands/create.rs +++ b/crates/youki/src/commands/create.rs @@ -2,23 +2,27 @@ use anyhow::Result; use std::path::PathBuf; -use libcontainer::{container::builder::ContainerBuilder, syscall::syscall::create_syscall}; +use libcontainer::{container::builder::ContainerBuilder, syscall::syscall::SyscallType}; use liboci_cli::Create; +use crate::workload::executor::default_executor; + // One thing to note is that in the end, container is just another process in Linux // it has specific/different control group, namespace, using which program executing in it // can be given impression that is is running on a complete system, but on the system which // it is running, it is just another process, and has attributes such as pid, file descriptors, etc. // associated with it like any other process. pub fn create(args: Create, root_path: PathBuf, systemd_cgroup: bool) -> Result<()> { - let syscall = create_syscall(); - ContainerBuilder::new(args.container_id.clone(), syscall.as_ref()) + ContainerBuilder::new(args.container_id.clone(), SyscallType::default()) + .with_executor(default_executor()) .with_pid_file(args.pid_file.as_ref())? .with_console_socket(args.console_socket.as_ref()) .with_root_path(root_path)? .with_preserved_fds(args.preserve_fds) + .validate_id()? 
.as_init(&args.bundle) .with_systemd(systemd_cgroup) + .with_detach(true) .build()?; Ok(()) diff --git a/crates/youki/src/commands/delete.rs b/crates/youki/src/commands/delete.rs index 3097f3de7..28a6387db 100644 --- a/crates/youki/src/commands/delete.rs +++ b/crates/youki/src/commands/delete.rs @@ -5,7 +5,7 @@ use std::path::PathBuf; use liboci_cli::Delete; pub fn delete(args: Delete, root_path: PathBuf) -> Result<()> { - log::debug!("start deleting {}", args.container_id); + tracing::debug!("start deleting {}", args.container_id); if !container_exists(&root_path, &args.container_id)? && args.force { return Ok(()); } diff --git a/crates/youki/src/commands/exec.rs b/crates/youki/src/commands/exec.rs index d8b83a0a9..859e88a76 100644 --- a/crates/youki/src/commands/exec.rs +++ b/crates/youki/src/commands/exec.rs @@ -2,15 +2,18 @@ use anyhow::Result; use nix::sys::wait::{waitpid, WaitStatus}; use std::path::PathBuf; -use libcontainer::{container::builder::ContainerBuilder, syscall::syscall::create_syscall}; +use libcontainer::{container::builder::ContainerBuilder, syscall::syscall::SyscallType}; use liboci_cli::Exec; +use crate::workload::executor::default_executor; + pub fn exec(args: Exec, root_path: PathBuf) -> Result { - let syscall = create_syscall(); - let pid = ContainerBuilder::new(args.container_id.clone(), syscall.as_ref()) + let pid = ContainerBuilder::new(args.container_id.clone(), SyscallType::default()) + .with_executor(default_executor()) .with_root_path(root_path)? .with_console_socket(args.console_socket.as_ref()) .with_pid_file(args.pid_file.as_ref())? + .validate_id()? .as_tenant() .with_detach(args.detach) .with_cwd(args.cwd.as_ref()) diff --git a/crates/youki/src/commands/features.rs b/crates/youki/src/commands/features.rs new file mode 100644 index 000000000..bf359bd0a --- /dev/null +++ b/crates/youki/src/commands/features.rs @@ -0,0 +1,8 @@ +//! 
Contains Functionality of `features` container command +use anyhow::Result; +use liboci_cli::Features; + +/// lists all existing containers +pub fn features(_: Features) -> Result<()> { + Ok(()) +} diff --git a/crates/youki/src/commands/info.rs b/crates/youki/src/commands/info.rs index f975d2953..47dcfab23 100644 --- a/crates/youki/src/commands/info.rs +++ b/crates/youki/src/commands/info.rs @@ -29,7 +29,7 @@ pub fn info(_: Info) -> Result<()> { /// print Version of Youki pub fn print_youki() { println!("{:<18}{}", "Version", env!("CARGO_PKG_VERSION")); - println!("{:<18}{}", "Commit", env!("VERGEN_GIT_SHA_SHORT")); + println!("{:<18}{}", "Commit", env!("VERGEN_GIT_SHA")); } /// Print Kernel Release, Version and Architecture @@ -140,7 +140,7 @@ pub fn print_cgroup_mounts() { v1_mounts.sort(); for cgroup_mount in v1_mounts { - println!("{}", cgroup_mount); + println!("{cgroup_mount}"); } } diff --git a/crates/youki/src/commands/kill.rs b/crates/youki/src/commands/kill.rs index e87a6266c..1d737ac57 100644 --- a/crates/youki/src/commands/kill.rs +++ b/crates/youki/src/commands/kill.rs @@ -1,7 +1,7 @@ //! 
Contains functionality of kill container command use std::{convert::TryInto, path::PathBuf}; -use anyhow::Result; +use anyhow::{anyhow, Result}; use crate::commands::load_container; use libcontainer::{container::ContainerStatus, signal::Signal}; @@ -15,9 +15,9 @@ pub fn kill(args: Kill, root_path: PathBuf) -> Result<()> { Err(e) => { // see https://github.com/containers/youki/issues/1314 if container.status() == ContainerStatus::Stopped { - return Err(e.context("container not running")); + return Err(anyhow!(e).context("container not running")); } - Err(e) + Err(anyhow!(e).context("failed to kill container")) } } } diff --git a/crates/youki/src/commands/list.rs b/crates/youki/src/commands/list.rs index c2906941e..ea3364e73 100644 --- a/crates/youki/src/commands/list.rs +++ b/crates/youki/src/commands/list.rs @@ -55,7 +55,7 @@ pub fn list(_: List, root_path: PathBuf) -> Result<()> { let mut tab_writer = TabWriter::new(io::stdout()); writeln!(&mut tab_writer, "ID\tPID\tSTATUS\tBUNDLE\tCREATED\tCREATOR")?; - write!(&mut tab_writer, "{}", content)?; + write!(&mut tab_writer, "{content}")?; tab_writer.flush()?; Ok(()) diff --git a/crates/youki/src/commands/mod.rs b/crates/youki/src/commands/mod.rs index 06a1046e4..d99a0ec57 100644 --- a/crates/youki/src/commands/mod.rs +++ b/crates/youki/src/commands/mod.rs @@ -4,7 +4,7 @@ use std::{ path::{Path, PathBuf}, }; -use libcgroups::common::CgroupManager; +use libcgroups::common::AnyCgroupManager; use libcontainer::container::Container; pub mod checkpoint; @@ -13,6 +13,7 @@ pub mod create; pub mod delete; pub mod events; pub mod exec; +pub mod features; pub mod info; pub mod kill; pub mod list; @@ -45,7 +46,7 @@ fn load_container>(root_path: P, container_id: &str) -> Result>(root_path: P, container_id: &str) -> Result { @@ -56,12 +57,13 @@ fn container_exists>(root_path: P, container_id: &str) -> Result< fn create_cgroup_manager>( root_path: P, container_id: &str, -) -> Result> { +) -> Result { let container = 
load_container(root_path, container_id)?; - let cgroups_path = container.spec()?.cgroup_path; - let systemd_cgroup = container - .systemd() - .context("could not determine cgroup manager")?; - - libcgroups::common::create_cgroup_manager(cgroups_path, systemd_cgroup, container.id()) + Ok(libcgroups::common::create_cgroup_manager( + libcgroups::common::CgroupConfig { + cgroup_path: container.spec()?.cgroup_path, + systemd_cgroup: container.systemd(), + container_name: container.id().to_string(), + }, + )?) } diff --git a/crates/youki/src/commands/pause.rs b/crates/youki/src/commands/pause.rs index be650d12d..be45a4d31 100644 --- a/crates/youki/src/commands/pause.rs +++ b/crates/youki/src/commands/pause.rs @@ -12,7 +12,7 @@ use liboci_cli::Pause; // https://man7.org/linux/man-pages/man7/cgroups.7.html // https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt pub fn pause(args: Pause, root_path: PathBuf) -> Result<()> { - log::debug!("start pausing container {}", args.container_id); + tracing::debug!("start pausing container {}", args.container_id); let mut container = load_container(root_path, &args.container_id)?; container .pause() diff --git a/crates/youki/src/commands/ps.rs b/crates/youki/src/commands/ps.rs index c8cbfb3ba..b51456c4a 100644 --- a/crates/youki/src/commands/ps.rs +++ b/crates/youki/src/commands/ps.rs @@ -1,5 +1,6 @@ use crate::commands::create_cgroup_manager; use anyhow::{bail, Result}; +use libcgroups::common::CgroupManager; use liboci_cli::Ps; use std::{path::PathBuf, process::Command}; @@ -36,7 +37,7 @@ pub fn ps(args: Ps, root_path: PathBuf) -> Result<()> { let fields: Vec<&str> = line.split_whitespace().collect(); let pid: i32 = fields[pid_index].parse()?; if pids.contains(&pid) { - println!("{}", line); + println!("{line}"); } } } diff --git a/crates/youki/src/commands/resume.rs b/crates/youki/src/commands/resume.rs index 3897a253c..21d59d8b4 100644 --- a/crates/youki/src/commands/resume.rs +++ 
b/crates/youki/src/commands/resume.rs @@ -13,7 +13,7 @@ use liboci_cli::Resume; // https://man7.org/linux/man-pages/man7/cgroups.7.html // https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt pub fn resume(args: Resume, root_path: PathBuf) -> Result<()> { - log::debug!("start resuming container {}", args.container_id); + tracing::debug!("start resuming container {}", args.container_id); let mut container = load_container(root_path, &args.container_id)?; container .resume() diff --git a/crates/youki/src/commands/run.rs b/crates/youki/src/commands/run.rs index ef74cd97d..5f0e33124 100644 --- a/crates/youki/src/commands/run.rs +++ b/crates/youki/src/commands/run.rs @@ -1,21 +1,223 @@ use std::path::PathBuf; use anyhow::{Context, Result}; -use libcontainer::{container::builder::ContainerBuilder, syscall::syscall::create_syscall}; +use libcontainer::{container::builder::ContainerBuilder, syscall::syscall::SyscallType}; use liboci_cli::Run; +use nix::{ + sys::{ + signal::{self, kill}, + signalfd::SigSet, + wait::{waitpid, WaitPidFlag, WaitStatus}, + }, + unistd::Pid, +}; -pub fn run(args: Run, root_path: PathBuf, systemd_cgroup: bool) -> Result<()> { - let syscall = create_syscall(); - let mut container = ContainerBuilder::new(args.container_id.clone(), syscall.as_ref()) +use crate::workload::executor::default_executor; + +pub fn run(args: Run, root_path: PathBuf, systemd_cgroup: bool) -> Result { + let mut container = ContainerBuilder::new(args.container_id.clone(), SyscallType::default()) + .with_executor(default_executor()) .with_pid_file(args.pid_file.as_ref())? .with_console_socket(args.console_socket.as_ref()) .with_root_path(root_path)? .with_preserved_fds(args.preserve_fds) + .validate_id()? 
.as_init(&args.bundle) .with_systemd(systemd_cgroup) + .with_detach(args.detach) .build()?; container .start() - .with_context(|| format!("failed to start container {}", args.container_id)) + .with_context(|| format!("failed to start container {}", args.container_id))?; + + if args.detach { + return Ok(0); + } + + // Using `debug_assert` here rather than returning an error because this is + // a invariant. The design when the code path arrives to this point, is that + // the container state must have recorded the container init pid. + debug_assert!( + container.pid().is_some(), + "expects a container init pid in the container state" + ); + let foreground_result = handle_foreground(container.pid().unwrap()); + // execute the destruction action after the container finishes running + container.delete(true)?; + // return result + foreground_result +} + +// handle_foreground will match the `runc` behavior running the foreground mode. +// The youki main process will wait and reap the container init process. The +// youki main process also forwards most of the signals to the container init +// process. +#[tracing::instrument(level = "trace")] +fn handle_foreground(init_pid: Pid) -> Result { + tracing::trace!("waiting for container init process to exit"); + // We mask all signals here and forward most of the signals to the container + // init process. + let signal_set = SigSet::all(); + signal_set + .thread_block() + .with_context(|| "failed to call pthread_sigmask")?; + loop { + match signal_set + .wait() + .with_context(|| "failed to call sigwait")? + { + signal::SIGCHLD => { + // Reap all child until either container init process exits or + // no more child to be reaped. Once the container init process + // exits we can then return. + tracing::trace!("reaping child processes"); + loop { + match waitpid(None, Some(WaitPidFlag::WNOHANG))? 
{ + WaitStatus::Exited(pid, status) => { + if pid.eq(&init_pid) { + return Ok(status); + } + + // Else, some random child process exited, ignoring... + } + WaitStatus::Signaled(pid, signal, _) => { + if pid.eq(&init_pid) { + return Ok(signal as i32); + } + + // Else, some random child process exited, ignoring... + } + WaitStatus::StillAlive => { + // No more child to reap. + break; + } + _ => {} + } + } + } + signal::SIGURG => { + // In `runc`, SIGURG is used by go runtime and should not be forwarded to + // the container process. Here, we just ignore the signal. + } + signal::SIGWINCH => { + // TODO: resize the terminal + } + signal => { + tracing::trace!(?signal, "forwarding signal"); + // There is nothing we can do if we fail to forward the signal. + let _ = kill(init_pid, Some(signal)).map_err(|err| { + tracing::warn!( + ?err, + ?signal, + "failed to forward signal to container init process", + ); + }); + } + } + } +} + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use nix::{ + sys::{signal::Signal::SIGINT, wait}, + unistd, + }; + + use super::*; + + #[test] + fn test_foreground_forward_sig() -> Result<()> { + // To set up the test correctly, we need to run the test in dedicated + // process, so the rust unit test runtime and other unit tests will not + // mess with the signal handling. We use `sigkill` as a simple way to + // make sure the signal is properly forwarded. In this test, P0 is the + // rust process that runs this unit test (in a thread). P1 mocks youki + // main and P2 mocks the container init process + match unsafe { unistd::fork()? } { + unistd::ForkResult::Parent { child } => { + // Inside P0 + // + // We need to make sure that the child process has entered into + // the signal forwarding loops. There is no way to 100% sync + // that the child has executed the for loop waiting to forward + // the signal. 
There are sync mechanisms with condvar or + // channels to make it as close to calling the handle_foreground + // function as possible, but still have a tiny (highly unlikely + // but probable) window that a race can still happen. So instead + // we just wait for 1 second for everything to settle. In + // general, I don't like sleep in tests to avoid race condition, + // but I'd rather not over-engineer this now. We can revisit + // this later if the test becomes flaky. + std::thread::sleep(Duration::from_secs(1)); + // Send the `sigint` signal to P1 who will forward the signal + // to P2. P2 will then exit and send a sigchld to P1. P1 will + // then reap P2 and exits. In P0, we can then reap P1. + kill(child, SIGINT)?; + wait::waitpid(child, None)?; + } + unistd::ForkResult::Child => { + // Inside P1. Fork P2 as mock container init process and run + // signal handler process inside. + match unsafe { unistd::fork()? } { + unistd::ForkResult::Parent { child } => { + // Inside P1. + let _ = handle_foreground(child).map_err(|err| { + // Since we are in a child process, we want to use trace to log the error. + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); + tracing::error!(?err, "failed to handle foreground"); + err + }); + std::process::exit(0); + } + unistd::ForkResult::Child => { + let mut signal_set = SigSet::empty(); + signal_set.add(SIGINT); + signal_set.thread_block()?; + signal_set.wait()?; + std::process::exit(0); + } + }; + } + }; + + Ok(()) + } + + #[test] + fn test_foreground_exit() -> Result<()> { + // The setup is similar to `handle_foreground`, but instead of + // forwarding signal, the container init process will exit. Again, we + // use `sleep` to simulate the conditions to avoid fine grained + // synchronization for now. + match unsafe { unistd::fork()? 
} { + unistd::ForkResult::Parent { child } => { + // Inside P0 + std::thread::sleep(Duration::from_secs(1)); + wait::waitpid(child, None)?; + } + unistd::ForkResult::Child => { + // Inside P1. Fork P2 as mock container init process and run + // signal handler process inside. + match unsafe { unistd::fork()? } { + unistd::ForkResult::Parent { child } => { + // Inside P1. + handle_foreground(child)?; + wait::waitpid(child, None)?; + } + unistd::ForkResult::Child => { + // Inside P2. The process exits after 1 second. + std::thread::sleep(Duration::from_secs(1)); + } + }; + } + }; + + Ok(()) + } } diff --git a/crates/youki/src/commands/spec_json.rs b/crates/youki/src/commands/spec_json.rs index 327d2a17f..b8d787c85 100644 --- a/crates/youki/src/commands/spec_json.rs +++ b/crates/youki/src/commands/spec_json.rs @@ -1,12 +1,13 @@ use anyhow::Result; -use nix; -use oci_spec::runtime::Mount; -use oci_spec::runtime::{ +use libcontainer::oci_spec::runtime::Mount; +use libcontainer::oci_spec::runtime::{ LinuxBuilder, LinuxIdMappingBuilder, LinuxNamespace, LinuxNamespaceBuilder, LinuxNamespaceType, Spec, }; +use nix; use serde_json::to_writer_pretty; use std::fs::File; +use std::io::{BufWriter, Write}; use std::path::Path; use std::path::PathBuf; @@ -16,12 +17,13 @@ pub fn get_default() -> Result { pub fn get_rootless() -> Result { // Remove network and user namespace from the default spec - let mut namespaces: Vec = oci_spec::runtime::get_default_namespaces() - .into_iter() - .filter(|ns| { - ns.typ() != LinuxNamespaceType::Network && ns.typ() != LinuxNamespaceType::User - }) - .collect(); + let mut namespaces: Vec = + libcontainer::oci_spec::runtime::get_default_namespaces() + .into_iter() + .filter(|ns| { + ns.typ() != LinuxNamespaceType::Network && ns.typ() != LinuxNamespaceType::User + }) + .collect(); // Add user namespace namespaces.push( @@ -49,7 +51,7 @@ pub fn get_rootless() -> Result { // Prepare the mounts - let mut mounts: Vec = 
oci_spec::runtime::get_default_mounts(); + let mut mounts: Vec = libcontainer::oci_spec::runtime::get_default_mounts(); for mount in &mut mounts { if mount.destination().eq(Path::new("/sys")) { mount @@ -89,7 +91,10 @@ pub fn spec(args: liboci_cli::Spec) -> Result<()> { }; // write data to config.json - to_writer_pretty(&File::create("config.json")?, &spec)?; + let file = File::create("config.json")?; + let mut writer = BufWriter::new(file); + to_writer_pretty(&mut writer, &spec)?; + writer.flush()?; Ok(()) } @@ -97,16 +102,18 @@ pub fn spec(args: liboci_cli::Spec) -> Result<()> { // Tests become unstable if not serial. The cause is not known. mod tests { use super::*; - use libcontainer::utils::create_temp_dir; use serial_test::serial; #[test] #[serial] fn test_spec_json() -> Result<()> { let spec = get_rootless()?; - let tmpdir = create_temp_dir("test_spec_json").expect("failed to create temp dir"); + let tmpdir = tempfile::tempdir().expect("failed to create temp dir"); let path = tmpdir.path().join("config.json"); - to_writer_pretty(&File::create(path)?, &spec)?; + let file = File::create(path)?; + let mut writer = BufWriter::new(file); + to_writer_pretty(&mut writer, &spec)?; + writer.flush()?; Ok(()) } } diff --git a/crates/youki/src/commands/update.rs b/crates/youki/src/commands/update.rs index b8a7d823d..c01dabf80 100644 --- a/crates/youki/src/commands/update.rs +++ b/crates/youki/src/commands/update.rs @@ -4,9 +4,10 @@ use std::path::PathBuf; use crate::commands::create_cgroup_manager; use anyhow::Result; +use libcgroups::common::CgroupManager; use libcgroups::{self, common::ControllerOpt}; +use libcontainer::oci_spec::runtime::{LinuxPidsBuilder, LinuxResources, LinuxResourcesBuilder}; use liboci_cli::Update; -use oci_spec::runtime::{LinuxPidsBuilder, LinuxResources, LinuxResourcesBuilder}; pub fn update(args: Update, root_path: PathBuf) -> Result<()> { let cmanager = create_cgroup_manager(root_path, &args.container_id)?; @@ -16,7 +17,9 @@ pub fn 
update(args: Update, root_path: PathBuf) -> Result<()> { linux_res = if resources_path.to_string_lossy() == "-" { serde_json::from_reader(io::stdin())? } else { - serde_json::from_reader(fs::File::open(resources_path)?)? + let file = fs::File::open(resources_path)?; + let reader = io::BufReader::new(file); + serde_json::from_reader(reader)? }; } else { let mut builder = LinuxResourcesBuilder::default(); diff --git a/crates/youki/src/logger.rs b/crates/youki/src/logger.rs deleted file mode 100644 index 06caac2c1..000000000 --- a/crates/youki/src/logger.rs +++ /dev/null @@ -1,240 +0,0 @@ -//! Default Youki Logger - -use anyhow::{bail, Context, Result}; -use log::{LevelFilter, Log, Metadata, Record}; -use once_cell::sync::OnceCell; -use std::borrow::Cow; -use std::fs::{File, OpenOptions}; -use std::io::{stderr, Write}; -use std::path::PathBuf; -use std::str::FromStr; - -pub static LOG_FILE: OnceCell> = OnceCell::new(); -const LOG_LEVEL_ENV_NAME: &str = "YOUKI_LOG_LEVEL"; -const LOG_FORMAT_TEXT: &str = "text"; -const LOG_FORMAT_JSON: &str = "json"; -enum LogFormat { - Text, - Json, -} - -/// If in debug mode, default level is debug to get maximum logging -#[cfg(debug_assertions)] -const DEFAULT_LOG_LEVEL: &str = "debug"; - -/// If not in debug mode, default level is warn to get important logs -#[cfg(not(debug_assertions))] -const DEFAULT_LOG_LEVEL: &str = "warn"; - -/// Initialize the logger, must be called before accessing the logger -/// Multiple parts might call this at once, but the actual initialization -/// is done only once due to use of OnceCell -pub fn init( - log_debug_flag: bool, - log_file: Option, - log_format: Option, -) -> Result<()> { - let level = detect_log_level(log_debug_flag).context("failed to parse log level")?; - let format = detect_log_format(log_format).context("failed to detect log format")?; - let _ = LOG_FILE.get_or_init(|| -> Option { - log_file.map(|path| { - OpenOptions::new() - .create(true) - .write(true) - .truncate(false) - 
.open(path) - .expect("failed opening log file") - }) - }); - - let logger = YoukiLogger::new(level.to_level(), format); - log::set_boxed_logger(Box::new(logger)) - .map(|()| log::set_max_level(level)) - .expect("set logger failed"); - - Ok(()) -} - -fn detect_log_format(log_format: Option) -> Result { - match log_format.as_deref() { - None | Some(LOG_FORMAT_TEXT) => Ok(LogFormat::Text), - Some(LOG_FORMAT_JSON) => Ok(LogFormat::Json), - Some(unknown) => bail!("unknown log format: {}", unknown), - } -} - -fn detect_log_level(is_debug: bool) -> Result { - let filter: Cow = if is_debug { - "debug".into() - } else if let Ok(level) = std::env::var(LOG_LEVEL_ENV_NAME) { - level.into() - } else { - DEFAULT_LOG_LEVEL.into() - }; - Ok(LevelFilter::from_str(filter.as_ref())?) -} - -struct YoukiLogger { - /// Indicates level up to which logs are to be printed - level: Option, - format: LogFormat, -} - -impl YoukiLogger { - /// Create new logger - pub fn new(level: Option, format: LogFormat) -> Self { - Self { level, format } - } -} - -/// Implements Log interface given by log crate, so we can use its functionality -impl Log for YoukiLogger { - /// Check if level of given log is enabled or not - fn enabled(&self, metadata: &Metadata) -> bool { - if let Some(level) = self.level { - metadata.level() <= level - } else { - false - } - } - - /// Function to carry out logging - fn log(&self, record: &Record) { - if self.enabled(record.metadata()) { - let log_msg = match self.format { - LogFormat::Text => text_format(record), - LogFormat::Json => json_format(record), - }; - // if log file is set, write to it, else write to stderr - if let Some(mut log_file) = LOG_FILE.get().unwrap().as_ref() { - let _ = writeln!(log_file, "{}", log_msg); - } else { - let _ = writeln!(stderr(), "{}", log_msg); - } - } - } - - /// Flush logs to file - fn flush(&self) { - if let Some(mut log_file) = LOG_FILE.get().unwrap().as_ref() { - log_file.flush().expect("failed to flush"); - } else { - 
stderr().flush().expect("failed to flush"); - } - } -} - -fn json_format(record: &log::Record) -> String { - serde_json::to_string(&serde_json::json!({ - "level": record.level().to_string(), - "time": chrono::Local::now().to_rfc3339(), - "message": record.args(), - })) - .expect("serde::to_string with string keys will not fail") -} - -fn text_format(record: &log::Record) -> String { - let log_msg = match (record.file(), record.line()) { - (Some(file), Some(line)) => format!( - "[{} {}:{}] {} {}\r", - record.level(), - file, - line, - chrono::Local::now().to_rfc3339(), - record.args() - ), - (_, _) => format!( - "[{}] {} {}\r", - record.level(), - chrono::Local::now().to_rfc3339(), - record.args() - ), - }; - - log_msg -} - -#[cfg(test)] -mod tests { - use serial_test::serial; - - use super::*; - use libcontainer::utils::create_temp_dir; - use std::{env, path::Path}; - - struct LogLevelGuard { - original_level: Option, - } - - impl LogLevelGuard { - fn new(level: &str) -> Result { - let original_level = env::var(LOG_LEVEL_ENV_NAME).ok(); - env::set_var(LOG_LEVEL_ENV_NAME, level); - Ok(Self { original_level }) - } - } - - impl Drop for LogLevelGuard { - fn drop(self: &mut LogLevelGuard) { - if let Some(level) = self.original_level.as_ref() { - env::set_var(LOG_LEVEL_ENV_NAME, level); - } else { - env::remove_var(LOG_LEVEL_ENV_NAME); - } - } - } - - #[test] - fn test_detect_log_level_is_debug() { - let _guard = LogLevelGuard::new("error").unwrap(); - assert_eq!(detect_log_level(true).unwrap(), LevelFilter::Debug) - } - - #[test] - #[serial] - fn test_detect_log_level_default() { - let _guard = LogLevelGuard::new("error").unwrap(); - env::remove_var(LOG_LEVEL_ENV_NAME); - if cfg!(debug_assertions) { - assert_eq!(detect_log_level(false).unwrap(), LevelFilter::Debug) - } else { - assert_eq!(detect_log_level(false).unwrap(), LevelFilter::Warn) - } - } - - #[test] - #[serial] - fn test_detect_log_level_from_env() { - let _guard = LogLevelGuard::new("error").unwrap(); - 
assert_eq!(detect_log_level(false).unwrap(), LevelFilter::Error) - } - - #[test] - fn test_logfile() { - let temp_dir = create_temp_dir("logfile").expect("failed to create tempdir for logfile"); - let log_file = Path::join(temp_dir.path(), "test.log"); - - init(true, Some(log_file.to_owned()), None).expect("failed to initialize logger"); - assert!( - log_file - .as_path() - .metadata() - .expect("failed to get logfile metadata") - .len() - == 0, - "a new logfile should be empty" - ); - - log::info!("testing this"); - - assert!( - log_file - .as_path() - .metadata() - .expect("failed to get logfile metadata") - .len() - > 0, - "some log should be written into the logfile" - ); - } -} diff --git a/crates/youki/src/main.rs b/crates/youki/src/main.rs index 8b73404bb..6a92be8d0 100644 --- a/crates/youki/src/main.rs +++ b/crates/youki/src/main.rs @@ -2,25 +2,30 @@ //! Container Runtime written in Rust, inspired by [railcar](https://github.com/oracle/railcar) //! This crate provides a container runtime which can be used by a high-level container runtime to run containers. mod commands; -mod logger; +mod observability; +mod rootpath; +mod workload; -use anyhow::bail; use anyhow::Context; use anyhow::Result; use clap::CommandFactory; use clap::{crate_version, Parser}; -use nix::libc; -use std::fs; -use std::path::{Path, PathBuf}; use crate::commands::info; -use libcontainer::rootless::rootless_required; -use libcontainer::utils::create_dir_all_with_mode; -use nix::sys::stat::Mode; -use nix::unistd::getuid; use liboci_cli::{CommonCmd, GlobalOpts, StandardCmd}; +// Additional options that are not defined in OCI runtime-spec, but are used by Youki. 
+#[derive(Parser, Debug)] +struct YoukiExtendOpts { + /// Enable logging to systemd-journald + #[clap(long)] + pub systemd_log: bool, + /// set the log level (default is 'error') + #[clap(long)] + pub log_level: Option, +} + // High-level commandline option definition // This takes global options as well as individual commands as specified in [OCI runtime-spec](https://github.com/opencontainers/runtime-spec/blob/master/runtime.md) // Also check [runc commandline documentation](https://github.com/opencontainers/runc/blob/master/man/runc.8.md) for more explanation @@ -30,6 +35,9 @@ struct Opts { #[clap(flatten)] global: GlobalOpts, + #[clap(flatten)] + youki_extend: YoukiExtendOpts, + #[clap(subcommand)] subcmd: SubCommand, } @@ -40,9 +48,9 @@ struct Opts { enum SubCommand { // Standard and common commands handled by the liboci_cli crate #[clap(flatten)] - Standard(liboci_cli::StandardCmd), + Standard(Box), #[clap(flatten)] - Common(liboci_cli::CommonCmd), + Common(Box), // Youki specific extensions Info(info::Info), @@ -61,7 +69,7 @@ macro_rules! 
youki_version { "\ncommit: ", crate_version!(), "-0-", - env!("VERGEN_GIT_SHA_SHORT") + env!("VERGEN_GIT_SHA") ) }; } @@ -84,21 +92,21 @@ fn main() -> Result<()> { let opts = Opts::parse(); let mut app = Opts::command(); - if let Err(e) = crate::logger::init(opts.global.debug, opts.global.log, opts.global.log_format) - { - eprintln!("log init failed: {:?}", e); - } + crate::observability::init(&opts).map_err(|err| { + eprintln!("failed to initialize observability: {}", err); + err + })?; - log::debug!( + tracing::debug!( "started by user {} with {:?}", nix::unistd::geteuid(), std::env::args_os() ); - let root_path = determine_root_path(opts.global.root)?; + let root_path = rootpath::determine(opts.global.root)?; let systemd_cgroup = opts.global.systemd_cgroup; let cmd_result = match opts.subcmd { - SubCommand::Standard(cmd) => match cmd { + SubCommand::Standard(cmd) => match *cmd { StandardCmd::Create(create) => { commands::create::create(create, root_path, systemd_cgroup) } @@ -107,7 +115,7 @@ fn main() -> Result<()> { StandardCmd::Delete(delete) => commands::delete::delete(delete, root_path), StandardCmd::State(state) => commands::state::state(state, root_path), }, - SubCommand::Common(cmd) => match cmd { + SubCommand::Common(cmd) => match *cmd { CommonCmd::Checkpointt(checkpoint) => { commands::checkpoint::checkpoint(checkpoint, root_path) } @@ -115,15 +123,22 @@ fn main() -> Result<()> { CommonCmd::Exec(exec) => match commands::exec::exec(exec, root_path) { Ok(exit_code) => std::process::exit(exit_code), Err(e) => { - eprintln!("exec failed : {}", e); + eprintln!("exec failed : {e}"); std::process::exit(-1); } }, + CommonCmd::Features(features) => commands::features::features(features), CommonCmd::List(list) => commands::list::list(list, root_path), CommonCmd::Pause(pause) => commands::pause::pause(pause, root_path), CommonCmd::Ps(ps) => commands::ps::ps(ps, root_path), CommonCmd::Resume(resume) => commands::resume::resume(resume, root_path), - 
CommonCmd::Run(run) => commands::run::run(run, root_path, systemd_cgroup), + CommonCmd::Run(run) => match commands::run::run(run, root_path, systemd_cgroup) { + Ok(exit_code) => std::process::exit(exit_code), + Err(e) => { + eprintln!("run failed : {e}"); + std::process::exit(-1); + } + }, CommonCmd::Spec(spec) => commands::spec_json::spec(spec), CommonCmd::Update(update) => commands::update::update(update, root_path), }, @@ -135,200 +150,7 @@ fn main() -> Result<()> { }; if let Err(ref e) = cmd_result { - log::error!("error in executing command: {:?}", e); + tracing::error!("error in executing command: {:?}", e); } cmd_result } - -fn determine_root_path(root_path: Option) -> Result { - let uid = getuid().as_raw(); - - if let Some(path) = root_path { - if !path.exists() { - create_dir_all_with_mode(&path, uid, Mode::S_IRWXU)?; - } - let path = path.canonicalize()?; - return Ok(path); - } - - if !rootless_required() { - let path = get_default_not_rootless_path(); - create_dir_all_with_mode(&path, uid, Mode::S_IRWXU)?; - return Ok(path); - } - - // see https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html - if let Ok(path) = std::env::var("XDG_RUNTIME_DIR") { - let path = Path::new(&path).join("youki"); - if create_dir_all_with_mode(&path, uid, Mode::S_IRWXU).is_ok() { - return Ok(path); - } - } - - // XDG_RUNTIME_DIR is not set, try the usual location - let path = get_default_rootless_path(uid); - if create_dir_all_with_mode(&path, uid, Mode::S_IRWXU).is_ok() { - return Ok(path); - } - - if let Ok(path) = std::env::var("HOME") { - if let Ok(resolved) = fs::canonicalize(path) { - let run_dir = resolved.join(".youki/run"); - if create_dir_all_with_mode(&run_dir, uid, Mode::S_IRWXU).is_ok() { - return Ok(run_dir); - } - } - } - - let tmp_dir = PathBuf::from(format!("/tmp/youki-{}", uid)); - if create_dir_all_with_mode(&tmp_dir, uid, Mode::S_IRWXU).is_ok() { - return Ok(tmp_dir); - } - - bail!("could not find a storage location with suitable 
permissions for the current user"); -} - -#[cfg(not(test))] -fn get_default_not_rootless_path() -> PathBuf { - PathBuf::from("/run/youki") -} - -#[cfg(test)] -fn get_default_not_rootless_path() -> PathBuf { - libcontainer::utils::get_temp_dir_path("default_youki_path") -} - -#[cfg(not(test))] -fn get_default_rootless_path(uid: libc::uid_t) -> PathBuf { - PathBuf::from(format!("/run/user/{}/youki", uid)) -} - -#[cfg(test)] -fn get_default_rootless_path(uid: libc::uid_t) -> PathBuf { - libcontainer::utils::get_temp_dir_path(format!("default_rootless_youki_path_{}", uid).as_str()) -} - -#[cfg(test)] -mod tests { - use crate::determine_root_path; - use anyhow::{Context, Result}; - use libcontainer::utils::{get_temp_dir_path, TempDir}; - use nix::sys::stat::Mode; - use nix::unistd::getuid; - use std::fs; - use std::fs::Permissions; - use std::os::unix::fs::PermissionsExt; - use std::path::{Path, PathBuf}; - - #[test] - fn test_determine_root_path_use_specified_by_user() -> Result<()> { - // Create directory if it does not exist and return absolute path. - let specified_path = get_temp_dir_path("provided_path"); - // Make sure directory does not exist. - remove_dir(&specified_path)?; - let non_abs_path = specified_path.join("../provided_path"); - let path = determine_root_path(Some(non_abs_path)).context("failed with specified path")?; - assert_eq!(path, specified_path); - - // Return absolute path if directory exists. - let specified_path = get_temp_dir_path("provided_path2"); - let _temp_dir = TempDir::new(&specified_path).context("failed to create temp dir")?; - let non_abs_path = specified_path.join("../provided_path2"); - let path = determine_root_path(Some(non_abs_path)).context("failed with specified path")?; - assert_eq!(path, specified_path); - - Ok(()) - } - - #[test] - fn test_determine_root_path_non_rootless() -> Result<()> { - // If we do not have root privileges skip the test as it will not succeed. 
- if !getuid().is_root() { - return Ok(()); - } - - let expected_path = get_temp_dir_path("default_youki_path"); - - let path = determine_root_path(None).context("failed with default non rootless path")?; - assert_eq!(path, expected_path); - assert!(path.exists()); - - fs::remove_dir(&expected_path).context("failed to remove dir")?; - - // Setup TempDir with invalid permissions so it is cleaned up after test. - let _temp_dir = TempDir::new(&expected_path).context("failed to create temp dir")?; - fs::set_permissions(&expected_path, Permissions::from_mode(Mode::S_IRUSR.bits())) - .context("failed to set invalid permissions")?; - - assert!(determine_root_path(None).is_err()); - - Ok(()) - } - - #[test] - fn test_determine_root_path_rootless() -> Result<()> { - std::env::set_var("YOUKI_USE_ROOTLESS", "true"); - - // XDG_RUNTIME_DIR - let xdg_dir = get_temp_dir_path("xdg_runtime"); - std::env::set_var("XDG_RUNTIME_DIR", &xdg_dir); - let path = determine_root_path(None).context("failed with $XDG_RUNTIME_DIR path")?; - assert_eq!(path, xdg_dir.join("youki")); - assert!(path.exists()); - - std::env::remove_var("XDG_RUNTIME_DIR"); - - // Default rootless location - let uid = getuid().as_raw(); - let default_rootless_path = - get_temp_dir_path(format!("default_rootless_youki_path_{}", uid).as_str()); - // Create temp dir so it gets cleaned up. This is needed as we later switch permissions of this directory. - let _temp_dir = - TempDir::new(&default_rootless_path).context("failed to create temp dir")?; - let path = determine_root_path(None).context("failed with default rootless path")?; - assert_eq!(path, default_rootless_path); - assert!(path.exists()); - - // Set invalid permissions to default rootless path so that it fails for the next test. 
- fs::set_permissions( - default_rootless_path, - Permissions::from_mode(Mode::S_IRUSR.bits()), - ) - .context("failed to set invalid permissions")?; - - // Use HOME env var - let home_path = get_temp_dir_path("youki_home"); - fs::create_dir_all(&home_path).context("failed to create fake home path")?; - std::env::set_var("HOME", &home_path); - let path = determine_root_path(None).context("failed with $HOME path")?; - assert_eq!(path, home_path.join(".youki/run")); - assert!(path.exists()); - - std::env::remove_var("HOME"); - - // Use temp dir - let expected_temp_path = PathBuf::from(format!("/tmp/youki-{}", uid)); - // Create temp dir so it gets cleaned up. This is needed as we later switch permissions of this directory. - let _temp_dir = TempDir::new(&expected_temp_path).context("failed to create temp dir")?; - let path = determine_root_path(None).context("failed with temp path")?; - assert_eq!(path, expected_temp_path); - - // Set invalid permissions to temp path so determine_root_path fails. 
- fs::set_permissions( - expected_temp_path, - Permissions::from_mode(Mode::S_IRUSR.bits()), - ) - .context("failed to set invalid permissions")?; - - assert!(determine_root_path(None).is_err()); - - Ok(()) - } - - fn remove_dir(path: &Path) -> Result<()> { - if path.exists() { - fs::remove_dir(path).context("failed to remove directory")?; - } - Ok(()) - } -} diff --git a/crates/youki/src/observability.rs b/crates/youki/src/observability.rs new file mode 100644 index 000000000..a9bfd57dd --- /dev/null +++ b/crates/youki/src/observability.rs @@ -0,0 +1,295 @@ +use anyhow::{bail, Context, Result}; +use std::borrow::Cow; +use std::fs::OpenOptions; +use std::path::PathBuf; +use std::str::FromStr; +use tracing::Level; +use tracing_subscriber::prelude::*; + +const LOG_FORMAT_TEXT: &str = "text"; +const LOG_FORMAT_JSON: &str = "json"; +enum LogFormat { + Text, + Json, +} + +/// If in debug mode, default level is debug to get maximum logging +#[cfg(debug_assertions)] +const DEFAULT_LOG_LEVEL: &str = "debug"; + +/// If not in debug mode, default level is warn to get important logs +#[cfg(not(debug_assertions))] +const DEFAULT_LOG_LEVEL: &str = "error"; + +fn detect_log_format(log_format: Option<&str>) -> Result { + match log_format { + None | Some(LOG_FORMAT_TEXT) => Ok(LogFormat::Text), + Some(LOG_FORMAT_JSON) => Ok(LogFormat::Json), + Some(unknown) => bail!("unknown log format: {}", unknown), + } +} + +fn detect_log_level(input: Option, is_debug: bool) -> Result { + // We keep the `debug` flag for backward compatibility, but use `log-level` + // as the main way to set the log level due to the flexibility. If both are + // specified, `log-level` takes precedence. + let log_level: Cow = match input { + None if is_debug => "debug".into(), + None => DEFAULT_LOG_LEVEL.into(), + Some(level) => level.into(), + }; + + Ok(Level::from_str(log_level.as_ref())?) 
+} + +#[derive(Debug, Default)] +pub struct ObservabilityConfig { + pub log_debug_flag: bool, + pub log_level: Option, + pub log_file: Option, + pub log_format: Option, + pub systemd_log: bool, +} + +impl From<&crate::Opts> for ObservabilityConfig { + fn from(opts: &crate::Opts) -> Self { + Self { + log_debug_flag: opts.global.debug, + log_level: opts.youki_extend.log_level.to_owned(), + log_file: opts.global.log.to_owned(), + log_format: opts.global.log_format.to_owned(), + systemd_log: opts.youki_extend.systemd_log, + } + } +} + +pub fn init(config: T) -> Result<()> +where + T: Into, +{ + let config = config.into(); + let level = detect_log_level(config.log_level, config.log_debug_flag) + .with_context(|| "failed to parse log level")?; + let log_level_filter = tracing_subscriber::filter::LevelFilter::from(level); + let log_format = detect_log_format(config.log_format.as_deref()) + .with_context(|| "failed to detect log format")?; + let systemd_journald = if config.systemd_log { + Some(tracing_journald::layer()?.with_syslog_identifier("youki".to_string())) + } else { + None + }; + let subscriber = tracing_subscriber::registry() + .with(log_level_filter) + .with(systemd_journald); + + // I really dislike how we have to specify individual branch for each + // combination, but I can't find any better way to do this. The tracing + // crate makes it hard to build a single format layer with different + // conditions. 
+ match (config.log_file.as_ref(), log_format) { + (None, LogFormat::Text) => { + // Text to stderr + subscriber + .with( + tracing_subscriber::fmt::layer() + .without_time() + .with_writer(std::io::stderr), + ) + .try_init() + .map_err(|e| anyhow::anyhow!("failed to init logger: {}", e))?; + } + (None, LogFormat::Json) => { + // JSON to stderr + subscriber + .with( + tracing_subscriber::fmt::layer() + .json() + .flatten_event(true) + .with_span_list(false) + .with_writer(std::io::stderr), + ) + .try_init() + .map_err(|e| anyhow::anyhow!("failed to init logger: {}", e))?; + } + (Some(path), LogFormat::Text) => { + // Log file with text format + let file = OpenOptions::new() + .create(true) + .write(true) + .truncate(false) + .open(path) + .with_context(|| "failed to open log file")?; + subscriber + .with(tracing_subscriber::fmt::layer().with_writer(file)) + .try_init() + .map_err(|e| anyhow::anyhow!("failed to init logger: {}", e))?; + } + (Some(path), LogFormat::Json) => { + // Log file with JSON format + let file = OpenOptions::new() + .create(true) + .write(true) + .truncate(false) + .open(path) + .with_context(|| "failed to open log file")?; + subscriber + .with( + tracing_subscriber::fmt::layer() + .json() + .flatten_event(true) + .with_span_list(false) + .with_writer(file), + ) + .try_init() + .map_err(|e| anyhow::anyhow!("failed to init logger: {}", e))?; + } + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use libcontainer::test_utils::TestCallbackError; + use std::path::Path; + + #[test] + fn test_detect_log_level() { + let test = vec![ + ("error", tracing::Level::ERROR), + ("warn", tracing::Level::WARN), + ("info", tracing::Level::INFO), + ("debug", tracing::Level::DEBUG), + ("trace", tracing::Level::TRACE), + ]; + for (input, expected) in test { + assert_eq!( + detect_log_level(Some(input.to_string()), false) + .expect("failed to parse log level"), + expected + ) + } + assert_eq!( + detect_log_level(None, true).expect("failed to parse log 
level"), + tracing::Level::DEBUG + ); + // Invalid log level should fail the parse + assert!(detect_log_level(Some("invalid".to_string()), false).is_err()); + } + + #[test] + fn test_detect_log_level_default() { + if cfg!(debug_assertions) { + assert_eq!( + detect_log_level(None, false).unwrap(), + tracing::Level::DEBUG + ) + } else { + assert_eq!( + detect_log_level(None, false).unwrap(), + tracing::Level::ERROR + ) + } + } + + #[test] + fn test_init_many_times() -> Result<()> { + let cb = || { + let temp_dir = tempfile::tempdir().expect("failed to create temp dir"); + let log_file = Path::join(temp_dir.path(), "test.log"); + let config = ObservabilityConfig { + log_file: Some(log_file), + ..Default::default() + }; + init(config).map_err(|err| TestCallbackError::Other(err.into()))?; + Ok(()) + }; + libcontainer::test_utils::test_in_child_process(cb) + .with_context(|| "failed the first init tracing")?; + libcontainer::test_utils::test_in_child_process(cb) + .with_context(|| "failed the second init tracing")?; + Ok(()) + } + + #[test] + fn test_higher_loglevel_no_log() -> Result<()> { + libcontainer::test_utils::test_in_child_process(|| { + let temp_dir = tempfile::tempdir().expect("failed to create temp dir"); + let log_file = Path::join(temp_dir.path(), "test.log"); + // Note, we can only init the tracing once, so we have to test in a + // single unit test. The orders are important here. + let config = ObservabilityConfig { + log_file: Some(log_file.clone()), + log_level: Some("error".to_string()), + ..Default::default() + }; + init(config).map_err(|err| TestCallbackError::Other(err.into()))?; + assert!( + log_file + .as_path() + .metadata() + .expect("failed to get logfile metadata") + .len() + == 0, + "a new logfile should be empty" + ); + // Test that info level is not logged into the logfile because we set the log level to error. 
+ tracing::info!("testing this"); + if log_file + .as_path() + .metadata() + .map_err(|err| format!("failed to get logfile metadata: {err:?}"))? + .len() + != 0 + { + let data = std::fs::read_to_string(&log_file) + .map_err(|err| format!("failed to read the logfile: {err:?}"))?; + Err(TestCallbackError::Custom(format!( + "info level should not be logged into the logfile, but got: {data}" + )))?; + } + + Ok(()) + })?; + + Ok(()) + } + + #[test] + fn test_json_logfile() -> Result<()> { + libcontainer::test_utils::test_in_child_process(|| { + let temp_dir = tempfile::tempdir().expect("failed to create temp dir"); + let log_file = Path::join(temp_dir.path(), "test.log"); + // Note, we can only init the tracing once, so we have to test in a + // single unit test. The orders are important here. + let config = ObservabilityConfig { + log_file: Some(log_file.clone()), + log_format: Some(LOG_FORMAT_JSON.to_owned()), + ..Default::default() + }; + init(config).map_err(|err| TestCallbackError::Other(err.into()))?; + assert!( + log_file + .as_path() + .metadata() + .expect("failed to get logfile metadata") + .len() + == 0, + "a new logfile should be empty" + ); + // Test that the message logged is actually JSON format. 
+ tracing::error!("testing json log"); + let data = std::fs::read_to_string(&log_file) + .map_err(|err| format!("failed to read the logfile: {err:?}"))?; + if data.is_empty() { + Err("logfile should not be empty")?; + } + serde_json::from_str::(&data) + .map_err(|err| format!("failed to parse {data}: {err:?}"))?; + Ok(()) + })?; + + Ok(()) + } +} diff --git a/crates/youki/src/rootpath.rs b/crates/youki/src/rootpath.rs new file mode 100644 index 000000000..1f30dddd0 --- /dev/null +++ b/crates/youki/src/rootpath.rs @@ -0,0 +1,203 @@ +use anyhow::{bail, Result}; +use libcontainer::rootless::rootless_required; +use libcontainer::utils::create_dir_all_with_mode; +use nix::libc; +use nix::sys::stat::Mode; +use nix::unistd::getuid; +use std::fs; +use std::path::{Path, PathBuf}; + +pub fn determine(root_path: Option) -> Result { + let uid = getuid().as_raw(); + + if let Some(path) = root_path { + if !path.exists() { + create_dir_all_with_mode(&path, uid, Mode::S_IRWXU)?; + } + let path = path.canonicalize()?; + return Ok(path); + } + + if !rootless_required() { + let path = get_default_not_rootless_path(); + create_dir_all_with_mode(&path, uid, Mode::S_IRWXU)?; + return Ok(path); + } + + // see https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html + if let Ok(path) = std::env::var("XDG_RUNTIME_DIR") { + let path = Path::new(&path).join("youki"); + if create_dir_all_with_mode(&path, uid, Mode::S_IRWXU).is_ok() { + return Ok(path); + } + } + + // XDG_RUNTIME_DIR is not set, try the usual location + let path = get_default_rootless_path(uid); + if create_dir_all_with_mode(&path, uid, Mode::S_IRWXU).is_ok() { + return Ok(path); + } + + if let Ok(path) = std::env::var("HOME") { + if let Ok(resolved) = fs::canonicalize(path) { + let run_dir = resolved.join(".youki/run"); + if create_dir_all_with_mode(&run_dir, uid, Mode::S_IRWXU).is_ok() { + return Ok(run_dir); + } + } + } + + let tmp_dir = PathBuf::from(format!("/tmp/youki-{uid}")); + if 
create_dir_all_with_mode(&tmp_dir, uid, Mode::S_IRWXU).is_ok() { + return Ok(tmp_dir); + } + + bail!("could not find a storage location with suitable permissions for the current user"); +} + +#[cfg(not(test))] +fn get_default_not_rootless_path() -> PathBuf { + PathBuf::from("/run/youki") +} + +#[cfg(test)] +fn get_default_not_rootless_path() -> PathBuf { + std::env::temp_dir().join("default_youki_path") +} + +#[cfg(not(test))] +fn get_default_rootless_path(uid: libc::uid_t) -> PathBuf { + PathBuf::from(format!("/run/user/{uid}/youki")) +} + +#[cfg(test)] +fn get_default_rootless_path(uid: libc::uid_t) -> PathBuf { + std::env::temp_dir().join(format!("default_rootless_youki_path_{uid}").as_str()) +} + +#[cfg(test)] +mod tests { + use super::*; + use anyhow::{Context, Result}; + use nix::sys::stat::Mode; + use nix::unistd::getuid; + use std::fs; + use std::fs::Permissions; + use std::os::unix::fs::PermissionsExt; + use std::path::PathBuf; + + #[test] + fn test_user_specified() -> Result<()> { + // If the user specifies a path that does not exist, we should + // create it and return the absolute path. + let tmp = tempfile::tempdir()?; + // Note, the path doesn't exist yet because tempfile generated a random new empty dir. + let specified_path = tmp.path().join("provided_path"); + let non_abs_path = specified_path.join("../provided_path"); + let path = determine(Some(non_abs_path)).context("failed with specified path")?; + assert_eq!(path, specified_path); + Ok(()) + } + + #[test] + fn test_user_specified_exists() -> Result<()> { + // If the user specifies a path that exists, we should return the + // absolute path. 
+ let tmp = tempfile::tempdir()?; + let specified_path = tmp.path().join("provided_path"); + std::fs::create_dir(&specified_path).context("failed to create dir")?; + let non_abs_path = specified_path.join("../provided_path"); + let path = determine(Some(non_abs_path)).context("failed with specified path")?; + assert_eq!(path, specified_path); + + Ok(()) + } + + #[test] + fn test_determine_root_path_non_rootless() -> Result<()> { + // If we do not have root privileges skip the test as it will not succeed. + if !getuid().is_root() { + return Ok(()); + } + + { + let expected_path = super::get_default_not_rootless_path(); + let path = determine(None).context("failed with default non rootless path")?; + assert_eq!(path, expected_path); + assert!(path.exists()); + fs::remove_dir_all(&expected_path).context("failed to remove dir")?; + } + { + let expected_path = get_default_not_rootless_path(); + fs::create_dir(&expected_path).context("failed to create dir")?; + fs::set_permissions(&expected_path, Permissions::from_mode(Mode::S_IRUSR.bits())) + .context("failed to set invalid permissions")?; + assert!(determine(None).is_err()); + fs::remove_dir_all(&expected_path).context("failed to remove dir")?; + } + + Ok(()) + } + + #[test] + fn test_determine_root_path_rootless() -> Result<()> { + std::env::set_var("YOUKI_USE_ROOTLESS", "true"); + + // XDG_RUNTIME_DIR + let tmp = tempfile::tempdir()?; + let xdg_dir = tmp.path().join("xdg_runtime"); + std::env::set_var("XDG_RUNTIME_DIR", &xdg_dir); + let path = determine(None).context("failed with $XDG_RUNTIME_DIR path")?; + assert_eq!(path, xdg_dir.join("youki")); + assert!(path.exists()); + std::env::remove_var("XDG_RUNTIME_DIR"); + + // Default rootless location + let uid = getuid().as_raw(); + let default_rootless_path = get_default_rootless_path(uid); + scopeguard::defer!({ + let _ = fs::remove_dir_all(&default_rootless_path); + }); + let path = determine(None).context("failed with default rootless path")?; + assert_eq!(path, 
default_rootless_path); + assert!(path.exists()); + + // The `determine` function will default to the rootless default + // path under `/run/user/$uid``. To test the `determine` function to + // not use the default rootless path, we need to make the default + // rootless path fail to create. So we set the path to an invalid + // permission, so the `determine` function will use HOME env var or + // `/tmp` directory instead. + fs::set_permissions( + &default_rootless_path, + Permissions::from_mode(Mode::S_IRUSR.bits()), + ) + .context("failed to set invalid permissions")?; + + // Use HOME env var + let tmp = tempfile::tempdir()?; + let home_path = tmp.path().join("youki_home"); + fs::create_dir_all(&home_path).context("failed to create fake home path")?; + std::env::set_var("HOME", &home_path); + let path = determine(None).context("failed with $HOME path")?; + assert_eq!(path, home_path.join(".youki/run")); + assert!(path.exists()); + std::env::remove_var("HOME"); + + // Use /tmp dir + let uid = getuid().as_raw(); + let expected_temp_path = PathBuf::from(format!("/tmp/youki-{uid}")); + let path = determine(None).context("failed with temp path")?; + assert_eq!(path, expected_temp_path); + // Set invalid permissions to temp path so determine_root_path fails. 
+ fs::set_permissions( + &expected_temp_path, + Permissions::from_mode(Mode::S_IRUSR.bits()), + ) + .context("failed to set invalid permissions")?; + assert!(determine(None).is_err()); + fs::remove_dir_all(&expected_temp_path).context("failed to remove dir")?; + + Ok(()) + } +} diff --git a/crates/youki/src/workload/executor.rs b/crates/youki/src/workload/executor.rs new file mode 100644 index 000000000..9565c16b5 --- /dev/null +++ b/crates/youki/src/workload/executor.rs @@ -0,0 +1,29 @@ +use libcontainer::oci_spec::runtime::Spec; +use libcontainer::workload::{Executor, ExecutorError}; + +pub fn default_executor() -> Executor { + Box::new(|spec: &Spec| -> Result<(), ExecutorError> { + #[cfg(feature = "wasm-wasmer")] + match super::wasmer::get_executor()(spec) { + Ok(_) => return Ok(()), + Err(ExecutorError::CantHandle(_)) => (), + Err(err) => return Err(err), + } + #[cfg(feature = "wasm-wasmedge")] + match super::wasmedge::get_executor()(spec) { + Ok(_) => return Ok(()), + Err(ExecutorError::CantHandle(_)) => (), + Err(err) => return Err(err), + } + #[cfg(feature = "wasm-wasmtime")] + match super::wasmtime::get_executor()(spec) { + Ok(_) => return Ok(()), + Err(ExecutorError::CantHandle(_)) => (), + Err(err) => return Err(err), + } + + // Leave the default executor as the last option, which executes normal + // container workloads. 
+ libcontainer::workload::default::get_executor()(spec) + }) +} diff --git a/crates/youki/src/workload/mod.rs b/crates/youki/src/workload/mod.rs new file mode 100644 index 000000000..ce00f36c9 --- /dev/null +++ b/crates/youki/src/workload/mod.rs @@ -0,0 +1,7 @@ +pub mod executor; +#[cfg(feature = "wasm-wasmedge")] +mod wasmedge; +#[cfg(feature = "wasm-wasmer")] +mod wasmer; +#[cfg(feature = "wasm-wasmtime")] +mod wasmtime; diff --git a/crates/youki/src/workload/wasmedge.rs b/crates/youki/src/workload/wasmedge.rs new file mode 100644 index 000000000..eb813e3a6 --- /dev/null +++ b/crates/youki/src/workload/wasmedge.rs @@ -0,0 +1,100 @@ +use libcontainer::oci_spec::runtime::Spec; +use wasmedge_sdk::{ + config::{CommonConfigOptions, ConfigBuilder, HostRegistrationConfigOptions}, + params, VmBuilder, +}; + +use libcontainer::workload::{Executor, ExecutorError}; + +const EXECUTOR_NAME: &str = "wasmedge"; + +pub fn get_executor() -> Executor { + Box::new(|spec: &Spec| -> Result<(), ExecutorError> { + if !can_handle(spec) { + return Err(ExecutorError::CantHandle(EXECUTOR_NAME)); + } + + tracing::debug!("executing workload with wasmedge handler"); + + // parse wasi parameters + let args = get_args(spec); + let mut cmd = args[0].clone(); + if let Some(stripped) = args[0].strip_prefix(std::path::MAIN_SEPARATOR) { + cmd = stripped.to_string(); + } + let envs = env_to_wasi(spec); + + // create configuration with `wasi` option enabled + let config = ConfigBuilder::new(CommonConfigOptions::default()) + .with_host_registration_config(HostRegistrationConfigOptions::default().wasi(true)) + .build() + .map_err(|err| { + ExecutorError::Other(format!("failed to create wasmedge config: {}", err)) + })?; + + // create a vm with the config settings + let mut vm = VmBuilder::new() + .with_config(config) + .build::<()>() + .map_err(|err| ExecutorError::Other(format!("failed to create wasmedge vm: {}", err)))? 
+ .register_module_from_file("main", cmd) + .map_err(|err| { + ExecutorError::Other(format!( + "failed to register wasmedge module from the file: {}", + err + )) + })?; + // initialize the wasi module with the parsed parameters + let wasi_instance = vm + .wasi_module_mut() + .expect("config doesn't contain HostRegistrationConfigOptions"); + wasi_instance.initialize( + Some(args.iter().map(|s| s as &str).collect()), + Some(envs.iter().map(|s| s as &str).collect()), + None, + ); + + vm.run_func(Some("main"), "_start", params!()) + .map_err(|err| ExecutorError::Execution(err))?; + + Ok(()) + }) +} + +fn can_handle(spec: &Spec) -> bool { + if let Some(annotations) = spec.annotations() { + if let Some(handler) = annotations.get("run.oci.handler") { + return handler == "wasm"; + } + + if let Some(variant) = annotations.get("module.wasm.image/variant") { + return variant == "compat"; + } + } + + false +} + +fn get_args(spec: &Spec) -> &[String] { + let p = match spec.process() { + None => return &[], + Some(p) => p, + }; + + match p.args() { + None => &[], + Some(args) => args.as_slice(), + } +} + +fn env_to_wasi(spec: &Spec) -> Vec { + let default = vec![]; + let env = spec + .process() + .as_ref() + .unwrap() + .env() + .as_ref() + .unwrap_or(&default); + env.to_vec() +} diff --git a/crates/youki/src/workload/wasmer.rs b/crates/youki/src/workload/wasmer.rs new file mode 100644 index 000000000..1f0a4b824 --- /dev/null +++ b/crates/youki/src/workload/wasmer.rs @@ -0,0 +1,139 @@ +use libcontainer::oci_spec::runtime::Spec; +use wasmer::{Instance, Module, Store}; +use wasmer_wasix::WasiEnv; + +use libcontainer::workload::{Executor, ExecutorError, EMPTY}; + +const EXECUTOR_NAME: &str = "wasmer"; + +pub fn get_executor() -> Executor { + Box::new(|spec: &Spec| -> Result<(), ExecutorError> { + if !can_handle(spec) { + return Err(ExecutorError::CantHandle(EXECUTOR_NAME)); + } + + tracing::debug!("executing workload with wasmer handler"); + let process = spec.process().as_ref(); + 
+ let args = process.and_then(|p| p.args().as_ref()).unwrap_or(&EMPTY); + let env = process + .and_then(|p| p.env().as_ref()) + .unwrap_or(&EMPTY) + .iter() + .filter_map(|e| { + e.split_once('=') + .filter(|kv| !kv.0.contains('\u{0}') && !kv.1.contains('\u{0}')) + .map(|kv| (kv.0.trim(), kv.1.trim())) + }); + + if args.is_empty() { + tracing::error!("at least one process arg must be specified"); + return Err(ExecutorError::InvalidArg); + } + + if !args[0].ends_with(".wasm") && !args[0].ends_with(".wat") { + tracing::error!( + "first argument must be a wasm or wat module, but was {}", + args[0] + ); + return Err(ExecutorError::InvalidArg); + } + + let mut store = Store::default(); + let module = Module::from_file(&store, &args[0]).map_err(|err| { + tracing::error!(err = ?err, file = ?args[0], "could not load wasm module from file"); + ExecutorError::Other("could not load wasm module from file".to_string()) + })?; + + let mut wasi_env = WasiEnv::builder("youki_wasm_app") + .args(args.iter().skip(1)) + .envs(env) + .finalize(&mut store) + .map_err(|err| ExecutorError::Other(format!("could not create wasi env: {}", err)))?; + + let imports = wasi_env.import_object(&mut store, &module).map_err(|err| { + ExecutorError::Other(format!("could not retrieve wasm imports: {}", err)) + })?; + let instance = Instance::new(&mut store, &module, &imports).map_err(|err| { + ExecutorError::Other(format!("could not instantiate wasm module: {}", err)) + })?; + + wasi_env + .initialize(&mut store, instance.clone()) + .map_err(|err| { + ExecutorError::Other(format!("could not initialize wasi env: {}", err)) + })?; + + let start = instance.exports.get_function("_start").map_err(|err| { + ExecutorError::Other(format!( + "could not retrieve wasm module main function: {err}" + )) + })?; + start + .call(&mut store, &[]) + .map_err(|err| ExecutorError::Execution(err.into()))?; + + wasi_env.cleanup(&mut store, None); + + Ok(()) + }) +} + +fn can_handle(spec: &Spec) -> bool { + if let 
Some(annotations) = spec.annotations() { + if let Some(handler) = annotations.get("run.oci.handler") { + return handler == "wasm"; + } + + if let Some(variant) = annotations.get("module.wasm.image/variant") { + return variant == "compat"; + } + } + + false +} + +#[cfg(test)] +mod tests { + use super::*; + use anyhow::{Context, Result}; + use libcontainer::oci_spec::runtime::SpecBuilder; + use std::collections::HashMap; + + #[test] + fn test_can_handle_oci_handler() -> Result<()> { + let mut annotations = HashMap::with_capacity(1); + annotations.insert("run.oci.handler".to_owned(), "wasm".to_owned()); + let spec = SpecBuilder::default() + .annotations(annotations) + .build() + .context("build spec")?; + + assert!(can_handle(&spec)); + + Ok(()) + } + + #[test] + fn test_can_handle_compat_wasm_spec() -> Result<()> { + let mut annotations = HashMap::with_capacity(1); + annotations.insert("module.wasm.image/variant".to_owned(), "compat".to_owned()); + let spec = SpecBuilder::default() + .annotations(annotations) + .build() + .context("build spec")?; + + assert!(can_handle(&spec)); + + Ok(()) + } + + #[test] + fn test_can_handle_no_execute() -> Result<()> { + let spec = SpecBuilder::default().build().context("build spec")?; + + assert!(!can_handle(&spec)); + + Ok(()) + } +} diff --git a/crates/youki/src/workload/wasmtime.rs b/crates/youki/src/workload/wasmtime.rs new file mode 100644 index 000000000..6834ea617 --- /dev/null +++ b/crates/youki/src/workload/wasmtime.rs @@ -0,0 +1,104 @@ +use libcontainer::oci_spec::runtime::Spec; +use wasmtime::*; +use wasmtime_wasi::WasiCtxBuilder; + +use libcontainer::workload::{Executor, ExecutorError, EMPTY}; + +const EXECUTOR_NAME: &str = "wasmtime"; + +pub fn get_executor() -> Executor { + Box::new(|spec: &Spec| -> Result<(), ExecutorError> { + if !can_handle(spec) { + return Err(ExecutorError::CantHandle(EXECUTOR_NAME)); + } + + tracing::debug!("executing workload with wasmtime handler"); + let process = spec.process().as_ref(); + + 
let args = spec + .process() + .as_ref() + .and_then(|p| p.args().as_ref()) + .unwrap_or(&EMPTY); + if args.is_empty() { + tracing::error!("at least one process arg must be specified"); + return Err(ExecutorError::InvalidArg); + } + + if !args[0].ends_with(".wasm") && !args[0].ends_with(".wat") { + tracing::error!( + "first argument must be a wasm or wat module, but was {}", + args[0] + ); + return Err(ExecutorError::InvalidArg); + } + + let mut cmd = args[0].clone(); + let stripped = args[0].strip_prefix(std::path::MAIN_SEPARATOR); + if let Some(cmd_stripped) = stripped { + cmd = cmd_stripped.to_string(); + } + + let envs: Vec<(String, String)> = process + .and_then(|p| p.env().as_ref()) + .unwrap_or(&EMPTY) + .iter() + .filter_map(|e| { + e.split_once('=') + .map(|kv| (kv.0.trim().to_string(), kv.1.trim().to_string())) + }) + .collect(); + + let engine = Engine::default(); + let module = Module::from_file(&engine, &cmd).map_err(|err| { + tracing::error!(err = ?err, file = ?cmd, "could not load wasm module from file"); + ExecutorError::Other("could not load wasm module from file".to_string()) + })?; + + let mut linker = Linker::new(&engine); + wasmtime_wasi::add_to_linker(&mut linker, |s| s).map_err(|err| { + tracing::error!(err = ?err, "cannot add wasi context to linker"); + ExecutorError::Other("cannot add wasi context to linker".to_string()) + })?; + + let wasi = WasiCtxBuilder::new() + .inherit_stdio() + .args(args) + .map_err(|err| { + ExecutorError::Other(format!("cannot add args to wasi context: {}", err)) + })? + .envs(&envs) + .map_err(|err| { + ExecutorError::Other(format!("cannot add envs to wasi context: {}", err)) + })? 
+ .build(); + + let mut store = Store::new(&engine, wasi); + + let instance = linker.instantiate(&mut store, &module).map_err(|err| { + tracing::error!(err = ?err, "wasm module could not be instantiated"); + ExecutorError::Other("wasm module could not be instantiated".to_string()) + })?; + let start = instance.get_func(&mut store, "_start").ok_or_else(|| { + ExecutorError::Other("could not retrieve wasm module main function".into()) + })?; + + start + .call(&mut store, &[], &mut []) + .map_err(|err| ExecutorError::Execution(err.into())) + }) +} + +fn can_handle(spec: &Spec) -> bool { + if let Some(annotations) = spec.annotations() { + if let Some(handler) = annotations.get("run.oci.handler") { + return handler == "wasm"; + } + + if let Some(variant) = annotations.get("module.wasm.image/variant") { + return variant == "compat"; + } + } + + false +} diff --git a/docs/archive/youki.png b/docs/archive/youki.png new file mode 100644 index 000000000..40da2062b Binary files /dev/null and b/docs/archive/youki.png differ diff --git a/docs/youki_flat.png b/docs/archive/youki_flat.png similarity index 100% rename from docs/youki_flat.png rename to docs/archive/youki_flat.png diff --git a/docs/youki_flat.svg b/docs/archive/youki_flat.svg similarity index 100% rename from docs/youki_flat.svg rename to docs/archive/youki_flat.svg diff --git a/docs/youki_flat_full.png b/docs/archive/youki_flat_full.png similarity index 100% rename from docs/youki_flat_full.png rename to docs/archive/youki_flat_full.png diff --git a/docs/doc-draft.md b/docs/doc-draft.md index 762cae9d6..2e92db412 100644 --- a/docs/doc-draft.md +++ b/docs/doc-draft.md @@ -25,7 +25,7 @@ sequenceDiagram participant U as User participant D as Docker participant Y_Main as Youki(Main Process) -participant Y_Intermediate as Youki(Intermeidate Process) +participant Y_Intermediate as Youki(Intermediate Process) participant Y_init as Youki(Init Process) @@ -122,5 +122,7 @@ This contains functionality regarding pausing and 
resuming container. Pausing a - [cgroups man page](https://man7.org/linux/man-pages/man7/cgroups.7.html) - [freezer cgroup kernel documentation](https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) -[oci runtime specification]: https://github.com/opencontainers/runtime-spec/blob/master/runtime.md -[runc man pages]: (https://github.com/opencontainers/runc/blob/master/man/runc.8.md) +## Other references + +- [oci runtime specification](https://github.com/opencontainers/runtime-spec/blob/master/runtime.md) +- [runc man pages](https://github.com/opencontainers/runc/blob/master/man/runc.8.md) diff --git a/docs/src/assets/youki.png b/docs/src/assets/youki.png index 40da2062b..4f4f98c94 100644 Binary files a/docs/src/assets/youki.png and b/docs/src/assets/youki.png differ diff --git a/docs/src/developer/basics.md b/docs/src/developer/basics.md index 874fce3bc..7dd8fff06 100644 --- a/docs/src/developer/basics.md +++ b/docs/src/developer/basics.md @@ -6,7 +6,7 @@ This section has the general information and resources needed to work with any p Youki is a low level container runtime, which deals with the creation and management of Linux containers. Some of other such low-level runtimes are [runc](https://github.com/opencontainers/runc) and [crun](https://github.com/containers/crun). These are usually used by a higher-level runtime such as Docker or Podman to actually create and manage containers, where the higher level runtime provides a much easier interface for users. -Before you start working on developing youki, you should go through [the User documentation](../user/introduction) as it specifies the requirements and setup for running youki. For developing youki, you will need to install the dependencies and clone the repo, as specified in the [Basic Setup](../user/basic_setup.md) and [Basic Usage](../user/basic_usage.md) sections. 
+Before you start working on developing youki, you should go through [the User documentation](../user/introduction.md) as it specifies the requirements and setup for running youki. For developing youki, you will need to install the dependencies and clone the repo, as specified in the [Basic Setup](../user/basic_setup.md) and [Basic Usage](../user/basic_usage.md) sections. ## Testing while developing diff --git a/docs/src/developer/libcgroups.md b/docs/src/developer/libcgroups.md index 6300ed147..a1f666780 100644 --- a/docs/src/developer/libcgroups.md +++ b/docs/src/developer/libcgroups.md @@ -16,7 +16,7 @@ This crates exposes several functions and modules that can be used to work with - CPU stats including usage and throttling - Memory stats including usage of normal and swap memory, usage of kernel memory, page cache in bytes etc - - Pid stat including current active pids nd maximum allowed pids + - Pid stat including current active pids and maximum allowed pids - Block IO stats such as number of bytest transferred to/from a device in the cgroup, io operations performed by a device in the cgroup, amount of time cgroup had access to a device etc - Huge TLB stats such as usage and maximum usage etc. - Function to get pid stats diff --git a/docs/src/developer/libcontainer.md b/docs/src/developer/libcontainer.md index 09ca887e0..e1221e1c2 100644 --- a/docs/src/developer/libcontainer.md +++ b/docs/src/developer/libcontainer.md @@ -23,7 +23,21 @@ This crate also provides an interface for Apparmor which is another Linux Kernel - tty module which daels with providing terminal interface to the container process - [pseudoterminal man page](https://man7.org/linux/man-pages/man7/pty.7.html) : Information about the pseudoterminal system, useful to understand console_socket parameter in create subcommand -#### Namespaces : namespaces provide isolation of resources such as filesystem, process ids networks etc on kernel level. 
This module contains structs and functions related to applying or un-applying namespaces to the calling process. +#### Executor + +By default and traditionally, the executor forks and execs into the binary +command that specified in the oci spec. Using executors, we can override this +behavior. For example, `youki` uses executor to implement running wasm +workloads. Instead of running the command specified in the process section of +the OCI spec, the wasm related executors can choose to execute wasm code +instead. The executor will run at the end of the container init process. + +The API accepts only a single executor, so when using multiple executors, (try +wasm first, then defaults to running a binary), the users should compose +multiple executors into a single executor. The executor will return an error +when the executor can't handle the workload. + +#### Namespaces : namespaces provide isolation of resources such as filesystem, process ids networks etc on kernel level. This module contains structs and functions related to applying or un-applying namespaces to the calling process - [pid namespace man page](https://man7.org/linux/man-pages/man7/pid_namespaces.7.html) - [CLONE_NEWUSER flag](https://man7.org/linux/man-pages/man2/clone.2.html) @@ -40,7 +54,7 @@ Pausing a container indicates suspending all processes in it. This can be done w - [cgroups man page](https://man7.org/linux/man-pages/man7/cgroups.7.html) - [freezer cgroup kernel documentation](https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) -#### The following are some resources that can help understand with various Linux features used in the code of this crate. 
+#### The following are some resources that can help understand with various Linux features used in the code of this crate - [oom-score-adj](https://dev.to/rrampage/surviving-the-linux-oom-killer-2ki9) - [unshare man page](https://man7.org/linux/man-pages/man1/unshare.1.html) diff --git a/docs/src/developer/youki.md b/docs/src/developer/youki.md index 15aad63c9..478e82a8e 100644 --- a/docs/src/developer/youki.md +++ b/docs/src/developer/youki.md @@ -8,7 +8,7 @@ The simple control flow of youki can be explained as :

-When given the create command, Youki will load the specification, configuration, sockets etc., and use clone syscall to create an intermediate process. This process will set the cgroups and capabilities, and then fork to the init process. Reason to create this intermediate prcoess is that the clone syscall cannot enter into existing pid namespace that has been created for the container. Thus first we need to make a transition to that namespace in the intermediate process and fork that to the container process. After that the main youki process is requested the uid and gid mappings, and after receiving them the intermediate process sets these mapping, fork the init process and return pid of this init process to the main youki process before exiting. +When given the create command, Youki will load the specification, configuration, sockets etc., and use clone syscall to create an intermediate process. This process will set the cgroups and capabilities, and then fork to the init process. Reason to create this intermediate process is that the clone syscall cannot enter into existing pid namespace that has been created for the container. Thus first we need to make a transition to that namespace in the intermediate process and fork that to the container process. After that the main youki process is requested the uid and gid mappings, and after receiving them the intermediate process sets these mapping, fork the init process and return pid of this init process to the main youki process before exiting. The init process then transition completely into the new namespace setup for the container (the init process only transitions the pid namespace). It changes the root mountpoint for the process using [pivot_root](https://man7.org/linux/man-pages/man2/pivot_root.2.html), so that the container process can get impression that it has a complete root path access. 
After that the init process sets up the capabilities and seccomp, and sends the seccomp notify fd to the main youki process. When the seccomp agent running on the host system sets up the seccomp profile, it notifies the init process, after which it can execute the programto be executed inside the container. Thus the init process then sends ready notification to the main youki process, and waits for the start signal. diff --git a/docs/src/user/basic_setup.md b/docs/src/user/basic_setup.md index 2ac41a84e..d89fe0976 100644 --- a/docs/src/user/basic_setup.md +++ b/docs/src/user/basic_setup.md @@ -6,7 +6,7 @@ Youki currently only supports Linux Platform, and to use it on other platform yo Also note that Youki currently only supports and expects systemd as init system, and would not work on other systems. There is currently work on-going to put systemd dependent features behind a feature flag, but till then you will need a systemd enabled system to work with Youki. -### Requirements +## Requirements As Youki is written in Rust, you will need to install and setup Rust toolchain to compile it. The instructions for that can be found on Rust's official site [here](https://www.rust-lang.org/tools/install). @@ -14,61 +14,76 @@ You can use Youki by itself to start and run containers, but it can be a little To compile and run, Youki itself depends on some underlying libraries being installed. You can install them using your respective package manager as shown below. 
-#### Debian, Ubuntu and related distributions +### Debian, Ubuntu and related distributions ```console -$ sudo apt-get install \ - pkg-config \ - libsystemd-dev \ - libdbus-glib-1-dev \ - build-essential \ - libelf-dev \ - libseccomp-dev \ - libclang-dev +$ sudo apt-get install \ + pkg-config \ + libsystemd-dev \ + libdbus-glib-1-dev \ + build-essential \ + libelf-dev \ + libseccomp-dev \ + libclang-dev \ + libssl-dev ``` -#### Fedora, Centos, RHEL and related distributions +### Fedora, CentOS, RHEL and related distributions ```console -$ sudo dnf install \ - pkg-config \ - systemd-devel \ - dbus-devel \ +$ sudo dnf install \ + pkg-config \ + systemd-devel \ + dbus-devel \ elfutils-libelf-devel \ - libseccomp-devel \ - libclang-dev + libseccomp-devel \ + clang-devel \ + openssl-devel ``` --- -### Getting the source +## Quick install + +Install from the GitHub release. +Note that this way also requires the aforementioned installation. + +```console +$ wget https://github.com/containers/youki/releases/download/v0.1.0/youki_0_1_0_linux.tar.gz +$ tar -zxvf youki_0_1_0_linux.tar.gz youki_0_1_0_linux/youki-0.1.0/youki +# Maybe you need root privileges. +$ mv youki_0_1_0_linux/youki-0.1.0/youki /usr/local/bin/youki +$ rm -rf youki_0_1_0_linux.tar.gz youki_0_1_0_linux +``` + +## Getting the source Currently Youki can only be installed from the source code itself, so you will need to clone the Youki GitHub repository to get the source code for using it as a runtime. If you are using any crates of Youki as dependency you need to do this step, as Cargo will automatically clone the repository for you. To clone the repository, run ```console -git clone https://github.com/containers/youki.git +$ git clone https://github.com/containers/youki.git ``` This will create a directory named youki in the directory you ran the command in. This youki directory will be referred to as root directory throughout the documentation. 
-### Installing the source +## Installing the source Once you have cloned the source, you can build it using ```console # go into the cloned directory -cd youki -make youki-dev # or youki-release -./youki -h # get information about youki command +$ cd youki +$ make youki-dev # or youki-release +$ ./youki -h # get information about youki command ``` This will build the Youki binary, and put it at the root level of the cloned directory, that is in the youki/ . --- -### Using sub-crates as dependency +## Using sub-crates as dependency To use any of the sub-crate as a dependency in your own project, you can specify the dependency as follows, @@ -89,7 +104,7 @@ use liboci_cli::{...} --- -### Using Vagrant to run Youki on non-Linux Platform +## Using Vagrant to run Youki on non-Linux Platform As explained before, Youki only support Linux, and to build/use it on non-Linux Platforms, you will need to use some kind of virtualization. The repo provides a Vagrantfile to do the required VM setup using Vagrant, which can be installed from [here](https://www.vagrantup.com/docs/installation). @@ -99,12 +114,12 @@ Once installed and setup, you can run vagrant commands in the cloned directory t # in the youki directory # for rootless mode, which is default -vagrant up -vagrant ssh +$ vagrant up +$ vagrant ssh # or if you want to develop in rootful mode -VAGRANT_VAGRANTFILE=Vagrantfile.root vagrant up -VAGRANT_VAGRANTFILE=Vagrantfile.root vagrant ssh +$ VAGRANT_VAGRANTFILE=Vagrantfile.root vagrant up +$ VAGRANT_VAGRANTFILE=Vagrantfile.root vagrant ssh # in virtual machine $ cd youki diff --git a/docs/src/user/basic_usage.md b/docs/src/user/basic_usage.md index 6fbc14ce6..1c3c058b3 100644 --- a/docs/src/user/basic_usage.md +++ b/docs/src/user/basic_usage.md @@ -34,7 +34,7 @@ This will start the daemon and hang up the console. 
You can either start this as In case you don't stop the original daemon, you can get an error message after previous command -``` +```console failed to start daemon: pid file found, ensure docker is not running or delete /var/run/docker.pid ``` @@ -63,19 +63,29 @@ let docker know youki ([source](https://docs.docker.com/engine/reference/commandline/dockerd/#on-linux)). You may need to create this file, if it does not yet exist. A sample content of it: -``` +```json { "default-runtime": "runc", "runtimes": { "youki": { - "path": "/path/to/youki/youki" + "path": "/path/to/youki/youki", + "runtimeArgs": [ + "--debug", + "--systemd-log" + ] } } } ``` After this (need to restart docker at the first time), you can use youki -with docker: `docker run --runtime youki ...`. +with docker: `docker run --runtime youki ...`. You can verify the runtime includes `youki`: + +```console +$ docker info|grep -i runtime + Runtimes: youki runc + Default Runtime: runc +``` #### Using Youki Standalone @@ -143,7 +153,7 @@ sudo ./youki list sudo ./youki delete tutorial_container ``` -The example above shows how to run Youki in a 'rootful' way. To run it without root permissions, that is, in rootless mode, few chagnes are required. +The example above shows how to run Youki in a 'rootful' way. To run it without root permissions, that is, in rootless mode, few changes are required. First, after exporting the rootfs from docker, while generating the config, you will need to pass the rootless flag. This will generate the config withe the options needed for rootless operation of the container. @@ -161,3 +171,13 @@ cd .. ./youki list ./youki delete rootless_container ``` + +#### Log level + +`youki` defaults the log level to `error` in the release build. In the debug +build, the log level defaults to `debug`. The `--log-level` flag can be used to +set the log-level. For least amount of log, we recommend using the `error` log +level. For the most spammy logging, we have a `trace` level. 
+ +For compatibility with `runc` and `crun`, we have a `--debug` flag to set the +log level to `debug`. This flag is ignored if `--log-level` is also set. diff --git a/docs/src/user/libcontainer.md b/docs/src/user/libcontainer.md index 0cbfce1d3..04dcd5636 100644 --- a/docs/src/user/libcontainer.md +++ b/docs/src/user/libcontainer.md @@ -32,4 +32,4 @@ This exposes several modules, each dealing with a specific aspect of working wit - `tty` : this deals with setting up the tty for the container process. -- `utils` : provides various utility functions, such as `parse_env` to parse the env variables, `do_exec` to do an exec syscall and execute a binary in the container process, `get_cgroups_path`, `create_dir_all_with_mode` etc. +- `utils` : provides various utility functions, such as `parse_env` to parse the env variables, `get_cgroups_path`, `create_dir_all_with_mode` etc. diff --git a/docs/src/user/webassembly.md b/docs/src/user/webassembly.md index 8646292f1..456dce021 100644 --- a/docs/src/user/webassembly.md +++ b/docs/src/user/webassembly.md @@ -10,15 +10,19 @@ There are 3 things you need to do to run a WebAssembly module with youki. - Run `build.sh` with `-f wasm-wasmedge` option. - ```console + ```bash ./scripts/build.sh -o . -r -f wasm-wasmedge ``` - > The `wasm-wasmedge` feature will install WasmEdge Runtime library in the `$HOME/.wasmedge` directory. - > To make the library avaible in your system, run the following command: + + > The `wasm-wasmedge` feature will install WasmEdge Runtime library in the `$HOME/.wasmedge` directory. + > To make the library available in your system, run the following command: + > > ```bash > export LD_LIBRARY_PATH=$HOME/.wasmedge/lib > ``` + > > or + > > ```bash > source $HOME/.wasmedge/env > ``` @@ -26,34 +30,35 @@ There are 3 things you need to do to run a WebAssembly module with youki. - Run `build.sh` with `-f wasm-wasmer` option. - ```console + ```bash ./scripts/build.sh -o . 
-r -f wasm-wasmer ``` ## Build a container image with the WebAssembly module -If you want to run a webassembly module with youki, your config.json has to include either **runc.oci.handler** or **module.wasm.image/variant=compat"**. - -It also needs to specifiy a valid .wasm (webassembly binary) or .wat (webassembly test) module as entrypoint for the container. If a wat module is specified it will be compiled to a wasm module by youki before it is executed. The module also needs to be available in the root filesystem of the container obviously. +If you want to run a webassembly module with youki, your config.json has to include either **runc.oci.handler** or **module.wasm.image/variant=compat"**. +It also needs to specify a valid .wasm (webassembly binary) or .wat (webassembly test) module as entrypoint for the container. If a wat module is specified it will be compiled to a wasm module by youki before it is executed. The module also needs to be available in the root filesystem of the container obviously. ```json "ociVersion": "1.0.2-dev", "annotations": { - "run.oci.handler": "wasm" + "run.oci.handler": "wasm" }, "process": { "args": [ - "hello.wasm", - "hello", + "hello.wasm", + "hello", "world" - ], + ], +... +} ... ``` ### Compile a sample wasm module -A simple wasm module can be created by running +A simple wasm module can be created by running ```console rustup target add wasm32-wasi @@ -75,7 +80,9 @@ fn main() { } } ``` + Then compile the program to WASI. + ```console cargo build --target wasm32-wasi ``` @@ -104,7 +111,7 @@ sudo buildah build --annotation "module.wasm.image/variant=compat" -t wasm-modul Run podman with youki as runtime. [^1] -```console +```bash sudo podman --runtime /PATH/WHARE/YOU/BUILT/WITH/WASM-WASMER/youki run localhost/wasm-module 1 2 3 ``` diff --git a/docs/src/youki.md b/docs/src/youki.md index 60d0a3edc..b6502facd 100644 --- a/docs/src/youki.md +++ b/docs/src/youki.md @@ -1,7 +1,7 @@ # Youki

- +

youki is an implementation of the [OCI runtime-spec](https://github.com/opencontainers/runtime-spec) in Rust, similar to [runc](https://github.com/opencontainers/runc). diff --git a/docs/youki.png b/docs/youki.png index 40da2062b..4f4f98c94 100644 Binary files a/docs/youki.png and b/docs/youki.png differ diff --git a/hack/debug.bt b/hack/debug.bt new file mode 100755 index 000000000..4d2e78dc8 --- /dev/null +++ b/hack/debug.bt @@ -0,0 +1,86 @@ +#!/usr/bin/env bpftrace + +BEGIN +{ + printf("Tracing Youki syscalls... Hit Ctrl-C to end.\n"); + printf("%-12s %15s %-8s %-9s %s\n", "TIME", "COMMAND", "PID", "EVENT", "CONTENT"); +} + +tracepoint:syscalls:sys_enter_write +/comm == "4"|| comm == "youki" || comm == "youki:[1:INTER]" || comm == "youki:[2:INIT]"/ +{ + + $s = str(args->buf, args->count); + if ($s != "\n") { + printf("%-12ld %15s %-8d %-9s ", elapsed , comm, pid, "write"); + printf("fd=%d, %s\n", args->fd, $s); + } +} + +tracepoint:syscalls:sys_enter_open, +tracepoint:syscalls:sys_enter_openat +/comm == "4"|| comm == "youki" || comm == "youki:[1:INTER]" || comm == "youki:[2:INIT]"/ +{ + @filename[tid] = args->filename; +} + + +tracepoint:syscalls:sys_exit_open, +tracepoint:syscalls:sys_exit_openat +/@filename[tid]/ +{ + $ret = args->ret; + $fd = $ret >= 0 ? $ret : -1; + $errno = $ret >= 0 ? 
0 : - $ret; + + printf("%-12ld %15s %-8d %-9s ", elapsed , comm, pid, "open"); + printf("errno=%d, fd=%d, file=%s\n", $errno, $fd, str(@filename[tid])); + delete(@filename[tid]); +} + +tracepoint:syscalls:sys_enter_clone3 +/comm == "4"|| comm == "youki" || comm == "youki:[1:INTER]" || comm == "youki:[2:INIT]"/ +{ + printf("%-12ld %15s %-8d %-9s\n", elapsed , comm, pid, "clone3"); +} + +tracepoint:syscalls:sys_enter_setns +/comm == "4"|| comm == "youki" || comm == "youki:[1:INTER]" || comm == "youki:[2:INIT]"/ +{ + printf("%-12ld %15s %-8d %-9s ", elapsed , comm, pid, "setns"); + printf("fd=%d, flag=%d\n", args->fd, args->flags); +} + +tracepoint:syscalls:sys_enter_capset +/comm == "4"|| comm == "youki" || comm == "youki:[1:INTER]" || comm == "youki:[2:INIT]"/ +{ + printf("%-12ld %15s %-8d %-9s\n", elapsed , comm, pid, "capset"); +} + +tracepoint:syscalls:sys_enter_pivot_root +/comm == "4"|| comm == "youki" || comm == "youki:[1:INTER]" || comm == "youki:[2:INIT]"/ +{ + printf("%-12ld %15s %-8d %-9s ", elapsed , comm, pid, "pivt_root"); + printf("new_root=%s, put_old=%s\n", str(args->new_root), str(args->put_old)); +} + +tracepoint:syscalls:sys_enter_mount +/comm == "4"|| comm == "youki" || comm == "youki:[1:INTER]" || comm == "youki:[2:INIT]"/ +{ + printf("%-12ld %15s %-8d %-9s ", elapsed , comm, pid, "mount"); + printf("dev_name=%s, dir_name=%s\n", str(args->dev_name), str(args->dir_name)); +} + +tracepoint:syscalls:sys_enter_setresuid +/comm == "4"|| comm == "youki" || comm == "youki:[1:INTER]" || comm == "youki:[2:INIT]"/ +{ + printf("%-12ld %15s %-8d %-9s ", elapsed , comm, pid, "setresuid"); + printf("ruid=%d, euid=%d, suid=%d\n", args->ruid, args->euid, args->suid); +} + +END +{ + clear(@filename); + printf("Tracing ended.\n"); +} + diff --git a/hack/stress_cargo_test.sh b/hack/stress_cargo_test.sh index b9c4bb467..20923452f 100755 --- a/hack/stress_cargo_test.sh +++ b/hack/stress_cargo_test.sh @@ -1,4 +1,5 @@ -#!/bin/bash -ue +#!/usr/bin/env bash +set -euo 
pipefail # This is a simple script to stress test `cargo test` to rule out flaky tests. diff --git a/justfile b/justfile new file mode 100644 index 000000000..fc347b6bf --- /dev/null +++ b/justfile @@ -0,0 +1,169 @@ +alias build := youki-release +alias youki := youki-dev + +KIND_CLUSTER_NAME := 'youki' + +cwd := justfile_directory() + +# build + +# build all binaries +build-all: youki-release rust-oci-tests-bin runtimetest + +# build youki in dev mode +youki-dev: + {{ cwd }}/scripts/build.sh -o {{ cwd }} -c youki + +# build youki in release mode +youki-release: + {{ cwd }}/scripts/build.sh -o {{ cwd }} -r -c youki + +# build runtimetest binary +runtimetest: + {{ cwd }}/scripts/build.sh -o {{ cwd }} -r -c runtimetest + +# build rust oci tests binary +rust-oci-tests-bin: + {{ cwd }}/scripts/build.sh -o {{ cwd }} -r -c integration-test + +# Tests + +# run oci tests +test-oci: oci-tests rust-oci-tests + +# run all tests except rust-oci +test-all: unittest test-features oci-tests containerd-test # currently not doing rust-oci here + +# run cargo unittests +unittest: + cd ./crates + LD_LIBRARY_PATH=${HOME}/.wasmedge/lib cargo test --all --all-targets --all-features + +# run permutated feature compilation tests +test-features: + {{ cwd }}/scripts/features_test.sh + +# run test against musl target +test-musl: + {{ cwd }}/scripts/musl_test.sh + +# run oci integration tests +oci-tests: + {{ cwd }}/scripts/oci_integration_tests.sh {{ cwd }} + +# run rust oci integration tests +rust-oci-tests: youki-release runtimetest rust-oci-tests-bin + {{ cwd }}/scripts/rust_integration_tests.sh {{ cwd }}/youki + +# validate rust oci integration tests on runc +validate-rust-oci-runc: runtimetest rust-oci-tests-bin + {{ cwd }}/scripts/rust_integration_tests.sh runc + +# run containerd integration tests +containerd-test: youki-dev + VAGRANT_VAGRANTFILE=Vagrantfile.containerd2youki vagrant up + VAGRANT_VAGRANTFILE=Vagrantfile.containerd2youki vagrant provision --provision-with test + 
+[private] +kind-cluster: bin-kind + #!/usr/bin/env bash + set -euo pipefail + + mkdir -p tests/k8s/_out/ + docker buildx build -f tests/k8s/Dockerfile --iidfile=tests/k8s/_out/img --load . + image=$(cat tests/k8s/_out/img) + bin/kind create cluster --name {{ KIND_CLUSTER_NAME }} --image=$image + +# run youki with kind +test-kind: kind-cluster + kubectl --context=kind-{{ KIND_CLUSTER_NAME }} apply -f tests/k8s/deploy.yaml + kubectl --context=kind-{{ KIND_CLUSTER_NAME }} wait deployment nginx-deployment --for condition=Available=True --timeout=90s + kubectl --context=kind-{{ KIND_CLUSTER_NAME }} get pods -o wide + kubectl --context=kind-{{ KIND_CLUSTER_NAME }} delete -f tests/k8s/deploy.yaml + +# Bin + +[private] +bin-kind: + docker buildx build --output=bin/ -f tests/k8s/Dockerfile --target kind-bin . + +# Clean + +# Clean kind test env +clean-test-kind: + kind delete cluster --name {{ KIND_CLUSTER_NAME }} + +# misc + +# run bpftrace hack +hack-bpftrace: + BPFTRACE_STRLEN=120 ./hack/debug.bt + +# run linting on project +lint: + cargo fmt --all -- --check + cargo clippy --all --all-targets --all-features -- -D warnings + +# run spellcheck +spellcheck: + typos + +# run format on project +format: + cargo fmt --all + +# cleans up generated artifacts +clean: + {{ cwd }}/scripts/clean.sh {{ cwd }} + +# install tools used in dev +dev-prepare: + cargo install typos-cli + +# setup dependencies in CI +ci-prepare: + #!/usr/bin/env bash + set -euo pipefail + + # Check if system is Ubuntu + if [[ -f /etc/lsb-release ]]; then + source /etc/lsb-release + if [[ $DISTRIB_ID == "Ubuntu" ]]; then + echo "System is Ubuntu" + apt-get -y update + apt-get install -y \ + pkg-config \ + libsystemd-dev \ + libdbus-glib-1-dev \ + build-essential \ + libelf-dev \ + libseccomp-dev \ + libclang-dev \ + libssl-dev \ + criu + exit 0 + fi + fi + + echo "Unknown system. The CI is only configured for Ubuntu. You will need to forge your own path. Good luck!" 
+ exit 1 + +ci-musl-prepare: ci-prepare + #!/usr/bin/env bash + set -euo pipefail + + # Check if system is Ubuntu + if [[ -f /etc/lsb-release ]]; then + source /etc/lsb-release + if [[ $DISTRIB_ID == "Ubuntu" ]]; then + echo "System is Ubuntu" + apt-get -y update + apt-get install -y \ + musl-dev \ + musl-tools + exit 0 + fi + fi + + echo "Unknown system. The CI is only configured for Ubuntu. You will need to forge your own path. Good luck!" + exit 1 diff --git a/rust-toolchain.toml b/rust-toolchain.toml new file mode 100644 index 000000000..873a49d64 --- /dev/null +++ b/rust-toolchain.toml @@ -0,0 +1,3 @@ +[toolchain] +profile="default" +channel="1.71.0" diff --git a/scripts/build.sh b/scripts/build.sh index 4217fff91..3654c9f28 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -1,6 +1,5 @@ -#!/bin/bash - -set -e +#!/usr/bin/env bash +set -euo pipefail ROOT=$(git rev-parse --show-toplevel) @@ -13,6 +12,7 @@ VERSION=debug TARGET="$(uname -m)-unknown-linux-gnu" CRATE="youki" RUNTIMETEST_TARGET="$ROOT/runtimetest-target" +features="" while getopts f:ro:c:h OPT; do case $OPT in f) features=${OPTARG} diff --git a/scripts/clean.sh b/scripts/clean.sh index 83f5fae79..0b7857ee5 100755 --- a/scripts/clean.sh +++ b/scripts/clean.sh @@ -1,10 +1,9 @@ -#! /bin/bash -# we don't set -eu here, as some of the binaries might be potentially be missing -# and that is fine, that means they are already removed. 
+#!/usr/bin/env bash +set -euo pipefail for bin in youki integration_test runtimetest test.log; do if [ -f $bin ]; then - rm ${1}/$bin + rm -f ${1}/$bin fi done diff --git a/scripts/features_test.sh b/scripts/features_test.sh index 14bc3de2f..6f8275fc8 100755 --- a/scripts/features_test.sh +++ b/scripts/features_test.sh @@ -1,19 +1,33 @@ -#!/bin/bash +#!/usr/bin/env bash +set -euo pipefail -set -eu +test_package_features() { + echo "[feature test] testing $1 with features $2" + cargo build --no-default-features --package "$1" --features "$2" +} -# Build the different features individually -cargo build --no-default-features -F v1 -cargo build --no-default-features -F v2 -cargo build --no-default-features -F systemd -cargo build --no-default-features -F v2 -F cgroupsv2_devices -cargo build --no-default-features -F systemd -F cgroupsv2_devices +test_package_features "libcontainer" "v1" +test_package_features "libcontainer" "v2" +test_package_features "libcontainer" "systemd" +test_package_features "libcontainer" "v2 cgroupsv2_devices" +test_package_features "libcontainer" "systemd cgroupsv2_devices" -# Test the different features individually -cargo test --no-default-features -F v1 -cargo test --no-default-features -F v2 -cargo test --no-default-features -F systemd -cargo test --no-default-features -F v2 -F cgroupsv2_devices -cargo test --no-default-features -F systemd -F cgroupsv2_devices +test_package_features "libcgroups" "v1" +test_package_features "libcgroups" "v2" +test_package_features "libcgroups" "systemd" +test_package_features "libcgroups" "v2 cgroupsv2_devices" +test_package_features "libcgroups" "systemd cgroupsv2_devices" + +test_features() { + echo "[feature test] testing features $1" + cargo build --no-default-features --features "$1" + cargo test run --no-default-features --features "$1" +} + +test_features "v1" +test_features "v2" +test_features "systemd" +test_features "v2 cgroupsv2_devices" +test_features "systemd cgroupsv2_devices" exit 0 \ No 
newline at end of file diff --git a/scripts/musl_test.sh b/scripts/musl_test.sh new file mode 100755 index 000000000..3007b097c --- /dev/null +++ b/scripts/musl_test.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +set -euo pipefail + +test_musl() { + echo "[musl test] testing $1 with features $2" + cargo +nightly build \ + -Zbuild-std \ + --target $(uname -m)-unknown-linux-musl \ + --package "$1" \ + --no-default-features -F "$2" + cargo +nightly test \ + -Zbuild-std \ + --target $(uname -m)-unknown-linux-musl \ + --package "$1" \ + --no-default-features -F "$2" +} + +test_musl "libcontainer" "v1" +test_musl "libcontainer" "v2" +test_musl "libcontainer" "v1 v2" + diff --git a/scripts/oci_integration_tests.sh b/scripts/oci_integration_tests.sh index 9539824f4..c96a110e6 100755 --- a/scripts/oci_integration_tests.sh +++ b/scripts/oci_integration_tests.sh @@ -108,7 +108,7 @@ done for case in "${test_cases[@]}"; do if ! check_environment $case; then - echo "Skip $case bacause your environment doesn't support this test case" + echo "Skip $case because your environment doesn't support this test case" continue fi @@ -122,7 +122,7 @@ for case in "${test_cases[@]}"; do sudo RUST_BACKTRACE=1 RUNTIME=${RUNTIME} ${OCI_TEST_DIR}/validation/$case >$logfile 2>&1 || (cat $logfile && exit 1) if [ 0 -ne $(grep "not ok" $logfile | wc -l ) ]; then if [ 0 -eq $(grep "# cgroupv2 is not supported yet " $logfile | wc -l ) ]; then - echo "Skip $case bacause oci-runtime-tools doesn't support cgroup v2" + echo "Skip $case because oci-runtime-tools doesn't support cgroup v2" continue; fi cat $logfile diff --git a/scripts/rust_integration_tests.sh b/scripts/rust_integration_tests.sh index 4d36ca270..553ea4402 100755 --- a/scripts/rust_integration_tests.sh +++ b/scripts/rust_integration_tests.sh @@ -24,7 +24,7 @@ if [ ! 
-f ${ROOT}/bundle.tar.gz ]; then fi touch ${LOGFILE} -sudo YOUKI_LOG_LEVEL="error" ${ROOT}/integration_test run --runtime "$RUNTIME" --runtimetest ${ROOT}/runtimetest > $LOGFILE +sudo ${ROOT}/integration_test run --runtime "$RUNTIME" --runtimetest ${ROOT}/runtimetest > $LOGFILE if [ 0 -ne $(grep "not ok" $LOGFILE | wc -l ) ]; then cat $LOGFILE diff --git a/tests/k8s/Dockerfile b/tests/k8s/Dockerfile new file mode 100644 index 000000000..072950827 --- /dev/null +++ b/tests/k8s/Dockerfile @@ -0,0 +1,38 @@ +# syntax=docker/dockerfile:1.4 + +ARG KIND_NODE_VERSION=v1.23.13 + +FROM kindest/node:${KIND_NODE_VERSION} AS kind-base + +FROM kind-base AS shim-build +RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs > /tmp/rustup.sh && sh /tmp/rustup.sh -y --profile=minimal +ENV PATH="/root/.cargo/bin:${PATH}" +WORKDIR /shim +COPY ./youki /shim/youki + +FROM scratch AS shim +COPY --from=shim-build /shim/youki / + +FROM kind-base AS kind-fetch +ARG TARGETARCH +ARG KIND_VERSION=v0.17.0 +RUN curl -sSLf https://kind.sigs.k8s.io/dl/${KIND_VERSION}/kind-linux-${TARGETARCH} > /root/kind && chmod +x /root/kind + +FROM scratch AS kind-bin +COPY --from=kind-fetch /root/kind /kind + +FROM kind-base +RUN <<EOF +echo '[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.youki]' >> /etc/containerd/config.toml +echo ' runtime_type = "io.containerd.runc.v2"' >> /etc/containerd/config.toml +echo ' [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.youki.options]' >> /etc/containerd/config.toml +echo ' BinaryName = "/usr/local/bin/youki"' >> /etc/containerd/config.toml +sed -i 's,SystemdCgroup = true,,' /etc/containerd/config.toml +EOF +COPY justfile justfile +RUN curl -o just.tar.gz -L https://github.com/casey/just/releases/download/1.14.0/just-1.14.0-x86_64-unknown-linux-musl.tar.gz +RUN tar zxvf just.tar.gz just +RUN ./just ci-prepare +COPY --link --from=shim /* /usr/local/bin/ + diff --git a/tests/k8s/deploy.yaml b/tests/k8s/deploy.yaml new file mode 100644 index 000000000..5ff800958 --- /dev/null +++ b/tests/k8s/deploy.yaml @@ -0,0 +1,26 @@ 
+apiVersion: node.k8s.io/v1 +kind: RuntimeClass +metadata: + name: youki +handler: youki +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment +spec: + selector: + matchLabels: + app: nginx + replicas: 2 + template: + metadata: + labels: + app: nginx + spec: + runtimeClassName: youki + containers: + - name: nginx + image: nginx:alpine + ports: + - containerPort: 80 diff --git a/tests/oci-runtime-tests/src/github.com/opencontainers/runtime-tools b/tests/oci-runtime-tests/src/github.com/opencontainers/runtime-tools index 0105384f6..a6a073817 160000 --- a/tests/oci-runtime-tests/src/github.com/opencontainers/runtime-tools +++ b/tests/oci-runtime-tests/src/github.com/opencontainers/runtime-tools @@ -1 +1 @@ -Subproject commit 0105384f68e16803891d0a17d9067b1def6a2778 +Subproject commit a6a073817ab0311b14dadeb6491b968c5bc35d7e diff --git a/tests/rust-integration-tests/Makefile b/tests/rust-integration-tests/Makefile deleted file mode 100644 index 25904e630..000000000 --- a/tests/rust-integration-tests/Makefile +++ /dev/null @@ -1,22 +0,0 @@ -.PHONY: runtimetest integration-test - - -TGT = x86_64-unknown-linux-gnu -FLAG = -ifeq ("$(FLAG)","--release") -DIR = release -else -DIR = debug -endif - - -all: runtimetest integration-test - -runtimetest: - cd ./runtimetest && cargo build $(FLAG) && cp ./target/$(TGT)/$(DIR)/runtimetest ../runtimetest_bin - -integration-test: - cd ./integration_test && cargo build $(FLAG) && cp ./target/$(DIR)/integration_test ../integration_test_bin - -clean: - rm ./integration_test_bin && rm ./runtimetest_bin \ No newline at end of file diff --git a/tests/rust-integration-tests/integration_test/Cargo.toml b/tests/rust-integration-tests/integration_test/Cargo.toml index aa181cde5..c9be1b97f 100644 --- a/tests/rust-integration-tests/integration_test/Cargo.toml +++ b/tests/rust-integration-tests/integration_test/Cargo.toml @@ -5,27 +5,30 @@ edition = "2021" [dependencies] anyhow = "1.0" -chrono = { version="0.4" } +chrono = 
{ version = "0.4", default-features = false, features = ["clock"] } flate2 = "1.0" libcgroups = { path = "../../../crates/libcgroups" } libcontainer = { path = "../../../crates/libcontainer" } -log = { version = "0.4", features = ["std"] } -nix = "0.25.0" -num_cpus = "1.15" -oci-spec = "0.6.0" -once_cell = "1.17.0" -pnet_datalink = "0.31.0" -procfs = "0.14.2" +nix = "0.26.2" +num_cpus = "1.16" +oci-spec = { version = "0.6.1", features = ["runtime"] } +once_cell = "1.18.0" +pnet_datalink = "0.34.0" +procfs = "0.15.1" rand = "0.8.5" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" tar = "0.4" test_framework = { path = "../test_framework" } -uuid = "1.3" +uuid = "1.4" which = "4.4.0" +tempfile = "3" +scopeguard = "1.2.0" +tracing = { version = "0.1.37", features = ["attributes"]} +tracing-subscriber = { version = "0.3.16", features = ["json", "env-filter"] } [dependencies.clap] -version = "4.0.32" +version = "4.1.6" default-features = false features = ["std", "suggestions", "derive", "cargo", "help", "usage", "error-context"] diff --git a/tests/rust-integration-tests/integration_test/src/logger.rs b/tests/rust-integration-tests/integration_test/src/logger.rs index 61d7e74b2..e85934616 100644 --- a/tests/rust-integration-tests/integration_test/src/logger.rs +++ b/tests/rust-integration-tests/integration_test/src/logger.rs @@ -1,8 +1,7 @@ use anyhow::{Context, Result}; -use log::{LevelFilter, Log, Metadata, Record}; use std::borrow::Cow; -use std::io::{stderr, Write}; use std::str::FromStr; +use tracing::metadata::LevelFilter; const LOG_LEVEL_ENV_NAME: &str = "YOUKI_INTEGRATION_LOG_LEVEL"; @@ -11,10 +10,7 @@ const LOG_LEVEL_ENV_NAME: &str = "YOUKI_INTEGRATION_LOG_LEVEL"; /// is done only once due to use of OnceCell pub fn init(debug: bool) -> Result<()> { let level = detect_log_level(debug).context("failed to parse log level")?; - let logger = IntegrationLogger::new(level.to_level()); - log::set_boxed_logger(Box::new(logger)) - .map(|()| 
log::set_max_level(level)) - .expect("set logger failed"); + tracing_subscriber::fmt().with_max_level(level).init(); Ok(()) } @@ -30,62 +26,3 @@ fn detect_log_level(is_debug: bool) -> Result { Ok(LevelFilter::from_str(filter.as_ref())?) } - -struct IntegrationLogger { - /// Indicates level up to which logs are to be printed - level: Option, -} - -impl IntegrationLogger { - /// Create new logger - pub fn new(level: Option) -> Self { - Self { level } - } -} - -/// Implements Log interface given by log crate, so we can use its functionality -impl Log for IntegrationLogger { - /// Check if level of given log is enabled or not - fn enabled(&self, metadata: &Metadata) -> bool { - if let Some(level) = self.level { - metadata.level() <= level - } else { - false - } - } - - /// Function to carry out logging - fn log(&self, record: &Record) { - if self.enabled(record.metadata()) { - let log_msg = text_format(record); - // if log file is set, write to it, else write to stderr - let _ = writeln!(stderr(), "{}", log_msg); - } - } - - /// Flush logs to file - fn flush(&self) { - stderr().flush().expect("failed to flush"); - } -} - -fn text_format(record: &log::Record) -> String { - let log_msg = match (record.file(), record.line()) { - (Some(file), Some(line)) => format!( - "[{} {}:{}] {} {}\r", - record.level(), - file, - line, - chrono::Local::now().to_rfc3339(), - record.args() - ), - (_, _) => format!( - "[{}] {} {}\r", - record.level(), - chrono::Local::now().to_rfc3339(), - record.args() - ), - }; - - log_msg -} diff --git a/tests/rust-integration-tests/integration_test/src/main.rs b/tests/rust-integration-tests/integration_test/src/main.rs index 986a5a375..7571bad26 100644 --- a/tests/rust-integration-tests/integration_test/src/main.rs +++ b/tests/rust-integration-tests/integration_test/src/main.rs @@ -4,6 +4,7 @@ mod utils; use crate::tests::domainname::get_domainname_tests; use crate::tests::hooks::get_hooks_tests; use crate::tests::hostname::get_hostname_test; +use 
crate::tests::intel_rdt::get_intel_rdt_test; use crate::tests::lifecycle::{ContainerCreate, ContainerLifecycle}; use crate::tests::linux_ns_itype::get_ns_itype_tests; use crate::tests::mounts_recursive::get_mounts_recursive_test; @@ -72,7 +73,7 @@ fn main() -> Result<()> { let opts: Opts = Opts::parse(); if let Err(e) = logger::init(opts.debug) { - eprintln!("logger could not be initialized: {:?}", e); + eprintln!("logger could not be initialized: {e:?}"); } let mut tm = TestManager::new(); @@ -94,6 +95,7 @@ fn main() -> Result<()> { let hostname = get_hostname_test(); let mounts_recursive = get_mounts_recursive_test(); let domainname = get_domainname_tests(); + let intel_rdt = get_intel_rdt_test(); tm.add_test_group(Box::new(cl)); tm.add_test_group(Box::new(cc)); @@ -112,6 +114,7 @@ fn main() -> Result<()> { tm.add_test_group(Box::new(hostname)); tm.add_test_group(Box::new(mounts_recursive)); tm.add_test_group(Box::new(domainname)); + tm.add_test_group(Box::new(intel_rdt)); tm.add_cleanup(Box::new(cgroups::cleanup_v1)); tm.add_cleanup(Box::new(cgroups::cleanup_v2)); @@ -132,7 +135,7 @@ fn get_abs_path(rel_path: &Path) -> PathBuf { Err(_) => match which::which(rel_path) { Ok(path) => path, Err(e) => { - eprintln!("Error in finding path {:?} : {}\nexiting.", rel_path, e); + eprintln!("Error in finding path {rel_path:?} : {e}\nexiting."); std::process::exit(66); } }, @@ -158,7 +161,7 @@ fn run(opts: Run, test_manager: &TestManager) -> Result<()> { fn list(test_manager: &TestManager) -> Result<()> { for test_group in test_manager.tests_groups() { - println!("{}", test_group); + println!("{test_group}"); } Ok(()) diff --git a/tests/rust-integration-tests/integration_test/src/tests/cgroups/blkio.rs b/tests/rust-integration-tests/integration_test/src/tests/cgroups/blkio.rs index 7e8248117..dd095314f 100644 --- a/tests/rust-integration-tests/integration_test/src/tests/cgroups/blkio.rs +++ b/tests/rust-integration-tests/integration_test/src/tests/cgroups/blkio.rs @@ -77,25 
+77,16 @@ fn supports_throttle_iops() -> bool { fn parse_device_data<'a>(device_type: &'static str, line: &'a str) -> Result<(i64, i64, &'a str)> { let (device_id, value) = line .split_once(' ') - .with_context(|| format!("invalid {} device format : found {}", device_type, line))?; + .with_context(|| format!("invalid {device_type} device format : found {line}"))?; let (major_str, minor_str) = device_id.split_once(':').with_context(|| { - format!( - "invalid major-minor number format for {} device : found {}", - device_type, device_id - ) + format!("invalid major-minor number format for {device_type} device : found {device_id}") })?; let major: i64 = major_str.parse().with_context(|| { - format!( - "Error in parsing {} device major number : found {}", - device_type, major_str - ) + format!("Error in parsing {device_type} device major number : found {major_str}") })?; let minor: i64 = minor_str.parse().with_context(|| { - format!( - "Error in parsing {} device minor number : found {}", - device_type, minor_str - ) + format!("Error in parsing {device_type} device minor number : found {minor_str}") })?; Ok((major, minor, value)) @@ -130,24 +121,18 @@ fn get_blkio_data(path: &Path) -> Result { // weight let weight_path = path.join("blkio.weight"); let weight_string = fs::read_to_string(&weight_path) - .with_context(|| format!("error in reading block io weight from {:?}", weight_path))?; - device.weight = weight_string.parse().with_context(|| { - format!("error in parsing block io weight : found {}", weight_string) - })?; + .with_context(|| format!("error in reading block io weight from {weight_path:?}"))?; + device.weight = weight_string + .parse() + .with_context(|| format!("error in parsing block io weight : found {weight_string}"))?; // leaf weight let leaf_weight_path = path.join("blkio.leaf_weight"); let leaf_weight_string = fs::read_to_string(&leaf_weight_path).with_context(|| { - format!( - "error in reading block io leaf weight from {:?}", - leaf_weight_path - ) 
+ format!("error in reading block io leaf weight from {leaf_weight_path:?}") })?; device.leaf_weight = leaf_weight_string.parse().with_context(|| { - format!( - "error in parsing block io weight : found {}", - leaf_weight_string - ) + format!("error in parsing block io weight : found {leaf_weight_string}") })?; } @@ -157,10 +142,7 @@ fn get_blkio_data(path: &Path) -> Result { // device weight let device_weight_path = path.join("blkio.weight_device"); let device_weight_string = fs::read_to_string(&device_weight_path).with_context(|| { - format!( - "error in reading block io weight device from {:?}", - device_weight_path - ) + format!("error in reading block io weight device from {device_weight_path:?}") })?; let mut weight_devices = Vec::new(); // format is : @@ -170,10 +152,7 @@ fn get_blkio_data(path: &Path) -> Result { major, minor, weight: Some(weight_str.parse().with_context(|| { - format!( - "error in parsing weight of weight device, found {}", - weight_str - ) + format!("error in parsing weight of weight device, found {weight_str}") })?), leaf_weight: None, }); @@ -184,18 +163,14 @@ fn get_blkio_data(path: &Path) -> Result { let device_leaf_weight_string = fs::read_to_string(&device_leaf_weight_path).with_context(|| { format!( - "error in reading block io leaf weight device from {:?}", - device_leaf_weight_path + "error in reading block io leaf weight device from {device_leaf_weight_path:?}" ) })?; for line in device_leaf_weight_string.lines() { let (major, minor, weight_str) = parse_device_data("weight", line)?; let leaf_weight: u16 = weight_str.parse().with_context(|| { - format!( - "error in parsing leaf weight of weight device : found {}", - weight_str - ) + format!("error in parsing leaf weight of weight device : found {weight_str}") })?; let mut found = false; for dev in &mut weight_devices { @@ -226,8 +201,7 @@ fn get_blkio_data(path: &Path) -> Result { let throttle_read_bps_string = fs::read_to_string(&throttle_read_bps_path).with_context(|| { 
format!( - "error in reading block io read bps device from {:?}", - throttle_read_bps_path + "error in reading block io read bps device from {throttle_read_bps_path:?}" ) })?; let mut throttle_devices = Vec::new(); @@ -237,10 +211,7 @@ fn get_blkio_data(path: &Path) -> Result { major, minor, rate: rate_str.parse().with_context(|| { - format!( - "error in parsing throttle read bps rate : found {}", - rate_str - ) + format!("error in parsing throttle read bps rate : found {rate_str}") })?, }); } @@ -251,8 +222,7 @@ fn get_blkio_data(path: &Path) -> Result { let throttle_write_bps_string = fs::read_to_string(&throttle_write_bps_path).with_context(|| { format!( - "error in reading block io write bps device from {:?}", - throttle_write_bps_path + "error in reading block io write bps device from {throttle_write_bps_path:?}" ) })?; let mut throttle_devices = Vec::new(); @@ -262,10 +232,7 @@ fn get_blkio_data(path: &Path) -> Result { major, minor, rate: rate_str.parse().with_context(|| { - format!( - "error in parsing throttle write bps rate : found {}", - rate_str - ) + format!("error in parsing throttle write bps rate : found {rate_str}") })?, }); } @@ -279,8 +246,7 @@ fn get_blkio_data(path: &Path) -> Result { let throttle_read_iops_string = fs::read_to_string(&throttle_read_iops_path).with_context(|| { format!( - "error in reading block io read iops device from {:?}", - throttle_read_iops_path + "error in reading block io read iops device from {throttle_read_iops_path:?}" ) })?; let mut throttle_devices = Vec::new(); @@ -290,10 +256,7 @@ fn get_blkio_data(path: &Path) -> Result { major, minor, rate: rate_str.parse().with_context(|| { - format!( - "error in parsing throttle read iops rate : found {}", - rate_str - ) + format!("error in parsing throttle read iops rate : found {rate_str}") })?, }); } @@ -304,8 +267,7 @@ fn get_blkio_data(path: &Path) -> Result { let throttle_write_iops_string = fs::read_to_string(&throttle_write_iops_path) .with_context(|| { format!( - 
"error in reading block io write iops device from {:?}", - throttle_write_iops_path + "error in reading block io write iops device from {throttle_write_iops_path:?}" ) })?; let mut throttle_devices = Vec::new(); @@ -315,10 +277,7 @@ fn get_blkio_data(path: &Path) -> Result { major, minor, rate: rate_str.parse().with_context(|| { - format!( - "error in parsing throttle write iops rate : found {}", - rate_str - ) + format!("error in parsing throttle write iops rate : found {rate_str}") })?, }); } diff --git a/tests/rust-integration-tests/integration_test/src/tests/cgroups/cpu/v1.rs b/tests/rust-integration-tests/integration_test/src/tests/cgroups/cpu/v1.rs index 5bf4d708c..724121dad 100644 --- a/tests/rust-integration-tests/integration_test/src/tests/cgroups/cpu/v1.rs +++ b/tests/rust-integration-tests/integration_test/src/tests/cgroups/cpu/v1.rs @@ -35,7 +35,7 @@ fn get_realtime_runtime() -> Option { fn test_cpu_cgroups() -> TestResult { let cgroup_name = "test_cpu_cgroups"; // Kernel counts 0 as a CPU, so on a system with 8 logical cores you will need `0-7` range set. 
- let cpu_range = format!("0-{}", num_cpus::get() - 1); + let cpu_range = format!("0-{}", num_cpus::get().saturating_sub(1)); let realtime_period = get_realtime_period(); let realtime_runtime = get_realtime_runtime(); diff --git a/tests/rust-integration-tests/integration_test/src/tests/cgroups/cpu/v2.rs b/tests/rust-integration-tests/integration_test/src/tests/cgroups/cpu/v2.rs index 0dff09acc..0ecc5e4e4 100644 --- a/tests/rust-integration-tests/integration_test/src/tests/cgroups/cpu/v2.rs +++ b/tests/rust-integration-tests/integration_test/src/tests/cgroups/cpu/v2.rs @@ -16,9 +16,9 @@ use libcgroups::{ v2::controller_type::ControllerType, }; use libcontainer::utils::PathBufExt; -use log::debug; use oci_spec::runtime::{LinuxCpuBuilder, Spec}; use test_framework::{assert_result_eq, test_result, ConditionalTest, TestGroup, TestResult}; +use tracing::debug; use super::create_spec; @@ -143,7 +143,7 @@ fn test_cpu_quota_valid_set() -> TestResult { }) } -/// Tests if the cpu quota is the defalt value (max) if a cpu quota of zero has been specified +/// Tests if the cpu quota is the default value (max) if a cpu quota of zero has been specified fn test_cpu_quota_zero_default_set() -> TestResult { let cpu_quota = 0; let cpu = test_result!(LinuxCpuBuilder::default() @@ -266,7 +266,7 @@ fn check_cpu_weight(cgroup_name: &str, expected_weight: u64) -> Result<()> { let actual_weight = data .parse::() - .with_context(|| format!("failed to parse {:?}", data))?; + .with_context(|| format!("failed to parse {data:?}"))?; assert_result_eq!(actual_weight, expected_weight, "unexpected cpu weight") } @@ -274,7 +274,7 @@ fn check_cpu_idle(cgroup_name: &str, expected_value: i64) -> Result<()> { let data = read_cgroup_data(cgroup_name, "cpu.idle")?; assert_result_eq!( data.parse::() - .with_context(|| format!("failed to parse {:?}", data))?, + .with_context(|| format!("failed to parse {data:?}"))?, expected_value ) } @@ -300,14 +300,14 @@ fn check_cpu_max(cgroup_name: &str, expected_quota: 
i64, expected_period: u64) - } else { let actual_quota = quota .parse::() - .with_context(|| format!("failed to parse {:?}", quota))?; + .with_context(|| format!("failed to parse {quota:?}"))?; assert_result_eq!(expected_quota, actual_quota, "unexpected cpu quota")?; } let period = parts[1].trim(); let actual_period = period .parse::() - .with_context(|| format!("failed to parse {:?}", period))?; + .with_context(|| format!("failed to parse {period:?}"))?; assert_result_eq!(expected_period, actual_period, "unexpected cpu period") } @@ -317,9 +317,9 @@ fn read_cgroup_data(cgroup_name: &str, cgroup_file: &str) -> Result { .join(cgroup_name) .join(cgroup_file); - log::debug!("reading value from {:?}", cgroup_path); + debug!("reading value from {:?}", cgroup_path); let content = fs::read_to_string(&cgroup_path) - .with_context(|| format!("failed to read {:?}", cgroup_path))?; + .with_context(|| format!("failed to read {cgroup_path:?}"))?; let trimmed = content.trim(); Ok(trimmed.to_owned()) } @@ -337,12 +337,12 @@ fn prepare_cpu_max(spec: &Spec, quota: &str, period: &str) -> Result<()> { let full_cgroup_path = PathBuf::from(common::DEFAULT_CGROUP_ROOT).join_safely(cgroups_path)?; fs::create_dir_all(&full_cgroup_path) - .with_context(|| format!("could not create cgroup {:?}", full_cgroup_path))?; + .with_context(|| format!("could not create cgroup {full_cgroup_path:?}"))?; attach_controller(Path::new(DEFAULT_CGROUP_ROOT), cgroups_path, "cpu")?; let cpu_max_path = full_cgroup_path.join("cpu.max"); - fs::write(&cpu_max_path, format!("{} {}", quota, period)) - .with_context(|| format!("failed to write to {:?}", cpu_max_path))?; + fs::write(&cpu_max_path, format!("{quota} {period}")) + .with_context(|| format!("failed to write to {cpu_max_path:?}"))?; Ok(()) } diff --git a/tests/rust-integration-tests/integration_test/src/tests/cgroups/mod.rs b/tests/rust-integration-tests/integration_test/src/tests/cgroups/mod.rs index bf906001a..035c23613 100644 --- 
a/tests/rust-integration-tests/integration_test/src/tests/cgroups/mod.rs +++ b/tests/rust-integration-tests/integration_test/src/tests/cgroups/mod.rs @@ -14,7 +14,7 @@ pub fn cleanup_v1() -> Result<()> { let runtime_test = subsystem.join("runtime-test"); if runtime_test.exists() { fs::remove_dir(&runtime_test) - .with_context(|| format!("failed to delete {:?}", runtime_test))?; + .with_context(|| format!("failed to delete {runtime_test:?}"))?; } } @@ -25,7 +25,7 @@ pub fn cleanup_v2() -> Result<()> { let runtime_test = Path::new("/sys/fs/cgroup/runtime-test"); if runtime_test.exists() { let _: Result, _> = fs::read_dir(runtime_test) - .with_context(|| format!("failed to read {:?}", runtime_test))? + .with_context(|| format!("failed to read {runtime_test:?}"))? .filter_map(|e| e.ok()) .map(|e| e.path()) .filter(|e| e.is_dir()) @@ -33,7 +33,7 @@ pub fn cleanup_v2() -> Result<()> { .collect(); fs::remove_dir(runtime_test) - .with_context(|| format!("failed to delete {:?}", runtime_test))?; + .with_context(|| format!("failed to delete {runtime_test:?}"))?; } Ok(()) @@ -60,7 +60,6 @@ pub fn attach_controller(cgroup_root: &Path, cgroup_path: &Path, controller: &st let mut components = cgroup_path .components() - .into_iter() .filter(|c| c.ne(&RootDir)) .peekable(); @@ -77,10 +76,6 @@ pub fn attach_controller(cgroup_root: &Path, cgroup_path: &Path, controller: &st fn write_controller(cgroup_path: &Path, controller: &str) -> Result<()> { let controller_file = cgroup_path.join("cgroup.subtree_control"); - fs::write(controller_file, format!("+{}", controller)).with_context(|| { - format!( - "failed to attach {} controller to {:?}", - controller, cgroup_path - ) - }) + fs::write(controller_file, format!("+{controller}")) + .with_context(|| format!("failed to attach {controller} controller to {cgroup_path:?}")) } diff --git a/tests/rust-integration-tests/integration_test/src/tests/cgroups/pids.rs b/tests/rust-integration-tests/integration_test/src/tests/cgroups/pids.rs index 
8fac789d7..815127076 100644 --- a/tests/rust-integration-tests/integration_test/src/tests/cgroups/pids.rs +++ b/tests/rust-integration-tests/integration_test/src/tests/cgroups/pids.rs @@ -85,7 +85,7 @@ fn check_pid_limit_set(cgroup_name: &str, expected: i64) -> Result<()> { .join(cgroup_name) .join("pids.max"); let content = fs::read_to_string(&cgroup_path) - .with_context(|| format!("failed to read {:?}", cgroup_path))?; + .with_context(|| format!("failed to read {cgroup_path:?}"))?; let trimmed = content.trim(); if trimmed.is_empty() { @@ -106,7 +106,7 @@ fn check_pid_limit_set(cgroup_name: &str, expected: i64) -> Result<()> { let actual: i64 = trimmed .parse() - .with_context(|| format!("could not parse {:?}", trimmed))?; + .with_context(|| format!("could not parse {trimmed:?}"))?; if expected != actual { bail!( "expected {:?} to contain a pid limit of {}, but the limit was {}", @@ -125,7 +125,7 @@ fn check_pids_are_unlimited(cgroup_name: &str) -> Result<()> { .join(cgroup_name) .join("pids.max"); let content = fs::read_to_string(&cgroup_path) - .with_context(|| format!("failed to read {:?}", cgroup_path))?; + .with_context(|| format!("failed to read {cgroup_path:?}"))?; let trimmed = content.trim(); if trimmed.is_empty() { diff --git a/tests/rust-integration-tests/integration_test/src/tests/hooks/invoke.rs b/tests/rust-integration-tests/integration_test/src/tests/hooks/invoke.rs index fbb22299f..e47ad2249 100644 --- a/tests/rust-integration-tests/integration_test/src/tests/hooks/invoke.rs +++ b/tests/rust-integration-tests/integration_test/src/tests/hooks/invoke.rs @@ -26,7 +26,7 @@ fn write_log_hook(content: &str) -> Hook { .args(vec![ "sh".to_string(), "-c".to_string(), - format!("echo '{}' >> {}", content, output,), + format!("echo '{content}' >> {output}",), ]) .build() .expect("could not build hook") @@ -69,7 +69,7 @@ fn get_test(test_name: &'static str) -> Test { let spec = get_spec(); let id = generate_uuid(); let id_str = id.to_string(); - let bundle = 
prepare_bundle(&id).unwrap(); + let bundle = prepare_bundle().unwrap(); set_config(&bundle, &spec).unwrap(); create_container(&id_str, &bundle).unwrap().wait().unwrap(); start_container(&id_str, &bundle).unwrap().wait().unwrap(); @@ -85,7 +85,7 @@ fn get_test(test_name: &'static str) -> Test { delete_hook_output_file(); if log != "pre-start1 called\npre-start2 called\npost-start1 called\npost-start2 called\npost-stop1 called\npost-stop2 called\n" { return TestResult::Failed(anyhow!( - "error : hooks must be called in the listed order" + "error : hooks must be called in the listed order, {log:?}" )); } TestResult::Passed diff --git a/tests/rust-integration-tests/integration_test/src/tests/intel_rdt/intel_rdt_test.rs b/tests/rust-integration-tests/integration_test/src/tests/intel_rdt/intel_rdt_test.rs new file mode 100644 index 000000000..9e996dbfe --- /dev/null +++ b/tests/rust-integration-tests/integration_test/src/tests/intel_rdt/intel_rdt_test.rs @@ -0,0 +1,60 @@ +use anyhow::{Context, Result}; +use libcontainer::process::intel_rdt::find_resctrl_mount_point; + +use oci_spec::runtime::{LinuxBuilder, LinuxIntelRdt, Spec, SpecBuilder}; +use test_framework::{test_result, TestResult}; + +use crate::utils::{test_outside_container, test_utils::check_container_created}; + +fn create_spec( + maybe_l3_cache: Option<&str>, + maybe_mem_bw: Option<&str>, + maybe_clos_id: Option<&str>, +) -> Result { + let mut intel_rdt = LinuxIntelRdt::default(); + intel_rdt.set_l3_cache_schema(maybe_l3_cache.map(|x| x.to_owned())); + intel_rdt.set_mem_bw_schema(maybe_mem_bw.map(|x| x.to_owned())); + intel_rdt.set_clos_id(maybe_clos_id.map(|x| x.to_owned())); + + // Create the Linux Spec + let linux_spec = LinuxBuilder::default() + .intel_rdt(intel_rdt) + .build() + .context("failed to build linux spec")?; + + // Create the top level Spec + let spec = SpecBuilder::default() + .linux(linux_spec) + .build() + .context("failed to build spec")?; + + Ok(spec) +} + +pub fn test_intel_rdt() -> 
TestResult { + let cases = vec![ + test_result!(create_spec(Some("L3:0=fff"), Some("MB:0=70"), None)), + test_result!(create_spec(Some("L3:0=fff"), None, None)), + test_result!(create_spec(None, Some("MB:0=70"), None)), + test_result!(create_spec(None, None, None)), + ]; + + for spec in cases.into_iter() { + let test_result = test_outside_container(spec, &|data| { + test_result!(check_container_created(&data)); + + TestResult::Passed + }); + if let TestResult::Failed(_) = test_result { + return test_result; + } + } + + TestResult::Passed +} + +pub fn can_run() -> bool { + // Ensure the resctrl pseudo-filesystem is mounted. + let res = find_resctrl_mount_point(); + res.is_ok() +} diff --git a/tests/rust-integration-tests/integration_test/src/tests/intel_rdt/mod.rs b/tests/rust-integration-tests/integration_test/src/tests/intel_rdt/mod.rs new file mode 100644 index 000000000..9ba805b56 --- /dev/null +++ b/tests/rust-integration-tests/integration_test/src/tests/intel_rdt/mod.rs @@ -0,0 +1,14 @@ +use test_framework::{ConditionalTest, TestGroup}; + +use self::intel_rdt_test::{can_run, test_intel_rdt}; + +mod intel_rdt_test; + +pub fn get_intel_rdt_test() -> TestGroup { + let mut test_group = TestGroup::new("intel_rdt"); + let intel_rdt = ConditionalTest::new("intel_rdt", Box::new(can_run), Box::new(test_intel_rdt)); + + test_group.add(vec![Box::new(intel_rdt)]); + + test_group +} diff --git a/tests/rust-integration-tests/integration_test/src/tests/lifecycle/checkpoint.rs b/tests/rust-integration-tests/integration_test/src/tests/lifecycle/checkpoint.rs index 57ba7b703..fc3ddd087 100644 --- a/tests/rust-integration-tests/integration_test/src/tests/lifecycle/checkpoint.rs +++ b/tests/rust-integration-tests/integration_test/src/tests/lifecycle/checkpoint.rs @@ -1,7 +1,6 @@ use super::get_result_from_output; use crate::utils::get_runtime_path; use crate::utils::test_utils::State; -use crate::utils::{create_temp_dir, generate_uuid}; use anyhow::anyhow; use std::path::Path; 
use std::process::{Command, Stdio}; @@ -62,7 +61,7 @@ fn setup_network_namespace(project_path: &Path, id: &str) -> Result<(), TestResu .stdout(Stdio::piped()) .stderr(Stdio::piped()) .arg("-t") - .arg(format!("{}", pid)) + .arg(format!("{pid}")) .arg("-a") .args(vec!["/bin/ip", "link", "set", "up", "dev", "lo"]) .spawn() @@ -88,7 +87,7 @@ fn checkpoint( return e; } - let temp_dir = match create_temp_dir(&generate_uuid()) { + let temp_dir = match tempfile::tempdir() { Ok(td) => td, Err(e) => { return TestResult::Failed(anyhow::anyhow!( @@ -131,9 +130,8 @@ fn checkpoint( .expect("failed to execute checkpoint command") .wait_with_output(); - let result = get_result_from_output(checkpoint); - if let TestResult::Failed(_) = result { - return result; + if let Err(e) = get_result_from_output(checkpoint) { + return TestResult::Failed(anyhow::anyhow!("failed to execute checkpoint command: {e}")); } // Check for complete checkpoint diff --git a/tests/rust-integration-tests/integration_test/src/tests/lifecycle/container_create.rs b/tests/rust-integration-tests/integration_test/src/tests/lifecycle/container_create.rs index 53afbbb58..6b4eacb10 100644 --- a/tests/rust-integration-tests/integration_test/src/tests/lifecycle/container_create.rs +++ b/tests/rust-integration-tests/integration_test/src/tests/lifecycle/container_create.rs @@ -1,6 +1,6 @@ use super::{create, delete, kill}; -use crate::utils::TempDir; use crate::utils::{generate_uuid, prepare_bundle}; +use tempfile::TempDir; use test_framework::{TestResult, TestableGroup}; pub struct ContainerCreate { @@ -17,7 +17,7 @@ impl Default for ContainerCreate { impl ContainerCreate { pub fn new() -> Self { let id = generate_uuid(); - let temp_dir = prepare_bundle(&id).unwrap(); + let temp_dir = prepare_bundle().unwrap(); ContainerCreate { project_path: temp_dir, container_id: id.to_string(), @@ -26,39 +26,52 @@ impl ContainerCreate { // runtime should not create container with empty id fn create_empty_id(&self) -> TestResult { 
- let temp = create::create(&self.project_path, ""); - match temp { - TestResult::Passed => TestResult::Failed(anyhow::anyhow!( - "Container should not have been created with empty id, but was created." + match create::create(self.project_path.path(), "") { + Ok(()) => TestResult::Failed(anyhow::anyhow!( + "container should not have been created with empty id, but was created." )), - TestResult::Failed(_) => TestResult::Passed, - TestResult::Skipped => TestResult::Skipped, + Err(_) => TestResult::Passed, } } // runtime should create container with valid id fn create_valid_id(&self) -> TestResult { - let temp = create::create(&self.project_path, &self.container_id); - if let TestResult::Passed = temp { - kill::kill(&self.project_path, &self.container_id); - delete::delete(&self.project_path, &self.container_id); + match create::create(self.project_path.path(), &self.container_id) { + Ok(_) => { + let _ = kill::kill(self.project_path.path(), &self.container_id); + let _ = delete::delete(self.project_path.path(), &self.container_id); + TestResult::Passed + } + Err(err) => { + TestResult::Failed(err.context( + "container should have been created with valid id, but was not created.", + )) + } } - temp } // runtime should not create container with is that already exists fn create_duplicate_id(&self) -> TestResult { let id = generate_uuid().to_string(); - let _ = create::create(&self.project_path, &id); - let temp = create::create(&self.project_path, &id); - kill::kill(&self.project_path, &id); - delete::delete(&self.project_path, &id); - match temp { - TestResult::Passed => TestResult::Failed(anyhow::anyhow!( - "Container should not have been created with same id, but was created." 
+ // First create which should be successful + if let Err(err) = create::create(self.project_path.path(), &id) { + return TestResult::Failed( + err.context( + "container should have been created with valid id, but was not created", + ), + ); + } + // Second create which should fail + let ret = create::create(self.project_path.path(), &id); + // Clean up the container from the first create. No error handling since + // there is nothing we can do. + let _ = kill::kill(self.project_path.path(), &id); + let _ = delete::delete(self.project_path.path(), &id); + match ret { + Ok(()) => TestResult::Failed(anyhow::anyhow!( + "container should not have been created with same id, but was created." )), - TestResult::Failed(_) => TestResult::Passed, - TestResult::Skipped => TestResult::Skipped, + Err(_) => TestResult::Passed, } } } @@ -83,7 +96,7 @@ impl TestableGroup for ContainerCreate { "empty_id" => ret.push(("empty_id", self.create_empty_id())), "valid_id" => ret.push(("valid_id", self.create_valid_id())), "duplicate_id" => ret.push(("duplicate_id", self.create_duplicate_id())), - _ => eprintln!("No test named {} in lifecycle", name), + _ => eprintln!("No test named {name} in lifecycle"), }; } ret diff --git a/tests/rust-integration-tests/integration_test/src/tests/lifecycle/container_lifecycle.rs b/tests/rust-integration-tests/integration_test/src/tests/lifecycle/container_lifecycle.rs index f8c8e88fa..d22b0af34 100644 --- a/tests/rust-integration-tests/integration_test/src/tests/lifecycle/container_lifecycle.rs +++ b/tests/rust-integration-tests/integration_test/src/tests/lifecycle/container_lifecycle.rs @@ -1,4 +1,4 @@ -use crate::utils::{generate_uuid, prepare_bundle, TempDir}; +use crate::utils::{generate_uuid, prepare_bundle}; use std::thread::sleep; use std::time::Duration; use test_framework::{TestResult, TestableGroup}; @@ -11,7 +11,7 @@ use super::{checkpoint, create, delete, exec, kill, start, state}; const SLEEP_TIME: Duration = Duration::from_millis(75); pub 
struct ContainerLifecycle { - project_path: TempDir, + project_path: tempfile::TempDir, container_id: String, } @@ -24,7 +24,7 @@ impl Default for ContainerLifecycle { impl ContainerLifecycle { pub fn new() -> Self { let id = generate_uuid(); - let bundle_dir = prepare_bundle(&id).unwrap(); + let bundle_dir = prepare_bundle().unwrap(); ContainerLifecycle { project_path: bundle_dir, container_id: id.to_string(), @@ -32,40 +32,49 @@ impl ContainerLifecycle { } pub fn create(&self) -> TestResult { - create::create(&self.project_path, &self.container_id) + create::create(self.project_path.path(), &self.container_id).into() } #[allow(dead_code)] pub fn exec(&self, cmd: Vec<&str>, expected_output: Option<&str>) -> TestResult { - exec::exec(&self.project_path, &self.container_id, cmd, expected_output) + exec::exec( + self.project_path.path(), + &self.container_id, + cmd, + expected_output, + ) + .into() } pub fn start(&self) -> TestResult { - start::start(&self.project_path, &self.container_id) + start::start(self.project_path.path(), &self.container_id).into() } pub fn state(&self) -> TestResult { - state::state(&self.project_path, &self.container_id) + state::state(self.project_path.path(), &self.container_id).into() } pub fn kill(&self) -> TestResult { - let ret = kill::kill(&self.project_path, &self.container_id); + let ret = kill::kill(self.project_path.path(), &self.container_id); // sleep a little, so the youki process actually gets the signal and shuts down // otherwise, the tester moves on to next tests before the youki has gotten signal, and delete test can fail sleep(SLEEP_TIME); - ret + ret.into() } pub fn delete(&self) -> TestResult { - delete::delete(&self.project_path, &self.container_id) + delete::delete(self.project_path.path(), &self.container_id).into() } pub fn checkpoint_leave_running(&self) -> TestResult { - checkpoint::checkpoint_leave_running(&self.project_path, &self.container_id) + checkpoint::checkpoint_leave_running(self.project_path.path(), 
&self.container_id) } pub fn checkpoint_leave_running_work_path_tmp(&self) -> TestResult { - checkpoint::checkpoint_leave_running_work_path_tmp(&self.project_path, &self.container_id) + checkpoint::checkpoint_leave_running_work_path_tmp( + self.project_path.path(), + &self.container_id, + ) } } @@ -110,7 +119,7 @@ impl TestableGroup for ContainerLifecycle { "kill" => ret.push(("kill", self.kill())), "state" => ret.push(("state", self.state())), "delete" => ret.push(("delete", self.delete())), - _ => eprintln!("No test named {} in lifecycle", name), + _ => eprintln!("No test named {name} in lifecycle"), }; } ret diff --git a/tests/rust-integration-tests/integration_test/src/tests/lifecycle/create.rs b/tests/rust-integration-tests/integration_test/src/tests/lifecycle/create.rs index c690ff7f8..b28d7fd9e 100644 --- a/tests/rust-integration-tests/integration_test/src/tests/lifecycle/create.rs +++ b/tests/rust-integration-tests/integration_test/src/tests/lifecycle/create.rs @@ -1,14 +1,13 @@ use crate::utils::get_runtime_path; +use anyhow::{bail, Result}; use std::io; use std::path::Path; use std::process::{Command, Stdio}; -use test_framework::TestResult; -// There are still some issues here -// in case we put stdout and stderr as piped -// the youki process created halts indefinitely -// which is why we pass null, and use wait instead of wait_with_output -pub fn create(project_path: &Path, id: &str) -> TestResult { +// There are still some issues here in case we put stdout and stderr as piped +// the youki process created halts indefinitely which is why we pass null, and +// use wait instead of wait_with_output +pub fn create(project_path: &Path, id: &str) -> Result<()> { let res = Command::new(get_runtime_path()) .stdin(Stdio::null()) .stdout(Stdio::null()) @@ -25,14 +24,11 @@ pub fn create(project_path: &Path, id: &str) -> TestResult { match res { io::Result::Ok(status) => { if status.success() { - TestResult::Passed + Ok(()) } else { - 
TestResult::Failed(anyhow::anyhow!( - "Error : create exited with nonzero status : {}", - status - )) + bail!("create exited with nonzero status : {}", status) } } - io::Result::Err(e) => TestResult::Failed(anyhow::Error::new(e)), + io::Result::Err(e) => bail!("create failed : {}", e), } } diff --git a/tests/rust-integration-tests/integration_test/src/tests/lifecycle/delete.rs b/tests/rust-integration-tests/integration_test/src/tests/lifecycle/delete.rs index 31c7dcce2..d8c2bd590 100644 --- a/tests/rust-integration-tests/integration_test/src/tests/lifecycle/delete.rs +++ b/tests/rust-integration-tests/integration_test/src/tests/lifecycle/delete.rs @@ -1,9 +1,9 @@ use super::get_result_from_output; use crate::utils::delete_container; +use anyhow::Result; use std::path::Path; -use test_framework::TestResult; -pub fn delete(project_path: &Path, id: &str) -> TestResult { +pub fn delete(project_path: &Path, id: &str) -> Result<()> { let res = delete_container(id, project_path) .expect("failed to execute delete command") .wait_with_output(); diff --git a/tests/rust-integration-tests/integration_test/src/tests/lifecycle/exec.rs b/tests/rust-integration-tests/integration_test/src/tests/lifecycle/exec.rs index d604c5a4f..af93ff0c7 100644 --- a/tests/rust-integration-tests/integration_test/src/tests/lifecycle/exec.rs +++ b/tests/rust-integration-tests/integration_test/src/tests/lifecycle/exec.rs @@ -1,15 +1,16 @@ use super::get_result_from_output; use crate::utils::get_runtime_path; +use anyhow::Result; use std::path::Path; use std::process::{Command, Stdio}; -use test_framework::{assert_result_eq, TestResult}; +use test_framework::assert_result_eq; pub fn exec( project_path: &Path, id: &str, exec_cmd: Vec<&str>, expected_output: Option<&str>, -) -> TestResult { +) -> Result<()> { let res = Command::new(get_runtime_path()) .stdout(Stdio::piped()) .stderr(Stdio::piped()) diff --git a/tests/rust-integration-tests/integration_test/src/tests/lifecycle/kill.rs 
b/tests/rust-integration-tests/integration_test/src/tests/lifecycle/kill.rs index 6b930e595..2f971f9f1 100644 --- a/tests/rust-integration-tests/integration_test/src/tests/lifecycle/kill.rs +++ b/tests/rust-integration-tests/integration_test/src/tests/lifecycle/kill.rs @@ -1,9 +1,10 @@ +use anyhow::Result; + use super::get_result_from_output; use crate::utils::kill_container; use std::path::Path; -use test_framework::TestResult; -pub fn kill(project_path: &Path, id: &str) -> TestResult { +pub fn kill(project_path: &Path, id: &str) -> Result<()> { let res = kill_container(id, project_path) .expect("failed to execute kill command") .wait_with_output(); diff --git a/tests/rust-integration-tests/integration_test/src/tests/lifecycle/start.rs b/tests/rust-integration-tests/integration_test/src/tests/lifecycle/start.rs index 5b92de228..68c2729be 100644 --- a/tests/rust-integration-tests/integration_test/src/tests/lifecycle/start.rs +++ b/tests/rust-integration-tests/integration_test/src/tests/lifecycle/start.rs @@ -1,9 +1,9 @@ use super::get_result_from_output; use crate::utils::test_utils::start_container; +use anyhow::Result; use std::path::Path; -use test_framework::TestResult; -pub fn start(project_path: &Path, id: &str) -> TestResult { +pub fn start(project_path: &Path, id: &str) -> Result<()> { let res = start_container(id, project_path) .expect("failed to execute start command") .wait_with_output(); diff --git a/tests/rust-integration-tests/integration_test/src/tests/lifecycle/state.rs b/tests/rust-integration-tests/integration_test/src/tests/lifecycle/state.rs index 57318ec11..6d0bbf8b3 100644 --- a/tests/rust-integration-tests/integration_test/src/tests/lifecycle/state.rs +++ b/tests/rust-integration-tests/integration_test/src/tests/lifecycle/state.rs @@ -1,24 +1,23 @@ use crate::utils::get_state; -use anyhow::{anyhow, Result}; +use anyhow::{bail, Result}; use std::path::Path; -use test_framework::TestResult; -pub fn state(project_path: &Path, id: &str) -> 
TestResult { +pub fn state(project_path: &Path, id: &str) -> Result<()> { match get_state(id, project_path) { - Result::Ok((stdout, stderr)) => { + Ok((stdout, stderr)) => { if stderr.contains("Error") || stderr.contains("error") { - TestResult::Failed(anyhow!("Error :\nstdout : {}\nstderr : {}", stdout, stderr)) + bail!("Error :\nstdout : {}\nstderr : {}", stdout, stderr) } else { // confirm that the status is stopped, as this is executed after the kill command - if !(stdout.contains(&format!(r#""id": "{}""#, id)) + if !(stdout.contains(&format!(r#""id": "{id}""#)) && stdout.contains(r#""status": "stopped""#)) { - TestResult::Failed(anyhow!("Expected state stopped, got : {}", stdout)) + bail!("Expected state stopped, got : {}", stdout) } else { - TestResult::Passed + Ok(()) } } } - Result::Err(e) => TestResult::Failed(e.context("failed to get container state")), + Err(e) => Err(e.context("failed to get container state")), } } diff --git a/tests/rust-integration-tests/integration_test/src/tests/lifecycle/util.rs b/tests/rust-integration-tests/integration_test/src/tests/lifecycle/util.rs index 2c2fc1d9b..81d59e97c 100644 --- a/tests/rust-integration-tests/integration_test/src/tests/lifecycle/util.rs +++ b/tests/rust-integration-tests/integration_test/src/tests/lifecycle/util.rs @@ -1,21 +1,17 @@ +use anyhow::{bail, Result}; use std::{io, process}; -use test_framework::TestResult; -pub fn get_result_from_output(res: io::Result<process::Output>) -> TestResult { +pub fn get_result_from_output(res: io::Result<process::Output>) -> Result<()> { match res { io::Result::Ok(output) => { let stderr = String::from_utf8(output.stderr).unwrap(); if stderr.contains("Error") || stderr.contains("error") { let stdout = String::from_utf8(output.stdout).unwrap(); - TestResult::Failed(anyhow::anyhow!( - "Error :\nstdout : {}\nstderr : {}", - stdout, - stderr - )) + bail!("Error :\nstdout : {}\nstderr : {}", stdout, stderr) } else { - TestResult::Passed + Ok(()) } } - io::Result::Err(e) => 
TestResult::Failed(anyhow::Error::new(e)), + io::Result::Err(e) => Err(anyhow::Error::new(e)), } } diff --git a/tests/rust-integration-tests/integration_test/src/tests/mod.rs b/tests/rust-integration-tests/integration_test/src/tests/mod.rs index d04127cba..017070a25 100644 --- a/tests/rust-integration-tests/integration_test/src/tests/mod.rs +++ b/tests/rust-integration-tests/integration_test/src/tests/mod.rs @@ -2,6 +2,7 @@ pub mod cgroups; pub mod domainname; pub mod hooks; pub mod hostname; +pub mod intel_rdt; pub mod lifecycle; pub mod linux_ns_itype; pub mod mounts_recursive; diff --git a/tests/rust-integration-tests/integration_test/src/tests/mounts_recursive/mod.rs b/tests/rust-integration-tests/integration_test/src/tests/mounts_recursive/mod.rs index ca7abf4ac..107ecab87 100644 --- a/tests/rust-integration-tests/integration_test/src/tests/mounts_recursive/mod.rs +++ b/tests/rust-integration-tests/integration_test/src/tests/mounts_recursive/mod.rs @@ -1,4 +1,6 @@ use crate::utils::test_inside_container; +use anyhow::Context; +use nix::libc; use nix::mount::{mount, umount, MsFlags}; use nix::sys::stat::Mode; use nix::unistd::{chown, Uid}; @@ -8,6 +10,8 @@ use oci_spec::runtime::{ }; use std::collections::hash_set::HashSet; use std::fs; +use std::fs::File; +use std::os::unix::fs::symlink; use std::os::unix::prelude::PermissionsExt; use std::path::{Path, PathBuf}; use std::str::FromStr; @@ -97,10 +101,10 @@ fn check_recursive_readonly() -> TestResult { let mount_options = vec!["rbind".to_string(), "rro".to_string()]; let mut mount_spec = Mount::default(); mount_spec - .set_destination(mount_dest_path.clone()) + .set_destination(mount_dest_path) .set_typ(None) .set_source(Some(rro_dir_path.clone())) - .set_options(Some(mount_options.clone())); + .set_options(Some(mount_options)); let spec = get_spec( vec![mount_spec], vec!["runtimetest".to_string(), "mounts_recursive".to_string()], @@ -129,7 +133,7 @@ fn check_recursive_nosuid() -> TestResult { 
.set_destination(mount_dest_path.clone()) .set_typ(None) .set_source(Some(rnosuid_dir_path.clone())) - .set_options(Some(mount_options.clone())); + .set_options(Some(mount_options)); let spec = get_spec( vec![mount_spec], vec![ @@ -202,6 +206,36 @@ fn check_recursive_nosuid() -> TestResult { result } +fn check_recursive_rsuid() -> TestResult { + let rsuid_dir_path = PathBuf::from_str("/tmp/rsuid_dir").unwrap(); + let mount_dest_path = PathBuf::from_str("/mnt/rsuid_dir").unwrap(); + fs::create_dir_all(rsuid_dir_path.clone()).unwrap(); + scopeguard::defer!(fs::remove_dir_all(rsuid_dir_path.clone()).unwrap()); + + let mount_options = vec!["rbind".to_string(), "rsuid".to_string()]; + let mut mount_spec = Mount::default(); + mount_spec + .set_destination(mount_dest_path) + .set_typ(None) + .set_source(Some(rsuid_dir_path.clone())) + .set_options(Some(mount_options)); + let spec = get_spec( + vec![mount_spec], + vec!["runtimetest".to_string(), "mounts_recursive".to_string()], + ); + test_inside_container(spec, &|_| { + let original_file_path = rsuid_dir_path.join("file"); + let file = File::create(original_file_path)?; + let mut permission = file.metadata()?.permissions(); + // chmod +s /tmp/rsuid_dir/file && chmod +g /tmp/rsuid_dir/file + permission.set_mode(permission.mode() | libc::S_ISUID | libc::S_ISGID); + file.set_permissions(permission) + .with_context(|| "failed to set permission")?; + + Ok(()) + }) +} + fn check_recursive_noexec() -> TestResult { let rnoexec_test_base_dir = PathBuf::from_str("/tmp").unwrap(); let rnoexec_dir_path = rnoexec_test_base_dir.join("rnoexec_dir"); @@ -211,10 +245,10 @@ fn check_recursive_noexec() -> TestResult { let mount_options = vec!["rbind".to_string(), "rnoexec".to_string()]; let mut mount_spec = Mount::default(); mount_spec - .set_destination(mount_dest_path.clone()) + .set_destination(mount_dest_path) .set_typ(None) .set_source(Some(rnoexec_dir_path.clone())) - .set_options(Some(mount_options.clone())); + 
.set_options(Some(mount_options)); let spec = get_spec( vec![mount_spec], vec!["runtimetest".to_string(), "mounts_recursive".to_string()], @@ -229,10 +263,51 @@ fn check_recursive_noexec() -> TestResult { let in_container_executable_subdir_file_path = rnoexec_subdir_path.join(executable_file_name); - fs::copy(&executable_file_path, &in_container_executable_file_path)?; + fs::copy(&executable_file_path, in_container_executable_file_path)?; fs::copy( &executable_file_path, - &in_container_executable_subdir_file_path, + in_container_executable_subdir_file_path, + )?; + + Ok(()) + }); + + clean_mount(&rnoexec_dir_path, &rnoexec_subdir_path); + + result +} + +fn check_recursive_rexec() -> TestResult { + let rnoexec_test_base_dir = PathBuf::from_str("/tmp").unwrap(); + let rnoexec_dir_path = rnoexec_test_base_dir.join("rexec_dir"); + let rnoexec_subdir_path = rnoexec_dir_path.join("rexec_subdir"); + let mount_dest_path = PathBuf::from_str("/mnt").unwrap(); + + let mount_options = vec!["rbind".to_string(), "rexec".to_string()]; + let mut mount_spec = Mount::default(); + mount_spec + .set_destination(mount_dest_path) + .set_typ(None) + .set_source(Some(rnoexec_dir_path.clone())) + .set_options(Some(mount_options)); + let spec = get_spec( + vec![mount_spec], + vec!["runtimetest".to_string(), "mounts_recursive".to_string()], + ); + + let result = test_inside_container(spec, &|bundle_path| { + setup_mount(&rnoexec_dir_path, &rnoexec_subdir_path); + + let executable_file_name = "echo"; + let executable_file_path = bundle_path.join("bin").join(executable_file_name); + let in_container_executable_file_path = rnoexec_dir_path.join(executable_file_name); + let in_container_executable_subdir_file_path = + rnoexec_subdir_path.join(executable_file_name); + + fs::copy(&executable_file_path, in_container_executable_file_path)?; + fs::copy( + &executable_file_path, + in_container_executable_subdir_file_path, )?; Ok(()) @@ -243,16 +318,332 @@ fn check_recursive_noexec() -> TestResult { 
result } +/// rdiratime If set in attr_clr, removes the restriction that prevented updating access time for directories. +fn check_recursive_rdiratime() -> TestResult { + let rdiratime_base_dir = PathBuf::from_str("/tmp/rdiratime").unwrap(); + let mount_dest_path = PathBuf::from_str("/rdiratime").unwrap(); + fs::create_dir(rdiratime_base_dir.clone()).unwrap(); + + let mount_options = vec!["rbind".to_string(), "rdiratime".to_string()]; + let mut mount_spec = Mount::default(); + mount_spec + .set_destination(mount_dest_path) + .set_typ(None) + .set_source(Some(rdiratime_base_dir.clone())) + .set_options(Some(mount_options)); + let spec = get_spec( + vec![mount_spec], + vec!["runtimetest".to_string(), "mounts_recursive".to_string()], + ); + + let result = test_inside_container(spec, &|_| Ok(())); + + fs::remove_dir(rdiratime_base_dir).unwrap(); + result +} + +/// If set in attr_set, prevents updating access time for directories on this mount +fn check_recursive_rnodiratime() -> TestResult { + let rnodiratime_base_dir = PathBuf::from_str("/tmp/rnodiratime").unwrap(); + let mount_dest_path = PathBuf::from_str("/rnodiratime").unwrap(); + fs::create_dir(rnodiratime_base_dir.clone()).unwrap(); + + let mount_options = vec!["rbind".to_string(), "rnodiratime".to_string()]; + let mut mount_spec = Mount::default(); + mount_spec + .set_destination(mount_dest_path) + .set_typ(None) + .set_source(Some(rnodiratime_base_dir.clone())) + .set_options(Some(mount_options)); + let spec = get_spec( + vec![mount_spec], + vec!["runtimetest".to_string(), "mounts_recursive".to_string()], + ); + + let result = test_inside_container(spec, &|_| Ok(())); + fs::remove_dir(rnodiratime_base_dir).unwrap(); + result +} + +fn check_recursive_rdev() -> TestResult { + let rdev_base_dir = PathBuf::from_str("/dev").unwrap(); + let mount_dest_path = PathBuf::from_str("/rdev").unwrap(); + + let mount_options = vec!["rbind".to_string(), "rdev".to_string()]; + let mut mount_spec = Mount::default(); + 
mount_spec + .set_destination(mount_dest_path) + .set_typ(None) + .set_source(Some(rdev_base_dir)) + .set_options(Some(mount_options)); + let spec = get_spec( + vec![mount_spec], + vec!["runtimetest".to_string(), "mounts_recursive".to_string()], + ); + + test_inside_container(spec, &|_| Ok(())) +} + +fn check_recursive_rnodev() -> TestResult { + let rnodev_base_dir = PathBuf::from_str("/dev").unwrap(); + let mount_dest_path = PathBuf::from_str("/rnodev").unwrap(); + + let mount_options = vec!["rbind".to_string(), "rnodev".to_string()]; + let mut mount_spec = Mount::default(); + mount_spec + .set_destination(mount_dest_path) + .set_typ(None) + .set_source(Some(rnodev_base_dir)) + .set_options(Some(mount_options)); + let spec = get_spec( + vec![mount_spec], + vec!["runtimetest".to_string(), "mounts_recursive".to_string()], + ); + + test_inside_container(spec, &|_| Ok(())) +} + +fn check_recursive_readwrite() -> TestResult { + let rrw_test_base_dir = PathBuf::from_str("/tmp").unwrap(); + let rrw_dir_path = rrw_test_base_dir.join("rrw_dir"); + let rrw_subdir_path = rrw_dir_path.join("rrw_subdir"); + let mount_dest_path = PathBuf::from_str("/rrw").unwrap(); + + let mount_options = vec!["rbind".to_string(), "rrw".to_string()]; + let mut mount_spec = Mount::default(); + mount_spec + .set_destination(mount_dest_path) + .set_typ(None) + .set_source(Some(rrw_dir_path.clone())) + .set_options(Some(mount_options)); + let spec = get_spec( + vec![mount_spec], + vec!["runtimetest".to_string(), "mounts_recursive".to_string()], + ); + + let result = test_inside_container(spec, &|_| { + setup_mount(&rrw_dir_path, &rrw_subdir_path); + Ok(()) + }); + + clean_mount(&rrw_dir_path, &rrw_subdir_path); + + result +} + +fn check_recursive_rrelatime() -> TestResult { + let rrelatime_base_dir = PathBuf::from_str("/tmp").unwrap(); + let rrelatime_dir_path = rrelatime_base_dir.join("rrelatime_dir"); + let rrelatime_suddir_path = rrelatime_dir_path.join("rrelatime_subdir"); + let mount_dest_path 
= PathBuf::from_str("/rrelatime").unwrap(); + fs::create_dir_all(rrelatime_suddir_path).unwrap(); + + let mount_options = vec!["rbind".to_string(), "rrelatime".to_string()]; + let mut mount_spec = Mount::default(); + mount_spec + .set_destination(mount_dest_path) + .set_typ(None) + .set_source(Some(rrelatime_dir_path.clone())) + .set_options(Some(mount_options)); + let spec = get_spec( + vec![mount_spec], + vec!["runtimetest".to_string(), "mounts_recursive".to_string()], + ); + let result = test_inside_container(spec, &|_| Ok(())); + + fs::remove_dir_all(rrelatime_dir_path).unwrap(); + result +} + +fn check_recursive_rnorelatime() -> TestResult { + let rnorelatime_base_dir = PathBuf::from_str("/tmp").unwrap(); + let rnorelatime_dir_path = rnorelatime_base_dir.join("rnorelatime_dir"); + let mount_dest_path = PathBuf::from_str("/rnorelatime").unwrap(); + fs::create_dir(rnorelatime_dir_path.clone()).unwrap(); + + let mount_options = vec!["rbind".to_string(), "rnorelatime".to_string()]; + let mut mount_spec = Mount::default(); + mount_spec + .set_destination(mount_dest_path) + .set_typ(None) + .set_source(Some(rnorelatime_dir_path.clone())) + .set_options(Some(mount_options)); + let spec = get_spec( + vec![mount_spec], + vec!["runtimetest".to_string(), "mounts_recursive".to_string()], + ); + + let result = test_inside_container(spec, &|_| Ok(())); + + fs::remove_dir_all(rnorelatime_dir_path).unwrap(); + result +} + +fn check_recursive_rnoatime() -> TestResult { + let rnoatime_base_dir = PathBuf::from_str("/tmp").unwrap(); + let rnoatime_dir_path = rnoatime_base_dir.join("rnoatime_dir"); + let mount_dest_path = PathBuf::from_str("/rnoatime").unwrap(); + fs::create_dir(rnoatime_dir_path.clone()).unwrap(); + + let mount_options = vec!["rbind".to_string(), "rnoatime".to_string()]; + let mut mount_spec = Mount::default(); + mount_spec + .set_destination(mount_dest_path) + .set_typ(None) + .set_source(Some(rnoatime_dir_path.clone())) + .set_options(Some(mount_options)); + 
let spec = get_spec( + vec![mount_spec], + vec!["runtimetest".to_string(), "mounts_recursive".to_string()], + ); + + let result = test_inside_container(spec, &|_| Ok(())); + + fs::remove_dir_all(rnoatime_dir_path).unwrap(); + result +} + +fn check_recursive_rstrictatime() -> TestResult { + let rstrictatime_base_dir = PathBuf::from_str("/tmp").unwrap(); + let rstrictatime_dir_path = rstrictatime_base_dir.join("rstrictatime_dir"); + let mount_dest_path = PathBuf::from_str("/rstrictatime").unwrap(); + fs::create_dir(rstrictatime_dir_path.clone()).unwrap(); + + let mount_options = vec!["rbind".to_string(), "rstrictatime".to_string()]; + let mut mount_spec = Mount::default(); + mount_spec + .set_destination(mount_dest_path) + .set_typ(None) + .set_source(Some(rstrictatime_dir_path.clone())) + .set_options(Some(mount_options)); + let spec = get_spec( + vec![mount_spec], + vec!["runtimetest".to_string(), "mounts_recursive".to_string()], + ); + let result = test_inside_container(spec, &|_| Ok(())); + + fs::remove_dir_all(rstrictatime_dir_path).unwrap(); + result +} + +fn check_recursive_rnosymfollow() -> TestResult { + let rnosymfollow_dir_path = PathBuf::from_str("/tmp/rnosymfollow").unwrap(); + let mount_dest_path = PathBuf::from_str("/mnt/rnosymfollow").unwrap(); + fs::create_dir_all(rnosymfollow_dir_path.clone()).unwrap(); + + let mount_options = vec![ + "rbind".to_string(), + "rnosymfollow".to_string(), + "rsuid".to_string(), + ]; + let mut mount_spec = Mount::default(); + mount_spec + .set_destination(mount_dest_path) + .set_typ(None) + .set_source(Some(rnosymfollow_dir_path.clone())) + .set_options(Some(mount_options)); + let spec = get_spec( + vec![mount_spec], + vec!["runtimetest".to_string(), "mounts_recursive".to_string()], + ); + let result = test_inside_container(spec, &|_| { + let original_file_path = format!("{}/{}", rnosymfollow_dir_path.to_str().unwrap(), "file"); + let file = File::create(&original_file_path)?; + let link_file_path = format!("{}/{}", 
rnosymfollow_dir_path.to_str().unwrap(), "link"); + println!("original file: {original_file_path:?},link file: {link_file_path:?}"); + let mut permission = file.metadata()?.permissions(); + permission.set_mode(permission.mode() | libc::S_ISUID | libc::S_ISGID); + file.set_permissions(permission) + .with_context(|| "failed to set permission")?; + + symlink(original_file_path, link_file_path)?; + println!("symlink success"); + Ok(()) + }); + + fs::remove_dir_all(rnosymfollow_dir_path).unwrap(); + result +} + +fn check_recursive_rsymfollow() -> TestResult { + let rsymfollow_dir_path = PathBuf::from_str("/tmp/rsymfollow").unwrap(); + let mount_dest_path = PathBuf::from_str("/mnt/rsymfollow").unwrap(); + fs::create_dir_all(rsymfollow_dir_path.clone()).unwrap(); + + let mount_options = vec![ + "rbind".to_string(), + "rsymfollow".to_string(), + "rsuid".to_string(), + ]; + let mut mount_spec = Mount::default(); + mount_spec + .set_destination(mount_dest_path) + .set_typ(None) + .set_source(Some(rsymfollow_dir_path.clone())) + .set_options(Some(mount_options)); + let spec = get_spec( + vec![mount_spec], + vec!["runtimetest".to_string(), "mounts_recursive".to_string()], + ); + let result = test_inside_container(spec, &|_| { + let original_file_path = format!("{}/{}", rsymfollow_dir_path.to_str().unwrap(), "file"); + let file = File::create(&original_file_path)?; + let link_file_path = format!("{}/{}", rsymfollow_dir_path.to_str().unwrap(), "link"); + let mut permission = file.metadata()?.permissions(); + permission.set_mode(permission.mode() | libc::S_ISUID | libc::S_ISGID); + file.set_permissions(permission) + .with_context(|| "failed to set permission")?; + + symlink(original_file_path, link_file_path)?; + println!("symlink success"); + Ok(()) + }); + + fs::remove_dir_all(rsymfollow_dir_path).unwrap(); + result +} + +/// How does this mount test work? +/// 1. Create mount_options based on the mount properties of the test +/// 2. 
Create OCI.Spec content, container one process is runtimetest,(runtimetest is cargo model, file path `tests/rust-integration-tests/runtimetest/`) +/// 3. inside container to check if the actual mount matches the spec, (spec https://man7.org/linux/man-pages/man2/mount_setattr.2.html), +/// eg. tests/rust-integration-tests/runtimetest/src/tests.rs pub fn get_mounts_recursive_test() -> TestGroup { let rro_test = Test::new("rro_test", Box::new(check_recursive_readonly)); let rnosuid_test = Test::new("rnosuid_test", Box::new(check_recursive_nosuid)); + let rsuid_test = Test::new("rsuid_test", Box::new(check_recursive_rsuid)); let rnoexec_test = Test::new("rnoexec_test", Box::new(check_recursive_noexec)); + let rnodiratime_test = Test::new("rnodiratime_test", Box::new(check_recursive_rnodiratime)); + let rdiratime_test = Test::new("rdiratime_test", Box::new(check_recursive_rdiratime)); + let rdev_test = Test::new("rdev_test", Box::new(check_recursive_rdev)); + let rnodev_test = Test::new("rnodev_test", Box::new(check_recursive_rnodev)); + let rrw_test = Test::new("rrw_test", Box::new(check_recursive_readwrite)); + let rexec_test = Test::new("rexec_test", Box::new(check_recursive_rexec)); + let rrelatime_test = Test::new("rrelatime_test", Box::new(check_recursive_rrelatime)); + let rnorelatime_test = Test::new("rnorelatime_test", Box::new(check_recursive_rnorelatime)); + let rnoatime_test = Test::new("rnoatime_test", Box::new(check_recursive_rnoatime)); + let rstrictatime_test = Test::new("rstrictatime_test", Box::new(check_recursive_rstrictatime)); + let rnosymfollow_test = Test::new("rnosymfollow_test", Box::new(check_recursive_rnosymfollow)); + let rsymfollow_test = Test::new("rsymfollow_test", Box::new(check_recursive_rsymfollow)); let mut tg = TestGroup::new("mounts_recursive"); tg.add(vec![ Box::new(rro_test), Box::new(rnosuid_test), + Box::new(rsuid_test), Box::new(rnoexec_test), + Box::new(rdiratime_test), + Box::new(rnodiratime_test), + Box::new(rdev_test), + 
Box::new(rnodev_test), + Box::new(rrw_test), + Box::new(rexec_test), + Box::new(rrelatime_test), + Box::new(rnorelatime_test), + Box::new(rnoatime_test), + Box::new(rstrictatime_test), + Box::new(rnosymfollow_test), + Box::new(rsymfollow_test), ]); tg diff --git a/tests/rust-integration-tests/integration_test/src/tests/pidfile/pidfile_test.rs b/tests/rust-integration-tests/integration_test/src/tests/pidfile/pidfile_test.rs index aff539468..fc6ea9ce2 100644 --- a/tests/rust-integration-tests/integration_test/src/tests/pidfile/pidfile_test.rs +++ b/tests/rust-integration-tests/integration_test/src/tests/pidfile/pidfile_test.rs @@ -1,6 +1,6 @@ use crate::utils::{ - create_temp_dir, delete_container, generate_uuid, get_runtime_path, get_state, kill_container, - prepare_bundle, State, TempDir, + delete_container, generate_uuid, get_runtime_path, get_state, kill_container, prepare_bundle, + State, }; use anyhow::anyhow; use std::{ @@ -11,7 +11,7 @@ use test_framework::{Test, TestGroup, TestResult}; use uuid::Uuid; #[inline] -fn cleanup(id: &Uuid, bundle: &TempDir) { +fn cleanup(id: &Uuid, bundle: &tempfile::TempDir) { let str_id = id.to_string(); kill_container(&str_id, bundle).unwrap().wait().unwrap(); delete_container(&str_id, bundle).unwrap().wait().unwrap(); @@ -22,11 +22,10 @@ fn cleanup(id: &Uuid, bundle: &TempDir) { fn test_pidfile() -> TestResult { // create id for the container and pidfile let container_id = generate_uuid(); - let pidfile_uuid = generate_uuid(); // create temp dir for bundle and for storing the pid - let bundle = prepare_bundle(&container_id).unwrap(); - let pidfile_dir = create_temp_dir(&pidfile_uuid).unwrap(); + let bundle = prepare_bundle().unwrap(); + let pidfile_dir = tempfile::tempdir().unwrap(); let pidfile_path = pidfile_dir.as_ref().join("pidfile"); let _ = File::create(&pidfile_path).unwrap(); diff --git a/tests/rust-integration-tests/integration_test/src/tests/seccomp_notify/seccomp_agent.rs 
b/tests/rust-integration-tests/integration_test/src/tests/seccomp_notify/seccomp_agent.rs index c1cefa415..6d5370f58 100644 --- a/tests/rust-integration-tests/integration_test/src/tests/seccomp_notify/seccomp_agent.rs +++ b/tests/rust-integration-tests/integration_test/src/tests/seccomp_notify/seccomp_agent.rs @@ -82,8 +82,9 @@ pub fn recv_seccomp_listener(seccomp_listener: &Path) -> SeccompAgentResult { if msg.bytes >= DEFAULT_BUFFER_SIZE { bail!("received more than the DEFAULT_BUFFER_SIZE"); } + let msg_bytes = msg.bytes; - buf.truncate(msg.bytes); + buf.truncate(msg_bytes); let container_process_state: libcontainer::container::ContainerProcessState = serde_json::from_slice(&buf[..]) diff --git a/tests/rust-integration-tests/integration_test/src/tests/tlb/tlb_test.rs b/tests/rust-integration-tests/integration_test/src/tests/tlb/tlb_test.rs index 000ed7134..c8335b0c9 100644 --- a/tests/rust-integration-tests/integration_test/src/tests/tlb/tlb_test.rs +++ b/tests/rust-integration-tests/integration_test/src/tests/tlb/tlb_test.rs @@ -95,7 +95,7 @@ fn get_tlb_sizes() -> Vec { fn validate_tlb(id: &str, size: &str, limit: i64) -> TestResult { let root = "/sys/fs/cgroup/hugetlb"; - let path = format!("{}/{}/hugetlb.{}.limit_in_bytes", root, id, size); + let path = format!("{root}/{id}/hugetlb.{size}.limit_in_bytes"); let val_str = std::fs::read_to_string(path).unwrap(); let val: i64 = val_str.trim().parse().unwrap(); if val == limit { diff --git a/tests/rust-integration-tests/integration_test/src/utils/mod.rs b/tests/rust-integration-tests/integration_test/src/utils/mod.rs index 68ae575af..77789fe55 100644 --- a/tests/rust-integration-tests/integration_test/src/utils/mod.rs +++ b/tests/rust-integration-tests/integration_test/src/utils/mod.rs @@ -1,11 +1,9 @@ pub mod support; -pub mod temp_dir; pub mod test_utils; pub use support::{ generate_uuid, get_project_path, get_runtime_path, get_runtimetest_path, prepare_bundle, set_config, set_runtime_path, }; -pub use 
temp_dir::{create_temp_dir, TempDir}; pub use test_utils::{ create_container, delete_container, get_state, kill_container, test_inside_container, test_outside_container, ContainerData, State, diff --git a/tests/rust-integration-tests/integration_test/src/utils/support.rs b/tests/rust-integration-tests/integration_test/src/utils/support.rs index 298761408..9f735d71b 100644 --- a/tests/rust-integration-tests/integration_test/src/utils/support.rs +++ b/tests/rust-integration-tests/integration_test/src/utils/support.rs @@ -1,4 +1,3 @@ -use super::{create_temp_dir, TempDir}; use anyhow::{Context, Result}; use flate2::read::GzDecoder; use oci_spec::runtime::{Process, Spec}; @@ -8,6 +7,7 @@ use std::env; use std::fs::File; use std::path::{Path, PathBuf}; use tar::Archive; +use tempfile::TempDir; use uuid::Uuid; static RUNTIME_PATH: OnceCell = OnceCell::new(); @@ -34,7 +34,7 @@ pub fn get_project_path() -> PathBuf { let current_dir_path_result = env::current_dir(); match current_dir_path_result { Ok(path_buf) => path_buf, - Err(e) => panic!("directory is not found, {}", e), + Err(e) => panic!("directory is not found, {e}"), } } @@ -52,18 +52,18 @@ pub fn generate_uuid() -> Uuid { match Uuid::parse_str(&rand_string) { Ok(uuid) => uuid, - Err(e) => panic!("can not parse uuid, {}", e), + Err(e) => panic!("can not parse uuid, {e}"), } } /// Creates a bundle directory in a temp directory -pub fn prepare_bundle(id: &Uuid) -> Result { - let temp_dir = create_temp_dir(id)?; +pub fn prepare_bundle() -> Result { + let temp_dir = tempfile::tempdir()?; let tar_file_name = "bundle.tar.gz"; let tar_source = std::env::current_dir()?.join(tar_file_name); let tar_target = temp_dir.as_ref().join(tar_file_name); std::fs::copy(&tar_source, &tar_target) - .with_context(|| format!("could not copy {:?} to {:?}", tar_source, tar_target))?; + .with_context(|| format!("could not copy {tar_source:?} to {tar_target:?}"))?; let tar_gz = File::open(&tar_source)?; let tar = GzDecoder::new(tar_gz); diff 
--git a/tests/rust-integration-tests/integration_test/src/utils/temp_dir.rs b/tests/rust-integration-tests/integration_test/src/utils/temp_dir.rs deleted file mode 100644 index 158f31f2a..000000000 --- a/tests/rust-integration-tests/integration_test/src/utils/temp_dir.rs +++ /dev/null @@ -1,60 +0,0 @@ -///! Thin wrapper struct for creating temp directories -///! Taken after cgroups/tempdir -use anyhow::{Context, Result}; -use std::{ - fs, - ops::Deref, - path::{Path, PathBuf}, -}; -use uuid::Uuid; - -pub struct TempDir { - path: Option, -} - -impl TempDir { - pub fn new>(path: P) -> Result { - let p = path.into(); - std::fs::create_dir_all(&p) - .with_context(|| format!("failed to create diectory {:?}", p))?; - Ok(Self { path: Some(p) }) - } - - pub fn path(&self) -> &Path { - self.path - .as_ref() - .expect("temp dir has already been removed") - } - - pub fn remove(&mut self) { - if let Some(p) = &self.path { - let _ = fs::remove_dir_all(p); - self.path = None; - } - } -} - -impl Drop for TempDir { - fn drop(&mut self) { - self.remove(); - } -} - -impl AsRef for TempDir { - fn as_ref(&self) -> &Path { - self.path() - } -} - -impl Deref for TempDir { - type Target = Path; - - fn deref(&self) -> &Self::Target { - self.path() - } -} - -pub fn create_temp_dir(id: &Uuid) -> Result { - let dir = TempDir::new(std::env::temp_dir().join(id.to_string()))?; - Ok(dir) -} diff --git a/tests/rust-integration-tests/integration_test/src/utils/test_utils.rs b/tests/rust-integration-tests/integration_test/src/utils/test_utils.rs index ca8fd89ec..565c468aa 100644 --- a/tests/rust-integration-tests/integration_test/src/utils/test_utils.rs +++ b/tests/rust-integration-tests/integration_test/src/utils/test_utils.rs @@ -1,5 +1,5 @@ -///! Contains utility functions for testing -///! Similar to https://github.com/opencontainers/runtime-tools/blob/master/validation/util/test.go +//! Contains utility functions for testing +//! 
Similar to https://github.com/opencontainers/runtime-tools/blob/master/validation/util/test.go use super::{generate_uuid, prepare_bundle, set_config}; use super::{get_runtime_path, get_runtimetest_path}; use anyhow::{anyhow, bail, Context, Result}; @@ -48,9 +48,6 @@ pub fn create_container>(id: &str, dir: P) -> Result { // in test_inside_container function .stdout(Stdio::piped()) .stderr(Stdio::piped()) - // set log level to error only, otherwise - // we get warnings in stderr - .env("YOUKI_LOG_LEVEL", "error") .arg("--root") .arg(dir.as_ref().join("runtime")) .arg("create") @@ -121,7 +118,7 @@ pub fn test_outside_container( ) -> TestResult { let id = generate_uuid(); let id_str = id.to_string(); - let bundle = prepare_bundle(&id).unwrap(); + let bundle = prepare_bundle().unwrap(); set_config(&bundle, &spec).unwrap(); let create_result = create_container(&id_str, &bundle).unwrap().wait(); let (out, err) = get_state(&id_str, &bundle).unwrap(); @@ -148,7 +145,7 @@ pub fn test_inside_container( ) -> TestResult { let id = generate_uuid(); let id_str = id.to_string(); - let bundle = prepare_bundle(&id).unwrap(); + let bundle = prepare_bundle().unwrap(); // This will do the required setup for the test test_result!(setup_for_test( @@ -199,6 +196,13 @@ pub fn test_inside_container( .context("getting output after starting the container failed") .unwrap(); + let stdout = String::from_utf8_lossy(&create_output.stdout); + if !stdout.is_empty() { + println!( + "{:?}", + anyhow!("container stdout was not empty, found : {}", stdout) + ) + } let stderr = String::from_utf8_lossy(&create_output.stderr); if !stderr.is_empty() { return TestResult::Failed(anyhow!( diff --git a/tests/rust-integration-tests/runtimetest/Cargo.toml b/tests/rust-integration-tests/runtimetest/Cargo.toml index f4a5153ab..99f0e7967 100644 --- a/tests/rust-integration-tests/runtimetest/Cargo.toml +++ b/tests/rust-integration-tests/runtimetest/Cargo.toml @@ -4,8 +4,8 @@ version = "0.0.1" edition = "2021" 
[dependencies] -oci-spec = { version = "0.6.0", features = ["runtime"] } -nix = "0.25.0" +oci-spec = { version = "0.6.1", features = ["runtime"] } +nix = "0.26.2" anyhow = "1.0" libc = "0.2.139" diff --git a/tests/rust-integration-tests/runtimetest/README.md b/tests/rust-integration-tests/runtimetest/README.md index 45b66d549..33651f1ef 100644 --- a/tests/rust-integration-tests/runtimetest/README.md +++ b/tests/rust-integration-tests/runtimetest/README.md @@ -17,7 +17,7 @@ There is currently no convention of explicit indication of tests passing, the pa This package must be compiled as a statically linked binary, as otherwise the rust compile will make it dynamically link to /lib64/ld-linux-x86-64.so , which is not available inside the container, and thus making the binary not usable inside the container process. -**Note** that the dynamically linked binary does not give a `segmentation fault` or similar error when tried to run inside the container, but instead gives `no such file or directory found` or `executable not found` error, even though the executable exists in the container. This made this tricky to debug correctly when originally developing, so if you decide on chaing the compilation or configuration of this , please make absolutely sure that the changes work and do not accidentally break something. +**Note** that the dynamically linked binary does not give a `segmentation fault` or similar error when tried to run inside the container, but instead gives `no such file or directory found` or `executable not found` error, even though the executable exists in the container. This made this tricky to debug correctly when originally developing, so if you decide on changing the compilation or configuration of this , please make absolutely sure that the changes work and do not accidentally break something. 
you can use @@ -32,8 +32,8 @@ Reading the Readme of integration tests can be helpful to understand how the int see -https://stackoverflow.com/questions/31770604/how-to-generate-statically-linked-executables -https://superuser.com/questions/248512/why-do-i-get-command-not-found-when-the-binary-file-exists -https://doc.rust-lang.org/cargo/reference/config.html + + + for more info diff --git a/tests/rust-integration-tests/runtimetest/src/main.rs b/tests/rust-integration-tests/runtimetest/src/main.rs index 9f57d8c53..132c2ebf3 100644 --- a/tests/rust-integration-tests/runtimetest/src/main.rs +++ b/tests/rust-integration-tests/runtimetest/src/main.rs @@ -12,7 +12,7 @@ fn get_spec() -> Spec { match Spec::load(path) { Ok(spec) => spec, Err(e) => { - eprintln!("Error in loading spec, {:?}", e); + eprintln!("Error in loading spec, {e:?}"); std::process::exit(66); } } @@ -31,9 +31,6 @@ fn main() { "set_host_name" => tests::validate_hostname(&spec), "mounts_recursive" => tests::validate_mounts_recursive(&spec), "domainname_test" => tests::validate_domainname(&spec), - _ => eprintln!( - "error due to unexpected execute test name: {}", - execute_test - ), + _ => eprintln!("error due to unexpected execute test name: {execute_test}"), } } diff --git a/tests/rust-integration-tests/runtimetest/src/tests.rs b/tests/rust-integration-tests/runtimetest/src/tests.rs index 3b90d96f1..8cd05f885 100644 --- a/tests/rust-integration-tests/runtimetest/src/tests.rs +++ b/tests/rust-integration-tests/runtimetest/src/tests.rs @@ -32,8 +32,7 @@ pub fn validate_readonly_paths(spec: &Spec) { /* This is expected */ } else { eprintln!( - "in readonly paths, error in testing read access for path {} : {:?}", - path, e + "in readonly paths, error in testing read access for path {path} : {e:?}" ); return; } @@ -50,16 +49,12 @@ pub fn validate_readonly_paths(spec: &Spec) { /* This is expected */ } else { eprintln!( - "in readonly paths, error in testing write access for path {} : {:?}", - path, e + "in 
readonly paths, error in testing write access for path {path} : {e:?}" ); return; } } else { - eprintln!( - "in readonly paths, path {} expected to not be writable, found writable", - path - ); + eprintln!("in readonly paths, path {path} expected to not be writable, found writable"); return; } } @@ -75,8 +70,7 @@ pub fn validate_hostname(spec: &Spec) { let actual_hostname = actual_hostname.to_str().unwrap(); if actual_hostname != expected_hostname { eprintln!( - "Unexpected hostname, expected: {:?} found: {:?}", - expected_hostname, actual_hostname + "Unexpected hostname, expected: {expected_hostname:?} found: {actual_hostname:?}" ); } } @@ -147,7 +141,25 @@ pub fn validate_mounts_recursive(spec: &Spec) { Ok(()) }) { - eprintln!("error in testing rro recursive mounting : {}", e); + eprintln!("error in testing rro recursive mounting : {e}"); + } + } + "rrw" => { + if let Err(e) = + do_test_mounts_recursive(mount.destination(), &|test_file_path| { + if utils::test_write_access(test_file_path.to_str().unwrap()) + .is_err() + { + // Return Err if not writeable + bail!( + "path {:?} expected to be writable, found read-only", + test_file_path + ); + } + Ok(()) + }) + { + eprintln!("error in testing rro recursive mounting : {e}"); } } "rnoexec" => { @@ -162,7 +174,112 @@ pub fn validate_mounts_recursive(spec: &Spec) { Ok(()) }, ) { - eprintln!("error in testing rnoexec recursive mounting: {}", e); + eprintln!("error in testing rnoexec recursive mounting: {e}"); + } + } + "rexec" => { + if let Err(e) = do_test_mounts_recursive( + mount.destination(), + &|test_file_path| { + if let Err(ee) = utils::test_file_executable( + test_file_path.to_str().unwrap(), + ) { + bail!("path {:?} expected to be executable, found not executable, error: {ee}", test_file_path); + } + Ok(()) + }, + ) { + eprintln!("error in testing rexec recursive mounting: {e}"); + } + } + "rdiratime" => { + println!("test_dir_update_access_time: {mount:?}"); + let rest = utils::test_dir_update_access_time( + 
mount.destination().to_str().unwrap(), + ); + if let Err(e) = rest { + eprintln!("error in testing rdiratime recursive mounting: {e}"); + } + } + "rnodiratime" => { + println!("test_dir_not_update_access_time: {mount:?}"); + let rest = utils::test_dir_not_update_access_time( + mount.destination().to_str().unwrap(), + ); + if let Err(e) = rest { + eprintln!("error in testing rnodiratime recursive mounting: {e}"); + } + } + "rdev" => { + println!("test_device_access: {mount:?}"); + let rest = + utils::test_device_access(mount.destination().to_str().unwrap()); + if let Err(e) = rest { + eprintln!("error in testing rdev recursive mounting: {e}"); + } + } + "rnodev" => { + println!("test_device_unaccess: {mount:?}"); + let rest = + utils::test_device_unaccess(mount.destination().to_str().unwrap()); + if rest.is_ok() { + // because /rnodev/null device not access,so rest is err + eprintln!("error in testing rnodev recursive mounting"); + } + } + "rrelatime" => { + println!("rrelatime: {mount:?}"); + if let Err(e) = utils::test_mount_releatime_option( + mount.destination().to_str().unwrap(), + ) { + eprintln!("path expected to be rrelatime, found not rrelatime, error: {e}"); + } + } + "rnorelatime" => { + println!("rnorelatime: {mount:?}"); + if let Err(e) = utils::test_mount_noreleatime_option( + mount.destination().to_str().unwrap(), + ) { + eprintln!("path expected to be rnorelatime, found not rnorelatime, error: {e}"); + } + } + "rnoatime" => { + println!("rnoatime: {mount:?}"); + if let Err(e) = utils::test_mount_rnoatime_option( + mount.destination().to_str().unwrap(), + ) { + eprintln!( + "path expected to be rnoatime, found not rnoatime, error: {e}" + ); + } + } + "rstrictatime" => { + println!("rstrictatime: {mount:?}"); + if let Err(e) = utils::test_mount_rstrictatime_option( + mount.destination().to_str().unwrap(), + ) { + eprintln!("path expected to be rstrictatime, found not rstrictatime, error: {e}"); + } + } + "rnosymfollow" => { + if let Err(e) = 
utils::test_mount_rnosymfollow_option( + mount.destination().to_str().unwrap(), + ) { + eprintln!("path expected to be rnosymfollow, found not rnosymfollow, error: {e}"); + } + } + "rsymfollow" => { + if let Err(e) = utils::test_mount_rsymfollow_option( + mount.destination().to_str().unwrap(), + ) { + eprintln!("path expected to be rsymfollow, found not rsymfollow, error: {e}"); + } + } + "rsuid" => { + if let Err(e) = utils::test_mount_rsuid_option( + mount.destination().to_str().unwrap(), + ) { + eprintln!("path expected to be rsuid, found not rsuid, error: {e}"); } } _ => {} diff --git a/tests/rust-integration-tests/runtimetest/src/utils.rs b/tests/rust-integration-tests/runtimetest/src/utils.rs index b3713ab8a..ab991a079 100644 --- a/tests/rust-integration-tests/runtimetest/src/utils.rs +++ b/tests/rust-integration-tests/runtimetest/src/utils.rs @@ -1,8 +1,11 @@ -use std::path::PathBuf; -use std::process::Command; - use nix::sys::stat::stat; use nix::sys::stat::SFlag; +use std::fs; +use std::fs::metadata; +use std::fs::symlink_metadata; +use std::os::unix::prelude::MetadataExt; +use std::path::PathBuf; +use std::process::Command; fn test_file_read_access(path: &str) -> Result<(), std::io::Error> { let _ = std::fs::OpenOptions::new() @@ -40,10 +43,7 @@ pub fn test_read_access(path: &str) -> Result<(), std::io::Error> { Err(std::io::Error::new( std::io::ErrorKind::Other, - format!( - "cannot test read access for {:?}, has mode {:x}", - path, mode - ), + format!("cannot test read access for {path:?}, has mode {mode:x}"), )) } @@ -72,10 +72,7 @@ pub fn test_write_access(path: &str) -> Result<(), std::io::Error> { Err(std::io::Error::new( std::io::ErrorKind::Other, - format!( - "cannot test write access for {:?}, has mode {:x}", - path, mode - ), + format!("cannot test write access for {path:?}, has mode {mode:x}"), )) } @@ -84,10 +81,391 @@ pub fn test_file_executable(path: &str) -> Result<(), std::io::Error> { let mode = fstat.st_mode; if is_file_like(mode) { 
Command::new(path).output()?; + return Ok(()); + } + + Err(std::io::Error::new( + std::io::ErrorKind::Other, + format!("{path:?} is directory, so cannot execute"), + )) +} + +pub fn test_dir_update_access_time(path: &str) -> Result<(), std::io::Error> { + println!("test_dir_update_access_time path: {path:?}"); + let metadata = fs::metadata(PathBuf::from(path))?; + let rest = metadata.accessed(); + let first_access_time = rest.unwrap(); + println!("{path:?} dir first access time is {first_access_time:?}"); + // execute ls command to update access time + Command::new("ls") + .arg(path) + .output() + .expect("execute ls command error"); + // second get access time + let metadata = fs::metadata(PathBuf::from(path))?; + let rest = metadata.accessed(); + let second_access_time = rest.unwrap(); + println!("{path:?} dir second access time is {second_access_time:?}"); + if first_access_time == second_access_time { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + format!("cannot update access time for path {path:?}"), + )); + } + Ok(()) +} + +pub fn test_dir_not_update_access_time(path: &str) -> Result<(), std::io::Error> { + println!("test_dir_not_update_access_time path: {path:?}"); + let metadata = fs::metadata(PathBuf::from(path))?; + let rest = metadata.accessed(); + let first_access_time = rest.unwrap(); + println!("{path:?} dir first access time is {first_access_time:?}"); + // execute ls command to update access time + Command::new("ls") + .arg(path) + .output() + .expect("execute ls command error"); + // second get access time + let metadata = fs::metadata(PathBuf::from(path))?; + let rest = metadata.accessed(); + let second_access_time = rest.unwrap(); + println!("{path:?} dir second access time is {second_access_time:?}"); + if first_access_time != second_access_time { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + format!("cannot update access time for path {path:?}"), + )); + } + Ok(()) +} + +pub fn test_device_access(path: 
&str) -> Result<(), std::io::Error> { + println!("test_device_access path: {path:?}"); + let _ = std::fs::OpenOptions::new() + .create(true) + .write(true) + .open(PathBuf::from(path).join("null"))?; + Ok(()) +} + +pub fn test_device_unaccess(path: &str) -> Result<(), std::io::Error> { + println!("test_device_unaccess path: {path:?}"); + let _ = std::fs::OpenOptions::new() + .create(true) + .write(true) + .open(PathBuf::from(path).join("null"))?; + Ok(()) +} + +// https://man7.org/linux/man-pages/man2/mount_setattr.2.html +// When a file is accessed via this mount, update the +// file's last access time (atime) only if the current +// value of atime is less than or equal to the file's +// last modification time (mtime) or last status +// change time (ctime). +// case: +// 1. create test.txt file, get one atime +// 2. cat a.txt, get two atime; check atime whether update, conditions are met atime less than or equal mtime or ctime +// 3. cat a.txt, get three atime, check now two atime whether equal three atime +pub fn test_mount_releatime_option(path: &str) -> Result<(), std::io::Error> { + let test_file_path = PathBuf::from(path).join("test.txt"); + Command::new("touch") + .arg(test_file_path.to_str().unwrap()) + .output()?; + let one_metadata = fs::metadata(test_file_path.clone())?; + println!( + "{:?} file one metadata atime is {:?},mtime is {:?},current time is{:?}", + test_file_path, + one_metadata.atime(), + one_metadata.mtime(), + std::time::SystemTime::now() + ); + std::thread::sleep(std::time::Duration::from_millis(1000)); + + // execute cat command to update access time + Command::new("cat") + .arg(test_file_path.to_str().unwrap()) + .output() + .expect("execute cat command error"); + let two_metadata = fs::metadata(test_file_path.clone())?; + println!( + "{:?} file two metadata atime is {:?},mtime is {:?},current time is{:?}", + test_file_path, + two_metadata.atime(), + two_metadata.mtime(), + std::time::SystemTime::now() + ); + + if one_metadata.atime() == 
two_metadata.atime() { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + format!( + "not update access time for file {:?}", + test_file_path.to_str() + ), + )); + } + + // execute cat command to update access time + std::thread::sleep(std::time::Duration::from_millis(1000)); + Command::new("cat") + .arg(test_file_path.to_str().unwrap()) + .output() + .expect("execute cat command error"); + let three_metadata = fs::metadata(test_file_path.clone())?; + println!( + "{:?} file three metadata atime is {:?}", + test_file_path, + three_metadata.atime() + ); + if two_metadata.atime() != three_metadata.atime() { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + format!("update access time for file {:?}", test_file_path.to_str()), + )); + } + + Ok(()) +} + +// case: because filesystem having relatime option +// 1. create test.txt file, get one atime +// 2. cat a.txt, get two atime; check atime whether update +// 3. cat a.txt, get three atime, check now two atime whether equal three atime +pub fn test_mount_noreleatime_option(path: &str) -> Result<(), std::io::Error> { + let test_file_path = PathBuf::from(path).join("noreleatime.txt"); + Command::new("touch") + .arg(test_file_path.to_str().unwrap()) + .output()?; + let one_metadata = fs::metadata(test_file_path.clone())?; + println!( + "{:?} file one atime is {:?},mtime is {:?}, current time is {:?}", + test_file_path, + one_metadata.atime(), + one_metadata.mtime(), + std::time::SystemTime::now() + ); + + std::thread::sleep(std::time::Duration::from_millis(1000)); + // execute cat command to update access time + Command::new("cat") + .arg(test_file_path.to_str().unwrap()) + .output() + .expect("execute cat command error"); + let two_metadata = fs::metadata(test_file_path.clone())?; + println!( + "{:?} file two atime is {:?},mtime is {:?},current time is {:?}", + test_file_path, + two_metadata.atime(), + two_metadata.mtime(), + std::time::SystemTime::now() + ); + + if one_metadata.atime() == 
two_metadata.atime() { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + format!( + "not update access time for file {:?}", + test_file_path.to_str() + ), + )); + } + + // execute cat command to update access time + std::thread::sleep(std::time::Duration::from_millis(1000)); + Command::new("cat") + .arg(test_file_path.to_str().unwrap()) + .output() + .expect("execute cat command error"); + let three_metadata = fs::metadata(test_file_path.clone())?; + println!( + "{:?} file three atime is {:?},mtime is {:?},current time is {:?}", + test_file_path, + three_metadata.atime(), + three_metadata.mtime(), + std::time::SystemTime::now() + ); + + if two_metadata.atime() != three_metadata.atime() { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + format!("update access time for file {:?}", test_file_path.to_str()), + )); } + Ok(()) +} + +// Do not update access times for (all types of) files on this mount. +// case: +// 1. touch rnoatime.txt file, get atime +// 2. cat rnoatime.txt, check atime whether update, if update return error, else return Ok +pub fn test_mount_rnoatime_option(path: &str) -> Result<(), std::io::Error> { + let test_file_path = PathBuf::from(path).join("rnoatime.txt"); + Command::new("touch") + .arg(test_file_path.to_str().unwrap()) + .output()?; + let one_metadata = fs::metadata(test_file_path.clone())?; + println!( + "{:?} file one atime is {:?},mtime is {:?}, current time is {:?}", + test_file_path, + one_metadata.atime(), + one_metadata.mtime(), + std::time::SystemTime::now() + ); + std::thread::sleep(std::time::Duration::from_millis(1000)); + + // execute cat command to update access time + Command::new("cat") + .arg(test_file_path.to_str().unwrap()) + .output() + .expect("execute cat command error"); + let two_metadata = fs::metadata(test_file_path.clone())?; + println!( + "{:?} file two atime is {:?},mtime is {:?},current time is {:?}", + test_file_path, + two_metadata.atime(), + two_metadata.mtime(), + 
std::time::SystemTime::now() + ); + if one_metadata.atime() != two_metadata.atime() { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + format!( + "update access time for file {:?}, expected not update", + test_file_path.to_str() + ), + )); + } + Ok(()) +} +// Always update the last access time (atime) when files are accessed on this mount. +pub fn test_mount_rstrictatime_option(path: &str) -> Result<(), std::io::Error> { + let test_file_path = PathBuf::from(path).join("rstrictatime.txt"); + Command::new("touch") + .arg(test_file_path.to_str().unwrap()) + .output()?; + let one_metadata = fs::metadata(test_file_path.clone())?; + println!( + "{:?} file one atime is {:?},mtime is {:?}, current time is {:?}", + test_file_path, + one_metadata.atime(), + one_metadata.mtime(), + std::time::SystemTime::now() + ); + + std::thread::sleep(std::time::Duration::from_millis(1000)); + // execute cat command to update access time + Command::new("cat") + .arg(test_file_path.to_str().unwrap()) + .output() + .expect("execute cat command error"); + let two_metadata = fs::metadata(test_file_path.clone())?; + println!( + "{:?} file two atime is {:?},mtime is {:?},current time is {:?}", + test_file_path, + two_metadata.atime(), + two_metadata.mtime(), + std::time::SystemTime::now() + ); + + if one_metadata.atime() == two_metadata.atime() { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + format!( + "not update access time for file {:?}", + test_file_path.to_str() + ), + )); + } + + // execute cat command to update access time + std::thread::sleep(std::time::Duration::from_millis(1000)); + Command::new("cat") + .arg(test_file_path.to_str().unwrap()) + .output() + .expect("execute cat command error"); + let three_metadata = fs::metadata(test_file_path.clone())?; + println!( + "{:?} file three atime is {:?},mtime is {:?},current time is {:?}", + test_file_path, + two_metadata.atime(), + two_metadata.mtime(), + std::time::SystemTime::now() + ); + + if 
two_metadata.atime() == three_metadata.atime() { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + format!("update access time for file {:?}", test_file_path.to_str()), + )); + } + Ok(()) +} + +pub fn test_mount_rnosymfollow_option(path: &str) -> Result<(), std::io::Error> { + let path = format!("{}/{}", path, "link"); + let metadata = match symlink_metadata(path.clone()) { + Ok(metadata) => metadata, + Err(e) => { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + format!("get file symlink_metadata err {path:?}, {e}"), + )); + } + }; + // check symbolic is followed + if metadata.file_type().is_symlink() && metadata.mode() & 0o777 == 0o777 { + Ok(()) + } else { + Err(std::io::Error::new( + std::io::ErrorKind::Other, + format!("get file symlink_metadata err {path:?}"), + )) + } +} + +pub fn test_mount_rsymfollow_option(path: &str) -> Result<(), std::io::Error> { + let path = format!("{}/{}", path, "link"); + let metadata = match symlink_metadata(path.clone()) { + Ok(metadata) => metadata, + Err(e) => { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + format!("get file symlink_metadata err {path:?}, {e}"), + )); + } + }; + // check symbolic is followed + if metadata.file_type().is_symlink() && metadata.mode() & 0o777 == 0o777 { + Ok(()) + } else { + Err(std::io::Error::new( + std::io::ErrorKind::Other, + format!("get file symlink_metadata err {path:?}"), + )) + } +} + +pub fn test_mount_rsuid_option(path: &str) -> Result<(), std::io::Error> { + let path = PathBuf::from(path).join("file"); + + let metadata = match metadata(path.clone()) { + Ok(metadata) => metadata, + Err(e) => { + return Err(std::io::Error::new(std::io::ErrorKind::Other, e)); + } + }; + // check suid and sgid + let suid = metadata.mode() & 0o4000 == 0o4000; + let sgid = metadata.mode() & 0o2000 == 0o2000; + println!("suid: {suid:?},sgid: {sgid:?}"); + if suid && sgid { + return Ok(()); + } Err(std::io::Error::new( std::io::ErrorKind::Other, - format!("{:?} 
is directory, so cannot execute", path), + format!("rsuid error {path:?}"), )) } diff --git a/tests/rust-integration-tests/test_framework/Cargo.toml b/tests/rust-integration-tests/test_framework/Cargo.toml index 185cd07e0..18d847ce3 100644 --- a/tests/rust-integration-tests/test_framework/Cargo.toml +++ b/tests/rust-integration-tests/test_framework/Cargo.toml @@ -6,5 +6,5 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -anyhow = "1.0.68" +anyhow = "1.0.72" crossbeam = "0.8.2" diff --git a/tests/rust-integration-tests/test_framework/src/conditional_test.rs b/tests/rust-integration-tests/test_framework/src/conditional_test.rs index 0732bdbc3..67c5a3755 100644 --- a/tests/rust-integration-tests/test_framework/src/conditional_test.rs +++ b/tests/rust-integration-tests/test_framework/src/conditional_test.rs @@ -1,4 +1,4 @@ -///! Contains definition for a tests which should be conditionally run +//! Contains definition for a tests which should be conditionally run use crate::testable::{TestResult, Testable}; // type aliases for test function signature diff --git a/tests/rust-integration-tests/test_framework/src/test.rs b/tests/rust-integration-tests/test_framework/src/test.rs index 1aeaa65df..35dc93af8 100644 --- a/tests/rust-integration-tests/test_framework/src/test.rs +++ b/tests/rust-integration-tests/test_framework/src/test.rs @@ -1,4 +1,4 @@ -///! Contains definition for a simple and commonly usable test structure +//! 
Contains definition for a simple and commonly usable test structure use crate::testable::{TestResult, Testable}; // type alias for the test function diff --git a/tests/rust-integration-tests/test_framework/src/test_group.rs b/tests/rust-integration-tests/test_framework/src/test_group.rs index 4b3f0eeed..e61cd9753 100644 --- a/tests/rust-integration-tests/test_framework/src/test_group.rs +++ b/tests/rust-integration-tests/test_framework/src/test_group.rs @@ -1,4 +1,4 @@ -///! Contains structure for a test group +//! Contains structure for a test group use crate::testable::{TestResult, Testable, TestableGroup}; use crossbeam::thread; use std::collections::BTreeMap; diff --git a/tests/rust-integration-tests/test_framework/src/test_manager.rs b/tests/rust-integration-tests/test_framework/src/test_manager.rs index ac9d847bd..f2c87ec94 100644 --- a/tests/rust-integration-tests/test_framework/src/test_manager.rs +++ b/tests/rust-integration-tests/test_framework/src/test_manager.rs @@ -1,4 +1,4 @@ -///! This exposes the main control wrapper to control the tests +//! 
This exposes the main control wrapper to control the tests use crate::testable::{TestResult, TestableGroup}; use anyhow::Result; use crossbeam::thread; @@ -39,7 +39,7 @@ impl TestManager { /// Prints the given test results, usually used to print /// results of a test group fn print_test_result(&self, name: &str, res: &[(&'static str, TestResult)]) { - println!("# Start group {}", name); + println!("# Start group {name}"); let len = res.len(); for (idx, (name, res)) in res.iter().enumerate() { print!("{} / {} : {} : ", idx + 1, len, name); @@ -51,11 +51,11 @@ impl TestManager { println!("skipped"); } TestResult::Failed(e) => { - println!("not ok\n\t{}", e); + println!("not ok\n\t{e}"); } } } - println!("# End group {}\n", name); + println!("# End group {name}\n"); } /// Run all tests from all tests group pub fn run_all(&self) { @@ -72,7 +72,7 @@ impl TestManager { .unwrap(); for cleaner in &self.cleanup { if let Err(e) = cleaner() { - print!("Failed to cleanup: {}", e); + print!("Failed to cleanup: {e}"); } } } @@ -89,7 +89,7 @@ impl TestManager { }; collector.push((test_group_name, r)); } else { - eprintln!("Error : Test Group {} not found, skipping", test_group_name); + eprintln!("Error : Test Group {test_group_name} not found, skipping"); } } for (name, handle) in collector { @@ -100,7 +100,7 @@ impl TestManager { for cleaner in &self.cleanup { if let Err(e) = cleaner() { - print!("Failed to cleanup: {}", e); + print!("Failed to cleanup: {e}"); } } } diff --git a/tests/rust-integration-tests/test_framework/src/testable.rs b/tests/rust-integration-tests/test_framework/src/testable.rs index fd3f17465..2c90986b8 100644 --- a/tests/rust-integration-tests/test_framework/src/testable.rs +++ b/tests/rust-integration-tests/test_framework/src/testable.rs @@ -1,6 +1,6 @@ +//! Contains Basic setup for testing, testable trait and its result type use std::fmt::Debug; -///! 
Contains Basic setup for testing, testable trait and its result type use anyhow::{bail, Error, Result}; #[derive(Debug)] diff --git a/tools/wasm-sample/src/main.rs b/tools/wasm-sample/src/main.rs index 7b5953b72..fa0f47a7e 100644 --- a/tools/wasm-sample/src/main.rs +++ b/tools/wasm-sample/src/main.rs @@ -1,11 +1,11 @@ fn main() { println!("Printing args"); for arg in std::env::args().skip(1) { - println!("{}", arg); + println!("{arg}"); } println!("Printing envs"); for envs in std::env::vars() { - println!("{:?}", envs); + println!("{envs:?}"); } }