From f998ece2479c66fbd13b669b2f7e601b20d0f390 Mon Sep 17 00:00:00 2001 From: Tyler Titsworth Date: Thu, 4 Jan 2024 08:27:27 -0800 Subject: [PATCH 1/3] Split Preset Automation by Recipe (#178) * split docker compose for all presets * return license info * update setup-test * update actions * return to test runner * undo artifact name change --- .../workflows/container-pipeline-tester.yaml | 4 +- preset/{ => classical-ml}/.actions.json | 0 preset/classical-ml/docker-compose.yaml | 57 +++++++++++++ preset/data-analytics/.actions.json | 4 + preset/data-analytics/docker-compose.yaml | 56 +++++++++++++ preset/deep-learning/.actions.json | 4 + .../{ => deep-learning}/docker-compose.yaml | 46 +---------- preset/inference-optimization/.actions.json | 4 + .../docker-compose.yaml | 82 +++++++++++++++++++ 9 files changed, 211 insertions(+), 46 deletions(-) rename preset/{ => classical-ml}/.actions.json (100%) create mode 100644 preset/classical-ml/docker-compose.yaml create mode 100644 preset/data-analytics/.actions.json create mode 100644 preset/data-analytics/docker-compose.yaml create mode 100644 preset/deep-learning/.actions.json rename preset/{ => deep-learning}/docker-compose.yaml (60%) create mode 100644 preset/inference-optimization/.actions.json create mode 100644 preset/inference-optimization/docker-compose.yaml diff --git a/.github/workflows/container-pipeline-tester.yaml b/.github/workflows/container-pipeline-tester.yaml index fd6be1be..f1cf8e04 100644 --- a/.github/workflows/container-pipeline-tester.yaml +++ b/.github/workflows/container-pipeline-tester.yaml @@ -89,7 +89,7 @@ jobs: - uses: actions/checkout@v4 - name: Get Recipes id: recipes - run: echo "RECIPES=$(find ${{ inputs.group_dir }} -type f -name 'tests.yaml' -exec dirname {} \; | awk -F/ '{print $NF}' | jq -R -s -c 'split("\n")[:-1]')" >> $GITHUB_OUTPUT + run: echo "RECIPES=$(find ${{ inputs.group_dir }} -type f -name 'tests.yaml' -exec dirname {} \; | jq -R -s -c 'split("\n")[:-1]')" >> $GITHUB_OUTPUT test-containers: needs: [ setup-test ] if: ${{ needs.setup-test.outputs.recipes != '[]' }} @@ -111,5 +111,5 @@ jobs: with: mlops_repo: ${{ vars.MLOPS_REPO }} registry: ${{ vars.REGISTRY }} - test_dir: ${{ inputs.group_dir }}/${{ matrix.recipe }} + test_dir: ${{ matrix.recipe }} token: ${{ github.token }} diff --git a/preset/.actions.json b/preset/classical-ml/.actions.json similarity index 100% rename from preset/.actions.json rename to preset/classical-ml/.actions.json diff --git a/preset/classical-ml/docker-compose.yaml b/preset/classical-ml/docker-compose.yaml new file mode 100644 index 00000000..c3b99886 --- /dev/null +++ b/preset/classical-ml/docker-compose.yaml @@ -0,0 +1,57 @@ +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# Copyright (c) 2023 Intel Corporation +# +# -*- coding: utf-8 -*- +# + +version: '3' +services: + classical-ml: + build: + args: + BASE_IMAGE: ${BASE_IMAGE:-ubuntu} + BASE_TAG: ${BASE_TAG:-22.04} + DAAL4PY_VERSION: ${DAAL4PY_VERSION:-2024.0.1} + DEVICE: ${DEVICE:-flex} + DPCPP_VER: ${DPCPP_VER:-2024.0.0-49819} + LEVEL_ZERO_DEV_VER: 1.14.0-744~22.04 + LEVEL_ZERO_GPU_VER: 1.3.27191.42-775~22.04 + LEVEL_ZERO_VER: 1.14.0-744~22.04 + ICD_VER: 23.35.27191.42-775~22.04 + IDP_VERSION: ${IDP_VERSION:-2024.0.0} + INTEL_CHANNEL: ${INTEL_CHANNEL:-intel} + MINICONDA_VERSION: ${MINICONDA_VERSION:-latest-Linux-x86_64} + MKL_VER: ${MKL_VER:-2024.0.0-49656} + MODIN_VERSION: ${MODIN_VERSION:-0.24.1} + PYTHON_VERSION: ${PYTHON_VERSION:-3.10} + SCIKIT_VERSION: ${SCIKIT_VERSION:-2024.0.1} + XGBOOST_VERSION: ${XGBOOST_VERSION:-1.7.3} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + no_proxy: '' + context: . + target: classical-ml-jupyter + command: | + bash -c "conda run -n classical-ml python -c 'import sklearn; import xgboost; print(\"SciKit:\", sklearn.__version__, \" XGBoost:\",xgboost.__version__)' && \ + conda run -n classical-ml python -c 'import modin.pandas as pd, modin.config as cfg; cfg.Engine.put(\"Ray\"); df = pd.DataFrame([1]);print(df+1)'" + image: ${REGISTRY}/aiops/mlops-ci:b-${GITHUB_RUN_NUMBER:-0}-classical-ml-${IDP_VERSION:-2024.0.0}-py${PYTHON_VERSION:-3.10} + devices: + - /dev/dri:/dev/dri + environment: + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + network_mode: host + shm_size: 12GB + volumes: + - /dev/dri/by-path:/dev/dri/by-path diff --git a/preset/data-analytics/.actions.json b/preset/data-analytics/.actions.json new file mode 100644 index 00000000..d64e1497 --- /dev/null +++ b/preset/data-analytics/.actions.json @@ -0,0 +1,4 @@ +{ + "PYTHON_VERSION": ["3.9", "3.10"], + "experimental": [true] +} diff --git a/preset/data-analytics/docker-compose.yaml b/preset/data-analytics/docker-compose.yaml new file mode 100644 index 00000000..cfdf1774 --- /dev/null +++ b/preset/data-analytics/docker-compose.yaml @@ -0,0 +1,56 @@ +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# Copyright (c) 2023 Intel Corporation
+#
+# -*- coding: utf-8 -*-
+#
+
+version: '3'
+services:
+  data-analytics:
+    build:
+      args:
+        BASE_IMAGE: ${BASE_IMAGE:-ubuntu}
+        BASE_TAG: ${BASE_TAG:-22.04}
+        DAAL4PY_VERSION: ${DAAL4PY_VERSION:-2024.0.1}
+        DEVICE: ${DEVICE:-flex}
+        DPCPP_VER: ${DPCPP_VER:-2024.0.0-49819}
+        LEVEL_ZERO_DEV_VER: 1.14.0-744~22.04
+        LEVEL_ZERO_GPU_VER: 1.3.27191.42-775~22.04
+        LEVEL_ZERO_VER: 1.14.0-744~22.04
+        ICD_VER: 23.35.27191.42-775~22.04
+        IDP_VERSION: ${IDP_VERSION:-2024.0.0}
+        INTEL_CHANNEL: ${INTEL_CHANNEL:-intel}
+        MINICONDA_VERSION: ${MINICONDA_VERSION:-latest-Linux-x86_64}
+        MKL_VER: ${MKL_VER:-2024.0.0-49656}
+        MODIN_VERSION: ${MODIN_VERSION:-0.24.1}
+        PYTHON_VERSION: ${PYTHON_VERSION:-3.10}
+        SCIKIT_VERSION: ${SCIKIT_VERSION:-2024.0.1}
+        XGBOOST_VERSION: ${XGBOOST_VERSION:-1.7.3}
+        http_proxy: ${http_proxy}
+        https_proxy: ${https_proxy}
+        no_proxy: ''
+      context: .
+      target: data-analytics-jupyter
+    command: >
+      bash -c "conda run -n data-analytics python -c 'import modin.pandas as pd, modin.config as cfg; cfg.Engine.put(\"Ray\"); df = pd.DataFrame([1]);print(df+1)'"
+    image: ${REGISTRY}/aiops/mlops-ci:b-${GITHUB_RUN_NUMBER:-0}-data-analytics-${IDP_VERSION:-2024.0.0}-py${PYTHON_VERSION:-3.10}
+    devices:
+      - /dev/dri:/dev/dri
+    environment:
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
+    network_mode: host
+    shm_size: 12GB
+    volumes:
+      - /dev/dri/by-path:/dev/dri/by-path
diff --git a/preset/deep-learning/.actions.json b/preset/deep-learning/.actions.json
new file mode 100644
index 00000000..d64e1497
--- /dev/null
+++ b/preset/deep-learning/.actions.json
@@ -0,0 +1,4 @@
+{
+    "PYTHON_VERSION": ["3.9", "3.10"],
+    "experimental": [true]
+}
diff --git a/preset/docker-compose.yaml b/preset/deep-learning/docker-compose.yaml
similarity index 60%
rename from preset/docker-compose.yaml
rename to preset/deep-learning/docker-compose.yaml
index 578ae2f8..d9b72af8 100644
--- a/preset/docker-compose.yaml
+++ b/preset/deep-learning/docker-compose.yaml
@@ -14,6 +14,7 @@
 #
 # -*- coding: utf-8 -*-
 #
+
 version: '3'
 services:
   dl-base:
     build:
       args:
@@ -47,7 +48,7 @@ services:
       http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
       no_proxy: ''
-      context: deep-learning
+      context: .
target: deep-learning-jupyter command: | bash -c "conda run -n pytorch-cpu python -c 'import torch;print(torch.__version__);import intel_extension_for_pytorch as ipex;print(ipex.__version__);' && \ @@ -61,31 +62,6 @@ services: shm_size: 12GB volumes: - /dev/dri/by-path:/dev/dri/by-path - - classical-ml: - build: - args: - DAAL4PY_VERSION: ${DAAL4PY_VERSION:-2024.0.1} - MODIN_VERSION: ${MODIN_VERSION:-0.24.1} - SCIKIT_VERSION: ${SCIKIT_VERSION:-2024.0.1} - XGBOOST_VERSION: ${XGBOOST_VERSION:-1.7.3} - context: classical-ml/ - target: classical-ml-jupyter - command: | - bash -c "conda run -n classical-ml python -c 'import sklearn; import xgboost; print(\"SciKit:\", sklearn.__version__, \" XGBoost:\",xgboost.__version__)' && \ - conda run -n classical-ml python -c 'import modin.pandas as pd, modin.config as cfg; cfg.Engine.put(\"Ray\"); df = pd.DataFrame([1]);print(df+1)'" - extends: dl-base - image: ${REGISTRY}/aiops/mlops-ci:b-${GITHUB_RUN_NUMBER:-0}-classical-ml-${IDP_VERSION:-2024.0.0}-py${PYTHON_VERSION:-3.10} - - data-analytics: - build: - context: data-analytics/ - target: data-analytics-jupyter - command: > - bash -c "conda run -n data-analytics python -c 'import modin.pandas as pd, modin.config as cfg; cfg.Engine.put(\"Ray\"); df = pd.DataFrame([1]);print(df+1)'" - extends: classical-ml - image: ${REGISTRY}/aiops/mlops-ci:b-${GITHUB_RUN_NUMBER:-0}-data-analytics-${IDP_VERSION:-2024.0.0}-py${PYTHON_VERSION:-3.10} - deep-learning: build: args: @@ -105,21 +81,3 @@ services: - dl-base extends: dl-base image: ${REGISTRY}/aiops/mlops-ci:b-${GITHUB_RUN_NUMBER:-0}-deep-learning-${IDP_VERSION:-2024.0.0}-py${PYTHON_VERSION:-3.10} - - inference-optimization: - build: - args: - COMPOSE_PROJECT_NAME: ${COMPOSE_PROJECT_NAME:-preset} - context: inference-optimization - target: inference-optimization - command: | - bash -c "conda run -n pytorch-cpu python -c 'import intel_extension_for_pytorch as ipex;print(ipex.__version__);' && \ - conda run -n pytorch-cpu python -c 'import neural_compressor;print(\"Neural Compressor Version:\", neural_compressor.__version__)' && \ - conda run -n pytorch-gpu python -c 'import torch;print(torch.device(\"xpu\"));import intel_extension_for_pytorch as ipex;print(ipex.xpu.is_available());' && \ - conda run -n pytorch-gpu python -c 'import neural_compressor;print(\"Neural Compressor Version:\", neural_compressor.__version__)' && \ - conda run -n tensorflow python -c 'from tensorflow.python.client import device_lib; print(device_lib.list_local_devices())' && \ - conda run -n tensorflow python -c 'import neural_compressor, tf2onnx; print(\"\\nNeural Compressor Version:\", neural_compressor.__version__, \"\\\nTensorFlow2ONNX Version:\", tf2onnx.__version__)'" - depends_on: - - dl-base - extends: dl-base - image: ${REGISTRY}/aiops/mlops-ci:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${IDP_VERSION:-2024.0.0}-py${PYTHON_VERSION:-3.10} diff --git a/preset/inference-optimization/.actions.json b/preset/inference-optimization/.actions.json new file mode 100644 index 00000000..d64e1497 --- /dev/null +++ b/preset/inference-optimization/.actions.json @@ -0,0 +1,4 @@ +{ + "PYTHON_VERSION": ["3.9", "3.10"], + "experimental": [true] +} diff --git a/preset/inference-optimization/docker-compose.yaml b/preset/inference-optimization/docker-compose.yaml new file mode 100644 index 00000000..93028d1b --- /dev/null +++ b/preset/inference-optimization/docker-compose.yaml @@ -0,0 +1,82 @@ +# Unless required by applicable law or agreed to in writing, software +# distributed under the License 
is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# Copyright (c) 2023 Intel Corporation +# +# -*- coding: utf-8 -*- +# + +version: '3' +services: + dl-base: + build: + args: + BASE_IMAGE: ${BASE_IMAGE:-ubuntu} + BASE_TAG: ${BASE_TAG:-22.04} + DEVICE: ${DEVICE:-flex} + DPCPP_VER: ${DPCPP_VER:-2024.0.0-49819} + HOROVOD_VERSION: ${HOROVOD_VERSION:-0.28.1} + ICD_VER: 23.35.27191.42-775~22.04 + IDP_VERSION: ${IDP_VERSION:-2024.0.0} + INTEL_CHANNEL: ${INTEL_CHANNEL:-intel} + IPEX_CPU_VERSION: ${IPEX_CPU_VERSION:-2.0.100} + IPEX_GPU_VERSION: ${IPEX_GPU_VERSION:-2.0.120} + ITEX_VERSION: ${ITEX_VERSION:-2.14} + LEVEL_ZERO_DEV_VER: 1.14.0-744~22.04 + LEVEL_ZERO_GPU_VER: 1.3.27191.42-775~22.04 + LEVEL_ZERO_VER: 1.14.0-744~22.04 + MINICONDA_VERSION: ${MINICONDA_VERSION:-latest-Linux-x86_64} + MKL_VER: ${MKL_VER:-2024.0.0-49656} + NEURAL_COMPRESSOR_VERSION: ${NEURAL_COMPRESSOR_VERSION:-2.3.1} + ONECCL_CPU_VERSION: ${ONECCL_CPU_VERSION:-2.0.0} + ONECCL_GPU_VERSION: ${ONECCL_GPU_VERSION:-2.0.200} + PYTHON_VERSION: ${PYTHON_VERSION:-3.10} + TF_VERSION: ${TF_VERSION:-2.14} + TORCH_CPU_VERSION: ${TORCH_CPU_VERSION:-2.0.1=*cpu*} + TORCH_GPU_VERSION: ${TORCH_GPU_VERSION:-2.0.1=*xpu*} + TORCHVISION_CPU_VERSION: ${TORCHVISION_CPU_VERSION:-0.15.2=*cpu*} + TORCHVISION_GPU_VERSION: ${TORCHVISION_GPU_VERSION:-0.15.2=*xpu*} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + no_proxy: '' + context: ../deep-learning + target: deep-learning-jupyter + command: | + bash -c "conda run -n pytorch-cpu python -c 'import torch;print(torch.__version__);import intel_extension_for_pytorch as ipex;print(ipex.__version__);' && \ + conda run -n tensorflow python -c 'import tensorflow as tf; print(tf.__version__)'" + devices: + - /dev/dri:/dev/dri + environment: + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + network_mode: host + shm_size: 12GB + volumes: + - /dev/dri/by-path:/dev/dri/by-path + + inference-optimization: + build: + args: + COMPOSE_PROJECT_NAME: ${COMPOSE_PROJECT_NAME:-preset} + context: . 
+      target: inference-optimization
+    command: |
+      bash -c "conda run -n pytorch-cpu python -c 'import intel_extension_for_pytorch as ipex;print(ipex.__version__);' && \
+      conda run -n pytorch-cpu python -c 'import neural_compressor;print(\"Neural Compressor Version:\", neural_compressor.__version__)' && \
+      conda run -n pytorch-gpu python -c 'import torch;print(torch.device(\"xpu\"));import intel_extension_for_pytorch as ipex;print(ipex.xpu.is_available());' && \
+      conda run -n pytorch-gpu python -c 'import neural_compressor;print(\"Neural Compressor Version:\", neural_compressor.__version__)' && \
+      conda run -n tensorflow python -c 'from tensorflow.python.client import device_lib; print(device_lib.list_local_devices())' && \
+      conda run -n tensorflow python -c 'import neural_compressor, tf2onnx; print(\"\\nNeural Compressor Version:\", neural_compressor.__version__, \"\\\nTensorFlow2ONNX Version:\", tf2onnx.__version__)'"
+    depends_on:
+      - dl-base
+    extends: dl-base
+    image: ${REGISTRY}/aiops/mlops-ci:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${IDP_VERSION:-2024.0.0}-py${PYTHON_VERSION:-3.10}

From 8a98081ea5e5bdca927fd2df78096281b806cb17 Mon Sep 17 00:00:00 2001
From: Tyler Titsworth
Date: Thu, 4 Jan 2024 08:45:58 -0800
Subject: [PATCH 2/3] OpenSSF Security (#179)

* remove checkbox triggering
* add permissions to all actions
* update review-trigger permissions
* Update review-trigger.yml
* Update review-trigger.yml
---
 .github/pull_request_template.md              |   7 +-
 .github/workflows/bandit.yaml                 |   2 +
 .github/workflows/checkbox-trigger.yml        | 101 ------------------
 .../workflows/container-pipeline-tester.yaml  |   3 +
 .github/workflows/dockerfile-builder.yml      |   3 +
 .github/workflows/review-trigger.yml          |  16 +++
 .github/workflows/serving-mkl-build.yaml      |  94 ----------------
 .github/workflows/unit-test.yaml              |   2 +
 8 files changed, 30 insertions(+), 198 deletions(-)
 delete mode 100644 .github/workflows/checkbox-trigger.yml
 delete mode 100644 .github/workflows/serving-mkl-build.yaml

diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index 3190faec..1f923f4f 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -15,6 +15,7 @@
 
 - [ ] I have tested any changes in container groups locally with `/test-runner/test_runner.py` with all existing tests passing, and I have added new tests where necessary.
-- [ ] Automated Validation (Do not edit, check to begin Validation)
-
-command: /test-group group_dir: my-container-group, runner_label: test-runner
+
+### Automated Testing
+
+Leave a PR comment in the following format: `/test-group group_dir: <group_dir>, runner_label: <runner_label>` to run automated tests.
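For illustration, a trigger comment for the preset recipes from the first patch could look like the following (the values are hypothetical; any group directory whose recipes carry a tests.yaml, and any registered self-hosted runner label, would work):

    /test-group group_dir: preset, runner_label: test-runner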
diff --git a/.github/workflows/bandit.yaml b/.github/workflows/bandit.yaml index bef87b75..2b3fc62e 100644 --- a/.github/workflows/bandit.yaml +++ b/.github/workflows/bandit.yaml @@ -3,6 +3,8 @@ on: pull_request_review: types: [submitted] +permissions: read-all + jobs: scan-bandit: if: github.event.review.state == 'approved' diff --git a/.github/workflows/checkbox-trigger.yml b/.github/workflows/checkbox-trigger.yml deleted file mode 100644 index 8d70ffbd..00000000 --- a/.github/workflows/checkbox-trigger.yml +++ /dev/null @@ -1,101 +0,0 @@ ---- -name: Checkbox Pipeline Validation -on: - pull_request: - types: [edited] -jobs: - check-description-checkbox: - runs-on: [ k8-runners ] - if: contains(github.event.pull_request.body, '- [x] Automated Validation (Do not edit, check to begin Validation)') && contains(github.event.pull_request.body, '/test-group') && !contains(github.event.pull_request.labels.*.name, 'validating') - outputs: - group_dir: ${{ steps.comment-inputs.outputs.group_dir }} - env_overrides: ${{ steps.comment-inputs.outputs.env_overrides }} - runner_label: ${{ steps.comment-inputs.outputs.runner_label }} - steps: - - uses: actions/checkout@v4 - - name: Get Inputs - id: comment-inputs - run: bash .github/utils/val-args.sh "${{ github.event.pull_request.body }}" - - name: Remove PASS Label - if: contains(github.event.pull_request.labels.*.name, 'PASS') - uses: actions/github-script@v7 - with: - result-encoding: string - script: | - github.rest.issues.removeLabel({ - issue_number: context.issue.number, - owner: context.repo.owner, - repo: context.repo.repo, - name: ["PASS"] - }) - - name: Remove FAIL Label - if: contains(github.event.pull_request.labels.*.name, 'FAIL') - uses: actions/github-script@v7 - with: - result-encoding: string - script: | - github.rest.issues.removeLabel({ - issue_number: context.issue.number, - owner: context.repo.owner, - repo: context.repo.repo, - name: ["FAIL"] - }) - - name: Lock - uses: actions/github-script@v7 - with: - script: | - github.rest.issues.addLabels({ - issue_number: context.issue.number, - owner: context.repo.owner, - repo: context.repo.repo, - labels: ['validating'] - }) - container-pipeline-tester: - needs: [ check-description-checkbox ] - if: needs.check-description-checkbox.outputs.group_dir && needs.check-description-checkbox.outputs.runner_label - uses: ./.github/workflows/container-pipeline-tester.yaml - with: - group_dir: ${{ needs.check-description-checkbox.outputs.group_dir }} - env_overrides: ${{ needs.check-description-checkbox.outputs.env_overrides || '' }} - runner_label: ${{ needs.check-description-checkbox.outputs.runner_label }} - secrets: inherit - status-check: - needs: [ check-description-checkbox, container-pipeline-tester ] - if: ${{ always() && needs.check-description-checkbox.result != 'skipped' }} - runs-on: [ k8-runners ] - steps: - - name: Unlock - if: ${{ needs.check-description-checkbox.result != 'skipped' }} - uses: actions/github-script@v7 - with: - result-encoding: string - script: | - github.rest.issues.removeLabel({ - issue_number: context.issue.number, - owner: context.repo.owner, - repo: context.repo.repo, - name: ["validating"] - }) - - name: Set Fail Label - if: ${{ needs.check-description-checkbox.result != 'success' || needs.container-pipeline-tester.result != 'success' }} - uses: actions/github-script@v7 - with: - script: | - github.rest.issues.addLabels({ - issue_number: context.issue.number, - owner: context.repo.owner, - repo: context.repo.repo, - labels: ['FAIL'] - }) - - name: Set Pass 
Label - if: ${{ needs.check-description-checkbox.result == 'success' && needs.container-pipeline-tester.result == 'success' }} - uses: actions/github-script@v7 - with: - script: | - github.rest.issues.addLabels({ - issue_number: context.issue.number, - owner: context.repo.owner, - repo: context.repo.repo, - labels: ['PASS'] - }) - \ No newline at end of file diff --git a/.github/workflows/container-pipeline-tester.yaml b/.github/workflows/container-pipeline-tester.yaml index f1cf8e04..14d565e4 100644 --- a/.github/workflows/container-pipeline-tester.yaml +++ b/.github/workflows/container-pipeline-tester.yaml @@ -1,4 +1,7 @@ name: Container Pipeline Tester + +permissions: read-all + on: workflow_dispatch: inputs: diff --git a/.github/workflows/dockerfile-builder.yml b/.github/workflows/dockerfile-builder.yml index 0e75f08b..8922d1d5 100644 --- a/.github/workflows/dockerfile-builder.yml +++ b/.github/workflows/dockerfile-builder.yml @@ -1,4 +1,7 @@ name: Dockerfile Builder + +permissions: read-all + on: workflow_dispatch: diff --git a/.github/workflows/review-trigger.yml b/.github/workflows/review-trigger.yml index 1bbdad55..5194983b 100644 --- a/.github/workflows/review-trigger.yml +++ b/.github/workflows/review-trigger.yml @@ -3,6 +3,22 @@ name: Review Pipeline Validation on: pull_request_review: types: [submitted, edited] + +permissions: + actions: read + checks: read + contents: read + deployments: read + discussions: read + id-token: write + issues: read + packages: read + pages: read + pull-requests: write + repository-projects: read + security-events: read + statuses: read + jobs: check-review: runs-on: [ k8-runners ] diff --git a/.github/workflows/serving-mkl-build.yaml b/.github/workflows/serving-mkl-build.yaml deleted file mode 100644 index e73e0117..00000000 --- a/.github/workflows/serving-mkl-build.yaml +++ /dev/null @@ -1,94 +0,0 @@ -name: Tensorflow Serving Container Build -on: - workflow_dispatch: - inputs: - base_image_tag: - default: "20.04" - required: false - type: string - bazel_version: - default: "5.4.0" - required: false - type: string - tf_package_version: - default: "2.12.0" - required: false - type: string - tf_bazel_options: - default: "--local_ram_resources=HOST_RAM*0.8 --local_cpu_resources=HOST_CPUS-4" - required: true - type: string - tf_serving_build_options: - description: "Build Option excluding '--copt-=march='" - default: "--config=mkl --config=release --define=build_with_openmp=false" - required: true - type: string - other_version: - required: false - type: string - -jobs: - base-build: - container: - image: ${{ vars.REGISTRY }}/aiops/compose-dev - env: - http_proxy: ${{ secrets.HTTP_PROXY }} - https_proxy: ${{ secrets.HTTPS_PROXY }} - no_proxy: ${{ secrets.NO_PROXY }} - credentials: - username: ${{ secrets.REGISTRY_USER }} - password: ${{ secrets.REGISTRY_TOKEN }} - strategy: - matrix: - tf_package: ["intel-tensorflow", "intel-tensorflow-avx512"] - experimental: [ true ] - fail-fast: false - runs-on: [ aia-devops ] - steps: - - uses: actions/checkout@v4 - with: - submodules: true - set-safe-directory: true - - uses: docker/login-action@v3 - with: - username: ${{ secrets.HUB_USER }} - password: ${{ secrets.HUB_TOKEN }} - - uses: docker/login-action@v3 - with: - registry: ${{ vars.REGISTRY }} - username: ${{ secrets.REGISTRY_USER }} - password: ${{ secrets.REGISTRY_TOKEN }} - - name: Remove Containers - run: docker compose down - working-directory: tensorflow - - if: matrix.tf_package == "intel-tensorflow-avx512" - name: Build Framework Ingredient 
Containers - run: | - BASE_IMAGE_TAG=${{ github.event.inputs.base_image_tag }} \ - BAZEL_VERSION=${{ github.event.inputs.bazel_version }} \ - REGISTRY=${{ vars.REGISTRY }} \ - TF_PACKAGE=${{ matrix.tf_package }} \ - TF_SERVING_BAZEL_OPTIONS=${{ github.event.inputs.tf_bazel_options }} \ - TF_SERVING_BUILD_OPTION="${{ github.event.inputs.tf_serving_build_options }} --copt=-march=skylake-avx512" \ - TF_SERVING_VERSION=${{ github.event.inputs.tf_package_version }} \ - ${{ github.event.input.other_version }} docker compose -f docker-compose-serving.yaml build --no-cache - working-directory: tensorflow - - if: matrix.tf_package == "intel-tensorflow" - name: Build Framework Ingredient Containers - run: | - BASE_IMAGE_TAG=${{ github.event.inputs.base_image_tag }} \ - BAZEL_VERSION=${{ github.event.inputs.bazel_version }} \ - TF_PACKAGE=${{ matrix.tf_package }} \ - TF_SERVING_BAZEL_OPTIONS=${{ github.event.inputs.tf_bazel_options }} \ - TF_SERVING_BUILD_OPTION="${{ github.event.inputs.tf_serving_build_options }} --copt=-march=native" \ - TF_SERVING_VERSION=${{ github.event.inputs.tf_package_version }} \ - ${{ github.event.input.other_version }} docker compose -f docker-compose-serving.yaml build --no-cache - working-directory: tensorflow - - name: Push Framework Ingredient Containers - run: | - BASE_IMAGE_TAG=${{ github.event.inputs.base_image_tag }} \ - TF_PACKAGE=${{ matrix.tf_package }} \ - TF_SERVING_VERSION=${{ github.event.inputs.tf_package_version }} \ - ${{ github.event.input.other_version }} docker compose -f docker-compose-serving.yaml push serving-mkl - working-directory: tensorflow - diff --git a/.github/workflows/unit-test.yaml b/.github/workflows/unit-test.yaml index d5fbd8ac..4fb32341 100644 --- a/.github/workflows/unit-test.yaml +++ b/.github/workflows/unit-test.yaml @@ -4,6 +4,8 @@ on: branches: - develop +permissions: read-all + jobs: unit-test: runs-on: [ test-runner ] From 5af7f2baee4d9da2f736b9fbf3b0bf9d561d1a7c Mon Sep 17 00:00:00 2001 From: Tyler Titsworth Date: Thu, 4 Jan 2024 09:22:48 -0800 Subject: [PATCH 3/3] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 2aac17e4..e80f28a6 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,5 @@ # IntelĀ® AI Containers +[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/8270/badge)](https://www.bestpractices.dev/projects/8270) This repository contains Dockerfiles, scripts, yaml files, Helm charts, etc. used to scale out AI containers with versions of TensorFlow and PyTorch that have been optimized for Intel platforms. Scaling is done with python, Docker, kubernetes, kubeflow, cnvrg.io, Helm, and other container orchestration frameworks for use in the cloud and on-premise.
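As a sketch of how the per-recipe compose files introduced in the first patch can be exercised locally — assuming a hypothetical registry at localhost:5000, an Intel GPU exposed under /dev/dri for the device mapping, and the defaults GITHUB_RUN_NUMBER=0 and PYTHON_VERSION=3.10 declared in the compose file:

    cd preset/classical-ml
    REGISTRY=localhost:5000 docker compose build classical-ml
    REGISTRY=localhost:5000 docker compose run --rm classical-ml

The run step executes the service's command string, i.e. the scikit-learn/XGBoost version check and the Modin-on-Ray smoke test defined in that compose file.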