Skip to content

Commit

Permalink
Merge branch 'f/mlonmcu_docker' into 'main'
Browse files Browse the repository at this point in the history
Build mlonmcu docker images

See merge request es/ai/hannah/hannah!384
  • Loading branch information
cgerum committed May 16, 2024
2 parents 6636e3d + c9e43ae commit 827d8d1
Show file tree
Hide file tree
Showing 17 changed files with 1,054 additions and 1,876 deletions.
71 changes: 71 additions & 0 deletions .github/workflows/publish_images.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
##
## Copyright (c) 2024 Hannah contributors.
##
## This file is part of hannah.
## See https://github.com/ekut-es/hannah for further info.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
##     http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##

name: Create and publish a Docker image

# Runs on every push to any branch (both flat names and one level of
# slash-nested branch names, e.g. `f/feature`).
on:
  push:
    branches:
      - '*'
      - '*/*'

# Custom environment variables: the Container registry domain and the name
# of the Docker image this workflow builds (derived from the repository).
env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  build-and-push-image:
    runs-on: ubuntu-latest
    # Permissions granted to the `GITHUB_TOKEN` for the actions in this job.
    permissions:
      contents: read
      packages: write
      attestations: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      # Log in to the Container registry with the account and password that
      # will publish the packages. Once published, the packages are scoped
      # to the account defined here.
      - name: Log in to the Container registry
        uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      # Extract tags and labels to apply to the image. The `id` "meta"
      # lets later steps reference this step's outputs; `images` provides
      # the base name for the generated tags and labels.
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}

      # Build the image from the repository's `Dockerfile` (build context is
      # the repository root) and, on success, push it to GitHub Packages with
      # the tags and labels produced by the "meta" step.
      - name: Build and push Docker image
        id: push
        uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4
        with:
          context: .
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
33 changes: 30 additions & 3 deletions .gitlab-ci.yml
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
##
## Copyright (c) 2023 Hannah contributors.
## Copyright (c) 2024 Hannah contributors.
##
## This file is part of hannah.
## See https://github.com/ekut-es/hannah for further info.
Expand Down Expand Up @@ -44,7 +44,7 @@ before_script:

# install and manage dependencies with poetry
- poetry config installer.max-workers 10
- poetry install -E vision
- poetry install --all-extras

# Show limits
- ulimit -a
Expand Down Expand Up @@ -133,6 +133,33 @@ test_python_310:
rules:
- if: $CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH

# Run the test suite under Python 3.11 with subprocess-aware coverage;
# publishes a Cobertura report so GitLab can display line coverage.
test_python_311:
  stage: test
  image: python:3.11
  script:
    - set -e
    # Start coverage in every Python subprocess (sitecustomize is imported
    # automatically when it is on PYTHONPATH).
    - "echo 'import coverage; coverage.process_startup()' > sitecustomize.py"
    - export PYTHONPATH=$PWD
    - export COVERAGE_PROCESS_START=$PWD/.coveragerc
    - poetry run coverage run --source hannah -m pytest test
    - unset PYTHONPATH
    - unset COVERAGE_PROCESS_START
    - poetry run coverage report
    - poetry run coverage xml
  tags:
    - docker
  interruptible: true
  artifacts:
    reports:
      coverage_report:
        coverage_format: cobertura
        path: coverage.xml
  dependencies: []
  cache:
    paths:
      - .cache/
  rules:
    - if: $CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH


build_docs:
Expand Down Expand Up @@ -215,7 +242,7 @@ deploy to github:
only:
- main
- pub/.*
script:
script:
- env
- ./scripts/git-push [email protected]:ekut-es/hannah.git $CI_COMMIT_REF_NAME
tags:
Expand Down
13 changes: 10 additions & 3 deletions Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -17,9 +17,16 @@
## limitations under the License.
##

FROM ubuntu:22.04
ARG target="hannah"

RUN apt update -y && apt -y install git mesa-utils python3-pip python3-dev libblas-dev liblapack-dev libsndfile1-dev libsox-dev cmake ninja-build curl build-essential python-is-python3
FROM ubuntu:22.04 as hannah

FROM tumeda/mlonmcu-bench:latest as mlonmcu

FROM ${target}


RUN apt-get update -y && apt-get -y install git mesa-utils python3-pip python3-dev libblas-dev liblapack-dev libsndfile1-dev libsox-dev cmake ninja-build curl build-essential python-is-python3


# Install poetry using recommended method
Expand All @@ -33,4 +40,4 @@ COPY poetry.lock pyproject.toml /deps/

# Install dependencies
RUN poetry config virtualenvs.create false \
&& poetry install --no-interaction --no-ansi -E vision --no-root
&& poetry install --no-interaction --no-ansi --all-extras --no-root
7 changes: 2 additions & 5 deletions doc/deployment/tensorrt.md
Original file line number Diff line number Diff line change
Expand Up @@ -22,11 +22,8 @@ For deployment on NVIDIA targets we support TensorRT backends.
Currently the TensorRT backend always compiles for the first GPU of the local system.

## Installation
The tensorrt module in the poetry shell needs to be installed seperately via pip:
```
poetry shell
pip install tensorrt
```

TensorRT is unfortunately not compatible with poetry-managed installation and must be installed separately: `pip install tensorrt`

## Configuration

Expand Down
1 change: 0 additions & 1 deletion external/hannah-tvm
Submodule hannah-tvm deleted from 2d7b7d
33 changes: 21 additions & 12 deletions hannah/models/ai8x/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
Linear,
MaxPooling,
Relu,
Requantize,
)
from hannah.nas.parameters.parameters import (
CategoricalParameter,
Expand Down Expand Up @@ -65,64 +66,71 @@ def dynamic_depth(*exits, switch):
return ChoiceOp(*exits, switch=switch)()


@scope
def batch_norm(input):
    """Attach a BatchNorm op to ``input``.

    Creates the per-channel running-statistics tensors that BatchNorm
    requires (one entry per channel, taken from axis 1 of the input shape)
    and applies the op.
    """
    n_chans = input.shape()[1]
    running_mu = Tensor(name="running_mean", shape=(n_chans,), axis=("c",))
    running_std = Tensor(name="running_std", shape=(n_chans,), axis=("c",))
    return BatchNorm()(input, running_mu, running_std)


@scope
def block(input):
expand_ratio = IntScalarParameter(1, 3, name="expand_ratio")
reduce_ratio = IntScalarParameter(1, 3, name="reduce_ratio")
def quantize_weight(weight):
    """Apply a Requantize op to a weight tensor and return the result."""
    return Requantize()(weight)


@scope
def block(input, expand_ratio, reduce_ratio):
    """One expand/reduce conv block with quantized weights.

    Expands the channel count by ``expand_ratio`` with a 1x1 convolution,
    then reduces it by ``reduce_ratio`` with a 3x3 convolution; each conv is
    followed by batch norm and ReLU. Weights are passed through
    ``quantize_weight`` before use.
    """
    max_pool1 = max_pool(input, kernel_size=2, stride=2, padding=1)
    identity1 = identity(input)
    # Downsampling choice currently disabled — the block feeds the input
    # straight into the first convolution.
    # conv_in = choice(input, [max_pool1, identity1])

    conv_in = input

    # 1x1 expansion conv: O = C_in * expand_ratio, I = C_in.
    weight1 = Tensor(
        "w1",
        (Int(conv_in.shape()[1] * expand_ratio), Int(conv_in.shape()[1]), 1, 1),
        axis=["O", "I", "kH", "kW"],
        grad=True,
    )
    weight1_quantized = quantize_weight(weight1)
    conv1 = conv2d(conv_in, weight1_quantized)
    bn1 = batch_norm(conv1)
    relu1 = relu(bn1)

    # 3x3 reduction conv: O = C / reduce_ratio, I = C.
    weight2 = Tensor(
        "w2",
        (Int(relu1.shape()[1] / reduce_ratio), relu1.shape()[1], 3, 3),
        axis=["O", "I", "kH", "kW"],
        grad=True,
    )
    weight2_quantized = quantize_weight(weight2)
    conv2 = conv2d(relu1, weight2_quantized)
    bn2 = batch_norm(conv2)
    relu2 = relu(bn2)

    return relu2


def search_space(name, input, num_classes=10, max_blocks=9):
expand_ratio = IntScalarParameter(1, 3, name="expand_ratio")
reduce_ratio = IntScalarParameter(1, 3, name="reduce_ratio")

depth = IntScalarParameter(0, max_blocks, name="depth")
input_channels = IntScalarParameter(8, 64, name="input_channels", step_size=8)

weight = Tensor(
"w1",
(input.shape()[1], input_channels, 3, 3),
(input_channels, input.shape()[1], 3, 3),
axis=["O", "I", "kH", "kW"],
grad=True,
)
weight = quantize_weight(weight)

out = conv2d(input, weight)

blocks = []
for i in range(max_blocks + 1):
out = block(out)
out = block(out, expand_ratio.new(), reduce_ratio.new())
blocks.append(out)

out = dynamic_depth(*blocks, switch=depth)
Expand All @@ -133,6 +141,7 @@ def search_space(name, input, num_classes=10, max_blocks=9):
axis=["O", "I"],
grad=True,
)
linear_weight = quantize_weight(linear_weight)
out = linear(out, linear_weight)

return out
Loading

0 comments on commit 827d8d1

Please sign in to comment.