diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..542a9e41 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,8 @@ +*?Dockerfile +README.md +.vscode +.git +venv.local +tutorials +__pycache__ +/common/docker/*/sample-inputs diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..0ac652ba --- /dev/null +++ b/.gitignore @@ -0,0 +1,8 @@ +# CPython cache directory +__pycache__ + +# Development virtual environment +venv.local + +# CubeIDE installation +en.st-stm32cubeide_1.14.0_19471_20231121_1200_amd64.deb_bundle.sh.zip diff --git a/.python-version b/.python-version new file mode 100644 index 00000000..c8cfe395 --- /dev/null +++ b/.python-version @@ -0,0 +1 @@ +3.10 diff --git a/common/docker/build/README.md b/common/docker/build/README.md new file mode 100644 index 00000000..f6ddc75c --- /dev/null +++ b/common/docker/build/README.md @@ -0,0 +1,14 @@ +# Generic build workload meant to be run from jobcontrol-api + +## Runtime interface + +### Input + + +### Output + + +## Build + + +## jobcontrol-api diff --git a/common/docker/build/api.http b/common/docker/build/api.http new file mode 100644 index 00000000..1cc06423 --- /dev/null +++ b/common/docker/build/api.http @@ -0,0 +1,42 @@ +@base_url = https://dev.stm-vespucci.com +@jobcontrol_api_base_path = /svc/jobcontrol/v1alpha1 +@token = dummy +### +# @name create +POST {{base_url}}{{jobcontrol_api_base_path}}/jobs +Authorization: Bearer {{token}} +Accept: application/json +Content-Type: multipart/form-data; boundary=------------------------327ad9cec9ffd168 + +--------------------------327ad9cec9ffd168 +Content-Disposition: form-data; name="uploadedFile"; filename="cubeai_output" +Content-Type: application/octet-stream + +< ./sample-inputs/object_detection/cubeai-output.zip +--------------------------327ad9cec9ffd168 +Content-Disposition: form-data; name="uploadedFile"; filename="model" +Content-Type: application/octet-stream + +< ./sample-inputs/object_detection/model.tflite 
+--------------------------327ad9cec9ffd168 +Content-Disposition: form-data; name="templateId" + +01HMBW4VR0P1YQXJZEDW04YHJY +--------------------------327ad9cec9ffd168 +Content-Disposition: form-data; name="runtimeInput" + +{ + "job_config": { + "root": "object_detection/scripts/deployment" + }, + "build_config": {} +} + +--------------------------327ad9cec9ffd168-- +### +GET {{base_url}}{{jobcontrol_api_base_path}}/jobs/{{create.response.body.$.id}} +Authorization: Bearer {{token}} +Accept: application/json +### +DELETE {{base_url}}{{jobcontrol_api_base_path}}/jobs/{{create.response.body.$.id}} +Authorization: Bearer {{token}} diff --git a/common/docker/build/build.Dockerfile b/common/docker/build/build.Dockerfile new file mode 100644 index 00000000..f5d41476 --- /dev/null +++ b/common/docker/build/build.Dockerfile @@ -0,0 +1,28 @@ +FROM python:3.10-bullseye as prepare + +ARG cubeide_filename="en.st-stm32cubeide_1.14.0_19471_20231121_1200_amd64.deb_bundle.sh.zip" + +RUN apt-get update + +COPY ./common/docker/build/${cubeide_filename} /run/cubeide.zip +RUN cd /run && unzip cubeide.zip && LICENSE_ALREADY_ACCEPTED=1 sh /run/*cubeide*.sh +RUN rm /run/*cubeide* + +FROM scratch as run + +COPY --from=prepare / / + +RUN useradd --create-home --user-group --uid 9001 --shell /bin/bash stm32ai +USER 9001:9001 + +RUN mkdir -p /tmp/inputs /tmp/outputs + +WORKDIR /stm32ai-modelzoo +COPY --chown=9001:9001 ./requirements.txt . + +RUN pip install --no-cache-dir --user --requirement ./requirements.txt colorama +COPY --chown=9001:9001 . . 
+ +ENV HOME=/home/stm32ai + +ENTRYPOINT [ "/stm32ai-modelzoo/common/docker/build/build.entrypoint.py" ] diff --git a/common/docker/build/build.entrypoint.py b/common/docker/build/build.entrypoint.py new file mode 100755 index 00000000..4d263e9c --- /dev/null +++ b/common/docker/build/build.entrypoint.py @@ -0,0 +1,92 @@ +#!/usr/bin/env python +import yaml +import sys +from os import path, makedirs +import logging +import shutil +import subprocess as sp +import zipfile as zf + +logging.basicConfig(level=logging.DEBUG) + +log = logging.getLogger(__name__) + +# In place dict merge (a is the receiver) +def merge(a: dict, b: dict, path=[]): + for key in b: + if key in a: + if isinstance(a[key], dict) and isinstance(b[key], dict): + merge(a[key], b[key], path + [str(key)]) + # elif a[key] != b[key]: + # raise Exception('Conflict at ' + '.'.join(path + [str(key)])) + else: + a[key] = b[key] + else: + a[key] = b[key] + return a + +CUBEAI_CODEGEN_ZIP_PATH = "/tmp/inputs/cubeai-output.zip" + +def run(): + log.info("Starting up build job...") + + config = {} + + with open("/tmp/inputs/job.json", "rt") as file: + job_config = yaml.safe_load(file) + + logging.debug("Job config: %r", job_config) + + assert "root" in job_config + root = job_config["root"] + assert isinstance(root, str) + + root = path.abspath(root) + assert path.isdir(root), "Configured script root does not exist" + + assert path.isfile(CUBEAI_CODEGEN_ZIP_PATH), "Missing CubeAI codegen archive" + + outputs_path = path.join(root, "outputs") + stm32ai_files_path = path.join(outputs_path, "stm32ai_files") + + makedirs(stm32ai_files_path, exist_ok=True) + + with zf.ZipFile(CUBEAI_CODEGEN_ZIP_PATH, "r") as archive: + archive.extractall(stm32ai_files_path) + + user_config_path = path.join(root, "user_config.yaml") + + with open(user_config_path, "rt") as file: + base_config = yaml.safe_load(file) + + _ = merge(config, base_config) + + with open("/tmp/inputs/config.json", "rt") as file: + override_config = 
yaml.safe_load(file) + + _ = merge(config, override_config) + + config["hydra"] = { "run": { "dir": "outputs" } } + config["model"]["model_path"] = "/tmp/inputs/model.tflite" + + logging.debug("Final config: %r", config) + + with open(user_config_path, "wt") as file: + yaml.safe_dump(config, file) + + _spawned = sp.run( + "python ./deploy.py", + stdout=sys.stdout, + stderr=sys.stderr, + shell=True, + cwd=root, + check=True + ) + + project_folder = path.join(path.dirname(path.dirname(root)), "getting_started") + assert path.isdir(project_folder), "Missing project folder" + + shutil.make_archive("/tmp/outputs/getting_started", "zip", root_dir=project_folder) + +if __name__ == "__main__": + run() diff --git a/common/docker/build/sample-inputs/object_detection/config.json b/common/docker/build/sample-inputs/object_detection/config.json new file mode 100644 index 00000000..9e26dfee --- /dev/null +++ b/common/docker/build/sample-inputs/object_detection/config.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/common/docker/build/sample-inputs/object_detection/cubeai-output.zip b/common/docker/build/sample-inputs/object_detection/cubeai-output.zip new file mode 100644 index 00000000..573b6ea7 Binary files /dev/null and b/common/docker/build/sample-inputs/object_detection/cubeai-output.zip differ diff --git a/common/docker/build/sample-inputs/object_detection/job.json b/common/docker/build/sample-inputs/object_detection/job.json new file mode 100644 index 00000000..6046eea5 --- /dev/null +++ b/common/docker/build/sample-inputs/object_detection/job.json @@ -0,0 +1,3 @@ +{ + "root": "object_detection/scripts/deployment" +} \ No newline at end of file diff --git a/common/docker/build/sample-inputs/object_detection/model.tflite b/common/docker/build/sample-inputs/object_detection/model.tflite new file mode 100644 index 00000000..479ed906 Binary files /dev/null and b/common/docker/build/sample-inputs/object_detection/model.tflite differ diff --git 
a/common/docker/train/README.md b/common/docker/train/README.md new file mode 100644 index 00000000..dda36ff3 --- /dev/null +++ b/common/docker/train/README.md @@ -0,0 +1,31 @@ +# Generic train workload meant to be run from jobcontrol-api + +## Runtime interface + +### Input + +The job expects the following files as input: + +- A json-formatted file at `/tmp/inputs/config.json` representing overrides for the options provided in `user_config.yaml`. The options in this file are merged together with the defaults in the training script folder +- A json-formatted file at `/tmp/inputs/job.json` containing job-specific input values. Most importantly, we expect a property under the key `root` whose value must be a string representing the path to the directory containing the target script. This is meant to be the mechanism enabling generality over all modelzoo examples. We'll henceforth refer to this path as `<root>` +- The training dataset as a zip archive located at `/tmp/inputs/train.zip` +- The (optional) validation dataset as a zip archive located at `/tmp/inputs/valid.zip` +- The (optional) test dataset as a zip archive located at `/tmp/inputs/test.zip` + +The dataset archives must contain the dataset folder at the top level. These archives will each be extracted in a target folder, the path to which will be respectively set as value to the options of `dataset.training_path`, `dataset.validation_path` and `dataset.test_path`. + +### Output + +After completing the training, we expect the folder `outputs` to be created in `<root>`. This folder is archived at `/tmp/outputs/outputs.zip`. + +## Build + +The build context must be set at the root of the repository. From the folder where this file is located, the correct relative path to the build context is `../../..`. + +Assuming the repo root is the current directory, build with + +```sh +docker build --file common/docker/train/train.Dockerfile . 
+``` + +## jobcontrol-api diff --git a/common/docker/train/api.http b/common/docker/train/api.http new file mode 100644 index 00000000..cb1e7d93 --- /dev/null +++ b/common/docker/train/api.http @@ -0,0 +1,69 @@ +@base_url = https://dev.stm-vespucci.com +@jobcontrol_api_base_path = /svc/jobcontrol/v1alpha1 +@token = dummy +### +# @name create +POST {{base_url}}{{jobcontrol_api_base_path}}/jobs +Authorization: Bearer {{token}} +Accept: application/json +Content-Type: multipart/form-data; boundary=------------------------327ad9cec9ffd168 + +--------------------------327ad9cec9ffd168 +Content-Disposition: form-data; name="uploadedFile"; filename="train_data" +Content-Type: application/octet-stream + +< ./sample-inputs/object_detection/train.zip +--------------------------327ad9cec9ffd168 +Content-Disposition: form-data; name="uploadedFile2"; filename="valid_data" +Content-Type: application/octet-stream + +< ./sample-inputs/object_detection/valid.zip +--------------------------327ad9cec9ffd168 +Content-Disposition: form-data; name="templateId" + +01HM2EE60T3HJ4G5MPYK3GVT5D +--------------------------327ad9cec9ffd168 +Content-Disposition: form-data; name="runtimeInput" + +{ + "job_config": { + "root": "object_detection/scripts/training" + }, + "training_config": {} +} + +--------------------------327ad9cec9ffd168-- +### +# @name create +POST {{base_url}}{{jobcontrol_api_base_path}}/jobs +Authorization: Bearer {{token}} +Accept: application/json +Content-Type: multipart/form-data; boundary=------------------------327ad9cec9ffd168 + +--------------------------327ad9cec9ffd168 +Content-Disposition: form-data; name="uploadedFile"; filename="train_data" +Content-Type: application/octet-stream + +< ./sample-inputs/hand_posture/train.zip +--------------------------327ad9cec9ffd168 +Content-Disposition: form-data; name="templateId" + +01HM2EE60T3HJ4G5MPYK3GVT5D +--------------------------327ad9cec9ffd168 +Content-Disposition: form-data; name="runtimeInput" + +{ + "job_config": { 
+ "root": "hand_posture/scripts/training" + }, + "training_config": {} +} + +--------------------------327ad9cec9ffd168-- +### +GET {{base_url}}{{jobcontrol_api_base_path}}/jobs/{{create.response.body.$.id}} +Authorization: Bearer {{token}} +Accept: application/json +### +DELETE {{base_url}}{{jobcontrol_api_base_path}}/jobs/{{create.response.body.$.id}} +Authorization: Bearer {{token}} diff --git a/common/docker/train/sample-inputs/hand_posture/train.zip b/common/docker/train/sample-inputs/hand_posture/train.zip new file mode 100644 index 00000000..130f1cc9 Binary files /dev/null and b/common/docker/train/sample-inputs/hand_posture/train.zip differ diff --git a/common/docker/train/sample-inputs/object_detection/train.zip b/common/docker/train/sample-inputs/object_detection/train.zip new file mode 100644 index 00000000..44648472 Binary files /dev/null and b/common/docker/train/sample-inputs/object_detection/train.zip differ diff --git a/common/docker/train/sample-inputs/object_detection/valid.zip b/common/docker/train/sample-inputs/object_detection/valid.zip new file mode 100644 index 00000000..decdbd2c Binary files /dev/null and b/common/docker/train/sample-inputs/object_detection/valid.zip differ diff --git a/common/docker/train/train.Dockerfile b/common/docker/train/train.Dockerfile new file mode 100644 index 00000000..937c7899 --- /dev/null +++ b/common/docker/train/train.Dockerfile @@ -0,0 +1,22 @@ +FROM python:3.10 + +RUN apt-get update +RUN apt-get install --yes libgl1 + +RUN useradd --create-home --user-group --uid 9001 --shell /bin/bash stm32ai + +USER 9001:9001 + +RUN mkdir -p /tmp/inputs /tmp/outputs \ + /tmp/datasets/train /tmp/datasets/valid /tmp/datasets/test + +WORKDIR /stm32ai-modelzoo + +COPY --chown=9001:9001 ./requirements.txt . +RUN pip install --user --requirement ./requirements.txt + +COPY --chown=9001:9001 . . 
+ +ENV HOME=/home/stm32ai + +ENTRYPOINT [ "/stm32ai-modelzoo/common/docker/train/train.entrypoint.py" ] diff --git a/common/docker/train/train.entrypoint.py b/common/docker/train/train.entrypoint.py new file mode 100755 index 00000000..6d331f53 --- /dev/null +++ b/common/docker/train/train.entrypoint.py @@ -0,0 +1,116 @@ +#!/bin/env python +import yaml +import sys +from os import path +import logging +import shutil +import subprocess as sp + +logging.basicConfig(level=logging.DEBUG) + +log = logging.getLogger(__name__) + +# In place dict merge (a is the receiver) +def merge(a: dict, b: dict, path=[]): + for key in b: + if key in a: + if isinstance(a[key], dict) and isinstance(b[key], dict): + merge(a[key], b[key], path + [str(key)]) + # elif a[key] != b[key]: + # raise Exception('Conflict at ' + '.'.join(path + [str(key)])) + else: + a[key] = b[key] + else: + a[key] = b[key] + return a + + +TRAINING_PATH = "/tmp/datasets/train" +VALIDATION_PATH = "/tmp/datasets/valid" +TEST_PATH = "/tmp/datasets/test" + +TRAIN_ZIP = "/tmp/inputs/train.zip" +VALID_ZIP = "/tmp/inputs/valid.zip" +TEST_ZIP = "/tmp/inputs/test.zip" + +def run(): + log.info("Starting up training job...") + + dataset_config = { + "training_path": TRAINING_PATH, + "validation_path": VALIDATION_PATH, + "test_path": TEST_PATH + } + config = {} + + with open("/tmp/inputs/job.json", "rt") as file: + job_config = yaml.safe_load(file) + + logging.debug("Job config: %r", job_config) + + assert "root" in job_config + + root = job_config["root"] + + assert isinstance(root, str) + + root = path.abspath(root) + + assert path.isdir(root), "Configured script root does not exist" + + user_config_path = path.join(root, "user_config.yaml") + + with open(user_config_path, "rt") as file: + base_config = yaml.safe_load(file) + + _ = merge(config, base_config) + + with open("/tmp/inputs/config.json", "rt") as file: + override_config = yaml.safe_load(file) + + _ = merge(config, override_config) + + + assert 
path.isfile(TRAIN_ZIP), "Training dataset was not provided" + + shutil.unpack_archive(TRAIN_ZIP, TRAINING_PATH, "zip") + + if path.isfile(VALID_ZIP): + shutil.unpack_archive(VALID_ZIP, VALIDATION_PATH, "zip") + else: + logging.info("Validation dataset was not provided") + del dataset_config["validation_path"] + + if path.isfile(TEST_ZIP): + shutil.unpack_archive(TEST_ZIP, TEST_PATH, "zip") + else: + logging.info("Test dataset was not provided") + del dataset_config["test_path"] + + + _ = merge(config, { "dataset": dataset_config }) + + config["hydra"] = { "run": { "dir": "outputs" } } + + logging.debug("Final config: %r", config) + + with open(user_config_path, "wt") as file: + yaml.safe_dump(config, file) + + _spawned = sp.run( + "python ./train.py", + stdout=sys.stdout, + stderr=sys.stderr, + shell=True, + cwd=root, + check=True + ) + + outputs_path = path.join(root, "outputs") + + assert path.isdir(outputs_path), "Missing outputs folder" + + shutil.make_archive("/tmp/outputs/outputs", "zip", root_dir=outputs_path) + +if __name__ == "__main__": + run() diff --git a/hand_posture/scripts/deployment/deploy.py b/hand_posture/scripts/deployment/deploy.py index fcf2346f..f0261d50 100644 --- a/hand_posture/scripts/deployment/deploy.py +++ b/hand_posture/scripts/deployment/deploy.py @@ -32,7 +32,7 @@ from common_deploy import stm32ai_deploy from evaluate import evaluate_model from utils import get_config, mlflow_ini, setup_seed - +from header_file_generator import gen_h_user_file @hydra.main(version_base=None, config_path="", config_name="user_config") def main(cfg: DictConfig) -> None: @@ -43,11 +43,13 @@ def main(cfg: DictConfig) -> None: # Set all seeds setup_seed(42) - # Evaluate model performance / footprints and get c code / Lib - evaluate_model(cfg, c_header=True, c_code=True) + if cfg.model.evaluate: + # Evaluate model performance / footprints and get c code / Lib + evaluate_model(cfg, c_header=True, c_code=True) # Build and Flash Getting Started if 
cfg.stm32ai.serie.upper() == "STM32F4" and cfg.stm32ai.IDE.lower() == "gcc": + gen_h_user_file(cfg, cfg.model.model_path) stm32ai_deploy(cfg, debug=False) else: raise TypeError("Options for cfg.stm32ai.serie and cfg.stm32ai.IDE not supported yet!") diff --git a/hand_posture/scripts/deployment/user_config.yaml b/hand_posture/scripts/deployment/user_config.yaml index 3fdbd861..86c9853e 100644 --- a/hand_posture/scripts/deployment/user_config.yaml +++ b/hand_posture/scripts/deployment/user_config.yaml @@ -16,6 +16,9 @@ model: model_type: {name : CNN2D_ST_HandPosture, version : v1} input_shape: [8, 8, 2] model_path: ../../models/CNN2D_ST_HandPosture/ST_pretrainedmodel_custom_dataset/ST_VL53L8CX_handposture_dataset/CNN2D_ST_HandPosture_8classes/CNN2D_ST_HandPosture_8classes.h5 + + # Reply config extension + evaluate: False stm32ai: c_project_path: ../../getting_started @@ -26,11 +29,11 @@ stm32ai: optimization: balanced footprints_on_target: STM32L4R9I-DISCO path_to_stm32ai: C:/Work/AI/Cube.AI/7.3.0/windows/stm32ai.exe - path_to_cubeIDE: C:/ST/STM32CubeIDE_1.7.0/STM32CubeIDE/stm32cubeide.exe + path_to_cubeIDE: /opt/st/stm32cubeide_1.14.0/stm32cubeide mlflow: uri: ./mlruns hydra: run: - dir: outputs/${now:%Y_%m_%d_%H_%M_%S} \ No newline at end of file + dir: outputs/${now:%Y_%m_%d_%H_%M_%S} diff --git a/hand_posture/scripts/utils/utils.py b/hand_posture/scripts/utils/utils.py index 663438ba..248a9a4e 100644 --- a/hand_posture/scripts/utils/utils.py +++ b/hand_posture/scripts/utils/utils.py @@ -143,22 +143,22 @@ def train(cfg): "{}/{}".format(cfg.general.saved_models_dir, "best_model.h5")) model.save(model_path) - # Evaluate model footprints with cubeai - if cfg.stm32ai.footprints_on_target: - print("[INFO] : Establishing a connection to the Developer Cloud to launch the model benchmark on STM32 target...") - try: - output_analyze = Cloud_analyze(cfg, model_path) - if output_analyze == 0: - raise Exception( - "Connection failed, using local cubeai. 
Link to download https://www.st.com/en/embedded-software/x-cube-ai.html") - except Exception as e: - output_analyze = 0 - print("[FAIL] :", e) - print("[INFO] : Offline benchmark launched...") - benchmark_model(cfg, model_path) - - else: - benchmark_model(cfg, model_path) + # # Evaluate model footprints with cubeai + # if cfg.stm32ai.footprints_on_target: + # print("[INFO] : Establishing a connection to the Developer Cloud to launch the model benchmark on STM32 target...") + # try: + # output_analyze = Cloud_analyze(cfg, model_path) + # if output_analyze == 0: + # raise Exception( + # "Connection failed, using local cubeai. Link to download https://www.st.com/en/embedded-software/x-cube-ai.html") + # except Exception as e: + # output_analyze = 0 + # print("[FAIL] :", e) + # print("[INFO] : Offline benchmark launched...") + # benchmark_model(cfg, model_path) + + # else: + # benchmark_model(cfg, model_path) # Train the model @@ -193,23 +193,23 @@ def train(cfg): training_confusion_matrix(test_ds, best_model, cfg.dataset.class_names) - # Generating C model - if cfg.stm32ai.footprints_on_target: - try: - if output_analyze != 0: - output_benchmark = Cloud_benchmark(cfg, model_path, output_analyze) - if output_benchmark == 0: - raise Exception( - "Connection failed, using local cubeai. Link to download https://www.st.com/en/embedded-software/x-cube-ai.html") - else: - raise Exception( - "Connection failed, using local cubeai. Link to download https://www.st.com/en/embedded-software/x-cube-ai.html") - except Exception as e: - print("[FAIL] :", e) - print("[INFO] : Offline benchmark launched...") - benchmark_model(cfg, model_path) - else: - benchmark_model(cfg, model_path) + # # Generating C model + # if cfg.stm32ai.footprints_on_target: + # try: + # if output_analyze != 0: + # output_benchmark = Cloud_benchmark(cfg, model_path, output_analyze) + # if output_benchmark == 0: + # raise Exception( + # "Connection failed, using local cubeai. 
Link to download https://www.st.com/en/embedded-software/x-cube-ai.html") + # else: + # raise Exception( + # "Connection failed, using local cubeai. Link to download https://www.st.com/en/embedded-software/x-cube-ai.html") + # except Exception as e: + # print("[FAIL] :", e) + # print("[INFO] : Offline benchmark launched...") + # benchmark_model(cfg, model_path) + # else: + # benchmark_model(cfg, model_path) diff --git a/object_detection/getting_started/Application/STM32H747I-Discovery/STM32CubeIDE/CM7/.cproject b/object_detection/getting_started/Application/STM32H747I-Discovery/STM32CubeIDE/CM7/.cproject index c6ae9dd2..1b9ea652 100644 --- a/object_detection/getting_started/Application/STM32H747I-Discovery/STM32CubeIDE/CM7/.cproject +++ b/object_detection/getting_started/Application/STM32H747I-Discovery/STM32CubeIDE/CM7/.cproject @@ -26,6 +26,7 @@