diff --git a/.env b/.env index 5840847..4f83cff 100644 --- a/.env +++ b/.env @@ -1,12 +1,6 @@ -PORT = 1212 - TASK_EXECUTOR_HOST=http://localhost TASK_EXECUTOR_PORT=2591 IPFS_NODE_URL=http://localhost:5001 PYMECA_ACTOR_SERVER_HOST=http://localhost PYMECA_ACTOR_SERVER_PORT=9999 - -# deprecating -REGISTRATION_SERVICE_API_URL=http://localhost:7000 -TRANSACTION_SERVICE_API_URL=http://localhost:8000 diff --git a/.env.example b/.env.example index c0e3582..da68eb1 100644 --- a/.env.example +++ b/.env.example @@ -1,12 +1,6 @@ -# PORT=1212 - # TASK_EXECUTOR_HOST=http://localhost # TASK_EXECUTOR_PORT=2591 # IPFS_NODE_URL=http://localhost:5001 # PYMECA_ACTOR_SERVER_HOST=http://localhost # PYMECA_ACTOR_SERVER_PORT=9999 - -# # deprecating -# REGISTRATION_SERVICE_API_URL=http://localhost:7000 -# TRANSACTION_SERVICE_API_URL=http://localhost:8000 diff --git a/docs/source/_static/developer_guide.md b/docs/source/_static/developer_guide.md index 2a97502..b91dd1d 100644 --- a/docs/source/_static/developer_guide.md +++ b/docs/source/_static/developer_guide.md @@ -54,7 +54,6 @@ Note: The main process executes the entry point script defined in the package.json (as 'main'). It manages the application's lifecycle, and has native access to desktop functionality, such as menus, dialogs, and tray icons. More specifically, the main process for this app is responsible for the following: - Manage lifecycle events (e.g. 'ready', 'closed'. Refer to the official docs for a complete list: *https://www.electronjs.org/docs/latest/api/app*) - Create BrowserWindows (see *Renderer* below), Menu, Tray. -- Communicating with the MECA SDK to handle task processing via sockets (see ```sdk\README.md```). - Communicating with the Docker Daemon with Dockerode to orchestrate the execution of Task Executor containers during Node.js runtime (see ```task_executor\README.md```). - Communicating with IPFS to upload and download tasks. - In Linux, the default download directory is `/home/$USER/.MECA/ipfsFiles` diff --git a/sdk/.gitignore b/sdk/.gitignore deleted file mode 100644 index cd916e7..0000000 --- a/sdk/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -.env -/build diff --git a/sdk/README.md b/sdk/README.md deleted file mode 100644 index 2e370f7..0000000 --- a/sdk/README.md +++ /dev/null @@ -1,70 +0,0 @@ -# SDK for developers using MECAnywhere - -- "Developers" in this document refers to developers who want to offload their tasks to MECAnywhere. - -The task execution flow is as such - - -Therefore, developers only have 2 things to do: -1. Use the SDK in their application to connect to MECAnywhere desktop app. -2. Build a containerized task and upload it to IPFS via our CLI. - -## Python SDK - -This SDK only provides offloading asynchronously and receiving results via callback. To do synchronous offloading, you may use the `join` function immediately after an offload. - -#### async def initiate_connection([timeout]) -> None - Starts a socket connection to the local MECA desktop app. - - - timeout: int (optional) - -#### async def offload_task(task_id, container_ref, data, callback, resource, runtime) -> str - Sends a task input to the local MECA desktop app that helps to run the task on a remote MECA host. - The task is the containerized application you have uploaded to the container repository that returns some result given the input data. 
- - - task_id: str - identifier for the task to correlate with the callback results - - container_ref: str - image name of the task for the data to be processed - - data: str - - callback: Callable[[str], None] - function to be called when the results are received - - resource: dict (optional) - resource requirements for the task - - runtime: str (optional) - runtime environment for the task - -#### async def disconnect() -> None - Closes the socket connection to the local MECA desktop app. -#### async def join(task_timeout, join_timeout) -> None - Waits for all tasks to finish. - - - task_timeout: int - timeout for each task - - join_timeout: int - timeout for all tasks - -### Test (with KNN example) - -> You may skip step 1 if you uploaded your image to a public container repository or have it built in your local device. - -1. Go to example_containers and build dockerized task with - -``` -docker build -t example_containers/knn -``` - -2. Start MECAnywhere desktop app. -3. Go to knn.py and change the container_ref to the tag you just built. -Start virtual env, install requirements and run knn.py. - -``` -pip install torch==2.0.1+cpu --index-url https://download.pytorch.org/whl/cpu -pip install -r requirements.txt -python knn.py -``` - -### Examples - -This folder contains examples of how to use the SDK to offload tasks. The example_containers folder contains example tasks that can be called and run by the host. - -- [basic_offload.py](basic_offload.py) uses the [sampleserver](../task_executor/sample/) container. - -- [knn.py](knn.py) uses the [knn](example_containers/knn) container. - -## JavaScript - -Not in use diff --git a/sdk/basic_offload.py b/sdk/basic_offload.py deleted file mode 100644 index 0fd8c45..0000000 --- a/sdk/basic_offload.py +++ /dev/null @@ -1,51 +0,0 @@ -from py import meca_api -from dotenv import load_dotenv -import asyncio - - -task_corr_ids = dict() -results = dict() -NUMBER_OF_TASKS = 2 - -def callback_on_receive(task_id, status, response, err, corr_id): - if status == 1: - results[task_id] = response - print("Received result for task", task_id, ":", response) - else: - print(err, "for task: ", task_id, "corr_id:", corr_id) - results[task_id] = response - -async def main(): - load_dotenv() - try: - await meca_api.initiate_connection() - except Exception as e: - await meca_api.disconnect() - return - - for i in range(NUMBER_OF_TASKS): - await meca_api.offload_task( - str(i), - 'sampleserver:latest', - "{\"name\": \"meca dev " + str(i) + "\"}", - callback=callback_on_receive, - resource={ - "cpu": 1, - "memory": 256 - }, - use_gpu=True, - gpu_count=1 - ) - - await meca_api.join() - - print("All results received:", results) - print("Result count:", len(results)) - await meca_api.disconnect() - -if __name__ == '__main__': - try: - asyncio.run(main()) - except KeyboardInterrupt: - print("Program closed by user.") - asyncio.run(meca_api.disconnect()) diff --git a/sdk/example_containers/.gitignore b/sdk/example_containers/.gitignore deleted file mode 100644 index d455b0a..0000000 --- a/sdk/example_containers/.gitignore +++ /dev/null @@ -1,136 +0,0 @@ -/out/ -output.png - -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -pip-wheel-metadata/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from 
a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. -*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.nox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -*.py,cover -.hypothesis/ -.pytest_cache/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 -db.sqlite3-journal - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# IPython -profile_default/ -ipython_config.py - -# pyenv -.python-version - -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. -#Pipfile.lock - -# PEP 582; used by e.g. github.com/David-OConnor/pyflow -__pypackages__/ - -# Celery stuff -celerybeat-schedule -celerybeat.pid - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ -.dmypy.json -dmypy.json - -# Pyre type checker -.pyre/ - -tmp/ -.idea -result.png diff --git a/sdk/example_containers/README.md b/sdk/example_containers/README.md deleted file mode 100644 index a101bf2..0000000 --- a/sdk/example_containers/README.md +++ /dev/null @@ -1,89 +0,0 @@ -# Example task containers - -This folder contains example task containers showing how to build your own task container. Each folder must contain all of the files listed in the structure below. - -## Structure - -The structure of the task container is as follows: - -``` -- template - - Dockerfile - - src - - description.txt - - name.txt - - example_input.bin - - example_output.bin - - config.json - -``` - -## Building a task container - -### 1. App that handles requests - -The app will receive a post request at `localhost:8080\` and return the result. The input format is any json object and the output format is a string. After the app starts up, print `"meca-init-done"`. - -For example, we create a flask app: - -```python - from flask import Flask, request - import json - - app = Flask(__name__) - - @app.route('/', methods=['POST']) - def hello_world(): - data = request.json - return data['input'] - - print("meca-init-done") -``` - -### 2. Dockerfile - -Expose port 8080 to the app of the request handler. - -Following the flask app created above, the Dockerfile may look like this: - -```dockerfile -FROM python:3.9-slim - -WORKDIR /app - -COPY flask_app.py flask_app.py - -EXPOSE 8080 - -CMD ["python", "-m", "flask", "--app", "flask_app.py", "run", "--host=0.0.0.0", "--port=8080"] -``` - -### 3. config.json fields - -The `config.json` file contains the configuration of the task with the following default fields: - -DEFAULTS: - -```json -{ - "resource": { - "cpu": 1, - "mem": 128, - "use_gpu": false, - "gpu_count": 0 - } -} -``` - -field | description ---- | --- -`resource.cpu` | The number of CPUs to allocate to the task. This field will limit the users that can run the task to those with the same or more CPUs available. -`resource.mem` | The amount of memory to allocate to the task in MB. 
This field will limit the users that can run the task to those with the same or more memory available. -`resource.use_gpu` | Whether the task requires a GPU to run. -`resource.gpu_count` | The number of GPUs to allocate to the task, if `use_gpu` is true. - -### 4. Using and uploading the container - -Build the image and push it to IPFS via the MECA CLI as a task developer. You will be compensated with the task fee that you list for each task executed by a MECA client. - -Test your task folder structure by running the test function in the MECA CLI. diff --git a/sdk/example_containers/knn/Dockerfile b/sdk/example_containers/knn/Dockerfile deleted file mode 100644 index 53c7577..0000000 --- a/sdk/example_containers/knn/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM python:3.9-slim - -WORKDIR /app - -COPY /src/requirements.txt requirements.txt - -RUN pip install torch==2.0.1+cpu --index-url https://download.pytorch.org/whl/cpu -RUN pip install -r requirements.txt - -COPY /src/app.py app.py - -EXPOSE 8080 - -CMD ["python", "-m", "flask", "--app", "app.py", "run", "--host=0.0.0.0", "--port=8080"] diff --git a/sdk/example_containers/knn/config.json b/sdk/example_containers/knn/config.json deleted file mode 100644 index 2fe573f..0000000 --- a/sdk/example_containers/knn/config.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "resource": { - "cpu": 2, - "mem": 1024 - } -} diff --git a/sdk/example_containers/knn/description.txt b/sdk/example_containers/knn/description.txt deleted file mode 100644 index cf59838..0000000 --- a/sdk/example_containers/knn/description.txt +++ /dev/null @@ -1 +0,0 @@ -This app serves HTTP requests to run K-nearest neighbors algorithm on a chunked dataset in a distributed fashion. diff --git a/sdk/example_containers/knn/example_input.bin b/sdk/example_containers/knn/example_input.bin deleted file mode 100644 index bdef437..0000000 --- a/sdk/example_containers/knn/example_input.bin +++ /dev/null @@ -1 +0,0 @@ -{"dataset":[[1,2],[3,4],[5,6],[7,8],[9,10]],"point":[2,3],"k":2,"num_processes":2} diff --git a/sdk/example_containers/knn/example_output.bin b/sdk/example_containers/knn/example_output.bin deleted file mode 100644 index ed22a6f..0000000 --- a/sdk/example_containers/knn/example_output.bin +++ /dev/null @@ -1 +0,0 @@ -[0, 1] diff --git a/sdk/example_containers/knn/name.txt b/sdk/example_containers/knn/name.txt deleted file mode 100644 index c9ce200..0000000 --- a/sdk/example_containers/knn/name.txt +++ /dev/null @@ -1 +0,0 @@ -knn diff --git a/sdk/example_containers/knn/src/app.py b/sdk/example_containers/knn/src/app.py deleted file mode 100644 index a669913..0000000 --- a/sdk/example_containers/knn/src/app.py +++ /dev/null @@ -1,72 +0,0 @@ -import json -import torch -import multiprocessing -from flask import Flask, request - -app = Flask(__name__) - -@app.route('/', methods=['POST']) -def entry_point(): - input = request.json - - dataset = torch.Tensor(input.get('dataset')) - point = torch.Tensor(input.get('point')) - k = input.get('k') - num_processes = input.get('num_processes') - - print("Number of processes:", num_processes) - print("Number of nearest neighbors:", k) - - neighbors = distributed_knn(point, dataset, k, num_processes) - - print("Nearest neighbors indices:", neighbors) - print("Nearest neighbors data points:", dataset[neighbors]) - - return json.dumps(neighbors) - - -def euclidean_distance(a, b): - return torch.sqrt(torch.sum((a - b) ** 2)) - -def find_k_nearest_neighbors(point, dataset_chunk, k): - distances = [(index, euclidean_distance(point, data_point)) 
for index, data_point in enumerate(dataset_chunk)] - distances.sort(key=lambda x: x[1]) - return [index for index, _ in distances[:k]] - -def process_data_chunk(point, dataset_chunk, k, chunk_start_index, result_queue): - neighbors = find_k_nearest_neighbors(point, dataset_chunk, k) - adjusted_neighbors = [index + chunk_start_index for index in neighbors] - result_queue.put(adjusted_neighbors) - -def distributed_knn(point, dataset, k, num_processes): - num_data_points = len(dataset) - chunk_size = num_data_points // num_processes - - manager = multiprocessing.Manager() - result_queue = manager.Queue() - - processes = [] - for i in range(num_processes): - start_index = i * chunk_size - end_index = (i + 1) * chunk_size if i < num_processes - 1 else num_data_points - sliced_dataset = dataset[start_index:end_index].clone().detach() - process = multiprocessing.Process(target=process_data_chunk, args=(point, sliced_dataset, k, start_index, result_queue)) - processes.append(process) - - for process in processes: - process.start() - - for process in processes: - process.join() - - neighbors_list = [] - while not result_queue.empty(): - neighbors_list.extend(result_queue.get()) - - distances = [(index, euclidean_distance(point, dataset[index])) for index in neighbors_list] - distances.sort(key=lambda x: x[1]) - overall_neighbors = [index for index, _ in distances[:k]] - - return overall_neighbors - -print("meca-init-done") diff --git a/sdk/example_containers/knn/src/requirements.txt b/sdk/example_containers/knn/src/requirements.txt deleted file mode 100644 index 1e595cb..0000000 --- a/sdk/example_containers/knn/src/requirements.txt +++ /dev/null @@ -1,9 +0,0 @@ -blinker==1.6.2 -click==8.1.6 -colorama==0.4.6 -Flask==2.3.2 -itsdangerous==2.1.2 -Jinja2==3.1.2 -MarkupSafe==2.1.3 -numpy==1.25.2 -Werkzeug==2.3.6 diff --git a/sdk/example_containers/python-template/Dockerfile b/sdk/example_containers/python-template/Dockerfile deleted file mode 100644 index a389098..0000000 --- a/sdk/example_containers/python-template/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM python:3.9.0 -WORKDIR /app -COPY /src . 
-RUN pip install -r requirements.txt -EXPOSE 8080 -CMD ["flask", "run", "--host=0.0.0.0", "--port=8080"] - diff --git a/sdk/example_containers/python-template/config.json b/sdk/example_containers/python-template/config.json deleted file mode 100644 index 3cf832b..0000000 --- a/sdk/example_containers/python-template/config.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "resource": { - "cpu": 1, - "mem": 128 - } -} diff --git a/sdk/example_containers/python-template/description.txt b/sdk/example_containers/python-template/description.txt deleted file mode 100644 index 6793fb5..0000000 --- a/sdk/example_containers/python-template/description.txt +++ /dev/null @@ -1 +0,0 @@ -description of the task diff --git a/sdk/example_containers/python-template/example_input.bin b/sdk/example_containers/python-template/example_input.bin deleted file mode 100644 index 756ef39..0000000 --- a/sdk/example_containers/python-template/example_input.bin +++ /dev/null @@ -1,2 +0,0 @@ -{"json": "example input for the task"} - diff --git a/sdk/example_containers/python-template/example_output.bin b/sdk/example_containers/python-template/example_output.bin deleted file mode 100644 index b5a9f83..0000000 --- a/sdk/example_containers/python-template/example_output.bin +++ /dev/null @@ -1 +0,0 @@ -{"json": "example input for the task"} diff --git a/sdk/example_containers/python-template/name.txt b/sdk/example_containers/python-template/name.txt deleted file mode 100644 index c6cbceb..0000000 --- a/sdk/example_containers/python-template/name.txt +++ /dev/null @@ -1 +0,0 @@ -name of the task diff --git a/sdk/example_containers/python-template/src/app.py b/sdk/example_containers/python-template/src/app.py deleted file mode 100644 index 596fa41..0000000 --- a/sdk/example_containers/python-template/src/app.py +++ /dev/null @@ -1,12 +0,0 @@ -import json -from flask import Flask, request - -app = Flask(__name__) - -@app.route('/', methods=['POST']) -def entry_point(): - input = request.json - - return json.dumps(input) - -print("meca-init-done") diff --git a/sdk/example_containers/python-template/src/requirements.txt b/sdk/example_containers/python-template/src/requirements.txt deleted file mode 100644 index e3e9a71..0000000 --- a/sdk/example_containers/python-template/src/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -Flask diff --git a/sdk/example_containers/sgx-task/Dockerfile b/sdk/example_containers/sgx-task/Dockerfile deleted file mode 100644 index 747742f..0000000 --- a/sdk/example_containers/sgx-task/Dockerfile +++ /dev/null @@ -1,148 +0,0 @@ -FROM ubuntu:20.04 as sgx_dcap_2.14_1.11 - -USER root - -# avoid tzdata interactive config during apt install. 
-ENV TZ=Asia/Singapore -RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone - -RUN apt-get update && apt-get install -y \ - build-essential \ - git \ - wget - -# install sdk -RUN wget https://download.01.org/intel-sgx/sgx-dcap/1.11/linux/distro/ubuntu20.04-server/sgx_linux_x64_sdk_2.14.100.2.bin \ - && chmod 755 sgx_linux_x64_sdk_2.14.100.2.bin \ - && ./sgx_linux_x64_sdk_2.14.100.2.bin --prefix=/opt/intel \ - source /opt/intel/sgxsdk/environment - -# install dcap libs -RUN echo 'deb [arch=amd64] https://download.01.org/intel-sgx/sgx_repo/ubuntu focal main' | tee /etc/apt/sources.list.d/intel-sgx.list > /dev/null \ - && wget -O - https://download.01.org/intel-sgx/sgx_repo/ubuntu/intel-sgx-deb.key | apt-key add - \ - && apt update - -RUN apt install -y \ - libsgx-epid=2.14.100.2-focal1 \ - libsgx-headers=2.14.100.2-focal1 \ - libsgx-urts=2.14.100.2-focal1 \ - libsgx-launch=2.14.100.2-focal1 \ - libsgx-ae-le=2.14.100.2-focal1 \ - libsgx-ae-pce=2.14.100.2-focal1 \ - libsgx-ae-qe3=1.11.100.2-focal1 \ - libsgx-ae-qve=1.11.100.2-focal1 \ - libsgx-ae-epid=2.14.100.2-focal1 \ - libsgx-qe3-logic=1.11.100.2-focal1 \ - libsgx-pce-logic=1.11.100.2-focal1 \ - libsgx-enclave-common=2.14.100.2-focal1 \ - sgx-aesm-service=2.14.100.2-focal1 \ - libsgx-quote-ex=2.14.100.2-focal1 \ - libsgx-dcap-ql=1.11.100.2-focal1 \ - libsgx-dcap-quote-verify=1.11.100.2-focal1 \ - # workload system additional packages - libsgx-quote-ex-dev=2.14.100.2-focal1 \ - libsgx-dcap-ql-dev=1.11.100.2-focal1 \ - libsgx-dcap-quote-verify-dev=1.11.100.2-focal1 \ - # qpl needed for verification - libsgx-dcap-default-qpl=1.11.100.2-focal1 \ - # runtime - libsgx-uae-service=2.14.100.2-focal1 - -# install sgx-sdk mitigation tools (necessary to build sgxssl) -RUN apt-get install -y \ - build-essential \ - ocaml \ - ocamlbuild \ - automake \ - autoconf \ - libtool \ - wget \ - python-is-python3 \ - libssl-dev \ - git \ - cmake \ - perl \ - libssl-dev \ - libcurl4-openssl-dev \ - protobuf-compiler \ - libprotobuf-dev \ - debhelper \ - reprepro \ - unzip \ - && cd / && git clone https://github.com/intel/linux-sgx.git && cd linux-sgx && git checkout sgx_2.14 && make preparation - -RUN ls /linux-sgx/external/toolset/ubuntu20.04/ && cp /linux-sgx/external/toolset/ubuntu20.04/* /usr/local/bin && cd / && ls /usr/local/bin - -# install sgxssl -RUN wget https://github.com/intel/intel-sgx-ssl/archive/refs/tags/lin_2.14_1.1.1k.tar.gz \ - && tar -xzvf lin_2.14_1.1.1k.tar.gz \ - && cd intel-sgx-ssl-lin_2.14_1.1.1k/openssl_source \ - && wget https://www.openssl.org/source/openssl-1.1.1w.tar.gz \ - && cd ../Linux && make all && make install && cd ../ - -# should remove the sources -RUN rm -rf /linux-sgx && rm -rf /lin_2.14_1.1.1k.tar.gz /intel-sgx-ssl-lin_2.14_1.1.1k - -# image size: 1.94GB - -FROM sgx_dcap_2.14_1.11 as server_builder - -USER root - -ENV SGX_SDK=/opt/intel/sgxsdk - -# avoid tzdata interactive config during apt install. -ENV TZ=Asia/Singapore -RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone - -RUN apt-get update && apt-get install -y \ - # install deps (libuv) - cmake \ - clang - -# # without -i the bash will not load .bashrc. With -i docker complains but we can ignore it. -# SHELL ["/bin/bash", "--login", "-i", "-c"] -# ENV NODE_VERSION=16.15.0 -# ENV NVM_DIR /tmp/nvm -# WORKDIR $NVM_DIR -# RUN wget -qO- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.5/install.sh | bash \ -# && . 
$NVM_DIR/nvm.sh \ -# && nvm install $NODE_VERSION \ -# && nvm alias default $NODE_VERSION \ -# && nvm use default -# ENV NODE_PATH $NVM_DIR/v$NODE_VERSION/lib/node_modules -# ENV PATH $NVM_DIR/v$NODE_VERSION/bin:$PATH - -# a cleaner version of the above -ENV NVM_DIR /usr/local/nvm -ENV NODE_VERSION v16.15.0 -RUN mkdir -p /usr/local/nvm -RUN wget -qO- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.1/install.sh | bash -RUN /bin/bash -c "source $NVM_DIR/nvm.sh && nvm install $NODE_VERSION && nvm use --delete-prefix $NODE_VERSION" -ENV NODE_PATH $NVM_DIR/versions/node/$NODE_VERSION/bin -ENV PATH $NODE_PATH:$PATH - -RUN node --version && npm --version - -COPY ./src /sgx-task -RUN cd /sgx-task/ && make mrproper && make deps \ - && cd worker && make mrproper && make all && make install && cd ..\ - && cd server && make mrproper && make all && make install && cd .. - -# image -FROM sgx_dcap_2.14_1.11 - -USER root - -# # default libuv thread pool size to 8. -# ARG UV_THREADPOOL_SIZE=8 -# RUN echo UV_THREADPOOL_SIZE=${UV_THREADPOOL_SIZE} -# ENV UV_THREADPOOL_SIZE ${UV_THREADPOOL_SIZE} -# sequential request execution -ENV UV_THREADPOOL_SIZE 1 - -COPY --from=server_builder /sgx-task/server/install /install - -EXPOSE 8080 - -CMD SGX_AESM_ADDR=1 /install/bin/server /install/lib/Worker_Enclave.signed.so diff --git a/sdk/example_containers/sgx-task/README.md b/sdk/example_containers/sgx-task/README.md deleted file mode 100644 index 8d3ed16..0000000 --- a/sdk/example_containers/sgx-task/README.md +++ /dev/null @@ -1,61 +0,0 @@ -# sgx-task-worker - -SGX task worker sample: It creates a server that conforms to the meca requirement and respond to users input `name` with `hello name`. - -## setup - -Environment setup - -Follow the DCAP setup guides to register the worker machine to Intel Platform Certification Service and cache its attestation collateral at the meca designated Platform Certification Caching Service server. - -The sgx-task server shall be able to prepare an attestation report during RA. - -## build - -To build the sample app, under `sgx-task/` run - -```sh -docker build -t sgx-task . -``` - -To test the server - -```sh -docker run -it -p 2333:8080 --device /dev/sgx_enclave:/dev/sgx/enclave -v /var/run/aesmd:/var/run/aesmd sgx-task -``` - -issue a request (now requires the client to perform encryption decryption) - - - -```sh -cd src/client_test -make clean && make all -./client_ec_test -``` - -## example input and output - -These are the input packaged and response received with the `client_test`. - -The example input: - -```json -{ - "value" : "e2668b74921293303890963bc088b4478d827ea42d2d2d2d2d424547494e205055424c4943204b45592d2d2d2d2d0a4d485977454159484b6f5a497a6a3043415159464b344545414349445967414535314376636e4a496f6f455866563834614941676a6d75596a3647334252464d0a5a577770434f65514c324349745a4751713075435a794e4e747972414563767970615a47674f4f7156772f534e533166737062474d5a4e6f2b656b37584536450a433644537a503043617332494e4769312f6757577351616773414835504566360a2d2d2d2d2d454e44205055424c4943204b45592d2d2d2d2d0a000000000000000000000000bd7c85ff9a050361b53372a4bdb1bd7e" -} -``` - -The hex value are encrypted with the users key value of `sbip` with the output encryption key encrypted by the shared session key established after remote attestation. - -The example output: - -```json -{ - "msg" : "c4631eafeb6044a9fa0300000000000000000000000072988629eb5cad55f89ad8315553727e" -} -``` - -The hex value is `hello-sbip` encrypted with the output encryption key. 
diff --git a/sdk/example_containers/sgx-task/config.json b/sdk/example_containers/sgx-task/config.json deleted file mode 100644 index ac5b3c5..0000000 --- a/sdk/example_containers/sgx-task/config.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "resource": { - "cpu": 1, - "memory": 128, - "use_gpu": false, - "gpu_count": 0, - "use_sgx": true - } -} diff --git a/sdk/example_containers/sgx-task/description.txt b/sdk/example_containers/sgx-task/description.txt deleted file mode 100644 index 0429875..0000000 --- a/sdk/example_containers/sgx-task/description.txt +++ /dev/null @@ -1 +0,0 @@ -This app serves HTTP requests to echo back input. The encrypted input is decrypted inside SGX enclave and reencrypted as output. diff --git a/sdk/example_containers/sgx-task/example_input.bin b/sdk/example_containers/sgx-task/example_input.bin deleted file mode 100644 index d697571..0000000 --- a/sdk/example_containers/sgx-task/example_input.bin +++ /dev/null @@ -1,3 +0,0 @@ -{ - "value" : "e2668b74921293303890963bc088b4478d827ea42d2d2d2d2d424547494e205055424c4943204b45592d2d2d2d2d0a4d485977454159484b6f5a497a6a3043415159464b344545414349445967414535314376636e4a496f6f455866563834614941676a6d75596a3647334252464d0a5a577770434f65514c324349745a4751713075435a794e4e747972414563767970615a47674f4f7156772f534e533166737062474d5a4e6f2b656b37584536450a433644537a503043617332494e4769312f6757577351616773414835504566360a2d2d2d2d2d454e44205055424c4943204b45592d2d2d2d2d0a000000000000000000000000bd7c85ff9a050361b53372a4bdb1bd7e" - } \ No newline at end of file diff --git a/sdk/example_containers/sgx-task/example_output.bin b/sdk/example_containers/sgx-task/example_output.bin deleted file mode 100644 index d5312ab..0000000 --- a/sdk/example_containers/sgx-task/example_output.bin +++ /dev/null @@ -1,3 +0,0 @@ -{ - "msg" : "c4631eafeb6044a9fa0300000000000000000000000072988629eb5cad55f89ad8315553727e" - } diff --git a/sdk/example_containers/sgx-task/name.txt b/sdk/example_containers/sgx-task/name.txt deleted file mode 100644 index ccb3d0d..0000000 --- a/sdk/example_containers/sgx-task/name.txt +++ /dev/null @@ -1 +0,0 @@ -sgx-sample \ No newline at end of file diff --git a/sdk/example_containers/sgx-task/src/Makefile b/sdk/example_containers/sgx-task/src/Makefile deleted file mode 100644 index b737cba..0000000 --- a/sdk/example_containers/sgx-task/src/Makefile +++ /dev/null @@ -1,34 +0,0 @@ -# used to mainly build dependencies -PROJECT_ROOT_DIR := $(shell readlink -f .) - -DEPS_INSTALL_DIR = $(PROJECT_ROOT_DIR)/deps/install - -LIBUV_DIR = $(PROJECT_ROOT_DIR)/deps/libuv -LLHTTP_DIR = $(PROJECT_ROOT_DIR)/deps/llhttp - -.PHONY: all concurrent_runtime_deps deps mrproper preparation - -preparation: - git submodule update --init --recursive - -server_deps: - mkdir -p $(DEPS_INSTALL_DIR) - cd $(LIBUV_DIR) && mkdir build && cd build && cmake .. 
-DCMAKE_INSTALL_PREFIX=$(DEPS_INSTALL_DIR)/libuv && make && make install && cd $(PROJECT_ROOT_DIR) - mkdir -p $(DEPS_INSTALL_DIR)/llhttp/include && mkdir $(DEPS_INSTALL_DIR)/llhttp/lib - npm --version - cd $(LLHTTP_DIR) && npm install && make && PREFIX=$(DEPS_INSTALL_DIR)/llhttp make install && cd $(PROJECT_ROOT_DIR) - -server_deps_clean: - make -C $(LLHTTP_DIR) clean - rm -rf $(LLHTTP_DIR)/node_modules - rm -rf $(LIBUV_DIR)/build - -deps: server_deps - -all: preparation deps - -clean: - @echo "nothing to clean" - -mrproper: clean server_deps_clean - rm -rf deps/install diff --git a/sdk/example_containers/sgx-task/src/client_test/Makefile b/sdk/example_containers/sgx-task/src/client_test/Makefile deleted file mode 100644 index 8276c3e..0000000 --- a/sdk/example_containers/sgx-task/src/client_test/Makefile +++ /dev/null @@ -1,29 +0,0 @@ -PROJECT_ROOT_DIR ?= $(shell readlink -f ..) - -App_C_Cpp_Flags := -O0 -g -fPIC -Wno-attributes -I. -App_C_Cpp_Flags += -I$(PROJECT_ROOT_DIR) - -.PHONY: all clean - -all: client_ec_test - -hexutil.o: $(PROJECT_ROOT_DIR)/common/hexutil.cc - g++ $(App_C_Cpp_Flags) -c $< -o $@ - @echo "CXX <= $<" - -json.o: $(PROJECT_ROOT_DIR)/common/json.cc - g++ $(App_C_Cpp_Flags) -c $< -o $@ - @echo "CXX <= $<" - -%.o: %.cc - g++ $(App_C_Cpp_Flags) -c $< -o $@ - @echo "CXX <= $<" - -App_Cpp_Objects := hexutil.o json.o - -client_ec_test: main_ec.o $(App_Cpp_Objects) - g++ $^ -o $@ -lcrypto -lssl -lcurl - @echo "Link => $@" - -clean: - rm -f client_ec_test *.o diff --git a/sdk/example_containers/sgx-task/src/client_test/main_ec.cc b/sdk/example_containers/sgx-task/src/client_test/main_ec.cc deleted file mode 100644 index cb3ab49..0000000 --- a/sdk/example_containers/sgx-task/src/client_test/main_ec.cc +++ /dev/null @@ -1,450 +0,0 @@ -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "common/hexutil.h" -#include "common/json.h" - -#define MAX_SESSION_KEY_IV_LENGTH 16 - -unsigned char default_output_key[16] = {0x30, 0x31, 0x32, 0x33, 0x34, 0x35, - 0x36, 0x37, 0x38, 0x39, 0x30, 0x31, - 0x32, 0x33, 0x34, 0x35}; - -#define EC_PUBLIC_KEY_SIZE 215 - -int get_pkey_by_ec(EVP_PKEY* pk) { - int res = -1; - EVP_PKEY_CTX* ctx; - - ctx = EVP_PKEY_CTX_new_id(EVP_PKEY_EC, NULL); - if (ctx == NULL) return res; - res = EVP_PKEY_keygen_init(ctx); - if (res <= 0) { - // PRINT("EC_generate_key failed (%d)\n", res); - goto done; - } - - res = EVP_PKEY_CTX_set_ec_paramgen_curve_nid(ctx, NID_secp384r1); - if (res <= 0) { - // PRINT("EC_generate_key failed (%d)\n", res); - goto done; - } - - /* Generate key */ - res = EVP_PKEY_keygen(ctx, &pk); - if (res <= 0) { - // PRINT("EC_generate_key failed (%d)\n", res); - goto done; - } - -done: - if (ctx) EVP_PKEY_CTX_free(ctx); - - return res; -} - -std::string get_public_key_as_string(EVP_PKEY* pkey) { - BIO* bio = BIO_new(BIO_s_mem()); - PEM_write_bio_PUBKEY(bio, pkey); - // Allocate memory - uint8_t* local_public_key = (uint8_t*)malloc(EC_PUBLIC_KEY_SIZE + 1); - if (!local_public_key) { - BIO_free(bio); - return ""; - } - memset(local_public_key, 0x00, EC_PUBLIC_KEY_SIZE + 1); - if (!BIO_read(bio, local_public_key, EC_PUBLIC_KEY_SIZE + 1)) { - BIO_free(bio); - return ""; - } - - BIO_free(bio); - auto ret = - std::string((char*)local_public_key, strlen((char*)local_public_key)); - free(local_public_key); - return ret; -} - -void load_public_key(const char* pub_key, size_t pub_key_size, - EVP_PKEY** pkey) { - BIO* bio = BIO_new_mem_buf(pub_key, pub_key_size); - EC_KEY* ecPublicKey = NULL; - 
PEM_read_bio_EC_PUBKEY(bio, &ecPublicKey, NULL, NULL); - EVP_PKEY_assign_EC_KEY(*pkey, ecPublicKey); - printf("public key size: %d\n", EVP_PKEY_size(*pkey)); - BIO_free(bio); -} - -void load_private_key(const char* pri_key, size_t pri_key_size, - EVP_PKEY** pkey) { - BIO* bio = BIO_new_mem_buf(pri_key, pri_key_size); - EC_KEY* ecPrivateKey = NULL; - PEM_read_bio_ECPrivateKey(bio, &ecPrivateKey, NULL, NULL); - EVP_PKEY_assign_EC_KEY(*pkey, ecPrivateKey); - printf("private key size: %d\n", EVP_PKEY_size(*pkey)); - BIO_free(bio); -} - -std::string derive_ecdh_secret(EVP_PKEY* own_key, EVP_PKEY* peer_key) { - EVP_PKEY_CTX* ctx = EVP_PKEY_CTX_new(own_key, NULL); - if (!ctx) { - printf("EVP_PKEY_CTX_new failed\n"); - return ""; - } - if (EVP_PKEY_derive_init(ctx) != 1) { - printf("EVP_PKEY_derive_init failed\n"); - return ""; - } - - if (EVP_PKEY_derive_set_peer(ctx, peer_key) != 1) { - printf("EVP_PKEY_derive_set_peer failed\n"); - return ""; - } - - size_t secret_len; - if (1 != EVP_PKEY_derive(ctx, NULL, &secret_len)) { - return ""; - } - - unsigned char* secret = (unsigned char*)malloc(secret_len); - if (!secret) { - printf("malloc secret failed\n"); - return ""; - } - - if (EVP_PKEY_derive(ctx, secret, &secret_len) != 1) { - printf("EVP_PKEY_derive failed\n"); - return ""; - } - auto ret = std::string((char*)secret, secret_len); - EVP_PKEY_CTX_free(ctx); - free(secret); - return ret; -} - -std::string kdf_derive(const unsigned char* secret, size_t secret_len, - const char* info, size_t info_len, size_t key_size) { - char* key = (char*)malloc(key_size); - if (!key) { - return ""; - } - memset(key, 0, key_size); - EVP_PKEY_CTX* ctx; - if (NULL == (ctx = EVP_PKEY_CTX_new_id(EVP_PKEY_HKDF, NULL))) { - return ""; - } - if (1 != EVP_PKEY_derive_init(ctx)) { - return ""; - } - if (1 != EVP_PKEY_CTX_set_hkdf_md(ctx, EVP_sha256())) { - return ""; - } - if (1 != EVP_PKEY_CTX_set1_hkdf_key(ctx, secret, secret_len)) { - return ""; - } - if (1 != EVP_PKEY_CTX_set1_hkdf_salt(ctx, NULL, 0)) { - return ""; - } - if (1 != EVP_PKEY_CTX_add1_hkdf_info(ctx, info, info_len)) { - return ""; - } - if (1 != EVP_PKEY_derive(ctx, (unsigned char*)key, &key_size)) { - return ""; - } - EVP_PKEY_CTX_free(ctx); - std::string ret{key, key_size}; - free(key); - return ret; -} - -std::string encrypt_request(const std::string& data, - const std::string& enclave_public_key, - const std::string& output_key) { - std::string plaintext{data}; - plaintext.append(output_key.c_str(), 16); - int encrypted_message_len = 0; - unsigned char* ek = NULL; - int ek_len = 0; - unsigned char iv[MAX_SESSION_KEY_IV_LENGTH]; - memset(iv, 0, MAX_SESSION_KEY_IV_LENGTH); - size_t encrypted_message_max_length = plaintext.size() + EVP_MAX_IV_LENGTH; - unsigned char encrypted[encrypted_message_max_length]; - memset(encrypted, 0, encrypted_message_max_length); - EVP_PKEY* pkey = EVP_PKEY_new(); - load_public_key(enclave_public_key.data(), enclave_public_key.size(), &pkey); - - EVP_PKEY* tmp_ec_key_pair = EVP_PKEY_new(); - get_pkey_by_ec(tmp_ec_key_pair); - - // derive shared seccret - auto secret = derive_ecdh_secret(tmp_ec_key_pair, pkey); - auto session_key = kdf_derive((const unsigned char*)secret.c_str(), - secret.size(), "info", 4, 16); - - // encrypt data with the derived secret - EVP_CIPHER_CTX* ctx2 = EVP_CIPHER_CTX_new(); - if (EVP_EncryptInit_ex(ctx2, EVP_aes_128_gcm(), NULL, - (const unsigned char*)session_key.c_str(), iv) != 1) { - printf("EVP_EncryptInit_ex failed\n"); - EVP_CIPHER_CTX_free(ctx2); - return ""; - } - int ciphertext_len = 0; 
- int len; - if (EVP_EncryptUpdate(ctx2, encrypted, &len, - (const unsigned char*)plaintext.data(), - plaintext.size()) != 1) { - printf("EVP_EncryptUpdate failed\n"); - EVP_CIPHER_CTX_free(ctx2); - return ""; - } - ciphertext_len = len; - printf("encrypted data updated to (%dB)\n", ciphertext_len); - - if (EVP_EncryptFinal_ex(ctx2, encrypted + len, &len) != 1) { - printf("EVP_EncryptFinal_ex failed\n"); - EVP_CIPHER_CTX_free(ctx2); - return ""; - } - ciphertext_len += len; - printf("encrypted data updated to (%dB)\n", ciphertext_len); - - unsigned char tag[16] = { - 0, - }; - EVP_CIPHER_CTX_ctrl(ctx2, EVP_CTRL_GCM_GET_TAG, 16, tag); - printf("tag: \n"); - BIO_dump_fp(stdout, (const char*)tag, 16); - - EVP_CIPHER_CTX_free(ctx2); - - auto tmp_ec_key_pair_str = get_public_key_as_string(tmp_ec_key_pair); - printf("tmp ec key pair (len: %ld) : %s\n", tmp_ec_key_pair_str.size(), - tmp_ec_key_pair_str.c_str()); - - EVP_PKEY_free(pkey); - EVP_PKEY_free(tmp_ec_key_pair); - - return std::string((char*)encrypted, ciphertext_len) + tmp_ec_key_pair_str + - std::string((char*)iv, 12) + std::string((char*)tag, 16); -} - -std::string decrypt_response(const std::string& data, - const std::string& output_key) { - uint8_t tag[16]; - memcpy(tag, data.data() + data.size() - 16, 16); - uint8_t iv[12]; - memcpy(iv, data.data() + data.size() - 28, 12); - size_t encrypt_len = data.size() - 28; - EVP_CIPHER_CTX* ctx; - /* Create and initialise the context */ - if (!(ctx = EVP_CIPHER_CTX_new())) return ""; - - if (EVP_DecryptInit_ex( - ctx, EVP_aes_128_gcm(), NULL, - reinterpret_cast(output_key.c_str()), - iv) != 1) { - EVP_CIPHER_CTX_free(ctx); - return ""; - } - - EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_TAG, 16, tag); - - uint8_t plaintext[encrypt_len]; - int plaintext_len; - int len; - if (EVP_DecryptUpdate(ctx, plaintext, &len, (const unsigned char*)data.data(), - encrypt_len) != 1) { - EVP_CIPHER_CTX_free(ctx); - return ""; - } - plaintext_len = len; - // printf("so far plaintext len: %d\n", plaintext_len); - - if (EVP_DecryptFinal_ex(ctx, plaintext + len, &len) != 1) { - EVP_CIPHER_CTX_free(ctx); - return ""; - } - plaintext_len += len; - - EVP_CIPHER_CTX_free(ctx); - return std::string((char*)plaintext, plaintext_len); -} - -class HttpClient { - public: - HttpClient() { - // Initialize curl - curl_global_init(CURL_GLOBAL_DEFAULT); - curl = curl_easy_init(); - } - - ~HttpClient() { - std::cout << "HttpClient destructor called\n"; - // Cleanup curl - curl_easy_cleanup(curl); - curl_global_cleanup(); - } - - std::string get(const std::string& url) { - if (curl) { - // Set the URL to send the GET request to - curl_easy_setopt(curl, CURLOPT_URL, url.c_str()); - - // Store response in a string - std::string response; - curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, writeCallback); - curl_easy_setopt(curl, CURLOPT_WRITEDATA, &response); - - // Perform the GET request - CURLcode res = curl_easy_perform(curl); - if (res != CURLE_OK) { - std::cerr << "Failed to perform GET request: " - << curl_easy_strerror(res) << "\n"; - } - - return response; - } - - return ""; - } - - std::string post(const std::string& url, const std::string& data) { - if (curl) { - // Set the URL to send the GET request to - curl_easy_setopt(curl, CURLOPT_URL, url.c_str()); - - // set the post data - curl_easy_setopt(curl, CURLOPT_POSTFIELDS, data.c_str()); - - // Store response in a string - std::string response; - curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, writeCallback); - curl_easy_setopt(curl, CURLOPT_WRITEDATA, &response); - - // 
Perform the GET request - CURLcode res = curl_easy_perform(curl); - if (res != CURLE_OK) { - std::cerr << "Failed to perform GET request: " - << curl_easy_strerror(res) << "\n"; - } - - return response; - } - - return ""; - } - - private: - static size_t writeCallback(char* data, size_t size, size_t nmemb, - std::string* buffer) { - // Append the data to the buffer - buffer->append(data, size * nmemb); - return size * nmemb; - } - - CURL* curl; -}; - -class SGXTaskClient : public HttpClient { - public: - SGXTaskClient(const std::string& addr) : HttpClient(), addr_(addr) {} - - ~SGXTaskClient() = default; - - void ra() { - // get report - std::string url = addr_ + "/ra"; - std::string response = get(url); - if (response.size() == 0) { - std::cerr << "Failed to get report from server\n"; - return; - } - - std::cout << "RA Response: " << response << "\n"; - - // decode the response: - auto data = json::JSON::Load(response); - auto msg = json::JSON::Load(data["msg"].ToString()); - this->report_ = msg["report"].ToString(); - this->enclave_key_ = msg["key"].ToString(); - } - - std::string exec(const std::string& input, const std::string& output_key) { - if (this->enclave_key_.size() == 0 || this->enclave_key_ == "null") { - std::cerr << "Enclave key not initialized\n"; - return ""; - } - - // encrypt the input - std::string encrypted_input = - encrypt_request(input, enclave_key_, output_key); - - std::cout << "prepared sample size: " << encrypted_input.size() << "\n"; - - // send the request - std::string url = addr_ + "/run"; - json::JSON json_request; - - json_request["value"] = - hex_encode(encrypted_input.data(), encrypted_input.size()); - - // std::string response = post(url, "{\"value\": \"" + encrypted_input+ - // "\"}"); - auto request_data = json_request.dump(); - std::cout << "request data: " << request_data << "\n"; - std::string response = post(url, request_data); - if (response.size() == 0) { - std::cerr << "Failed to get response from server\n"; - return ""; - } - - std::cout << "exec Response: " << response << "\n"; - - auto data = json::JSON::Load(response); - auto msg = data["msg"].ToString(); - // decrypt the response - std::string decrypted_response = - decrypt_response(hex_decode(msg.c_str(), msg.size()), output_key); - return decrypted_response; - } - - private: - std::string addr_; - // initialized after ra - std::string report_; - std::string enclave_key_; -}; - -int main() { - // { - // std::string url = "127.0.0.1:8080"; - // HttpClient client; - - // // get - // std::string response = client.get(url); - // std::cout << "Response: " << response << "\n"; - - // // put - // response = client.post(url, "{\"name\": \"sbip\"}"); - // std::cout << "Response: " << response << "\n"; - // } - - { - SGXTaskClient client{"127.0.0.1:2333"}; - // SGXTaskClient client{"172.18.0.255:8080"}; - client.ra(); - auto response = - client.exec("sbip", std::string((char*)default_output_key, 16)); - std::cout << "Response: " << response << "\n"; - } - - return 0; -} diff --git a/sdk/example_containers/sgx-task/src/common/fileutil.cc b/sdk/example_containers/sgx-task/src/common/fileutil.cc deleted file mode 100644 index 8f900f6..0000000 --- a/sdk/example_containers/sgx-task/src/common/fileutil.cc +++ /dev/null @@ -1,103 +0,0 @@ -#include "fileutil.h" - -#include -#include - -int ReadFileToString(const std::string& file_path, std::string* content) { - assert(content); - std::ifstream infile(file_path); - if (!infile.good()) { - printf("file failed to load: %s\n", file_path.c_str()); - return 0; 
// cannot differentiate file not exist or error in openning - } - std::stringstream ss; - ss << infile.rdbuf(); - *content = ss.str(); - return 0; -} - -/** - * @brief read the content of a file into a char array. - * - * @param file_path - * @param output_len output_len will be updated with the len of read content, - * if succeeded - * @return char* : read content (caller owns the data and shall free it). - */ -char* ReadFileToCharArray(const char* file_path, size_t* output_len) { - assert(file_path); - assert(output_len); - FILE* fp = std::fopen(file_path, "rb"); - if (fp == nullptr) { - printf("failed to open file: %s\n", file_path); - return nullptr; - } - if (std::fseek(fp, 0, SEEK_END) == -1) { - printf("fseek error\n"); - return nullptr; - } - auto file_len = std::ftell(fp); - if (file_len == -1) { - printf("ftell error\n"); - return nullptr; - } - if (file_len == 0) { - *output_len = 0; - return nullptr; - } - auto ret = malloc(file_len + 1); - if (ret == nullptr) { - printf("malloc failed\n"); - return nullptr; - } - std::rewind(fp); - if (std::fread(ret, 1, file_len, fp) != static_cast(file_len)) { - printf("fread error"); - free(ret); - return nullptr; - } - if (std::fclose(fp) == -1) { - printf("close error"); - free(ret); - return nullptr; - } - *output_len = file_len; - ((char*)ret)[file_len] = '\0'; - return (char*)ret; -} - -int WriteStringToFile(const std::string& file_path, - const std::string& content) { - std::ofstream outfile(file_path, std::ios::out); - outfile << content; - outfile.flush(); - outfile.close(); - return 0; -} - -/** - * @brief write a char array to a file - * - * @param file_path - * @param src - * @param len : len of the char array - * @return int : 0 for success; -1 for failure - */ -int WriteCharArrayToFile(const char* file_path, const char* src, size_t len) { - assert(file_path); - assert(src); - FILE* fp = std::fopen(file_path, "wb"); - if (fp == nullptr) { - printf("failed to open file\n"); - return -1; - } - if (std::fwrite(src, 1, len, fp) != len) { - printf("fwrite error"); - return -1; - } - if (std::fclose(fp) == -1) { - printf("close error"); - return -1; - } - return 0; -} diff --git a/sdk/example_containers/sgx-task/src/common/fileutil.h b/sdk/example_containers/sgx-task/src/common/fileutil.h deleted file mode 100644 index 098f23c..0000000 --- a/sdk/example_containers/sgx-task/src/common/fileutil.h +++ /dev/null @@ -1,35 +0,0 @@ -#ifndef MECA_SGX_COMMON_U_FILEUTIL_H_ -#define MECA_SGX_COMMON_U_FILEUTIL_H_ - -#include - -inline bool IsFileExist(const std::string& file_path) { - std::ifstream infile(file_path); - return infile.good(); -} - -int ReadFileToString(const std::string& file_path, std::string* content); - -/** - * @brief read the content of a file into a char array. - * - * @param file_path - * @param output_len output_len will be updated with the len of read content, - * if succeeded - * @return char* : read content (caller owns the data and shall free it). 
- */ -char* ReadFileToCharArray(const char* file_path, size_t* output_len); - -int WriteStringToFile(const std::string& file_path, const std::string& content); - -/** - * @brief write a char array to a file - * - * @param file_path - * @param src - * @param len : len of the char array - * @return int : 0 for success; -1 for failure - */ -int WriteCharArrayToFile(const char* file_path, const char* src, size_t len); - -#endif // MECA_SGX_COMMON_U_FILEUTIL_H_ diff --git a/sdk/example_containers/sgx-task/src/common/hexutil.cc b/sdk/example_containers/sgx-task/src/common/hexutil.cc deleted file mode 100644 index 4172cef..0000000 --- a/sdk/example_containers/sgx-task/src/common/hexutil.cc +++ /dev/null @@ -1,83 +0,0 @@ -#include "hexutil.h" - -#include -#include -#include - -namespace { -const char _hextable[] = "0123456789abcdef"; - -struct hex_buffer_deleter { - void operator()(void *p) const { free(p); } -}; - -thread_local std::unique_ptr _hex_buffer{nullptr}; -thread_local size_t _hex_buffer_size = 0; -} // anonymous namespace - -void print_hexstring(FILE *fp, const void *vsrc, size_t len) { - const unsigned char *sp = (const unsigned char *)vsrc; - size_t i; - for (i = 0; i < len; ++i) { - fprintf(fp, "%02x", sp[i]); - } - fprintf(fp, "\n"); -} - -const char *hexstring(const void *vsrc, size_t len) { - size_t i, bsz; - const char *src = (const char *)vsrc; - char *bp; - - bsz = len * 2 + 1; /* Make room for NULL byte */ - if (bsz >= _hex_buffer_size) { - /* Allocate in 1K increments. Make room for the NULL byte. */ - size_t newsz = 1024 * (bsz / 1024) + ((bsz % 1024) ? 1024 : 0); - _hex_buffer_size = newsz; - auto _hex_buffer_ptr = (char *)realloc(_hex_buffer.get(), newsz); - if (_hex_buffer_ptr == NULL) { - return "(out of memory)"; - } - _hex_buffer.release(); - _hex_buffer.reset(_hex_buffer_ptr); - } - - for (i = 0, bp = _hex_buffer.get(); i < len; ++i) { - *bp = _hextable[(uint8_t)src[i] >> 4]; - ++bp; - *bp = _hextable[(uint8_t)src[i] & 0xf]; - ++bp; - } - _hex_buffer.get()[len * 2] = 0; - - return (const char *)_hex_buffer.get(); -} - -// need testing -std::string hex_encode(const void *vsrc, size_t len) { - const char *src = (const char *)vsrc; - char ret[len * 2 + 1]; - char *bp = ret; - for (size_t i = 0; i < len; ++i) { - *bp = _hextable[(uint8_t)src[i] >> 4]; - ++bp; - *bp = _hextable[(uint8_t)src[i] & 0xf]; - ++bp; - } - ret[len * 2] = 0; - return std::string(ret, len * 2); -} - -// need testing -std::string hex_decode(const char *vsrc, size_t len) { - const char *src = (const char *)vsrc; - char ret[len / 2]; - char *bp = ret; - for (size_t i = 0; i < len; i += 2) { - *bp = (uint8_t)((src[i] >= 'a' ? src[i] - 'a' + 10 : src[i] - '0') << 4); - *bp |= - (uint8_t)(src[i + 1] >= 'a' ? 
src[i + 1] - 'a' + 10 : src[i + 1] - '0'); - ++bp; - } - return std::string(ret, len / 2); -} \ No newline at end of file diff --git a/sdk/example_containers/sgx-task/src/common/hexutil.h b/sdk/example_containers/sgx-task/src/common/hexutil.h deleted file mode 100644 index d27be56..0000000 --- a/sdk/example_containers/sgx-task/src/common/hexutil.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef MECA_SGX_COMMON_IOPATCHT_H_ -#define MECA_SGX_COMMON_IOPATCHT_H_ - -#include -#include - -void print_hexstring(FILE *fp, const void *src, size_t len); - -const char *hexstring(const void *src, size_t len); - -std::string hex_encode(const void *vsrc, size_t len); -std::string hex_decode(const char *vsrc, size_t len); - -#endif // MECA_SGX_COMMON_IOPATCHT_H_ diff --git a/sdk/example_containers/sgx-task/src/common/json.cc b/sdk/example_containers/sgx-task/src/common/json.cc deleted file mode 100644 index adb49b5..0000000 --- a/sdk/example_containers/sgx-task/src/common/json.cc +++ /dev/null @@ -1,340 +0,0 @@ -/********************************************************************************** - * Obtained from https://github.com/nbsdx/SimpleJSON , under the terms of the - *WTFPL. - ***********************************************************************************/ -#include "json.h" - -namespace json { - -namespace { -string json_escape(const string &str) { - string output; - for (unsigned i = 0; i < str.length(); ++i) switch (str[i]) { - case '\"': - output += "\\\""; - break; - case '\\': - output += "\\\\"; - break; - case '\b': - output += "\\b"; - break; - case '\f': - output += "\\f"; - break; - case '\n': - output += "\\n"; - break; - case '\r': - output += "\\r"; - break; - case '\t': - output += "\\t"; - break; - default: - output += str[i]; - break; - } - return output; -} - -JSON parse_next(const string &, size_t &); - -void consume_ws(const string &str, size_t &offset) { - while (isspace(str[offset])) ++offset; -} - -JSON parse_object(const string &str, size_t &offset) { - JSON Object = JSON::Make(JSON::Class::Object); - - ++offset; - consume_ws(str, offset); - if (str[offset] == '}') { - ++offset; - return Object; - } - - while (true) { - JSON Key = parse_next(str, offset); - consume_ws(str, offset); - if (str[offset] != ':') { - // std::cerr << "Error: Object: Expected colon, found '" << str[offset] << - // "'\n"; - break; - } - consume_ws(str, ++offset); - JSON Value = parse_next(str, offset); - Object[Key.ToString()] = Value; - - consume_ws(str, offset); - if (str[offset] == ',') { - ++offset; - continue; - } else if (str[offset] == '}') { - ++offset; - break; - } else { - // std::cerr << "ERROR: Object: Expected comma, found '" << str[offset] << - // "'\n"; - break; - } - } - - return Object; -} - -JSON parse_array(const string &str, size_t &offset) { - JSON Array = JSON::Make(JSON::Class::Array); - unsigned index = 0; - - ++offset; - consume_ws(str, offset); - if (str[offset] == ']') { - ++offset; - return Array; - } - - while (true) { - Array[index++] = parse_next(str, offset); - consume_ws(str, offset); - - if (str[offset] == ',') { - ++offset; - continue; - } else if (str[offset] == ']') { - ++offset; - break; - } else { - // std::cerr << "ERROR: Array: Expected ',' or ']', found '" << - // str[offset] << "'\n"; - return std::move(JSON::Make(JSON::Class::Array)); - } - } - - return Array; -} - -JSON parse_string(const string &str, size_t &offset) { - JSON String; - string val; - for (char c = str[++offset]; c != '\"'; c = str[++offset]) { - if (c == '\\') { - switch (str[++offset]) { - 
case '\"': - val += '\"'; - break; - case '\\': - val += '\\'; - break; - // case '/' : val += '/' ; break; - case 'b': - val += '\b'; - break; - case 'f': - val += '\f'; - break; - case 'n': - val += '\n'; - break; - case 'r': - val += '\r'; - break; - case 't': - val += '\t'; - break; - case 'u': { - val += "\\u"; - for (unsigned i = 1; i <= 4; ++i) { - c = str[offset + i]; - if ((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || - (c >= 'A' && c <= 'F')) - val += c; - else { - // std::cerr << "ERROR: String: Expected hex character in unicode - // escape, found '" << c << "'\n"; - return std::move(JSON::Make(JSON::Class::String)); - } - } - offset += 4; - } break; - default: - val += '\\'; - break; - } - } else - val += c; - } - ++offset; - String = val; - return String; -} - -JSON parse_number(const string &str, size_t &offset) { - JSON Number; - string val, exp_str; - char c; - bool isDouble = false; - long exp = 0; - while (true) { - c = str[offset++]; - if ((c == '-') || (c >= '0' && c <= '9')) - val += c; - else if (c == '.') { - val += c; - isDouble = true; - } else - break; - } - if (c == 'E' || c == 'e') { - c = str[offset++]; - if (c == '-') { - ++offset; - exp_str += '-'; - } - while (true) { - c = str[offset++]; - if (c >= '0' && c <= '9') - exp_str += c; - else if (!isspace(c) && c != ',' && c != ']' && c != '}') { - // std::cerr << "ERROR: Number: Expected a number for exponent, found '" - // << c << "'\n"; - return std::move(JSON::Make(JSON::Class::Null)); - } else - break; - } - exp = std::stol(exp_str); - } else if (!isspace(c) && c != ',' && c != ']' && c != '}') { - // std::cerr << "ERROR: Number: unexpected character '" << c << "'\n"; - return std::move(JSON::Make(JSON::Class::Null)); - } - --offset; - - if (isDouble) - Number = std::stod(val) * std::pow(10, exp); - else { - if (!exp_str.empty()) - Number = (double)std::stol(val) * std::pow(10, exp); - else - Number = std::stol(val); - } - return Number; -} - -JSON parse_bool(const string &str, size_t &offset) { - JSON Bool; - if (str.substr(offset, 4) == "true") - Bool = true; - else if (str.substr(offset, 5) == "false") - Bool = false; - else { - // std::cerr << "ERROR: Bool: Expected 'true' or 'false', found '" << - // str.substr( offset, 5 ) << "'\n"; - return std::move(JSON::Make(JSON::Class::Null)); - } - offset += (Bool.ToBool() ? 4 : 5); - return Bool; -} - -JSON parse_null(const string &str, size_t &offset) { - JSON Null; - if (str.substr(offset, 4) != "null") { - // std::cerr << "ERROR: Null: Expected 'null', found '" << str.substr( - // offset, 4 ) << "'\n"; - return std::move(JSON::Make(JSON::Class::Null)); - } - offset += 4; - return Null; -} - -JSON parse_next(const string &str, size_t &offset) { - char value; - consume_ws(str, offset); - value = str[offset]; - switch (value) { - case '[': - return std::move(parse_array(str, offset)); - case '{': - return std::move(parse_object(str, offset)); - case '\"': - return std::move(parse_string(str, offset)); - case 't': - case 'f': - return std::move(parse_bool(str, offset)); - case 'n': - return std::move(parse_null(str, offset)); - default: - if ((value <= '9' && value >= '0') || value == '-') - return std::move(parse_number(str, offset)); - } - // std::cerr << "ERROR: Parse: Unknown starting character '" << value << - // "'\n"; printf("ERROR: Parse: Unknown starting character %02hhX\n", value); - return JSON(); -} -} // anonymous namespace - -string JSON::ToString(bool &ok) const { - ok = (Type == Class::String); - // return ok ? 
std::move( json_escape( *Internal.String ) ): string(""); - // return ok ? *Internal.String: string(""); - return ok ? *Internal.String : dump(); -} - -string JSON::dump(int depth, string tab) const { - string pad = ""; - for (int i = 0; i < depth; ++i, pad += tab); - - switch (Type) { - case Class::Null: - return "null"; - case Class::Object: { - string s = "{\n"; - bool skip = true; - for (auto &p : *Internal.Map) { - if (!skip) s += ",\n"; - s += (pad + "\"" + p.first + "\" : " + p.second.dump(depth + 1, tab)); - skip = false; - } - s += ("\n" + pad.erase(0, 2) + "}"); - return s; - } - case Class::Array: { - string s = "["; - bool skip = true; - for (auto &p : *Internal.List) { - if (!skip) s += ", "; - s += p.dump(depth + 1, tab); - skip = false; - } - s += "]"; - return s; - } - case Class::String: - return "\"" + json_escape(*Internal.String) + "\""; - case Class::Floating: - return std::to_string(Internal.Float); - case Class::Integral: - return std::to_string(Internal.Int); - case Class::Boolean: - return Internal.Bool ? "true" : "false"; - default: - return ""; - } - return ""; -} -JSON JSON::Load(const string &str) { - size_t offset = 0; - return std::move(parse_next(str, offset)); -} - -JSON Array() { return std::move(JSON::Make(JSON::Class::Array)); } - -template -JSON Array(T... args) { - JSON arr = JSON::Make(JSON::Class::Array); - arr.append(args...); - return std::move(arr); -} - -JSON Object() { return std::move(JSON::Make(JSON::Class::Object)); } - -} // namespace json \ No newline at end of file diff --git a/sdk/example_containers/sgx-task/src/common/json.h b/sdk/example_containers/sgx-task/src/common/json.h deleted file mode 100644 index d81adb8..0000000 --- a/sdk/example_containers/sgx-task/src/common/json.h +++ /dev/null @@ -1,411 +0,0 @@ -/********************************************************************************** - * Obtained from https://github.com/nbsdx/SimpleJSON , under the terms of the - *WTFPL. - ***********************************************************************************/ -#ifndef MECA_SGX_COMMON_JSON_H_ -#define MECA_SGX_COMMON_JSON_H_ - -#include -#include -#include -#include -#include -#include -#include -#include - -namespace json { - -using std::deque; -using std::enable_if; -using std::initializer_list; -using std::is_convertible; -using std::is_floating_point; -using std::is_integral; -using std::is_same; -using std::map; -using std::string; - -class JSON { - union BackingData { - BackingData(double d) : Float(d) {} - BackingData(long l) : Int(l) {} - BackingData(bool b) : Bool(b) {} - BackingData(string s) : String(new string(s)) {} - BackingData() : Int(0) {} - - deque *List; - map *Map; - string *String; - double Float; - long Int; - bool Bool; - } Internal; - - public: - enum class Class { Null, Object, Array, String, Floating, Integral, Boolean }; - - template - class JSONWrapper { - Container *object; - - public: - JSONWrapper(Container *val) : object(val) {} - JSONWrapper(std::nullptr_t) : object(nullptr) {} - - typename Container::iterator begin() { - return object ? object->begin() : typename Container::iterator(); - } - typename Container::iterator end() { - return object ? object->end() : typename Container::iterator(); - } - typename Container::const_iterator begin() const { - return object ? object->begin() : typename Container::iterator(); - } - typename Container::const_iterator end() const { - return object ? 
object->end() : typename Container::iterator(); - } - }; - - template - class JSONConstWrapper { - const Container *object; - - public: - JSONConstWrapper(const Container *val) : object(val) {} - JSONConstWrapper(std::nullptr_t) : object(nullptr) {} - - typename Container::const_iterator begin() const { - return object ? object->begin() : typename Container::const_iterator(); - } - typename Container::const_iterator end() const { - return object ? object->end() : typename Container::const_iterator(); - } - }; - - JSON() : Internal(), Type(Class::Null) {} - - JSON(initializer_list list) : JSON() { - SetType(Class::Object); - for (auto i = list.begin(), e = list.end(); i != e; ++i, ++i) - operator[](i->ToString()) = *std::next(i); - } - - JSON(JSON &&other) : Internal(other.Internal), Type(other.Type) { - other.Type = Class::Null; - other.Internal.Map = nullptr; - } - - JSON &operator=(JSON &&other) { - ClearInternal(); - Internal = other.Internal; - Type = other.Type; - other.Internal.Map = nullptr; - other.Type = Class::Null; - return *this; - } - - JSON(const JSON &other) { - switch (other.Type) { - case Class::Object: - Internal.Map = new map(other.Internal.Map->begin(), - other.Internal.Map->end()); - break; - case Class::Array: - Internal.List = new deque(other.Internal.List->begin(), - other.Internal.List->end()); - break; - case Class::String: - Internal.String = new string(*other.Internal.String); - break; - default: - Internal = other.Internal; - } - Type = other.Type; - } - - JSON &operator=(const JSON &other) { - ClearInternal(); - switch (other.Type) { - case Class::Object: - Internal.Map = new map(other.Internal.Map->begin(), - other.Internal.Map->end()); - break; - case Class::Array: - Internal.List = new deque(other.Internal.List->begin(), - other.Internal.List->end()); - break; - case Class::String: - Internal.String = new string(*other.Internal.String); - break; - default: - Internal = other.Internal; - } - Type = other.Type; - return *this; - } - - ~JSON() { - switch (Type) { - case Class::Array: - delete Internal.List; - break; - case Class::Object: - delete Internal.Map; - break; - case Class::String: - delete Internal.String; - break; - default:; - } - } - - template - JSON(T b, typename enable_if::value>::type * = 0) - : Internal(b), Type(Class::Boolean) {} - - template - JSON(T i, typename enable_if::value && - !is_same::value>::type * = 0) - : Internal((long)i), Type(Class::Integral) {} - - template - JSON(T f, typename enable_if::value>::type * = 0) - : Internal((double)f), Type(Class::Floating) {} - - template - JSON(T s, typename enable_if::value>::type * = 0) - : Internal(string(s)), Type(Class::String) {} - - JSON(std::nullptr_t) : Internal(), Type(Class::Null) {} - - static JSON Make(Class type) { - JSON ret; - ret.SetType(type); - return ret; - } - - static JSON Load(const string &); - - template - void append(T arg) { - SetType(Class::Array); - Internal.List->emplace_back(arg); - } - - template - void append(T arg, U... 
args) { - append(arg); - append(args...); - } - - template - typename enable_if::value, JSON &>::type operator=(T b) { - SetType(Class::Boolean); - Internal.Bool = b; - return *this; - } - - template - typename enable_if::value && !is_same::value, - JSON &>::type - operator=(T i) { - SetType(Class::Integral); - Internal.Int = i; - return *this; - } - - template - typename enable_if::value, JSON &>::type operator=(T f) { - SetType(Class::Floating); - Internal.Float = f; - return *this; - } - - template - typename enable_if::value, JSON &>::type operator=( - T s) { - SetType(Class::String); - *Internal.String = string(s); - return *this; - } - - JSON &operator[](const string &key) { - SetType(Class::Object); - return Internal.Map->operator[](key); - } - - JSON &operator[](unsigned index) { - SetType(Class::Array); - if (index >= Internal.List->size()) Internal.List->resize(index + 1); - return Internal.List->operator[](index); - } - - JSON &at(const string &key) { return operator[](key); } - - const JSON &at(const string &key) const { return Internal.Map->at(key); } - - JSON &at(unsigned index) { return operator[](index); } - - const JSON &at(unsigned index) const { return Internal.List->at(index); } - - int length() const { - if (Type == Class::Array) - return (int)Internal.List->size(); - else - return -1; - } - - bool hasKey(const string &key) const { - if (Type == Class::Object) - return Internal.Map->find(key) != Internal.Map->end(); - return false; - } - - int size() const { - if (Type == Class::Object) - return (int)Internal.Map->size(); - else if (Type == Class::Array) - return (int)Internal.List->size(); - else - return -1; - } - - Class JSONType() const { return Type; } - - /// Functions for getting primitives from the JSON object. - bool IsNull() const { return Type == Class::Null; } - - string ToString() const { - bool b; - return std::move(ToString(b)); - } - - string ToString(bool &ok) const; - - double ToFloat() const { - bool b; - return ToFloat(b); - } - double ToFloat(bool &ok) const { - ok = (Type == Class::Floating); - return ok ? Internal.Float : 0.0; - } - - long ToInt() const { - bool b; - return ToInt(b); - } - long ToInt(bool &ok) const { - ok = (Type == Class::Integral); - return ok ? Internal.Int : 0; - } - - bool ToBool() const { - bool b; - return ToBool(b); - } - bool ToBool(bool &ok) const { - ok = (Type == Class::Boolean); - return ok ? 
Internal.Bool : false; - } - - JSONWrapper> ObjectRange() { - if (Type == Class::Object) - return JSONWrapper>(Internal.Map); - return JSONWrapper>(nullptr); - } - - JSONWrapper> ArrayRange() { - if (Type == Class::Array) return JSONWrapper>(Internal.List); - return JSONWrapper>(nullptr); - } - - JSONConstWrapper> ObjectRange() const { - if (Type == Class::Object) - return JSONConstWrapper>(Internal.Map); - return JSONConstWrapper>(nullptr); - } - - JSONConstWrapper> ArrayRange() const { - if (Type == Class::Array) - return JSONConstWrapper>(Internal.List); - return JSONConstWrapper>(nullptr); - } - - string dump(int depth = 1, string tab = " ") const; - - // friend std::ostream& operator<<( std::ostream&, const JSON & ); - - private: - void SetType(Class type) { - if (type == Type) return; - - ClearInternal(); - - switch (type) { - case Class::Null: - Internal.Map = nullptr; - break; - case Class::Object: - Internal.Map = new map(); - break; - case Class::Array: - Internal.List = new deque(); - break; - case Class::String: - Internal.String = new string(); - break; - case Class::Floating: - Internal.Float = 0.0; - break; - case Class::Integral: - Internal.Int = 0; - break; - case Class::Boolean: - Internal.Bool = false; - break; - } - - Type = type; - } - - private: - /* beware: only call if YOU know that Internal is allocated. No checks - performed here. This function should be called in a constructed JSON just - before you are going to overwrite Internal... - */ - void ClearInternal() { - switch (Type) { - case Class::Object: - delete Internal.Map; - break; - case Class::Array: - delete Internal.List; - break; - case Class::String: - delete Internal.String; - break; - default:; - } - } - - private: - Class Type = Class::Null; -}; - -JSON Array(); - -template -JSON Array(T... args); - -JSON Object(); - -// std::ostream& operator<<( std::ostream &os, const JSON &json ) { -// os << json.dump(); -// return os; -// } - -} // End Namespace json - -#endif // MECA_SGX_COMMON_JSON_H_ diff --git a/sdk/example_containers/sgx-task/src/common/tcrypto_ext.cc b/sdk/example_containers/sgx-task/src/common/tcrypto_ext.cc deleted file mode 100644 index 71db75b..0000000 --- a/sdk/example_containers/sgx-task/src/common/tcrypto_ext.cc +++ /dev/null @@ -1,416 +0,0 @@ -#include "tcrypto_ext.h" - -#include -#include - -// for key pair generation -#include "openssl/ec.h" -#include "openssl/evp.h" -#include "openssl/kdf.h" -#include "openssl/pem.h" -#include "openssl/rsa.h" - -namespace { -// for now we use fixed IV and aad for prototype -// for production, different value should be used for encryption (need to -// support update and thread synchronization on them) -uint8_t enc_iv_buf[AES_GCM_IV_SIZE] = {0}; -uint8_t enc_aad_buf[AES_GCM_AAD_SIZE] = {0}; -} // anonymous namespace - -/** - * @brief - * - * @param src - * @param size - * @param decryption_key - * @param output caller is responsible to free the resources - * @return sgx_status_t - */ -sgx_status_t decrypt_content_with_key_aes(const uint8_t* src, size_t size, - const uint8_t* decryption_key, - uint8_t** output) { - size_t cipher_text_size = get_aes_decrypted_size(size); - - // temporary small variables - uint8_t iv_buf[AES_GCM_IV_SIZE] = {0}; - uint8_t tag_buf[AES_GCM_TAG_SIZE] = {0}; - uint8_t aad_buf[AES_GCM_AAD_SIZE] = {0}; - - // buffer for decrypted result. ownership transfer at the end to output. - uint8_t* result = (uint8_t*)malloc(cipher_text_size + 1); - sgx_status_t ret = (result == NULL) ? 
SGX_ERROR_OUT_OF_MEMORY : SGX_SUCCESS; - - if (ret == SGX_SUCCESS) { - // copy contents - const uint8_t* p = src; - p += cipher_text_size; - memcpy(iv_buf, p, AES_GCM_IV_SIZE); - p += AES_GCM_IV_SIZE; - memcpy(tag_buf, p, AES_GCM_TAG_SIZE); - p += AES_GCM_TAG_SIZE; - memcpy(aad_buf, p, AES_GCM_AAD_SIZE); - - // decrypt - ret = sgx_rijndael128GCM_decrypt( - (const sgx_aes_gcm_128bit_key_t*)decryption_key, src, - (uint32_t)cipher_text_size, result, iv_buf, AES_GCM_IV_SIZE, aad_buf, - AES_GCM_AAD_SIZE, (const sgx_aes_gcm_128bit_tag_t*)tag_buf); - result[cipher_text_size] = '\0'; - } - - // assign the result to output if success; free the resource otherwise. - if (ret != SGX_SUCCESS) { - free(result); - return ret; - } - *output = result; - - return ret; -} - -/** - * @brief - * - * @param src - * @param size - * @param encryption_key - * @param output caller is responsible to free the resources. - * @return sgx_status_t - */ -sgx_status_t encrypt_content_with_key_aes(const uint8_t* src, size_t size, - const uint8_t* encryption_key, - uint8_t** output) { - size_t whole_cipher_text_size = get_aes_encrypted_size(size); - - // allocate temporary buffers for decryption operation. freed at the end. - uint8_t* whole_cipher_text = (uint8_t*)malloc(whole_cipher_text_size); - if (whole_cipher_text == NULL) return SGX_ERROR_OUT_OF_MEMORY; - - // temporary variables - uint8_t tag_buf[AES_GCM_TAG_SIZE] = {0}; - - // encrypt - sgx_status_t ret = sgx_rijndael128GCM_encrypt( - (const sgx_aes_gcm_128bit_key_t*)encryption_key, src, (uint32_t)size, - whole_cipher_text, enc_iv_buf, AES_GCM_IV_SIZE, enc_aad_buf, - AES_GCM_AAD_SIZE, (sgx_aes_gcm_128bit_tag_t*)tag_buf); - - // free the resource when failure. - if (ret != SGX_SUCCESS) { - free(whole_cipher_text); - return ret; - } - - // assemble the message - uint8_t* pos = whole_cipher_text + size; - memcpy(pos, enc_iv_buf, AES_GCM_IV_SIZE); - pos += AES_GCM_IV_SIZE; - memcpy(pos, tag_buf, AES_GCM_TAG_SIZE); - pos += AES_GCM_TAG_SIZE; - memcpy(pos, enc_aad_buf, AES_GCM_AAD_SIZE); - - // assign the result to output if success; - *output = whole_cipher_text; - - return ret; -} - -// from SampleCode/SampleAttestedTLS/common/utility.cc - -int get_pkey_by_rsa(EVP_PKEY* pk) { - int res = -1; - EVP_PKEY_CTX* ctx = NULL; - - ctx = EVP_PKEY_CTX_new_id(EVP_PKEY_RSA, NULL); - if (ctx == NULL) return res; - res = EVP_PKEY_keygen_init(ctx); - if (res <= 0) { - // PRINT("keygen_init failed (%d)\n", res); - goto done; - } - - res = EVP_PKEY_CTX_set_rsa_keygen_bits(ctx, RSA_3072_PRIVATE_KEY_SIZE); - if (res <= 0) { - // PRINT("set_rsa_kengen_bits failed (%d)\n", res); - goto done; - } - - /* Generate key */ - res = EVP_PKEY_keygen(ctx, &pk); - if (res <= 0) { - // PRINT("keygen failed (%d)\n", res); - goto done; - } - -done: - if (ctx) EVP_PKEY_CTX_free(ctx); - - return res; -} - -int get_pkey_by_ec(EVP_PKEY* pk) { - int res = -1; - EVP_PKEY_CTX* ctx; - - ctx = EVP_PKEY_CTX_new_id(EVP_PKEY_EC, NULL); - if (ctx == NULL) return res; - res = EVP_PKEY_keygen_init(ctx); - if (res <= 0) { - // PRINT("EC_generate_key failed (%d)\n", res); - goto done; - } - - res = EVP_PKEY_CTX_set_ec_paramgen_curve_nid(ctx, NID_secp384r1); - if (res <= 0) { - // PRINT("EC_generate_key failed (%d)\n", res); - goto done; - } - - /* Generate key */ - res = EVP_PKEY_keygen(ctx, &pk); - if (res <= 0) { - // PRINT("EC_generate_key failed (%d)\n", res); - goto done; - } - -done: - if (ctx) EVP_PKEY_CTX_free(ctx); - - return res; -} - -// actually is generating RSA pair -// hardare independant -sgx_status_t 
generate_key_pair(int type, uint8_t** public_key, - size_t* public_key_size, uint8_t** private_key, - size_t* private_key_size) { - sgx_status_t result = SGX_ERROR_UNEXPECTED; - uint8_t* local_public_key = nullptr; - uint8_t* local_private_key = nullptr; - int res = -1; - EVP_PKEY* pkey = nullptr; - BIO* bio = nullptr; - - pkey = EVP_PKEY_new(); - if (!pkey) { - // PRINT("EVP_PKEY_new failed\n"); - result = SGX_ERROR_UNEXPECTED; - goto done; - } - - if (type != RSA_TYPE && type != EC_TYPE) { - type = RSA_TYPE; // by default, we use RSA_TYPE - } - - switch (type) { - case RSA_TYPE: - res = get_pkey_by_rsa(pkey); - break; - case EC_TYPE: - res = get_pkey_by_ec(pkey); - break; - } - - if (res <= 0) { - // PRINT("get_pkey failed (%d)\n", res); - result = SGX_ERROR_UNEXPECTED; - goto done; - } - - // Allocate memory - local_public_key = (uint8_t*)malloc(RSA_3072_PUBLIC_KEY_SIZE); - if (!local_public_key) { - // PRINT("out-of-memory:calloc(local_public_key failed\n"); - result = SGX_ERROR_OUT_OF_EPC; - goto done; - } - memset(local_public_key, 0x00, RSA_3072_PUBLIC_KEY_SIZE); - - local_private_key = (uint8_t*)malloc(RSA_3072_PRIVATE_KEY_SIZE); - if (!local_private_key) { - // PRINT("out-of-memory: calloc(local_private_key) failed\n"); - result = SGX_ERROR_OUT_OF_EPC; - goto done; - } - memset(local_private_key, 0x00, RSA_3072_PRIVATE_KEY_SIZE); - - // Write out the public/private key in PEM format for exchange with - // other enclaves. - bio = BIO_new(BIO_s_mem()); - if (!bio) { - // PRINT("BIO_new for local_public_key failed\n"); - goto done; - } - - res = PEM_write_bio_PUBKEY(bio, pkey); - if (!res) { - // PRINT("PEM_write_bio_PUBKEY failed (%d)\n", res); - goto done; - } - - res = BIO_read(bio, local_public_key, RSA_3072_PUBLIC_KEY_SIZE); - if (!res) { - // PRINT("BIO_read public key failed (%d)\n", res); - goto done; - } - BIO_free(bio); - bio = nullptr; - - bio = BIO_new(BIO_s_mem()); - if (!bio) { - // PRINT("BIO_new for local_public_key failed\n"); - goto done; - } - - res = PEM_write_bio_PrivateKey(bio, pkey, nullptr, nullptr, 0, nullptr, - nullptr); - if (!res) { - // PRINT("PEM_write_bio_PrivateKey failed (%d)\n", res); - goto done; - } - - res = BIO_read(bio, local_private_key, RSA_3072_PRIVATE_KEY_SIZE); - if (!res) { - // PRINT("BIO_read private key failed (%d)\n", res); - goto done; - } - - BIO_free(bio); - bio = nullptr; - - *public_key = local_public_key; - *private_key = local_private_key; - - *public_key_size = strlen(reinterpret_cast(local_public_key)); - *private_key_size = strlen(reinterpret_cast(local_private_key)); - - // PRINT("public_key_size %d, private_key_size %d\n", *public_key_size, - // *private_key_size); - result = SGX_SUCCESS; - -done: - if (bio) BIO_free(bio); - if (pkey) EVP_PKEY_free(pkey); // When this is called, rsa is also freed - if (result != SGX_SUCCESS) { - if (local_public_key) free(local_public_key); - if (local_private_key) free(local_private_key); - } - return result; -} - -// added decryption with encrypted symmetric key and encrypted secret data -// the decrypted data will contains the output encryption key at its tail. 
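For orientation, `rsa_decrypt_data` (defined just below and declared in `common/tcrypto_ext.h`) performs OpenSSL envelope decryption (`EVP_OpenInit`/`EVP_OpenUpdate`/`EVP_OpenFinal`) with an RSA private key in PEM form and hands back a `malloc`'d plaintext whose trailing `OUTPUT_KEY_LENGTH` bytes are the output-encryption key mentioned above. A minimal caller sketch; every buffer name and length here is hypothetical:

```cpp
#include <cstdio>
#include <cstdlib>

#include "common/tcrypto_ext.h"  // declares rsa_decrypt_data() and OUTPUT_KEY_LENGTH

// Hypothetical caller: the inputs are placeholders supplied by the untrusted
// host; only the call sequence and ownership rules are the point.
void handle_envelope(const unsigned char* ciphertext, int ciphertext_len,
                     const unsigned char* encrypted_session_key, int ek_len,
                     const unsigned char* iv,
                     const uint8_t* rsa_private_key_pem, int pem_len) {
  int plain_len = 0;
  unsigned char* plain =
      rsa_decrypt_data(ciphertext, ciphertext_len, encrypted_session_key,
                       ek_len, iv, rsa_private_key_pem, pem_len, &plain_len);
  if (plain == nullptr || plain_len < OUTPUT_KEY_LENGTH) {
    std::fprintf(stderr, "envelope decryption failed\n");
    std::free(plain);
    return;
  }
  // The payload comes first; the trailing OUTPUT_KEY_LENGTH bytes are the key
  // used later to encrypt the task output.
  int payload_len = plain_len - OUTPUT_KEY_LENGTH;
  const unsigned char* output_key = plain + payload_len;
  (void)output_key;  // ... process plain[0, payload_len) and keep the key ...
  std::free(plain);  // rsa_decrypt_data() allocates with malloc(); caller frees.
}
```

The helper does not take ownership of any of its inputs, so the caller frees only the returned buffer.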
-unsigned char* rsa_decrypt_data(const unsigned char* data, int len, - const unsigned char* ek, int ek_len, - const unsigned char* iv, - const uint8_t* rsa_private_key, - int rsa_private_key_len, int* output_len) { - BIO* bio = nullptr; - bio = BIO_new_mem_buf(rsa_private_key, rsa_private_key_len); - RSA* loaded_private_key = NULL; - PEM_read_bio_RSAPrivateKey(bio, &loaded_private_key, NULL, NULL); - EVP_PKEY* pkey = EVP_PKEY_new(); - EVP_PKEY_assign_RSA(pkey, loaded_private_key); - EVP_CIPHER_CTX* ctx = EVP_CIPHER_CTX_new(); - EVP_CIPHER_CTX_init(ctx); - unsigned char* decrypted = (unsigned char*)malloc(len + EVP_MAX_IV_LENGTH); - EVP_OpenInit(ctx, EVP_aes_256_cbc(), ek, ek_len, iv, pkey); - int decrypted_len = 0; - int decyprtedBlockLen = 0; - EVP_OpenUpdate(ctx, decrypted, &decyprtedBlockLen, data, len); - decrypted_len = decyprtedBlockLen; - EVP_OpenFinal(ctx, decrypted + decrypted_len, &decyprtedBlockLen); - decrypted_len += decyprtedBlockLen; - *output_len = decrypted_len; - // free ctx - EVP_CIPHER_CTX_free(ctx); - BIO_free(bio); - EVP_PKEY_free(pkey); - return decrypted; -} - -void load_public_key(const char* pub_key, size_t pub_key_size, - EVP_PKEY** pkey) { - BIO* bio = BIO_new_mem_buf(pub_key, pub_key_size); - EC_KEY* ecPublicKey = NULL; - PEM_read_bio_EC_PUBKEY(bio, &ecPublicKey, NULL, NULL); - EVP_PKEY_assign_EC_KEY(*pkey, ecPublicKey); - BIO_free(bio); -} - -void load_private_key(const char* pri_key, size_t pri_key_size, - EVP_PKEY** pkey) { - BIO* bio = BIO_new_mem_buf(pri_key, pri_key_size); - EC_KEY* ecPrivateKey = NULL; - PEM_read_bio_ECPrivateKey(bio, &ecPrivateKey, NULL, NULL); - EVP_PKEY_assign_EC_KEY(*pkey, ecPrivateKey); - BIO_free(bio); -} - -std::string derive_ecdh_secret(EVP_PKEY* own_key, EVP_PKEY* peer_key) { - EVP_PKEY_CTX* ctx = EVP_PKEY_CTX_new(own_key, NULL); - if (!ctx) { - return ""; - } - if (EVP_PKEY_derive_init(ctx) != 1) { - return ""; - } - - if (EVP_PKEY_derive_set_peer(ctx, peer_key) != 1) { - return ""; - } - - size_t secret_len; - if (1 != EVP_PKEY_derive(ctx, NULL, &secret_len)) { - return ""; - } - - unsigned char* secret = (unsigned char*)malloc(secret_len); - if (!secret) { - return ""; - } - - if (EVP_PKEY_derive(ctx, secret, &secret_len) != 1) { - return ""; - } - auto ret = std::string((char*)secret, secret_len); - EVP_PKEY_CTX_free(ctx); - free(secret); - return ret; -} - -std::string kdf_derive(const unsigned char* secret, size_t secret_len, - const char* info, size_t info_len, size_t key_size) { - char* key = (char*)malloc(key_size); - if (!key) { - return ""; - } - memset(key, 0, key_size); - EVP_PKEY_CTX* ctx; - if (NULL == (ctx = EVP_PKEY_CTX_new_id(EVP_PKEY_HKDF, NULL))) { - return ""; - } - if (1 != EVP_PKEY_derive_init(ctx)) { - return ""; - } - if (1 != EVP_PKEY_CTX_set_hkdf_md(ctx, EVP_sha256())) { - return ""; - } - if (1 != EVP_PKEY_CTX_set1_hkdf_key(ctx, secret, secret_len)) { - return ""; - } - if (1 != EVP_PKEY_CTX_set1_hkdf_salt(ctx, NULL, 0)) { - return ""; - } - if (1 != EVP_PKEY_CTX_add1_hkdf_info(ctx, info, info_len)) { - return ""; - } - if (1 != EVP_PKEY_derive(ctx, (unsigned char*)key, &key_size)) { - return ""; - } - EVP_PKEY_CTX_free(ctx); - std::string ret{key, key_size}; - free(key); - return ret; -} diff --git a/sdk/example_containers/sgx-task/src/common/tcrypto_ext.h b/sdk/example_containers/sgx-task/src/common/tcrypto_ext.h deleted file mode 100644 index 8fa1b03..0000000 --- a/sdk/example_containers/sgx-task/src/common/tcrypto_ext.h +++ /dev/null @@ -1,86 +0,0 @@ -#ifndef MECA_SGX_COMMON_TCRYPTOEXT_H_ -#define 
MECA_SGX_COMMON_TCRYPTOEXT_H_ - -// helper functions to use tcrypto. -#include - -#include "openssl/evp.h" -#include "sgx_tcrypto.h" - -#define AES_GCM_IV_SIZE 12 -#define AES_GCM_TAG_SIZE 16 -#define AES_GCM_AAD_SIZE 4 - -#define MAX_SESSION_KEY_IV_LENGTH 16 -#define ENCRYPTED_SESSION_KEY_LENGTH 384 -#define OUTPUT_KEY_LENGTH 16 - -inline size_t get_aes_decrypted_size(size_t size) { - return size - AES_GCM_IV_SIZE - AES_GCM_TAG_SIZE - AES_GCM_AAD_SIZE; -} - -inline size_t get_aes_encrypted_size(size_t size) { - return size + AES_GCM_IV_SIZE + AES_GCM_TAG_SIZE + AES_GCM_AAD_SIZE; -} - -/** - * @brief - * - * @param src - * @param size - * @param decryption_key - * @param output caller is responsible to free the resources - * @return sgx_status_t - */ -sgx_status_t decrypt_content_with_key_aes(const uint8_t* src, size_t size, - const uint8_t* decryption_key, - uint8_t** output); - -/** - * @brief - * - * @param src - * @param size - * @param encryption_key - * @param output caller is responsible to free the resources. - * @return sgx_status_t - */ -sgx_status_t encrypt_content_with_key_aes(const uint8_t* src, size_t size, - const uint8_t* encryption_key, - uint8_t** output); - -// from SampleCode/SampleAttestedTLS/common/utility.h - -#define RSA_PUBLIC_KEY_SIZE 512 -#define RSA_PRIVATE_KEY_SIZE 2048 - -#define RSA_3072_PUBLIC_KEY_SIZE 650 -#define RSA_3072_PRIVATE_KEY_SIZE 3072 - -#define RSA_TYPE 0 -#define EC_TYPE 1 // EC-P384 - -sgx_status_t generate_key_pair(int type, uint8_t** public_key, - size_t* public_key_size, uint8_t** private_key, - size_t* private_key_size); - -// decrypt data with RSA private key -unsigned char* rsa_decrypt_data(const unsigned char* data, int len, - const unsigned char* ek, int ek_len, - const unsigned char* iv, - const uint8_t* rsa_private_key, - int rsa_private_key_len, int* output_len); - -#define EC_PUBLIC_KEY_SIZE 215 - -void load_public_key(const char* pub_key, size_t pub_key_size, EVP_PKEY** pkey); - -void load_private_key(const char* pri_key, size_t pri_key_size, - EVP_PKEY** pkey); - -std::string derive_ecdh_secret(EVP_PKEY* own_key, EVP_PKEY* peer_key); - -std::string kdf_derive(const unsigned char* secret, size_t secret_len, - const char* info, size_t info_len, size_t key_size); - -#endif // MECA_SGX_COMMON_TCRYPTOEXT_H_ diff --git a/sdk/example_containers/sgx-task/src/server/Makefile b/sdk/example_containers/sgx-task/src/server/Makefile deleted file mode 100644 index dcb0a8e..0000000 --- a/sdk/example_containers/sgx-task/src/server/Makefile +++ /dev/null @@ -1,137 +0,0 @@ -PROJECT_ROOT_DIR ?= $(shell readlink -f ..) 
-WORKER_INSTALL_PATH ?= $(PROJECT_ROOT_DIR)/worker/install
-INSTALL_PREFIX ?= $(shell readlink -f install)
-
-DEPS_INSTALL_DIR = $(PROJECT_ROOT_DIR)/deps/install
-LIBUV_DIR = $(DEPS_INSTALL_DIR)/libuv
-LLHTTP_DIR = $(DEPS_INSTALL_DIR)/llhttp
-
-CONCURRENT_RT_LINK_FLAGS := -L$(LIBUV_DIR)/lib -l:libuv_a.a -lpthread -ldl
-CONCURRENT_RT_CXX_FLAGS := -I$(LIBUV_DIR)/include
-CONCURRENT_RT_CXX_FLAGS += -DUSE_LLHTTP -I$(LLHTTP_DIR)/include
-CONCURRENT_RT_LINK_FLAGS += -L$(LLHTTP_DIR)/lib -l:libllhttp.a
-
-deps:
- @echo "run make deps under repo root to build deps"
-
-# build with the Makefile under concurrent_runtime
-
-SGX_SDK ?= /opt/intel/sgxsdk
-SGXSSL_DIR ?= /opt/intel/sgxssl
-SGX_MODE ?= HW
-SGX_ARCH ?= x64
-SGX_DEBUG ?= 1
-
-WORKER_INSTALL_PATH ?= $(PROJECT_ROOT_DIR)/worker/install
-
-### Intel(R) SGX SDK Settings ###
-ifeq ($(shell getconf LONG_BIT), 32)
- SGX_ARCH := x86
-else ifeq ($(findstring -m32, $(CXXFLAGS)), -m32)
- SGX_ARCH := x86
-endif
-
-ifeq ($(SGX_ARCH), x86)
- SGX_COMMON_CFLAGS := -m32
- SGX_LIBRARY_PATH := $(SGX_SDK)/lib
- SGX_ENCLAVE_SIGNER := $(SGX_SDK)/bin/x86/sgx_sign
- SGX_EDGER8R := $(SGX_SDK)/bin/x86/sgx_edger8r
-else
- SGX_COMMON_CFLAGS := -m64
- SGX_LIBRARY_PATH := $(SGX_SDK)/lib64
- SGX_ENCLAVE_SIGNER := $(SGX_SDK)/bin/x64/sgx_sign
- SGX_EDGER8R := $(SGX_SDK)/bin/x64/sgx_edger8r
-endif
-
-ifeq ($(SGX_DEBUG), 1)
-ifeq ($(SGX_PRERELEASE), 1)
-$(error Cannot set SGX_DEBUG and SGX_PRERELEASE at the same time!!)
-endif
-endif
-
-ifeq ($(SGX_DEBUG), 1)
- SGX_COMMON_CFLAGS += -O0 -g -DSGX_DEBUG
-else
- SGX_COMMON_CFLAGS += -O2
-endif
-
-ifneq ($(SGX_MODE), HW)
- Urts_Library_Name := sgx_urts_sim
-else
- Urts_Library_Name := sgx_urts
-endif
-
-APP_DCAP_LIBS := -lsgx_dcap_ql -lsgx_dcap_quoteverify
-### Intel(R) SGX SDK Settings ###
-
-COMMON_CXX_FLAGS := $(CONCURRENT_RT_CXX_FLAGS) -I$(PROJECT_ROOT_DIR)
-
-.PHONY: all clean install
-
-all: sample-server
-
-Sources := main.cc
-Sources += worker_service.cc
-# setup compile flags
-COMMON_CXX_FLAGS += -DUSE_WORKER_SERVICE
-COMMON_CXX_FLAGS += $(SGX_COMMON_CFLAGS) -fPIC -Wno-attributes
-# Three configuration modes - Debug, prerelease, release
-# Debug - Macro DEBUG enabled.
-# Prerelease - Macro NDEBUG and EDEBUG enabled.
-# Release - Macro NDEBUG enabled.
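The Debug/Prerelease/Release switches configured in this Makefile only toggle preprocessor macros; the C++ sources then gate their diagnostics on them (the guard style mirrors the `#ifndef NDEBUG` blocks in `main.cc`). A small sketch of that pattern, with an illustrative message:

```cpp
#include <cstdio>

// With SGX_DEBUG=1 the flags below add -DDEBUG -UNDEBUG, so this trace is
// compiled in; prerelease/release builds define NDEBUG and compile it out.
void trace_stage(const char* stage) {
#ifndef NDEBUG
  std::printf("\n***%s***\n\n", stage);
#else
  (void)stage;  // avoid an unused-parameter warning in NDEBUG builds
#endif  // NDEBUG
}
```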
-ifeq ($(SGX_DEBUG), 1)
- COMMON_CXX_FLAGS += -DDEBUG -UNDEBUG -UEDEBUG
-else ifeq ($(SGX_PRERELEASE), 1)
- COMMON_CXX_FLAGS += -DNDEBUG -DEDEBUG -UDEBUG
-else
- COMMON_CXX_FLAGS += -DNDEBUG -UEDEBUG -UDEBUG
-endif # SGX_DEBUG
- COMMON_CXX_FLAGS += -I$(SGX_SDK)/include -I$(WORKER_INSTALL_PATH)/include
-# setup linker flags
- COMMON_LINK_FLAGS += -L$(WORKER_INSTALL_PATH)/lib -l:libworker.a
-
- COMMON_LINK_FLAGS += $(CONCURRENT_RT_LINK_FLAGS)
-
-COMMON_LINK_FLAGS += -L$(SGX_LIBRARY_PATH) -l$(Urts_Library_Name) $(APP_DCAP_LIBS) \
--lpthread -lz -lm -lcrypto
-ifneq ($(SGX_MODE), HW)
- COMMON_LINK_FLAGS += -lsgx_uae_service_sim
-else
- COMMON_LINK_FLAGS += -lsgx_uae_service
-endif
-
-COMMON_LINK_FLAGS += -L$(SGXSSL_DIR)/lib64 -lsgx_usgxssl
-
-Common_CXX_Flags += -Wall -Wextra -Winit-self -Wpointer-arith -Wreturn-type \
- -Waddress -Wsequence-point -Wformat-security \
- -Wmissing-include-dirs -Wfloat-equal -Wundef -Wshadow \
- -Wcast-align -Wcast-qual -Wconversion -Wredundant-decls
-
-Source_objects = $(Sources:.cc=.o)
-
-%.o: %.cc
- $(CXX) $(COMMON_CXX_FLAGS) -c $< -o $@
-
-ifneq ($(USE_WORKER_SERVICE), 1)
-json.o: $(PROJECT_ROOT_DIR)/common/json.cc
- $(CXX) $(COMMON_CXX_FLAGS) -c $< -o $@
-
-Source_objects += json.o
-
-endif # build default server
-
-sample-server: $(Source_objects)
- $(CXX) $^ -o $@ $(COMMON_LINK_FLAGS)
-
-install:
- install -d $(INSTALL_PREFIX)/bin
- install -C -m 755 sample-server $(INSTALL_PREFIX)/bin/server
- install -d $(INSTALL_PREFIX)/lib
- install -C -m 664 $(WORKER_INSTALL_PATH)/lib/Worker_Enclave.signed.so $(INSTALL_PREFIX)/lib
-
-clean:
- rm -f *.o
- rm -f sample-server
-
-mrproper: clean
- rm -rf install
diff --git a/sdk/example_containers/sgx-task/src/server/README.md b/sdk/example_containers/sgx-task/src/server/README.md
deleted file mode 100644
index c002c0e..0000000
--- a/sdk/example_containers/sgx-task/src/server/README.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# server
-
-build and launch the server in this directory
-
-```sh
-make all
-SGX_AESM_ADDR=1 ./sample-server ../worker/install/lib/Worker_Enclave.signed.so
-```
diff --git a/sdk/example_containers/sgx-task/src/server/main.cc b/sdk/example_containers/sgx-task/src/server/main.cc
deleted file mode 100644
index cf0b1c5..0000000
--- a/sdk/example_containers/sgx-task/src/server/main.cc
+++ /dev/null
@@ -1,347 +0,0 @@
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include "llhttp.h"
-#include "messager.h"
-#include "service.h"
-#include "uv.h"
-#include "worker_service.h"
-typedef llhttp_t parser_t;
-typedef llhttp_settings_t parser_settings_t;
-#define PARSER_SETTINGS_INIT(m) llhttp_settings_init(m)
-
-namespace {
-
-constexpr char meca_ready[] = "meca-init-done";
-
-Service* svc = nullptr;
-
-Messenger msgr;
-
-struct HttpRequest {
- uint64_t content_len;
- std::string url;
- std::string body;
-};
-
-uv_tcp_t server;
-uv_loop_t* loop;
-
-struct client_t {
- uv_tcp_t handle;
- parser_t parser;
- uv_write_t write_req;
- std::string response_scratch; // buffer for the resbuf
- uv_buf_t resbuf{nullptr, 0};
- bool is_http_req = false;
- bool next_header_value_is_content_len = false;
- HttpRequest req;
- runtime_task_t task_type;
- uv_work_t work; // at a time there is only one work scheduled.
-}; - -parser_settings_t settings; - -bool check_error(int ret) { - if (!ret) return false; - fprintf(stderr, "libuv error: %s\n", uv_strerror(ret)); - return true; -} - -void on_close(uv_handle_t* handle) { - printf("on close\n"); - client_t* client = (client_t*)handle->data; - client->resbuf.base = nullptr; - client->resbuf.len = 0; - delete client; - client = nullptr; -} - -void after_write(uv_write_t* req, int status) { - check_error(status); - uv_close((uv_handle_t*)req->handle, on_close); -} - -// concurrent runtime functions -std::string build_response(response_code_t code, const std::string& msg) { - auto build = [](const std::string&& code_msg, - const std::string&& msg) -> std::string { - return "HTTP/1.1 " + code_msg + "\r\n" + - "Content-Type: application/json\r\n" + - "Content-Length: " + std::to_string(msg.size()) + "\r\n" + "\r\n" + - std::move(msg); - }; - - // auto json_msg = "{\"msg\": \"" + std::move(msg) + "\"}"; - switch (code) { - case OK: - return build("200 OK", msgr.PackageResponse(false, msg)); - case FORBIDDEN: - return build("403 Forbidden", msgr.PackageResponse(true, msg)); - case NOT_FOUND: - return build("404 Not Found", msgr.PackageResponse(true, msg)); - default: - return build("500 Internal Server Error", - msgr.PackageResponse(true, msg)); - } -} - -inline void flush_meca_ready() { - printf("%s\n", meca_ready); - fflush(stdout); -} - -void on_work(uv_work_t* req) { - client_t* client = (client_t*)req->data; - response_code_t code; - std::string msg; -#ifndef NDEBUG - // printf("\nRequest body before processing:\n%s\n", - // client->req.body.c_str()); -#endif // NDEBUG - switch (client->task_type) { - case RA: - code = svc->Ra(&msg); - break; - case RUN: - code = svc->Run(msgr.ExtractUserJson(client->req.body), &msg); - break; - case INIT: - code = svc->Init(client->req.body, &msg); - break; - default: - code = NOT_FOUND; - msg = "unsupported path: " + client->req.url + "\n"; - break; - } - client->response_scratch = build_response(code, std::move(msg)); - client->resbuf = - uv_buf_init(const_cast(client->response_scratch.c_str()), - client->response_scratch.size()); - // printf("response to sent %.*s\n", (int) client->response_scratch.size(), - // client->response_scratch.c_str()); -} - -void after_work(uv_work_t* req, int status) { - if (check_error(status)) return; - client_t* client = (client_t*)req->data; - uv_write(&client->write_req, (uv_stream_t*)&client->handle, &client->resbuf, - 1, after_write); -} -// concurrent runtime functions - -int on_message_begin(parser_t* _) { - (void)_; -#ifndef NDEBUG - printf("\n***MESSAGE BEGIN***\n\n"); -#endif // NDEBUG - return 0; -} - -int on_headers_complete(parser_t* _) { - (void)_; -#ifndef NDEBUG - printf("\n***HEADERS COMPLETE***\n\n"); -#endif // NDEBUG - return 0; -} - -int on_message_complete(parser_t* parser) { -#ifndef NDEBUG - printf("\n***MESSAGE COMPLETE***\n\n"); -#endif // NDEBUG - client_t* client = (client_t*)parser->data; - if (client->req.url == "/run") { - client->task_type = RUN; - } else if (client->req.url == "/ra") { - client->task_type = RA; - } else if (client->req.url == "/init") { - client->task_type = INIT; - } else { - client->task_type = UNDEFINED; - } - uv_queue_work(client->handle.loop, &client->work, on_work, after_work); - return 0; -} - -int on_url(parser_t* parser, const char* at, size_t length) { -#ifndef NDEBUG - // printf("Url (%d): %.*s\n", (int)length, (int)length, at); -#endif // NDEBUG - client_t* client = (client_t*)parser->data; - client->req.url = std::string(at, 
length); - return 0; -} - -int on_header_field(parser_t* parser, const char* at, size_t length) { -#ifndef NDEBUG - // printf("Header field: %.*s\n", (int)length, at); -#endif // NDEBUG - if (strncmp(at, "Content-Type", std::max(length, strlen("Content-Type")))) { - client_t* client = (client_t*)parser->data; - client->next_header_value_is_content_len = true; - } - return 0; -} - -int on_header_value(parser_t* parser, const char* at, size_t /*length*/) { -#ifndef NDEBUG - // printf("Header value: %.*s\n", (int)length, at); -#endif // NDEBUG - client_t* client = (client_t*)parser->data; - if (client->next_header_value_is_content_len) { - client->req.content_len = strtoull(at, NULL, 10); - client->req.body.reserve(client->req.content_len); - client->next_header_value_is_content_len = false; - } - return 0; -} - -int on_body(parser_t* parser, const char* at, size_t length) { - // (void)_; - client_t* client = (client_t*)parser->data; - client->req.body.append(at, length); -#ifndef NDEBUG - // printf("Body: %.*s\n", (int)length, at); -#endif // NDEBUG - return 0; -} - -// all the callbacks implementation are from http-parser/contrib/parsertrace.c -// to print something demonstrating the processing of each phase. -// on_message_complete is rewritten to send back response. -void setup_http_parser_settings() { - PARSER_SETTINGS_INIT(&settings); - settings.on_message_begin = on_message_begin; - settings.on_url = on_url; - settings.on_header_field = on_header_field; - settings.on_header_value = on_header_value; - settings.on_headers_complete = on_headers_complete; - settings.on_body = on_body; - settings.on_message_complete = on_message_complete; -} - -void on_alloc(uv_handle_t* /*handle*/, size_t suggested_size, uv_buf_t* buf) { -#ifndef NDEBUG - // printf("on alloc\n"); -#endif // NDEBUG - *buf = uv_buf_init((char*)malloc(suggested_size), suggested_size); -} - -void on_read(uv_stream_t* stream, ssize_t nread, const uv_buf_t* buf) { -#ifndef NDEBUG - // printf("on read\n"); -#endif // NDEBUG - /*do something*/ - if (nread >= 0) { -#ifndef NDEBUG - // printf("Read:\n%.*s\n", (int) nread, buf->base); -#endif // NDEBUG - /*parse http*/ - client_t* client = (client_t*)uv_handle_get_data((uv_handle_t*)stream); - parser_t* parser = &client->parser; - - if (!client->is_http_req) { - auto ret = llhttp_execute(parser, buf->base, nread); - if (ret != HPE_OK) { - fprintf(stderr, "Parse error: %s %s\n", llhttp_errno_name(ret), - parser->reason); - printf("Not a http request, no response sent\n"); - } - } else { - // continuous reading the request. append to the body. - client->req.body.append(buf->base, nread); - } - - } else { - // error - if (nread != UV_EOF) { - printf("Read error: %ld\n", nread); - } - uv_close((uv_handle_t*)stream, on_close); - uv_stop(loop); - printf("server schedules to shutdown\n"); - } - - free(buf->base); -} - -void on_connection(uv_stream_t* server_handle, int status) { - assert(server_handle == (uv_stream_t*)&server); - printf("\non_connection\n"); - - if (check_error(status)) return; - - // allocate http parser and a handle for each connection - client_t* client = new client_t; - - // init - llhttp_init(&client->parser, HTTP_BOTH, &settings); - - auto ret = uv_tcp_init(server_handle->loop, &client->handle); - // let the data pointer of handle to point to the client struct, - // so we can access http parser. - uv_handle_set_data((uv_handle_t*)&client->handle, client); - // let the data pointer of parser to point to the client struct, - // so we can access handle. 
- client->parser.data = client; - uv_req_set_data((uv_req_t*)&client->work, client); - - check_error(ret); - ret = uv_accept(server_handle, (uv_stream_t*)&client->handle); - if (check_error(ret)) { - uv_close((uv_handle_t*)&client->handle, on_close); - } else { - ret = uv_read_start((uv_stream_t*)&client->handle, on_alloc, on_read); - check_error(ret); - } -} -} // anonymous namespace - -int main(int argc, char* argv[]) { - // pick service selection - - if (argc != 2) { - fprintf(stderr, "Usage: worker_enclave\n"); - exit(1); - } - auto enclave_path = argv[1]; - WorkerService s{enclave_path}; - svc = &s; - - // setup http parser settings - setup_http_parser_settings(); - - // we only need a single loop, so use the default loop. - // uv_loop_t* loop = (uv_loop_t*) malloc(sizeof(uv_loop_t)); - // uv_loop_init(loop); - loop = uv_default_loop(); - - // start a server - uv_tcp_init(loop, &server); - - struct sockaddr_in address; - uv_ip4_addr("0.0.0.0", 8080, &address); - // uv_ip4_addr("0.0.0.0", 2531, &address); - - int ret = uv_tcp_bind(&server, (const struct sockaddr*)&address, 0); - check_error(ret); - - printf("sample server launched\n"); - - ret = uv_listen((uv_stream_t*)&server, 128, on_connection); - check_error(ret); - - flush_meca_ready(); - - uv_run(loop, UV_RUN_DEFAULT); - uv_loop_close(loop); - - // using default loop. - // free(loop); - return 0; -} diff --git a/sdk/example_containers/sgx-task/src/server/messager.h b/sdk/example_containers/sgx-task/src/server/messager.h deleted file mode 100644 index a87709c..0000000 --- a/sdk/example_containers/sgx-task/src/server/messager.h +++ /dev/null @@ -1,38 +0,0 @@ -#ifndef MECA_SGX_MESSENGER_H_ -#define MECA_SGX_MESSENGER_H_ - -#include - -#include "common/json.h" - -class Messenger { - public: - ~Messenger() = default; - - // extract users request json from the serverless platform request - inline std::string ExtractUserJson( - const std::string& platform_message) const { - auto input = json::JSON::Load(platform_message); - return input["value"].ToString(); - } - - // package worker result to serverless platform compatible response format - // for error, it needs a json with a single field called "error" - inline std::string PackageResponse(bool error, - const std::string& response) const { - json::JSON ret; - if (error) { - ret["error"] = std::move(response); - } else { - auto json_response = json::JSON::Load(response); - if (!json_response.IsNull()) { - ret["msg"] = json_response; - } else { - ret["msg"] = std::move(response); - } - } - return ret.dump(); - } -}; - -#endif // MECA_SGX_MESSENGER_H_ diff --git a/sdk/example_containers/sgx-task/src/server/service.h b/sdk/example_containers/sgx-task/src/server/service.h deleted file mode 100644 index 5537fb1..0000000 --- a/sdk/example_containers/sgx-task/src/server/service.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef MECA_SGX_CONCURRENTRT_U_OW_SERVICE_H_ -#define MECA_SGX_CONCURRENTRT_U_OW_SERVICE_H_ - -#include - -typedef enum { - OK, // 200 - FORBIDDEN, // 403 - NOT_FOUND, // 404 - SERVER_ERROR // 500 -} response_code_t; - -typedef enum { INIT, RUN, RA, UNDEFINED } runtime_task_t; - -// backend service interface for concurrent runtime. -class Service { - public: - virtual ~Service() = default; - - // init is guaranteed to execute once - virtual response_code_t Init(const std::string& request, - std::string* response) = 0; - - virtual response_code_t Ra(std::string* response) = 0; - - // run is a asynchronous scheduled task. 
needs to be thread-safe - virtual response_code_t Run(const std::string& request, - std::string* response) = 0; -}; - -#endif // MECA_SGX_CONCURRENTRT_U_OW_SERVICE_H_ diff --git a/sdk/example_containers/sgx-task/src/server/worker_service.cc b/sdk/example_containers/sgx-task/src/server/worker_service.cc deleted file mode 100644 index f7c561b..0000000 --- a/sdk/example_containers/sgx-task/src/server/worker_service.cc +++ /dev/null @@ -1,139 +0,0 @@ -#include "worker_service.h" - -#include -#include -#include // for INT_MAX -#include // for strlen -#include - -#include "common/hexutil.h" -#include "common/json.h" - -// // implementation of ocalls -// void ocall_debug_print(const void* s, size_t len) { -// assert(len < INT_MAX); -// printf("DEBUG PRINT: %.*s\n", (int) len, (const char*) s); -// } -// void ocall_debug_print_string(const char* s) { -// printf("DEBUG PRINT: %s\n", s); -// } -// void ocall_debug_print_hexstring(const char* s) { -// printf("DEBUG PRINT (hex): "); -// for (unsigned int i = 0; i < strlen(s); i++) { -// printf("%02hhx", (unsigned char) s[i]); -// } -// printf("\n"); -// } -// void ocall_debug_print_hex(const void* s, size_t len) { -// printf("DEBUG PRINT (hex): "); -// auto it = (const unsigned char*) s; -// for (unsigned int i = 0; i < len; i++) { -// printf("%02hhx", *(it++)); -// } -// printf("\n"); -// } -// // implementation of ocalls - -void initialize_once(Worker* worker, response_code_t* code) { - // execute logic on the value for initialization - // decode json request - // return 404 for decode failure - printf("init execution\n"); - // execute logic on the value for initialization - *code = worker->Initialize() ? OK : SERVER_ERROR; -} - -response_code_t WorkerService::Init(const std::string& /*request*/, - std::string* response) { - assert(response); - response_code_t code = FORBIDDEN; - - std::call_once(initalized_, initialize_once, &worker_, &code); - switch (code) { - case FORBIDDEN: - // send response of error 403 - printf("init phase: reinitalize forbidden by default\n"); - *response = "reinit forbidden"; - break; - case OK: - *response = "init finished"; - code = OK; - break; - default: - *response = "unkown server code from init"; - code = SERVER_ERROR; - break; - } - - return code; -} - -response_code_t WorkerService::Ra(std::string* response) { - assert(response); - - response_code_t code = OK; - std::call_once(initalized_, initialize_once, &worker_, &code); - if (code != OK) { - // called init and errors encountered - *response = "init during run and error encountered"; - return SERVER_ERROR; - } - printf("ra execution"); - - std::this_thread::sleep_for(std::chrono::milliseconds(10)); - // upon error return 502 (500 can be used for framework internal error) - - // decode the request - // execute the logic - std::string enclave_public_key; - std::string report = worker_.GetKeyAndReport(&enclave_public_key); - if (report.size() == 0) { - *response = "ra error"; - return SERVER_ERROR; - } - - // wrap the result into a response json - json::JSON json_response; - json_response["report"] = report; - json_response["key"] = enclave_public_key; - *response = json_response.dump(); - - // return 200 with the response - return OK; -} - -response_code_t WorkerService::Run(const std::string& request, - std::string* response) { - assert(response); - - response_code_t code = OK; - std::call_once(initalized_, initialize_once, &worker_, &code); - if (code != OK) { - // called init and errors encountered - *response = "init during run and error encountered"; - 
return SERVER_ERROR; - } - printf("run execution"); - - printf("Request (len=%ld): %s", request.size(), request.c_str()); - - std::this_thread::sleep_for(std::chrono::milliseconds(10)); - // upon error return 502 (500 can be used for framework internal error) - - // decode the request - auto request_data = hex_decode(request.data(), request.size()); - // execute the logic - std::string output; - auto ret = worker_.Handle(0, request_data, &output); - if (ret != 0) { - // *response = "worker execution error"; - *response = output; - return SERVER_ERROR; - } - - // wrap the result into a response json - *response = std::move(output); - - // return 200 with the response - return OK; -} diff --git a/sdk/example_containers/sgx-task/src/server/worker_service.h b/sdk/example_containers/sgx-task/src/server/worker_service.h deleted file mode 100644 index bf79e7e..0000000 --- a/sdk/example_containers/sgx-task/src/server/worker_service.h +++ /dev/null @@ -1,37 +0,0 @@ -#ifndef MECA_SGX_CONCURRENTRT_U_OW_WORKERSERVICE_H_ -#define MECA_SGX_CONCURRENTRT_U_OW_WORKERSERVICE_H_ - -#include -#include -#include - -#include "service.h" -#include "worker.h" - -class WorkerService : public Service { - public: - // the logic of worker and where it fetch and store data are configured - // in the worker_config.h - WorkerService(const std::string& enclave_path) - : worker_(std::move(enclave_path)) { - printf("Worker service config:\n enclave file name: %s\n", - enclave_path.c_str()); - } - ~WorkerService() = default; - - response_code_t Init(const std::string& request, - std::string* response) override; - - // ra will also attemp to init if it has not done so. - response_code_t Ra(std::string* response) override; - - // run will also attemp to init if it has not done so. - response_code_t Run(const std::string& request, - std::string* response) override; - - private: - std::once_flag initalized_; - Worker worker_; -}; - -#endif // MECA_SGX_CONCURRENTRT_U_OW_WORKERSERVICE_H_ diff --git a/sdk/example_containers/sgx-task/src/worker/Makefile b/sdk/example_containers/sgx-task/src/worker/Makefile deleted file mode 100644 index ca6b877..0000000 --- a/sdk/example_containers/sgx-task/src/worker/Makefile +++ /dev/null @@ -1,31 +0,0 @@ -SGX_SDK ?= /opt/intel/sgxsdk -SGX_MODE ?= HW -SGX_ARCH ?= x64 -SGX_DEBUG ?= 1 - -PROJECT_ROOT_DIR := $(shell readlink -f ..) - -INSTALL ?= install -INSTALL_PREFIX ?= ./install -INSTALL_LIB_DIR = $(INSTALL_PREFIX)/lib -INSTALL_INCLUDE_DIR = $(INSTALL_PREFIX)/include - -.PHONY: all install clean mrproper - -all: - $(MAKE) -ef sgx_u.mk all SGX_MODE=$(SGX_MODE) SGX_DEBUG=$(SGX_DEBUG) - $(MAKE) -ef sgx_t.mk all SGX_MODE=$(SGX_MODE) SGX_DEBUG=$(SGX_DEBUG) - -install: - $(INSTALL) -d $(INSTALL_INCLUDE_DIR) - $(INSTALL) -d $(INSTALL_LIB_DIR) - $(INSTALL) -C -m 644 untrusted/worker.h $(INSTALL_INCLUDE_DIR) - $(INSTALL) -C -m 664 *.signed.so $(INSTALL_LIB_DIR) - $(INSTALL) -C -m 644 *.a $(INSTALL_LIB_DIR) - -clean: - $(MAKE) -ef sgx_u.mk clean - $(MAKE) -ef sgx_t.mk clean - -mrproper: clean - rm -rf ./install diff --git a/sdk/example_containers/sgx-task/src/worker/sgx_t.mk b/sdk/example_containers/sgx-task/src/worker/sgx_t.mk deleted file mode 100644 index 10dca5e..0000000 --- a/sdk/example_containers/sgx-task/src/worker/sgx_t.mk +++ /dev/null @@ -1,153 +0,0 @@ -### Project Settings ### -PROJECT_ROOT_DIR ?= $(shell readlink -f ..) 
- -### Intel(R) SGX SDK Settings ### -SGX_SDK ?= /opt/intel/sgxsdk -SGXSSL_DIR ?= /opt/intel/sgxssl -SGX_MODE ?= HW -SGX_ARCH ?= x64 -SGX_DEBUG ?= 1 -SGX_PRERELEASE ?= 0 -ifeq ($(shell getconf LONG_BIT), 32) - SGX_ARCH := x86 -else ifeq ($(findstring -m32, $(CXXFLAGS)), -m32) - SGX_ARCH := x86 -endif - -ifeq ($(SGX_ARCH), x86) - SGX_COMMON_CFLAGS := -m32 - SGX_LIBRARY_PATH := $(SGX_SDK)/lib - SGX_ENCLAVE_SIGNER := $(SGX_SDK)/bin/x86/sgx_sign - SGX_EDGER8R := $(SGX_SDK)/bin/x86/sgx_edger8r -else - SGX_COMMON_CFLAGS := -m64 - SGX_LIBRARY_PATH := $(SGX_SDK)/lib64 - SGX_ENCLAVE_SIGNER := $(SGX_SDK)/bin/x64/sgx_sign - SGX_EDGER8R := $(SGX_SDK)/bin/x64/sgx_edger8r -endif - -ifeq ($(SGX_DEBUG), 1) -ifeq ($(SGX_PRERELEASE), 1) -$(error Cannot set SGX_DEBUG and SGX_PRERELEASE at the same time!!) -endif -endif - -ifeq ($(SGX_DEBUG), 1) - SGX_COMMON_CFLAGS += -O0 -g -DSGX_DEBUG -else - SGX_COMMON_CFLAGS += -O2 -endif - -ifneq ($(SGX_MODE), HW) - Trts_Library_Name := sgx_trts_sim - Service_Library_Name := sgx_tservice_sim -else - Trts_Library_Name := sgx_trts - Service_Library_Name := sgx_tservice -endif - -ifeq ($(SGX_MODE), HW) -ifneq ($(SGX_DEBUG), 1) -ifneq ($(SGX_PRERELEASE), 1) -Build_Mode = HW_RELEASE -endif -endif -endif - -SGX_COMMON_FLAGS += -Wall -Wextra -Wchar-subscripts -Wno-coverage-mismatch \ - -Winit-self -Wpointer-arith -Wreturn-type \ - -Waddress -Wsequence-point -Wformat-security \ - -Wmissing-include-dirs -Wfloat-equal -Wundef -Wshadow \ - -Wcast-align -Wcast-qual -Wconversion -Wredundant-decls - -DCAP_TVL_LIB = sgx_dcap_tvl -### Intel(R) SGX SDK Settings ### - -### Project Settings ### -SGX_Include_Paths := -I$(SGX_SDK)/include -I$(SGX_SDK)/include/tlibc \ - -I$(SGX_SDK)/include/libcxx -I$(SGXSSL_DIR)/include - -Flags_Just_For_C := -Wno-implicit-function-declaration -std=c11 -Flags_Just_For_Cpp := -Wnon-virtual-dtor -std=c++11 -nostdinc++ -Common_C_Cpp_Flags := $(SGX_COMMON_CFLAGS) $(SGX_COMMON_FLAGS) -nostdinc -fvisibility=hidden -fpie -fstack-protector -fno-builtin -fno-builtin-printf -I. -Common_C_Flags := -Wjump-misses-init -Wstrict-prototypes \ - -Wunsuffixed-float-constants - -Enclave_C_Flags := $(Flags_Just_For_C) $(Common_C_Cpp_Flags) $(Common_C_Flags) -Itrusted $(SGX_Include_Paths) -I$(PROJECT_ROOT_DIR) - -Enclave_Cpp_Flags := $(Flags_Just_For_Cpp) $(Common_C_Cpp_Flags) -DSGXCLIENT -Itrusted $(SGX_Include_Paths) -I$(PROJECT_ROOT_DIR) - -Crypto_Library_Name := sgx_tcrypto -SGXSSL_Link_Flags := -L$(SGXSSL_DIR)/lib64 -Wl,--whole-archive -lsgx_tsgxssl -Wl,--no-whole-archive -lsgx_tsgxssl_crypto -lsgx_pthread - -Enclave_Link_Flags := $(SGX_COMMON_CFLAGS) \ - -Wl,--no-undefined -nostdlib -nodefaultlibs -nostartfiles \ - -L$(SGX_LIBRARY_PATH) \ - -Wl,--whole-archive -l$(DCAP_TVL_LIB) -l$(Trts_Library_Name) -Wl,--no-whole-archive \ - $(SGXSSL_Link_Flags) \ - -Wl,--start-group -lsgx_tstdc -lsgx_tcxx -l$(Crypto_Library_Name) \ - -l$(Service_Library_Name) -Wl,--end-group \ - -Wl,-Bstatic -Wl,-Bsymbolic \ - -Wl,-pie,-eenclave_entry -Wl,--export-dynamic \ - -Wl,--defsym,__ImageBase=0 \ - -Wl,--version-script=trusted/Worker_Enclave.lds - -### Project Settings ### - -### Phony targets ### -.PHONY: all clean - -### Build all ### -ifeq ($(Build_Mode), HW_RELEASE) -all: Worker_Enclave.so - @echo "Build enclave Server_Enclave.so [$(Build_Mode)|$(SGX_ARCH)] success!" 
- @echo - @echo "*********************************************************************************************************************************************************" - @echo "PLEASE NOTE: In this mode, please sign the Server_Enclave.so first using Two Step Sign mechanism before you run the app to launch and access the enclave." - @echo "*********************************************************************************************************************************************************" - @echo -else -all: Worker_Enclave.signed.so -endif - -### Edger8r related sourcs ### -trusted/Enclave_t.c: $(SGX_EDGER8R) ./trusted/Enclave.edl - @echo Entering ./trusted - cd ./trusted && $(SGX_EDGER8R) --trusted ../trusted/Enclave.edl --search-path ../trusted --search-path $(SGX_SDK)/include --search-path $(SGXSSL_DIR)/include --search-path $(PROJECT_ROOT_DIR) --search-path $(SGX_RA_TLS_DIR)/include $(Enclave_Search_Dirs) - @echo "GEN => $@" - -trusted/Enclave_t.o: ./trusted/Enclave_t.c - $(CC) $(Enclave_C_Flags) -c $< -o $@ - @echo "CC <= $<" -### Edger8r related sourcs ### - -## build files needed from other directory -trusted/json.o: $(PROJECT_ROOT_DIR)/common/json.cc - $(CXX) $(Enclave_Cpp_Flags) -c $< -o $@ - @echo "CXX <= $<" - -trusted/tcrypto_ext.o: $(PROJECT_ROOT_DIR)/common/tcrypto_ext.cc - $(CXX) $(Enclave_Cpp_Flags) -c $< -o $@ - @echo "CXX <= $<" - -## build files needed from other directory -trusted/Worker_Enclave.o: trusted/Worker_Enclave.cc - $(CXX) $(Enclave_Cpp_Flags) -c $< -o $@ - @echo "CXX <= $<" - -### Enclave Image ### -Enclave_Cpp_Objects := trusted/json.o trusted/tcrypto_ext.o trusted/Worker_Enclave.o - -Worker_Enclave.so: trusted/Enclave_t.o $(Enclave_Cpp_Objects) - $(CXX) $^ -o $@ $(Enclave_Link_Flags) - @echo "LINK => $@" - -### Signing ### -Worker_Enclave.signed.so: Worker_Enclave.so - $(SGX_ENCLAVE_SIGNER) sign -key trusted/Worker_Enclave_private.pem -enclave Worker_Enclave.so -out $@ -config trusted/Worker_Enclave.config.xml - @echo "SIGN => $@" -### Sources ### - -### Clean command ### -clean: - rm -f Worker_Enclave.* trusted/Enclave_t.* $(Enclave_Cpp_Objects) diff --git a/sdk/example_containers/sgx-task/src/worker/sgx_u.mk b/sdk/example_containers/sgx-task/src/worker/sgx_u.mk deleted file mode 100644 index 2f148e8..0000000 --- a/sdk/example_containers/sgx-task/src/worker/sgx_u.mk +++ /dev/null @@ -1,159 +0,0 @@ -### Project Settings ### -PROJECT_ROOT_DIR ?= $(shell readlink -f ..) - -### Intel(R) SGX SDK Settings ### -SGX_SDK ?= /opt/intel/sgxsdk -SGXSSL_DIR ?= /opt/intel/sgxssl -SGX_MODE ?= HW -SGX_ARCH ?= x64 -SGX_DEBUG ?= 1 -SGX_PRERELEASE ?= 0 -ifeq ($(shell getconf LONG_BIT), 32) - SGX_ARCH := x86 -else ifeq ($(findstring -m32, $(CXXFLAGS)), -m32) - SGX_ARCH := x86 -endif - -ifeq ($(SGX_ARCH), x86) - SGX_COMMON_CFLAGS := -m32 - SGX_LIBRARY_PATH := $(SGX_SDK)/lib - SGX_ENCLAVE_SIGNER := $(SGX_SDK)/bin/x86/sgx_sign - SGX_EDGER8R := $(SGX_SDK)/bin/x86/sgx_edger8r -else - SGX_COMMON_CFLAGS := -m64 - SGX_LIBRARY_PATH := $(SGX_SDK)/lib64 - SGX_ENCLAVE_SIGNER := $(SGX_SDK)/bin/x64/sgx_sign - SGX_EDGER8R := $(SGX_SDK)/bin/x64/sgx_edger8r -endif - -ifeq ($(SGX_DEBUG), 1) -ifeq ($(SGX_PRERELEASE), 1) -$(error Cannot set SGX_DEBUG and SGX_PRERELEASE at the same time!!) 
-endif -endif - -ifeq ($(SGX_DEBUG), 1) - SGX_COMMON_CFLAGS += -O0 -g -DSGX_DEBUG -else - SGX_COMMON_CFLAGS += -O2 -endif - -ifneq ($(SGX_MODE), HW) - Urts_Library_Name := sgx_urts_sim -else - Urts_Library_Name := sgx_urts -endif - -ifeq ($(SGX_MODE), HW) -ifneq ($(SGX_DEBUG), 1) -ifneq ($(SGX_PRERELEASE), 1) -Build_Mode = HW_RELEASE -endif -endif -endif - -APP_DCAP_LIBS := -lsgx_dcap_ql -lsgx_dcap_quoteverify -### Intel(R) SGX SDK Settings ### - -### Project Settings ### -Common_C_Cpp_Flags := $(SGX_COMMON_CFLAGS) -fPIC -Wno-attributes -I. -Common_C_Cpp_Flags += -Wall -Wextra -Winit-self -Wpointer-arith -Wreturn-type \ - -Waddress -Wsequence-point -Wformat-security \ - -Wmissing-include-dirs -Wfloat-equal -Wundef -Wshadow \ - -Wcast-align -Wcast-qual -Wconversion -Wredundant-decls -Common_C_Flags := -Wjump-misses-init -Wstrict-prototypes \ - -Wunsuffixed-float-constants - -# Three configuration modes - Debug, prerelease, release -# Debug - Macro DEBUG enabled. -# Prerelease - Macro NDEBUG and EDEBUG enabled. -# Release - Macro NDEBUG enabled. -ifeq ($(SGX_DEBUG), 1) - Common_C_Cpp_Flags += -DDEBUG -UNDEBUG -UEDEBUG -else ifeq ($(SGX_PRERELEASE), 1) - Common_C_Cpp_Flags += -DNDEBUG -DEDEBUG -UDEBUG -else - Common_C_Cpp_Flags += -DNDEBUG -UEDEBUG -UDEBUG -endif - -App_C_Cpp_Flags := $(Common_C_Cpp_Flags) -Iuntrusted -I$(SGX_SDK)/include -I$(PROJECT_ROOT_DIR) - -### Project Settings ### - -### Linking setting ### -App_Link_Flags := -L$(SGX_LIBRARY_PATH) -l$(Urts_Library_Name) $(APP_DCAP_LIBS) \ - -lpthread -lz -lm -lcrypto - -## Add sgx_uae_service library to link ## -ifneq ($(SGX_MODE), HW) - App_Link_Flags += -lsgx_uae_service_sim -else - App_Link_Flags += -lsgx_uae_service -endif - -## Add sgx ssl library -App_Link_Flags += -L$(SGXSSL_DIR)/lib64 -lsgx_usgxssl -### Linking setting ### - -### Phony targets ### -.PHONY: all clean - -### Build all ### -ifeq ($(Build_Mode), HW_RELEASE) -all: App - @echo "Build App [$(Build_Mode)|$(SGX_ARCH)] success!" - @echo - @echo "*********************************************************************************************************************************************************" - @echo "PLEASE NOTE: In this mode, please sign the Worker_Enclave.so first using Two Step Sign mechanism before you run the app to launch and access the enclave." 
- @echo "*********************************************************************************************************************************************************" - @echo - -else -all: App App2 -endif - -### Sources ### -## Edger8r related sources ## -untrusted/Enclave_u.c: $(SGX_EDGER8R) trusted/Enclave.edl - @echo Entering ./untrusted - cd ./untrusted && $(SGX_EDGER8R) --untrusted ../trusted/Enclave.edl --search-path ../trusted --search-path $(SGX_SDK)/include --search-path $(SGXSSL_DIR)/include --search-path $(PROJECT_ROOT_DIR) $(Enclave_Search_Dirs) - @echo "GEN => $@" - -untrusted/Enclave_u.o: untrusted/Enclave_u.c - $(CC) $(Common_C_Flags) $(App_C_Cpp_Flags) -c $< -o $@ - @echo "CC <= $<" -## Edger8r related sources ## - -## build files needed from other directory -untrusted/hexutil.o: $(PROJECT_ROOT_DIR)/common/hexutil.cc - $(CXX) $(App_C_Cpp_Flags) -c $< -o $@ - @echo "CXX <= $<" - -untrusted/json.o: $(PROJECT_ROOT_DIR)/common/json.cc - $(CXX) $(App_C_Cpp_Flags) -c $< -o $@ - @echo "CXX <= $<" - -untrusted/%.o: untrusted/%.cc - $(CXX) $(App_C_Cpp_Flags) -c $< -o $@ - @echo "CXX <= $<" - -App_Cpp_Objects := untrusted/worker.o untrusted/json.o untrusted/hexutil.o \ - untrusted/ocall_patches.o - -libworker.a: untrusted/Enclave_u.o $(App_Cpp_Objects) - ar -rcs $@ $^ - @echo "LINK => $@" - -## Build worker app ## -App: untrusted/App.o libworker.a - $(CXX) $< -o $@ -L. -l:libworker.a $(App_Link_Flags) - @echo "LINK => $@" - -App2: untrusted/App2.o libworker.a - $(CXX) $< -o $@ -L. -l:libworker.a $(App_Link_Flags) - @echo "LINK => $@" -### Sources ### - -### Clean command ### -clean: - rm -f App2 App untrusted/App2.o untrusted/App.o $(App_Cpp_Objects) untrusted/Enclave_u.* libworker.a diff --git a/sdk/example_containers/sgx-task/src/worker/trusted/Enclave.edl b/sdk/example_containers/sgx-task/src/worker/trusted/Enclave.edl deleted file mode 100644 index 386d68e..0000000 --- a/sdk/example_containers/sgx-task/src/worker/trusted/Enclave.edl +++ /dev/null @@ -1,33 +0,0 @@ -enclave { - include "stdint.h" - - from "sgx_tstdc.edl" import *; // needed for mutex and lock_guard use - from "sgx_tsgxssl.edl" import *; // needed for sgxssl - from "sgx_pthread.edl" import*; // needed for sgxssl - - include "sgx_report.h" - include "sgx_ql_lib_common.h" - - trusted { - public sgx_status_t enc_get_key_and_report( - [in] const sgx_target_info_t* qe_target_info, - [out] sgx_report_t* app_report, - [out, size=key_size] uint8_t* key, size_t key_size); - public sgx_status_t enc_run( - [in, size=input_data_size] const char* input_data, size_t input_data_size, - [out] size_t* output_size); - public sgx_status_t enc_get_output( - [out, size=prediction_size] uint8_t* prediction, - size_t prediction_size); - public void enc_clear_exec_context(); - }; - - untrusted { -#ifndef NDEBUG - void ocall_debug_print([in, size=len] const void* s, size_t len); - void ocall_debug_print_string([in, string] const char* str); - void ocall_debug_print_hex([in, size=len] const void* str, size_t len); - void ocall_debug_print_hexstring([in, string] const char* str); -#endif // NDEBUG - }; -}; diff --git a/sdk/example_containers/sgx-task/src/worker/trusted/Worker_Enclave.cc b/sdk/example_containers/sgx-task/src/worker/trusted/Worker_Enclave.cc deleted file mode 100644 index 59b258e..0000000 --- a/sdk/example_containers/sgx-task/src/worker/trusted/Worker_Enclave.cc +++ /dev/null @@ -1,289 +0,0 @@ -#include -#include -#include -#include - -#include "Enclave_t.h" -#include "common/tcrypto_ext.h" -#include "sgx_trts.h" - -#ifndef NDEBUG 
-#include -#endif // NDEBUG - -#include "sgx_ql_lib_common.h" -#include "sgx_quote_3.h" -#include "sgx_utils.h" - -// for debug -#include "openssl/ec.h" -#include "openssl/evp.h" -#include "openssl/pem.h" -#include "openssl/rsa.h" - -#define OUTPUT_BUF_SZ 4000 - -namespace { - -// global variable of private key hold by the enclave -uint8_t* enclave_public_key = nullptr; -size_t enclave_public_key_size = 0; -uint8_t* enclave_private_key = nullptr; -size_t enclave_private_key_size = 0; - -uint8_t enc_iv_buf[AES_GCM_IV_SIZE] = {0}; - -// --- execution context --- // -// the public key of the enclave -thread_local char output_buf[OUTPUT_BUF_SZ]; -thread_local size_t actual_output_size; - -// invoked after completing an request no matter success/failure -void ClearRequestContext() { memset(output_buf, 0, OUTPUT_BUF_SZ); } -// invoke upon failure and final tear down. -void ClearExecutionContext() { ClearRequestContext(); } - -inline void printf(const char* msg) { ocall_debug_print_string(msg); } - -// TODO -std::string decrypt_content(const char* input_data, size_t input_data_size, - std::string* output_key) { - const char* tag = input_data + (input_data_size - 16); - const char* iv = input_data + (input_data_size - 12 - 16); - const char* peer_pub_key_str = - input_data + (input_data_size - 12 - 16 - EC_PUBLIC_KEY_SIZE); - - // printf("peer key: %s\n", std::string{peer_pub_key_str, - // EC_PUBLIC_KEY_SIZE}.c_str()); - - // load peer key - EVP_PKEY* peer_pub_key = EVP_PKEY_new(); - load_public_key(peer_pub_key_str, EC_PUBLIC_KEY_SIZE, &peer_pub_key); - - // load enclave private key - EVP_PKEY* own_key = EVP_PKEY_new(); - load_private_key((const char*)enclave_private_key, enclave_private_key_size, - &own_key); - - // derive shared secret - std::string secret = derive_ecdh_secret(own_key, peer_pub_key); - std::string session_key = kdf_derive((const unsigned char*)secret.c_str(), - secret.size(), "info", 4, 16); - - EVP_PKEY_free(peer_pub_key); - EVP_PKEY_free(own_key); - - // decrypt the content - EVP_CIPHER_CTX* ctx; - if (!(ctx = EVP_CIPHER_CTX_new())) return ""; - if (EVP_DecryptInit_ex(ctx, EVP_aes_128_gcm(), NULL, - (const unsigned char*)session_key.c_str(), - (const unsigned char*)iv) != 1) { - EVP_CIPHER_CTX_free(ctx); - return ""; - } - - EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_TAG, 16, (void*)tag); - - int plaintext_len; - int len; - - unsigned char decrypted[input_data_size - 44]; - if (EVP_DecryptUpdate(ctx, decrypted, &len, (const unsigned char*)input_data, - input_data_size - 16 - 12 - EC_PUBLIC_KEY_SIZE) != 1) { - EVP_CIPHER_CTX_free(ctx); - // printf("EVP_DecryptUpdate failed\n"); - return ""; - } - // printf("decrypted data updated to (%dB)\n", len); - output_key->assign((char*)decrypted + len - OUTPUT_KEY_LENGTH, - OUTPUT_KEY_LENGTH); - std::string ret{(char*)decrypted, len - OUTPUT_KEY_LENGTH}; - EVP_CIPHER_CTX_free(ctx); - return ret; - - // assert(input_data_size >= MAX_SESSION_KEY_IV_LENGTH + - // ENCRYPTED_SESSION_KEY_LENGTH); const char* iv = input_data + - // (input_data_size - MAX_SESSION_KEY_IV_LENGTH); const char* ek = input_data - // + (input_data_size - MAX_SESSION_KEY_IV_LENGTH - - // ENCRYPTED_SESSION_KEY_LENGTH); int encrypted_input_len = input_data_size - - // (MAX_SESSION_KEY_IV_LENGTH + ENCRYPTED_SESSION_KEY_LENGTH); - - // // perform decryption - // int decrypted_len; - // unsigned char* decrypted = rsa_decrypt_data((const unsigned char*) - // input_data, encrypted_input_len, (const unsigned char*) ek, - // ENCRYPTED_SESSION_KEY_LENGTH, (const unsigned char*) 
iv, - // enclave_private_key, enclave_private_key_size, &decrypted_len); - // output_key->assign((char*)decrypted + decrypted_len-OUTPUT_KEY_LENGTH, - // OUTPUT_KEY_LENGTH); std::string ret{(char*)decrypted, - // decrypted_len-OUTPUT_KEY_LENGTH}; - - // if (decrypted) free(decrypted); - // return ret; -} - -std::string exec(const char* input, size_t input_len) { - return "hello " + std::string(input, input_len); -} - -// TODO -sgx_status_t encrypt_output(const std::string& output_plain, - const std::string& output_key, - std::string* encrypt_output) { - EVP_CIPHER_CTX* ctx; - if (!(ctx = EVP_CIPHER_CTX_new())) return SGX_ERROR_UNEXPECTED; - - if (EVP_EncryptInit_ex(ctx, EVP_aes_128_gcm(), NULL, - (const unsigned char*)output_key.data(), - enc_iv_buf) != 1) { - // if(EVP_EncryptInit_ex(ctx, EVP_aes_256_cbc(), NULL, key, iv) != 1) { - EVP_CIPHER_CTX_free(ctx); - return SGX_ERROR_UNEXPECTED; - } - - unsigned char ciphertext[output_plain.size()]; - - int ciphertext_len; - int len; - if (EVP_EncryptUpdate(ctx, ciphertext, &len, - (const unsigned char*)output_plain.data(), - output_plain.size()) != 1) { - EVP_CIPHER_CTX_free(ctx); - return SGX_ERROR_UNEXPECTED; - } - ciphertext_len = len; - - if (EVP_EncryptFinal_ex(ctx, ciphertext + len, &len) != 1) { - EVP_CIPHER_CTX_free(ctx); - return SGX_ERROR_UNEXPECTED; - } - ciphertext_len += len; - - unsigned char tag[AES_GCM_TAG_SIZE] = { - 0, - }; - // tag needed for gcm, not for cbc - EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_GET_TAG, 16, tag); - EVP_CIPHER_CTX_free(ctx); - encrypt_output->assign((char*)ciphertext, ciphertext_len); - encrypt_output->append((char*)enc_iv_buf, AES_GCM_IV_SIZE); - encrypt_output->append((char*)tag, AES_GCM_TAG_SIZE); - - // ocall_debug_print_hex((const uint8_t*)encrypt_output->data(), - // encrypt_output->size()); - return SGX_SUCCESS; -} -} // anonymous namespace - -sgx_status_t enc_get_key_and_report(const sgx_target_info_t* qe_target_info, - sgx_report_t* app_report, uint8_t* key, - size_t key_size) { - // prepare in enclave key - uint8_t* public_key_buffer = nullptr; - size_t public_key_size = 0; - uint8_t* private_key_buffer = nullptr; - size_t private_key_size = 0; - // sgx_status_t status = generate_key_pair(RSA_TYPE, &public_key_buffer, - // &public_key_size, &private_key_buffer, &private_key_size); - sgx_status_t status = - generate_key_pair(EC_TYPE, &public_key_buffer, &public_key_size, - &private_key_buffer, &private_key_size); - if (status != SGX_SUCCESS) { - if (private_key_buffer) free(private_key_buffer); - if (public_key_buffer) free(public_key_buffer); - return status; - } - - // ocall_debug_print(public_key_buffer, public_key_size); - // ocall_debug_print(private_key_buffer, private_key_size); - - // copy the private key to enclave global variable - enclave_public_key = public_key_buffer; - enclave_public_key_size = public_key_size; - enclave_private_key = private_key_buffer; - enclave_private_key_size = private_key_size; - - // obtain the hash to include in report - sgx_sha_state_handle_t sha_handle = nullptr; - status = sgx_sha256_init(&sha_handle); - if (status != SGX_SUCCESS) { - return status; - } - - status = sgx_sha256_update(enclave_public_key, (uint32_t)public_key_size, - sha_handle); - if (status != SGX_SUCCESS) { - return status; - } - - sgx_sha256_hash_t hash = {0}; - status = sgx_sha256_get_hash(sha_handle, &hash); - if (status != SGX_SUCCESS) { - return status; - } - - sgx_report_data_t report_data = {0}; - memcpy(report_data.d, hash, sizeof(hash)); - - sgx_sha256_close(sha_handle); - - 
status = sgx_create_report(qe_target_info, &report_data, app_report); - if (key_size == enclave_public_key_size) { - memcpy(key, enclave_public_key, enclave_public_key_size); - // ocall_debug_print_hex(enclave_private_key, enclave_public_key_size); - } else { - std::string msg = "key buffer is not large enough: need" + - std::to_string(enclave_public_key_size) + - "bytes, but only" + std::to_string(key_size) + - "bytes provided."; - ocall_debug_print_string(msg.c_str()); - return SGX_ERROR_INVALID_PARAMETER; - } - - return status; -} - -sgx_status_t enc_run(const char* input_data, size_t input_data_size, - size_t* output_size) { - // allocate buffer to host decrypted contents - std::string output_key; - // user input decryption - auto decrypt_input = - decrypt_content(input_data, input_data_size, &output_key); - - sgx_status_t ret = SGX_SUCCESS; - - // All ready, execute the task on input - - std::string output_plain = exec(decrypt_input.data(), decrypt_input.size()); - - // encrypt the output - std::string output; - sgx_status_t status = encrypt_output(output_plain, output_key, &output); - if (status == SGX_SUCCESS) memcpy(output_buf, output.data(), output.size()); - - // set output size for untrusted memory allocation - actual_output_size = (ret == SGX_SUCCESS) ? output.size() : 0; - *output_size = actual_output_size; - -#ifndef NDEBUG - // ocall_debug_print_hex(output_buf, output_size); -#endif // NDEBUG - - // free resources - return ret; -} - -sgx_status_t enc_get_output(uint8_t* ret, size_t size) { - // check if enough space in untrusted memory - if ((sgx_is_outside_enclave(ret, size) != 1) && (size < actual_output_size)) - return SGX_ERROR_UNEXPECTED; - - memcpy(ret, output_buf, actual_output_size); - // clear execution context - ClearRequestContext(); - return SGX_SUCCESS; -} - -void enc_clear_exec_context() { ClearExecutionContext(); } diff --git a/sdk/example_containers/sgx-task/src/worker/trusted/Worker_Enclave.config.xml b/sdk/example_containers/sgx-task/src/worker/trusted/Worker_Enclave.config.xml deleted file mode 100644 index cdba1d4..0000000 --- a/sdk/example_containers/sgx-task/src/worker/trusted/Worker_Enclave.config.xml +++ /dev/null @@ -1,10 +0,0 @@ - - 0 - 0 - 0x120000 - 0x10000000 - 0x10000000 - 8 - 0 - 0 - diff --git a/sdk/example_containers/sgx-task/src/worker/trusted/Worker_Enclave.lds b/sdk/example_containers/sgx-task/src/worker/trusted/Worker_Enclave.lds deleted file mode 100644 index b42342f..0000000 --- a/sdk/example_containers/sgx-task/src/worker/trusted/Worker_Enclave.lds +++ /dev/null @@ -1,10 +0,0 @@ -Worker_Enclave.so -{ - global: - g_global_data_sim; - g_global_data; - g_peak_heap_used; - g_peak_rsrv_mem_committed; - local: - *; -}; \ No newline at end of file diff --git a/sdk/example_containers/sgx-task/src/worker/trusted/Worker_Enclave_private.pem b/sdk/example_containers/sgx-task/src/worker/trusted/Worker_Enclave_private.pem deleted file mode 100644 index 529d07b..0000000 --- a/sdk/example_containers/sgx-task/src/worker/trusted/Worker_Enclave_private.pem +++ /dev/null @@ -1,39 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIG4gIBAAKCAYEAroOogvsj/fZDZY8XFdkl6dJmky0lRvnWMmpeH41Bla6U1qLZ -AmZuyIF+mQC/cgojIsrBMzBxb1kKqzATF4+XwPwgKz7fmiddmHyYz2WDJfAjIveJ -ZjdMjM4+EytGlkkJ52T8V8ds0/L2qKexJ+NBLxkeQLfV8n1mIk7zX7jguwbCG1Pr -nEMdJ3Sew20vnje+RsngAzdPChoJpVsWi/K7cettX/tbnre1DL02GXc5qJoQYk7b -3zkmhz31TgFrd9VVtmUGyFXAysuSAb3EN+5VnHGr0xKkeg8utErea2FNtNIgua8H -ONfm9Eiyaav1SVKzPHlyqLtcdxH3I8Wg7yqMsaprZ1n5A1v/levxnL8+It02KseD 
-5HqV4rf/cImSlCt3lpRg8U5E1pyFQ2IVEC/XTDMiI3c+AR+w2jSRB3Bwn9zJtFlW -KHG3m1xGI4ck+Lci1JvWWLXQagQSPtZTsubxTQNx1gsgZhgv1JHVZMdbVlAbbRMC -1nSuJNl7KPAS/VfzAgEDAoIBgHRXxaynbVP5gkO0ug6Qw/E27wzIw4SmjsxG6Wpe -K7kfDeRskKxESdsA/xCrKkwGwhcx1iIgS5+Qscd1Yg+1D9X9asd/P7waPmWoZd+Z -AhlKwhdPsO7PiF3e1AzHhGQwsUTt/Y/aSI1MpHBvy2/s1h9mFCslOUxTmWw0oj/Q -ldIEgWeNR72CE2+jFIJIyml6ftnb6qzPiga8Bm48ubKh0kvySOqnkmnPzgh+JBD6 -JnBmtZbfPT97bwTT+N6rnPqOOApvfHPf15kWI8yDbprG1l4OCUaIUH1AszxLd826 -5IPM+8gINLRDP1MA6azECPjTyHXhtnSIBZCyWSVkc05vYmNXYUNiXWMajcxW9M02 -wKzFELO8NCEAkaTPxwo4SCyIjUxiK1LbQ9h8PSy4c1+gGP4LAMR8xqP4QKg6zdu9 -osUGG/xRe/uufgTBFkcjqBHtK5L5VI0jeNIUAgW/6iNbYXjBMJ0GfauLs+g1VsOm -WfdgXzsb9DYdMa0OXXHypmV4GwKBwQDUwQj8RKJ6c8cT4vcWCoJvJF00+RFL+P3i -Gx2DLERxRrDa8AVGfqaCjsR+3vLgG8V/py+z+dxZYSqeB80Qeo6PDITcRKoeAYh9 -xlT3LJOS+k1cJcEmlbbO2IjLkTmzSwa80fWexKu8/Xv6vv15gpqYl1ngYoqJM3pd -vzmTIOi7MKSZ0WmEQavrZj8zK4endE3v0eAEeQ55j1GImbypSf7Idh7wOXtjZ7WD -Dg6yWDrri+AP/L3gClMj8wsAxMV4ZR8CgcEA0fzDHkFa6raVOxWnObmRoDhAtE0a -cjUj976NM5yyfdf2MrKy4/RhdTiPZ6b08/lBC/+xRfV3xKVGzacm6QjqjZrUpgHC -0LKiZaMtccCJjLtPwQd0jGQEnKfMFaPsnhOc5y8qVkCzVOSthY5qhz0XNotHHFmJ -gffVgB0iqrMTvSL7IA2yqqpOqNRlhaYhNl8TiFP3gIeMtVa9rZy31JPgT2uJ+kfo -gV7sdTPEjPWZd7OshGxWpT6QfVDj/T9T7L6tAoHBAI3WBf2DFvxNL2KXT2QHAZ9t -k3imC4f7U+wSE6zILaDZyzygA4RUbwG0gv8/TJVn2P/Eynf76DuWHGlaiLWnCbSz -Az2DHBQBBaku409zDQym3j1ugMRjzzSQWzJg0SIyBH3hTmnYcn3+Uqcp/lEBvGW6 -O+rsXFt3pukqJmIV8HzLGGaLm62BHUeZf3dyWm+i3p/hQAL7Xvu04QW70xuGqdr5 -afV7p5eaeQIJXyGQJ0eylV/90+qxjMKiB1XYg6WYvwKBwQCL/ddpgOdHJGN8uRom -e7Zq0Csi3hGheMKlKbN3vcxT5U7MdyHtTZZOJbTvxKNNUNYH/8uD+PqDGNneb29G -BfGzvI3EASyLIcGZF3OhKwZd0jUrWk2y7Vhob91jwp2+t73vdMbkKyI4mHOuXvGv -fg95si9oO7EBT+Oqvhccd2J+F1IVXncccYnF4u5ZGWt5lLewN/pVr7MjjykeaHqN -t+rfnQam2psA6fL4zS2zTmZPzR2tnY8Y1GBTi0Ko1OKd1HMCgcAb5cB/7/AQlhP9 -yQa04PLH9ygQkKKptZp7dy5WcWRx0K/hAHRoi2aw1wZqfm7VBNu2SLcs90kCCCxp -6C5sfJi6b8NpNbIPC+sc9wsFr7pGo9SFzQ78UlcWYK2Gu2FxlMjonhka5hvo4zvg -WxlpXKEkaFt3gLd92m/dMqBrHfafH7VwOJY2zT3WIpjwuk0ZzmRg5p0pG/svVQEH -NZmwRwlopysbR69B/n1nefJ84UO50fLh5s5Zr3gBRwbWNZyzhXk= ------END RSA PRIVATE KEY----- diff --git a/sdk/example_containers/sgx-task/src/worker/untrusted/App.cc b/sdk/example_containers/sgx-task/src/worker/untrusted/App.cc deleted file mode 100644 index 6063048..0000000 --- a/sdk/example_containers/sgx-task/src/worker/untrusted/App.cc +++ /dev/null @@ -1,219 +0,0 @@ -#define WORKER_ENCLAVE_FILENAME "Worker_Enclave.signed.so" -#define REQUEST_COUNT 4 - -#include -#include - -#include "common/hexutil.h" -#include "worker.h" - -// for test -#include -#include -#include - -#include -#include -#include -#include - -#define MAX_SESSION_KEY_IV_LENGTH 16 - -/* A 128 bit key */ -unsigned char output_key[16] = {0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, - 0x38, 0x39, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35}; - -// /* A 96 bit IV */ -// unsigned char iv[12] = {0x99, 0xaa, 0x3e, 0x68, 0xed, 0x81, 0x73, 0xa0, -// 0xee, 0xd0, 0x66, 0x84}; - -// expect the input should be in the form of |encrytped secret data|encrypted -// session key(384B)|IV(16B)| the secret data should consists of |input|output -// key(16B)| -std::string encrypt_request(const std::string& data, - const std::string& enclave_public_key) { - std::string plaintext{data}; - plaintext.append((char*)output_key, 16); - int encrypted_message_len = 0; - unsigned char* ek = NULL; - int ek_len = 0; - unsigned char iv[MAX_SESSION_KEY_IV_LENGTH]; - memset(iv, 0, MAX_SESSION_KEY_IV_LENGTH); - size_t encrypted_message_max_length = plaintext.size() + EVP_MAX_IV_LENGTH; - unsigned char 
encrypted[encrypted_message_max_length]; - memset(encrypted, 0, encrypted_message_max_length); - - BIO* bio = NULL; - bio = BIO_new_mem_buf(enclave_public_key.data(), enclave_public_key.size()); - RSA* rsaPublicKey = NULL; - PEM_read_bio_RSA_PUBKEY(bio, &rsaPublicKey, NULL, NULL); - EVP_PKEY* pkey = EVP_PKEY_new(); - EVP_PKEY_assign_RSA(pkey, rsaPublicKey); - EVP_CIPHER_CTX* ctx = EVP_CIPHER_CTX_new(); - EVP_CIPHER_CTX_init(ctx); - ek = (unsigned char*)malloc(EVP_PKEY_size(pkey)); - EVP_SealInit(ctx, EVP_aes_256_cbc(), &ek, &ek_len, iv, &pkey, 1); - printf("encrypted session key length: %d\n", ek_len); - - // BIO_dump_fp(stdout, (const char*) plaintext.data(), plaintext.size()); - // BIO_dump_fp(stdout, (const char*) ek, ek_len); - - int encrypted_block_len = 0; - EVP_SealUpdate(ctx, encrypted, &encrypted_block_len, - (const unsigned char*)plaintext.data(), plaintext.size()); - encrypted_message_len = encrypted_block_len; - EVP_SealFinal(ctx, encrypted + encrypted_block_len, &encrypted_block_len); - encrypted_message_len += encrypted_block_len; - EVP_CIPHER_CTX_free(ctx); - BIO_free(bio); - EVP_PKEY_free(pkey); - - std::string ret = std::string((char*)encrypted, encrypted_message_len); - ret.append((char*)ek, ek_len); - ret.append((char*)iv, MAX_SESSION_KEY_IV_LENGTH); - - if (ek) free(ek); - - return ret; -} - -std::string decrypt_response(const std::string& data) { - uint8_t tag[16]; - memcpy(tag, data.data() + data.size() - 16, 16); - uint8_t iv[12]; - memcpy(iv, data.data() + data.size() - 28, 12); - size_t encrypt_len = data.size() - 28; - EVP_CIPHER_CTX* ctx; - /* Create and initialise the context */ - if (!(ctx = EVP_CIPHER_CTX_new())) return ""; - - if (EVP_DecryptInit_ex(ctx, EVP_aes_128_gcm(), NULL, output_key, iv) != 1) { - EVP_CIPHER_CTX_free(ctx); - return ""; - } - - // // BIO_dump_fp(stdout, (const char*) data.data(), data.size()-32); - // printf("encrypted data (%dB): \n", encrypt_len); - // BIO_dump_fp(stdout, (const char*) data.data(), encrypt_len); - // printf("tag: \n"); - // BIO_dump_fp(stdout, (const char*) tag, 16); - - EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_TAG, 16, tag); - - uint8_t plaintext[encrypt_len]; - int plaintext_len; - int len; - if (EVP_DecryptUpdate(ctx, plaintext, &len, (const unsigned char*)data.data(), - encrypt_len) != 1) { - EVP_CIPHER_CTX_free(ctx); - return ""; - } - plaintext_len = len; - // printf("so far plaintext len: %d\n", plaintext_len); - - if (EVP_DecryptFinal_ex(ctx, plaintext + len, &len) != 1) { - EVP_CIPHER_CTX_free(ctx); - return ""; - } - plaintext_len += len; - // printf("plaintext (%dB):\n", plaintext_len); - // std::string ret; - // ret.assign((char*)plaintext, plaintext_len); - // printf("%s\n", ret.c_str()); - - EVP_CIPHER_CTX_free(ctx); - return std::string((char*)plaintext, plaintext_len); -} - -int main(int argc, char* argv[]) { - std::cout << "Program starts at: " - << std::chrono::system_clock::now().time_since_epoch() / - std::chrono::microseconds(1) - << "\n"; - - std::string sample_request_path; - std::string sample_request; - - if (argc == 2) { - sample_request = argv[1]; - printf("sample request: %s\n", sample_request.c_str()); - } else { - fprintf(stderr, "Usage: sample_request\n"); - exit(1); - } - - // test enclave recreate - for (int j = 0; j < 2; j++) { - printf("Starting a worker\n"); - Worker worker(WORKER_ENCLAVE_FILENAME); - auto ret = worker.Initialize(); - if (!ret) { - printf("failed to initialize worker\n"); - return 1; - } - printf("Worker initialized\n"); - std::cout << "worker init done at: " - << 
std::chrono::system_clock::now().time_since_epoch() / - std::chrono::microseconds(1) - << "\n"; - - std::string key; - std::string report = worker.GetKeyAndReport(&key); - std::cout << "report: " << report << "\n"; - std::cout << "key: " << key << "\n"; - - printf("check input: %s, %ld\n", sample_request.c_str(), - sample_request.size()); - // prepare the sample request - std::string prepared_sample = encrypt_request(sample_request, key); - - // parallel - std::vector handlers; - std::array outputs; - for (uint64_t i = 0; i < REQUEST_COUNT; i++) { - handlers.emplace_back(&Worker::Handle, &worker, i, prepared_sample, - &outputs[i]); - } - for (auto& h : handlers) { - h.join(); - } - - std::cout << "processing " << REQUEST_COUNT << " requests done at: " - << std::chrono::system_clock::now().time_since_epoch() / - std::chrono::microseconds(1) - << "\n"; - - // print response - int id = 0; - for (auto& output : outputs) { - std::string outdata = hex_decode(output.data(), output.size()); - outdata = decrypt_response(outdata); - printf("{\"msg\": \"id-%d, %s\"}\n", id++, outdata.c_str()); - } - - // test subsequent - { - std::string out; - worker.Handle(0, prepared_sample, &out); - std::string outdata = hex_decode(out.data(), out.size()); - outdata = decrypt_response(outdata); - printf("decoded: %s", outdata.c_str()); - printf("{\"msg\": \"id-%d, %s\"}\n", 0, outdata.c_str()); - } - - // test subsequent (check reuse) - { - std::string out; - worker.Handle(0, prepared_sample, &out); - std::string outdata = hex_decode(out.data(), out.size()); - outdata = decrypt_response(outdata); - printf("decoded: %s", outdata.c_str()); - printf("{\"msg\": \"id-%d, %s\"}\n", 0, outdata.c_str()); - } - - // tear down - printf("worker closing.\n"); - worker.Close(); - } - return 0; -} diff --git a/sdk/example_containers/sgx-task/src/worker/untrusted/App2.cc b/sdk/example_containers/sgx-task/src/worker/untrusted/App2.cc deleted file mode 100644 index 62b23eb..0000000 --- a/sdk/example_containers/sgx-task/src/worker/untrusted/App2.cc +++ /dev/null @@ -1,577 +0,0 @@ -// a fully untrusted version of App for debugging -// #define WORKER_ENCLAVE_FILENAME "Worker_Enclave.signed.so" -#define REQUEST_COUNT 4 - -#include -#include - -#include "common/hexutil.h" -#include "worker.h" - -// for test -#include -#include -#include -#include -#include - -#include "openssl/ec.h" -#include "openssl/evp.h" -#include "openssl/pem.h" -#include "openssl/rsa.h" - -#define RSA_PUBLIC_KEY_SIZE 512 -#define RSA_PRIVATE_KEY_SIZE 2048 - -#define RSA_3072_PUBLIC_KEY_SIZE 650 -#define RSA_3072_PRIVATE_KEY_SIZE 3072 - -#define RSA_TYPE 0 -#define EC_TYPE 1 // EC-P384 - -#define MAX_SESSION_KEY_IV_LENGTH 16 -// #define ENCRYPTED_SESSION_KEY_LENGTH 256 -#define ENCRYPTED_SESSION_KEY_LENGTH 384 -#define OUTPUT_KEY_LENGTH 16 - -#define AES_GCM_IV_SIZE 12 -#define AES_GCM_TAG_SIZE 16 - -#define MAX_SESSION_KEY_IV_LENGTH 16 - -/* A 128 bit key */ -unsigned char output_key[16] = {0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, - 0x38, 0x39, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35}; - -// /* A 96 bit IV */ -// unsigned char iv[12] = {0x99, 0xaa, 0x3e, 0x68, 0xed, 0x81, 0x73, 0xa0, -// 0xee, 0xd0, 0x66, 0x84}; - -struct EncryptedRequest { - std::string encrypted_secret_data; - std::string encrypted_session_key; - std::string iv; -}; - -uint8_t* enclave_public_key = nullptr; -size_t enclave_public_key_size = 0; -uint8_t* enclave_private_key = nullptr; -size_t enclave_private_key_size = 0; - -uint8_t enc_iv_buf[AES_GCM_IV_SIZE] = {0}; - -thread_local char 
output_buf[4000]; -thread_local size_t actual_output_size; - -int get_pkey_by_rsa(EVP_PKEY* pk) { - int res = -1; - EVP_PKEY_CTX* ctx = NULL; - - ctx = EVP_PKEY_CTX_new_id(EVP_PKEY_RSA, NULL); - if (ctx == NULL) return res; - res = EVP_PKEY_keygen_init(ctx); - if (res <= 0) { - // PRINT("keygen_init failed (%d)\n", res); - goto done; - } - - res = EVP_PKEY_CTX_set_rsa_keygen_bits(ctx, RSA_3072_PRIVATE_KEY_SIZE); - if (res <= 0) { - // PRINT("set_rsa_kengen_bits failed (%d)\n", res); - goto done; - } - - /* Generate key */ - res = EVP_PKEY_keygen(ctx, &pk); - if (res <= 0) { - // PRINT("keygen failed (%d)\n", res); - goto done; - } - -done: - if (ctx) EVP_PKEY_CTX_free(ctx); - - return res; -} - -int get_pkey_by_ec(EVP_PKEY* pk) { - int res = -1; - EVP_PKEY_CTX* ctx; - - ctx = EVP_PKEY_CTX_new_id(EVP_PKEY_EC, NULL); - if (ctx == NULL) return res; - res = EVP_PKEY_keygen_init(ctx); - if (res <= 0) { - // PRINT("EC_generate_key failed (%d)\n", res); - goto done; - } - - res = EVP_PKEY_CTX_set_ec_paramgen_curve_nid(ctx, NID_secp384r1); - if (res <= 0) { - // PRINT("EC_generate_key failed (%d)\n", res); - goto done; - } - - /* Generate key */ - res = EVP_PKEY_keygen(ctx, &pk); - if (res <= 0) { - // PRINT("EC_generate_key failed (%d)\n", res); - goto done; - } - -done: - if (ctx) EVP_PKEY_CTX_free(ctx); - - return res; -} - -// actually is generating RSA pair -// hardware independant -sgx_status_t generate_key_pair(int type, uint8_t** public_key, - size_t* public_key_size, uint8_t** private_key, - size_t* private_key_size) { - sgx_status_t result = SGX_ERROR_UNEXPECTED; - uint8_t* local_public_key = nullptr; - uint8_t* local_private_key = nullptr; - int res = -1; - EVP_PKEY* pkey = nullptr; - BIO* bio = nullptr; - - pkey = EVP_PKEY_new(); - if (!pkey) { - // PRINT("EVP_PKEY_new failed\n"); - result = SGX_ERROR_UNEXPECTED; - goto done; - } - - if (type != RSA_TYPE && type != EC_TYPE) { - type = RSA_TYPE; // by default, we use RSA_TYPE - } - - switch (type) { - case RSA_TYPE: - res = get_pkey_by_rsa(pkey); - break; - case EC_TYPE: - res = get_pkey_by_ec(pkey); - break; - } - - if (res <= 0) { - // PRINT("get_pkey failed (%d)\n", res); - result = SGX_ERROR_UNEXPECTED; - goto done; - } - - // Allocate memory - local_public_key = (uint8_t*)malloc(RSA_3072_PUBLIC_KEY_SIZE); - if (!local_public_key) { - // PRINT("out-of-memory:calloc(local_public_key failed\n"); - result = SGX_ERROR_OUT_OF_EPC; - goto done; - } - memset(local_public_key, 0x00, RSA_3072_PUBLIC_KEY_SIZE); - - local_private_key = (uint8_t*)malloc(RSA_3072_PRIVATE_KEY_SIZE); - if (!local_private_key) { - // PRINT("out-of-memory: calloc(local_private_key) failed\n"); - result = SGX_ERROR_OUT_OF_EPC; - goto done; - } - memset(local_private_key, 0x00, RSA_3072_PRIVATE_KEY_SIZE); - - // Write out the public/private key in PEM format for exchange with - // other enclaves. 
- bio = BIO_new(BIO_s_mem()); - if (!bio) { - // PRINT("BIO_new for local_public_key failed\n"); - goto done; - } - - res = PEM_write_bio_PUBKEY(bio, pkey); - if (!res) { - // PRINT("PEM_write_bio_PUBKEY failed (%d)\n", res); - goto done; - } - - res = BIO_read(bio, local_public_key, RSA_3072_PUBLIC_KEY_SIZE); - if (!res) { - // PRINT("BIO_read public key failed (%d)\n", res); - goto done; - } - BIO_free(bio); - bio = nullptr; - - bio = BIO_new(BIO_s_mem()); - if (!bio) { - // PRINT("BIO_new for local_public_key failed\n"); - goto done; - } - - res = PEM_write_bio_PrivateKey(bio, pkey, nullptr, nullptr, 0, nullptr, - nullptr); - if (!res) { - // PRINT("PEM_write_bio_PrivateKey failed (%d)\n", res); - goto done; - } - - res = BIO_read(bio, local_private_key, RSA_3072_PRIVATE_KEY_SIZE); - if (!res) { - // PRINT("BIO_read private key failed (%d)\n", res); - goto done; - } - - BIO_free(bio); - bio = nullptr; - - *public_key = local_public_key; - *private_key = local_private_key; - - *public_key_size = strlen(reinterpret_cast(local_public_key)); - *private_key_size = strlen(reinterpret_cast(local_private_key)); - - // PRINT("public_key_size %d, private_key_size %d\n", *public_key_size, - // *private_key_size); - result = SGX_SUCCESS; - -done: - if (bio) BIO_free(bio); - if (pkey) EVP_PKEY_free(pkey); // When this is called, rsa is also freed - if (result != SGX_SUCCESS) { - if (local_public_key) free(local_public_key); - if (local_private_key) free(local_private_key); - } - return result; -} - -std::string get_key_and_report() { - uint8_t* public_key_buffer = nullptr; - size_t public_key_size = 0; - uint8_t* private_key_buffer = nullptr; - size_t private_key_size = 0; - sgx_status_t status = - generate_key_pair(RSA_TYPE, &public_key_buffer, &public_key_size, - &private_key_buffer, &private_key_size); - if (status != SGX_SUCCESS) { - if (private_key_buffer) free(private_key_buffer); - if (public_key_buffer) free(public_key_buffer); - return ""; - } - - // copy the private key to enclave global variable - enclave_public_key = public_key_buffer; - enclave_public_key_size = public_key_size; - enclave_private_key = private_key_buffer; - enclave_private_key_size = private_key_size; - - // print the keys to file - std::ofstream public_key_file; - public_key_file.open("public_key.pem"); - public_key_file << std::string((char*)public_key_buffer, public_key_size); - public_key_file.close(); - - std::ofstream private_key_file; - private_key_file.open("private_key.pem"); - private_key_file << std::string((char*)private_key_buffer, private_key_size); - private_key_file.close(); - - return std::string((char*)enclave_public_key, enclave_public_key_size); -} - -std::string encrypt_request(const std::string& data, - const std::string& enclave_public_key) { - std::string plaintext{data}; - plaintext.append((char*)output_key, 16); - int encrypted_message_len = 0; - unsigned char* ek = NULL; - int ek_len = 0; - unsigned char iv[MAX_SESSION_KEY_IV_LENGTH]; - memset(iv, 0, MAX_SESSION_KEY_IV_LENGTH); - size_t encrypted_message_max_length = plaintext.size() + EVP_MAX_IV_LENGTH; - unsigned char encrypted[encrypted_message_max_length]; - memset(encrypted, 0, encrypted_message_max_length); - - BIO* bio = NULL; - bio = BIO_new_mem_buf(enclave_public_key.data(), enclave_public_key.size()); - RSA* rssPublicKey = NULL; - PEM_read_bio_RSA_PUBKEY(bio, &rssPublicKey, NULL, NULL); - EVP_PKEY* pkey = EVP_PKEY_new(); - EVP_PKEY_assign_RSA(pkey, rssPublicKey); - EVP_CIPHER_CTX* ctx = EVP_CIPHER_CTX_new(); - 
EVP_CIPHER_CTX_init(ctx); - ek = (unsigned char*)malloc(EVP_PKEY_size(pkey)); - EVP_SealInit(ctx, EVP_aes_256_cbc(), &ek, &ek_len, iv, &pkey, 1); - printf("encrypted session key length: %d\n", ek_len); - - printf("secret data to send (%ldB): \n", plaintext.size()); - BIO_dump_fp(stdout, (const char*)plaintext.data(), plaintext.size()); - printf("encrypted symmetric key to send (%dB): \n", ek_len); - BIO_dump_fp(stdout, (const char*)ek, ek_len); - printf("symmetric key iv to send (%dB): \n", MAX_SESSION_KEY_IV_LENGTH); - BIO_dump_fp(stdout, (const char*)iv, MAX_SESSION_KEY_IV_LENGTH); - - int encrypted_block_len = 0; - EVP_SealUpdate(ctx, encrypted, &encrypted_block_len, - (const unsigned char*)plaintext.data(), plaintext.size()); - encrypted_message_len = encrypted_block_len; - printf("encrypt message length update to %d\n", encrypted_message_len); - EVP_SealFinal(ctx, encrypted + encrypted_block_len, &encrypted_block_len); - encrypted_message_len += encrypted_block_len; - printf("encrypt message length update to %d\n", encrypted_message_len); - EVP_CIPHER_CTX_free(ctx); - BIO_free(bio); - EVP_PKEY_free(pkey); - - std::string ret = std::string((char*)encrypted, encrypted_message_len); - ret.append((char*)ek, ek_len); - ret.append((char*)iv, MAX_SESSION_KEY_IV_LENGTH); - - if (ek) free(ek); - - printf("the whole message to send (%ldB): \n", ret.size()); - BIO_dump_fp(stdout, ret.data(), ret.size()); - - // save the encrypted message to file - std::ofstream encrypted_message_file; - encrypted_message_file.open("encrypted_message.txt"); - encrypted_message_file << ret; - encrypted_message_file.close(); - - return ret; -} - -std::string decrypt_response(const std::string& data) { - uint8_t tag[16]; - memcpy(tag, data.data() + data.size() - 16, 16); - uint8_t iv[12]; - memcpy(iv, data.data() + data.size() - 28, 12); - size_t encrypt_len = data.size() - 28; - EVP_CIPHER_CTX* ctx; - /* Create and initialise the context */ - if (!(ctx = EVP_CIPHER_CTX_new())) return ""; - - if (EVP_DecryptInit_ex(ctx, EVP_aes_128_gcm(), NULL, output_key, iv) != 1) { - EVP_CIPHER_CTX_free(ctx); - return ""; - } - - // BIO_dump_fp(stdout, (const char*) data.data(), data.size()-32); - printf("encrypted data (%ldB): \n", encrypt_len); - BIO_dump_fp(stdout, (const char*)data.data(), encrypt_len); - printf("tag: \n"); - BIO_dump_fp(stdout, (const char*)tag, 16); - - EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_TAG, 16, tag); - - uint8_t plaintext[encrypt_len]; - int plaintext_len; - int len; - if (EVP_DecryptUpdate(ctx, plaintext, &len, (const unsigned char*)data.data(), - encrypt_len) != 1) { - EVP_CIPHER_CTX_free(ctx); - return ""; - } - plaintext_len = len; - // printf("so far plaintext len: %d\n", plaintext_len); - - if (EVP_DecryptFinal_ex(ctx, plaintext + len, &len) != 1) { - EVP_CIPHER_CTX_free(ctx); - return ""; - } - plaintext_len += len; - // printf("plaintext (%dB):\n", plaintext_len); - // std::string ret; - // ret.assign((char*)plaintext, plaintext_len); - // printf("%s\n", ret.c_str()); - - EVP_CIPHER_CTX_free(ctx); - return std::string((char*)plaintext, plaintext_len); -} - -unsigned char* rsa_decrypt_data(const unsigned char* data, int len, - const unsigned char* ek, int ek_len, - const unsigned char* iv, - const uint8_t* rsa_private_key, - int rsa_private_key_len, int* output_len) { - printf("iv for decrypt (%dB): \n", MAX_SESSION_KEY_IV_LENGTH); - BIO_dump_fp(stdout, (const char*)iv, MAX_SESSION_KEY_IV_LENGTH); - printf("ek for decrypt (%dB): \n", ENCRYPTED_SESSION_KEY_LENGTH); - BIO_dump_fp(stdout, 
(const char*)ek, ENCRYPTED_SESSION_KEY_LENGTH); - printf("data to decrypt (%dB): \n", len); - BIO_dump_fp(stdout, (const char*)data, len); - printf("decrypt rsa key (%dB): \n", rsa_private_key_len); - BIO_dump_fp(stdout, (const char*)rsa_private_key, rsa_private_key_len); - - BIO* bio = nullptr; - bio = BIO_new_mem_buf(rsa_private_key, rsa_private_key_len); - RSA* loaded_private_key = NULL; - PEM_read_bio_RSAPrivateKey(bio, &loaded_private_key, NULL, NULL); - EVP_PKEY* pkey = EVP_PKEY_new(); - EVP_PKEY_assign_RSA(pkey, loaded_private_key); - EVP_CIPHER_CTX* ctx = EVP_CIPHER_CTX_new(); - EVP_CIPHER_CTX_init(ctx); - - unsigned char* decrypted = (unsigned char*)malloc(len + EVP_MAX_IV_LENGTH); - EVP_OpenInit(ctx, EVP_aes_256_cbc(), ek, ek_len, iv, pkey); - int decrypted_len = 0; - int decyprtedBlockLen = 0; - EVP_OpenUpdate(ctx, decrypted, &decyprtedBlockLen, data, len); - decrypted_len = decyprtedBlockLen; - EVP_OpenFinal(ctx, decrypted + decrypted_len, &decyprtedBlockLen); - decrypted_len += decyprtedBlockLen; - *output_len = decrypted_len; - // free ctx - EVP_CIPHER_CTX_free(ctx); - BIO_free(bio); - EVP_PKEY_free(pkey); - return decrypted; -} - -std::string decrypt_content(const char* input_data, size_t input_data_size, - std::string* output_key) { - const char* iv = input_data + (input_data_size - MAX_SESSION_KEY_IV_LENGTH); - const char* ek = input_data + (input_data_size - MAX_SESSION_KEY_IV_LENGTH - - ENCRYPTED_SESSION_KEY_LENGTH); - int encrypted_input_len = input_data_size - (MAX_SESSION_KEY_IV_LENGTH + - ENCRYPTED_SESSION_KEY_LENGTH); - // perform decryption - int decrypted_len; - unsigned char* decrypted = rsa_decrypt_data( - (unsigned char*)input_data, encrypted_input_len, (unsigned char*)ek, - ENCRYPTED_SESSION_KEY_LENGTH, (unsigned char*)iv, enclave_private_key, - enclave_private_key_size, &decrypted_len); - printf("decrypted (%dB):\n", decrypted_len); - // BIO_dump_fp(stdout, (const char*) decrypted, decrypted_len); - output_key->assign((char*)decrypted + decrypted_len - OUTPUT_KEY_LENGTH, - OUTPUT_KEY_LENGTH); - std::string ret{(char*)decrypted, decrypted_len - OUTPUT_KEY_LENGTH}; - - if (decrypted) free(decrypted); - return ret; -} - -void encrypt_output(const std::string& output_plain, - const std::string& output_key, - std::string* encrypt_output) { - EVP_CIPHER_CTX* ctx; - if (!(ctx = EVP_CIPHER_CTX_new())) return; - - if (EVP_EncryptInit_ex(ctx, EVP_aes_128_gcm(), NULL, - (const unsigned char*)output_key.data(), - enc_iv_buf) != 1) { - // if(EVP_EncryptInit_ex(ctx, EVP_aes_256_cbc(), NULL, key, iv) != 1) { - EVP_CIPHER_CTX_free(ctx); - return; - } - - unsigned char ciphertext[output_plain.size()]; - - int ciphertext_len; - int len; - if (EVP_EncryptUpdate(ctx, ciphertext, &len, - (const unsigned char*)output_plain.data(), - output_plain.size()) != 1) { - EVP_CIPHER_CTX_free(ctx); - return; - } - ciphertext_len = len; - - if (EVP_EncryptFinal_ex(ctx, ciphertext + len, &len) != 1) { - EVP_CIPHER_CTX_free(ctx); - return; - } - ciphertext_len += len; - - unsigned char tag[AES_GCM_TAG_SIZE] = { - 0, - }; - // tag needed for gcm, not for cbc - EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_GET_TAG, 16, tag); - EVP_CIPHER_CTX_free(ctx); - encrypt_output->assign((char*)ciphertext, ciphertext_len); - encrypt_output->append((char*)enc_iv_buf, AES_GCM_IV_SIZE); - encrypt_output->append((char*)tag, AES_GCM_TAG_SIZE); -} - -void run(const char* input_data, size_t input_data_size, size_t* output_size) { - std::string output_key; - auto decrypt_input = - decrypt_content(input_data, 
input_data_size, &output_key); - std::string output_plain = "hello" + decrypt_input; - std::string output; - encrypt_output(output_plain, output_key, &output); - memcpy(output_buf, output.data(), output.size()); - actual_output_size = output.size(); - *output_size = actual_output_size; -} - -void get_output(uint8_t* ret, size_t size) { - memcpy(ret, output_buf, actual_output_size); -} - -void execute(const std::string& request, std::string* output) { - size_t output_size; - run(request.data(), request.size(), &output_size); - uint8_t* output_buf = (uint8_t*)malloc(output_size); - get_output(output_buf, output_size); - *output = std::string((char*)output_buf, output_size); - free(output_buf); -} - -void worker_handle(const std::string& request, std::string* output_encoded) { - std::string output; - execute(request, &output); - const char* hex_result = hexstring(output.data(), output.size()); - output_encoded->assign(hex_result, strlen(hex_result)); -} - -int main(int argc, char* argv[]) { - std::cout << "Program starts at: " - << std::chrono::system_clock::now().time_since_epoch() / - std::chrono::microseconds(1) - << "\n"; - - std::string sample_request_path; - std::string sample_request; - - if (argc == 2) { - sample_request = argv[1]; - printf("sample request: %s\n", sample_request.c_str()); - } else { - fprintf(stderr, "Usage: sample_request\n"); - exit(1); - } - - // test enclave recreate - // for (int j = 0; j < 2; j++) { - std::string key = get_key_and_report(); - - // prepare the sample request as described below - // expect the input should be in the form of |encrytped secret data|encrypted - // session key(256B)|IV(16)| the secret data should consists of |input|output - // key(16B)| - printf("check input: %s, %ld\n", sample_request.c_str(), - sample_request.size()); - printf("check rsa pubkey size: %ld\n", sample_request.size()); - std::string prepared_sample = encrypt_request(sample_request, key); - - // test subsequent - std::string out; - // worker.Handle(0, prepared_sample, &out); - worker_handle(prepared_sample, &out); - std::string outdata = hex_decode(out.data(), out.size()); - outdata = decrypt_response(outdata); - printf("decoded: %s", outdata.c_str()); - printf("{\"msg\": \"id-%d, %s\"}\n", 0, outdata.c_str()); - - if (enclave_public_key) { - free(enclave_public_key); - enclave_public_key = nullptr; - } - if (enclave_private_key) { - free(enclave_private_key); - enclave_private_key = nullptr; - } - // } - return 0; -} diff --git a/sdk/example_containers/sgx-task/src/worker/untrusted/ocall_patches.cc b/sdk/example_containers/sgx-task/src/worker/untrusted/ocall_patches.cc deleted file mode 100644 index ff1ab0c..0000000 --- a/sdk/example_containers/sgx-task/src/worker/untrusted/ocall_patches.cc +++ /dev/null @@ -1,32 +0,0 @@ -/** - * implement the ocalls defined in worker edl files. - * packaged into libworker.a - */ - -#include -#include -#include -#include - -#include "Enclave_u.h" // include this helps to solve the undefined reference error for ocalls. Enclave_u.c is compiled as c. 
- -void ocall_debug_print(const void* s, size_t len) { - assert(len < INT_MAX); - printf("DEBUG PRINT: %.*s\n", (int)len, (const char*)s); -} -void ocall_debug_print_string(const char* s) { printf("DEBUG PRINT: %s\n", s); } -void ocall_debug_print_hexstring(const char* s) { - printf("DEBUG PRINT (hex): "); - for (unsigned int i = 0; i < strlen(s); i++) { - printf("%02hhx", (unsigned char)s[i]); - } - printf("\n"); -} -void ocall_debug_print_hex(const void* s, size_t len) { - printf("DEBUG PRINT (hex): "); - auto it = (const unsigned char*)s; - for (unsigned int i = 0; i < len; i++) { - printf("%02hhx", *(it++)); - } - printf("\n"); -} diff --git a/sdk/example_containers/sgx-task/src/worker/untrusted/worker.cc b/sdk/example_containers/sgx-task/src/worker/untrusted/worker.cc deleted file mode 100644 index 38b328e..0000000 --- a/sdk/example_containers/sgx-task/src/worker/untrusted/worker.cc +++ /dev/null @@ -1,192 +0,0 @@ -#include "worker.h" - -#include -#include -#include - -#include "Enclave_u.h" -#include "common/hexutil.h" -#include "sgx_dcap_ql_wrapper.h" -#include "sgx_urts.h" - -// for test -#include -#include -#include - -namespace { -std::mutex debug_log_mutex; -// int PUBKEY_SIZE = 625; -int PUBKEY_SIZE = 215; -} // anonymous namespace - -std::string Worker::GetKeyAndReport(std::string* retKey) { - quote3_error_t qe_error = SGX_QL_SUCCESS; - sgx_target_info_t qe_target_info = { - 0, - }; - sgx_report_t app_report = { - 0, - }; - // prepare target info - qe_error = sgx_qe_get_target_info(&qe_target_info); - if (qe_error != SGX_QL_SUCCESS) { - printf("sgx_qe_get_target_info failed with %d\n", qe_error); - return ""; - } - // create report and key - sgx_status_t retval; - uint8_t* pubkey = (uint8_t*)malloc(PUBKEY_SIZE); - - sgx_status_t status = enc_get_key_and_report( - eid_, &retval, &qe_target_info, &app_report, pubkey, PUBKEY_SIZE); - - if ((status != SGX_SUCCESS) || (retval != SGX_SUCCESS)) { - printf("ecall_get_key_and_report failed with %d - %d\n", status, retval); - return ""; - } - - uint32_t quote_size = 0; - qe_error = sgx_qe_get_quote_size("e_size); - if (qe_error != SGX_QL_SUCCESS) { - printf("sgx_qe_get_quote_size failed with %d\n", qe_error); - return ""; - } - - uint8_t* quote_buf = (uint8_t*)malloc(quote_size); - if (quote_buf == NULL) { - printf("malloc failed for quote buffer %d\n", quote_size); - return ""; - } - memset(quote_buf, 0, quote_size); - - qe_error = sgx_qe_get_quote(&app_report, quote_size, quote_buf); - if (qe_error != SGX_QL_SUCCESS) { - printf("sgx_qe_get_quote failed with %d\n", qe_error); - free(quote_buf); - return ""; - } - - // prepare the return - retKey->assign((char*)pubkey, PUBKEY_SIZE); - if (pubkey) free(pubkey); - return hex_encode(quote_buf, quote_size); -} - -bool Worker::Initialize() { - std::cout << "init started at: " - << std::chrono::system_clock::now().time_since_epoch() / - std::chrono::microseconds(1) - << "\n"; - - sgx_launch_token_t t; - int updated = 0; - memset(t, 0, sizeof(sgx_launch_token_t)); - auto sgxStatus = sgx_create_enclave( - enclave_file_name_.c_str(), SGX_DEBUG_FLAG, &t, &updated, &eid_, NULL); - if (sgxStatus != SGX_SUCCESS) { - printf("Failed to create Enclave : error %d - %#x.\n", sgxStatus, - sgxStatus); - return false; - } else - printf("Enclave launched.\n"); - - std::cout << "init finished at: " - << std::chrono::system_clock::now().time_since_epoch() / - std::chrono::microseconds(1) - << "\n"; - - initialize_ = true; - return true; -} - -int Worker::Execute(const std::string& request, std::string* output) 
{ - if (!initialize_) return -1; - sgx_status_t retval; - size_t output_size; - - std::cout << "exec started at: " - << std::chrono::system_clock::now().time_since_epoch() / - std::chrono::microseconds(1) - << "\n"; - enc_run(eid_, &retval, request.data(), request.size(), &output_size); - if (retval != SGX_SUCCESS) { - printf("Failed to run task execution : error %d - %#x.\n", retval, retval); - *output = "failed on task execution " + std::to_string(retval); - return -1; - } - - uint8_t* output_buf = (uint8_t*)malloc(output_size); - if (output_buf == NULL) { - printf("malloc failed for output buffer %ld\n", output_size); - return -1; - } - - enc_get_output(eid_, &retval, output_buf, output_size); - if (retval != SGX_SUCCESS) { - printf("Failed to get encrypted result : error %d - %#x.\n", retval, - retval); - *output = "failed on encrypted result " + std::to_string(retval); - return -1; - } - *output = std::string((char*)output_buf, output_size); - free(output_buf); - - { - std::lock_guard lg(debug_log_mutex); - std::cout << "result done at: " - << std::chrono::system_clock::now().time_since_epoch() / - std::chrono::microseconds(1) - << "\n"; - } - - return 0; -} - -void Worker::Close() { - if (closed_) return; - closed_ = true; - initialize_ = false; - sgx_status_t ret = enc_clear_exec_context(eid_); - printf("returned status from close %d\n", ret); - assert(ret == SGX_SUCCESS); - ret = sgx_destroy_enclave(eid_); - assert(ret == SGX_SUCCESS); -} - -bool Worker::Handle(uint64_t handle_id, const std::string& request, - std::string* output_encoded) { - auto msg_prefix = "[id-" + std::to_string(handle_id) + "]"; - -#ifndef NDEBUG - { - std::lock_guard lg(debug_log_mutex); - std::cout << msg_prefix << " request handle start at: " - << std::chrono::system_clock::now().time_since_epoch() / - std::chrono::microseconds(1) - << "\n"; - } -#endif // NDEBUG - - std::string output; - auto ret = Execute(request, &output); - -#ifndef NDEBUG - { - std::lock_guard lg(debug_log_mutex); - - std::cout << msg_prefix << "done at: " - << std::chrono::system_clock::now().time_since_epoch() / - std::chrono::microseconds(1) - << "\n"; - } -#endif // NDEBUG - - const char* hex_result = hexstring(output.data(), output.size()); - if (ret == 0) { - output_encoded->append(hex_result, strlen(hex_result)); - } else - *output_encoded = output; - - return ret; -} diff --git a/sdk/example_containers/sgx-task/src/worker/untrusted/worker.h b/sdk/example_containers/sgx-task/src/worker/untrusted/worker.h deleted file mode 100644 index 5e69f20..0000000 --- a/sdk/example_containers/sgx-task/src/worker/untrusted/worker.h +++ /dev/null @@ -1,48 +0,0 @@ -#ifndef MECA_SGX_WORKER_U_WORKER_H_ -#define MECA_SGX_WORKER_U_WORKER_H_ - -#include - -#include "sgx_urts.h" - -class Worker { - public: - Worker(const std::string& enclave_file_name) - : initialize_(false), - closed_(false), - enclave_file_name_(std::move(enclave_file_name)), - eid_(0) {} - - ~Worker() { Close(); } - - // delete copy and move constructors and assigment operators - Worker(const Worker&) = delete; - Worker& operator=(const Worker&) = delete; - Worker(Worker&&) = delete; - Worker& operator=(Worker&&) = delete; - - bool Initialize(); - - std::string GetKeyAndReport(std::string* key); - - bool Handle(uint64_t handle_id, const std::string& sample_request, - std::string* output); - - /** - * @brief execute the inference request in the worker managed enclave. 
- * - * @param request : user request - * @return int : 0 for success; -1 for failure - */ - int Execute(const std::string& request, std::string* output); - - void Close(); - - private: - bool initialize_; - bool closed_; - const std::string enclave_file_name_; - sgx_enclave_id_t eid_; -}; - -#endif // MECA_SGX_WORKER_U_WORKER_H_ diff --git a/sdk/example_containers/stablediffusion/Dockerfile b/sdk/example_containers/stablediffusion/Dockerfile deleted file mode 100644 index 9545f79..0000000 --- a/sdk/example_containers/stablediffusion/Dockerfile +++ /dev/null @@ -1,17 +0,0 @@ -FROM python:3.9.0 -WORKDIR /app -COPY /src/requirements.txt requirements.txt -RUN pip install openvino-dev[onnx,pytorch]==2022.3.0 -RUN pip install -r requirements.txt -RUN pip install flask -RUN pip install flask-restful -RUN apt-get update && apt-get install -y python3-opencv -RUN pip install opencv-python -RUN curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | bash -RUN apt-get install -y git-lfs -RUN git lfs install -RUN git clone https://huggingface.co/bes-dev/stable-diffusion-v1-4-openvino -COPY /src . -EXPOSE 8080 -CMD ["flask", "run", "--host=0.0.0.0", "--port=8080"] - diff --git a/sdk/example_containers/stablediffusion/config.json b/sdk/example_containers/stablediffusion/config.json deleted file mode 100644 index 09b9d34..0000000 --- a/sdk/example_containers/stablediffusion/config.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "resource": { - "cpu": 16, - "mem": 32000 - } -} diff --git a/sdk/example_containers/stablediffusion/description.txt b/sdk/example_containers/stablediffusion/description.txt deleted file mode 100644 index b08230d..0000000 --- a/sdk/example_containers/stablediffusion/description.txt +++ /dev/null @@ -1 +0,0 @@ -This app serves HTTP requests to do text-to-image generation using Stable Diffusion on Intel CPU. diff --git a/sdk/example_containers/stablediffusion/example_input.bin b/sdk/example_containers/stablediffusion/example_input.bin deleted file mode 100644 index 4c69dad..0000000 --- a/sdk/example_containers/stablediffusion/example_input.bin +++ /dev/null @@ -1 +0,0 @@ -'{\"prompt\": \"cute cat 4k, high-res, masterpiece, best quality, soft lighting, dynamic angle\", \"num_inference_steps\": 8}' diff --git a/sdk/example_containers/stablediffusion/example_output.bin b/sdk/example_containers/stablediffusion/example_output.bin deleted file mode 100644 index d92340b..0000000 Binary files a/sdk/example_containers/stablediffusion/example_output.bin and /dev/null differ diff --git a/sdk/example_containers/stablediffusion/name.txt b/sdk/example_containers/stablediffusion/name.txt deleted file mode 100644 index 95c3537..0000000 --- a/sdk/example_containers/stablediffusion/name.txt +++ /dev/null @@ -1 +0,0 @@ -stable_diffusion_example diff --git a/sdk/example_containers/stablediffusion/src/LICENSE b/sdk/example_containers/stablediffusion/src/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/sdk/example_containers/stablediffusion/src/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/sdk/example_containers/stablediffusion/src/LICENSE_MODEL b/sdk/example_containers/stablediffusion/src/LICENSE_MODEL deleted file mode 100644 index 928aa73..0000000 --- a/sdk/example_containers/stablediffusion/src/LICENSE_MODEL +++ /dev/null @@ -1,82 +0,0 @@ -Copyright (c) 2022 Robin Rombach and Patrick Esser and contributors - -CreativeML Open RAIL-M -dated August 22, 2022 - -Section I: PREAMBLE - -Multimodal generative models are being widely adopted and used, and have the potential to transform the way artists, among other individuals, conceive and benefit from AI or ML technologies as a tool for content creation. - -Notwithstanding the current and potential benefits that these artifacts can bring to society at large, there are also concerns about potential misuses of them, either due to their technical limitations or ethical considerations. - -In short, this license strives for both the open and responsible downstream use of the accompanying model. When it comes to the open character, we took inspiration from open source permissive licenses regarding the grant of IP rights. Referring to the downstream responsible use, we added use-based restrictions not permitting the use of the Model in very specific scenarios, in order for the licensor to be able to enforce the license in case potential misuses of the Model may occur. At the same time, we strive to promote open and responsible research on generative models for art and content generation. - -Even though downstream derivative versions of the model could be released under different licensing terms, the latter will always have to include - at minimum - the same use-based restrictions as the ones in the original license (this license). We believe in the intersection between open and responsible AI development; thus, this License aims to strike a balance between both in order to enable responsible open-science in the field of AI. - -This License governs the use of the model (and its derivatives) and is informed by the model card associated with the model. - -NOW THEREFORE, You and Licensor agree as follows: - -1. Definitions - -- "License" means the terms and conditions for use, reproduction, and Distribution as defined in this document. -- "Data" means a collection of information and/or content extracted from the dataset used with the Model, including to train, pretrain, or otherwise evaluate the Model. The Data is not licensed under this License. -- "Output" means the results of operating a Model as embodied in informational content resulting therefrom. -- "Model" means any accompanying machine-learning based assemblies (including checkpoints), consisting of learnt weights, parameters (including optimizer states), corresponding to the model architecture as embodied in the Complementary Material, that have been trained or tuned, in whole or in part on the Data, using the Complementary Material. -- "Derivatives of the Model" means all modifications to the Model, works based on the Model, or any other model which is created or initialized by transfer of patterns of the weights, parameters, activations or output of the Model, to the other model, in order to cause the other model to perform similarly to the Model, including - but not limited to - distillation methods entailing the use of intermediate data representations or methods based on the generation of synthetic data by the Model for training the other model. 
-- "Complementary Material" means the accompanying source code and scripts used to define, run, load, benchmark or evaluate the Model, and used to prepare data for training or evaluation, if any. This includes any accompanying documentation, tutorials, examples, etc, if any. -- "Distribution" means any transmission, reproduction, publication or other sharing of the Model or Derivatives of the Model to a third party, including providing the Model as a hosted service made available by electronic or other remote means - e.g. API-based or web access. -- "Licensor" means the copyright owner or entity authorized by the copyright owner that is granting the License, including the persons or entities that may have rights in the Model and/or distributing the Model. -- "You" (or "Your") means an individual or Legal Entity exercising permissions granted by this License and/or making use of the Model for whichever purpose and in any field of use, including usage of the Model in an end-use application - e.g. chatbot, translator, image generator. -- "Third Parties" means individuals or legal entities that are not under common control with Licensor or You. -- "Contribution" means any work of authorship, including the original version of the Model and any modifications or additions to that Model or Derivatives of the Model thereof, that is intentionally submitted to Licensor for inclusion in the Model by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Model, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." -- "Contributor" means Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Model. - -Section II: INTELLECTUAL PROPERTY RIGHTS - -Both copyright and patent grants apply to the Model, Derivatives of the Model and Complementary Material. The Model and Derivatives of the Model are subject to additional terms as described in Section III. - -2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare, publicly display, publicly perform, sublicense, and distribute the Complementary Material, the Model, and Derivatives of the Model. -3. Grant of Patent License. Subject to the terms and conditions of this License and where and as applicable, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this paragraph) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Model and the Complementary Material, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Model to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Model and/or Complementary Material or a Contribution incorporated within the Model and/or Complementary Material constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for the Model and/or Work shall terminate as of the date such litigation is asserted or filed. - -Section III: CONDITIONS OF USAGE, DISTRIBUTION AND REDISTRIBUTION - -4. Distribution and Redistribution. You may host for Third Party remote access purposes (e.g. software-as-a-service), reproduce and distribute copies of the Model or Derivatives of the Model thereof in any medium, with or without modifications, provided that You meet the following conditions: -Use-based restrictions as referenced in paragraph 5 MUST be included as an enforceable provision by You in any type of legal agreement (e.g. a license) governing the use and/or distribution of the Model or Derivatives of the Model, and You shall give notice to subsequent users You Distribute to, that the Model or Derivatives of the Model are subject to paragraph 5. This provision does not apply to the use of Complementary Material. -You must give any Third Party recipients of the Model or Derivatives of the Model a copy of this License; -You must cause any modified files to carry prominent notices stating that You changed the files; -You must retain all copyright, patent, trademark, and attribution notices excluding those notices that do not pertain to any part of the Model, Derivatives of the Model. -You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions - respecting paragraph 4.a. - for use, reproduction, or Distribution of Your modifications, or for any such Derivatives of the Model as a whole, provided Your use, reproduction, and Distribution of the Model otherwise complies with the conditions stated in this License. -5. Use-based restrictions. The restrictions set forth in Attachment A are considered Use-based restrictions. Therefore You cannot use the Model and the Derivatives of the Model for the specified restricted uses. You may use the Model subject to this License, including only for lawful purposes and in accordance with the License. Use may include creating any content with, finetuning, updating, running, training, evaluating and/or reparametrizing the Model. You shall require all of Your users who use the Model or a Derivative of the Model to comply with the terms of this paragraph (paragraph 5). -6. The Output You Generate. Except as set forth herein, Licensor claims no rights in the Output You generate using the Model. You are accountable for the Output you generate and its subsequent uses. No use of the output can contravene any provision as stated in the License. - -Section IV: OTHER PROVISIONS - -7. Updates and Runtime Restrictions. To the maximum extent permitted by law, Licensor reserves the right to restrict (remotely or otherwise) usage of the Model in violation of this License, update the Model through electronic means, or modify the Output of the Model based on updates. You shall undertake reasonable efforts to use the latest version of the Model. -8. Trademarks and related. 
Nothing in this License permits You to make use of Licensors’ trademarks, trade names, logos or to otherwise suggest endorsement or misrepresent the relationship between the parties; and any rights not expressly granted herein are reserved by the Licensors. -9. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Model and the Complementary Material (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Model, Derivatives of the Model, and the Complementary Material and assume any risks associated with Your exercise of permissions under this License. -10. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Model and the Complementary Material (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. -11. Accepting Warranty or Additional Liability. While redistributing the Model, Derivatives of the Model and the Complementary Material thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. -12. If any provision of this License is held to be invalid, illegal or unenforceable, the remaining provisions shall be unaffected thereby and remain valid as if such provision had not been set forth herein. 
- -END OF TERMS AND CONDITIONS - - - - -Attachment A - -Use Restrictions - -You agree not to use the Model or Derivatives of the Model: -- In any way that violates any applicable national, federal, state, local or international law or regulation; -- For the purpose of exploiting, harming or attempting to exploit or harm minors in any way; -- To generate or disseminate verifiably false information and/or content with the purpose of harming others; -- To generate or disseminate personal identifiable information that can be used to harm an individual; -- To defame, disparage or otherwise harass others; -- For fully automated decision making that adversely impacts an individual’s legal rights or otherwise creates or modifies a binding, enforceable obligation; -- For any use intended to or which has the effect of discriminating against or harming individuals or groups based on online or offline social behavior or known or predicted personal or personality characteristics; -- To exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm; -- For any use intended to or which has the effect of discriminating against individuals or groups based on legally protected characteristics or categories; -- To provide medical advice and medical results interpretation; -- To generate or disseminate information for the purpose to be used for administration of justice, law enforcement, immigration or asylum processes, such as predicting an individual will commit fraud/crime commitment (e.g. by text profiling, drawing causal relationships between assertions made in documents, indiscriminate and arbitrarily-targeted use). \ No newline at end of file diff --git a/sdk/example_containers/stablediffusion/src/README.md b/sdk/example_containers/stablediffusion/src/README.md deleted file mode 100644 index 7130aca..0000000 --- a/sdk/example_containers/stablediffusion/src/README.md +++ /dev/null @@ -1,101 +0,0 @@ -# stable_diffusion.openvino - -Implementation of Text-To-Image generation using Stable Diffusion on Intel CPU or GPU. -
- -## Requirements - -* Linux, Windows, MacOS -* Python <= 3.9.0 -* CPU or GPU compatible with OpenVINO. - -## Install requirements - -* Set up and update PIP to the highest version -* Install OpenVINO™ Development Tools 2022.3.0 release with PyPI -* Download requirements - -```bash -python -m pip install --upgrade pip -pip install openvino-dev[onnx,pytorch]==2022.3.0 -pip install -r requirements.txt -``` - -## Generate image from text description - -```bash -usage: demo.py [-h] [--model MODEL] [--device DEVICE] [--seed SEED] [--beta-start BETA_START] [--beta-end BETA_END] [--beta-schedule BETA_SCHEDULE] - [--num-inference-steps NUM_INFERENCE_STEPS] [--guidance-scale GUIDANCE_SCALE] [--eta ETA] [--tokenizer TOKENIZER] [--prompt PROMPT] [--params-from PARAMS_FROM] - [--init-image INIT_IMAGE] [--strength STRENGTH] [--mask MASK] [--output OUTPUT] - -optional arguments: - -h, --help show this help message and exit - --model MODEL model name - --device DEVICE inference device [CPU, GPU] - --seed SEED random seed for generating consistent images per prompt - --beta-start BETA_START - LMSDiscreteScheduler::beta_start - --beta-end BETA_END LMSDiscreteScheduler::beta_end - --beta-schedule BETA_SCHEDULE - LMSDiscreteScheduler::beta_schedule - --num-inference-steps NUM_INFERENCE_STEPS - num inference steps - --guidance-scale GUIDANCE_SCALE - guidance scale - --eta ETA eta - --tokenizer TOKENIZER - tokenizer - --prompt PROMPT prompt - --params-from PARAMS_FROM - Extract parameters from a previously generated image. - --init-image INIT_IMAGE - path to initial image - --strength STRENGTH how strong the initial image should be noised [0.0, 1.0] - --mask MASK mask of the region to inpaint on the initial image - --output OUTPUT output image name -``` - -## Examples - -### Example Text-To-Image -```bash -python demo.py --prompt "Street-art painting of Emilia Clarke in style of Banksy, photorealism" -``` - -### Example Image-To-Image -```bash -python demo.py --prompt "Photo of Emilia Clarke with a bright red hair" --init-image ./data/input.png --strength 0.5 -``` - -### Example Inpainting -```bash -python demo.py --prompt "Photo of Emilia Clarke with a bright red hair" --init-image ./data/input.png --mask ./data/mask.png --strength 0.5 -``` - -## Performance - -| CPU | Time per iter | Total time | -|----------------------------------------------------|---------------|------------| -| AMD Ryzen 7 4800H | 4.8 s/it | 2.58 min | -| AMD Ryzen Threadripper 1900X | 5.34 s/it | 2.58 min | -| Intel(R) Core(TM) i7-4790K @ 4.00GHz | 10.1 s/it | 5.39 min | -| Intel(R) Core(TM) i5-8279U | 7.4 s/it | 3.59 min | -| Intel(R) Core(TM) i5-8569U @ 2.8GHz (MBP13-2019) | 6.17 s/it | 3.23 min | -| Intel(R) Core(TM) i7-1165G7 @ 2.80GHz | 7.4 s/it | 3.59 min | -| Intel(R) Core(TM) i7-11800H @ 2.30GHz (16 threads) | 2.9 s/it | 1.54 min | -| Intel(R) Core(TM) i7-1280P @ 1.80GHz (6P/8E) | 5.45 s/it | 2.55 min | -| Intel(R) Xeon(R) Gold 6154 CPU @ 3.00GHz | 1 s/it | 33 s | -| Intel Arc A770M | 6.64 it/s | 7.53 s | - - -## Acknowledgements - -* Original implementation of Stable Diffusion: https://github.com/CompVis/stable-diffusion -* diffusers library: https://github.com/huggingface/diffusers - -## Disclaimer - -The authors are not responsible for the content generated using this project. -Please, don't use this project to produce illegal, harmful, offensive etc. content. 
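The README above documents the demo.py command line, but inside the MECA task container the same parameters are accepted as a JSON body by the Flask wrapper (app.py, removed just below), which replies with the generated image as base64-encoded bytes. A minimal smoke-test sketch, assuming the container is running locally and its Flask app is reachable at http://localhost:8080/ (the host and port here are illustrative assumptions, not values fixed by app.py):

```python
# Sketch only: exercise the (removed) stablediffusion example container directly.
# Assumption: the Flask app from app.py is served at http://localhost:8080/.
import base64

import requests

payload = {
    # Keys mirror the arguments parsed by app.py's RequestParser.
    "prompt": "Street-art painting of Emilia Clarke in style of Banksy, photorealism",
    "num_inference_steps": 8,  # small value for a quick test
}

resp = requests.post("http://localhost:8080/", json=payload, timeout=600)
resp.raise_for_status()

# app.py returns base64-encoded PNG bytes; decode and save them.
with open("output.png", "wb") as f:
    f.write(base64.b64decode(resp.content))
print("wrote output.png")
```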
diff --git a/sdk/example_containers/stablediffusion/src/app.py b/sdk/example_containers/stablediffusion/src/app.py deleted file mode 100644 index 66dd8c3..0000000 --- a/sdk/example_containers/stablediffusion/src/app.py +++ /dev/null @@ -1,51 +0,0 @@ -import base64 -from flask import Flask -from flask_restful import reqparse -import demo -from openvino.runtime import Core - -app = Flask(__name__) - - -@app.route('/', methods=['POST']) -def entry_point(): - parser = reqparse.RequestParser() - # pipeline configure - parser.add_argument("model", type=str, default="bes-dev/stable-diffusion-v1-4-openvino", help="model name") - # inference device - parser.add_argument("device", type=str, default="CPU", help=f"inference device [{', '.join(Core().available_devices)}]") - # randomizer params - parser.add_argument("seed", type=int, default=None, help="random seed for generating consistent images per prompt") - # scheduler params - parser.add_argument("beta_start", type=float, default=0.00085, help="LMSDiscreteScheduler::beta_start") - parser.add_argument("beta_end", type=float, default=0.012, help="LMSDiscreteScheduler::beta_end") - parser.add_argument("beta_schedule", type=str, default="scaled_linear", help="LMSDiscreteScheduler::beta_schedule") - # diffusion params - parser.add_argument("num_inference_steps", type=int, default=32, help="num inference steps") - parser.add_argument("guidance_scale", type=float, default=7.5, help="guidance scale") - parser.add_argument("eta", type=float, default=0.0, help="eta") - # tokenizer - parser.add_argument("tokenizer", type=str, default="openai/clip-vit-large-patch14", help="tokenizer") - # prompt - parser.add_argument("prompt", type=str, default="Street-art painting of Emilia Clarke in style of Banksy, photorealism", help="prompt") - # Parameter re-use: - parser.add_argument("params_from", type=str, required=False, help="Extract parameters from a previously generated image.") - # img2img params - parser.add_argument("init_image", type=str, default=None, help="path to initial image") - parser.add_argument("strength", type=float, default=0.5, help="how strong the initial image should be noised [0.0, 1.0]") - # inpainting - parser.add_argument("mask", type=str, default=None, help="mask of the region to inpaint on the initial image") - # output name - parser.add_argument("output", type=str, default="output.png", help="output image name") - - args = parser.parse_args() - print(args) - - demo.main(args) - - with open(args.output, 'rb') as f: - base64_bytes = base64.b64encode(f.read()) - - return base64_bytes - -print("meca-init-done") diff --git a/sdk/example_containers/stablediffusion/src/data/input.png b/sdk/example_containers/stablediffusion/src/data/input.png deleted file mode 100644 index 14506e3..0000000 Binary files a/sdk/example_containers/stablediffusion/src/data/input.png and /dev/null differ diff --git a/sdk/example_containers/stablediffusion/src/data/mask.png b/sdk/example_containers/stablediffusion/src/data/mask.png deleted file mode 100644 index 146ee1f..0000000 Binary files a/sdk/example_containers/stablediffusion/src/data/mask.png and /dev/null differ diff --git a/sdk/example_containers/stablediffusion/src/data/title.png b/sdk/example_containers/stablediffusion/src/data/title.png deleted file mode 100644 index 96ff67b..0000000 Binary files a/sdk/example_containers/stablediffusion/src/data/title.png and /dev/null differ diff --git a/sdk/example_containers/stablediffusion/src/demo.py b/sdk/example_containers/stablediffusion/src/demo.py deleted file 
mode 100644 index e93eefe..0000000 --- a/sdk/example_containers/stablediffusion/src/demo.py +++ /dev/null @@ -1,95 +0,0 @@ -# -- coding: utf-8 --` -import argparse -import os -import json -import random -# engine -from stable_diffusion_engine import StableDiffusionEngine -# scheduler -from diffusers import LMSDiscreteScheduler, PNDMScheduler -# utils -import cv2 -import numpy as np -from openvino.runtime import Core -import time - -engine = None - -def start(args): - global engine - if args.seed is None: - args.seed = random.randint(0, 2**30) - np.random.seed(args.seed) - if args.init_image is None: - print(time.time()) - scheduler = LMSDiscreteScheduler( - beta_start=args.beta_start, - beta_end=args.beta_end, - beta_schedule=args.beta_schedule, - tensor_format="np" - ) - else: - print(time.time()) - scheduler = PNDMScheduler( - beta_start=args.beta_start, - beta_end=args.beta_end, - beta_schedule=args.beta_schedule, - skip_prk_steps = True, - tensor_format="np" - ) - print(time.time()) - engine = StableDiffusionEngine( - model=args.model, - scheduler=scheduler, - tokenizer=args.tokenizer, - device=args.device - ) - print(time.time()) - -def main(args): - global engine - if engine is None: - start(args) - print(time.time()) - image = engine( - prompt=args.prompt, - init_image=None if args.init_image is None else cv2.imread(args.init_image), - mask=None if args.mask is None else cv2.imread(args.mask, 0), - strength=args.strength, - num_inference_steps=args.num_inference_steps, - guidance_scale=args.guidance_scale, - eta=args.eta - ) - cv2.imwrite(args.output, image) - -# if __name__ == "__main__": -# parser = argparse.ArgumentParser() -# # pipeline configure -# parser.add_argument("--model", type=str, default="bes-dev/stable-diffusion-v1-4-openvino", help="model name") -# # inference device -# parser.add_argument("--device", type=str, default="CPU", help=f"inference device [{', '.join(Core().available_devices)}]") -# # randomizer params -# parser.add_argument("--seed", type=int, default=None, help="random seed for generating consistent images per prompt") -# # scheduler params -# parser.add_argument("--beta-start", type=float, default=0.00085, help="LMSDiscreteScheduler::beta_start") -# parser.add_argument("--beta-end", type=float, default=0.012, help="LMSDiscreteScheduler::beta_end") -# parser.add_argument("--beta-schedule", type=str, default="scaled_linear", help="LMSDiscreteScheduler::beta_schedule") -# # diffusion params -# parser.add_argument("--num-inference-steps", type=int, default=32, help="num inference steps") -# parser.add_argument("--guidance-scale", type=float, default=7.5, help="guidance scale") -# parser.add_argument("--eta", type=float, default=0.0, help="eta") -# # tokenizer -# parser.add_argument("--tokenizer", type=str, default="openai/clip-vit-large-patch14", help="tokenizer") -# # prompt -# parser.add_argument("--prompt", type=str, default="Street-art painting of Emilia Clarke in style of Banksy, photorealism", help="prompt") -# # Parameter re-use: -# parser.add_argument("--params-from", type=str, required=False, help="Extract parameters from a previously generated image.") -# # img2img params -# parser.add_argument("--init-image", type=str, default=None, help="path to initial image") -# parser.add_argument("--strength", type=float, default=0.5, help="how strong the initial image should be noised [0.0, 1.0]") -# # inpainting -# parser.add_argument("--mask", type=str, default=None, help="mask of the region to inpaint on the initial image") -# # output name -# 
parser.add_argument("--output", type=str, default="output.png", help="output image name") -# args = parser.parse_args() -# main(args) diff --git a/sdk/example_containers/stablediffusion/src/requirements.txt b/sdk/example_containers/stablediffusion/src/requirements.txt deleted file mode 100644 index b5c3a54..0000000 --- a/sdk/example_containers/stablediffusion/src/requirements.txt +++ /dev/null @@ -1,14 +0,0 @@ -numpy==1.19.5 -opencv-python==4.5.5.64 -transformers==4.16.2 -diffusers==0.2.4 -tqdm==4.64.0 -openvino==2022.3.0 -huggingface_hub==0.9.0 -scipy==1.9.0 -streamlit==1.12.0 -watchdog==2.1.9 -ftfy==6.1.1 -streamlit_drawable_canvas==0.9.1 -pillow==9.0.1 -piexif==1.1.3 diff --git a/sdk/example_containers/stablediffusion/src/stable_diffusion_engine.py b/sdk/example_containers/stablediffusion/src/stable_diffusion_engine.py deleted file mode 100644 index 949a10d..0000000 --- a/sdk/example_containers/stablediffusion/src/stable_diffusion_engine.py +++ /dev/null @@ -1,239 +0,0 @@ -import inspect -import numpy as np -# openvino -from openvino.runtime import Core -# tokenizer -from transformers import CLIPTokenizer -# utils -from tqdm import tqdm -from huggingface_hub import hf_hub_download -from diffusers import LMSDiscreteScheduler, PNDMScheduler -import cv2 - - -def result(var): - return next(iter(var.values())) - - -class StableDiffusionEngine: - def __init__( - self, - scheduler, - model="bes-dev/stable-diffusion-v1-4-openvino", - tokenizer="openai/clip-vit-large-patch14", - device="CPU" - ): - self.tokenizer = CLIPTokenizer.from_pretrained(tokenizer) - self.scheduler = scheduler - # models - self.core = Core() - self.core.set_property({'CACHE_DIR': './cache'}) - - if model == "bes-dev/stable-diffusion-v1-4-openvino": - # read from local - self._text_encoder = self.core.read_model( - "stable-diffusion-v1-4-openvino/text_encoder.xml" - ) - self.text_encoder = self.core.compile_model(self._text_encoder, device) - # diffusion - self._unet = self.core.read_model( - "stable-diffusion-v1-4-openvino/unet.xml" - ) - self.unet = self.core.compile_model(self._unet, device) - self.latent_shape = tuple(self._unet.inputs[0].shape)[1:] - # decoder - self._vae_decoder = self.core.read_model( - "stable-diffusion-v1-4-openvino/vae_decoder.xml" - ) - self.vae_decoder = self.core.compile_model(self._vae_decoder, device) - # encoder - self._vae_encoder = self.core.read_model( - "stable-diffusion-v1-4-openvino/vae_encoder.xml" - ) - self.vae_encoder = self.core.compile_model(self._vae_encoder, device) - self.init_image_shape = tuple(self._vae_encoder.inputs[0].shape)[2:] - return - - # text features - self._text_encoder = self.core.read_model( - hf_hub_download(repo_id=model, filename="text_encoder.xml"), - hf_hub_download(repo_id=model, filename="text_encoder.bin") - ) - self.text_encoder = self.core.compile_model(self._text_encoder, device) - # diffusion - self._unet = self.core.read_model( - hf_hub_download(repo_id=model, filename="unet.xml"), - hf_hub_download(repo_id=model, filename="unet.bin") - ) - self.unet = self.core.compile_model(self._unet, device) - self.latent_shape = tuple(self._unet.inputs[0].shape)[1:] - # decoder - self._vae_decoder = self.core.read_model( - hf_hub_download(repo_id=model, filename="vae_decoder.xml"), - hf_hub_download(repo_id=model, filename="vae_decoder.bin") - ) - self.vae_decoder = self.core.compile_model(self._vae_decoder, device) - # encoder - self._vae_encoder = self.core.read_model( - hf_hub_download(repo_id=model, filename="vae_encoder.xml"), - 
hf_hub_download(repo_id=model, filename="vae_encoder.bin") - ) - self.vae_encoder = self.core.compile_model(self._vae_encoder, device) - self.init_image_shape = tuple(self._vae_encoder.inputs[0].shape)[2:] - - def _preprocess_mask(self, mask): - h, w = mask.shape - if h != self.init_image_shape[0] and w != self.init_image_shape[1]: - mask = cv2.resize( - mask, - (self.init_image_shape[1], self.init_image_shape[0]), - interpolation = cv2.INTER_NEAREST - ) - mask = cv2.resize( - mask, - (self.init_image_shape[1] // 8, self.init_image_shape[0] // 8), - interpolation = cv2.INTER_NEAREST - ) - mask = mask.astype(np.float32) / 255.0 - mask = np.tile(mask, (4, 1, 1)) - mask = mask[None].transpose(0, 1, 2, 3) - mask = 1 - mask - return mask - - def _preprocess_image(self, image): - image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) - h, w = image.shape[1:] - if h != self.init_image_shape[0] and w != self.init_image_shape[1]: - image = cv2.resize( - image, - (self.init_image_shape[1], self.init_image_shape[0]), - interpolation=cv2.INTER_LANCZOS4 - ) - # normalize - image = image.astype(np.float32) / 255.0 - image = 2.0 * image - 1.0 - # to batch - image = image[None].transpose(0, 3, 1, 2) - return image - - def _encode_image(self, init_image): - moments = result(self.vae_encoder.infer_new_request({ - "init_image": self._preprocess_image(init_image) - })) - mean, logvar = np.split(moments, 2, axis=1) - std = np.exp(logvar * 0.5) - latent = (mean + std * np.random.randn(*mean.shape)) * 0.18215 - return latent - - def __call__( - self, - prompt, - init_image = None, - mask = None, - strength = 0.5, - num_inference_steps = 32, - guidance_scale = 7.5, - eta = 0.0 - ): - # extract condition - tokens = self.tokenizer( - prompt, - padding="max_length", - max_length=self.tokenizer.model_max_length, - truncation=True - ).input_ids - text_embeddings = result( - self.text_encoder.infer_new_request({"tokens": np.array([tokens])}) - ) - - # do classifier free guidance - if guidance_scale > 1.0: - tokens_uncond = self.tokenizer( - "", - padding="max_length", - max_length=self.tokenizer.model_max_length, - truncation=True - ).input_ids - uncond_embeddings = result( - self.text_encoder.infer_new_request({"tokens": np.array([tokens_uncond])}) - ) - text_embeddings = np.concatenate((uncond_embeddings, text_embeddings), axis=0) - - # set timesteps - accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys()) - extra_set_kwargs = {} - offset = 0 - if accepts_offset: - offset = 1 - extra_set_kwargs["offset"] = 1 - - self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs) - - # initialize latent latent - if init_image is None: - latents = np.random.randn(*self.latent_shape) - init_timestep = num_inference_steps - else: - init_latents = self._encode_image(init_image) - init_timestep = int(num_inference_steps * strength) + offset - init_timestep = min(init_timestep, num_inference_steps) - timesteps = np.array([[self.scheduler.timesteps[-init_timestep]]]).astype(np.long) - noise = np.random.randn(*self.latent_shape) - latents = self.scheduler.add_noise(init_latents, noise, timesteps)[0] - - if init_image is not None and mask is not None: - mask = self._preprocess_mask(mask) - else: - mask = None - - # if we use LMSDiscreteScheduler, let's make sure latents are mulitplied by sigmas - if isinstance(self.scheduler, LMSDiscreteScheduler): - latents = latents * self.scheduler.sigmas[0] - - # prepare extra kwargs for the scheduler step, since not all schedulers have the same 
signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. - # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - t_start = max(num_inference_steps - init_timestep + offset, 0) - for i, t in tqdm(enumerate(self.scheduler.timesteps[t_start:])): - # expand the latents if we are doing classifier free guidance - latent_model_input = np.stack([latents, latents], 0) if guidance_scale > 1.0 else latents[None] - if isinstance(self.scheduler, LMSDiscreteScheduler): - sigma = self.scheduler.sigmas[i] - latent_model_input = latent_model_input / ((sigma**2 + 1) ** 0.5) - - # predict the noise residual - noise_pred = result(self.unet.infer_new_request({ - "latent_model_input": latent_model_input, - "t": np.float64(t), - "encoder_hidden_states": text_embeddings - })) - - # perform guidance - if guidance_scale > 1.0: - noise_pred = noise_pred[0] + guidance_scale * (noise_pred[1] - noise_pred[0]) - - # compute the previous noisy sample x_t -> x_t-1 - if isinstance(self.scheduler, LMSDiscreteScheduler): - latents = self.scheduler.step(noise_pred, i, latents, **extra_step_kwargs)["prev_sample"] - else: - latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs)["prev_sample"] - - # masking for inapinting - if mask is not None: - init_latents_proper = self.scheduler.add_noise(init_latents, noise, t) - latents = ((init_latents_proper * mask) + (latents * (1 - mask)))[0] - - image = result(self.vae_decoder.infer_new_request({ - "latents": np.expand_dims(latents, 0) - })) - - # convert tensor to opencv's image format - image = (image / 2 + 0.5).clip(0, 1) - image = (image[0].transpose(1, 2, 0)[:, :, ::-1] * 255).astype(np.uint8) - return image diff --git a/sdk/js/.npmignore b/sdk/js/.npmignore deleted file mode 100644 index 765b7bf..0000000 --- a/sdk/js/.npmignore +++ /dev/null @@ -1 +0,0 @@ -consumer.js \ No newline at end of file diff --git a/sdk/js/api.js b/sdk/js/api.js deleted file mode 100644 index af58a10..0000000 --- a/sdk/js/api.js +++ /dev/null @@ -1,36 +0,0 @@ -const io = require('socket.io-client'); - -async function initiateConnection(containerRef, callbackOnReceive) { - return new Promise((resolve, reject) => { - const socket = io('http://localhost:3000', {query: {containerRef}}); - socket.on('connect', () => { - console.log('Connected to server'); - }); - socket.on('connect_error', (error) => { - console.error('Error connecting to server:', error); - reject(error); - }); - socket.on('disconnect', () => { - console.log('Disconnected from server'); - }); - socket.on('job-results-received', (id, result) => { - console.log('Received result:', result, 'for job:', id); - callbackOnReceive(id, result); - }); - socket.on('registered', (registered) => { - console.log('Registered with server: ', registered); - if (!registered) { - reject('Container not registered'); - } - resolve(socket); - }) - }); -} - -async function offloadTask(task, callback) { // publish job - console.log('Offloading task...'); - socket.emit('offload', task, callback); -} - -exports.connectAndListen = connectAndListen; -exports.offloadTask = offloadTask; diff --git a/sdk/js/package-lock.json b/sdk/js/package-lock.json deleted file mode 100644 index 445dee2..0000000 --- a/sdk/js/package-lock.json +++ /dev/null @@ -1,116 +0,0 @@ -{ - "name": "meca_connector", - 
"version": "1.0.0", - "lockfileVersion": 3, - "requires": true, - "packages": { - "": { - "name": "meca_connector", - "version": "1.0.0", - "license": "ISC", - "dependencies": { - "socket.io-client": "^4.7.1" - } - }, - "node_modules/@socket.io/component-emitter": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/@socket.io/component-emitter/-/component-emitter-3.1.0.tgz", - "integrity": "sha512-+9jVqKhRSpsc591z5vX+X5Yyw+he/HCB4iQ/RYxw35CEPaY1gnsNE43nf9n9AaYjAQrTiI/mOwKUKdUs9vf7Xg==" - }, - "node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/engine.io-client": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/engine.io-client/-/engine.io-client-6.5.1.tgz", - "integrity": "sha512-hE5wKXH8Ru4L19MbM1GgYV/2Qo54JSMh1rlJbfpa40bEWkCKNo3ol2eOtGmowcr+ysgbI7+SGL+by42Q3pt/Ng==", - "dependencies": { - "@socket.io/component-emitter": "~3.1.0", - "debug": "~4.3.1", - "engine.io-parser": "~5.1.0", - "ws": "~8.11.0", - "xmlhttprequest-ssl": "~2.0.0" - } - }, - "node_modules/engine.io-parser": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/engine.io-parser/-/engine.io-parser-5.1.0.tgz", - "integrity": "sha512-enySgNiK5tyZFynt3z7iqBR+Bto9EVVVvDFuTT0ioHCGbzirZVGDGiQjZzEp8hWl6hd5FSVytJGuScX1C1C35w==", - "engines": { - "node": ">=10.0.0" - } - }, - "node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "node_modules/socket.io-client": { - "version": "4.7.1", - "resolved": "https://registry.npmjs.org/socket.io-client/-/socket.io-client-4.7.1.tgz", - "integrity": "sha512-Qk3Xj8ekbnzKu3faejo4wk2MzXA029XppiXtTF/PkbTg+fcwaTw1PlDrTrrrU4mKoYC4dvlApOnSeyLCKwek2w==", - "dependencies": { - "@socket.io/component-emitter": "~3.1.0", - "debug": "~4.3.2", - "engine.io-client": "~6.5.1", - "socket.io-parser": "~4.2.4" - }, - "engines": { - "node": ">=10.0.0" - } - }, - "node_modules/socket.io-parser": { - "version": "4.2.4", - "resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-4.2.4.tgz", - "integrity": "sha512-/GbIKmo8ioc+NIWIhwdecY0ge+qVBSMdgxGygevmdHj24bsfgtCmcUUcQ5ZzcylGFHsN3k4HB4Cgkl96KVnuew==", - "dependencies": { - "@socket.io/component-emitter": "~3.1.0", - "debug": "~4.3.1" - }, - "engines": { - "node": ">=10.0.0" - } - }, - "node_modules/ws": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.11.0.tgz", - "integrity": "sha512-HPG3wQd9sNQoT9xHyNCXoDUa+Xw/VevmY9FoHyQ+g+rrMn4j6FB4np7Z0OhdTgjx6MgQLK7jwSy1YecU1+4Asg==", - "engines": { - "node": ">=10.0.0" - }, - "peerDependencies": { - "bufferutil": "^4.0.1", - "utf-8-validate": "^5.0.2" - }, - "peerDependenciesMeta": { - "bufferutil": { - "optional": true - }, - "utf-8-validate": { - "optional": true - } - } - }, - "node_modules/xmlhttprequest-ssl": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/xmlhttprequest-ssl/-/xmlhttprequest-ssl-2.0.0.tgz", - "integrity": "sha512-QKxVRxiRACQcVuQEYFsI1hhkrMlrXHPegbbd1yn9UHOmRxY+si12nQYzri3vbzt8VdTTRviqcKxcyllFas5z2A==", - "engines": { - "node": ">=0.4.0" - } - } - } -} diff --git 
a/sdk/js/package.json b/sdk/js/package.json deleted file mode 100644 index 839a5b1..0000000 --- a/sdk/js/package.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "name": "meca_connector", - "version": "1.0.0", - "description": "API for using MECA", - "main": "api.js", - "scripts": { - "test": "echo \"Error: no test specified\" && exit 1" - }, - "author": "", - "license": "ISC", - "dependencies": { - "socket.io-client": "^4.7.1" - } -} diff --git a/sdk/knn.py b/sdk/knn.py deleted file mode 100644 index b6ea2e5..0000000 --- a/sdk/knn.py +++ /dev/null @@ -1,106 +0,0 @@ -import asyncio -import json -from py import meca_api -import torch -from dotenv import load_dotenv - -def euclidean_distance(a, b): - return torch.sqrt(torch.sum((a - b) ** 2)) - -def find_k_nearest_neighbors(input_data, dataset_chunk, k): - distances = [(index, euclidean_distance(input_data, data_point)) for index, data_point in enumerate(dataset_chunk)] - distances.sort(key=lambda x: x[1]) - return [index for index, _ in distances[:k]] - -def process_data_chunk(input_data, dataset_chunk, k, chunk_start_index, result_queue): - neighbors = find_k_nearest_neighbors(input_data, dataset_chunk, k) - adjusted_neighbors = [index + chunk_start_index for index in neighbors] - result_queue.put(adjusted_neighbors) - -async def distributed_knn(input_data, dataset, k, num_processes): - num_data_points = len(dataset) - chunk_size = num_data_points // num_processes - - # ==================== MECA OFFLOAD API ==================== - - result_list = [] - - def callback_on_receive(task_id, status, response, err, corr_id): - print("Received in test:", task_id, status, response, err, corr_id) - if status == 0: - print(err, "for task: ", task_id, "corr_id:", corr_id) - try: - result = json.loads(response) - except: - print("Error parsing response:", response) - return - result_list.extend(result) - - try: - await meca_api.initiate_connection() - except Exception as e: - await meca_api.disconnect() - raise SystemExit() - - for i in range(num_processes): - start_index = i * chunk_size - end_index = (i + 1) * chunk_size if i < num_processes - 1 else num_data_points - sliced_dataset = dataset[start_index:end_index].clone().detach() - - await meca_api.offload_task( - task_id=str(i), - container_ref='my_knn_app:latest', - data=json.dumps({ - "dataset": sliced_dataset.tolist(), - "point": input_data.tolist(), - "k": k, - "num_processes": num_processes - }), - resource={ - "cpu": 1, - "memory": 256 - }, - callback=callback_on_receive) - - await meca_api.join(task_timeout=30, join_timeout=30) - - # ==================== ==================== - - distances = [(index, euclidean_distance(input_data, dataset[index])) for index in result_list] - distances.sort(key=lambda x: x[1]) - overall_neighbors = [index for index, _ in distances[:k]] - - return overall_neighbors - -# Example usage: -async def main(): - load_dotenv() - - dataset = torch.Tensor([ - [1, 2], - [3, 4], - [5, 6], - [7, 8], - [9, 10] - ]) - - # Example input data point - input_data = torch.Tensor([2, 3]) - k = 2 - num_processes = 1 - - print("Number of processes:", num_processes) - print("Number of nearest neighbors:", k) - - neighbors = await distributed_knn(input_data, dataset, k, num_processes) - - print("Nearest neighbors indices:", neighbors) - print("Nearest neighbors data points:", dataset[neighbors]) - await meca_api.disconnect() - -if __name__ == '__main__': - try: - asyncio.run(main()) - except KeyboardInterrupt: - print("Program closed by user.") - asyncio.run(meca_api.disconnect()) diff --git 
a/sdk/py/meca_api.py b/sdk/py/meca_api.py deleted file mode 100644 index d95420d..0000000 --- a/sdk/py/meca_api.py +++ /dev/null @@ -1,114 +0,0 @@ -import json -import asyncio -import socketio -from typing import Callable - -sio = socketio.AsyncClient() - -registered_event = asyncio.Event() -task_events = [] - -@sio.event -async def connect(): - print('Connected to the Socket.IO server') - -@sio.event -async def connect_error(info): - print('Failed to connect to server:', info) - -@sio.event -async def disconnect(): - print('Disconnected from the Socket.IO server') - await sio.emit('disconnect') - -@sio.on('registered') -async def on_registered(registered): - print('Registered with server: ', registered) - if not registered: - print('Failed to register with server') - return - registered_event.set() - -async def initiate_connection(timeout=10): - server_url = 'http://localhost:3001' - try: - await sio.connect(server_url) - except Exception as e: - print("Exception: ", e) - await sio.disconnect() - raise e - try: - await asyncio.wait_for(registered_event.wait(), timeout=timeout) - except asyncio.TimeoutError: - print('Connection timed out') - await sio.disconnect() - raise Exception('Connection timed out') - -async def offload_task( - task_id: str, - container_ref: str, - data: str, - callback: Callable[[str], None], - resource: dict = None, - runtime: str = None, - use_gpu: bool = False, - gpu_count: int = 0, - ) -> str: - print('Offloading task...', container_ref, data) - payload = { - 'task_id': task_id, - 'container_reference': container_ref, - 'content': data, - 'use_gpu': use_gpu, - 'gpu_count': gpu_count, - } - if resource: - payload['resource'] = resource - if runtime: - payload['runtime'] = runtime - - ### - @sio.on('job_results_received') - async def on_job_results_received(status, response, error, task_id, transaction_id): - print('Received result:', response, 'for job:', task_id) - if status == 0: - print('Error:', error) - callback(task_id, status, response, error, transaction_id) - task_events.pop(0).set() - ### - - offloaded_event = asyncio.Event() - @sio.on('offloaded') - async def on_offload(err, result): - if err: - print('Offload failed', err) - else: - print('Offloaded', result) - offloaded_event.set() - - input_json = json.dumps(payload) - try: - await sio.emit('offload', input_json) - await offloaded_event.wait() - task_events.append(asyncio.Event()) - except Exception as e: - print("Exception: ", e) - await sio.disconnect() - -async def disconnect(): - if sio.connected: - await sio.disconnect() - -async def _wait_for_all_tasks(task_timeout=5): - task_events_copy = task_events.copy() - for task_event in task_events_copy: - try: - await asyncio.wait_for(task_event.wait(), timeout=task_timeout) - except asyncio.TimeoutError: - print('A task timed out') - -async def join(task_timeout=5, join_timeout=10): - try: - await asyncio.wait_for(_wait_for_all_tasks(task_timeout), timeout=join_timeout) - except asyncio.TimeoutError: - print('Join timed out') diff --git a/sdk/py/requirements.txt b/sdk/py/requirements.txt deleted file mode 100644 index ef91721..0000000 --- a/sdk/py/requirements.txt +++ /dev/null @@ -1,15 +0,0 @@ -aiohttp==3.8.5 -aiosignal==1.3.1 -async-timeout==4.0.2 -attrs==23.1.0 -bidict==0.22.1 -certifi==2023.7.22 -charset-normalizer==3.2.0 -frozenlist==1.4.0 -idna==3.4 -multidict==6.0.4 -python-engineio==4.5.1 -python-socketio==5.8.0 -requests==2.31.0 -urllib3==2.0.4 -yarl==1.9.2 diff --git a/sdk/requirements.txt b/sdk/requirements.txt deleted file mode 100644 
index 0c78e74..0000000 --- a/sdk/requirements.txt +++ /dev/null @@ -1,24 +0,0 @@ -aiohttp==3.8.5 -aiosignal==1.3.1 -async-timeout==4.0.2 -attrs==23.1.0 -bidict==0.22.1 -certifi==2023.7.22 -charset-normalizer==3.2.0 -filelock==3.12.2 -frozenlist==1.4.0 -idna==3.4 -Jinja2==3.1.2 -MarkupSafe==2.1.3 -mpmath==1.3.0 -multidict==6.0.4 -networkx==3.1 -numpy==1.25.2 -python-dotenv==1.0.0 -python-engineio==4.5.1 -python-socketio==5.8.0 -requests==2.31.0 -sympy==1.12 -typing_extensions==4.7.1 -urllib3==2.0.4 -yarl==1.9.2 diff --git a/sdk/stablediffusion.py b/sdk/stablediffusion.py deleted file mode 100644 index 8782d6f..0000000 --- a/sdk/stablediffusion.py +++ /dev/null @@ -1,52 +0,0 @@ -from py import meca_api -import asyncio -import base64 -import sys -import os - - -def callback_on_receive(task_id, status, response, err, corr_id): - if status == 1: - try: - os.mkdir('build') - except OSError as error: - print(error) - try: - with open("build/output.png", "wb") as f: - f.write(base64.b64decode(response)) - print("Received result for task", task_id, "at build/output.png") - except: - print("Received result for task", task_id, ":", response) - else: - print(err, "for task: ", task_id, "corr_id:", corr_id) - -async def main(): - args = ' '.join(sys.argv[1:]) - - if args == '': - print("Usage: python3 stablediffusion.py ") - return - - try: - await meca_api.initiate_connection() - except Exception as e: - await meca_api.disconnect() - return - - await meca_api.offload_task( - str(101), - 'sdtest:latest', - args, - callback=callback_on_receive, - resource={'cpu': 8, 'memory': 8192}, - ) - - await meca_api.join(500, 500) - await meca_api.disconnect() - -if __name__ == '__main__': - try: - asyncio.run(main()) - except KeyboardInterrupt: - print("Program closed by user.") - asyncio.run(meca_api.disconnect()) diff --git a/sdk/stablediffusion_ui.py b/sdk/stablediffusion_ui.py deleted file mode 100644 index 2e7dee0..0000000 --- a/sdk/stablediffusion_ui.py +++ /dev/null @@ -1,92 +0,0 @@ -import tkinter as tk -from PIL import Image, ImageTk -from async_tkinter_loop import async_handler, async_mainloop -from py import meca_api -import base64 -import sys -import os - -connected = False - -async def connect(): - global connected - try: - label.config(text="Connecting ...") - await meca_api.initiate_connection() - connected = True - except Exception as e: - await meca_api.disconnect() - sys.exit(1) - -def callback_on_receive(task_id, status, response, err, corr_id): - if status == 1: - try: - os.mkdir('build') - except OSError as error: - print(error) - try: - with open("build/output.png", "wb") as f: - f.write(base64.b64decode(response)) - label.config(text="Received result for task " + task_id + " at output.png") - - pil_image = Image.open('build/output.png') - image = ImageTk.PhotoImage(pil_image) - label.image = image - label.config(image=image, text="") - button.config(state=tk.NORMAL) - except Exception as e: - label.config(text="Received result for task " + task_id + " : " + response + " : " + str(e)) - else: - label.config(text=err + " for task: " + task_id + " corr_id: " + corr_id) - -async def load_image(): - global connected - if not connected: - await connect() - - prompt = prompt_field.get() - steps = steps_field.get() - - button.config(state=tk.DISABLED) - label.config(text="Offoading task ...", image="") - - await meca_api.offload_task( - str(101), - 'sdtest:latest', - '{\"prompt\": \"' + prompt + '\", \"num_inference_steps\": ' + steps + '}', - callback=callback_on_receive, - resource={'cpu': 8, 
'memory': 8192}, - ) - - label.config(text="Computing via MECAnywhere ...", image="") - - await meca_api.join(500, 500) - -root = tk.Tk() -root.title("Image generator") -root.geometry("800x640") - -prompt = tk.Label(root, text="prompt") -prompt.pack() - -prompt_field = tk.Entry(root, width=100) -prompt_field.insert(0, "cute cat 4k, high-res, masterpiece, best quality, soft lighting, dynamic angle") -prompt_field.pack(padx=10, pady=10) -prompt_field.focus() - -steps = tk.Label(root, text="inference steps") -steps.pack() - -steps_field = tk.Entry(root, width=100) -steps_field.insert(0, "8") -steps_field.pack(padx=10, pady=10) -steps_field.focus() - -button = tk.Button(root, text="Load image", command=async_handler(load_image)) -button.config(bd=1, bg="#66cdaa", relief="ridge", fg="white", padx=10, pady=10) -button.pack() - -label = tk.Label(root) -label.pack(expand=1, fill=tk.BOTH) - -async_mainloop(root) diff --git a/src/common/channels.js b/src/common/channels.js index cdd2bf0..e6edd4b 100644 --- a/src/common/channels.js +++ b/src/common/channels.js @@ -14,6 +14,8 @@ const Channels = { CHECK_CONTAINER_GPU_SUPPORT_RESPONSE: 'check-container-gpu-support-response', CHECK_CONTAINER_EXIST: 'check-container-exist', CHECK_CONTAINER_EXIST_RESPONSE: 'check-container-exist-response', + STOP_DOCKER_CONTAINER: 'stop-docker-container', + STOP_DOCKER_CONTAINER_RESPONSE: 'stop-docker-container-response', BUILD_IMAGE: 'build-image', BUILD_IMAGE_RESPONSE: 'build-image-response', APP_CLOSE_INITIATED: 'app-close-initiated', @@ -22,10 +24,6 @@ const Channels = { APP_RELOAD_CONFIRMED: 'app-reload-confirmed', JOB_RECEIVED: 'job-received', JOB_RESULTS_RECEIVED: 'job-results-received', - REGISTER_CLIENT: 'register-client', - CLIENT_REGISTERED: 'client-registered', - DEREGISTER_CLIENT: 'deregister-client', - OFFLOAD_JOB: 'offload-job', OPEN_FILE_DIALOG: 'open-file-dialog', OPEN_FOLDER_DIALOG: 'open-folder-dialog', UPLOAD_FILE_TO_IPFS: 'upload-file-to-ipfs', diff --git a/src/main/dockerIntegration.ts b/src/main/dockerIntegration.ts index 0404b9a..e12c92d 100644 --- a/src/main/dockerIntegration.ts +++ b/src/main/dockerIntegration.ts @@ -421,3 +421,36 @@ function getContainerCreateOptions( } return containerOptions; } + +export const stopDockerContainer = ( + event: IpcMainEvent, + containerName: string +) => { + docker.listContainers({ all: true }, (err, containers) => { + if (err) { + console.error('Error listing containers:', err); + event.reply(Channels.STOP_DOCKER_CONTAINER_RESPONSE, false, err.message); + return; + } + + const containerInfo = containers?.find((c) => + c.Names.some((name) => name.includes(containerName)) + ); + if (containerInfo) { + const container = docker.getContainer(containerInfo.Id); + container.stop((err) => { + if (err) { + console.error(err); + event.reply( + Channels.STOP_DOCKER_CONTAINER_RESPONSE, + false, + err.message + ); + } else { + console.log(`Container ${containerName} stopped.`); + event.reply(Channels.STOP_DOCKER_CONTAINER_RESPONSE, true); + } + }); + } + }); +}; diff --git a/src/main/jobs.ts b/src/main/jobs.ts index f0c06fa..f0e3822 100644 --- a/src/main/jobs.ts +++ b/src/main/jobs.ts @@ -1,5 +1,4 @@ import Channels from '../common/channels'; -import { IpcMainEvent } from 'electron'; export const jobResultsReceived = async (mainWindow, event, id, result) => { if (!mainWindow) { @@ -14,37 +13,3 @@ export const jobReceived = async (mainWindow, event, id, result) => { } mainWindow.webContents.send(Channels.JOB_RECEIVED, id, result); }; - -export const onClientRegistered = - 
(socket) => async (event: IpcMainEvent, registered: boolean) => { - console.log('Client registered: ', registered); - socket.emit('registered', registered); - }; - -export const onJobResultsReceived = - (socket) => - async ( - event: IpcMainEvent, - status: Number, - response: String, - error: String, - taskId: String, - transactionId: String - ) => { - console.log( - 'Sending job results to client... ', - status, - response, - error, - taskId, - transactionId - ); - socket.emit( - 'job_results_received', - status, - response, - error, - taskId, - transactionId - ); - }; diff --git a/src/main/main.ts b/src/main/main.ts index 71db0fa..2dda9e2 100644 --- a/src/main/main.ts +++ b/src/main/main.ts @@ -23,18 +23,14 @@ import { checkContainerExists, checkContainerGPUSupport, buildImage, + stopDockerContainer, } from './dockerIntegration'; import { equalsElectronStore, getElectronStore, setElectronStore, } from './electronStore'; -import { - jobResultsReceived, - jobReceived, - onClientRegistered, - onJobResultsReceived, -} from './jobs'; +import { jobResultsReceived, jobReceived } from './jobs'; import { openFileDialog, @@ -70,33 +66,6 @@ if (isDebug) { require('electron-debug')({ showDevTools: false }); } -const SDK_SOCKET_PORT = process.env.SDK_SOCKET_PORT || 3001; -const io = require('socket.io')(); - -const appDevServer = io.listen(SDK_SOCKET_PORT); - -appDevServer.on('connection', (socket) => { - console.log('A user connected'); - ipcMain.on(Channels.CLIENT_REGISTERED, onClientRegistered(socket)); - ipcMain.on(Channels.JOB_RESULTS_RECEIVED, onJobResultsReceived(socket)); - socket.on('offload', async (jobJson: string) => { - console.log('Received job...', jobJson); - try { - mainWindow?.webContents.send(Channels.OFFLOAD_JOB, jobJson); - socket.emit('offloaded', null, 'success'); - } catch (error) { - socket.emit('offloaded', error, null); - } - }); - socket.on('disconnect', () => { - console.log('A user disconnected'); - mainWindow?.webContents.send(Channels.DEREGISTER_CLIENT); - ipcMain.removeAllListeners(Channels.CLIENT_REGISTERED); - ipcMain.removeListener(Channels.JOB_RESULTS_RECEIVED, onJobResultsReceived); - }); - mainWindow?.webContents.send(Channels.REGISTER_CLIENT); -}); - ipcMain.on(Channels.STORE_GET, getElectronStore); ipcMain.on(Channels.STORE_SET, setElectronStore); @@ -127,6 +96,7 @@ ipcMain.on(Channels.CHECK_DOCKER_DAEMON_RUNNING, checkDockerDaemonRunning); ipcMain.on(Channels.CHECK_CONTAINER_EXIST, checkContainerExists); ipcMain.on(Channels.CHECK_CONTAINER_GPU_SUPPORT, checkContainerGPUSupport); ipcMain.on(Channels.BUILD_IMAGE, buildImage); +ipcMain.on(Channels.STOP_DOCKER_CONTAINER, stopDockerContainer); // ipfs integration ipcMain.on(Channels.OPEN_FILE_DIALOG, openFileDialog); diff --git a/src/main/preload.ts b/src/main/preload.ts index cf605b4..0b3c431 100644 --- a/src/main/preload.ts +++ b/src/main/preload.ts @@ -149,6 +149,22 @@ const electronHandler = { }); }, + stopDockerContainer: (containerName: string) => { + return new Promise((resolve, reject) => { + ipcRenderer.send(Channels.STOP_DOCKER_CONTAINER, containerName); + ipcRenderer.once( + Channels.STOP_DOCKER_CONTAINER_RESPONSE, + (event, success, error) => { + if (success) { + resolve(); + } else { + reject(new Error(error)); + } + } + ); + }); + }, + buildImage: (tag: string, dockerfilePath: string) => { return new Promise((resolve, reject) => { ipcRenderer.send(Channels.BUILD_IMAGE, tag, dockerfilePath); @@ -199,19 +215,6 @@ const electronHandler = { onSubscribeJobResults: (callback: (...args: any[]) => void) => 
diff --git a/src/main/preload.ts b/src/main/preload.ts
index cf605b4..0b3c431 100644
--- a/src/main/preload.ts
+++ b/src/main/preload.ts
@@ -149,6 +149,22 @@ const electronHandler = {
     });
   },
+  stopDockerContainer: (containerName: string) => {
+    return new Promise((resolve, reject) => {
+      ipcRenderer.send(Channels.STOP_DOCKER_CONTAINER, containerName);
+      ipcRenderer.once(
+        Channels.STOP_DOCKER_CONTAINER_RESPONSE,
+        (event, success, error) => {
+          if (success) {
+            resolve();
+          } else {
+            reject(new Error(error));
+          }
+        }
+      );
+    });
+  },
+
   buildImage: (tag: string, dockerfilePath: string) => {
     return new Promise((resolve, reject) => {
       ipcRenderer.send(Channels.BUILD_IMAGE, tag, dockerfilePath);
@@ -199,19 +215,6 @@ const electronHandler = {
   onSubscribeJobResults: (callback: (...args: any[]) => void) => {
     subscribe(Channels.JOB_RESULTS_RECEIVED, callback);
   },
-  // to client
-  onRegisterClient: (callback: (...args: any[]) => void) => {
-    subscribe(Channels.REGISTER_CLIENT, callback);
-  },
-  onOffloadJob: (callback: (...args: any[]) => void) => {
-    subscribe(Channels.OFFLOAD_JOB, callback);
-  },
-  onDeregisterClient: (callback: (...args: any[]) => void) => {
-    subscribe(Channels.DEREGISTER_CLIENT, callback);
-  },
-  // from client
-  clientRegistered: (status: boolean) =>
-    ipcRenderer.send(Channels.CLIENT_REGISTERED, status),
   removeListener: (channel: string, func: (...args: any[]) => void) => {
     ipcRenderer.removeListener(channel, func);
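Note: with the preload bridge above, renderer code can stop a container through an awaitable call. A usage sketch follows; the container name is a placeholder, and `window.electron` is assumed to be typed by the app's preload type declarations.

```
// Usage sketch for the new bridge; 'some_container' is a placeholder name,
// not a value taken from this diff.
async function shutDownContainer(name: string) {
  try {
    await window.electron.stopDockerContainer(name);
    console.log(`Stopped container ${name}`);
  } catch (error) {
    // The bridge rejects with the error message relayed from the main process.
    console.error(`Failed to stop container ${name}:`, error);
  }
}

shutDownContainer('some_container');
```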
diff --git a/src/renderer/App.tsx b/src/renderer/App.tsx
index a958b07..50fee64 100644
--- a/src/renderer/App.tsx
+++ b/src/renderer/App.tsx
@@ -29,7 +29,6 @@ import UploadTask from './components/tasks/UploadTask';
 import Settings from './components/settings/Settings';
 import { getDesignTokens } from './utils/theme';
 import useHandleAppExitHook from './utils/useHandleAppExitHook';
-import useClientHooks from './utils/useClientHooks';
 import TowerManagement from './components/tower/TowerManagement';
 
 const PrivateRoutes = () => {
@@ -156,7 +155,6 @@ const App = () => {
   ) as PaletteMode;
   const theme = useMemo(() => createTheme(getDesignTokens(mode)), [mode]);
   useHandleAppExitHook();
-  useClientHooks();
 
   return (
diff --git a/src/renderer/components/billing/BillingDashboard.tsx b/src/renderer/components/billing/BillingDashboard.tsx
index b18c5f3..d9b5ca1 100644
--- a/src/renderer/components/billing/BillingDashboard.tsx
+++ b/src/renderer/components/billing/BillingDashboard.tsx
@@ -69,18 +69,6 @@ const BillingDashboard: React.FC = () => {
-        {/* Left card */}
-
-
-        {/* Right card */} {
-
-
-        Billing History
-
-
diff --git a/src/renderer/components/billing/list/PastBillingList.tsx b/src/renderer/components/billing/list/PastBillingList.tsx
index 01d9414..abd22eb 100644
--- a/src/renderer/components/billing/list/PastBillingList.tsx
+++ b/src/renderer/components/billing/list/PastBillingList.tsx
@@ -24,7 +24,7 @@ const PastBillingList: React.FC<{ groupedData: GroupedDataEntry[] }> = ({
   };
   const editedData: EditedDataEntry[] = groupedData.map(
     (item: GroupedDataEntry) => {
-      const status = Math.random() > 0.5 ? 'Completed' : 'Pending';
+      const status = 'Completed';
       const [month, year] = item.date.split(' ');
       const startDate = new Date(`${month} 1, ${year}`);
       const dueDate = new Date(
diff --git a/src/renderer/components/componentsCommon/fetchTransactionHistory.tsx b/src/renderer/components/componentsCommon/fetchTransactionHistory.tsx
deleted file mode 100644
index f385161..0000000
--- a/src/renderer/components/componentsCommon/fetchTransactionHistory.tsx
+++ /dev/null
@@ -1,40 +0,0 @@
-import {
-  findHostHistory,
-  findClientHistory,
-} from '../../services/TransactionServices';
-import { DataEntry, DataEntryWithoutRole } from '../../utils/dataTypes';
-
-function combineHistories(
-  hostDidHistory: DataEntryWithoutRole[],
-  clientDidHistory: DataEntryWithoutRole[]
-): DataEntry[] {
-  const hostWithRole = hostDidHistory.map((item) => ({
-    ...item,
-    role: 'host',
-  }));
-  const clientWithRole = clientDidHistory.map((item) => ({
-    ...item,
-    role: 'client',
-  }));
-  return [...hostWithRole, ...clientWithRole];
-}
-
-export default async function fetchTransactionHistory(
-  did: string
-): Promise<DataEntry[]> {
-  try {
-    const hostDidHistoryResponse = await findHostHistory(did);
-    const clientDidHistoryResponse = await findClientHistory(did);
-    const hostDidHistory = await hostDidHistoryResponse?.json();
-    const clientDidHistory = await clientDidHistoryResponse?.json();
-    return combineHistories(hostDidHistory, clientDidHistory);
-  } catch (error) {
-    if (error instanceof Error) {
-      console.error('There was a problem with the fetching transaction history:', error.message);
-      throw new Error(`There was a problem with the fetching transaction history: ${error.message}`);
-    } else {
-      console.error('Unknown Error:', error);
-      throw new Error('An unknown error occurred');
-    }
-  }
-}
diff --git a/src/renderer/components/componentsCommon/handleRegistration.tsx b/src/renderer/components/componentsCommon/handleRegistration.tsx
index 761e347..17c15ce 100644
--- a/src/renderer/components/componentsCommon/handleRegistration.tsx
+++ b/src/renderer/components/componentsCommon/handleRegistration.tsx
@@ -85,20 +85,3 @@ export const handleDeactivateHost = async () => {
     }
   }
 };
-
-export const handleRegisterClient = async () => {
-  const response = await initActor("user");
-  if (!response) {
-    throw new Error('Client registration failed');
-  }
-  log.info('successfully registered as client');
-};
-
-export const handleDeregisterClient = async () => {
-  const response = await closeActor();
-  if (response) {
-    log.info('successfully deregistered as client');
-  } else {
-    throw new Error('Deregistration failed');
-  }
-};
diff --git a/src/renderer/components/transactions/TxnDashboard.tsx b/src/renderer/components/transactions/TxnDashboard.tsx
index 0e263c5..d1b6e21 100644
--- a/src/renderer/components/transactions/TxnDashboard.tsx
+++ b/src/renderer/components/transactions/TxnDashboard.tsx
@@ -4,7 +4,6 @@ import React, { useEffect, useState } from 'react';
 import { Typography, Stack, IconButton } from '@mui/material';
 import { motion } from 'framer-motion';
 import CircularProgress from '@mui/material/CircularProgress';
-import reduxStore from 'renderer/redux/store';
 import RefreshIcon from '@mui/icons-material/Refresh';
 import { scrollbarHeight } from 'renderer/utils/constants';
 import CustomLineChart from './linechart/CustomLineChart';
@@ -12,7 +11,6 @@ import { DataEntry } from '../../utils/dataTypes';
 import { PropConfigList } from './propConfig';
 import Datagrid from './datagrid/Datagrid';
 import Transitions from '../../utils/Transition';
-import fetchTransactionHistory from '../componentsCommon/fetchTransactionHistory';
 import {
   tablePaginationMinHeight,
   maxRowHeight,
@@ -22,9 +20,7 @@ import {
 import dummyData from './dummyData';
 import ErrorDialog from '../componentsCommon/ErrorDialogue';
-
 const TxnDashboard: React.FC = () => {
-  const did = window.electron.store.get('did');
   const [data, setData] = useState([]);
   const [hasData, setHasData] = useState(false);
   const [isTableExpanded, setIsTableExpanded] = useState(false);
@@ -43,9 +39,7 @@ const TxnDashboard: React.FC = () => {
   const fetchAndSetData = async () => {
     setIsLoading(true);
     try {
-      const transactionHistory = await fetchTransactionHistory(
-        did
-      );
+      const transactionHistory = dummyData;
       if (transactionHistory.length > 0) {
         setHasData(true);
       }
diff --git a/src/renderer/services/OffloadServices.tsx b/src/renderer/services/OffloadServices.tsx
deleted file mode 100644
index 5da3dd8..0000000
--- a/src/renderer/services/OffloadServices.tsx
+++ /dev/null
@@ -1,20 +0,0 @@
-const url = process.env.REGISTRATION_SERVICE_API_URL;
-
-export default async function offloadTask(payload: any) {
-  const response = await fetch(
-    `${url}/offloading/offload_task_and_get_result`,
-    {
-      method: 'POST',
-      headers: {
-        'content-type': 'application/json',
-      },
-      body: payload,
-    });
-
-  if (!response.ok) {
-    throw new Error('Network response not ok');
-  }
-
-  const data = await response.json();
-  return data;
-}
diff --git a/src/renderer/services/TransactionServices.tsx b/src/renderer/services/TransactionServices.tsx
deleted file mode 100644
index aae180b..0000000
--- a/src/renderer/services/TransactionServices.tsx
+++ /dev/null
@@ -1,91 +0,0 @@
-const transactionUrl = process.env.TRANSACTION_SERVICE_API_URL;
-
-export async function findClientHistory(
-  did: string,
-  retryCount = 0
-) {
-  try {
-    const response = await fetch(`${transactionUrl}/find_client_history`, {
-      method: 'POST',
-      headers: {
-        'content-type': 'application/json',
-      },
-      body: JSON.stringify({ did }),
-    });
-    if (!response.ok) {
-      if (response.status === 401 && retryCount < 1) {
-        return findClientHistory(did, retryCount + 1);
-      }
-      throw new Error('Network response not ok');
-    }
-    return response;
-  } catch (error) {
-    if (error instanceof Error) {
-      console.error('There was a problem with the fetch operation:', error.message);
-      throw new Error(`There was a problem with the fetch operation: ${error.message}`);
-    } else {
-      console.error('Unknown Error:', error);
-      throw new Error('An unknown error occurred');
-    }
-  }
-}
-
-export async function findHostHistory(
-  did: string,
-  retryCount = 0
-) {
-  try {
-    const response = await fetch(`${transactionUrl}/find_host_history`, {
-      method: 'POST',
-      headers: {
-        'content-type': 'application/json',
-      },
-      body: JSON.stringify({ did }),
-    });
-    if (!response.ok) {
-      if (response.status === 401 && retryCount < 1) {
-        return findHostHistory(did, retryCount + 1);
-      }
-      throw new Error('Network response not ok');
-    }
-    return response;
-  } catch (error) {
-    if (error instanceof Error) {
-      console.error('There was a problem with the fetch operation:', error.message);
-      throw new Error(`There was a problem with the fetch operation: ${error.message}`);
-    } else {
-      console.error('Unknown Error:', error);
-      throw new Error('An unknown error occurred');
-    }
-  }
-}
-
-export async function findPoHistory(
-  did: string,
-  retryCount = 0
-) {
-  try {
-    const response = await fetch(`${transactionUrl}/find_po_history`, {
-      method: 'POST',
-      headers: {
-        'content-type': 'application/json',
-      },
-      body: JSON.stringify({ did }),
-    });
-    if (!response.ok) {
-      if (response.status === 401 && retryCount < 1) {
-        return findPoHistory(did, retryCount + 1);
-      }
-      throw new Error('Network response not ok');
-    }
-    return response;
-  } catch (error) {
-    if (error instanceof Error) {
-      console.error('There was a problem with the fetch operation:', error.message);
-      throw new Error(`There was a problem with the fetch operation: ${error.message}`);
-    } else {
-      console.error('Unknown Error:', error);
-      throw new Error('An unknown error occurred');
-    }
-  }
-}
diff --git a/src/renderer/utils/useClientHooks.tsx b/src/renderer/utils/useClientHooks.tsx
deleted file mode 100644
index 7a5f5bb..0000000
--- a/src/renderer/utils/useClientHooks.tsx
+++ /dev/null
@@ -1,76 +0,0 @@
-import { useEffect } from 'react';
-import {
-  handleRegisterClient,
-  handleDeregisterClient,
-} from 'renderer/components/componentsCommon/handleRegistration';
-import offloadTask from 'renderer/services/OffloadServices';
-import { useSelector } from 'react-redux';
-import { RootState } from 'renderer/redux/store';
-import Channels from 'common/channels';
-
-const useClientHooks = () => {
-  const isClient = useSelector(
-    (state: RootState) => state.userReducer.authenticated
-  );
-
-  useEffect(() => {
-    const registerClient = async () => {
-      try {
-        await handleRegisterClient();
-        window.electron.clientRegistered(true);
-      } catch (error) {
-        console.log(error);
-        window.electron.clientRegistered(false);
-      }
-    };
-
-    const deregisterClient = async () => {
-      await handleDeregisterClient();
-    };
-
-    window.electron.onRegisterClient(registerClient);
-    window.electron.onDeregisterClient(deregisterClient);
-  }, []);
-
-  useEffect(() => {
-    if (!isClient) return () => {};
-
-    // const updateJob = (id: string, status: string) => {
-    //   actions.addJob(id, status);
-    // };
-
-    // const updateJobResults = (id: string, result: string) => {
-    //   actions.addJobResults(id, result);
-    // };
-    const did = window.electron.store.get('did');
-
-    const offloadJob = async (jobJson: string) => {
-      // post task on chain
-
-      const jobJsonWithDid = `${jobJson.slice(0, -1)},"did":"${did}"}`;
-      const reply = await offloadTask(jobJsonWithDid);
-      const { status, response, error, task_id, transaction_id } = reply;
-
-      // window.electron.jobResultsReceived(
-      //   status,
-      //   response,
-      //   error,
-      //   task_id,
-      //   transaction_id
-      // );
-      // actions.addJob(id, status);
-    };
-
-    // window.electron.onSubscribeJobs(updateJob);
-    // window.electron.onSubscribeJobResults(updateJobResults);
-    window.electron.onOffloadJob(offloadJob);
-
-    return () => {
-      // window.electron.removeListener(Channels.JOB_RECEIVED, updateJob);
-      // window.electron.removeListener(Channels.JOB_RESULTS_RECEIVED, updateJobResults);
-      window.electron.removeListener(Channels.OFFLOAD_JOB, offloadJob);
-    };
-  }, [isClient]);
-};
-
-export default useClientHooks;
diff --git a/src/renderer/utils/useHandleAppExitHook.tsx b/src/renderer/utils/useHandleAppExitHook.tsx
index f5dc299..939ffed 100644
--- a/src/renderer/utils/useHandleAppExitHook.tsx
+++ b/src/renderer/utils/useHandleAppExitHook.tsx
@@ -1,11 +1,13 @@
 // import log from 'electron-log/renderer';
 import { useEffect } from 'react';
 import { stopExecutor } from 'renderer/services/ExecutorServices';
+import { ContainerName } from 'common/dockerNames';
 import { handleDeactivateHost } from '../components/componentsCommon/handleRegistration';
 
 const handleAppExit = async () => {
-  await stopExecutor();
   await handleDeactivateHost();
+  await window.electron.stopDockerContainer(ContainerName.PYMECA_SERVER_1);
+  await stopExecutor();
 };
 
 const useHandleAppExitHook = () => {
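Note: the exit hook now deactivates the host, stops the pymeca actor-server container through the new `stopDockerContainer` bridge, and only then stops the task executor. `ContainerName` comes from `common/dockerNames`, which is not shown in this diff; a sketch of the shape the hook relies on follows (the string value is an assumption).

```
// Hypothetical sketch of common/dockerNames.ts — only PYMECA_SERVER_1 is referenced
// by the exit hook above, and its string value here is a guess, not taken from the repo.
export enum ContainerName {
  PYMECA_SERVER_1 = 'pymeca_server_1',
}
```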