From 6db4df7f3fc38a2c4e78959226ca8757bea94ec9 Mon Sep 17 00:00:00 2001
From: Ankica Barisic <156663621+ankicabarisic@users.noreply.github.com>
Date: Mon, 19 Aug 2024 09:43:45 +0200
Subject: [PATCH] Doc/deployment (#86)

* SAL installation
* SAL usage
* SAL restart, deployment validation and debugging
* table of content
* update docker-compose.yaml
* update docker-compose.yaml
* comments addressed
* comments addressed
* spotless apply
---
 README.md                  | 495 ++++++++++++++++++++++++++++++++++---
 docker/docker-compose.yaml |   2 +
 documentation/README.md    |  94 +------
 3 files changed, 461 insertions(+), 130 deletions(-)

diff --git a/README.md b/README.md
index 160123f..e4f214d 100644
--- a/README.md
+++ b/README.md
@@ -8,98 +8,519 @@ Scheduling Abstraction Layer (SAL) is an abstraction layer initially developed a

## Table of Contents

- [1. Introduction](#1-introduction)
- [2. Installation](#2-installation)
- [3. Usage](#3-usage)
- [4. Contributing](#4-contributing)
- [5. License](#5-license)

## 1. Introduction

SAL is a project initially developed under the Morphemic project, part of the EU's Horizon 2020 initiative. Its development continued through the Nebulous EU project, part of the Horizon Europe initiative. It offers an abstraction layer on top of the ProActive Scheduler & Resource Manager, making it easier for users to interact with the scheduler and take advantage of its features. By seamlessly supporting REST calls and direct communication with the ProActive API, SAL lets users take full advantage of the scheduler's capabilities. Whether you want to use SAL as a microservice or deploy it as a Docker container, this repository provides the necessary resources to get you started.

## 2. Installation

SAL can be deployed in several ways: as a standalone microservice, within a Docker container, or as a Kubernetes pod. Detailed instructions for each deployment method are given below.

### 2.1. Deploying SAL as a Standalone Microservice

In this deployment approach, SAL runs directly on the host system using a Java runtime environment. You manage the environment, dependencies, and configuration yourself, so the deployment is limited by the capabilities of the host system, and scaling requires manually setting up additional instances. Networking relies on the host's settings, and external access and load balancing must also be configured manually. This approach is suitable for development, small-scale deployments, or situations where direct control over the runtime environment is necessary.

#### 2.1.1. Build and Run the Microservice

To use SAL as a microservice, follow these steps:

1. Clone the SAL repository:

```bash
# Pull the SAL project
git clone https://github.com/ow2-proactive/scheduling-abstraction-layer.git

# Go to your SAL folder
cd scheduling-abstraction-layer
```
2. Build the microservice:

```bash
# Apply Spotless code formatting, clean the project, and build with Gradle
./gradlew spotlessApply clean build --refresh-dependencies

# Add the '-x test' flag at the end to skip running tests during the build
```

The generated `.war` file will be located at `scheduling-abstraction-layer/sal-service/build/libs/scheduling-abstraction-layer-xxx.war`.

3. Run the microservice:

```bash
./gradlew bootRun
```

This command starts SAL as a microservice on its default port `8080` on your host.

#### 2.1.2. Client Library

The `sal-common` Java library provides class definitions for SAL concepts. It can be added to Gradle projects by adding the following to `build.gradle`:

```groovy
repositories {
    maven {
        url 'http://repository.activeeon.com/content/groups/proactive/'
        allowInsecureProtocol = true
    }
}

dependencies {
    // SAL client library
    implementation 'org.ow2.proactive:sal-common:13.1.0-SNAPSHOT'
}
```

### 2.2. Deploying SAL as a Docker Container

In this deployment approach, SAL runs inside a Docker container, providing a consistent environment across different systems. Management is handled via Docker commands or Docker Compose, with containerization isolating the application and its dependencies. SAL can scale across multiple containers on the same machine, but scalability remains limited to a single-node setup unless additional tooling is used. Docker manages networking, though more complex configurations may require manual setup. This method is ideal for consistent deployment across various environments, easier distribution, and basic scalability needs.

SAL can be deployed as a Docker container either by using a pre-built image or by building your own.

#### 2.2.1. Using Pre-Built SAL Docker Images

You can pull the latest or a specific version of the SAL Docker image from the remote Docker repository [DockerHub](https://hub.docker.com/r/activeeon/sal/tags):
- `activeeon/sal:dev`: the latest daily release of SAL.
- `activeeon/sal:dev-YYYY-MM-DD`: the version of SAL released on a particular date. Replace YYYY-MM-DD with the desired date to pull that specific version.

To pull an image:

```bash
docker pull activeeon/sal:dev
```

#### 2.2.2. Creating a Custom SAL Docker Image

To create your own Docker image for SAL:

1. Clone the Docker repository:

```bash
git clone https://github.com/ow2-proactive/docker
```

2. Copy the built `.war` file:

Copy the `.war` file generated in section 2.1.1 to the `docker/sal/artefacts` directory.

3. Build the Docker image:

Navigate to the `docker/sal` directory and build the image:

```bash
cd docker/sal
docker build -t activeeon/sal:test -f ./Dockerfile --no-cache .
```

4. Publish the Docker image:

```bash
docker push activeeon/sal:test
```

#### 2.2.3. Run SAL as a Docker Container

**Prerequisites:** Docker installed on your machine.
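The steps below use the Docker Compose file shipped with the repository. For a quick single-container test without Compose, a minimal sketch is shown here; it reuses the environment variable names from `docker/docker-compose.yaml` and assumes a MariaDB instance and a ProActive server are already reachable (the angle-bracket values are placeholders to adapt to your setup):

```bash
# Minimal sketch (not the official procedure): run the SAL image directly with docker,
# reusing the variable names defined in docker/docker-compose.yaml.
docker run -d --name sal \
  -p 8088:8080 -p 9001:9001 \
  -e PROPERTIES_FILENAME=sal \
  -e PWS_URL=<PROACTIVE_SERVER_URL> \
  -e PWS_USERNAME=<PROACTIVE_USERNAME> \
  -e PWS_PASSWORD=<PROACTIVE_PASSWORD> \
  -e DB_USERNAME=root \
  -e DB_PASSWORD=<MARIADB_ROOT_PASSWORD> \
  -e DB_DRIVER_CLASSNAME=org.mariadb.jdbc.Driver \
  -e DB_URL=jdbc:mariadb://<MARIADB_HOST>:3306/proactive \
  -e DB_PLATFORM=org.hibernate.dialect.MariaDB53Dialect \
  activeeon/sal:dev
```

For anything beyond a quick test, prefer the Docker Compose procedure below, which also starts and wires the MariaDB container.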
1. Edit the Docker Compose file:

* Open [docker-compose.yaml](https://github.com/ow2-proactive/scheduling-abstraction-layer/blob/master/docker/docker-compose.yaml).

* Set up the connection to the ProActive server (PWS):

```yaml
sal:
  # Set up the connection to the ProActive server (PWS)
  PWS_URL:
  PWS_USERNAME:
  PWS_PASSWORD:
```

* Set up which SAL image will be used:

```yaml
sal:
  # Set up the image to be used for SAL, from https://hub.docker.com/r/activeeon/sal/tags
  image: activeeon/sal:test
```

NOTE: It is possible to generate an image automatically from the `.war` file built in section 2.1.1. In this case the image tag (e.g. `test`) must not already exist in the DockerHub repository.

* Set up the SAL ports:

```yaml
sal:
  ports:
    - "8088:8080" # sal service port
    - "9001:9001" # sal-pda debugging service port
```

2. Build and start the containers:

Open a terminal, navigate to the directory containing your `docker-compose.yaml` file (e.g. `docker`), and start the containers:

```bash
cd docker
docker-compose up --build
```

NOTE: Make sure that containers from a previous run have been removed (see step 4).

3. Verify the deployment:

Check the status of the containers:

```bash
docker-compose ps
```

4. Stop and remove the containers:

```bash
docker-compose down
```

### 2.3. Deploying SAL as a Kubernetes Pod

In this deployment approach, SAL is deployed as a pod within a Kubernetes cluster, which offers advanced orchestration and management features. Kubernetes handles deployment, scaling, and operations across a cluster of nodes, providing native support for horizontal scaling, automatic load balancing, and self-healing. Its networking stack includes service discovery, Ingress controllers, and built-in load balancing. This method is suited to large-scale, production environments where high availability, scalability, and complex orchestration are required.

To deploy SAL on Kubernetes, use an existing image from the remote Docker repository [DockerHub](https://hub.docker.com/r/activeeon/sal/tags) or create one as described in section 2.2. You can then deploy this image as a Kubernetes pod.

**Prerequisites:** A Kubernetes cluster (local or cloud-based) and the kubectl CLI installed and configured.

1. Edit the Kubernetes deployment and service manifests:

Edit [sal.yaml](https://github.com/ow2-proactive/scheduling-abstraction-layer/blob/master/deployment/sal.yaml). Set up the ProActive connection, SAL image, and ports as described in section 2.2.3, step 1.

NOTE: Update `/path/to/scripts` to the path where your scripts are located on the host machine.

2. Deploy to Kubernetes:

Apply the deployment and service manifests to your Kubernetes cluster:

```bash
kubectl apply -f sal.yaml
```

3. Verify the deployment:

* Check the status of the pods:

```bash
kubectl get pods
```

* Check the status of the services:

```bash
kubectl get services
```

* Access SAL using the service's external IP or via a port forward:

```bash
kubectl port-forward service/sal-service 8080:8080
```

4. Clean up:

To delete the deployment and service:

```bash
kubectl delete -f sal.yaml
```

## 3. Usage

Once SAL is deployed, you can interact with it via its REST API, monitor its operation, and view logs to ensure everything is functioning correctly. Here is how to use SAL effectively.
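The examples that follow assume SAL is reachable at `http://localhost:8080`. The actual base URL depends on the deployment method chosen in section 2; a quick reference (port values taken from the deployment files above, `sal-service` being the assumed Kubernetes service name):

```bash
# Standalone microservice (section 2.1): SAL listens on its default port
BASE_URL="http://localhost:8080"

# Docker Compose (section 2.2.3): the compose file maps container port 8080 to host port 8088,
# so use BASE_URL="http://localhost:8088" instead.

# Kubernetes (section 2.3): forward the service port first, then keep the localhost URL:
#   kubectl port-forward service/sal-service 8080:8080
```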
### 3.1. Using SAL REST Endpoints

SAL exposes a set of REST API endpoints that serve as the interface to the ProActive Scheduler & Resource Manager. For detailed information on each endpoint, see the [endpoint documentation](https://github.com/ow2-proactive/scheduling-abstraction-layer/blob/master/documentation/README.md).

To illustrate the usage, the [Connect](https://github.com/ow2-proactive/scheduling-abstraction-layer/blob/master/documentation/1-connection-endpoints.md#11--connect-endpoint) endpoint is used below. The SAL protocol, host, and port, as well as the ProActive username and password, should be set as configured in the deployment `.yaml` files.

Below are instructions for connecting to the ProActive server, using tools such as Postman or cURL.

#### 3.1.1. Using Postman

Download and install [Postman](https://www.postman.com/) if you haven't already.

* Set up the request:
  * URL: `http://localhost:8080/sal/pagateway/connect`
  * Method: POST
  * Headers: none
  * Body:

```json
{
  "username": "ProActive server username",
  "password": "ProActive server password"
}
```

* Send the request: click the "Send" button to execute the request and review the response.
* Reply: a plain-text response containing the session ID.

#### 3.1.2. Using cURL

* Open a terminal.
* Execute the cURL command:

```bash
curl -X POST "http://localhost:8080/sal/pagateway/connect" \
  --header "Content-Type: application/json" \
  -d '{"username": "ProActive server username", "password": "ProActive server password"}'
```

* Reply: a plain-text response containing the session ID.

### 3.2. View SAL Logs

#### 3.2.1. View Logs for SAL Deployed as a Docker Container

When SAL is deployed as a Docker container as in section 2.2, you can view its logs using Docker commands.

* Launch your command line interface (CLI).
* List the running containers to find the container name or ID:

```bash
docker ps
```

Look for the container name `myComposeSAL`, or the name you used.

* View the general SAL logs:

```bash
docker logs myComposeSAL
```

* View the general SAL database logs:

```bash
docker logs myComposeMariaDB
```

* View the detailed SAL logs inside the container:

```bash
docker exec -it myComposeSAL /bin/bash  # Enter the SAL container's shell
cd logs
cat scheduling-abstraction-layer.log    # View the detailed logs
```

* Query the SAL database (MariaDB):

```bash
docker exec -it myComposeMariaDB /bin/bash
# Replace <MYSQL_ROOT_PASSWORD> with the password you've set for the MariaDB root user
mariadb -uroot -p<MYSQL_ROOT_PASSWORD> proactive
```

#### 3.2.2. View Logs for SAL Deployed as a Kubernetes Pod

When SAL is deployed as a Kubernetes pod, you can access the logs using `kubectl` commands.

* Get the name of the SAL pod:

```bash
kubectl get po -o wide
```

* View the general SAL logs:

```bash
# Replace <namespace> with the appropriate namespace and <sal-pod-name> with the pod name obtained in the previous step
kubectl -n <namespace> logs <sal-pod-name>
```

* View the detailed SAL logs inside the container:

```bash
kubectl exec -it <sal-pod-name> -c sal -- /bin/bash  # Enter the SAL container's shell
cd logs
cat scheduling-abstraction-layer.log
```

* Query the SAL database (MariaDB):

```bash
# Replace <MYSQL_ROOT_PASSWORD> with the appropriate MariaDB root password
kubectl exec -it <sal-pod-name> -c mariadb -- mariadb -uroot -p<MYSQL_ROOT_PASSWORD> proactive
```
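If you only need a quick look at the database without opening an interactive shell, the same clients can be driven non-interactively; a sketch (container, pod, and password placeholders as above; the exact table set depends on the SAL version):

```bash
# Docker Compose deployment: list the tables in the 'proactive' database
docker exec -i myComposeMariaDB mariadb -uroot -p<MYSQL_ROOT_PASSWORD> proactive -e "SHOW TABLES;"

# Kubernetes deployment: same query through kubectl
kubectl exec <sal-pod-name> -c mariadb -- mariadb -uroot -p<MYSQL_ROOT_PASSWORD> proactive -e "SHOW TABLES;"
```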
### 3.3. Restarting SAL Service

Restarting the SAL service can be necessary to apply configuration changes, recover from issues, or perform routine maintenance. Note that on restart the SAL logs and database are erased. Below are the instructions for restarting SAL, both when deployed as a Docker container and as a Kubernetes pod.

#### 3.3.1. Restarting SAL as a Docker Container

If SAL is deployed as a Docker container using `docker-compose.yaml`, you can restart the service using the following methods:

1. Restart a specific container:

To restart just the SAL container without affecting other services (such as MariaDB):

```bash
docker restart myComposeSAL
```

2. Restart all services in Docker Compose:

To restart all services defined in your `docker-compose.yaml` (including the database):

```bash
docker-compose restart
```

3. Rebuild and restart SAL:

If you need to apply changes to the Docker image or configuration:

```bash
docker-compose up --build -d
```

This command rebuilds the containers if necessary and restarts them in detached mode.

#### 3.3.2. Restarting SAL as a Kubernetes Pod

When SAL is deployed as a Kubernetes pod, you can restart the service using the following methods:

1. Rolling restart (preferred method):

Kubernetes allows a rolling restart, which replaces the pods one by one without downtime:

```bash
# Replace sal-deployment with the actual name of your SAL deployment
kubectl rollout restart deployment/sal-deployment
```

2. Manual pod deletion:

Alternatively, you can delete the existing pod(s) manually, and Kubernetes will automatically recreate them:

```bash
kubectl get pods
# Copy the SAL pod name, then delete it:
kubectl delete pod <sal-pod-name>
```

Kubernetes will automatically recreate the pod using the existing deployment configuration.

3. Scaling the deployment to zero and back:

Another way to restart SAL in Kubernetes is to scale the deployment down to zero replicas and then back up to the desired number of replicas. This effectively stops and restarts the pods:

```bash
kubectl get deployment
# Replace <namespace> with the appropriate namespace (e.g. nebulous-cd) and <sal-deployment> with your SAL deployment name
kubectl scale -n <namespace> --replicas=0 deploy/<sal-deployment>
kubectl scale -n <namespace> --replicas=1 deploy/<sal-deployment>
```
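Whichever restart method you use, it is worth confirming that SAL is back up before sending new requests; a short sketch (container, namespace, and deployment names as in the sections above):

```bash
# Docker Compose deployment: follow the SAL container logs while it starts
docker logs -f myComposeSAL

# Kubernetes deployment: wait for the rollout to complete
kubectl rollout status -n <namespace> deployment/<sal-deployment>
```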
### 3.4. Checking SAL Deployment

Below are steps for checking a SAL deployment, including image versions, container status, and overall health, for both Docker Compose and Kubernetes deployments.

#### 3.4.1. Checking SAL Deployment in Docker Compose

1. Check the running containers:

Ensure all services defined in your `docker-compose.yaml` file are running:

```bash
docker-compose ps
```

This command shows the status of each container, including whether it is up and running.

2. Check the image versions:

Verify that the correct image and version is used for each service:

```bash
# Replace <container_name> with the container's name or ID (e.g. myComposeSAL, myComposeMariaDB)
docker inspect <container_name> | grep Image
```

This displays the image used by each container.

3. Check the health of the containers:

If a health check is defined in the `docker-compose.yaml` (as in the MariaDB service):

```bash
docker inspect --format='{{json .State.Health.Status}}' <container_name>
```

This shows whether the container is healthy, unhealthy, or still starting.

#### 3.4.2. Checking SAL Deployment in Kubernetes

For Kubernetes, more advanced checks are available because Kubernetes acts as an orchestrator.

1. Check the running pods:

List all running pods and their statuses in the relevant namespace:

```bash
# Replace <namespace> with the appropriate namespace
kubectl get pods -n <namespace>
```

This command shows whether the pods are running, pending, or in error.

2. Check the deployment details:

Verify that the correct images are being used in your deployment:

```bash
kubectl get deploy -n <namespace> -o yaml | grep image
```

This command extracts the image versions specified in the deployment YAML.

3. Check pod health and status:

Check the status and details of the running pods:

```bash
kubectl describe pod <sal-pod-name> -n <namespace>
```

This provides a detailed description of the pod's state, including events, container statuses, and any errors.

4. Check the service endpoints:

Ensure that the SAL services are correctly exposed and accessible:

```bash
kubectl get svc -n <namespace>
```

This command lists all services in the namespace, showing their external IPs, ports, and status.

5. Check resource utilization:

Monitor resource usage to ensure the deployment is operating within expected parameters:

```bash
kubectl top pods -n <namespace>
```

This shows CPU and memory usage, helping you identify resource constraints or anomalies.
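For routine verification, the Kubernetes checks above can be bundled into a small helper script; a sketch (the `NS` variable and the resource names are assumptions to adapt to your cluster):

```bash
#!/usr/bin/env bash
# Sketch: quick health overview of a SAL deployment in Kubernetes
NS="${NS:-default}"   # namespace to inspect (assumption)

kubectl get pods -n "$NS"                            # pod status
kubectl get svc -n "$NS"                             # exposed services
kubectl get deploy -n "$NS" -o yaml | grep image     # image versions in use
kubectl top pods -n "$NS" || echo "metrics-server not available, skipping resource usage"
```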
### 3.5. Debugging SAL

1. Ensure that SAL is running:

SAL needs to be deployed and prepared for usage as described in section 2.

* **SAL in Docker:**

The `docker-compose.yaml` file includes a debugging service for SAL, exposing port 9001 by default (see section 2.2). This port is configured for remote debugging via the Java Debug Wire Protocol (JDWP), so it is sufficient that the SAL container is running.

* **SAL as a Kubernetes pod:**

The `sal.yaml` file for Kubernetes also configures the debugging service, exposing port 9001 by default (see section 2.3). To access the debugging port on your local machine, set up port forwarding from the Kubernetes pod to your local machine:

```bash
# If SAL is not deployed as "sal", replace it with the actual name of your SAL deployment
kubectl port-forward deployment/sal 9001:9001
```

Another approach is to use the sal-pda service, which is deployed by default by `sal.yaml`:

```bash
kubectl get services
# Use the actual name of the sal-pda service and the ports defined in the deployment script
kubectl port-forward service/sal-pda-service 9001:9001
```

2. Configure your IDE for remote debugging:

In IntelliJ IDEA:
* Go to Run > Edit Configurations.
* Click the + button and select Remote JVM Debug.
* Set the Host to `localhost` and the Port to `9001`, or to the port used for your SAL deployment. If there is a problem, `localhost` can be replaced with your IP address.
* Set the Debugger mode to "Attach to remote JVM".
* Click Apply and then OK.

3. Start debugging:

With your IDE configured, you can now start a debugging session. The following message should appear:

```bash
Connected to the target VM, address: 'localhost:9001', transport: 'socket'
```

Use the SAL endpoints as described in section 3.1 and set breakpoints in your code. As the SAL service executes, your IDE will stop at these breakpoints, allowing you to inspect variables, step through code, and diagnose issues. During debugging, it is advised to also check the SAL logs as described in section 3.2.
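For reference, the `JPDA_TRANSPORT`/`JPDA_ADDRESS` variables in `docker-compose.yaml` typically correspond to a standard JDWP agent option of the form shown in the comment below (a sketch; the exact option baked into the SAL image may differ). When running SAL as a standalone microservice (section 2.1), Gradle can open a debug port itself; this is standard Gradle behavior rather than a SAL-specific feature:

```bash
# JPDA_TRANSPORT=dt_socket / JPDA_ADDRESS=9001 correspond to a JDWP agent option of this form:
#   -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=9001
#
# Standalone microservice: let Gradle open a debug port itself
# (listens on port 5005 by default and suspends until a debugger attaches)
./gradlew bootRun --debug-jvm
```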
## 4. Contributing

Contributions to SAL are welcome! If you have any bug fixes, improvements, or new features to propose, please feel free to open a pull request. For major changes, it is recommended to discuss your ideas with the maintainers first.

## 5. License

Scheduling Abstraction Layer (SAL) is distributed under the [MIT License](https://github.com/ow2-proactive/scheduling-abstraction-layer/blob/master/LICENSE). Please see the [LICENSE](https://github.com/ow2-proactive/scheduling-abstraction-layer/blob/master/LICENSE) file for more information. Note that using SAL requires a license for the [ProActive Scheduler & Resource Manager](https://proactive.activeeon.com/).

---

Thank you for using Scheduling Abstraction Layer (SAL)! If you encounter any issues or have questions, please feel free to open an issue in the repository. We hope SAL enhances your experience with ProActive Scheduler & Resource Manager!

diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml
index cbe233f..290b81a 100644
--- a/docker/docker-compose.yaml
+++ b/docker/docker-compose.yaml
@@ -27,7 +27,9 @@ services:
       database:
         condition: service_healthy
     ports:
+      #Port for sal service
       - "8088:8080"
+      #Port for sal-pda debugging service
      - "9001:9001"
     links:
       - "database:myComposeMariaDB"

diff --git a/documentation/README.md b/documentation/README.md
index 6278dde..c0ea7a8 100644
--- a/documentation/README.md
+++ b/documentation/README.md
@@ -1,96 +1,4 @@
-# Scheduling Abstraction Layer (SAL)
-
-Scheduling Abstraction Layer (SAL) is an abstraction layer developed as part of the EU project Morphemic. SAL aims to enhance the usability of ProActive Scheduler & Resource Manager by providing abstraction and additional features.
-
-## Table of Contents
-
-* [Introduction](https://openproject.nebulouscloud.eu/projects/nebulous-collaboration-hub/wiki/deployment-manager-sal-1/#introduction)
-* [Installation](https://openproject.nebulouscloud.eu/projects/nebulous-collaboration-hub/wiki/deployment-manager-sal-1/#installation)
-* [Endpoints](https://openproject.nebulouscloud.eu/projects/nebulous-collaboration-hub/wiki/deployment-manager-sal-1/#endpoints)
-* [License](https://openproject.nebulouscloud.eu/projects/nebulous-collaboration-hub/wiki/deployment-manager-sal-1/#license)
-
-## Introduction
-
-SAL is a project developed under the Morphemic project, part of the EU's Horizon 2020 initiative. It offers an abstraction layer on top of the ProActive Scheduler & Resource Manager, making it easier for users to interact with the scheduler and take advantage of its features. Whether you want to use SAL as a microservice or deploy it as a Docker container, this repository provides the necessary resources to get you started.
-
-## Installation
-
-SAL can be used either as a standalone microservice or as a Docker container. Choose the approach that best suits your requirements.
-
-For running SAL via docker-compose in front of an already-running ProActive server, use the following docker-compose file (adapted from [https://raw.githubusercontent.com/ow2-proactive/docker/master/sal/docker-compose.yaml](https://raw.githubusercontent.com/ow2-proactive/docker/master/sal/docker-compose.yaml)).
-
-```yaml
-# Place login information into a file named `.env`, it should contain the following:
-# MYSQL_ROOT_PASSWORD=
-# PWS_URL=
-# PWS_USERNAME=
-# PWS_PASSWORD=
-services:
-  database:
-    image: mariadb
-    ports:
-      - "3307:3306"
-    networks:
-      - db-tier
-    environment:
-      MYSQL_ROOT_PASSWORD: ${MYSQL_ROOT_PASSWORD}
-      MYSQL_DATABASE: proactive
-    container_name: myComposeMariaDB
-    healthcheck:
-      test: [ "CMD", "mariadb-admin" , "ping", "-h", "localhost", "--password=${MYSQL_ROOT_PASSWORD}" ]
-      interval: 5s
-      timeout: 5s
-      retries: 5
-  sal:
-    image: activeeon/sal:latest
-    depends_on:
-      database:
-        condition: service_healthy
-    ports:
-      - "8088:8080"
-      - "9001:9001"
-    links:
-      - "database:myComposeMariaDB"
-    networks:
-      - db-tier
-    environment:
-      PROPERTIES_FILENAME: sal
-      PWS_URL: ${PWS_URL}
-      PWS_USERNAME: ${PWS_USERNAME}
-      PWS_PASSWORD: ${PWS_PASSWORD}
-      DB_USERNAME: root
-      DB_PASSWORD: ${MYSQL_ROOT_PASSWORD}
-      DB_DRIVER_CLASSNAME: org.mariadb.jdbc.Driver
-      DB_URL: jdbc:mariadb://myComposeMariaDB:3306/proactive
-      DB_PLATFORM: org.hibernate.dialect.MariaDB53Dialect
-      JPDA_ADDRESS: 9001
-      JPDA_TRANSPORT: dt_socket
-    container_name: myComposeSAL
-
-networks:
-  # The presence of these objects is sufficient to define them
-  db-tier: {}
-```
-
-## Client Library
-
-The `sal-common` Java library provides class definitions for SAL concepts. It can be added to gradle projects by adding the following into `build.gradle`:
-
-```groovy
-repositories {
-
-    maven {
-        url 'http://repository.activeeon.com/content/groups/proactive/'
-        allowInsecureProtocol = true
-    }
-}
-dependencies {
-    // SAL client library
-    implementation 'org.ow2.proactive:sal-common:13.1.0-SNAPSHOT'
-}
-```
-
-## Endpoints
+# Scheduling Abstraction Layer (SAL) Endpoints
 
 SAL provides multiple endpoints that you can use to interact with the ProActive Scheduler & Resource Manager: