From 952f4e06244dc15bcddcbecb908ea36fd06f97e4 Mon Sep 17 00:00:00 2001 From: Ankica Barisic <156663621+ankicabarisic@users.noreply.github.com> Date: Mon, 4 Nov 2024 14:20:52 +0100 Subject: [PATCH] Remove cloud (#105) * removing deployed regions over iaas * status vars * remove cloud documentation (cherry picked from commit 03f3ef1a3d5ee3888236757be1593c761aa2a56d) --- endpoints/2-cloud-endpoints.md | 10 +++- .../sal/service/service/CloudService.java | 9 ++- .../sal/service/service/ClusterService.java | 55 +++++++++++-------- 3 files changed, 46 insertions(+), 28 deletions(-) diff --git a/endpoints/2-cloud-endpoints.md b/endpoints/2-cloud-endpoints.md index af0cd9a..6585c27 100644 --- a/endpoints/2-cloud-endpoints.md +++ b/endpoints/2-cloud-endpoints.md @@ -212,15 +212,19 @@ Contains authentication details for accessing the cloud. The fields are: #### 2.7- RemoveClouds endpoint: -**Description**: An endpoint to get all the remove a list of defined clouds. +**Description**: This endpoint removes a specified list of cloud infrastructures. **Path:** ```url 🔴 DEL {{protocol}}://{{sal_host}}:{{sal_port}}/sal/cloud/remove ``` +**Path Variable (optional):** `preempt` = Boolean -**Headers:** `sessionid` +- `true`: Removes all deployed nodes within the specified cloud infrastructures. +- `false`: (default) Removes only the specified cloud infrastructures without affecting deployed nodes. + +**Headers (optional):** `sessionid` **Body:** @@ -230,4 +234,4 @@ Contains authentication details for accessing the cloud. The fields are: "{{cloud_name2}}" ] ``` -**Reply:** Boolean, True if cloud infrastructure was removed. False, otherwise. +**Reply:** Boolean, `true` if cloud infrastructure was removed. `false`, otherwise. 
diff --git a/sal-service/src/main/java/org/ow2/proactive/sal/service/service/CloudService.java b/sal-service/src/main/java/org/ow2/proactive/sal/service/service/CloudService.java index 3cd1170..3a7f9b1 100644 --- a/sal-service/src/main/java/org/ow2/proactive/sal/service/service/CloudService.java +++ b/sal-service/src/main/java/org/ow2/proactive/sal/service/service/CloudService.java @@ -7,7 +7,6 @@ import java.util.*; import java.util.concurrent.Future; -import java.util.function.Predicate; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -56,6 +55,8 @@ public class CloudService { @Autowired private RepositoryService repositoryService; + private static final String DUMMY_INFRA_NAME_TEMPLATE = "iamadummy%s_%s"; + /** * Add clouds to the ProActive Resource Manager * @param sessionId A valid session id @@ -96,7 +97,9 @@ public Integer addClouds(String sessionId, List clouds) throws repositoryService.saveCredentials(credentials); newCloud.setCredentials(credentials); - String dummyInfraName = "iamadummy" + newCloud.getCloudProviderName() + "_" + newCloud.getCloudId(); + String dummyInfraName = String.format(DUMMY_INFRA_NAME_TEMPLATE, + newCloud.getCloudProviderName(), + newCloud.getCloudId()); connectorIaasGateway.defineInfrastructure(dummyInfraName, newCloud, ""); newCloud.setDummyInfrastructureName(dummyInfraName); @@ -282,7 +285,7 @@ public Boolean removeIaasCloudNS(String sessionId, PACloud cloud, Boolean preemp Boolean flag = true; for (Map.Entry entry : cloud.getDeployedRegions().entrySet()) { try { - String nodeSourceName = cloud.getNodeSourceNamePrefix() + entry.getKey(); + String nodeSourceName = cloud.getNodeSourceNamePrefix() + "-" + entry.getKey(); LOGGER.info("Removing IAAS node source \"{}\" from the ProActive server.", nodeSourceName); resourceManagerGateway.removeNodeSource(nodeSourceName, preempt); } catch (NotConnectedException | PermissionRestException | IllegalArgumentException e) { diff --git 
a/sal-service/src/main/java/org/ow2/proactive/sal/service/service/ClusterService.java b/sal-service/src/main/java/org/ow2/proactive/sal/service/service/ClusterService.java index e9e459e..3918f0c 100644 --- a/sal-service/src/main/java/org/ow2/proactive/sal/service/service/ClusterService.java +++ b/sal-service/src/main/java/org/ow2/proactive/sal/service/service/ClusterService.java @@ -8,11 +8,9 @@ import java.io.IOException; import java.io.Serializable; import java.util.*; -import java.util.stream.Collectors; import org.apache.commons.lang3.Validate; import org.ow2.proactive.sal.model.*; -import org.ow2.proactive.sal.service.nc.NodeCandidateUtils; import org.ow2.proactive.sal.service.util.ByonUtils; import org.ow2.proactive.sal.service.util.ClusterUtils; import org.ow2.proactive.scheduler.common.exception.NotConnectedException; @@ -38,7 +36,20 @@ public class ClusterService { private JobService jobService; @Autowired - private EdgeService edgeServie; + private EdgeService edgeService; + + // Define cluster state constants + private static final String STATUS_DEFINED = "defined"; + + private static final String STATUS_DEPLOYED = "deployed"; + + private static final String STATUS_RUNNING = "running"; + + private static final String STATUS_FAILED = "failed"; + + private static final String STATUS_SUBMITTED = "submitted"; // New status + + private static final String STATUS_SCALING = "scaling"; public boolean defineCluster(String sessionId, ClusterDefinition clusterDefinition) throws NotConnectedException, IOException { @@ -55,7 +66,7 @@ public boolean defineCluster(String sessionId, ClusterDefinition clusterDefiniti Cluster newCluster = new Cluster(); newCluster.setName(clusterDefinition.getName()); newCluster.setMasterNode(clusterDefinition.getMasterNode()); - newCluster.setStatus("defined"); + newCluster.setStatus(STATUS_DEFINED); newCluster.setEnvVars(ClusterUtils.createEnvVarsScript(clusterDefinition.getEnvVars())); clusterDefinition.getNodes() 
.forEach(clusterNodeDef -> repositoryService.saveClusterNodeDefinition(clusterNodeDef)); @@ -100,7 +111,7 @@ public boolean defineCluster(String sessionId, ClusterDefinition clusterDefiniti repositoryService.saveJob(workerNodeJob); // Map edgeNodeMap = new HashMap<>(); // edgeNodeMap.put(edgeNode.getId(), edgeNode.getName() + "/_Task"); - // edgeServie.addEdgeNodes(sessionId, edgeNodeMap, jobId); + // edgeService.addEdgeNodes(sessionId, edgeNodeMap, jobId); } else { PACloud cloud = repositoryService.getPACloud(node.getCloudId()); Job workerNodeJob = ClusterUtils.createWorkerNodeJob(newCluster.getName(), @@ -124,28 +135,28 @@ public boolean deployCluster(String sessionId, String clusterName) throws NotCon } Validate.notNull(clusterName, "The received clusterName is empty. Nothing to be defined."); LOGGER.info("deployCluster endpoint is called to deploy the cluster: " + clusterName); - Cluster toDeployClutser = ClusterUtils.getClusterByName(clusterName, repositoryService.listCluster()); + Cluster toDeployCluster = ClusterUtils.getClusterByName(clusterName, repositoryService.listCluster()); // add nodes - if (toDeployClutser == null) { + if (toDeployCluster == null) { LOGGER.error("No Cluster was found! 
Nothing is deployed!"); return false; } else { - List workerNodes = ClusterUtils.getWrokerNodes(toDeployClutser); - LOGGER.info("Deploying the master node of the cluster [{}]", toDeployClutser.getName()); - submitClutserNode(sessionId, toDeployClutser, toDeployClutser.getMasterNode(), false); - LOGGER.info("Deploying the worker nodes of the cluster [{}]", toDeployClutser.getName()); + List workerNodes = ClusterUtils.getWrokerNodes(toDeployCluster); + LOGGER.info("Deploying the master node of the cluster [{}]", toDeployCluster.getName()); + submitClusterNode(sessionId, toDeployCluster, toDeployCluster.getMasterNode(), false); + LOGGER.info("Deploying the worker nodes of the cluster [{}]", toDeployCluster.getName()); for (ClusterNodeDefinition node : workerNodes) { - submitClutserNode(sessionId, toDeployClutser, node.getName(), true); + submitClusterNode(sessionId, toDeployCluster, node.getName(), true); } - toDeployClutser.setStatus("submited"); - repositoryService.saveCluster(toDeployClutser); + toDeployCluster.setStatus(STATUS_SUBMITTED); + repositoryService.saveCluster(toDeployCluster); repositoryService.flush(); } return true; } - private void submitClutserNode(String sessionId, Cluster cluster, String nodeName, boolean worker) + private void submitClusterNode(String sessionId, Cluster cluster, String nodeName, boolean worker) throws NotConnectedException { LOGGER.info("Deploying the node {}...", nodeName); ClusterNodeDefinition node = ClusterUtils.getNodeByName(cluster, nodeName); @@ -157,7 +168,7 @@ private void submitClutserNode(String sessionId, Cluster cluster, String nodeNam EdgeNode edgeNode = ByonUtils.getEdgeNodeFromNC(nc); Map edgeNodeMap = new HashMap<>(); edgeNodeMap.put(edgeNode.getId(), edgeNode.getName() + "/_Task"); - edgeServie.addEdgeNodes(sessionId, edgeNodeMap, jobId); + edgeService.addEdgeNodes(sessionId, edgeNodeMap, jobId); currentDeployment = repositoryService.getDeployment(edgeNode.getName()); } else { List defs = 
ClusterUtils.getNodeIaasDefinition(sessionId, cluster, node.getName()); @@ -196,17 +207,17 @@ public Cluster getCluster(String sessionId, String clusterName) throws NotConnec node.setState(state.getJobStatus().toString()); states.add(state.getJobStatus().toString()); } else { - node.setState("defined"); + node.setState(STATUS_DEFINED); } nodes.set(i, node); i += 1; node.setNodeUrl(getNodeUrl(sessionId, clusterName, node)); } if (states.contains("In-Error") || states.contains("Failed") || states.contains("Canceled")) { - getCluster.setStatus("failed"); + getCluster.setStatus(STATUS_FAILED); } else { if (checkAllStates(states)) { - getCluster.setStatus("deployed"); + getCluster.setStatus(STATUS_DEPLOYED); } } getCluster.setNodes(nodes); @@ -229,7 +240,7 @@ public Cluster scaleOutCluster(String sessionId, String clusterName, List repositoryService.saveClusterNodeDefinition(clusterNodeDef)); toScaleCluster.setNodes(newList); - toScaleCluster.setStatus("scaling"); + toScaleCluster.setStatus(STATUS_SCALING); repositoryService.saveCluster(toScaleCluster); LOGGER.info("Scaling out the worker nodes of the cluster [{}]", clusterName); for (ClusterNodeDefinition node : newNodes) { @@ -243,7 +254,7 @@ public Cluster scaleOutCluster(String sessionId, String clusterName, List } } toScaleCluster.setNodes(clusterNodes); - toScaleCluster.setStatus("scaling"); + toScaleCluster.setStatus(STATUS_SCALING); clusterNodes.forEach(clusterNodeDef -> repositoryService.saveClusterNodeDefinition(clusterNodeDef)); repositoryService.saveCluster(toScaleCluster); repositoryService.flush();