Remove cloud (#105)
* removing deployed regions over iaas

* status vars

* remove cloud documentation

(cherry picked from commit 03f3ef1)
ankicabarisic authored and fviale committed Nov 23, 2024
1 parent 5465696 commit 952f4e0
Showing 3 changed files with 46 additions and 28 deletions.
10 changes: 7 additions & 3 deletions endpoints/2-cloud-endpoints.md
@@ -212,15 +212,19 @@ Contains authentication details for accessing the cloud. The fields are:

#### 2.7- RemoveClouds endpoint:

**Description**: An endpoint to get all the remove a list of defined clouds.
**Description**: This endpoint removes a specified list of cloud infrastructures.

**Path:**

```url
🔴 DEL {{protocol}}://{{sal_host}}:{{sal_port}}/sal/cloud/remove
```
**Path Variable (optional):** `preempt` = Boolean

**Headers:** `sessionid`
- `true`: Removes all deployed nodes within the specified cloud infrastructures.
- `false` (default): Removes only the specified cloud infrastructures without affecting deployed nodes.

**Headers (optional):** `sessionid`

**Body:**

@@ -230,4 +234,4 @@ Contains authentication details for accessing the cloud. The fields are:
"{{cloud_name2}}"
]
```
**Reply:** Boolean, True if cloud infrastructure was removed. False, otherwise.
**Reply:** Boolean, `true` if cloud infrastructure was removed. `false`, otherwise.
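
For reference, a minimal Java 11+ sketch of calling this endpoint is shown below. The host, port, session id, and cloud names are placeholders, and the sketch assumes the optional `preempt` flag is sent as a query parameter and the body as a JSON array; adjust these assumptions to your actual SAL deployment.

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class RemoveCloudsExample {
    public static void main(String[] args) throws Exception {
        // Placeholder values for illustration only.
        String salHost = "localhost";      // {{sal_host}}
        int salPort = 8080;                // {{sal_port}}
        String sessionId = "<session-id>"; // obtained beforehand from the SAL login endpoint

        // JSON array of cloud names to remove, mirroring the documented request body.
        String body = "[\"cloud_name1\", \"cloud_name2\"]";

        // Assumption: `preempt` is passed as a query parameter; the documentation
        // only states it is an optional Boolean.
        URI uri = URI.create(String.format("http://%s:%d/sal/cloud/remove?preempt=false",
                                           salHost, salPort));

        HttpRequest request = HttpRequest.newBuilder()
                .uri(uri)
                .header("sessionid", sessionId)
                .header("Content-Type", "application/json")
                .method("DELETE", HttpRequest.BodyPublishers.ofString(body))
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());

        System.out.println("Removed: " + response.body()); // expected "true" or "false"
    }
}
```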
CloudService.java
@@ -7,7 +7,6 @@

import java.util.*;
import java.util.concurrent.Future;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

@@ -56,6 +55,8 @@ public class CloudService {
@Autowired
private RepositoryService repositoryService;

private static final String DUMMY_INFRA_NAME_TEMPLATE = "iamadummy%s_%s";

/**
* Add clouds to the ProActive Resource Manager
* @param sessionId A valid session id
@@ -96,7 +97,9 @@ public Integer addClouds(String sessionId, List<CloudDefinition> clouds) throws
repositoryService.saveCredentials(credentials);
newCloud.setCredentials(credentials);

String dummyInfraName = "iamadummy" + newCloud.getCloudProviderName() + "_" + newCloud.getCloudId();
String dummyInfraName = String.format(DUMMY_INFRA_NAME_TEMPLATE,
newCloud.getCloudProviderName(),
newCloud.getCloudId());
connectorIaasGateway.defineInfrastructure(dummyInfraName, newCloud, "");
newCloud.setDummyInfrastructureName(dummyInfraName);

@@ -282,7 +285,7 @@ public Boolean removeIaasCloudNS(String sessionId, PACloud cloud, Boolean preemp
Boolean flag = true;
for (Map.Entry<String, String> entry : cloud.getDeployedRegions().entrySet()) {
try {
String nodeSourceName = cloud.getNodeSourceNamePrefix() + entry.getKey();
String nodeSourceName = cloud.getNodeSourceNamePrefix() + "-" + entry.getKey();
LOGGER.info("Removing IAAS node source \"{}\" from the ProActive server.", nodeSourceName);
resourceManagerGateway.removeNodeSource(nodeSourceName, preempt);
} catch (NotConnectedException | PermissionRestException | IllegalArgumentException e) {
ClusterService.java
@@ -8,11 +8,9 @@
import java.io.IOException;
import java.io.Serializable;
import java.util.*;
import java.util.stream.Collectors;

import org.apache.commons.lang3.Validate;
import org.ow2.proactive.sal.model.*;
import org.ow2.proactive.sal.service.nc.NodeCandidateUtils;
import org.ow2.proactive.sal.service.util.ByonUtils;
import org.ow2.proactive.sal.service.util.ClusterUtils;
import org.ow2.proactive.scheduler.common.exception.NotConnectedException;
@@ -38,7 +36,20 @@ public class ClusterService {
private JobService jobService;

@Autowired
private EdgeService edgeServie;
private EdgeService edgeService;

// Define cluster state constants
private static final String STATUS_DEFINED = "defined";

private static final String STATUS_DEPLOYED = "deployed";

private static final String STATUS_RUNNING = "running";

private static final String STATUS_FAILED = "failed";

private static final String STATUS_SUBMITTED = "submitted"; // New status

private static final String STATUS_SCALING = "scaling";

public boolean defineCluster(String sessionId, ClusterDefinition clusterDefinition)
throws NotConnectedException, IOException {
@@ -55,7 +66,7 @@ public boolean defineCluster(String sessionId, ClusterDefinition clusterDefiniti
Cluster newCluster = new Cluster();
newCluster.setName(clusterDefinition.getName());
newCluster.setMasterNode(clusterDefinition.getMasterNode());
newCluster.setStatus("defined");
newCluster.setStatus(STATUS_DEFINED);
newCluster.setEnvVars(ClusterUtils.createEnvVarsScript(clusterDefinition.getEnvVars()));
clusterDefinition.getNodes()
.forEach(clusterNodeDef -> repositoryService.saveClusterNodeDefinition(clusterNodeDef));
Expand Down Expand Up @@ -100,7 +111,7 @@ public boolean defineCluster(String sessionId, ClusterDefinition clusterDefiniti
repositoryService.saveJob(workerNodeJob);
// Map<String, String> edgeNodeMap = new HashMap<>();
// edgeNodeMap.put(edgeNode.getId(), edgeNode.getName() + "/_Task");
// edgeServie.addEdgeNodes(sessionId, edgeNodeMap, jobId);
// edgeService.addEdgeNodes(sessionId, edgeNodeMap, jobId);
} else {
PACloud cloud = repositoryService.getPACloud(node.getCloudId());
Job workerNodeJob = ClusterUtils.createWorkerNodeJob(newCluster.getName(),
@@ -124,28 +135,28 @@ public boolean deployCluster(String sessionId, String clusterName) throws NotCon
}
Validate.notNull(clusterName, "The received clusterName is empty. Nothing to be defined.");
LOGGER.info("deployCluster endpoint is called to deploy the cluster: " + clusterName);
Cluster toDeployClutser = ClusterUtils.getClusterByName(clusterName, repositoryService.listCluster());
Cluster toDeployCluster = ClusterUtils.getClusterByName(clusterName, repositoryService.listCluster());

// add nodes
if (toDeployClutser == null) {
if (toDeployCluster == null) {
LOGGER.error("No Cluster was found! Nothing is deployed!");
return false;
} else {
List<ClusterNodeDefinition> workerNodes = ClusterUtils.getWrokerNodes(toDeployClutser);
LOGGER.info("Deploying the master node of the cluster [{}]", toDeployClutser.getName());
submitClutserNode(sessionId, toDeployClutser, toDeployClutser.getMasterNode(), false);
LOGGER.info("Deploying the worker nodes of the cluster [{}]", toDeployClutser.getName());
List<ClusterNodeDefinition> workerNodes = ClusterUtils.getWrokerNodes(toDeployCluster);
LOGGER.info("Deploying the master node of the cluster [{}]", toDeployCluster.getName());
submitClusterNode(sessionId, toDeployCluster, toDeployCluster.getMasterNode(), false);
LOGGER.info("Deploying the worker nodes of the cluster [{}]", toDeployCluster.getName());
for (ClusterNodeDefinition node : workerNodes) {
submitClutserNode(sessionId, toDeployClutser, node.getName(), true);
submitClusterNode(sessionId, toDeployCluster, node.getName(), true);
}
toDeployClutser.setStatus("submited");
repositoryService.saveCluster(toDeployClutser);
toDeployCluster.setStatus(STATUS_SUBMITTED);
repositoryService.saveCluster(toDeployCluster);
repositoryService.flush();
}
return true;
}

private void submitClutserNode(String sessionId, Cluster cluster, String nodeName, boolean worker)
private void submitClusterNode(String sessionId, Cluster cluster, String nodeName, boolean worker)
throws NotConnectedException {
LOGGER.info("Deploying the node {}...", nodeName);
ClusterNodeDefinition node = ClusterUtils.getNodeByName(cluster, nodeName);
@@ -157,7 +168,7 @@ private void submitClutserNode(String sessionId, Cluster cluster, String nodeNam
EdgeNode edgeNode = ByonUtils.getEdgeNodeFromNC(nc);
Map<String, String> edgeNodeMap = new HashMap<>();
edgeNodeMap.put(edgeNode.getId(), edgeNode.getName() + "/_Task");
edgeServie.addEdgeNodes(sessionId, edgeNodeMap, jobId);
edgeService.addEdgeNodes(sessionId, edgeNodeMap, jobId);
currentDeployment = repositoryService.getDeployment(edgeNode.getName());
} else {
List<IaasDefinition> defs = ClusterUtils.getNodeIaasDefinition(sessionId, cluster, node.getName());
@@ -196,17 +207,17 @@ public Cluster getCluster(String sessionId, String clusterName) throws NotConnec
node.setState(state.getJobStatus().toString());
states.add(state.getJobStatus().toString());
} else {
node.setState("defined");
node.setState(STATUS_DEFINED);
}
nodes.set(i, node);
i += 1;
node.setNodeUrl(getNodeUrl(sessionId, clusterName, node));
}
if (states.contains("In-Error") || states.contains("Failed") || states.contains("Canceled")) {
getCluster.setStatus("failed");
getCluster.setStatus(STATUS_FAILED);
} else {
if (checkAllStates(states)) {
getCluster.setStatus("deployed");
getCluster.setStatus(STATUS_DEPLOYED);
}
}
getCluster.setNodes(nodes);
@@ -229,7 +240,7 @@ public Cluster scaleOutCluster(String sessionId, String clusterName, List<Cluste
newList.addAll(newNodes);
newList.forEach(clusterNodeDef -> repositoryService.saveClusterNodeDefinition(clusterNodeDef));
toScaleCluster.setNodes(newList);
toScaleCluster.setStatus("scaling");
toScaleCluster.setStatus(STATUS_SCALING);
repositoryService.saveCluster(toScaleCluster);
LOGGER.info("Scaling out the worker nodes of the cluster [{}]", clusterName);
for (ClusterNodeDefinition node : newNodes) {
@@ -243,7 +254,7 @@ public Cluster scaleInCluster(String sessionId, String clusterName, List<String>
}
repositoryService.flush();
for (ClusterNodeDefinition node : newNodes) {
submitClutserNode(sessionId, toScaleCluster, node.getName(), true);
submitClusterNode(sessionId, toScaleCluster, node.getName(), true);
}
return toScaleCluster;
}
@@ -282,7 +293,7 @@ public Cluster scaleInCluster(String sessionId, String clusterName, List<String>
}
}
toScaleCluster.setNodes(clusterNodes);
toScaleCluster.setStatus("scaling");
toScaleCluster.setStatus(STATUS_SCALING);
clusterNodes.forEach(clusterNodeDef -> repositoryService.saveClusterNodeDefinition(clusterNodeDef));
repositoryService.saveCluster(toScaleCluster);
repositoryService.flush();
