Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

DataNode: rename cluster manager property #18342

Merged
merged 3 commits into from
Feb 21, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions changelog/unreleased/pr-18342.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
type = "f"
message = "OpenSearch renamed cluster.initial_master_nodes to initial_cluster_manager_nodes, which was not reflected yet and is a possible cause of startup bugs."

pulls = ["18342"]
Original file line number Diff line number Diff line change
Expand Up @@ -107,8 +107,8 @@ public class Configuration extends BaseConfiguration {
/**
 * Comma-separated list of OpenSearch nodes that are eligible as cluster manager nodes.
 * Feeds the OpenSearch setting {@code cluster.initial_cluster_manager_nodes}
 * (OpenSearch's rename of {@code cluster.initial_master_nodes}).
 */
@Parameter(value = "initial_cluster_manager_nodes")
private String initialClusterManagerNodes;

@Parameter(value = "opensearch_http_port", converter = IntegerConverter.class)
private int opensearchHttpPort = 9200;
Expand Down Expand Up @@ -340,8 +340,8 @@ public String getDatanodeNodeName() {
return datanodeNodeName != null && !datanodeNodeName.isBlank() ? datanodeNodeName : getHostname();
}

public String getInitialManagerNodes() {
return initialManagerNodes;
public String getInitialClusterManagerNodes() {
return initialClusterManagerNodes;
}

public int getOpensearchHttpPort() {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -96,11 +96,11 @@ public OpensearchConfiguration get() {
try {
ImmutableMap.Builder<String, String> opensearchProperties = ImmutableMap.builder();

// Prefer an explicitly configured node list. OpenSearch renamed the bootstrap
// setting cluster.initial_master_nodes to cluster.initial_cluster_manager_nodes,
// so the new key must be used here.
if (localConfiguration.getInitialClusterManagerNodes() != null && !localConfiguration.getInitialClusterManagerNodes().isBlank()) {
    opensearchProperties.put("cluster.initial_cluster_manager_nodes", localConfiguration.getInitialClusterManagerNodes());
} else if (isPreflight()) {
    // During preflight there is no explicit configuration yet; bootstrap the
    // cluster from all currently active nodes instead.
    final var nodeList = String.join(",", nodeService.allActive().values().stream().map(Node::getHostname).collect(Collectors.toSet()));
    opensearchProperties.put("cluster.initial_cluster_manager_nodes", nodeList);
}
}
opensearchProperties.putAll(commonOpensearchConfig(localConfiguration));

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,8 @@
import org.hamcrest.Matchers;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.concurrent.ExecutionException;
Expand All @@ -33,6 +35,7 @@

@ExtendWith(DatanodeTestExtension.class)
public class DataNodePluginsIT {
private static final Logger LOG = LoggerFactory.getLogger(DataNodePluginsIT.class);
private final DatanodeContainerizedBackend backend;

public DataNodePluginsIT(DatanodeContainerizedBackend backend) {
Expand All @@ -43,25 +46,30 @@ public DataNodePluginsIT(DatanodeContainerizedBackend backend) {
void ensureUnneededPluginsAreNotLoaded() throws Exception {
    final var opensearchRestPort = backend.getOpensearchRestPort();
    final var baseUrl = "http://localhost:" + opensearchRestPort;
    try {
        waitForOpensearch(baseUrl);

        // The DataNode distribution should ship without the optional OpenSearch
        // plugins; assert each one is absent from the _cat/plugins listing.
        given()
                .get(baseUrl + "/_cat/plugins")
                .then()
                .statusCode(200)
                .body(
                        Matchers.not(Matchers.containsString("opensearch-alerting")),
                        Matchers.not(Matchers.containsString("opensearch-custom-codecs")),
                        Matchers.not(Matchers.containsString("opensearch-geospatial")),
                        Matchers.not(Matchers.containsString("opensearch-knn")),
                        Matchers.not(Matchers.containsString("opensearch-neural-search")),
                        Matchers.not(Matchers.containsString("opensearch-notifications")),
                        Matchers.not(Matchers.containsString("opensearch-notifications-core")),
                        Matchers.not(Matchers.containsString("opensearch-performance-analyzer")),
                        Matchers.not(Matchers.containsString("opensearch-reports-scheduler")),
                        Matchers.not(Matchers.containsString("opensearch-security-analytics")),
                        Matchers.not(Matchers.containsString("opensearch-sql"))
                );
    } catch (Exception exception) {
        // Dump the container logs so CI failures are diagnosable, then rethrow.
        LOG.error("DataNode Container logs follow:\n" + backend.getLogs());
        throw exception;
    }
}

private void waitForOpensearch(String baseUrl) throws ExecutionException, RetryException {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -222,7 +222,7 @@ private DatanodeContainerizedBackend createDatanodeContainer(Network network,
datanodeContainer -> {
datanodeContainer.withNetwork(network);
datanodeContainer.withEnv("GRAYLOG_DATANODE_PASSWORD_SECRET", DatanodeContainerizedBackend.SIGNING_SECRET);
datanodeContainer.withEnv("GRAYLOG_DATANODE_CLUSTER_INITIAL_MANAGER_NODES", hostnameNodeA);
datanodeContainer.withEnv("GRAYLOG_DATANODE_INITIAL_CLUSTER_MANAGER_NODES", hostnameNodeA);
datanodeContainer.withEnv("GRAYLOG_DATANODE_OPENSEARCH_DISCOVERY_SEED_HOSTS", hostnameNodeA + ":9300");

datanodeContainer.withFileSystemBind(transportKeystore.location().toAbsolutePath().toString(), IMAGE_WORKING_DIR + "/config/datanode-transport-certificates.p12");
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -120,12 +120,12 @@ public org.graylog.testing.datanode.DatanodeDevContainerBuilder network(final Ne

public GenericContainer<?> build() {
final Path graylog = getPath().resolve("graylog-datanode-" + getProjectVersion() + ".jar");
// Diagnostic output for the case where the datanode jar cannot be located.
if (!Files.exists(graylog)) {
    LOG.info("Searching for {} failed.", graylog.toAbsolutePath());
    LOG.info("Project repos path: {}, absolute path: {}", getProjectReposPath(), getProjectReposPath().toAbsolutePath());
    if (Files.exists(getPath())) {
        LOG.info("contents of base path {}:", getPath());
        // List the directory contents to aid debugging; the stream from
        // Files.list must be closed, hence try-with-resources.
        try (var files = Files.list(getPath())) {
            files.forEach(file -> LOG.info("{}", file.toString()));
        } catch (IOException ex) {
            LOG.info("listing files failed with exception: {}", ex.getMessage());
Expand Down Expand Up @@ -155,7 +155,7 @@ public GenericContainer<?> build() {
.withEnv("GRAYLOG_DATANODE_OPENSEARCH_DISCOVERY_SEED_HOSTS", nodeName + ":" + openSearchTransportPort)

.withEnv("GRAYLOG_DATANODE_OPENSEARCH_NETWORK_HOST", nodeName)
// Renamed property: initial_cluster_manager_nodes (was cluster_initial_manager_nodes).
.withEnv("GRAYLOG_DATANODE_INITIAL_CLUSTER_MANAGER_NODES", nodeName)

.withEnv("GRAYLOG_DATANODE_ROOT_USERNAME", rootUsername)
.withEnv("GRAYLOG_DATANODE_PASSWORD_SECRET", passwordSecret)
Expand Down
2 changes: 1 addition & 1 deletion misc/datanode.conf
Original file line number Diff line number Diff line change
Expand Up @@ -123,7 +123,7 @@ mongodb_uri = mongodb://localhost/graylog
# if you're not using the automatic data node setup and want to create a cluster, you have to set up the initial cluster manager nodes
# make sure to remove this setting after the cluster has formed
#
# initial_cluster_manager_nodes =

#### OpenSearch folders
#
Expand Down
Loading