---
.../indices/recovery/RecoveryTarget.java | 47 -------------------
.../replication/common/ReplicationTarget.java | 2 +-
2 files changed, 1 insertion(+), 48 deletions(-)
diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java
index 652f3c9a55f53..7acc6b8b54fdd 100644
--- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java
+++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java
@@ -177,51 +177,6 @@ public boolean reset(CancellableThreads newTargetCancellableThreads) throws IOEx
return false;
}
- /**
- * cancel the recovery. calling this method will clean temporary files and release the store
- * unless this object is in use (in which case it will be cleaned once all ongoing users call
- * {@link #decRef()}
- *
- * if {@link #cancellableThreads()} was used, the threads will be interrupted.
- */
- public void cancel(String reason) {
- if (finished.compareAndSet(false, true)) {
- try {
- logger.debug("recovery canceled (reason: [{}])", reason);
- cancellableThreads.cancel(reason);
- } finally {
- // release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now
- decRef();
- }
- }
- }
-
- /**
- * fail the recovery and call listener
- *
- * @param e exception that encapsulating the failure
- * @param sendShardFailure indicates whether to notify the cluster-manager of the shard failure
- */
- public void fail(RecoveryFailedException e, boolean sendShardFailure) {
- super.fail(e, sendShardFailure);
- }
-
- /** mark the current recovery as done */
- public void markAsDone() {
- if (finished.compareAndSet(false, true)) {
- assert multiFileWriter.tempFileNames.isEmpty() : "not all temporary files are renamed";
- try {
- // this might still throw an exception ie. if the shard is CLOSED due to some other event.
- // it's safer to decrement the reference in a try finally here.
- indexShard.postRecovery("peer recovery done");
- } finally {
- // release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now
- decRef();
- }
- listener.onDone(state());
- }
- }
-
@Override
protected void closeInternal() {
try {
@@ -246,8 +201,6 @@ protected String getPrefix() {
@Override
protected void onDone() {
assert multiFileWriter.tempFileNames.isEmpty() : "not all temporary files are renamed";
- // this might still throw an exception ie. if the shard is CLOSED due to some other event.
- // it's safer to decrement the reference in a try finally here.
indexShard.postRecovery("peer recovery done");
}
diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java
index 501ff46eeb2ff..42f4572fef3e4 100644
--- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java
+++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java
@@ -155,7 +155,7 @@ public void markAsDone() {
public void cancel(String reason) {
if (finished.compareAndSet(false, true)) {
try {
- logger.debug("replication cancelled (reason: [{}])", reason);
+ logger.debug("replication/recovery cancelled (reason: [{}])", reason);
onCancel(reason);
} finally {
// release the initial reference. replication files will be cleaned as soon as ref count goes to zero, potentially now
From 5dd79479a2ca84a633a7583eb37df07504cd8a90 Mon Sep 17 00:00:00 2001
From: Kunal Kotwani
Date: Wed, 24 Aug 2022 15:13:33 -0700
Subject: [PATCH 010/187] Update the head ref to changelog verifier (#4296)
* Update changelog contribution guide
Signed-off-by: Kunal Kotwani
* Fix reference to pull request
Signed-off-by: Kunal Kotwani
Signed-off-by: Kunal Kotwani
---
.github/workflows/changelog_verifier.yml | 2 +-
CHANGELOG.md | 1 +
CONTRIBUTING.md | 2 --
3 files changed, 2 insertions(+), 3 deletions(-)
diff --git a/.github/workflows/changelog_verifier.yml b/.github/workflows/changelog_verifier.yml
index 505b02426f22c..ee9bf5e18d0d5 100644
--- a/.github/workflows/changelog_verifier.yml
+++ b/.github/workflows/changelog_verifier.yml
@@ -11,7 +11,7 @@ jobs:
- uses: actions/checkout@v3
with:
token: ${{ secrets.GITHUB_TOKEN }}
- ref: ${{ github.event.pull_request.head.ref }}
+ ref: ${{ github.event.pull_request.head.sha }}
- uses: dangoslen/dependabot-changelog-helper@v1
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 513fb92ad2675..bc7baef8f83fa 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -13,6 +13,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
### Fixed
- `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run ([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289))
+- PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296))
### Security
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 16821b1915032..fc02d52f0bc3b 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -131,8 +131,6 @@ As a contributor, you must ensure that every pull request has the changes listed
Adding in the change is two step process -
1. Add your changes to the corresponding section within the CHANGELOG file with dummy pull request information, publish the PR
- `Your change here ([#PR_NUMBER](PR_URL))`
-
2. Update the entry for your change in [`CHANGELOG.md`](CHANGELOG.md) and make sure that you reference the pull request there.
From 1bfabed0780c228f4f3c9a26aac2169e361c9426 Mon Sep 17 00:00:00 2001
From: Kunal Kotwani
Date: Thu, 25 Aug 2022 15:17:16 -0700
Subject: [PATCH 011/187] Add 2.x version to CHANGELOG (#4297)
Signed-off-by: Kunal Kotwani
Signed-off-by: Kunal Kotwani
---
CHANGELOG.md | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index bc7baef8f83fa..c258100894555 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -17,5 +17,21 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
### Security
+## [2.x]
+### Added
+- Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085))
+
+### Changed
+
+### Deprecated
+
+### Removed
+
+### Fixed
+- PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296))
+
+### Security
+
[Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD
+[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x
From 1dbb63a8ee04812bd5f5ef5bc4995eac8b090438 Mon Sep 17 00:00:00 2001
From: Andrew Ross
Date: Fri, 26 Aug 2022 12:58:47 -0700
Subject: [PATCH 012/187] Do not fail replica shard due to primary closure
(#4133)
This commit prevents a replica shard from being failed in the case that
a replication action to a replica is canceled due to the primary shard
being closed.
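In short: the replicas proxy now recognizes this cancellation and skips the
shard-failed report, since a replica canceled by primary closure is still in
sync with that (now closed) primary. A minimal sketch of the guard, condensed
from the failShardIfNeeded change in this patch (the surrounding proxy class,
logging, and listener plumbing are elided):

    // Sketch only: skip the remote shard-failed call when the failure merely
    // reflects closure of the primary; the replica's data is still valid.
    public void failShardIfNeeded(ShardRouting replica, long primaryTerm, String message,
                                  Exception exception, ActionListener<Void> listener) {
        if (exception instanceof PrimaryShardClosedException == false) {
            shardStateAction.remoteShardFailed(
                replica.shardId(), replica.allocationId().getId(), primaryTerm, true, message, exception, listener
            );
        }
    }

The new PrimaryShardClosedException subtype (registered with serialization id
162) is what lets the proxy distinguish this benign cancellation from a
genuine replica failure.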
Signed-off-by: Andrew Ross
Signed-off-by: Andrew Ross
---
CHANGELOG.md | 1 +
.../index/store/CorruptedFileIT.java | 101 ++++++++++++++++++
.../org/opensearch/OpenSearchException.java | 7 ++
.../PendingReplicationActions.java | 14 ++-
.../replication/TransportWriteAction.java | 24 +++--
.../shard/PrimaryShardClosedException.java | 26 +++++
.../ExceptionSerializationTests.java | 2 +
.../PendingReplicationActionsTests.java | 3 +-
.../TransportWriteActionTests.java | 45 ++++++++
9 files changed, 205 insertions(+), 18 deletions(-)
create mode 100644 server/src/main/java/org/opensearch/index/shard/PrimaryShardClosedException.java
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c258100894555..e988435a688da 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,6 +14,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
### Fixed
- `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run ([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289))
- PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296))
+- Do not fail replica shard due to primary closure ([#4133](https://github.com/opensearch-project/OpenSearch/pull/4133))
### Security
diff --git a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java
index ee2067c591cef..960e17b76acb5 100644
--- a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java
@@ -40,6 +40,7 @@
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.BytesRef;
+import org.hamcrest.MatcherAssert;
import org.opensearch.action.admin.cluster.health.ClusterHealthResponse;
import org.opensearch.action.admin.cluster.node.stats.NodeStats;
import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse;
@@ -48,6 +49,7 @@
import org.opensearch.action.admin.indices.shards.IndicesShardStoresResponse;
import org.opensearch.action.index.IndexRequestBuilder;
import org.opensearch.action.search.SearchResponse;
+import org.opensearch.action.support.replication.TransportReplicationAction;
import org.opensearch.client.Requests;
import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.health.ClusterHealthStatus;
@@ -108,6 +110,7 @@
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.Collectors;
import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.FS;
import static org.opensearch.common.util.CollectionUtils.iterableAsArrayList;
@@ -698,6 +701,104 @@ public void testReplicaCorruption() throws Exception {
ensureGreen(TimeValue.timeValueSeconds(60));
}
+ public void testPrimaryCorruptionDuringReplicationDoesNotFailReplicaShard() throws Exception {
+ internalCluster().ensureAtLeastNumDataNodes(2);
+ final NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
+ final List<NodeStats> dataNodeStats = nodeStats.getNodes()
+ .stream()
+ .filter(stat -> stat.getNode().isDataNode())
+ .collect(Collectors.toUnmodifiableList());
+ MatcherAssert.assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2));
+
+ final NodeStats primaryNode = dataNodeStats.get(0);
+ final NodeStats replicaNode = dataNodeStats.get(1);
+ assertAcked(
+ prepareCreate("test").setSettings(
+ Settings.builder()
+ .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0")
+ .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+ .put("index.routing.allocation.include._name", primaryNode.getNode().getName())
+ .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)
+ .put("index.allocation.max_retries", Integer.MAX_VALUE) // keep on retrying
+
+ )
+ );
+ ensureGreen();
+
+ // Add custom send behavior between primary and replica that will
+ // count down a latch to indicate that a replication operation is
+ // currently in flight, and then block on a second latch that will
+ // be released once the primary shard has been corrupted.
+ final CountDownLatch indexingInFlight = new CountDownLatch(1);
+ final CountDownLatch corruptionHasHappened = new CountDownLatch(1);
+ final MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(
+ TransportService.class,
+ primaryNode.getNode().getName()
+ ));
+ mockTransportService.addSendBehavior(
+ internalCluster().getInstance(TransportService.class, replicaNode.getNode().getName()),
+ (connection, requestId, action, request, options) -> {
+ if (request instanceof TransportReplicationAction.ConcreteShardRequest) {
+ indexingInFlight.countDown();
+ try {
+ corruptionHasHappened.await();
+ } catch (InterruptedException e) {
+ logger.info("Interrupted while waiting for corruption");
+ }
+ }
+ connection.sendRequest(requestId, action, request, options);
+ }
+ );
+
+ // Configure the modified data node as a replica
+ final Settings build = Settings.builder()
+ .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "1")
+ .put("index.routing.allocation.include._name", primaryNode.getNode().getName() + "," + replicaNode.getNode().getName())
+ .build();
+ client().admin().indices().prepareUpdateSettings("test").setSettings(build).get();
+ client().admin().cluster().prepareReroute().get();
+ ensureGreen();
+
+ // Create a snapshot repository. This repo is used to take a snapshot after
+ // corrupting a file, which causes the node to notice the corrupt data and
+ // close the shard.
+ assertAcked(
+ client().admin()
+ .cluster()
+ .preparePutRepository("test-repo")
+ .setType("fs")
+ .setSettings(
+ Settings.builder()
+ .put("location", randomRepoPath().toAbsolutePath())
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)
+ )
+ );
+
+ client().prepareIndex("test").setSource("field", "value").execute();
+ indexingInFlight.await();
+
+ // Corrupt a file on the primary then take a snapshot. Snapshot should
+ // finish in the PARTIAL state since the corrupted file will cause a checksum
+ // validation failure.
+ final ShardRouting corruptedShardRouting = corruptRandomPrimaryFile();
+ logger.info("--> {} corrupted", corruptedShardRouting);
+ final CreateSnapshotResponse createSnapshotResponse = client().admin()
+ .cluster()
+ .prepareCreateSnapshot("test-repo", "test-snap")
+ .setWaitForCompletion(true)
+ .setIndices("test")
+ .get();
+ final SnapshotState snapshotState = createSnapshotResponse.getSnapshotInfo().state();
+ MatcherAssert.assertThat("Expect file corruption to cause PARTIAL snapshot state", snapshotState, equalTo(SnapshotState.PARTIAL));
+
+ // Unblock the blocked indexing thread now that corruption on the primary has been confirmed
+ corruptionHasHappened.countDown();
+
+ // Assert the cluster returns to green status because the replica will be promoted to primary
+ ensureGreen();
+ }
+
private int numShards(String... index) {
ClusterState state = client().admin().cluster().prepareState().get().getState();
GroupShardsIterator<ShardIterator> shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(index, false);
diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java b/server/src/main/java/org/opensearch/OpenSearchException.java
index 4ebcd9622ce38..87efc03734d26 100644
--- a/server/src/main/java/org/opensearch/OpenSearchException.java
+++ b/server/src/main/java/org/opensearch/OpenSearchException.java
@@ -68,6 +68,7 @@
import static java.util.Collections.singletonMap;
import static java.util.Collections.unmodifiableMap;
import static org.opensearch.Version.V_2_1_0;
+import static org.opensearch.Version.V_3_0_0;
import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_UUID_NA_VALUE;
import static org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
import static org.opensearch.common.xcontent.XContentParserUtils.ensureFieldName;
@@ -1601,6 +1602,12 @@ private enum OpenSearchExceptionHandle {
org.opensearch.indices.replication.common.ReplicationFailedException::new,
161,
V_2_1_0
+ ),
+ PRIMARY_SHARD_CLOSED_EXCEPTION(
+ org.opensearch.index.shard.PrimaryShardClosedException.class,
+ org.opensearch.index.shard.PrimaryShardClosedException::new,
+ 162,
+ V_3_0_0
);
final Class<? extends OpenSearchException> exceptionClass;
diff --git a/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java b/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java
index b305c4c8c83a7..7087b64758888 100644
--- a/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java
+++ b/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java
@@ -35,6 +35,7 @@
import org.opensearch.action.support.RetryableAction;
import org.opensearch.common.lease.Releasable;
import org.opensearch.common.util.concurrent.ConcurrentCollections;
+import org.opensearch.index.shard.PrimaryShardClosedException;
import org.opensearch.index.shard.IndexShardClosedException;
import org.opensearch.index.shard.ReplicationGroup;
import org.opensearch.index.shard.ShardId;
@@ -45,6 +46,7 @@
import java.util.Map;
import java.util.Set;
import java.util.function.Consumer;
+import java.util.function.Supplier;
/**
* Pending Replication Actions
@@ -121,7 +123,7 @@ synchronized void acceptNewTrackedAllocationIds(Set<String> trackedAllocationIds
}
}
- cancelActions(toCancel, "Replica left ReplicationGroup");
+ cancelActions(toCancel, () -> new IndexShardClosedException(shardId, "Replica left ReplicationGroup"));
}
@Override
@@ -129,15 +131,11 @@ public synchronized void close() {
ArrayList<Set<RetryableAction<?>>> toCancel = new ArrayList<>(onGoingReplicationActions.values());
onGoingReplicationActions.clear();
- cancelActions(toCancel, "Primary closed.");
+ cancelActions(toCancel, () -> new PrimaryShardClosedException(shardId));
}
- private void cancelActions(ArrayList<Set<RetryableAction<?>>> toCancel, String message) {
+ private void cancelActions(ArrayList<Set<RetryableAction<?>>> toCancel, Supplier<IndexShardClosedException> exceptionSupplier) {
threadPool.executor(ThreadPool.Names.GENERIC)
- .execute(
- () -> toCancel.stream()
- .flatMap(Collection::stream)
- .forEach(action -> action.cancel(new IndexShardClosedException(shardId, message)))
- );
+ .execute(() -> toCancel.stream().flatMap(Collection::stream).forEach(action -> action.cancel(exceptionSupplier.get())));
}
}
diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java
index 39fb89bc48568..7fc810808f560 100644
--- a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java
+++ b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java
@@ -52,6 +52,7 @@
import org.opensearch.index.IndexingPressureService;
import org.opensearch.index.engine.Engine;
import org.opensearch.index.mapper.MapperParsingException;
+import org.opensearch.index.shard.PrimaryShardClosedException;
import org.opensearch.index.shard.IndexShard;
import org.opensearch.index.shard.ShardId;
import org.opensearch.index.translog.Translog;
@@ -514,15 +515,20 @@ public void failShardIfNeeded(
if (TransportActions.isShardNotAvailableException(exception) == false) {
logger.warn(new ParameterizedMessage("[{}] {}", replica.shardId(), message), exception);
}
- shardStateAction.remoteShardFailed(
- replica.shardId(),
- replica.allocationId().getId(),
- primaryTerm,
- true,
- message,
- exception,
- listener
- );
+ // If a write action fails due to the closure of the primary shard
+ // then the replicas should not be marked as failed since they are
+ // still up-to-date with the (now closed) primary shard
+ if (exception instanceof PrimaryShardClosedException == false) {
+ shardStateAction.remoteShardFailed(
+ replica.shardId(),
+ replica.allocationId().getId(),
+ primaryTerm,
+ true,
+ message,
+ exception,
+ listener
+ );
+ }
}
@Override
diff --git a/server/src/main/java/org/opensearch/index/shard/PrimaryShardClosedException.java b/server/src/main/java/org/opensearch/index/shard/PrimaryShardClosedException.java
new file mode 100644
index 0000000000000..d1b2bf9079289
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/shard/PrimaryShardClosedException.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.index.shard;
+
+import java.io.IOException;
+
+import org.opensearch.common.io.stream.StreamInput;
+
+/**
+ * Exception to indicate failures are caused due to the closure of the primary
+ * shard.
+ *
+ * @opensearch.internal
+ */
+public class PrimaryShardClosedException extends IndexShardClosedException {
+ public PrimaryShardClosedException(ShardId shardId) {
+ super(shardId, "Primary closed");
+ }
+
+ public PrimaryShardClosedException(StreamInput in) throws IOException {
+ super(in);
+ }
+}
diff --git a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java
index 5a93d7c0bd86e..26b0ce7e9e20c 100644
--- a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java
+++ b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java
@@ -80,6 +80,7 @@
import org.opensearch.index.seqno.RetentionLeaseNotFoundException;
import org.opensearch.index.shard.IllegalIndexShardStateException;
import org.opensearch.index.shard.IndexShardState;
+import org.opensearch.index.shard.PrimaryShardClosedException;
import org.opensearch.index.shard.ShardId;
import org.opensearch.index.shard.ShardNotInPrimaryModeException;
import org.opensearch.indices.IndexTemplateMissingException;
@@ -858,6 +859,7 @@ public void testIds() {
ids.put(159, NodeHealthCheckFailureException.class);
ids.put(160, NoSeedNodeLeftException.class);
ids.put(161, ReplicationFailedException.class);
+ ids.put(162, PrimaryShardClosedException.class);
Map<Class<? extends OpenSearchException>, Integer> reverse = new HashMap<>();
for (Map.Entry<Integer, Class<? extends OpenSearchException>> entry : ids.entrySet()) {
diff --git a/server/src/test/java/org/opensearch/action/support/replication/PendingReplicationActionsTests.java b/server/src/test/java/org/opensearch/action/support/replication/PendingReplicationActionsTests.java
index ec0cefed842cd..66d3b843529ab 100644
--- a/server/src/test/java/org/opensearch/action/support/replication/PendingReplicationActionsTests.java
+++ b/server/src/test/java/org/opensearch/action/support/replication/PendingReplicationActionsTests.java
@@ -38,6 +38,7 @@
import org.opensearch.common.UUIDs;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.index.shard.IndexShardClosedException;
+import org.opensearch.index.shard.PrimaryShardClosedException;
import org.opensearch.index.shard.ShardId;
import org.opensearch.test.OpenSearchTestCase;
import org.opensearch.threadpool.TestThreadPool;
@@ -102,7 +103,7 @@ public void testAllocationIdActionWillBeCancelledOnClose() {
pendingReplication.addPendingAction(allocationId, action);
action.run();
pendingReplication.close();
- expectThrows(IndexShardClosedException.class, future::actionGet);
+ expectThrows(PrimaryShardClosedException.class, future::actionGet);
}
private class TestAction extends RetryableAction<Void> {
diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java
index 4da32a890fd0e..137aca4966936 100644
--- a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java
+++ b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java
@@ -32,6 +32,7 @@
package org.opensearch.action.support.replication;
+import org.hamcrest.MatcherAssert;
import org.opensearch.OpenSearchException;
import org.opensearch.action.ActionListener;
import org.opensearch.action.support.ActionFilters;
@@ -57,6 +58,7 @@
import org.opensearch.index.IndexService;
import org.opensearch.index.IndexingPressureService;
import org.opensearch.index.shard.IndexShard;
+import org.opensearch.index.shard.PrimaryShardClosedException;
import org.opensearch.index.shard.ShardId;
import org.opensearch.index.shard.ShardNotFoundException;
import org.opensearch.index.translog.Translog;
@@ -91,6 +93,7 @@
import java.util.stream.Collectors;
import static java.util.Collections.emptyMap;
+import static org.hamcrest.Matchers.emptyArray;
import static org.opensearch.test.ClusterServiceUtils.createClusterService;
import static org.hamcrest.Matchers.arrayWithSize;
import static org.hamcrest.Matchers.equalTo;
@@ -395,6 +398,48 @@ public void testReplicaProxy() throws InterruptedException, ExecutionException {
}
}
+ public void testPrimaryClosedDoesNotFailShard() {
+ final CapturingTransport transport = new CapturingTransport();
+ final TransportService transportService = transport.createTransportService(
+ clusterService.getSettings(),
+ threadPool,
+ TransportService.NOOP_TRANSPORT_INTERCEPTOR,
+ x -> clusterService.localNode(),
+ null,
+ Collections.emptySet()
+ );
+ transportService.start();
+ transportService.acceptIncomingRequests();
+ final ShardStateAction shardStateAction = new ShardStateAction(clusterService, transportService, null, null, threadPool);
+ final TestAction action = new TestAction(
+ Settings.EMPTY,
+ "internal:testAction",
+ transportService,
+ clusterService,
+ shardStateAction,
+ threadPool
+ );
+ final String index = "test";
+ final ShardId shardId = new ShardId(index, "_na_", 0);
+ final ClusterState state = ClusterStateCreationUtils.stateWithActivePrimary(index, true, 1, 0);
+ ClusterServiceUtils.setState(clusterService, state);
+ final long primaryTerm = state.metadata().index(index).primaryTerm(0);
+ final ShardRouting shardRouting = state.routingTable().shardRoutingTable(shardId).replicaShards().get(0);
+
+ // Assert that failShardIfNeeded is a no-op for the PrimaryShardClosedException failure
+ final AtomicInteger callbackCount = new AtomicInteger(0);
+ action.newReplicasProxy()
+ .failShardIfNeeded(
+ shardRouting,
+ primaryTerm,
+ "test",
+ new PrimaryShardClosedException(shardId),
+ ActionListener.wrap(callbackCount::incrementAndGet)
+ );
+ MatcherAssert.assertThat(transport.getCapturedRequestsAndClear(), emptyArray());
+ MatcherAssert.assertThat(callbackCount.get(), equalTo(0));
+ }
+
private class TestAction extends TransportWriteAction<TestRequest, TestRequest, TestResponse> {
private final boolean withDocumentFailureOnPrimary;
From c62cecb048bafe8b79709660956fe4cba5548872 Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Fri, 26 Aug 2022 17:46:01 -0400
Subject: [PATCH 013/187] Some dependency updates (#4308)
Signed-off-by: Andriy Redko
Signed-off-by: Andriy Redko
---
CHANGELOG.md | 1 +
buildSrc/version.properties | 10 +++++-----
client/rest/licenses/commons-codec-1.13.jar.sha1 | 1 -
client/rest/licenses/commons-codec-1.15.jar.sha1 | 1 +
client/rest/licenses/httpasyncclient-4.1.4.jar.sha1 | 1 -
client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 | 1 +
client/rest/licenses/httpcore-4.4.12.jar.sha1 | 1 -
client/rest/licenses/httpcore-4.4.15.jar.sha1 | 1 +
client/rest/licenses/httpcore-nio-4.4.12.jar.sha1 | 1 -
client/rest/licenses/httpcore-nio-4.4.15.jar.sha1 | 1 +
client/sniffer/licenses/commons-codec-1.13.jar.sha1 | 1 -
client/sniffer/licenses/commons-codec-1.15.jar.sha1 | 1 +
client/sniffer/licenses/httpcore-4.4.12.jar.sha1 | 1 -
client/sniffer/licenses/httpcore-4.4.15.jar.sha1 | 1 +
.../licenses/commons-codec-1.13.jar.sha1 | 1 -
.../licenses/commons-codec-1.15.jar.sha1 | 1 +
.../licenses/commons-codec-1.13.jar.sha1 | 1 -
.../licenses/commons-codec-1.15.jar.sha1 | 1 +
.../licenses/httpcore-4.4.12.jar.sha1 | 1 -
.../licenses/httpcore-4.4.15.jar.sha1 | 1 +
.../discovery-ec2/licenses/commons-codec-1.13.jar.sha1 | 1 -
.../discovery-ec2/licenses/commons-codec-1.15.jar.sha1 | 1 +
.../discovery-ec2/licenses/httpcore-4.4.12.jar.sha1 | 1 -
.../discovery-ec2/licenses/httpcore-4.4.15.jar.sha1 | 1 +
.../discovery-gce/licenses/commons-codec-1.13.jar.sha1 | 1 -
.../discovery-gce/licenses/commons-codec-1.15.jar.sha1 | 1 +
.../discovery-gce/licenses/httpcore-4.4.12.jar.sha1 | 1 -
.../discovery-gce/licenses/httpcore-4.4.15.jar.sha1 | 1 +
.../licenses/commons-codec-1.13.jar.sha1 | 1 -
.../licenses/commons-codec-1.15.jar.sha1 | 1 +
.../licenses/slf4j-api-1.6.2.jar.sha1 | 1 -
.../licenses/slf4j-api-1.7.36.jar.sha1 | 1 +
.../repository-azure/licenses/slf4j-api-1.6.2.jar.sha1 | 1 -
.../licenses/slf4j-api-1.7.36.jar.sha1 | 1 +
.../licenses/commons-codec-1.13.jar.sha1 | 1 -
.../licenses/commons-codec-1.15.jar.sha1 | 1 +
.../licenses/commons-codec-1.13.jar.sha1 | 1 -
.../licenses/commons-codec-1.15.jar.sha1 | 1 +
.../repository-hdfs/licenses/slf4j-api-1.6.2.jar.sha1 | 1 -
.../repository-hdfs/licenses/slf4j-api-1.7.36.jar.sha1 | 1 +
.../repository-s3/licenses/commons-codec-1.13.jar.sha1 | 1 -
.../repository-s3/licenses/commons-codec-1.15.jar.sha1 | 1 +
.../repository-s3/licenses/httpcore-4.4.12.jar.sha1 | 1 -
.../repository-s3/licenses/httpcore-4.4.15.jar.sha1 | 1 +
44 files changed, 27 insertions(+), 26 deletions(-)
delete mode 100644 client/rest/licenses/commons-codec-1.13.jar.sha1
create mode 100644 client/rest/licenses/commons-codec-1.15.jar.sha1
delete mode 100644 client/rest/licenses/httpasyncclient-4.1.4.jar.sha1
create mode 100644 client/rest/licenses/httpasyncclient-4.1.5.jar.sha1
delete mode 100644 client/rest/licenses/httpcore-4.4.12.jar.sha1
create mode 100644 client/rest/licenses/httpcore-4.4.15.jar.sha1
delete mode 100644 client/rest/licenses/httpcore-nio-4.4.12.jar.sha1
create mode 100644 client/rest/licenses/httpcore-nio-4.4.15.jar.sha1
delete mode 100644 client/sniffer/licenses/commons-codec-1.13.jar.sha1
create mode 100644 client/sniffer/licenses/commons-codec-1.15.jar.sha1
delete mode 100644 client/sniffer/licenses/httpcore-4.4.12.jar.sha1
create mode 100644 client/sniffer/licenses/httpcore-4.4.15.jar.sha1
delete mode 100644 plugins/analysis-phonetic/licenses/commons-codec-1.13.jar.sha1
create mode 100644 plugins/analysis-phonetic/licenses/commons-codec-1.15.jar.sha1
delete mode 100644 plugins/discovery-azure-classic/licenses/commons-codec-1.13.jar.sha1
create mode 100644 plugins/discovery-azure-classic/licenses/commons-codec-1.15.jar.sha1
delete mode 100644 plugins/discovery-azure-classic/licenses/httpcore-4.4.12.jar.sha1
create mode 100644 plugins/discovery-azure-classic/licenses/httpcore-4.4.15.jar.sha1
delete mode 100644 plugins/discovery-ec2/licenses/commons-codec-1.13.jar.sha1
create mode 100644 plugins/discovery-ec2/licenses/commons-codec-1.15.jar.sha1
delete mode 100644 plugins/discovery-ec2/licenses/httpcore-4.4.12.jar.sha1
create mode 100644 plugins/discovery-ec2/licenses/httpcore-4.4.15.jar.sha1
delete mode 100644 plugins/discovery-gce/licenses/commons-codec-1.13.jar.sha1
create mode 100644 plugins/discovery-gce/licenses/commons-codec-1.15.jar.sha1
delete mode 100644 plugins/discovery-gce/licenses/httpcore-4.4.12.jar.sha1
create mode 100644 plugins/discovery-gce/licenses/httpcore-4.4.15.jar.sha1
delete mode 100644 plugins/ingest-attachment/licenses/commons-codec-1.13.jar.sha1
create mode 100644 plugins/ingest-attachment/licenses/commons-codec-1.15.jar.sha1
delete mode 100644 plugins/ingest-attachment/licenses/slf4j-api-1.6.2.jar.sha1
create mode 100644 plugins/ingest-attachment/licenses/slf4j-api-1.7.36.jar.sha1
delete mode 100644 plugins/repository-azure/licenses/slf4j-api-1.6.2.jar.sha1
create mode 100644 plugins/repository-azure/licenses/slf4j-api-1.7.36.jar.sha1
delete mode 100644 plugins/repository-gcs/licenses/commons-codec-1.13.jar.sha1
create mode 100644 plugins/repository-gcs/licenses/commons-codec-1.15.jar.sha1
delete mode 100644 plugins/repository-hdfs/licenses/commons-codec-1.13.jar.sha1
create mode 100644 plugins/repository-hdfs/licenses/commons-codec-1.15.jar.sha1
delete mode 100644 plugins/repository-hdfs/licenses/slf4j-api-1.6.2.jar.sha1
create mode 100644 plugins/repository-hdfs/licenses/slf4j-api-1.7.36.jar.sha1
delete mode 100644 plugins/repository-s3/licenses/commons-codec-1.13.jar.sha1
create mode 100644 plugins/repository-s3/licenses/commons-codec-1.15.jar.sha1
delete mode 100644 plugins/repository-s3/licenses/httpcore-4.4.12.jar.sha1
create mode 100644 plugins/repository-s3/licenses/httpcore-4.4.15.jar.sha1
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e988435a688da..26ff011609635 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
- Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085))
### Changed
+ - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308))
### Deprecated
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index 4af1acfed0ab2..876910d5351d0 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -15,7 +15,7 @@ snakeyaml = 1.26
icu4j = 70.1
supercsv = 2.4.0
log4j = 2.17.1
-slf4j = 1.6.2
+slf4j = 1.7.36
asm = 9.3
# when updating the JNA version, also update the version in buildSrc/build.gradle
@@ -26,10 +26,10 @@ joda = 2.10.13
# client dependencies
httpclient = 4.5.13
-httpcore = 4.4.12
-httpasyncclient = 4.1.4
+httpcore = 4.4.15
+httpasyncclient = 4.1.5
commonslogging = 1.2
-commonscodec = 1.13
+commonscodec = 1.15
# plugin dependencies
aws = 1.12.270
@@ -42,7 +42,7 @@ bouncycastle=1.70
randomizedrunner = 2.7.1
junit = 4.13.2
hamcrest = 2.1
-mockito = 4.6.1
+mockito = 4.7.0
objenesis = 3.2
bytebuddy = 1.12.12
diff --git a/client/rest/licenses/commons-codec-1.13.jar.sha1 b/client/rest/licenses/commons-codec-1.13.jar.sha1
deleted file mode 100644
index 66b72c414d63a..0000000000000
--- a/client/rest/licenses/commons-codec-1.13.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3f18e1aa31031d89db6f01ba05d501258ce69d2c
\ No newline at end of file
diff --git a/client/rest/licenses/commons-codec-1.15.jar.sha1 b/client/rest/licenses/commons-codec-1.15.jar.sha1
new file mode 100644
index 0000000000000..62d99837b87e1
--- /dev/null
+++ b/client/rest/licenses/commons-codec-1.15.jar.sha1
@@ -0,0 +1 @@
+49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d
\ No newline at end of file
diff --git a/client/rest/licenses/httpasyncclient-4.1.4.jar.sha1 b/client/rest/licenses/httpasyncclient-4.1.4.jar.sha1
deleted file mode 100644
index 8360ab45c7ab3..0000000000000
--- a/client/rest/licenses/httpasyncclient-4.1.4.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f3a3240681faae3fa46b573a4c7e50cec9db0d86
\ No newline at end of file
diff --git a/client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 b/client/rest/licenses/httpasyncclient-4.1.5.jar.sha1
new file mode 100644
index 0000000000000..366a9e31069a6
--- /dev/null
+++ b/client/rest/licenses/httpasyncclient-4.1.5.jar.sha1
@@ -0,0 +1 @@
+cd18227f1eb8e9a263286c1d7362ceb24f6f9b32
\ No newline at end of file
diff --git a/client/rest/licenses/httpcore-4.4.12.jar.sha1 b/client/rest/licenses/httpcore-4.4.12.jar.sha1
deleted file mode 100644
index 3c046171b30da..0000000000000
--- a/client/rest/licenses/httpcore-4.4.12.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-21ebaf6d532bc350ba95bd81938fa5f0e511c132
\ No newline at end of file
diff --git a/client/rest/licenses/httpcore-4.4.15.jar.sha1 b/client/rest/licenses/httpcore-4.4.15.jar.sha1
new file mode 100644
index 0000000000000..42a03b5d7a376
--- /dev/null
+++ b/client/rest/licenses/httpcore-4.4.15.jar.sha1
@@ -0,0 +1 @@
+7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d
\ No newline at end of file
diff --git a/client/rest/licenses/httpcore-nio-4.4.12.jar.sha1 b/client/rest/licenses/httpcore-nio-4.4.12.jar.sha1
deleted file mode 100644
index 4de932dc5aca0..0000000000000
--- a/client/rest/licenses/httpcore-nio-4.4.12.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-84cd29eca842f31db02987cfedea245af020198b
\ No newline at end of file
diff --git a/client/rest/licenses/httpcore-nio-4.4.15.jar.sha1 b/client/rest/licenses/httpcore-nio-4.4.15.jar.sha1
new file mode 100644
index 0000000000000..251b35ab6a1a5
--- /dev/null
+++ b/client/rest/licenses/httpcore-nio-4.4.15.jar.sha1
@@ -0,0 +1 @@
+85d2b6825d42db909a1474f0ffbd6328429b7a32
\ No newline at end of file
diff --git a/client/sniffer/licenses/commons-codec-1.13.jar.sha1 b/client/sniffer/licenses/commons-codec-1.13.jar.sha1
deleted file mode 100644
index 66b72c414d63a..0000000000000
--- a/client/sniffer/licenses/commons-codec-1.13.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3f18e1aa31031d89db6f01ba05d501258ce69d2c
\ No newline at end of file
diff --git a/client/sniffer/licenses/commons-codec-1.15.jar.sha1 b/client/sniffer/licenses/commons-codec-1.15.jar.sha1
new file mode 100644
index 0000000000000..62d99837b87e1
--- /dev/null
+++ b/client/sniffer/licenses/commons-codec-1.15.jar.sha1
@@ -0,0 +1 @@
+49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d
\ No newline at end of file
diff --git a/client/sniffer/licenses/httpcore-4.4.12.jar.sha1 b/client/sniffer/licenses/httpcore-4.4.12.jar.sha1
deleted file mode 100644
index 3c046171b30da..0000000000000
--- a/client/sniffer/licenses/httpcore-4.4.12.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-21ebaf6d532bc350ba95bd81938fa5f0e511c132
\ No newline at end of file
diff --git a/client/sniffer/licenses/httpcore-4.4.15.jar.sha1 b/client/sniffer/licenses/httpcore-4.4.15.jar.sha1
new file mode 100644
index 0000000000000..42a03b5d7a376
--- /dev/null
+++ b/client/sniffer/licenses/httpcore-4.4.15.jar.sha1
@@ -0,0 +1 @@
+7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d
\ No newline at end of file
diff --git a/plugins/analysis-phonetic/licenses/commons-codec-1.13.jar.sha1 b/plugins/analysis-phonetic/licenses/commons-codec-1.13.jar.sha1
deleted file mode 100644
index 66b72c414d63a..0000000000000
--- a/plugins/analysis-phonetic/licenses/commons-codec-1.13.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3f18e1aa31031d89db6f01ba05d501258ce69d2c
\ No newline at end of file
diff --git a/plugins/analysis-phonetic/licenses/commons-codec-1.15.jar.sha1 b/plugins/analysis-phonetic/licenses/commons-codec-1.15.jar.sha1
new file mode 100644
index 0000000000000..62d99837b87e1
--- /dev/null
+++ b/plugins/analysis-phonetic/licenses/commons-codec-1.15.jar.sha1
@@ -0,0 +1 @@
+49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d
\ No newline at end of file
diff --git a/plugins/discovery-azure-classic/licenses/commons-codec-1.13.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-codec-1.13.jar.sha1
deleted file mode 100644
index 66b72c414d63a..0000000000000
--- a/plugins/discovery-azure-classic/licenses/commons-codec-1.13.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3f18e1aa31031d89db6f01ba05d501258ce69d2c
\ No newline at end of file
diff --git a/plugins/discovery-azure-classic/licenses/commons-codec-1.15.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-codec-1.15.jar.sha1
new file mode 100644
index 0000000000000..62d99837b87e1
--- /dev/null
+++ b/plugins/discovery-azure-classic/licenses/commons-codec-1.15.jar.sha1
@@ -0,0 +1 @@
+49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d
\ No newline at end of file
diff --git a/plugins/discovery-azure-classic/licenses/httpcore-4.4.12.jar.sha1 b/plugins/discovery-azure-classic/licenses/httpcore-4.4.12.jar.sha1
deleted file mode 100644
index 3c046171b30da..0000000000000
--- a/plugins/discovery-azure-classic/licenses/httpcore-4.4.12.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-21ebaf6d532bc350ba95bd81938fa5f0e511c132
\ No newline at end of file
diff --git a/plugins/discovery-azure-classic/licenses/httpcore-4.4.15.jar.sha1 b/plugins/discovery-azure-classic/licenses/httpcore-4.4.15.jar.sha1
new file mode 100644
index 0000000000000..42a03b5d7a376
--- /dev/null
+++ b/plugins/discovery-azure-classic/licenses/httpcore-4.4.15.jar.sha1
@@ -0,0 +1 @@
+7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d
\ No newline at end of file
diff --git a/plugins/discovery-ec2/licenses/commons-codec-1.13.jar.sha1 b/plugins/discovery-ec2/licenses/commons-codec-1.13.jar.sha1
deleted file mode 100644
index 66b72c414d63a..0000000000000
--- a/plugins/discovery-ec2/licenses/commons-codec-1.13.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3f18e1aa31031d89db6f01ba05d501258ce69d2c
\ No newline at end of file
diff --git a/plugins/discovery-ec2/licenses/commons-codec-1.15.jar.sha1 b/plugins/discovery-ec2/licenses/commons-codec-1.15.jar.sha1
new file mode 100644
index 0000000000000..62d99837b87e1
--- /dev/null
+++ b/plugins/discovery-ec2/licenses/commons-codec-1.15.jar.sha1
@@ -0,0 +1 @@
+49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d
\ No newline at end of file
diff --git a/plugins/discovery-ec2/licenses/httpcore-4.4.12.jar.sha1 b/plugins/discovery-ec2/licenses/httpcore-4.4.12.jar.sha1
deleted file mode 100644
index 3c046171b30da..0000000000000
--- a/plugins/discovery-ec2/licenses/httpcore-4.4.12.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-21ebaf6d532bc350ba95bd81938fa5f0e511c132
\ No newline at end of file
diff --git a/plugins/discovery-ec2/licenses/httpcore-4.4.15.jar.sha1 b/plugins/discovery-ec2/licenses/httpcore-4.4.15.jar.sha1
new file mode 100644
index 0000000000000..42a03b5d7a376
--- /dev/null
+++ b/plugins/discovery-ec2/licenses/httpcore-4.4.15.jar.sha1
@@ -0,0 +1 @@
+7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d
\ No newline at end of file
diff --git a/plugins/discovery-gce/licenses/commons-codec-1.13.jar.sha1 b/plugins/discovery-gce/licenses/commons-codec-1.13.jar.sha1
deleted file mode 100644
index 66b72c414d63a..0000000000000
--- a/plugins/discovery-gce/licenses/commons-codec-1.13.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3f18e1aa31031d89db6f01ba05d501258ce69d2c
\ No newline at end of file
diff --git a/plugins/discovery-gce/licenses/commons-codec-1.15.jar.sha1 b/plugins/discovery-gce/licenses/commons-codec-1.15.jar.sha1
new file mode 100644
index 0000000000000..62d99837b87e1
--- /dev/null
+++ b/plugins/discovery-gce/licenses/commons-codec-1.15.jar.sha1
@@ -0,0 +1 @@
+49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d
\ No newline at end of file
diff --git a/plugins/discovery-gce/licenses/httpcore-4.4.12.jar.sha1 b/plugins/discovery-gce/licenses/httpcore-4.4.12.jar.sha1
deleted file mode 100644
index 3c046171b30da..0000000000000
--- a/plugins/discovery-gce/licenses/httpcore-4.4.12.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-21ebaf6d532bc350ba95bd81938fa5f0e511c132
\ No newline at end of file
diff --git a/plugins/discovery-gce/licenses/httpcore-4.4.15.jar.sha1 b/plugins/discovery-gce/licenses/httpcore-4.4.15.jar.sha1
new file mode 100644
index 0000000000000..42a03b5d7a376
--- /dev/null
+++ b/plugins/discovery-gce/licenses/httpcore-4.4.15.jar.sha1
@@ -0,0 +1 @@
+7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/commons-codec-1.13.jar.sha1 b/plugins/ingest-attachment/licenses/commons-codec-1.13.jar.sha1
deleted file mode 100644
index 66b72c414d63a..0000000000000
--- a/plugins/ingest-attachment/licenses/commons-codec-1.13.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3f18e1aa31031d89db6f01ba05d501258ce69d2c
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/commons-codec-1.15.jar.sha1 b/plugins/ingest-attachment/licenses/commons-codec-1.15.jar.sha1
new file mode 100644
index 0000000000000..62d99837b87e1
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/commons-codec-1.15.jar.sha1
@@ -0,0 +1 @@
+49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/slf4j-api-1.6.2.jar.sha1 b/plugins/ingest-attachment/licenses/slf4j-api-1.6.2.jar.sha1
deleted file mode 100644
index a2f93ea55802b..0000000000000
--- a/plugins/ingest-attachment/licenses/slf4j-api-1.6.2.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-8619e95939167fb37245b5670135e4feb0ec7d50
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/ingest-attachment/licenses/slf4j-api-1.7.36.jar.sha1
new file mode 100644
index 0000000000000..77b9917528382
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/slf4j-api-1.7.36.jar.sha1
@@ -0,0 +1 @@
+6c62681a2f655b49963a5983b8b0950a6120ae14
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/slf4j-api-1.6.2.jar.sha1 b/plugins/repository-azure/licenses/slf4j-api-1.6.2.jar.sha1
deleted file mode 100644
index a2f93ea55802b..0000000000000
--- a/plugins/repository-azure/licenses/slf4j-api-1.6.2.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-8619e95939167fb37245b5670135e4feb0ec7d50
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/repository-azure/licenses/slf4j-api-1.7.36.jar.sha1
new file mode 100644
index 0000000000000..77b9917528382
--- /dev/null
+++ b/plugins/repository-azure/licenses/slf4j-api-1.7.36.jar.sha1
@@ -0,0 +1 @@
+6c62681a2f655b49963a5983b8b0950a6120ae14
\ No newline at end of file
diff --git a/plugins/repository-gcs/licenses/commons-codec-1.13.jar.sha1 b/plugins/repository-gcs/licenses/commons-codec-1.13.jar.sha1
deleted file mode 100644
index 66b72c414d63a..0000000000000
--- a/plugins/repository-gcs/licenses/commons-codec-1.13.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3f18e1aa31031d89db6f01ba05d501258ce69d2c
\ No newline at end of file
diff --git a/plugins/repository-gcs/licenses/commons-codec-1.15.jar.sha1 b/plugins/repository-gcs/licenses/commons-codec-1.15.jar.sha1
new file mode 100644
index 0000000000000..62d99837b87e1
--- /dev/null
+++ b/plugins/repository-gcs/licenses/commons-codec-1.15.jar.sha1
@@ -0,0 +1 @@
+49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/commons-codec-1.13.jar.sha1 b/plugins/repository-hdfs/licenses/commons-codec-1.13.jar.sha1
deleted file mode 100644
index 66b72c414d63a..0000000000000
--- a/plugins/repository-hdfs/licenses/commons-codec-1.13.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3f18e1aa31031d89db6f01ba05d501258ce69d2c
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/commons-codec-1.15.jar.sha1 b/plugins/repository-hdfs/licenses/commons-codec-1.15.jar.sha1
new file mode 100644
index 0000000000000..62d99837b87e1
--- /dev/null
+++ b/plugins/repository-hdfs/licenses/commons-codec-1.15.jar.sha1
@@ -0,0 +1 @@
+49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/slf4j-api-1.6.2.jar.sha1 b/plugins/repository-hdfs/licenses/slf4j-api-1.6.2.jar.sha1
deleted file mode 100644
index a2f93ea55802b..0000000000000
--- a/plugins/repository-hdfs/licenses/slf4j-api-1.6.2.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-8619e95939167fb37245b5670135e4feb0ec7d50
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/repository-hdfs/licenses/slf4j-api-1.7.36.jar.sha1
new file mode 100644
index 0000000000000..77b9917528382
--- /dev/null
+++ b/plugins/repository-hdfs/licenses/slf4j-api-1.7.36.jar.sha1
@@ -0,0 +1 @@
+6c62681a2f655b49963a5983b8b0950a6120ae14
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/commons-codec-1.13.jar.sha1 b/plugins/repository-s3/licenses/commons-codec-1.13.jar.sha1
deleted file mode 100644
index 66b72c414d63a..0000000000000
--- a/plugins/repository-s3/licenses/commons-codec-1.13.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3f18e1aa31031d89db6f01ba05d501258ce69d2c
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/commons-codec-1.15.jar.sha1 b/plugins/repository-s3/licenses/commons-codec-1.15.jar.sha1
new file mode 100644
index 0000000000000..62d99837b87e1
--- /dev/null
+++ b/plugins/repository-s3/licenses/commons-codec-1.15.jar.sha1
@@ -0,0 +1 @@
+49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/httpcore-4.4.12.jar.sha1 b/plugins/repository-s3/licenses/httpcore-4.4.12.jar.sha1
deleted file mode 100644
index 3c046171b30da..0000000000000
--- a/plugins/repository-s3/licenses/httpcore-4.4.12.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-21ebaf6d532bc350ba95bd81938fa5f0e511c132
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/httpcore-4.4.15.jar.sha1 b/plugins/repository-s3/licenses/httpcore-4.4.15.jar.sha1
new file mode 100644
index 0000000000000..42a03b5d7a376
--- /dev/null
+++ b/plugins/repository-s3/licenses/httpcore-4.4.15.jar.sha1
@@ -0,0 +1 @@
+7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d
\ No newline at end of file
From 65f966ed71ff9bc0a53490ee801943869c0f536d Mon Sep 17 00:00:00 2001
From: Tianli Feng
Date: Fri, 26 Aug 2022 15:42:31 -0700
Subject: [PATCH 014/187] Restore using the class ClusterInfoRequest and
ClusterInfoRequestBuilder from package
'org.opensearch.action.support.master.info' for subclasses (#4307)
* Restore using the class ClusterInfoRequest and ClusterInfoRequestBuilder from package 'org.opensearch.action.support.master.info' for subclasses
Signed-off-by: Tianli Feng
* Add changelog
Signed-off-by: Tianli Feng
Signed-off-by: Tianli Feng
Co-authored-by: Andrew Ross
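For context, the restore keeps source compatibility for downstream subclasses
that compile against the legacy package path. A hypothetical example (this
class is illustrative only and not part of the patch):

    import org.opensearch.action.ActionRequestValidationException;
    import org.opensearch.action.support.master.info.ClusterInfoRequest;

    // Hypothetical plugin request type bound to the legacy 'master.info' path;
    // relocating the parent hierarchy to 'clustermanager.info' would break it.
    public class CustomInfoRequest extends ClusterInfoRequest<CustomInfoRequest> {
        @Override
        public ActionRequestValidationException validate() {
            return null; // no additional validation in this sketch
        }
    }

The new GetIndexRequestTests below asserts exactly this relationship:
GetIndexRequest remains an instanceof the legacy-path ClusterInfoRequest.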
---
CHANGELOG.md | 1 +
.../admin/indices/get/GetIndexRequest.java | 2 +-
.../indices/get/GetIndexRequestBuilder.java | 2 +-
.../mapping/get/GetMappingsRequest.java | 2 +-
.../get/GetMappingsRequestBuilder.java | 2 +-
.../indices/get/GetIndexRequestTests.java | 21 +++++++++++++++++++
6 files changed, 26 insertions(+), 4 deletions(-)
create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexRequestTests.java
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 26ff011609635..8132c1281e412 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,6 +15,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
### Fixed
- `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run ([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289))
- PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296))
+- Restore using the class ClusterInfoRequest and ClusterInfoRequestBuilder from package 'org.opensearch.action.support.master.info' for subclasses ([#4307](https://github.com/opensearch-project/OpenSearch/pull/4307))
- Do not fail replica shard due to primary closure ([#4133](https://github.com/opensearch-project/OpenSearch/pull/4133))
### Security
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java
index ee0b204c77aa3..9a7fae9f84a98 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java
@@ -33,7 +33,7 @@
package org.opensearch.action.admin.indices.get;
import org.opensearch.action.ActionRequestValidationException;
-import org.opensearch.action.support.clustermanager.info.ClusterInfoRequest;
+import org.opensearch.action.support.master.info.ClusterInfoRequest;
import org.opensearch.common.io.stream.StreamInput;
import org.opensearch.common.io.stream.StreamOutput;
import org.opensearch.common.util.ArrayUtils;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java
index ed106c44ea36a..3019191e5570e 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java
@@ -32,7 +32,7 @@
package org.opensearch.action.admin.indices.get;
-import org.opensearch.action.support.clustermanager.info.ClusterInfoRequestBuilder;
+import org.opensearch.action.support.master.info.ClusterInfoRequestBuilder;
import org.opensearch.client.OpenSearchClient;
/**
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java
index 1fd9323edd2f8..2c9bec8398b66 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java
@@ -33,7 +33,7 @@
package org.opensearch.action.admin.indices.mapping.get;
import org.opensearch.action.ActionRequestValidationException;
-import org.opensearch.action.support.clustermanager.info.ClusterInfoRequest;
+import org.opensearch.action.support.master.info.ClusterInfoRequest;
import org.opensearch.common.io.stream.StreamInput;
import java.io.IOException;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java
index 0a6d7cac79133..85bf8c2ffd9c6 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java
@@ -32,7 +32,7 @@
package org.opensearch.action.admin.indices.mapping.get;
-import org.opensearch.action.support.clustermanager.info.ClusterInfoRequestBuilder;
+import org.opensearch.action.support.master.info.ClusterInfoRequestBuilder;
import org.opensearch.client.OpenSearchClient;
/**
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexRequestTests.java
new file mode 100644
index 0000000000000..f0d3db71c27b7
--- /dev/null
+++ b/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexRequestTests.java
@@ -0,0 +1,21 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.admin.indices.get;
+
+import org.opensearch.action.support.master.info.ClusterInfoRequest;
+import org.opensearch.test.OpenSearchTestCase;
+
+import static org.hamcrest.Matchers.is;
+
+public class GetIndexRequestTests extends OpenSearchTestCase {
+ public void testGetIndexRequestExtendsClusterInfoRequestOfDeprecatedClassPath() {
+ GetIndexRequest getIndexRequest = new GetIndexRequest().indices("test");
+ assertThat(getIndexRequest instanceof ClusterInfoRequest, is(true));
+ }
+}
From 7ea6e8865fa007471c187fe7b6cd7007059d6c69 Mon Sep 17 00:00:00 2001
From: Alex Burck
Date: Mon, 29 Aug 2022 10:05:48 -0500
Subject: [PATCH 015/187] [BUG] Create logs directory before running OpenSearch
on Windows (#4305)
* [BUG] Create logs directory before running OpenSearch on Windows
Signed-off-by: Alex Burck
* update changelog pr link
Signed-off-by: Alex Burck
Signed-off-by: Alex Burck
---
CHANGELOG.md | 1 +
distribution/src/bin/opensearch-service.bat | 4 ++++
distribution/src/bin/opensearch.bat | 6 ++++++
3 files changed, 11 insertions(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8132c1281e412..360b47d05ff8f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,6 +15,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
### Fixed
- `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run ([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289))
- PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296))
+- `opensearch.bat` and `opensearch-service.bat install` failing to run, missing logs directory ([#4305](https://github.com/opensearch-project/OpenSearch/pull/4305))
- Restore using the class ClusterInfoRequest and ClusterInfoRequestBuilder from package 'org.opensearch.action.support.master.info' for subclasses ([#4307](https://github.com/opensearch-project/OpenSearch/pull/4307))
- Do not fail replica shard due to primary closure ([#4133](https://github.com/opensearch-project/OpenSearch/pull/4133))
diff --git a/distribution/src/bin/opensearch-service.bat b/distribution/src/bin/opensearch-service.bat
index 8b91d806ef64f..a11dc8316e8b1 100644
--- a/distribution/src/bin/opensearch-service.bat
+++ b/distribution/src/bin/opensearch-service.bat
@@ -24,6 +24,10 @@ exit /B 1
set OPENSEARCH_VERSION=${project.version}
if "%SERVICE_LOG_DIR%" == "" set SERVICE_LOG_DIR=%OPENSEARCH_HOME%\logs
+rem The logs directory must exist for the service to start.
+if not exist "%SERVICE_LOG_DIR%" (
+ mkdir "%SERVICE_LOG_DIR%"
+)
if "x%1x" == "xx" goto displayUsage
set SERVICE_CMD=%1
diff --git a/distribution/src/bin/opensearch.bat b/distribution/src/bin/opensearch.bat
index 49a12aa5c968d..dda15124e1654 100644
--- a/distribution/src/bin/opensearch.bat
+++ b/distribution/src/bin/opensearch.bat
@@ -56,6 +56,12 @@ IF ERRORLEVEL 1 (
EXIT /B %ERRORLEVEL%
)
+if "%SERVICE_LOG_DIR%" == "" set SERVICE_LOG_DIR=%OPENSEARCH_HOME%\logs
+rem The logs directory must exist for the service to start.
+if not exist "%SERVICE_LOG_DIR%" (
+ mkdir "%SERVICE_LOG_DIR%"
+)
+
SET KEYSTORE_PASSWORD=
IF "%checkpassword%"=="Y" (
CALL "%~dp0opensearch-keystore.bat" has-passwd --silent
From cd961f39bf57ae92b4486451ce2841b9682c2582 Mon Sep 17 00:00:00 2001
From: Sachin Kale
Date: Mon, 29 Aug 2022 22:47:52 +0530
Subject: [PATCH 016/187] Use RemoteSegmentStoreDirectory instead of
RemoteDirectory (#4240)
* Use RemoteSegmentStoreDirectory instead of RemoteDirectory
Signed-off-by: Sachin Kale
---
CHANGELOG.md | 3 +-
.../org/opensearch/index/IndexModule.java | 3 +-
.../opensearch/index/shard/IndexShard.java | 5 +-
.../shard/RemoteStoreRefreshListener.java | 176 +++++++++---
.../opensearch/index/shard/StoreRecovery.java | 7 +-
.../index/store/RemoteIndexInput.java | 35 ++-
.../store/RemoteSegmentStoreDirectory.java | 75 ++++-
...> RemoteSegmentStoreDirectoryFactory.java} | 22 +-
.../opensearch/indices/IndicesService.java | 5 +-
.../main/java/org/opensearch/node/Node.java | 6 +-
.../opensearch/index/IndexModuleTests.java | 4 +-
.../index/shard/IndexShardTests.java | 3 +-
.../RemoteStoreRefreshListenerTests.java | 259 ++++++++++++------
.../index/store/RemoteIndexInputTests.java | 31 ++-
...oteSegmentStoreDirectoryFactoryTests.java} | 28 +-
.../RemoteSegmentStoreDirectoryTests.java | 160 +++++++++--
.../snapshots/SnapshotResiliencyTests.java | 4 +-
.../index/shard/IndexShardTestCase.java | 19 +-
18 files changed, 628 insertions(+), 217 deletions(-)
rename server/src/main/java/org/opensearch/index/store/{RemoteDirectoryFactory.java => RemoteSegmentStoreDirectoryFactory.java} (58%)
rename server/src/test/java/org/opensearch/index/store/{RemoteDirectoryFactoryTests.java => RemoteSegmentStoreDirectoryFactoryTests.java} (70%)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 360b47d05ff8f..f11f407434e6b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,7 +6,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
- Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085))
### Changed
- - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308))
+- Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308))
+- Use RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240))
### Deprecated
diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java
index f8604caeab414..e52a2ba39ed52 100644
--- a/server/src/main/java/org/opensearch/index/IndexModule.java
+++ b/server/src/main/java/org/opensearch/index/IndexModule.java
@@ -70,7 +70,6 @@
import org.opensearch.index.shard.SearchOperationListener;
import org.opensearch.index.similarity.SimilarityService;
import org.opensearch.index.store.FsDirectoryFactory;
-import org.opensearch.index.store.RemoteDirectoryFactory;
import org.opensearch.indices.IndicesQueryCache;
import org.opensearch.indices.breaker.CircuitBreakerService;
import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache;
@@ -487,7 +486,7 @@ public IndexService newIndexService(
NamedWriteableRegistry namedWriteableRegistry,
BooleanSupplier idFieldDataEnabled,
ValuesSourceRegistry valuesSourceRegistry,
- RemoteDirectoryFactory remoteDirectoryFactory
+ IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory
) throws IOException {
final IndexEventListener eventListener = freeze();
Function<IndexService, CheckedFunction<DirectoryReader, DirectoryReader, IOException>> readerWrapperFactory = indexReaderWrapper
diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java
index 67a8e691fda0d..670af1f1c6fd9 100644
--- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java
+++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java
@@ -48,8 +48,6 @@
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.UsageTrackingQueryCachingPolicy;
import org.apache.lucene.store.AlreadyClosedException;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.util.SetOnce;
import org.apache.lucene.util.ThreadInterruptedException;
import org.opensearch.Assertions;
@@ -3228,8 +3226,7 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro
final List<ReferenceManager.RefreshListener> internalRefreshListener = new ArrayList<>();
internalRefreshListener.add(new RefreshMetricUpdater(refreshMetric));
if (isRemoteStoreEnabled()) {
- Directory remoteDirectory = ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate();
- internalRefreshListener.add(new RemoteStoreRefreshListener(store.directory(), remoteDirectory));
+ internalRefreshListener.add(new RemoteStoreRefreshListener(this));
}
if (this.checkpointPublisher != null && indexSettings.isSegRepEnabled() && shardRouting.primary()) {
internalRefreshListener.add(new CheckpointRefreshListener(this, this.checkpointPublisher));
diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java
index 4b549ec485c0e..0d32e8d56e4d2 100644
--- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java
+++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java
@@ -11,32 +11,54 @@
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.index.IndexFileNames;
+import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.search.ReferenceManager;
import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.opensearch.common.concurrent.GatedCloseable;
+import org.opensearch.index.engine.EngineException;
+import org.opensearch.index.store.RemoteSegmentStoreDirectory;
import java.io.IOException;
-import java.nio.file.NoSuchFileException;
-import java.util.Arrays;
-import java.util.HashSet;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.Collectors;
/**
* RefreshListener implementation to upload newly created segment files to the remote store
+ *
+ * @opensearch.internal
*/
-public class RemoteStoreRefreshListener implements ReferenceManager.RefreshListener {
+public final class RemoteStoreRefreshListener implements ReferenceManager.RefreshListener {
+ // Visible for testing
+ static final Set<String> EXCLUDE_FILES = Set.of("write.lock");
+ // Visible for testing
+ static final int LAST_N_METADATA_FILES_TO_KEEP = 10;
+ private final IndexShard indexShard;
private final Directory storeDirectory;
- private final Directory remoteDirectory;
- // ToDo: This can be a map with metadata of the uploaded file as value of the map (GitHub #3398)
- private final Set<String> filesUploadedToRemoteStore;
+ private final RemoteSegmentStoreDirectory remoteDirectory;
+ private final Map<String, String> localSegmentChecksumMap;
+ private long primaryTerm;
private static final Logger logger = LogManager.getLogger(RemoteStoreRefreshListener.class);
- public RemoteStoreRefreshListener(Directory storeDirectory, Directory remoteDirectory) throws IOException {
- this.storeDirectory = storeDirectory;
- this.remoteDirectory = remoteDirectory;
- // ToDo: Handle failures in reading list of files (GitHub #3397)
- this.filesUploadedToRemoteStore = new HashSet<>(Arrays.asList(remoteDirectory.listAll()));
+ public RemoteStoreRefreshListener(IndexShard indexShard) {
+ this.indexShard = indexShard;
+ this.storeDirectory = indexShard.store().directory();
+ this.remoteDirectory = (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore().directory())
+ .getDelegate()).getDelegate();
+ this.primaryTerm = indexShard.getOperationPrimaryTerm();
+ localSegmentChecksumMap = new HashMap<>();
}
@Override
@@ -46,42 +68,112 @@ public void beforeRefresh() throws IOException {
/**
* Upload new segment files created as part of the last refresh to the remote segment store.
- * The method also deletes segment files from remote store which are not part of local filesystem.
+ * This method also uploads a remote_segments_metadata file, which contains metadata for each uploaded segment file.
* @param didRefresh true if the refresh opened a new reference
- * @throws IOException in case of I/O error in reading list of local files
*/
@Override
- public void afterRefresh(boolean didRefresh) throws IOException {
- if (didRefresh) {
- Set<String> localFiles = Set.of(storeDirectory.listAll());
- localFiles.stream().filter(file -> !filesUploadedToRemoteStore.contains(file)).forEach(file -> {
- try {
- remoteDirectory.copyFrom(storeDirectory, file, file, IOContext.DEFAULT);
- filesUploadedToRemoteStore.add(file);
- } catch (NoSuchFileException e) {
- logger.info(
- () -> new ParameterizedMessage("The file {} does not exist anymore. It can happen in case of temp files", file),
- e
- );
- } catch (IOException e) {
- // ToDO: Handle transient and permanent un-availability of the remote store (GitHub #3397)
- logger.warn(() -> new ParameterizedMessage("Exception while uploading file {} to the remote segment store", file), e);
- }
- });
+ public void afterRefresh(boolean didRefresh) {
+ synchronized (this) {
+ try {
+ if (indexShard.shardRouting.primary()) {
+ if (this.primaryTerm != indexShard.getOperationPrimaryTerm()) {
+ this.primaryTerm = indexShard.getOperationPrimaryTerm();
+ this.remoteDirectory.init();
+ }
+ try {
+ String lastCommittedLocalSegmentFileName = SegmentInfos.getLastCommitSegmentsFileName(storeDirectory);
+ if (!remoteDirectory.containsFile(
+ lastCommittedLocalSegmentFileName,
+ getChecksumOfLocalFile(lastCommittedLocalSegmentFileName)
+ )) {
+ deleteStaleCommits();
+ }
+ try (GatedCloseable<SegmentInfos> segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) {
+ SegmentInfos segmentInfos = segmentInfosGatedCloseable.get();
+ Collection<String> refreshedLocalFiles = segmentInfos.files(true);
+
+ List<String> segmentInfosFiles = refreshedLocalFiles.stream()
+ .filter(file -> file.startsWith(IndexFileNames.SEGMENTS))
+ .collect(Collectors.toList());
+ Optional<String> latestSegmentInfos = segmentInfosFiles.stream()
+ .max(Comparator.comparingLong(IndexFileNames::parseGeneration));
- Set<String> remoteFilesToBeDeleted = new HashSet<>();
- // ToDo: Instead of deleting files in sync, mark them and delete in async/periodic flow (GitHub #3142)
- filesUploadedToRemoteStore.stream().filter(file -> !localFiles.contains(file)).forEach(file -> {
- try {
- remoteDirectory.deleteFile(file);
- remoteFilesToBeDeleted.add(file);
- } catch (IOException e) {
- // ToDO: Handle transient and permanent un-availability of the remote store (GitHub #3397)
- logger.warn(() -> new ParameterizedMessage("Exception while deleting file {} from the remote segment store", file), e);
+ if (latestSegmentInfos.isPresent()) {
+ refreshedLocalFiles.addAll(SegmentInfos.readCommit(storeDirectory, latestSegmentInfos.get()).files(true));
+ segmentInfosFiles.stream()
+ .filter(file -> !file.equals(latestSegmentInfos.get()))
+ .forEach(refreshedLocalFiles::remove);
+
+ boolean uploadStatus = uploadNewSegments(refreshedLocalFiles);
+ if (uploadStatus) {
+ remoteDirectory.uploadMetadata(
+ refreshedLocalFiles,
+ storeDirectory,
+ indexShard.getOperationPrimaryTerm(),
+ segmentInfos.getGeneration()
+ );
+ localSegmentChecksumMap.keySet()
+ .stream()
+ .filter(file -> !refreshedLocalFiles.contains(file))
+ .collect(Collectors.toSet())
+ .forEach(localSegmentChecksumMap::remove);
+ }
+ }
+ } catch (EngineException e) {
+ logger.warn("Exception while reading SegmentInfosSnapshot", e);
+ }
+ } catch (IOException e) {
+ // We don't want to fail the refresh if the upload of new segments fails. The missed segments will be retried
+ // on the next refresh. This should not affect durability of the indexed data once remote translog integration lands.
+ logger.warn("Exception while uploading new segments to the remote segment store", e);
+ }
}
- });
+ } catch (Throwable t) {
+ logger.error("Exception in RemoteStoreRefreshListener.afterRefresh()", t);
+ }
+ }
+ }
+
+ // Visible for testing
+ boolean uploadNewSegments(Collection<String> localFiles) throws IOException {
+ AtomicBoolean uploadSuccess = new AtomicBoolean(true);
+ localFiles.stream().filter(file -> !EXCLUDE_FILES.contains(file)).filter(file -> {
+ try {
+ return !remoteDirectory.containsFile(file, getChecksumOfLocalFile(file));
+ } catch (IOException e) {
+ logger.info(
+ "Exception while reading checksum of local segment file: {}, ignoring the exception and re-uploading the file",
+ file
+ );
+ return true;
+ }
+ }).forEach(file -> {
+ try {
+ remoteDirectory.copyFrom(storeDirectory, file, file, IOContext.DEFAULT);
+ } catch (IOException e) {
+ uploadSuccess.set(false);
+ // TODO: Handle transient and permanent unavailability of the remote store (GitHub #3397)
+ logger.warn(() -> new ParameterizedMessage("Exception while uploading file {} to the remote segment store", file), e);
+ }
+ });
+ return uploadSuccess.get();
+ }
+
+ private String getChecksumOfLocalFile(String file) throws IOException {
+ if (!localSegmentChecksumMap.containsKey(file)) {
+ try (IndexInput indexInput = storeDirectory.openInput(file, IOContext.DEFAULT)) {
+ String checksum = Long.toString(CodecUtil.retrieveChecksum(indexInput));
+ localSegmentChecksumMap.put(file, checksum);
+ }
+ }
+ return localSegmentChecksumMap.get(file);
+ }
- remoteFilesToBeDeleted.forEach(filesUploadedToRemoteStore::remove);
+ private void deleteStaleCommits() {
+ try {
+ remoteDirectory.deleteStaleSegments(LAST_N_METADATA_FILES_TO_KEEP);
+ } catch (IOException e) {
+ logger.info("Exception while deleting stale commits from remote segment store, will retry delete post next commit", e);
}
}
}
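The dedupe in uploadNewSegments/getChecksumOfLocalFile compares a locally read checksum against what the remote metadata recorded. A minimal sketch of the local half, using the same Lucene calls as the listener (the Directory and file name are assumptions for illustration):
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import java.io.IOException;
final class ChecksumProbe {
    // CodecUtil.retrieveChecksum returns the checksum Lucene recorded in the
    // file's codec footer, so the containsFile() dedupe check stays cheap.
    static String footerChecksum(Directory dir, String file) throws IOException {
        try (IndexInput in = dir.openInput(file, IOContext.DEFAULT)) {
            return Long.toString(CodecUtil.retrieveChecksum(in));
        }
    }
}
The listener additionally memoizes these values in localSegmentChecksumMap and evicts entries once a file drops out of the refreshed set.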
diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java
index 1190e8e6ab3d2..06916c4cc87fe 100644
--- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java
+++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java
@@ -449,7 +449,12 @@ private void recoverFromRemoteStore(IndexShard indexShard) throws IndexShardReco
}
indexShard.preRecovery();
indexShard.prepareForIndexRecovery();
- final Directory remoteDirectory = remoteStore.directory();
+ assert remoteStore.directory() instanceof FilterDirectory : "Store.directory is not an instance of FilterDirectory";
+ FilterDirectory remoteStoreDirectory = (FilterDirectory) remoteStore.directory();
+ assert remoteStoreDirectory.getDelegate() instanceof FilterDirectory
+ : "Store.directory is not enclosing an instance of FilterDirectory";
+ FilterDirectory byteSizeCachingStoreDirectory = (FilterDirectory) remoteStoreDirectory.getDelegate();
+ final Directory remoteDirectory = byteSizeCachingStoreDirectory.getDelegate();
final Store store = indexShard.store();
final Directory storeDirectory = store.directory();
store.incRef();
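The two asserts above pin the expected wrapper depth: Store's directory wraps a byte-size-caching FilterDirectory, which wraps the actual remote directory. A hedged, depth-agnostic version of the same unwrapping (Lucene's FilterDirectory.unwrap behaves this way):
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FilterDirectory;
final class DirectoryUnwrap {
    // Walks the delegate chain to the innermost Directory, whatever the depth.
    static Directory innermost(Directory dir) {
        while (dir instanceof FilterDirectory) {
            dir = ((FilterDirectory) dir).getDelegate();
        }
        return dir;
    }
}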
diff --git a/server/src/main/java/org/opensearch/index/store/RemoteIndexInput.java b/server/src/main/java/org/opensearch/index/store/RemoteIndexInput.java
index 8f8d5dd5418ae..2c809563ca961 100644
--- a/server/src/main/java/org/opensearch/index/store/RemoteIndexInput.java
+++ b/server/src/main/java/org/opensearch/index/store/RemoteIndexInput.java
@@ -27,27 +27,37 @@ public class RemoteIndexInput extends IndexInput {
private final InputStream inputStream;
private final long size;
+ private long filePointer;
public RemoteIndexInput(String name, InputStream inputStream, long size) {
super(name);
this.inputStream = inputStream;
this.size = size;
+ this.filePointer = 0;
}
@Override
public byte readByte() throws IOException {
byte[] buffer = new byte[1];
- inputStream.read(buffer);
+ int numberOfBytesRead = inputStream.read(buffer);
+ if (numberOfBytesRead != -1) {
+ filePointer += numberOfBytesRead;
+ }
return buffer[0];
}
@Override
public void readBytes(byte[] b, int offset, int len) throws IOException {
int bytesRead = inputStream.read(b, offset, len);
- while (bytesRead > 0 && bytesRead < len) {
- len -= bytesRead;
- offset += bytesRead;
- bytesRead = inputStream.read(b, offset, len);
+ if (bytesRead == len) {
+ filePointer += bytesRead;
+ } else {
+ while (bytesRead > 0 && bytesRead < len) {
+ filePointer += bytesRead;
+ len -= bytesRead;
+ offset += bytesRead;
+ bytesRead = inputStream.read(b, offset, len);
+ }
}
}
@@ -61,11 +71,6 @@ public long length() {
return size;
}
- @Override
- public void seek(long pos) throws IOException {
- inputStream.skip(pos);
- }
-
/**
* Guaranteed to throw an exception and leave the RemoteIndexInput unmodified.
* This method is not implemented as it is not used for the file transfer to/from the remote store.
@@ -73,10 +78,18 @@ public void seek(long pos) throws IOException {
* @throws UnsupportedOperationException always
*/
@Override
- public long getFilePointer() {
+ public void seek(long pos) throws IOException {
throw new UnsupportedOperationException();
}
+ /**
+ * Returns the current position in this file in terms of number of bytes read so far.
+ */
+ @Override
+ public long getFilePointer() {
+ return filePointer;
+ }
+
/**
* Guaranteed to throw an exception and leave the RemoteIndexInput unmodified.
* This method is not implemented as it is not used for the file transfer to/from the remote store.
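The readBytes rework above exists because InputStream.read may deliver fewer bytes than requested, so a single call (or a loop that miscounts) corrupts both the buffer fill and the new filePointer bookkeeping. A standalone sketch of the read-until-full pattern (names are illustrative):
import java.io.IOException;
import java.io.InputStream;
final class StreamReads {
    // Reads until len bytes arrive or the stream ends; the return value is
    // how far a caller's file pointer should advance.
    static int readFully(InputStream in, byte[] b, int off, int len) throws IOException {
        int total = 0;
        while (total < len) {
            int n = in.read(b, off + total, len - total);
            if (n < 0) {
                break; // stream ended before len bytes
            }
            total += n;
        }
        return total;
    }
}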
diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java
index d7d6b29d08bfc..505ad6fafd550 100644
--- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java
+++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java
@@ -24,9 +24,13 @@
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
import java.util.Map;
import java.util.Optional;
+import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;
/**
@@ -132,8 +136,9 @@ private Map<String, UploadedSegmentMetadata> readMetadataFile(String metadataFil
/**
* Metadata of a segment that is uploaded to remote segment store.
*/
- static class UploadedSegmentMetadata {
- private static final String SEPARATOR = "::";
+ public static class UploadedSegmentMetadata {
+ // Visible for testing
+ static final String SEPARATOR = "::";
private final String originalFilename;
private final String uploadedFilename;
private final String checksum;
@@ -366,7 +371,69 @@ private String getLocalSegmentFilename(String remoteFilename) {
}
// Visible for testing
- Map<String, UploadedSegmentMetadata> getSegmentsUploadedToRemoteStore() {
- return this.segmentsUploadedToRemoteStore;
+ public Map<String, UploadedSegmentMetadata> getSegmentsUploadedToRemoteStore() {
+ return Collections.unmodifiableMap(this.segmentsUploadedToRemoteStore);
+ }
+
+ /**
+ * Delete stale segment and metadata files.
+ * One metadata file is kept per commit (refresh updates the same file). To read the segments uploaded to the remote store,
+ * we only need the latest metadata file; all older metadata files can be safely deleted.
+ * @param lastNMetadataFilesToKeep number of metadata files to keep
+ * @throws IOException in case of I/O error while reading from / writing to remote segment store
+ */
+ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException {
+ Collection<String> metadataFiles = remoteMetadataDirectory.listFilesByPrefix(MetadataFilenameUtils.METADATA_PREFIX);
+ List<String> sortedMetadataFileList = metadataFiles.stream().sorted(METADATA_FILENAME_COMPARATOR).collect(Collectors.toList());
+ if (sortedMetadataFileList.size() <= lastNMetadataFilesToKeep) {
+ logger.info(
+ "Number of commits in remote segment store={}, lastNMetadataFilesToKeep={}",
+ sortedMetadataFileList.size(),
+ lastNMetadataFilesToKeep
+ );
+ return;
+ }
+ List<String> latestNMetadataFiles = sortedMetadataFileList.subList(
+ sortedMetadataFileList.size() - lastNMetadataFilesToKeep,
+ sortedMetadataFileList.size()
+ );
+ Map<String, UploadedSegmentMetadata> activeSegmentFilesMetadataMap = new HashMap<>();
+ Set<String> activeSegmentRemoteFilenames = new HashSet<>();
+ for (String metadataFile : latestNMetadataFiles) {
+ Map<String, UploadedSegmentMetadata> segmentMetadataMap = readMetadataFile(metadataFile);
+ activeSegmentFilesMetadataMap.putAll(segmentMetadataMap);
+ activeSegmentRemoteFilenames.addAll(
+ segmentMetadataMap.values().stream().map(metadata -> metadata.uploadedFilename).collect(Collectors.toSet())
+ );
+ }
+ for (String metadataFile : sortedMetadataFileList.subList(0, sortedMetadataFileList.size() - lastNMetadataFilesToKeep)) {
+ Map<String, UploadedSegmentMetadata> staleSegmentFilesMetadataMap = readMetadataFile(metadataFile);
+ Set<String> staleSegmentRemoteFilenames = staleSegmentFilesMetadataMap.values()
+ .stream()
+ .map(metadata -> metadata.uploadedFilename)
+ .collect(Collectors.toSet());
+ AtomicBoolean deletionSuccessful = new AtomicBoolean(true);
+ staleSegmentRemoteFilenames.stream().filter(file -> !activeSegmentRemoteFilenames.contains(file)).forEach(file -> {
+ try {
+ remoteDataDirectory.deleteFile(file);
+ if (!activeSegmentFilesMetadataMap.containsKey(getLocalSegmentFilename(file))) {
+ segmentsUploadedToRemoteStore.remove(getLocalSegmentFilename(file));
+ }
+ } catch (NoSuchFileException e) {
+ logger.info("Segment file {} corresponding to metadata file {} does not exist in remote", file, metadataFile);
+ } catch (IOException e) {
+ deletionSuccessful.set(false);
+ logger.info(
+ "Exception while deleting segment file {} corresponding to metadata file {}. Deletion will be re-tried",
+ file,
+ metadataFile
+ );
+ }
+ });
+ if (deletionSuccessful.get()) {
+ logger.info("Deleting stale metadata file {} from remote segment store", metadataFile);
+ remoteMetadataDirectory.deleteFile(metadataFile);
+ }
+ }
}
}
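Stripped of the per-file deletion details, deleteStaleSegments is a keep-last-N split over the comparator-sorted metadata file list. A minimal sketch of that split (assuming, as above, the sort puts the oldest file first):
import java.util.List;
final class RetentionSplit {
    // Everything before the last `keep` entries is eligible for deletion.
    static List<String> staleEntries(List<String> sortedOldestFirst, int keep) {
        int cut = Math.max(0, sortedOldestFirst.size() - keep);
        return sortedOldestFirst.subList(0, cut);
    }
}
With the three files stubbed in the tests later in this patch and keep=2, only metadata__1__5__abc comes back as stale, which is exactly what testDeleteStaleCommitsActualDelete verifies.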
diff --git a/server/src/main/java/org/opensearch/index/store/RemoteDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java
similarity index 58%
rename from server/src/main/java/org/opensearch/index/store/RemoteDirectoryFactory.java
rename to server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java
index 62f398cdad207..e77eb52bd3891 100644
--- a/server/src/main/java/org/opensearch/index/store/RemoteDirectoryFactory.java
+++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java
@@ -27,11 +27,11 @@
*
* @opensearch.internal
*/
-public class RemoteDirectoryFactory implements IndexStorePlugin.RemoteDirectoryFactory {
+public class RemoteSegmentStoreDirectoryFactory implements IndexStorePlugin.RemoteDirectoryFactory {
private final Supplier<RepositoriesService> repositoriesService;
- public RemoteDirectoryFactory(Supplier<RepositoriesService> repositoriesService) {
+ public RemoteSegmentStoreDirectoryFactory(Supplier<RepositoriesService> repositoriesService) {
this.repositoriesService = repositoriesService;
}
@@ -39,13 +39,23 @@ public RemoteDirectoryFactory(Supplier<RepositoriesService> repositoriesService)
public Directory newDirectory(String repositoryName, IndexSettings indexSettings, ShardPath path) throws IOException {
try (Repository repository = repositoriesService.get().repository(repositoryName)) {
assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository";
- BlobPath blobPath = new BlobPath();
- blobPath = blobPath.add(indexSettings.getIndex().getName()).add(String.valueOf(path.getShardId().getId()));
- BlobContainer blobContainer = ((BlobStoreRepository) repository).blobStore().blobContainer(blobPath);
- return new RemoteDirectory(blobContainer);
+ BlobPath commonBlobPath = ((BlobStoreRepository) repository).basePath();
+ commonBlobPath = commonBlobPath.add(indexSettings.getIndex().getUUID())
+ .add(String.valueOf(path.getShardId().getId()))
+ .add("segments");
+
+ RemoteDirectory dataDirectory = createRemoteDirectory(repository, commonBlobPath, "data");
+ RemoteDirectory metadataDirectory = createRemoteDirectory(repository, commonBlobPath, "metadata");
+
+ return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory);
} catch (RepositoryMissingException e) {
throw new IllegalArgumentException("Repository should be created before creating index with remote_store enabled setting", e);
}
}
+ private RemoteDirectory createRemoteDirectory(Repository repository, BlobPath commonBlobPath, String extension) {
+ BlobPath extendedPath = commonBlobPath.add(extension);
+ BlobContainer blobContainer = ((BlobStoreRepository) repository).blobStore().blobContainer(extendedPath);
+ return new RemoteDirectory(blobContainer);
+ }
}
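The factory now hangs two sibling containers off one shard-scoped path instead of a single index-name-based container. A small sketch of the resulting layout using the same BlobPath API (base path and UUID values are placeholders; compare the assertions in RemoteSegmentStoreDirectoryFactoryTests below):
import org.opensearch.common.blobstore.BlobPath;
final class PathLayout {
    public static void main(String[] args) {
        BlobPath common = new BlobPath().add("base_path").add("index-uuid").add("0").add("segments");
        // Two sibling containers hang off the shard's "segments" path.
        System.out.println(common.add("data").buildAsString());     // base_path/index-uuid/0/segments/data/
        System.out.println(common.add("metadata").buildAsString()); // base_path/index-uuid/0/segments/metadata/
    }
}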
diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java
index fdb609ba7bbff..6808803ee0988 100644
--- a/server/src/main/java/org/opensearch/indices/IndicesService.java
+++ b/server/src/main/java/org/opensearch/indices/IndicesService.java
@@ -132,7 +132,6 @@
import org.opensearch.index.shard.IndexingOperationListener;
import org.opensearch.index.shard.IndexingStats;
import org.opensearch.index.shard.ShardId;
-import org.opensearch.index.store.RemoteDirectoryFactory;
import org.opensearch.indices.breaker.CircuitBreakerService;
import org.opensearch.indices.cluster.IndicesClusterStateService;
import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache;
@@ -266,7 +265,7 @@ public class IndicesService extends AbstractLifecycleComponent
private final Set<Index> danglingIndicesToWrite = Sets.newConcurrentHashSet();
private final boolean nodeWriteDanglingIndicesInfo;
private final ValuesSourceRegistry valuesSourceRegistry;
- private final RemoteDirectoryFactory remoteDirectoryFactory;
+ private final IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory;
@Override
protected void doStart() {
@@ -295,7 +294,7 @@ public IndicesService(
Map<String, IndexStorePlugin.DirectoryFactory> directoryFactories,
ValuesSourceRegistry valuesSourceRegistry,
Map<String, IndexStorePlugin.RecoveryStateFactory> recoveryStateFactories,
- RemoteDirectoryFactory remoteDirectoryFactory
+ IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory
) {
this.settings = settings;
this.threadPool = threadPool;
diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java
index d3f0912cab638..3f4eadc52fd2a 100644
--- a/server/src/main/java/org/opensearch/node/Node.java
+++ b/server/src/main/java/org/opensearch/node/Node.java
@@ -39,12 +39,12 @@
import org.opensearch.common.util.FeatureFlags;
import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance;
import org.opensearch.index.IndexingPressureService;
-import org.opensearch.index.store.RemoteDirectoryFactory;
import org.opensearch.indices.replication.SegmentReplicationSourceFactory;
import org.opensearch.indices.replication.SegmentReplicationTargetService;
import org.opensearch.indices.replication.SegmentReplicationSourceService;
import org.opensearch.tasks.TaskResourceTrackingService;
import org.opensearch.threadpool.RunnableTaskExecutionListener;
+import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory;
import org.opensearch.watcher.ResourceWatcherService;
import org.opensearch.Assertions;
import org.opensearch.Build;
@@ -629,7 +629,9 @@ protected Node(
rerouteServiceReference.set(rerouteService);
clusterService.setRerouteService(rerouteService);
- final RemoteDirectoryFactory remoteDirectoryFactory = new RemoteDirectoryFactory(repositoriesServiceReference::get);
+ final IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory = new RemoteSegmentStoreDirectoryFactory(
+ repositoriesServiceReference::get
+ );
final IndicesService indicesService = new IndicesService(
settings,
diff --git a/server/src/test/java/org/opensearch/index/IndexModuleTests.java b/server/src/test/java/org/opensearch/index/IndexModuleTests.java
index 45d93a5a12847..6bfdd9ae16773 100644
--- a/server/src/test/java/org/opensearch/index/IndexModuleTests.java
+++ b/server/src/test/java/org/opensearch/index/IndexModuleTests.java
@@ -89,7 +89,7 @@
import org.opensearch.index.similarity.NonNegativeScoresSimilarity;
import org.opensearch.index.similarity.SimilarityService;
import org.opensearch.index.store.FsDirectoryFactory;
-import org.opensearch.index.store.RemoteDirectoryFactory;
+import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory;
import org.opensearch.indices.IndicesModule;
import org.opensearch.indices.IndicesQueryCache;
import org.opensearch.indices.analysis.AnalysisModule;
@@ -234,7 +234,7 @@ private IndexService newIndexService(IndexModule module) throws IOException {
writableRegistry(),
() -> false,
null,
- new RemoteDirectoryFactory(() -> repositoriesService)
+ new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService)
);
}
diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java
index 8c00ab97a46ea..662afa80f65fc 100644
--- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java
+++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java
@@ -2689,8 +2689,9 @@ public void testRestoreShardFromRemoteStore() throws IOException {
storeDirectory.deleteFile(file);
}
+ assertEquals(0, storeDirectory.listAll().length);
+
Directory remoteDirectory = ((FilterDirectory) ((FilterDirectory) target.remoteStore().directory()).getDelegate()).getDelegate();
- ((BaseDirectoryWrapper) remoteDirectory).setCheckIndexOnClose(false);
// extra0 file is added as a part of https://lucene.apache.org/core/7_2_1/test-framework/org/apache/lucene/mockfile/ExtrasFS.html
// Safe to remove without impacting the test
diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java
index af92d821a9043..6b05d67836272 100644
--- a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java
+++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java
@@ -8,132 +8,209 @@
package org.opensearch.index.shard;
+import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
-import org.opensearch.test.OpenSearchTestCase;
+import org.apache.lucene.store.FilterDirectory;
+import org.apache.lucene.tests.store.BaseDirectoryWrapper;
+import org.junit.After;
+import org.opensearch.action.ActionListener;
+import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.cluster.routing.IndexShardRoutingTable;
+import org.opensearch.cluster.routing.ShardRouting;
+import org.opensearch.common.concurrent.GatedCloseable;
+import org.opensearch.common.lease.Releasable;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.index.engine.InternalEngineFactory;
+import org.opensearch.index.store.RemoteSegmentStoreDirectory;
+import org.opensearch.index.store.Store;
+import org.opensearch.threadpool.ThreadPool;
import java.io.IOException;
-import java.nio.file.NoSuchFileException;
+import java.util.Collections;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.doThrow;
+public class RemoteStoreRefreshListenerTests extends IndexShardTestCase {
+ private IndexShard indexShard;
+ private RemoteStoreRefreshListener remoteStoreRefreshListener;
-public class RemoteStoreRefreshListenerTests extends OpenSearchTestCase {
- private Directory storeDirectory;
- private Directory remoteDirectory;
+ public void setup(boolean primary, int numberOfDocs) throws IOException {
+ indexShard = newStartedShard(
+ primary,
+ Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true).build(),
+ new InternalEngineFactory()
+ );
- private RemoteStoreRefreshListener remoteStoreRefreshListener;
+ indexDocs(1, numberOfDocs);
+ indexShard.refresh("test");
- public void setup(String[] remoteFiles) throws IOException {
- storeDirectory = mock(Directory.class);
- remoteDirectory = mock(Directory.class);
- when(remoteDirectory.listAll()).thenReturn(remoteFiles);
- remoteStoreRefreshListener = new RemoteStoreRefreshListener(storeDirectory, remoteDirectory);
+ remoteStoreRefreshListener = new RemoteStoreRefreshListener(indexShard);
}
- public void testAfterRefreshFalse() throws IOException {
- setup(new String[0]);
- remoteStoreRefreshListener.afterRefresh(false);
- verify(storeDirectory, times(0)).listAll();
+ private void indexDocs(int startDocId, int numberOfDocs) throws IOException {
+ for (int i = startDocId; i < startDocId + numberOfDocs; i++) {
+ indexDoc(indexShard, "_doc", Integer.toString(i));
+ }
}
- public void testAfterRefreshTrueNoLocalFiles() throws IOException {
- setup(new String[0]);
+ @After
+ public void tearDown() throws Exception {
+ Directory storeDirectory = ((FilterDirectory) ((FilterDirectory) indexShard.store().directory()).getDelegate()).getDelegate();
+ ((BaseDirectoryWrapper) storeDirectory).setCheckIndexOnClose(false);
+ closeShards(indexShard);
+ super.tearDown();
+ }
- when(storeDirectory.listAll()).thenReturn(new String[0]);
+ public void testAfterRefresh() throws IOException {
+ setup(true, 3);
+ assertDocs(indexShard, "1", "2", "3");
- remoteStoreRefreshListener.afterRefresh(true);
- verify(storeDirectory).listAll();
- verify(remoteDirectory, times(0)).copyFrom(any(), any(), any(), any());
- verify(remoteDirectory, times(0)).deleteFile(any());
- }
+ try (Store remoteStore = indexShard.remoteStore()) {
+ RemoteSegmentStoreDirectory remoteSegmentStoreDirectory =
+ (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate();
- public void testAfterRefreshOnlyUploadFiles() throws IOException {
- setup(new String[0]);
+ verifyUploadedSegments(remoteSegmentStoreDirectory);
- String[] localFiles = new String[] { "segments_1", "0.si", "0.cfs", "0.cfe" };
- when(storeDirectory.listAll()).thenReturn(localFiles);
+ // This is to check if reading data from remote segment store works as well.
+ remoteSegmentStoreDirectory.init();
- remoteStoreRefreshListener.afterRefresh(true);
- verify(storeDirectory).listAll();
- verify(remoteDirectory).copyFrom(storeDirectory, "segments_1", "segments_1", IOContext.DEFAULT);
- verify(remoteDirectory).copyFrom(storeDirectory, "0.si", "0.si", IOContext.DEFAULT);
- verify(remoteDirectory).copyFrom(storeDirectory, "0.cfs", "0.cfs", IOContext.DEFAULT);
- verify(remoteDirectory).copyFrom(storeDirectory, "0.cfe", "0.cfe", IOContext.DEFAULT);
- verify(remoteDirectory, times(0)).deleteFile(any());
+ verifyUploadedSegments(remoteSegmentStoreDirectory);
+ }
}
- public void testAfterRefreshOnlyUploadAndDelete() throws IOException {
- setup(new String[] { "0.si", "0.cfs" });
+ public void testAfterCommit() throws IOException {
+ setup(true, 3);
+ assertDocs(indexShard, "1", "2", "3");
+ flushShard(indexShard);
- String[] localFiles = new String[] { "segments_1", "1.si", "1.cfs", "1.cfe" };
- when(storeDirectory.listAll()).thenReturn(localFiles);
+ try (Store remoteStore = indexShard.remoteStore()) {
+ RemoteSegmentStoreDirectory remoteSegmentStoreDirectory =
+ (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate();
- remoteStoreRefreshListener.afterRefresh(true);
- verify(storeDirectory).listAll();
- verify(remoteDirectory).copyFrom(storeDirectory, "segments_1", "segments_1", IOContext.DEFAULT);
- verify(remoteDirectory).copyFrom(storeDirectory, "1.si", "1.si", IOContext.DEFAULT);
- verify(remoteDirectory).copyFrom(storeDirectory, "1.cfs", "1.cfs", IOContext.DEFAULT);
- verify(remoteDirectory).copyFrom(storeDirectory, "1.cfe", "1.cfe", IOContext.DEFAULT);
- verify(remoteDirectory).deleteFile("0.si");
- verify(remoteDirectory).deleteFile("0.cfs");
+ verifyUploadedSegments(remoteSegmentStoreDirectory);
+
+ // This is to check if reading data from remote segment store works as well.
+ remoteSegmentStoreDirectory.init();
+
+ verifyUploadedSegments(remoteSegmentStoreDirectory);
+ }
}
- public void testAfterRefreshOnlyDelete() throws IOException {
- setup(new String[] { "0.si", "0.cfs" });
+ public void testRefreshAfterCommit() throws IOException {
+ setup(true, 3);
+ assertDocs(indexShard, "1", "2", "3");
+ flushShard(indexShard);
- String[] localFiles = new String[] { "0.si" };
- when(storeDirectory.listAll()).thenReturn(localFiles);
+ indexDocs(4, 4);
+ indexShard.refresh("test");
- remoteStoreRefreshListener.afterRefresh(true);
- verify(storeDirectory).listAll();
- verify(remoteDirectory, times(0)).copyFrom(any(), any(), any(), any());
- verify(remoteDirectory).deleteFile("0.cfs");
- }
+ indexDocs(8, 4);
+ indexShard.refresh("test");
- public void testAfterRefreshTempLocalFile() throws IOException {
- setup(new String[0]);
+ try (Store remoteStore = indexShard.remoteStore()) {
+ RemoteSegmentStoreDirectory remoteSegmentStoreDirectory =
+ (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate();
- String[] localFiles = new String[] { "segments_1", "0.si", "0.cfs.tmp" };
- when(storeDirectory.listAll()).thenReturn(localFiles);
- doThrow(new NoSuchFileException("0.cfs.tmp")).when(remoteDirectory)
- .copyFrom(storeDirectory, "0.cfs.tmp", "0.cfs.tmp", IOContext.DEFAULT);
+ verifyUploadedSegments(remoteSegmentStoreDirectory);
- remoteStoreRefreshListener.afterRefresh(true);
- verify(storeDirectory).listAll();
- verify(remoteDirectory).copyFrom(storeDirectory, "segments_1", "segments_1", IOContext.DEFAULT);
- verify(remoteDirectory).copyFrom(storeDirectory, "0.si", "0.si", IOContext.DEFAULT);
- verify(remoteDirectory, times(0)).deleteFile(any());
+ // This is to check if reading data from remote segment store works as well.
+ remoteSegmentStoreDirectory.init();
+
+ verifyUploadedSegments(remoteSegmentStoreDirectory);
+ }
}
- public void testAfterRefreshConsecutive() throws IOException {
- setup(new String[0]);
+ public void testAfterMultipleCommits() throws IOException {
+ setup(true, 3);
+ assertDocs(indexShard, "1", "2", "3");
- String[] localFiles = new String[] { "segments_1", "0.si", "0.cfs", "0.cfe" };
- when(storeDirectory.listAll()).thenReturn(localFiles);
- doThrow(new IOException("0.cfs")).when(remoteDirectory).copyFrom(storeDirectory, "0.cfs", "0.cfe", IOContext.DEFAULT);
- doThrow(new IOException("0.cfe")).when(remoteDirectory).copyFrom(storeDirectory, "0.cfe", "0.cfe", IOContext.DEFAULT);
+ for (int i = 0; i < RemoteStoreRefreshListener.LAST_N_METADATA_FILES_TO_KEEP + 3; i++) {
+ indexDocs(4 * (i + 1), 4);
+ flushShard(indexShard);
+ }
+ try (Store remoteStore = indexShard.remoteStore()) {
+ RemoteSegmentStoreDirectory remoteSegmentStoreDirectory =
+ (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate();
+
+ verifyUploadedSegments(remoteSegmentStoreDirectory);
+
+ // This is to check if reading data from remote segment store works as well.
+ remoteSegmentStoreDirectory.init();
+
+ verifyUploadedSegments(remoteSegmentStoreDirectory);
+ }
+ }
+
+ public void testReplica() throws IOException {
+ setup(false, 3);
remoteStoreRefreshListener.afterRefresh(true);
- verify(storeDirectory).listAll();
- verify(remoteDirectory).copyFrom(storeDirectory, "segments_1", "segments_1", IOContext.DEFAULT);
- verify(remoteDirectory).copyFrom(storeDirectory, "0.si", "0.si", IOContext.DEFAULT);
- verify(remoteDirectory).copyFrom(storeDirectory, "0.cfs", "0.cfs", IOContext.DEFAULT);
- verify(remoteDirectory).copyFrom(storeDirectory, "0.cfe", "0.cfe", IOContext.DEFAULT);
- verify(remoteDirectory, times(0)).deleteFile(any());
- String[] localFilesSecondRefresh = new String[] { "segments_1", "0.cfs", "1.cfs", "1.cfe" };
- when(storeDirectory.listAll()).thenReturn(localFilesSecondRefresh);
+ try (Store remoteStore = indexShard.remoteStore()) {
+ RemoteSegmentStoreDirectory remoteSegmentStoreDirectory =
+ (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate();
+
+ assertEquals(0, remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().size());
+ }
+ }
+ public void testReplicaPromotion() throws IOException, InterruptedException {
+ setup(false, 3);
remoteStoreRefreshListener.afterRefresh(true);
- verify(remoteDirectory).copyFrom(storeDirectory, "0.cfs", "0.cfs", IOContext.DEFAULT);
- verify(remoteDirectory).copyFrom(storeDirectory, "1.cfs", "1.cfs", IOContext.DEFAULT);
- verify(remoteDirectory).copyFrom(storeDirectory, "1.cfe", "1.cfe", IOContext.DEFAULT);
- verify(remoteDirectory).deleteFile("0.si");
+ RemoteSegmentStoreDirectory remoteSegmentStoreDirectory =
+ (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore().directory()).getDelegate())
+ .getDelegate();
+
+ assertEquals(0, remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().size());
+
+ final ShardRouting replicaRouting = indexShard.routingEntry();
+ promoteReplica(
+ indexShard,
+ Collections.singleton(replicaRouting.allocationId().getId()),
+ new IndexShardRoutingTable.Builder(replicaRouting.shardId()).addShard(replicaRouting).build()
+ );
+
+ // The following logic is adapted from IndexShardTests.testPrimaryFillsSeqNoGapsOnPromotion
+ // ToDo: Add wait logic as part of promoteReplica()
+ final CountDownLatch latch = new CountDownLatch(1);
+ indexShard.acquirePrimaryOperationPermit(new ActionListener<>() {
+ @Override
+ public void onResponse(Releasable releasable) {
+ releasable.close();
+ latch.countDown();
+ }
+
+ @Override
+ public void onFailure(Exception e) {
+ throw new AssertionError(e);
+ }
+ }, ThreadPool.Names.GENERIC, "");
+
+ latch.await();
+
+ indexDocs(4, 4);
+ indexShard.refresh("test");
+ remoteStoreRefreshListener.afterRefresh(true);
+
+ verifyUploadedSegments(remoteSegmentStoreDirectory);
+
+ // This is to check if reading data from remote segment store works as well.
+ remoteSegmentStoreDirectory.init();
+
+ verifyUploadedSegments(remoteSegmentStoreDirectory);
+ }
+
+ private void verifyUploadedSegments(RemoteSegmentStoreDirectory remoteSegmentStoreDirectory) throws IOException {
+ Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> uploadedSegments = remoteSegmentStoreDirectory
+ .getSegmentsUploadedToRemoteStore();
+ try (GatedCloseable<SegmentInfos> segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) {
+ SegmentInfos segmentInfos = segmentInfosGatedCloseable.get();
+ for (String file : segmentInfos.files(true)) {
+ if (!RemoteStoreRefreshListener.EXCLUDE_FILES.contains(file)) {
+ assertTrue(uploadedSegments.containsKey(file));
+ }
+ }
+ }
}
}
diff --git a/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java b/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java
index 273d3c7e37c56..cd35349e33b59 100644
--- a/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java
+++ b/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java
@@ -44,6 +44,7 @@ public void testReadByte() throws IOException {
when(inputStream.read()).thenReturn(10);
assertEquals(10, remoteIndexInput.readByte());
+ assertEquals(1, remoteIndexInput.getFilePointer());
verify(inputStream).read(any());
}
@@ -52,13 +53,19 @@ public void testReadByteIOException() throws IOException {
when(inputStream.read(any())).thenThrow(new IOException("Error reading"));
assertThrows(IOException.class, () -> remoteIndexInput.readByte());
+ assertEquals(0, remoteIndexInput.getFilePointer());
}
public void testReadBytes() throws IOException {
- byte[] buffer = new byte[10];
- remoteIndexInput.readBytes(buffer, 10, 20);
+ byte[] buffer = new byte[20];
+ when(inputStream.read(eq(buffer), anyInt(), anyInt())).thenReturn(10).thenReturn(3).thenReturn(6).thenReturn(-1);
+ remoteIndexInput.readBytes(buffer, 0, 20);
- verify(inputStream).read(buffer, 10, 20);
+ verify(inputStream).read(buffer, 0, 20);
+ verify(inputStream).read(buffer, 10, 10);
+ verify(inputStream).read(buffer, 13, 7);
+ verify(inputStream).read(buffer, 19, 1);
+ assertEquals(19, remoteIndexInput.getFilePointer());
}
public void testReadBytesMultipleIterations() throws IOException {
@@ -95,20 +102,14 @@ public void testLength() {
assertEquals(FILESIZE, remoteIndexInput.length());
}
- public void testSeek() throws IOException {
- remoteIndexInput.seek(10);
-
- verify(inputStream).skip(10);
- }
-
- public void testSeekIOException() throws IOException {
- when(inputStream.skip(10)).thenThrow(new IOException("Error reading"));
-
- assertThrows(IOException.class, () -> remoteIndexInput.seek(10));
+ public void testSeek() {
+ assertThrows(UnsupportedOperationException.class, () -> remoteIndexInput.seek(100L));
}
- public void testGetFilePointer() {
- assertThrows(UnsupportedOperationException.class, () -> remoteIndexInput.getFilePointer());
+ public void testGetFilePointer() throws IOException {
+ when(inputStream.read(any(), eq(0), eq(8))).thenReturn(8);
+ remoteIndexInput.readBytes(new byte[8], 0, 8);
+ assertEquals(8, remoteIndexInput.getFilePointer());
}
public void testSlice() {
diff --git a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryFactoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java
similarity index 70%
rename from server/src/test/java/org/opensearch/index/store/RemoteDirectoryFactoryTests.java
rename to server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java
index e8357d2c184bf..0105d0dc309c2 100644
--- a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryFactoryTests.java
+++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java
@@ -11,6 +11,7 @@
import org.apache.lucene.store.Directory;
import org.junit.Before;
import org.mockito.ArgumentCaptor;
+import org.opensearch.cluster.metadata.IndexMetadata;
import org.opensearch.common.blobstore.BlobContainer;
import org.opensearch.common.blobstore.BlobPath;
import org.opensearch.common.blobstore.BlobStore;
@@ -27,29 +28,31 @@
import java.io.IOException;
import java.nio.file.Path;
import java.util.Collections;
+import java.util.List;
import java.util.function.Supplier;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.times;
-public class RemoteDirectoryFactoryTests extends OpenSearchTestCase {
+public class RemoteSegmentStoreDirectoryFactoryTests extends OpenSearchTestCase {
private Supplier<RepositoriesService> repositoriesServiceSupplier;
private RepositoriesService repositoriesService;
- private RemoteDirectoryFactory remoteDirectoryFactory;
+ private RemoteSegmentStoreDirectoryFactory remoteSegmentStoreDirectoryFactory;
@Before
public void setup() {
repositoriesServiceSupplier = mock(Supplier.class);
repositoriesService = mock(RepositoriesService.class);
when(repositoriesServiceSupplier.get()).thenReturn(repositoriesService);
- remoteDirectoryFactory = new RemoteDirectoryFactory(repositoriesServiceSupplier);
+ remoteSegmentStoreDirectoryFactory = new RemoteSegmentStoreDirectoryFactory(repositoriesServiceSupplier);
}
public void testNewDirectory() throws IOException {
- Settings settings = Settings.builder().build();
+ Settings settings = Settings.builder().put(IndexMetadata.SETTING_INDEX_UUID, "uuid_1").build();
IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("foo", settings);
Path tempDir = createTempDir().resolve(indexSettings.getUUID()).resolve("0");
ShardPath shardPath = new ShardPath(false, tempDir, tempDir, new ShardId(indexSettings.getIndex(), 0));
@@ -57,20 +60,21 @@ public void testNewDirectory() throws IOException {
BlobStore blobStore = mock(BlobStore.class);
BlobContainer blobContainer = mock(BlobContainer.class);
when(repository.blobStore()).thenReturn(blobStore);
+ when(repository.basePath()).thenReturn(new BlobPath().add("base_path"));
when(blobStore.blobContainer(any())).thenReturn(blobContainer);
when(blobContainer.listBlobs()).thenReturn(Collections.emptyMap());
when(repositoriesService.repository("remote_store_repository")).thenReturn(repository);
- try (Directory directory = remoteDirectoryFactory.newDirectory("remote_store_repository", indexSettings, shardPath)) {
- assertTrue(directory instanceof RemoteDirectory);
+ try (Directory directory = remoteSegmentStoreDirectoryFactory.newDirectory("remote_store_repository", indexSettings, shardPath)) {
+ assertTrue(directory instanceof RemoteSegmentStoreDirectory);
ArgumentCaptor<BlobPath> blobPathCaptor = ArgumentCaptor.forClass(BlobPath.class);
- verify(blobStore).blobContainer(blobPathCaptor.capture());
- BlobPath blobPath = blobPathCaptor.getValue();
- assertEquals("foo/0/", blobPath.buildAsString());
+ verify(blobStore, times(2)).blobContainer(blobPathCaptor.capture());
+ List<BlobPath> blobPaths = blobPathCaptor.getAllValues();
+ assertEquals("base_path/uuid_1/0/segments/data/", blobPaths.get(0).buildAsString());
+ assertEquals("base_path/uuid_1/0/segments/metadata/", blobPaths.get(1).buildAsString());
- directory.listAll();
- verify(blobContainer).listBlobs();
+ verify(blobContainer).listBlobsByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX);
verify(repositoriesService).repository("remote_store_repository");
}
}
@@ -85,7 +89,7 @@ public void testNewDirectoryRepositoryDoesNotExist() {
assertThrows(
IllegalArgumentException.class,
- () -> remoteDirectoryFactory.newDirectory("remote_store_repository", indexSettings, shardPath)
+ () -> remoteSegmentStoreDirectoryFactory.newDirectory("remote_store_repository", indexSettings, shardPath)
);
}
diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
index 4eabfa74625f2..96f14616fb54b 100644
--- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
+++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
@@ -15,6 +15,7 @@
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.tests.util.LuceneTestCase;
import org.junit.Before;
+import org.opensearch.common.UUIDs;
import org.opensearch.common.collect.Set;
import org.opensearch.test.OpenSearchTestCase;
@@ -129,26 +130,52 @@ public void testInitNoMetadataFile() throws IOException {
private Map<String, String> getDummyMetadata(String prefix, int commitGeneration) {
Map<String, String> metadata = new HashMap<>();
- metadata.put(prefix + ".cfe", prefix + ".cfe::" + prefix + ".cfe__qrt::" + randomIntBetween(1000, 5000));
- metadata.put(prefix + ".cfs", prefix + ".cfs::" + prefix + ".cfs__zxd::" + randomIntBetween(1000, 5000));
- metadata.put(prefix + ".si", prefix + ".si::" + prefix + ".si__yui::" + randomIntBetween(1000, 5000));
+
+ metadata.put(prefix + ".cfe", prefix + ".cfe::" + prefix + ".cfe__" + UUIDs.base64UUID() + "::" + randomIntBetween(1000, 5000));
+ metadata.put(prefix + ".cfs", prefix + ".cfs::" + prefix + ".cfs__" + UUIDs.base64UUID() + "::" + randomIntBetween(1000, 5000));
+ metadata.put(prefix + ".si", prefix + ".si::" + prefix + ".si__" + UUIDs.base64UUID() + "::" + randomIntBetween(1000, 5000));
metadata.put(
"segments_" + commitGeneration,
- "segments_" + commitGeneration + "::segments_" + commitGeneration + "__exv::" + randomIntBetween(1000, 5000)
+ "segments_"
+ + commitGeneration
+ + "::segments_"
+ + commitGeneration
+ + "__"
+ + UUIDs.base64UUID()
+ + "::"
+ + randomIntBetween(1000, 5000)
);
return metadata;
}
- private void populateMetadata() throws IOException {
+ private Map<String, Map<String, String>> populateMetadata() throws IOException {
List<String> metadataFiles = List.of("metadata__1__5__abc", "metadata__1__6__pqr", "metadata__2__1__zxv");
when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenReturn(
metadataFiles
);
- IndexInput indexInput = mock(IndexInput.class);
- Map dummyMetadata = getDummyMetadata("_0", 1);
- when(indexInput.readMapOfStrings()).thenReturn(dummyMetadata);
- when(remoteMetadataDirectory.openInput("metadata__2__1__zxv", IOContext.DEFAULT)).thenReturn(indexInput);
+ Map<String, Map<String, String>> metadataFilenameContentMapping = Map.of(
+ "metadata__1__5__abc",
+ getDummyMetadata("_0", 1),
+ "metadata__1__6__pqr",
+ getDummyMetadata("_0", 1),
+ "metadata__2__1__zxv",
+ getDummyMetadata("_0", 1)
+ );
+
+ IndexInput indexInput1 = mock(IndexInput.class);
+ when(indexInput1.readMapOfStrings()).thenReturn(metadataFilenameContentMapping.get("metadata__1__5__abc"));
+ when(remoteMetadataDirectory.openInput("metadata__1__5__abc", IOContext.DEFAULT)).thenReturn(indexInput1);
+
+ IndexInput indexInput2 = mock(IndexInput.class);
+ when(indexInput2.readMapOfStrings()).thenReturn(metadataFilenameContentMapping.get("metadata__1__6__pqr"));
+ when(remoteMetadataDirectory.openInput("metadata__1__6__pqr", IOContext.DEFAULT)).thenReturn(indexInput2);
+
+ IndexInput indexInput3 = mock(IndexInput.class);
+ when(indexInput3.readMapOfStrings()).thenReturn(metadataFilenameContentMapping.get("metadata__2__1__zxv"));
+ when(remoteMetadataDirectory.openInput("metadata__2__1__zxv", IOContext.DEFAULT)).thenReturn(indexInput3);
+
+ return metadataFilenameContentMapping;
}
public void testInit() throws IOException {
@@ -291,20 +318,39 @@ public void testCopyFromException() throws IOException {
}
public void testContainsFile() throws IOException {
- populateMetadata();
+ List metadataFiles = List.of("metadata__1__5__abc");
+ when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenReturn(
+ metadataFiles
+ );
+
+ Map<String, String> metadata = new HashMap<>();
+ metadata.put("_0.cfe", "_0.cfe::_0.cfe__" + UUIDs.base64UUID() + "::1234");
+ metadata.put("_0.cfs", "_0.cfs::_0.cfs__" + UUIDs.base64UUID() + "::2345");
+
+ Map> metadataFilenameContentMapping = Map.of("metadata__1__5__abc", metadata);
+
+ IndexInput indexInput1 = mock(IndexInput.class);
+ when(indexInput1.readMapOfStrings()).thenReturn(metadataFilenameContentMapping.get("metadata__1__5__abc"));
+ when(remoteMetadataDirectory.openInput("metadata__1__5__abc", IOContext.DEFAULT)).thenReturn(indexInput1);
+
remoteSegmentStoreDirectory.init();
- // This is not the correct way to add files but the other way is to open up access to fields in UploadedSegmentMetadata
Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> uploadedSegmentMetadataMap = remoteSegmentStoreDirectory
.getSegmentsUploadedToRemoteStore();
- uploadedSegmentMetadataMap.put(
- "_100.si",
- new RemoteSegmentStoreDirectory.UploadedSegmentMetadata("_100.si", "_100.si__uuid1", "1234")
+
+ assertThrows(
+ UnsupportedOperationException.class,
+ () -> uploadedSegmentMetadataMap.put(
+ "_100.si",
+ new RemoteSegmentStoreDirectory.UploadedSegmentMetadata("_100.si", "_100.si__uuid1", "1234")
+ )
);
- assertTrue(remoteSegmentStoreDirectory.containsFile("_100.si", "1234"));
- assertFalse(remoteSegmentStoreDirectory.containsFile("_100.si", "2345"));
- assertFalse(remoteSegmentStoreDirectory.containsFile("_200.si", "1234"));
+ assertTrue(remoteSegmentStoreDirectory.containsFile("_0.cfe", "1234"));
+ assertTrue(remoteSegmentStoreDirectory.containsFile("_0.cfs", "2345"));
+ assertFalse(remoteSegmentStoreDirectory.containsFile("_0.cfe", "1234000"));
+ assertFalse(remoteSegmentStoreDirectory.containsFile("_0.cfs", "2345000"));
+ assertFalse(remoteSegmentStoreDirectory.containsFile("_0.si", "23"));
}
public void testUploadMetadataEmpty() throws IOException {
@@ -336,4 +382,84 @@ public void testUploadMetadataNonEmpty() throws IOException {
String metadataString = remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().get("_0.si").toString();
verify(indexOutput).writeMapOfStrings(Map.of("_0.si", metadataString));
}
+
+ public void testDeleteStaleCommitsException() throws IOException {
+ when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenThrow(
+ new IOException("Error reading")
+ );
+
+ assertThrows(IOException.class, () -> remoteSegmentStoreDirectory.deleteStaleSegments(5));
+ }
+
+ public void testDeleteStaleCommitsWithinThreshold() throws IOException {
+ populateMetadata();
+
+ // populateMetadata() stubs the directory to return 3 metadata files
+ // We are passing lastNMetadataFilesToKeep=5 here so that none of the metadata files will be deleted
+ remoteSegmentStoreDirectory.deleteStaleSegments(5);
+
+ verify(remoteMetadataDirectory, times(0)).openInput(any(String.class), eq(IOContext.DEFAULT));
+ }
+
+ public void testDeleteStaleCommitsActualDelete() throws IOException {
+ Map<String, Map<String, String>> metadataFilenameContentMapping = populateMetadata();
+ remoteSegmentStoreDirectory.init();
+
+ // populateMetadata() stubs the directory to return 3 metadata files
+ // We are passing lastNMetadataFilesToKeep=2 here so that the oldest metadata file will be deleted
+ remoteSegmentStoreDirectory.deleteStaleSegments(2);
+
+ for (String metadata : metadataFilenameContentMapping.get("metadata__1__5__abc").values()) {
+ String uploadedFilename = metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1];
+ verify(remoteDataDirectory).deleteFile(uploadedFilename);
+ }
+ verify(remoteMetadataDirectory).deleteFile("metadata__1__5__abc");
+ }
+
+ public void testDeleteStaleCommitsActualDeleteIOException() throws IOException {
+ Map<String, Map<String, String>> metadataFilenameContentMapping = populateMetadata();
+ remoteSegmentStoreDirectory.init();
+
+ String segmentFileWithException = metadataFilenameContentMapping.get("metadata__1__5__abc")
+ .values()
+ .stream()
+ .findAny()
+ .get()
+ .split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1];
+ doThrow(new IOException("Error")).when(remoteDataDirectory).deleteFile(segmentFileWithException);
+ // populateMetadata() adds a stub to return 3 metadata files
+ // We are passing lastNMetadataFilesToKeep=2 here so that the oldest metadata file will be deleted
+ remoteSegmentStoreDirectory.deleteStaleSegments(2);
+
+ for (String metadata : metadataFilenameContentMapping.get("metadata__1__5__abc").values()) {
+ String uploadedFilename = metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1];
+ verify(remoteDataDirectory).deleteFile(uploadedFilename);
+ }
+ verify(remoteMetadataDirectory, times(0)).deleteFile("metadata__1__5__abc");
+ }
+
+ public void testDeleteStaleCommitsActualDeleteNoSuchFileException() throws IOException {
+ Map<String, Map<String, String>> metadataFilenameContentMapping = populateMetadata();
+ remoteSegmentStoreDirectory.init();
+
+ String segmentFileWithException = metadataFilenameContentMapping.get("metadata__1__5__abc")
+ .values()
+ .stream()
+ .findAny()
+ .get()
+ .split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1];
+ doThrow(new NoSuchFileException(segmentFileWithException)).when(remoteDataDirectory).deleteFile(segmentFileWithException);
+ // populateMetadata() adds a stub to return 3 metadata files
+ // We are passing lastNMetadataFilesToKeep=2 here so that the oldest metadata file will be deleted
+ remoteSegmentStoreDirectory.deleteStaleSegments(2);
+
+ for (String metadata : metadataFilenameContentMapping.get("metadata__1__5__abc").values()) {
+ String uploadedFilename = metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1];
+ verify(remoteDataDirectory).deleteFile(uploadedFilename);
+ }
+ verify(remoteMetadataDirectory).deleteFile("metadata__1__5__abc");
+ }
}
diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java
index 4b8eec70f2c1a..4d3b841e203de 100644
--- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java
+++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java
@@ -172,7 +172,7 @@
import org.opensearch.index.seqno.GlobalCheckpointSyncAction;
import org.opensearch.index.seqno.RetentionLeaseSyncer;
import org.opensearch.index.shard.PrimaryReplicaSyncer;
-import org.opensearch.index.store.RemoteDirectoryFactory;
+import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory;
import org.opensearch.indices.IndicesModule;
import org.opensearch.indices.IndicesService;
import org.opensearch.indices.ShardLimitValidator;
@@ -1826,7 +1826,7 @@ public void onFailure(final Exception e) {
emptyMap(),
null,
emptyMap(),
- new RemoteDirectoryFactory(() -> repositoriesService)
+ new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService)
);
final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings);
snapshotShardsService = new SnapshotShardsService(
diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java
index f446538acccbb..08004b7e42fea 100644
--- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java
@@ -59,6 +59,10 @@
import org.opensearch.common.CheckedFunction;
import org.opensearch.common.Nullable;
import org.opensearch.common.UUIDs;
+import org.opensearch.common.blobstore.BlobContainer;
+import org.opensearch.common.blobstore.BlobPath;
+import org.opensearch.common.blobstore.fs.FsBlobContainer;
+import org.opensearch.common.blobstore.fs.FsBlobStore;
import org.opensearch.common.bytes.BytesArray;
import org.opensearch.common.concurrent.GatedCloseable;
import org.opensearch.common.lucene.uid.Versions;
@@ -88,6 +92,8 @@
import org.opensearch.index.seqno.SequenceNumbers;
import org.opensearch.index.similarity.SimilarityService;
import org.opensearch.index.snapshots.IndexShardSnapshotStatus;
+import org.opensearch.index.store.RemoteDirectory;
+import org.opensearch.index.store.RemoteSegmentStoreDirectory;
import org.opensearch.index.store.Store;
import org.opensearch.index.store.StoreFileMetadata;
import org.opensearch.index.translog.InternalTranslogFactory;
@@ -123,6 +129,7 @@
import org.opensearch.threadpool.ThreadPool;
import java.io.IOException;
+import java.nio.file.Path;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
@@ -532,7 +539,10 @@ protected IndexShard newShard(
ShardId shardId = shardPath.getShardId();
NodeEnvironment.NodePath remoteNodePath = new NodeEnvironment.NodePath(createTempDir());
ShardPath remoteShardPath = new ShardPath(false, remoteNodePath.resolve(shardId), remoteNodePath.resolve(shardId), shardId);
- storeProvider = is -> createStore(is, remoteShardPath);
+ RemoteDirectory dataDirectory = newRemoteDirectory(remoteShardPath.resolveIndex());
+ RemoteDirectory metadataDirectory = newRemoteDirectory(remoteShardPath.resolveIndex());
+ RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory);
+ storeProvider = is -> createStore(shardId, is, remoteSegmentStoreDirectory);
remoteStore = storeProvider.apply(indexSettings);
}
indexShard = new IndexShard(
@@ -570,6 +580,13 @@ protected IndexShard newShard(
return indexShard;
}
+ private RemoteDirectory newRemoteDirectory(Path f) throws IOException {
+ FsBlobStore fsBlobStore = new FsBlobStore(1024, f, false);
+ BlobPath blobPath = new BlobPath();
+ BlobContainer fsBlobContainer = new FsBlobContainer(fsBlobStore, blobPath, f);
+ return new RemoteDirectory(fsBlobContainer);
+ }
+
/**
* Takes an existing shard, closes it and starts a new initializing shard at the same location
*
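Note on the string conventions exercised by the remote-store tests above: metadata files are named like `metadata__1__5__abc`, and each entry inside one maps a local segment file to a value of the form `_0.cfe::_0.cfe__<uuid>::1234`, i.e. local name, uploaded blob name, and checksum joined by a separator. A minimal sketch of pulling such an entry apart, assuming the `::` separator implied by the stubbed values (illustration only, not part of the patch):

    // Decompose one stubbed metadata entry the way the tests do.
    String entry = "_0.cfe::_0.cfe__someUuid::1234";
    String[] parts = entry.split("::");  // UploadedSegmentMetadata.SEPARATOR is assumed to be "::"
    String localName = parts[0];         // segment file name as Lucene sees it
    String uploadedName = parts[1];      // blob name in the remote data directory (parts[1] in the tests)
    String checksum = parts[2];          // value checked by containsFile("_0.cfe", "1234")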
From 7fe5830798b43f919ba1beed8669b711b149e60d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Luk=C3=A1=C5=A1=20Vl=C4=8Dek?=
Date: Mon, 29 Aug 2022 21:17:21 +0200
Subject: [PATCH 017/187] ZIP publication groupId value is configurable (#4156)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
When publishing the plugin ZIP POM, the groupId value was hard-coded to `org.opensearch.plugin`, which worked fine for existing core plugins but is not convenient for other plugins (such as community plugins maintained in independent repositories).
This PR changes where the ZIP publishing groupId value comes from.
Specifically, there are two ways to set the value:
1) It is automatically inherited from the Gradle "project.group" property.
2) It can be manually specified in the ZIP publication POM object.
This PR also brings a major rework of the tests in the PublishTests class. Individual testing scenarios are driven by "real" Gradle build scripts (utilizing the `java-gradle-plugin` Gradle plugin).
Closes #3692
Signed-off-by: Lukáš Vlček
Signed-off-by: Lukáš Vlček
---
CHANGELOG.md | 1 +
.../opensearch/gradle/pluginzip/Publish.java | 44 +--
.../gradle/pluginzip/PublishTests.java | 339 +++++++++++-------
.../pluginzip/customizedGroupValue.gradle | 45 +++
.../customizedInvalidGroupValue.gradle | 45 +++
.../pluginzip/groupAndVersionValue.gradle | 44 +++
.../pluginzip/missingGroupValue.gradle | 22 ++
.../pluginzip/missingPOMEntity.gradle | 22 ++
8 files changed, 406 insertions(+), 156 deletions(-)
create mode 100644 buildSrc/src/test/resources/pluginzip/customizedGroupValue.gradle
create mode 100644 buildSrc/src/test/resources/pluginzip/customizedInvalidGroupValue.gradle
create mode 100644 buildSrc/src/test/resources/pluginzip/groupAndVersionValue.gradle
create mode 100644 buildSrc/src/test/resources/pluginzip/missingGroupValue.gradle
create mode 100644 buildSrc/src/test/resources/pluginzip/missingPOMEntity.gradle
diff --git a/CHANGELOG.md b/CHANGELOG.md
index f11f407434e6b..52fa12d523659 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,6 +8,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
### Changed
- Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308))
- Use RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240))
+- Plugin ZIP publication groupId value is configurable ([#4156](https://github.com/opensearch-project/OpenSearch/pull/4156))
### Deprecated
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java
index d83384ec7d172..70c3737ba3674 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java
@@ -9,7 +9,8 @@
import org.gradle.api.Plugin;
import org.gradle.api.Project;
-import org.gradle.api.publish.Publication;
+import org.gradle.api.logging.Logger;
+import org.gradle.api.logging.Logging;
import org.gradle.api.publish.PublishingExtension;
import org.gradle.api.publish.maven.MavenPublication;
import org.gradle.api.publish.maven.plugins.MavenPublishPlugin;
@@ -18,6 +19,9 @@
import org.gradle.api.Task;
public class Publish implements Plugin<Project> {
+
+ private static final Logger LOGGER = Logging.getLogger(Publish.class);
+
public final static String EXTENSION_NAME = "zipmavensettings";
public final static String PUBLICATION_NAME = "pluginZip";
public final static String STAGING_REPO = "zipStaging";
@@ -37,27 +41,25 @@ public static void configMaven(Project project) {
});
});
publishing.publications(publications -> {
- final Publication publication = publications.findByName(PUBLICATION_NAME);
- if (publication == null) {
- publications.create(PUBLICATION_NAME, MavenPublication.class, mavenZip -> {
- String zipGroup = "org.opensearch.plugin";
- String zipArtifact = project.getName();
- String zipVersion = getProperty("version", project);
- mavenZip.artifact(project.getTasks().named("bundlePlugin"));
- mavenZip.setGroupId(zipGroup);
- mavenZip.setArtifactId(zipArtifact);
- mavenZip.setVersion(zipVersion);
- });
- } else {
- final MavenPublication mavenZip = (MavenPublication) publication;
- String zipGroup = "org.opensearch.plugin";
- String zipArtifact = project.getName();
- String zipVersion = getProperty("version", project);
- mavenZip.artifact(project.getTasks().named("bundlePlugin"));
- mavenZip.setGroupId(zipGroup);
- mavenZip.setArtifactId(zipArtifact);
- mavenZip.setVersion(zipVersion);
+ MavenPublication mavenZip = (MavenPublication) publications.findByName(PUBLICATION_NAME);
+
+ if (mavenZip == null) {
+ mavenZip = publications.create(PUBLICATION_NAME, MavenPublication.class);
}
+
+ String groupId = mavenZip.getGroupId();
+ if (groupId == null) {
+ // The groupId is not customized thus we get the value from "project.group".
+ // See https://docs.gradle.org/current/userguide/publishing_maven.html#sec:identity_values_in_the_generated_pom
+ groupId = getProperty("group", project);
+ }
+
+ String artifactId = project.getName();
+ String pluginVersion = getProperty("version", project);
+ mavenZip.artifact(project.getTasks().named("bundlePlugin"));
+ mavenZip.setGroupId(groupId);
+ mavenZip.setArtifactId(artifactId);
+ mavenZip.setVersion(pluginVersion);
});
});
}
diff --git a/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java
index 8c1314c4b4394..06632e2dfa476 100644
--- a/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java
+++ b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java
@@ -10,19 +10,21 @@
import org.gradle.testkit.runner.BuildResult;
import org.gradle.testkit.runner.GradleRunner;
-import org.gradle.testfixtures.ProjectBuilder;
-import org.gradle.api.Project;
+import org.gradle.testkit.runner.UnexpectedBuildFailure;
import org.opensearch.gradle.test.GradleUnitTestCase;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
-import java.io.IOException;
-import org.gradle.api.publish.maven.tasks.PublishToMavenRepository;
import java.io.File;
+import java.io.FileReader;
import java.io.FileWriter;
+import java.io.IOException;
import java.io.Writer;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import static org.gradle.testkit.runner.TaskOutcome.SUCCESS;
@@ -30,14 +32,16 @@
import org.apache.maven.model.Model;
import org.apache.maven.model.io.xpp3.MavenXpp3Reader;
import org.codehaus.plexus.util.xml.pull.XmlPullParserException;
-import java.io.FileReader;
-import org.gradle.api.tasks.bundling.Zip;
+import java.nio.file.Path;
+import java.nio.file.Paths;
import java.util.List;
-import java.util.ArrayList;
public class PublishTests extends GradleUnitTestCase {
private TemporaryFolder projectDir;
+ private static final String TEMPLATE_RESOURCE_FOLDER = "pluginzip";
+ private final String PROJECT_NAME = "sample-plugin";
+ private final String ZIP_PUBLISH_TASK = "publishPluginZipPublicationToZipStagingRepository";
@Before
public void setUp() throws IOException {
@@ -51,155 +55,200 @@ public void tearDown() {
}
@Test
- public void testZipPublish() throws IOException, XmlPullParserException {
- String zipPublishTask = "publishPluginZipPublicationToZipStagingRepository";
- prepareProjectForPublishTask(zipPublishTask);
-
- // Generate the build.gradle file
- String buildFileContent = "apply plugin: 'maven-publish' \n"
- + "apply plugin: 'java' \n"
- + "publishing {\n"
- + " repositories {\n"
- + " maven {\n"
- + " url = 'local-staging-repo/'\n"
- + " name = 'zipStaging'\n"
- + " }\n"
- + " }\n"
- + " publications {\n"
- + " pluginZip(MavenPublication) {\n"
- + " groupId = 'org.opensearch.plugin' \n"
- + " artifactId = 'sample-plugin' \n"
- + " version = '2.0.0.0' \n"
- + " artifact('sample-plugin.zip') \n"
- + " }\n"
- + " }\n"
- + "}";
- writeString(projectDir.newFile("build.gradle"), buildFileContent);
- // Execute the task publishPluginZipPublicationToZipStagingRepository
- List<String> allArguments = new ArrayList<String>();
- allArguments.add("build");
- allArguments.add(zipPublishTask);
- GradleRunner runner = GradleRunner.create();
- runner.forwardOutput();
- runner.withPluginClasspath();
- runner.withArguments(allArguments);
- runner.withProjectDir(projectDir.getRoot());
+ public void missingGroupValue() throws IOException, URISyntaxException, XmlPullParserException {
+ GradleRunner runner = prepareGradleRunnerFromTemplate("missingGroupValue.gradle");
+ Exception e = assertThrows(UnexpectedBuildFailure.class, runner::build);
+ assertTrue(e.getMessage().contains("Invalid publication 'pluginZip': groupId cannot be empty."));
+ }
+
+ /**
+ * This would be the most common use case where user declares Maven publication entity with basic info
+ * and the resulting POM file will use groupId and version values from the Gradle project object.
+ */
+ @Test
+ public void groupAndVersionValue() throws IOException, URISyntaxException, XmlPullParserException {
+ GradleRunner runner = prepareGradleRunnerFromTemplate("groupAndVersionValue.gradle");
BuildResult result = runner.build();
- // Check if task publishMavenzipPublicationToZipstagingRepository has ran well
- assertEquals(SUCCESS, result.task(":" + zipPublishTask).getOutcome());
- // check if the zip has been published to local staging repo
+
+ /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
+ assertEquals(SUCCESS, result.task(":" + "build").getOutcome());
+ assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome());
+
+ // check if both the zip and pom files have been published to local staging repo
assertTrue(
- new File(projectDir.getRoot(), "local-staging-repo/org/opensearch/plugin/sample-plugin/2.0.0.0/sample-plugin-2.0.0.0.zip")
- .exists()
+ new File(
+ projectDir.getRoot(),
+ String.join(
+ File.separator,
+ "build",
+ "local-staging-repo",
+ "org",
+ "custom",
+ "group",
+ PROJECT_NAME,
+ "2.0.0.0",
+ PROJECT_NAME + "-2.0.0.0.pom"
+ )
+ ).exists()
);
- assertEquals(SUCCESS, result.task(":" + "build").getOutcome());
- // Parse the maven file and validate the groupID to org.opensearch.plugin
+ assertTrue(
+ new File(
+ projectDir.getRoot(),
+ String.join(
+ File.separator,
+ "build",
+ "local-staging-repo",
+ "org",
+ "custom",
+ "group",
+ PROJECT_NAME,
+ "2.0.0.0",
+ PROJECT_NAME + "-2.0.0.0.zip"
+ )
+ ).exists()
+ );
+
+ // Parse the maven file and validate the groupID
MavenXpp3Reader reader = new MavenXpp3Reader();
Model model = reader.read(
new FileReader(
- new File(projectDir.getRoot(), "local-staging-repo/org/opensearch/plugin/sample-plugin/2.0.0.0/sample-plugin-2.0.0.0.pom")
+ new File(
+ projectDir.getRoot(),
+ String.join(
+ File.separator,
+ "build",
+ "local-staging-repo",
+ "org",
+ "custom",
+ "group",
+ PROJECT_NAME,
+ "2.0.0.0",
+ PROJECT_NAME + "-2.0.0.0.pom"
+ )
+ )
)
);
- assertEquals(model.getGroupId(), "org.opensearch.plugin");
+ assertEquals(model.getVersion(), "2.0.0.0");
+ assertEquals(model.getGroupId(), "org.custom.group");
+ assertEquals(model.getUrl(), "https://github.com/doe/sample-plugin");
}
+ /**
+ * In this case the Publication entity is completely missing but still the POM file is generated using the default
+ * values including the groupId and version values obtained from the Gradle project object.
+ */
@Test
- public void testZipPublishWithPom() throws IOException, XmlPullParserException {
- String zipPublishTask = "publishPluginZipPublicationToZipStagingRepository";
- Project project = prepareProjectForPublishTask(zipPublishTask);
-
- // Generate the build.gradle file
- String buildFileContent = "apply plugin: 'maven-publish' \n"
- + "apply plugin: 'java' \n"
- + "publishing {\n"
- + " repositories {\n"
- + " maven {\n"
- + " url = 'local-staging-repo/'\n"
- + " name = 'zipStaging'\n"
- + " }\n"
- + " }\n"
- + " publications {\n"
- + " pluginZip(MavenPublication) {\n"
- + " groupId = 'org.opensearch.plugin' \n"
- + " artifactId = 'sample-plugin' \n"
- + " version = '2.0.0.0' \n"
- + " artifact('sample-plugin.zip') \n"
- + " pom {\n"
- + " name = 'sample-plugin'\n"
- + " description = 'sample-description'\n"
- + " licenses {\n"
- + " license {\n"
- + " name = \"The Apache License, Version 2.0\"\n"
- + " url = \"http://www.apache.org/licenses/LICENSE-2.0.txt\"\n"
- + " }\n"
- + " }\n"
- + " developers {\n"
- + " developer {\n"
- + " name = 'opensearch'\n"
- + " url = 'https://github.com/opensearch-project/OpenSearch'\n"
- + " }\n"
- + " }\n"
- + " url = 'https://github.com/opensearch-project/OpenSearch'\n"
- + " scm {\n"
- + " url = 'https://github.com/opensearch-project/OpenSearch'\n"
- + " }\n"
- + " }"
- + " }\n"
- + " }\n"
- + "}";
- writeString(projectDir.newFile("build.gradle"), buildFileContent);
- // Execute the task publishPluginZipPublicationToZipStagingRepository
- List<String> allArguments = new ArrayList<String>();
- allArguments.add("build");
- allArguments.add(zipPublishTask);
- GradleRunner runner = GradleRunner.create();
- runner.forwardOutput();
- runner.withPluginClasspath();
- runner.withArguments(allArguments);
- runner.withProjectDir(projectDir.getRoot());
+ public void missingPOMEntity() throws IOException, URISyntaxException, XmlPullParserException {
+ GradleRunner runner = prepareGradleRunnerFromTemplate("missingPOMEntity.gradle");
BuildResult result = runner.build();
- // Check if task publishMavenzipPublicationToZipstagingRepository has ran well
- assertEquals(SUCCESS, result.task(":" + zipPublishTask).getOutcome());
- // check if the zip has been published to local staging repo
- assertTrue(
- new File(projectDir.getRoot(), "local-staging-repo/org/opensearch/plugin/sample-plugin/2.0.0.0/sample-plugin-2.0.0.0.zip")
- .exists()
+
+ /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
+ assertEquals(SUCCESS, result.task(":" + "build").getOutcome());
+ assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome());
+
+ // Parse the maven file and validate it
+ MavenXpp3Reader reader = new MavenXpp3Reader();
+ Model model = reader.read(
+ new FileReader(
+ new File(
+ projectDir.getRoot(),
+ String.join(
+ File.separator,
+ "build",
+ "local-staging-repo",
+ "org",
+ "custom",
+ "group",
+ PROJECT_NAME,
+ "2.0.0.0",
+ PROJECT_NAME + "-2.0.0.0.pom"
+ )
+ )
+ )
);
+
+ assertEquals(model.getArtifactId(), PROJECT_NAME);
+ assertEquals(model.getGroupId(), "org.custom.group");
+ assertEquals(model.getVersion(), "2.0.0.0");
+ assertEquals(model.getPackaging(), "zip");
+
+ assertNull(model.getName());
+ assertNull(model.getDescription());
+
+ assertEquals(0, model.getDevelopers().size());
+ assertEquals(0, model.getContributors().size());
+ assertEquals(0, model.getLicenses().size());
+ }
+
+ /**
+ * In some cases we need the POM groupId value to be different from the Gradle "project.group" value hence we
+ * allow for groupId customization (it will override whatever the Gradle "project.group" value is).
+ */
+ @Test
+ public void customizedGroupValue() throws IOException, URISyntaxException, XmlPullParserException {
+ GradleRunner runner = prepareGradleRunnerFromTemplate("customizedGroupValue.gradle");
+ BuildResult result = runner.build();
+
+ /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
assertEquals(SUCCESS, result.task(":" + "build").getOutcome());
- // Parse the maven file and validate the groupID to org.opensearch.plugin
+ assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome());
+
+ // Parse the maven file and validate the groupID
MavenXpp3Reader reader = new MavenXpp3Reader();
Model model = reader.read(
new FileReader(
- new File(projectDir.getRoot(), "local-staging-repo/org/opensearch/plugin/sample-plugin/2.0.0.0/sample-plugin-2.0.0.0.pom")
+ new File(
+ projectDir.getRoot(),
+ String.join(
+ File.separator,
+ "build",
+ "local-staging-repo",
+ "I",
+ "am",
+ "customized",
+ PROJECT_NAME,
+ "2.0.0.0",
+ PROJECT_NAME + "-2.0.0.0.pom"
+ )
+ )
)
);
- assertEquals(model.getGroupId(), "org.opensearch.plugin");
- assertEquals(model.getUrl(), "https://github.com/opensearch-project/OpenSearch");
+
+ assertEquals(model.getGroupId(), "I.am.customized");
}
- protected Project prepareProjectForPublishTask(String zipPublishTask) throws IOException {
- Project project = ProjectBuilder.builder().build();
-
- // Apply the opensearch.pluginzip plugin
- project.getPluginManager().apply("opensearch.pluginzip");
- // Check if the plugin has been applied to the project
- assertTrue(project.getPluginManager().hasPlugin("opensearch.pluginzip"));
- // Check if the project has the task from class PublishToMavenRepository after plugin apply
- assertNotNull(project.getTasks().withType(PublishToMavenRepository.class));
- // Create a mock bundlePlugin task
- Zip task = project.getTasks().create("bundlePlugin", Zip.class);
- Publish.configMaven(project);
- // Check if the main task publishPluginZipPublicationToZipStagingRepository exists after plugin apply
- assertTrue(project.getTasks().getNames().contains(zipPublishTask));
- assertNotNull("Task to generate: ", project.getTasks().getByName(zipPublishTask));
- // Run Gradle functional tests, but calling a build.gradle file, that resembles the plugin publish behavior
-
- // Create a sample plugin zip file
- File sampleZip = new File(projectDir.getRoot(), "sample-plugin.zip");
- Files.createFile(sampleZip.toPath());
- writeString(projectDir.newFile("settings.gradle"), "");
-
- return project;
+ /**
+ * If the customized groupId value is invalid (from the Maven POM perspective) then we need to be sure it is
+ * caught and reported properly.
+ */
+ @Test
+ public void customizedInvalidGroupValue() throws IOException, URISyntaxException {
+ GradleRunner runner = prepareGradleRunnerFromTemplate("customizedInvalidGroupValue.gradle");
+ Exception e = assertThrows(UnexpectedBuildFailure.class, runner::build);
+ assertTrue(
+ e.getMessage().contains("Invalid publication 'pluginZip': groupId ( ) is not a valid Maven identifier ([A-Za-z0-9_\\-.]+).")
+ );
+ }
+
+ private GradleRunner prepareGradleRunnerFromTemplate(String templateName) throws IOException, URISyntaxException {
+ useTemplateFile(projectDir.newFile("build.gradle"), templateName);
+ prepareGradleFilesAndSources();
+
+ GradleRunner runner = GradleRunner.create()
+ .forwardOutput()
+ .withPluginClasspath()
+ .withArguments("build", ZIP_PUBLISH_TASK)
+ .withProjectDir(projectDir.getRoot());
+
+ return runner;
+ }
+
+ private void prepareGradleFilesAndSources() throws IOException {
+ // A dummy "source" file that is processed with bundlePlugin and put into a ZIP artifact file
+ File bundleFile = new File(projectDir.getRoot(), PROJECT_NAME + "-source.txt");
+ Path zipFile = Files.createFile(bundleFile.toPath());
+ // Setting a project name via settings.gradle file
+ writeString(projectDir.newFile("settings.gradle"), "rootProject.name = '" + PROJECT_NAME + "'");
}
private void writeString(File file, String string) throws IOException {
@@ -208,4 +257,24 @@ private void writeString(File file, String string) throws IOException {
}
}
+ /**
+ * Write the content of the "template" file into the target file.
+ * The template file must be located in the {@value TEMPLATE_RESOURCE_FOLDER} folder.
+ * @param targetFile A target file
+ * @param templateFile A name of the template file located under {@value TEMPLATE_RESOURCE_FOLDER} folder
+ */
+ private void useTemplateFile(File targetFile, String templateFile) throws IOException, URISyntaxException {
+
+ URL resource = getClass().getClassLoader().getResource(String.join(File.separator, TEMPLATE_RESOURCE_FOLDER, templateFile));
+ Path resPath = Paths.get(resource.toURI()).toAbsolutePath();
+ List<String> lines = Files.readAllLines(resPath, StandardCharsets.UTF_8);
+
+ try (Writer writer = new FileWriter(targetFile)) {
+ for (String line : lines) {
+ writer.write(line);
+ writer.write(System.lineSeparator());
+ }
+ }
+ }
+
}
diff --git a/buildSrc/src/test/resources/pluginzip/customizedGroupValue.gradle b/buildSrc/src/test/resources/pluginzip/customizedGroupValue.gradle
new file mode 100644
index 0000000000000..1bde3edda2d91
--- /dev/null
+++ b/buildSrc/src/test/resources/pluginzip/customizedGroupValue.gradle
@@ -0,0 +1,45 @@
+plugins {
+ id 'java-gradle-plugin'
+ id 'nebula.maven-base-publish'
+ id 'opensearch.pluginzip'
+}
+
+group="org.custom.group"
+version='2.0.0.0'
+
+// A bundlePlugin task mockup
+tasks.register('bundlePlugin', Zip.class) {
+ archiveFileName = "sample-plugin-${version}.zip"
+ destinationDirectory = layout.buildDirectory.dir('distributions')
+ from layout.projectDirectory.file('sample-plugin-source.txt')
+}
+
+publishing {
+ publications {
+ pluginZip(MavenPublication) {
+ groupId = "I.am.customized"
+ pom {
+ name = "sample-plugin"
+ description = "pluginDescription"
+ licenses {
+ license {
+ name = "The Apache License, Version 2.0"
+ url = "http://www.apache.org/licenses/LICENSE-2.0.txt"
+ }
+ }
+ developers {
+ developer {
+ name = "John Doe"
+ url = "https://github.com/john-doe/"
+ organization = "Doe.inc"
+ organizationUrl = "https://doe.inc/"
+ }
+ }
+ url = "https://github.com/doe/sample-plugin"
+ scm {
+ url = "https://github.com/doe/sample-plugin"
+ }
+ }
+ }
+ }
+}
diff --git a/buildSrc/src/test/resources/pluginzip/customizedInvalidGroupValue.gradle b/buildSrc/src/test/resources/pluginzip/customizedInvalidGroupValue.gradle
new file mode 100644
index 0000000000000..b6deeeb12ca6a
--- /dev/null
+++ b/buildSrc/src/test/resources/pluginzip/customizedInvalidGroupValue.gradle
@@ -0,0 +1,45 @@
+plugins {
+ id 'java-gradle-plugin'
+ id 'nebula.maven-base-publish'
+ id 'opensearch.pluginzip'
+}
+
+group="org.custom.group"
+version='2.0.0.0'
+
+// A bundlePlugin task mockup
+tasks.register('bundlePlugin', Zip.class) {
+ archiveFileName = "sample-plugin-${version}.zip"
+ destinationDirectory = layout.buildDirectory.dir('distributions')
+ from layout.projectDirectory.file('sample-plugin-source.txt')
+}
+
+publishing {
+ publications {
+ pluginZip(MavenPublication) {
+ groupId = " " // <-- User provides invalid value
+ pom {
+ name = "sample-plugin"
+ description = "pluginDescription"
+ licenses {
+ license {
+ name = "The Apache License, Version 2.0"
+ url = "http://www.apache.org/licenses/LICENSE-2.0.txt"
+ }
+ }
+ developers {
+ developer {
+ name = "John Doe"
+ url = "https://github.com/john-doe/"
+ organization = "Doe.inc"
+ organizationUrl = "https://doe.inc/"
+ }
+ }
+ url = "https://github.com/doe/sample-plugin"
+ scm {
+ url = "https://github.com/doe/sample-plugin"
+ }
+ }
+ }
+ }
+}
diff --git a/buildSrc/src/test/resources/pluginzip/groupAndVersionValue.gradle b/buildSrc/src/test/resources/pluginzip/groupAndVersionValue.gradle
new file mode 100644
index 0000000000000..bdab385f6082c
--- /dev/null
+++ b/buildSrc/src/test/resources/pluginzip/groupAndVersionValue.gradle
@@ -0,0 +1,44 @@
+plugins {
+ id 'java-gradle-plugin'
+ id 'nebula.maven-base-publish'
+ id 'opensearch.pluginzip'
+}
+
+group="org.custom.group"
+version='2.0.0.0'
+
+// A bundlePlugin task mockup
+tasks.register('bundlePlugin', Zip.class) {
+ archiveFileName = "sample-plugin-${version}.zip"
+ destinationDirectory = layout.buildDirectory.dir('distributions')
+ from layout.projectDirectory.file('sample-plugin-source.txt')
+}
+
+publishing {
+ publications {
+ pluginZip(MavenPublication) {
+ pom {
+ name = "sample-plugin"
+ description = "pluginDescription"
+ licenses {
+ license {
+ name = "The Apache License, Version 2.0"
+ url = "http://www.apache.org/licenses/LICENSE-2.0.txt"
+ }
+ }
+ developers {
+ developer {
+ name = "John Doe"
+ url = "https://github.com/john-doe/"
+ organization = "Doe.inc"
+ organizationUrl = "https://doe.inc/"
+ }
+ }
+ url = "https://github.com/doe/sample-plugin"
+ scm {
+ url = "https://github.com/doe/sample-plugin"
+ }
+ }
+ }
+ }
+}
diff --git a/buildSrc/src/test/resources/pluginzip/missingGroupValue.gradle b/buildSrc/src/test/resources/pluginzip/missingGroupValue.gradle
new file mode 100644
index 0000000000000..602c178ea1a5b
--- /dev/null
+++ b/buildSrc/src/test/resources/pluginzip/missingGroupValue.gradle
@@ -0,0 +1,22 @@
+plugins {
+ id 'java-gradle-plugin'
+ id 'nebula.maven-base-publish'
+ id 'opensearch.pluginzip'
+}
+
+//group="org.custom.group"
+version='2.0.0.0'
+
+// A bundlePlugin task mockup
+tasks.register('bundlePlugin', Zip.class) {
+ archiveFileName = "sample-plugin-${version}.zip"
+ destinationDirectory = layout.buildDirectory.dir('distributions')
+ from layout.projectDirectory.file('sample-plugin-source.txt')
+}
+
+publishing {
+ publications {
+ pluginZip(MavenPublication) {
+ }
+ }
+}
diff --git a/buildSrc/src/test/resources/pluginzip/missingPOMEntity.gradle b/buildSrc/src/test/resources/pluginzip/missingPOMEntity.gradle
new file mode 100644
index 0000000000000..2cc67c2e98954
--- /dev/null
+++ b/buildSrc/src/test/resources/pluginzip/missingPOMEntity.gradle
@@ -0,0 +1,22 @@
+plugins {
+ id 'java-gradle-plugin'
+ id 'nebula.maven-base-publish'
+ id 'opensearch.pluginzip'
+}
+
+group="org.custom.group"
+version='2.0.0.0'
+
+// A bundlePlugin task mockup
+tasks.register('bundlePlugin', Zip.class) {
+ archiveFileName = "sample-plugin-${version}.zip"
+ destinationDirectory = layout.buildDirectory.dir('distributions')
+ from layout.projectDirectory.file('sample-plugin-source.txt')
+}
+
+publishing {
+ publications {
+ pluginZip(MavenPublication) {
+ }
+ }
+}
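In digest form, the groupId resolution the patch above implements (a sketch under stated assumptions, not the plugin source; `resolveGroupId` is a made-up name):

    // Resolution order: explicit publication value first, then project.group.
    static String resolveGroupId(org.gradle.api.publish.maven.MavenPublication pub, org.gradle.api.Project project) {
        String groupId = pub.getGroupId();                // 1) value set manually in the pluginZip publication
        if (groupId == null) {
            groupId = String.valueOf(project.getGroup()); // 2) inherited from the Gradle project.group
        }
        return groupId;
    }

An empty or whitespace-only groupId still fails Maven POM validation, which is what the customizedInvalidGroupValue.gradle scenario asserts.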
From f4e041ec5b178db0bb80db167dc99ac3fdc3eb09 Mon Sep 17 00:00:00 2001
From: Suraj Singh
Date: Mon, 29 Aug 2022 13:43:44 -0700
Subject: [PATCH 018/187] [Segment Replication] Add timeout on Mockito.verify
to reduce flakiness in testReplicationOnDone test (#4314)
* [Segment Replication] Add timeout to testReplicationOnDone to allow time for the verify call
Signed-off-by: Suraj Singh
* Update changelog
Signed-off-by: Suraj Singh
* Add change log entry
Signed-off-by: Suraj Singh
Signed-off-by: Suraj Singh
---
CHANGELOG.md | 1 +
.../replication/SegmentReplicationTargetServiceTests.java | 3 +--
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 52fa12d523659..b3c5d731af082 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -20,6 +20,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
- `opensearch.bat` and `opensearch-service.bat install` failing to run, missing logs directory ([#4305](https://github.com/opensearch-project/OpenSearch/pull/4305))
- Restore using the class ClusterInfoRequest and ClusterInfoRequestBuilder from package 'org.opensearch.action.support.master.info' for subclasses ([#4307](https://github.com/opensearch-project/OpenSearch/pull/4307))
- Do not fail replica shard due to primary closure ([#4133](https://github.com/opensearch-project/OpenSearch/pull/4133))
+- Add timeout on Mockito.verify to reduce flakiness in testReplicationOnDone test ([#4314](https://github.com/opensearch-project/OpenSearch/pull/4314))
### Security
diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java
index d3a6d1a97dacc..de739f4ca834a 100644
--- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java
+++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java
@@ -252,9 +252,8 @@ public void testReplicationOnDone() throws IOException {
SegmentReplicationTargetService.SegmentReplicationListener listener = captor.getValue();
listener.onDone(new SegmentReplicationState(new ReplicationLuceneIndex()));
doNothing().when(spy).onNewCheckpoint(any(), any());
- verify(spy, timeout(0).times(2)).onNewCheckpoint(eq(anotherNewCheckpoint), any());
+ verify(spy, timeout(100).times(2)).onNewCheckpoint(eq(anotherNewCheckpoint), any());
closeShard(indexShard, false);
-
}
public void testBeforeIndexShardClosed_CancelsOngoingReplications() {
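For context on the fix above: `Mockito.timeout(millis)` makes `verify` poll until the expected interaction count is reached or the timeout elapses, while `timeout(0)` asserts immediately and can miss a callback that arrives on another thread. A minimal sketch (illustration only, not project code):

    // Verify an interaction that happens asynchronously on another thread.
    Runnable listener = org.mockito.Mockito.mock(Runnable.class);
    new Thread(listener).start();  // run() fires at some point after start()
    org.mockito.Mockito.verify(listener, org.mockito.Mockito.timeout(100)).run();  // polls for up to 100 ms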
From 183006f4ce2d4423883a75e450e3c8a4553c5b92 Mon Sep 17 00:00:00 2001
From: Rishab Nahata
Date: Tue, 2 Aug 2022 11:45:51 +0530
Subject: [PATCH 019/187] Add Executor to decommission node attribute
Signed-off-by: Rishab Nahata
---
...NodeAttributeClusterStateTaskExecutor.java | 141 ++++++++++++++
.../decommission/DecommissionAttribute.java | 108 +++++++++++
...ttributeClusterStateTaskExecutorTests.java | 178 ++++++++++++++++++
3 files changed, 427 insertions(+)
create mode 100644 server/src/main/java/org/opensearch/cluster/coordination/DecommissionNodeAttributeClusterStateTaskExecutor.java
create mode 100644 server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttribute.java
create mode 100644 server/src/test/java/org/opensearch/cluster/coordination/DecommissionNodeAttributeClusterStateTaskExecutorTests.java
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/DecommissionNodeAttributeClusterStateTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/DecommissionNodeAttributeClusterStateTaskExecutor.java
new file mode 100644
index 0000000000000..d71cd98d5f25e
--- /dev/null
+++ b/server/src/main/java/org/opensearch/cluster/coordination/DecommissionNodeAttributeClusterStateTaskExecutor.java
@@ -0,0 +1,141 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.cluster.coordination;
+
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.ClusterStateTaskExecutor;
+import org.opensearch.cluster.ClusterStateTaskListener;
+import org.opensearch.cluster.decommission.DecommissionAttribute;
+import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.cluster.node.DiscoveryNodes;
+import org.opensearch.cluster.routing.allocation.AllocationService;
+import org.opensearch.persistent.PersistentTasksCustomMetadata;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.function.Predicate;
+
+/**
+ * Decommissions and shuts down nodes having a given attribute and updates the cluster state
+ *
+ * @opensearch.internal
+ */
+public class DecommissionNodeAttributeClusterStateTaskExecutor
+ implements
+ ClusterStateTaskExecutor<DecommissionNodeAttributeClusterStateTaskExecutor.Task>,
+ ClusterStateTaskListener {
+
+ private final AllocationService allocationService;
+ private final Logger logger;
+
+ /**
+ * Task for the executor.
+ *
+ * @opensearch.internal
+ */
+ public static class Task {
+
+ private final DecommissionAttribute decommissionAttribute;
+ private final String reason;
+
+ public Task(final DecommissionAttribute decommissionAttribute, final String reason) {
+ this.decommissionAttribute = decommissionAttribute;
+ this.reason = reason;
+ }
+
+ public DecommissionAttribute decommissionAttribute() {
+ return decommissionAttribute;
+ }
+
+ public String reason() {
+ return reason;
+ }
+
+ @Override
+ public String toString() {
+ return "Decommission Node Attribute Task{"
+ + "decommissionAttribute="
+ + decommissionAttribute
+ + ", reason='"
+ + reason
+ + '\''
+ + '}';
+ }
+ }
+
+ public DecommissionNodeAttributeClusterStateTaskExecutor(final AllocationService allocationService, final Logger logger) {
+ this.allocationService = allocationService;
+ this.logger = logger;
+ }
+
+ @Override
+ public ClusterTasksResult<Task> execute(ClusterState currentState, List<Task> tasks) throws Exception {
+ final DiscoveryNodes.Builder remainingNodesBuilder = DiscoveryNodes.builder(currentState.nodes());
+ List<DiscoveryNode> nodesToBeRemoved = new ArrayList<>();
+ for (final Task task : tasks) {
+ final Predicate<DiscoveryNode> shouldRemoveNodePredicate = discoveryNode -> nodeHasDecommissionedAttribute(discoveryNode, task);
+ Iterator<DiscoveryNode> nodesIter = currentState.nodes().getNodes().valuesIt();
+ while (nodesIter.hasNext()) {
+ final DiscoveryNode node = nodesIter.next();
+ if (shouldRemoveNodePredicate.test(node) && currentState.nodes().nodeExists(node)) {
+ nodesToBeRemoved.add(node);
+ }
+ }
+ }
+ if (nodesToBeRemoved.size() <= 0) {
+ // no nodes to remove, will keep the current cluster state
+ return ClusterTasksResult.<Task>builder()
+ .successes(tasks)
+ .build(currentState);
+ }
+ for (DiscoveryNode nodeToBeRemoved : nodesToBeRemoved) {
+ remainingNodesBuilder.remove(nodeToBeRemoved);
+ }
+
+ final ClusterState remainingNodesClusterState = remainingNodesClusterState(currentState, remainingNodesBuilder);
+
+ return getTaskClusterTasksResult(currentState, tasks, remainingNodesClusterState);
+ }
+
+ private boolean nodeHasDecommissionedAttribute(DiscoveryNode discoveryNode, Task task) {
+ String discoveryNodeAttributeValue = discoveryNode.getAttributes().get(task.decommissionAttribute().attributeName());
+ return discoveryNodeAttributeValue != null && task.decommissionAttribute().attributeValues().contains(discoveryNodeAttributeValue);
+ }
+
+ // visible for testing
+ // hook is used in testing to ensure that correct cluster state is used to test whether a
+ // rejoin or reroute is needed
+ protected ClusterState remainingNodesClusterState(final ClusterState currentState, DiscoveryNodes.Builder remainingNodesBuilder) {
+ return ClusterState.builder(currentState).nodes(remainingNodesBuilder).build();
+ }
+
+ protected ClusterTasksResult<Task> getTaskClusterTasksResult(
+ ClusterState currentState,
+ List<Task> tasks,
+ ClusterState remainingNodesClusterState
+ ) {
+ ClusterState ptasksDisassociatedState = PersistentTasksCustomMetadata.disassociateDeadNodes(remainingNodesClusterState);
+ final ClusterTasksResult.Builder<DecommissionNodeAttributeClusterStateTaskExecutor.Task> resultBuilder = ClusterTasksResult.<
+ DecommissionNodeAttributeClusterStateTaskExecutor.Task>builder().successes(tasks);
+ return resultBuilder.build(allocationService.disassociateDeadNodes(ptasksDisassociatedState, true, describeTasks(tasks)));
+ }
+
+ @Override
+ public void onFailure(final String source, final Exception e) {
+ logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
+ }
+
+ @Override
+ public void onNoLongerClusterManager(String source) {
+ logger.debug("no longer cluster-manager while decommissioning node attribute [{}]", source);
+ }
+}
diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttribute.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttribute.java
new file mode 100644
index 0000000000000..6260af2823687
--- /dev/null
+++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttribute.java
@@ -0,0 +1,108 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.cluster.decommission;
+
+import org.opensearch.common.io.stream.StreamInput;
+import org.opensearch.common.io.stream.StreamOutput;
+import org.opensearch.common.io.stream.Writeable;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Objects;
+
+public final class DecommissionAttribute implements Writeable {
+ private final String attributeName;
+ private final List<String> attributeValues;
+
+ /**
+ * Update the attribute values for a given attribute name to decommission
+ *
+ * @param decommissionAttribute current decommissioned attribute object
+ * @param attributeValues values to be updated with
+ */
+ public DecommissionAttribute(DecommissionAttribute decommissionAttribute, List<String> attributeValues) {
+ this(decommissionAttribute.attributeName, attributeValues);
+ }
+
+ /**
+ * Constructs new decommission attribute name values pair
+ *
+ * @param attributeName attribute name
+ * @param attributeValues attribute values
+ */
+ public DecommissionAttribute(String attributeName, List<String> attributeValues) {
+ this.attributeName = attributeName;
+ this.attributeValues = attributeValues;
+ }
+
+ /**
+ * Returns attribute name
+ *
+ * @return attributeName
+ */
+ public String attributeName() {
+ return this.attributeName;
+ }
+
+ /**
+ * Returns attribute values
+ *
+ * @return attributeValues
+ */
+ public List<String> attributeValues() {
+ return this.attributeValues;
+ }
+
+ public DecommissionAttribute(StreamInput in) throws IOException {
+ attributeName = in.readString();
+ attributeValues = in.readStringList();
+ }
+
+ /**
+ * Writes decommission attribute name values to stream output
+ *
+ * @param out stream output
+ */
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(attributeName);
+ out.writeStringCollection(attributeValues);
+ }
+
+ /**
+ * Checks whether this instance equals the other instance on attributeName, ignoring {@link #attributeValues}.
+ *
+ * @param other other decommission attribute name values
+ * @return {@code true} if both instances have an equal attributeName field, regardless of their attributeValues fields
+ */
+ public boolean equalsIgnoreValues(DecommissionAttribute other) {
+ return attributeName.equals(other.attributeName);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ DecommissionAttribute that = (DecommissionAttribute) o;
+
+ if (!attributeName.equals(that.attributeName)) return false;
+ return attributeValues.equals(that.attributeValues);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(attributeName, attributeValues);
+ }
+
+ @Override
+ public String toString() {
+ return "DecommissionAttribute{" + attributeName + "}{" + attributeValues().toString() + "}";
+ }
+}
diff --git a/server/src/test/java/org/opensearch/cluster/coordination/DecommissionNodeAttributeClusterStateTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/DecommissionNodeAttributeClusterStateTaskExecutorTests.java
new file mode 100644
index 0000000000000..204d31f18e2cf
--- /dev/null
+++ b/server/src/test/java/org/opensearch/cluster/coordination/DecommissionNodeAttributeClusterStateTaskExecutorTests.java
@@ -0,0 +1,178 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.cluster.coordination;
+
+import org.opensearch.Version;
+import org.opensearch.cluster.ClusterName;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.ClusterStateTaskExecutor;
+import org.opensearch.cluster.decommission.DecommissionAttribute;
+import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.cluster.node.DiscoveryNodeRole;
+import org.opensearch.cluster.node.DiscoveryNodes;
+import org.opensearch.cluster.routing.allocation.AllocationService;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static java.util.Collections.singletonMap;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+public class DecommissionNodeAttributeClusterStateTaskExecutorTests extends OpenSearchTestCase {
+
+ public void testRemoveNodesForDecommissionedAttribute() throws Exception {
+ final AllocationService allocationService = mock(AllocationService.class);
+ when(allocationService.disassociateDeadNodes(any(ClusterState.class), eq(true), any(String.class))).thenAnswer(
+ im -> im.getArguments()[0]
+ );
+ final AtomicReference<ClusterState> remainingNodesClusterState = new AtomicReference<>();
+ ClusterState clusterState = ClusterState.builder(new ClusterName("test")).build();
+
+ logger.info("--> adding five nodes on same zone_1");
+ clusterState = addNodes(clusterState, "zone_1", "node1", "node2", "node3", "node4", "node5");
+
+ logger.info("--> adding five nodes on same zone_2");
+ clusterState = addNodes(clusterState, "zone_2", "node6", "node7", "node8", "node9", "node10");
+
+ logger.info("--> adding five nodes on same zone_3");
+ clusterState = addNodes(clusterState, "zone_3", "node11", "node12", "node13", "node14", "node15");
+
+ final DecommissionNodeAttributeClusterStateTaskExecutor executor = new DecommissionNodeAttributeClusterStateTaskExecutor(
+ allocationService,
+ logger
+ ) {
+ @Override
+ protected ClusterState remainingNodesClusterState(ClusterState currentState, DiscoveryNodes.Builder remainingNodesBuilder) {
+ remainingNodesClusterState.set(super.remainingNodesClusterState(currentState, remainingNodesBuilder));
+ return remainingNodesClusterState.get();
+ }
+ };
+
+ final List<DecommissionNodeAttributeClusterStateTaskExecutor.Task> tasks = new ArrayList<>();
+ tasks.add(
+ new DecommissionNodeAttributeClusterStateTaskExecutor.Task(
+ new DecommissionAttribute("zone", Collections.singletonList("zone_3")),
+ "unit test zone decommission executor"
+ )
+ );
+
+ final ClusterStateTaskExecutor.ClusterTasksResult<DecommissionNodeAttributeClusterStateTaskExecutor.Task> result = executor.execute(
+ clusterState,
+ tasks
+ );
+
+ ClusterState expectedClusterState = remainingNodesClusterState.get();
+ ClusterState actualClusterState = result.resultingState;
+
+ // Assert cluster state is updated and is successful
+ verify(allocationService).disassociateDeadNodes(eq(expectedClusterState), eq(true), any(String.class));
+ assertEquals(actualClusterState, expectedClusterState);
+ assertTrue(result.executionResults.get(tasks.get(0)).isSuccess());
+
+ // Verify only 10 nodes present in the cluster after decommissioning
+ assertEquals(actualClusterState.nodes().getNodes().size(), 10);
+
+ // Verify no nodes has attribute (zone, zone_3)
+ Iterator<DiscoveryNode> currDiscoveryNodeIterator = actualClusterState.nodes().getNodes().valuesIt();
+ while (currDiscoveryNodeIterator.hasNext()) {
+ final DiscoveryNode node = currDiscoveryNodeIterator.next();
+ assertNotEquals(node.getAttributes().get("zone"), "zone_3");
+ }
+ }
+
+ public void testSameClusterStateAfterExecutionForUnknownAttributeNameAndValue() throws Exception {
+ final AllocationService allocationService = mock(AllocationService.class);
+ when(allocationService.disassociateDeadNodes(any(ClusterState.class), eq(true), any(String.class))).thenAnswer(
+ im -> im.getArguments()[0]
+ );
+ final AtomicReference<ClusterState> remainingNodesClusterState = new AtomicReference<>();
+ ClusterState clusterState = ClusterState.builder(new ClusterName("test")).build();
+
+ logger.info("--> adding five nodes on same zone_1");
+ clusterState = addNodes(clusterState, "zone_1", "node1", "node2", "node3", "node4", "node5");
+
+ logger.info("--> adding five nodes on same zone_2");
+ clusterState = addNodes(clusterState, "zone_2", "node6", "node7", "node8", "node9", "node10");
+
+ logger.info("--> adding five nodes on same zone_3");
+ clusterState = addNodes(clusterState, "zone_3", "node11", "node12", "node13", "node14", "node15");
+
+ final DecommissionNodeAttributeClusterStateTaskExecutor executor = new DecommissionNodeAttributeClusterStateTaskExecutor(
+ allocationService,
+ logger
+ ) {
+ @Override
+ protected ClusterState remainingNodesClusterState(ClusterState currentState, DiscoveryNodes.Builder remainingNodesBuilder) {
+ remainingNodesClusterState.set(super.remainingNodesClusterState(currentState, remainingNodesBuilder));
+ return remainingNodesClusterState.get();
+ }
+ };
+
+ final List<DecommissionNodeAttributeClusterStateTaskExecutor.Task> tasks = new ArrayList<>();
+ // Task 1 with unknown attribute name
+ tasks.add(
+ new DecommissionNodeAttributeClusterStateTaskExecutor.Task(
+ new DecommissionAttribute("unknown_zone_name", Collections.singletonList("unknown_zone_value")),
+ "unit test zone decommission executor"
+ )
+ );
+ // Task 2 with unknown attribute value
+ tasks.add(
+ new DecommissionNodeAttributeClusterStateTaskExecutor.Task(
+ new DecommissionAttribute("zone", Collections.singletonList("unknown_zone_value")),
+ "unit test zone decommission executor"
+ )
+ );
+
+ final ClusterStateTaskExecutor.ClusterTasksResult<DecommissionNodeAttributeClusterStateTaskExecutor.Task> result = executor.execute(
+ clusterState,
+ tasks
+ );
+
+ ClusterState expectedClusterState = remainingNodesClusterState.get();
+ ClusterState actualClusterState = result.resultingState;
+
+ // assert that disassociate dead node tasks is never executed
+ verify(allocationService, never()).disassociateDeadNodes(eq(expectedClusterState), eq(true), any(String.class));
+
+ // assert that cluster state remains same
+ assertEquals(clusterState, actualClusterState);
+
+ // Verify all 15 nodes present in the cluster after decommissioning unknown attribute name
+ assertEquals(actualClusterState.nodes().getNodes().size(), 15);
+ }
+
+ private ClusterState addNodes(ClusterState clusterState, String zone, String... nodeIds) {
+ DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes());
+ org.opensearch.common.collect.List.of(nodeIds).forEach(nodeId -> nodeBuilder.add(newNode(nodeId, singletonMap("zone", zone))));
+ clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build();
+ return clusterState;
+ }
+
+ private DiscoveryNode newNode(String nodeId, Map<String, String> attributes) {
+ return new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), attributes, CLUSTER_MANAGER_DATA_ROLES, Version.CURRENT);
+ }
+
+ private static final Set<DiscoveryNodeRole> CLUSTER_MANAGER_DATA_ROLES = Collections.unmodifiableSet(
+ new HashSet<>(Arrays.asList(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, DiscoveryNodeRole.DATA_ROLE))
+ );
+}
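Distilled usage shape of the executor above, as the tests drive it (a sketch assuming an `executor` and a `clusterState` already in scope; not additional patch content):

    // Submit one task per decommissioned attribute; matching nodes are removed
    // and AllocationService.disassociateDeadNodes reroutes what remains.
    DecommissionNodeAttributeClusterStateTaskExecutor.Task task =
        new DecommissionNodeAttributeClusterStateTaskExecutor.Task(
            new DecommissionAttribute("zone", Collections.singletonList("zone_3")),
            "zone_3 marked for decommission"
        );
    ClusterStateTaskExecutor.ClusterTasksResult<DecommissionNodeAttributeClusterStateTaskExecutor.Task> result =
        executor.execute(clusterState, Collections.singletonList(task));  // throws Exception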
From 042375bf623f2394482e8eec3964e223aab5e272 Mon Sep 17 00:00:00 2001
From: Rishab Nahata
Date: Mon, 8 Aug 2022 20:05:20 +0530
Subject: [PATCH 020/187] Add DecommissionHelper
Signed-off-by: Rishab Nahata
---
.../decommission/DecommissionHelper.java | 58 +++++++++++++++++++
1 file changed, 58 insertions(+)
create mode 100644 server/src/main/java/org/opensearch/cluster/decommission/DecommissionHelper.java
diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionHelper.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionHelper.java
new file mode 100644
index 0000000000000..6b9e480abcef7
--- /dev/null
+++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionHelper.java
@@ -0,0 +1,58 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.cluster.decommission;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.ClusterStateTaskConfig;
+import org.opensearch.cluster.ClusterStateTaskListener;
+import org.opensearch.cluster.coordination.NodeRemovalClusterStateTaskExecutor;
+import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.cluster.node.DiscoveryNodes;
+import org.opensearch.cluster.service.ClusterManagerService;
+import org.opensearch.common.Priority;
+import org.opensearch.common.inject.Inject;
+
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+public class DecommissionHelper {
+
+ private static final Logger logger = LogManager.getLogger(DecommissionHelper.class);
+
+ private final NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor;
+ private final ClusterManagerService clusterManagerService;
+
+ DecommissionHelper(
+ ClusterManagerService clusterManagerService,
+ NodeRemovalClusterStateTaskExecutor nodeRemovalClusterStateTaskExecutor
+ ) {
+ this.nodeRemovalExecutor = nodeRemovalClusterStateTaskExecutor;
+ this.clusterManagerService = clusterManagerService;
+ }
+
+ private void handleNodesDecommissionRequest(List<DiscoveryNode> nodesToBeDecommissioned, String reason) {
+ final Map<NodeRemovalClusterStateTaskExecutor.Task, ClusterStateTaskListener> nodesDecommissionTasks = new LinkedHashMap<>();
+ nodesToBeDecommissioned.forEach(discoveryNode -> {
+ final NodeRemovalClusterStateTaskExecutor.Task task = new NodeRemovalClusterStateTaskExecutor.Task(
+ discoveryNode, reason
+ );
+ nodesDecommissionTasks.put(task, nodeRemovalExecutor);
+ });
+ final String source = "node-decommissioned";
+ clusterManagerService.submitStateUpdateTasks(
+ source,
+ nodesDecommissionTasks,
+ ClusterStateTaskConfig.build(Priority.IMMEDIATE),
+ nodeRemovalExecutor
+ );
+ }
+}
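The helper's flow, in brief: each DiscoveryNode to be decommissioned becomes a NodeRemovalClusterStateTaskExecutor.Task, and all tasks are submitted as one batch at Priority.IMMEDIATE, with the node-removal executor acting as both executor and listener. A hypothetical call shape (handleNodesDecommissionRequest is private in this revision, so this is illustration only; the node references are assumed to be resolved elsewhere):

    // Ask the helper to remove two nodes that carry a decommissioned attribute value.
    List<DiscoveryNode> toRemove = Arrays.asList(nodeInZone3a, nodeInZone3b);  // assumed references
    decommissionHelper.handleNodesDecommissionRequest(toRemove, "zone zone_3 decommissioned");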
From 50de6084743882fa1ac9b6be27a313ba5eba9f68 Mon Sep 17 00:00:00 2001
From: Rishab Nahata
Date: Wed, 17 Aug 2022 15:46:09 +0530
Subject: [PATCH 021/187] Decommission service implementation with metadata
Signed-off-by: Rishab Nahata
---
.../org/opensearch/OpenSearchException.java | 7 +
.../org/opensearch/cluster/ClusterModule.java | 9 +
...NodeAttributeClusterStateTaskExecutor.java | 141 ----------
.../decommission/DecommissionAttribute.java | 47 ++--
.../DecommissionFailedException.java | 49 ++++
.../decommission/DecommissionHelper.java | 15 +-
.../decommission/DecommissionService.java | 223 +++++++++++++++
.../decommission/DecommissionStatus.java | 94 +++++++
.../DecommissionAttributeMetadata.java | 254 ++++++++++++++++++
...ttributeClusterStateTaskExecutorTests.java | 178 ------------
10 files changed, 664 insertions(+), 353 deletions(-)
delete mode 100644 server/src/main/java/org/opensearch/cluster/coordination/DecommissionNodeAttributeClusterStateTaskExecutor.java
create mode 100644 server/src/main/java/org/opensearch/cluster/decommission/DecommissionFailedException.java
create mode 100644 server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java
create mode 100644 server/src/main/java/org/opensearch/cluster/decommission/DecommissionStatus.java
create mode 100644 server/src/main/java/org/opensearch/cluster/metadata/DecommissionAttributeMetadata.java
delete mode 100644 server/src/test/java/org/opensearch/cluster/coordination/DecommissionNodeAttributeClusterStateTaskExecutorTests.java
diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java b/server/src/main/java/org/opensearch/OpenSearchException.java
index 87efc03734d26..d3e1bef9b6dbb 100644
--- a/server/src/main/java/org/opensearch/OpenSearchException.java
+++ b/server/src/main/java/org/opensearch/OpenSearchException.java
@@ -34,6 +34,7 @@
import org.opensearch.action.support.replication.ReplicationOperation;
import org.opensearch.cluster.action.shard.ShardStateAction;
+import org.opensearch.cluster.decommission.DecommissionFailedException;
import org.opensearch.common.CheckedFunction;
import org.opensearch.common.Nullable;
import org.opensearch.common.ParseField;
@@ -1608,6 +1609,12 @@ private enum OpenSearchExceptionHandle {
org.opensearch.index.shard.PrimaryShardClosedException::new,
162,
V_3_0_0
+ ),
+ DECOMMISSION_FAILED_EXCEPTION(
+ org.opensearch.cluster.decommission.DecommissionFailedException.class,
+ org.opensearch.cluster.decommission.DecommissionFailedException::new,
+ 163,
+ V_2_1_0
);
final Class<? extends OpenSearchException> exceptionClass;
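For the handle registration above to work, the exception must be constructible from the wire: the second argument is a reader bound via a Class::new method reference, so the class needs a public StreamInput constructor. A hedged sketch of that minimal shape, assuming the usual OpenSearchException conventions (the real DecommissionFailedException added by this patch carries more state):

import java.io.IOException;

import org.opensearch.OpenSearchException;
import org.opensearch.common.io.stream.StreamInput;

public class DecommissionFailedExceptionSketch extends OpenSearchException {
    public DecommissionFailedExceptionSketch(String msg, Object... args) {
        super(msg, args);
    }

    // The handle table deserializes the exception by invoking this
    // constructor, so it must exist with exactly this signature.
    public DecommissionFailedExceptionSketch(StreamInput in) throws IOException {
        super(in);
    }
}

The numeric id (163 here) identifies the exception type on the wire, so it must be unique and never reused across versions.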
diff --git a/server/src/main/java/org/opensearch/cluster/ClusterModule.java b/server/src/main/java/org/opensearch/cluster/ClusterModule.java
index f8ba520e465e2..de63369dafc89 100644
--- a/server/src/main/java/org/opensearch/cluster/ClusterModule.java
+++ b/server/src/main/java/org/opensearch/cluster/ClusterModule.java
@@ -38,6 +38,7 @@
import org.opensearch.cluster.metadata.ComponentTemplateMetadata;
import org.opensearch.cluster.metadata.ComposableIndexTemplateMetadata;
import org.opensearch.cluster.metadata.DataStreamMetadata;
+import org.opensearch.cluster.metadata.DecommissionAttributeMetadata;
import org.opensearch.cluster.metadata.IndexGraveyard;
import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
import org.opensearch.cluster.metadata.Metadata;
@@ -191,6 +192,7 @@ public static List<Entry> getNamedWriteables() {
ComposableIndexTemplateMetadata::readDiffFrom
);
registerMetadataCustom(entries, DataStreamMetadata.TYPE, DataStreamMetadata::new, DataStreamMetadata::readDiffFrom);
+ registerMetadataCustom(entries, DecommissionAttributeMetadata.TYPE, DecommissionAttributeMetadata::new, DecommissionAttributeMetadata::readDiffFrom);
// Task Status (not Diffable)
entries.add(new Entry(Task.Status.class, PersistentTasksNodeService.Status.NAME, PersistentTasksNodeService.Status::new));
return entries;
@@ -274,6 +276,13 @@ public static List<NamedXContentRegistry.Entry> getNamedXWriteables() {
DataStreamMetadata::fromXContent
)
);
+ entries.add(
+ new NamedXContentRegistry.Entry(
+ Metadata.Custom.class,
+ new ParseField(DecommissionAttributeMetadata.TYPE),
+ DecommissionAttributeMetadata::fromXContent
+ )
+ );
return entries;
}
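Both registrations exist because a Metadata.Custom travels in two encodings: the NamedWriteable entry (plus its diff reader) covers binary cluster state publication, and the NamedXContent entry covers parsing the persisted metadata back from disk. As a toy model of why registration is mandatory, readers are looked up by name at deserialization time, so an unregistered custom cannot be reconstructed on the receiving node. The sketch below is illustrative only, not the actual registry implementation.

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

final class RegistrySketch {
    private final Map<String, Function<byte[], Object>> readers = new HashMap<>();

    void register(String name, Function<byte[], Object> reader) {
        readers.put(name, reader);
    }

    Object read(String name, byte[] payload) {
        final Function<byte[], Object> reader = readers.get(name);
        if (reader == null) {
            // mirrors the failure mode of an unregistered named writeable
            throw new IllegalArgumentException("unknown named writeable [" + name + "]");
        }
        return reader.apply(payload);
    }
}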
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/DecommissionNodeAttributeClusterStateTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/DecommissionNodeAttributeClusterStateTaskExecutor.java
deleted file mode 100644
index d71cd98d5f25e..0000000000000
--- a/server/src/main/java/org/opensearch/cluster/coordination/DecommissionNodeAttributeClusterStateTaskExecutor.java
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.cluster.coordination;
-
-import org.apache.logging.log4j.Logger;
-import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.opensearch.cluster.ClusterState;
-import org.opensearch.cluster.ClusterStateTaskExecutor;
-import org.opensearch.cluster.ClusterStateTaskListener;
-import org.opensearch.cluster.decommission.DecommissionAttribute;
-import org.opensearch.cluster.node.DiscoveryNode;
-import org.opensearch.cluster.node.DiscoveryNodes;
-import org.opensearch.cluster.routing.allocation.AllocationService;
-import org.opensearch.persistent.PersistentTasksCustomMetadata;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.function.Predicate;
-
-/**
- * Decommissions and shuts down nodes having a given attribute and updates the cluster state
- *
- * @opensearch.internal
- */
-public class DecommissionNodeAttributeClusterStateTaskExecutor
- implements
-    ClusterStateTaskExecutor<DecommissionNodeAttributeClusterStateTaskExecutor.Task>,
- ClusterStateTaskListener {
-
- private final AllocationService allocationService;
- private final Logger logger;
-
- /**
- * Task for the executor.
- *
- * @opensearch.internal
- */
- public static class Task {
-
- private final DecommissionAttribute decommissionAttribute;
- private final String reason;
-
- public Task(final DecommissionAttribute decommissionAttribute, final String reason) {
- this.decommissionAttribute = decommissionAttribute;
- this.reason = reason;
- }
-
- public DecommissionAttribute decommissionAttribute() {
- return decommissionAttribute;
- }
-
- public String reason() {
- return reason;
- }
-
- @Override
- public String toString() {
- return "Decommission Node Attribute Task{"
- + "decommissionAttribute="
- + decommissionAttribute
- + ", reason='"
- + reason
- + '\''
- + '}';
- }
- }
-
- public DecommissionNodeAttributeClusterStateTaskExecutor(final AllocationService allocationService, final Logger logger) {
- this.allocationService = allocationService;
- this.logger = logger;
- }
-
- @Override
- public ClusterTasksResult<Task> execute(ClusterState currentState, List<Task> tasks) throws Exception {
- final DiscoveryNodes.Builder remainingNodesBuilder = DiscoveryNodes.builder(currentState.nodes());
- List<DiscoveryNode> nodesToBeRemoved = new ArrayList<>();
- for (final Task task : tasks) {
- final Predicate<DiscoveryNode> shouldRemoveNodePredicate = discoveryNode -> nodeHasDecommissionedAttribute(discoveryNode, task);
- Iterator<DiscoveryNode> nodesIter = currentState.nodes().getNodes().valuesIt();
- while (nodesIter.hasNext()) {
- final DiscoveryNode node = nodesIter.next();
- if (shouldRemoveNodePredicate.test(node) && currentState.nodes().nodeExists(node)) {
- nodesToBeRemoved.add(node);
- }
- }
- }
- if (nodesToBeRemoved.size() <= 0) {
- // no nodes to remove, will keep the current cluster state
- return ClusterTasksResult.<Task>builder()
- .successes(tasks)
- .build(currentState);
- }
- for (DiscoveryNode nodeToBeRemoved : nodesToBeRemoved) {
- remainingNodesBuilder.remove(nodeToBeRemoved);
- }
-
- final ClusterState remainingNodesClusterState = remainingNodesClusterState(currentState, remainingNodesBuilder);
-
- return getTaskClusterTasksResult(currentState, tasks, remainingNodesClusterState);
- }
-
- private boolean nodeHasDecommissionedAttribute(DiscoveryNode discoveryNode, Task task) {
- String discoveryNodeAttributeValue = discoveryNode.getAttributes().get(task.decommissionAttribute().attributeName());
- return discoveryNodeAttributeValue != null && task.decommissionAttribute().attributeValues().contains(discoveryNodeAttributeValue);
- }
-
- // visible for testing
- // hook is used in testing to ensure that correct cluster state is used to test whether a
- // rejoin or reroute is needed
- protected ClusterState remainingNodesClusterState(final ClusterState currentState, DiscoveryNodes.Builder remainingNodesBuilder) {
- return ClusterState.builder(currentState).nodes(remainingNodesBuilder).build();
- }
-
- protected ClusterTasksResult<Task> getTaskClusterTasksResult(
- ClusterState currentState,
- List<Task> tasks,
- ClusterState remainingNodesClusterState
- ) {
- ClusterState ptasksDisassociatedState = PersistentTasksCustomMetadata.disassociateDeadNodes(remainingNodesClusterState);
- final ClusterTasksResult.Builder<DecommissionNodeAttributeClusterStateTaskExecutor.Task> resultBuilder = ClusterTasksResult.<
- DecommissionNodeAttributeClusterStateTaskExecutor.Task>builder().successes(tasks);
- return resultBuilder.build(allocationService.disassociateDeadNodes(ptasksDisassociatedState, true, describeTasks(tasks)));
- }
-
- @Override
- public void onFailure(final String source, final Exception e) {
- logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
- }
-
- @Override
- public void onNoLongerClusterManager(String source) {
- logger.debug("no longer cluster-manager while decommissioning node attribute [{}]", source);
- }
-}
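The executor above is deleted in favour of DecommissionHelper driving NodeRemovalClusterStateTaskExecutor, but its matching rule is worth keeping in mind: a node is evicted only when it carries the decommissioned attribute with one of the decommissioned values, and attribute-less nodes are never touched thanks to the null check. Below is a self-contained restatement with a worked example; the class name and main method are illustrative, not from the patch.

import java.util.List;
import java.util.Map;

final class AttributeMatchSketch {
    // Same null-safe rule as the deleted nodeHasDecommissionedAttribute.
    static boolean matches(Map<String, String> nodeAttributes, String attributeName, List<String> decommissionedValues) {
        final String value = nodeAttributes.get(attributeName);
        return value != null && decommissionedValues.contains(value);
    }

    public static void main(String[] args) {
        System.out.println(matches(Map.of("zone", "zone-2"), "zone", List.of("zone-2", "zone-3"))); // true
        System.out.println(matches(Map.of("zone", "zone-1"), "zone", List.of("zone-2", "zone-3"))); // false
        System.out.println(matches(Map.of("rack", "r1"), "zone", List.of("zone-2"))); // false: attribute absent
    }
}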
diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttribute.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttribute.java
index 6260af2823687..db4e06e854518 100644
--- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttribute.java
+++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttribute.java
@@ -18,27 +18,27 @@
public final class DecommissionAttribute implements Writeable {
private final String attributeName;
- private final List<String> attributeValues;
+ private final String attributeValue;
/**
- * Update the attribute values for a given attribute name to decommission
+ * Update the attribute value for a given attribute name to decommission
*
* @param decommissionAttribute current decommissioned attribute object
- * @param attributeValues values to be updated with
+ * @param attributeValue attribute value to be updated with
*/
- public DecommissionAttribute(DecommissionAttribute decommissionAttribute, List<String> attributeValues) {
- this(decommissionAttribute.attributeName, attributeValues);
+ public DecommissionAttribute(DecommissionAttribute decommissionAttribute, String attributeValue) {
+ this(decommissionAttribute.attributeName, attributeValue);
}
/**
- * Constructs new decommission attribute name values pair
+ * Constructs new decommission attribute name value pair
*
* @param attributeName attribute name
- * @param attributeValues attribute values
+ * @param attributeValue attribute value
*/
- public DecommissionAttribute(String attributeName, List