From 91582e7c25a6f76eeebfa3ab74287178e1986491 Mon Sep 17 00:00:00 2001 From: Ashish Date: Tue, 19 Mar 2024 12:23:00 +0530 Subject: [PATCH 01/74] Introduce remote store path type in customData in IndexMetadata (#12607) Signed-off-by: Ashish Singh --- .../cluster/metadata/IndexMetadata.java | 1 + .../metadata/MetadataCreateIndexService.java | 36 ++++++-- .../common/settings/ClusterSettings.java | 2 + .../index/remote/RemoteStorePathResolver.java | 30 ++++++ .../index/remote/RemoteStorePathType.java | 36 ++++++++ .../opensearch/indices/IndicesService.java | 13 +++ .../MetadataCreateIndexServiceTests.java | 92 ++++++++++++++++++- 7 files changed, 201 insertions(+), 9 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/remote/RemoteStorePathResolver.java create mode 100644 server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 03784df509ed6..80b78cfe154f1 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -635,6 +635,7 @@ public static APIBlock readFrom(StreamInput input) throws IOException { static final String KEY_ROLLOVER_INFOS = "rollover_info"; static final String KEY_SYSTEM = "system"; public static final String KEY_PRIMARY_TERMS = "primary_terms"; + public static final String REMOTE_STORE_CUSTOM_KEY = "remote_store"; public static final String INDEX_STATE_FILE_PREFIX = "state-"; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index acc2f3a294745..f117d1a4a11a2 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -88,6 +88,8 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.MapperService.MergeReason; import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.remote.RemoteStorePathResolver; +import org.opensearch.index.remote.RemoteStorePathType; import org.opensearch.index.shard.IndexSettingProvider; import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndexCreationException; @@ -167,6 +169,9 @@ public class MetadataCreateIndexService { private final ClusterManagerTaskThrottler.ThrottlingKey createIndexTaskKey; private AwarenessReplicaBalance awarenessReplicaBalance; + @Nullable + private final RemoteStorePathResolver remoteStorePathResolver; + public MetadataCreateIndexService( final Settings settings, final ClusterService clusterService, @@ -198,6 +203,9 @@ public MetadataCreateIndexService( // Task is onboarded for throttling, it will get retried from associated TransportClusterManagerNodeAction. createIndexTaskKey = clusterService.registerClusterManagerTask(ClusterManagerTaskKeys.CREATE_INDEX_KEY, true); + remoteStorePathResolver = isRemoteDataAttributePresent(settings) + ? 
new RemoteStorePathResolver(clusterService.getClusterSettings()) + : null; } /** @@ -498,7 +506,8 @@ private ClusterState applyCreateIndexWithTemporaryService( temporaryIndexMeta.getSettings(), temporaryIndexMeta.getRoutingNumShards(), sourceMetadata, - temporaryIndexMeta.isSystem() + temporaryIndexMeta.isSystem(), + temporaryIndexMeta.getCustomData() ); } catch (Exception e) { logger.info("failed to build index metadata [{}]", request.index()); @@ -522,10 +531,11 @@ private ClusterState applyCreateIndexWithTemporaryService( /** * Given a state and index settings calculated after applying templates, validate metadata for - * the new index, returning an {@link IndexMetadata} for the new index + * the new index, returning an {@link IndexMetadata} for the new index. + *
+ * <p>
+ * The access level of the method changed to default level for visibility to test. */ - private IndexMetadata buildAndValidateTemporaryIndexMetadata( - final ClusterState currentState, + IndexMetadata buildAndValidateTemporaryIndexMetadata( final Settings aggregatedIndexSettings, final CreateIndexClusterStateUpdateRequest request, final int routingNumShards @@ -544,6 +554,11 @@ private IndexMetadata buildAndValidateTemporaryIndexMetadata( tmpImdBuilder.settings(indexSettings); tmpImdBuilder.system(isSystem); + if (remoteStorePathResolver != null) { + String pathType = remoteStorePathResolver.resolveType().toString(); + tmpImdBuilder.putCustom(IndexMetadata.REMOTE_STORE_CUSTOM_KEY, Map.of(RemoteStorePathType.NAME, pathType)); + } + // Set up everything, now locally create the index to see that things are ok, and apply IndexMetadata tempMetadata = tmpImdBuilder.build(); validateActiveShardCount(request.waitForActiveShards(), tempMetadata); @@ -582,7 +597,7 @@ private ClusterState applyCreateIndexRequestWithV1Templates( clusterService.getClusterSettings() ); int routingNumShards = getIndexNumberOfRoutingShards(aggregatedIndexSettings, null); - IndexMetadata tmpImd = buildAndValidateTemporaryIndexMetadata(currentState, aggregatedIndexSettings, request, routingNumShards); + IndexMetadata tmpImd = buildAndValidateTemporaryIndexMetadata(aggregatedIndexSettings, request, routingNumShards); return applyCreateIndexWithTemporaryService( currentState, @@ -647,7 +662,7 @@ private ClusterState applyCreateIndexRequestWithV2Template( clusterService.getClusterSettings() ); int routingNumShards = getIndexNumberOfRoutingShards(aggregatedIndexSettings, null); - IndexMetadata tmpImd = buildAndValidateTemporaryIndexMetadata(currentState, aggregatedIndexSettings, request, routingNumShards); + IndexMetadata tmpImd = buildAndValidateTemporaryIndexMetadata(aggregatedIndexSettings, request, routingNumShards); return applyCreateIndexWithTemporaryService( currentState, @@ -728,7 +743,7 @@ private ClusterState applyCreateIndexRequestWithExistingMetadata( clusterService.getClusterSettings() ); final int routingNumShards = getIndexNumberOfRoutingShards(aggregatedIndexSettings, sourceMetadata); - IndexMetadata tmpImd = buildAndValidateTemporaryIndexMetadata(currentState, aggregatedIndexSettings, request, routingNumShards); + IndexMetadata tmpImd = buildAndValidateTemporaryIndexMetadata(aggregatedIndexSettings, request, routingNumShards); return applyCreateIndexWithTemporaryService( currentState, @@ -1147,7 +1162,8 @@ static IndexMetadata buildIndexMetadata( Settings indexSettings, int routingNumShards, @Nullable IndexMetadata sourceMetadata, - boolean isSystem + boolean isSystem, + Map customData ) { IndexMetadata.Builder indexMetadataBuilder = createIndexMetadataBuilder(indexName, sourceMetadata, indexSettings, routingNumShards); indexMetadataBuilder.system(isSystem); @@ -1168,6 +1184,10 @@ static IndexMetadata buildIndexMetadata( indexMetadataBuilder.putAlias(aliases.get(i)); } + for (Map.Entry entry : customData.entrySet()) { + indexMetadataBuilder.putCustom(entry.getKey(), entry.getValue()); + } + indexMetadataBuilder.state(IndexMetadata.State.OPEN); return indexMetadataBuilder.build(); } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 6b4be45929553..730a330b6ca60 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ 
b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -713,6 +713,8 @@ public void apply(Settings value, Settings current, Settings previous) { IoBasedAdmissionControllerSettings.SEARCH_IO_USAGE_LIMIT, IoBasedAdmissionControllerSettings.INDEXING_IO_USAGE_LIMIT, IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING, + IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING, + // Concurrent segment search settings SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING, SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathResolver.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathResolver.java new file mode 100644 index 0000000000000..6e8126fcce0ca --- /dev/null +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathResolver.java @@ -0,0 +1,30 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.remote; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.indices.IndicesService; + +/** + * Determines the {@link RemoteStorePathType} at the time of index metadata creation. + * + * @opensearch.internal + */ +public class RemoteStorePathResolver { + + private final ClusterSettings clusterSettings; + + public RemoteStorePathResolver(ClusterSettings clusterSettings) { + this.clusterSettings = clusterSettings; + } + + public RemoteStorePathType resolveType() { + return clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING); + } +} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java new file mode 100644 index 0000000000000..a64e07ab1f66f --- /dev/null +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.remote; + +import java.util.Locale; + +/** + * Enumerates the types of remote store paths resolution techniques supported by OpenSearch. + * For more information, see Github issue #12567. + * + * @opensearch.internal + */ +public enum RemoteStorePathType { + + FIXED, + HASHED_PREFIX; + + public static RemoteStorePathType parseString(String remoteStoreBlobPathType) { + try { + return RemoteStorePathType.valueOf(remoteStoreBlobPathType.toUpperCase(Locale.ROOT)); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("Could not parse RemoteStorePathType for [" + remoteStoreBlobPathType + "]"); + } + } + + /** + * This string is used as key for storing information in the custom data in index settings. 
+ */ + public static final String NAME = "path_type"; +} diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 40c10e3a2fe96..0e64894e6f708 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -123,6 +123,7 @@ import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.recovery.RecoveryStats; import org.opensearch.index.refresh.RefreshStats; +import org.opensearch.index.remote.RemoteStorePathType; import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.search.stats.SearchStats; import org.opensearch.index.seqno.RetentionLeaseStats; @@ -313,6 +314,18 @@ public class IndicesService extends AbstractLifecycleComponent Property.Final ); + /** + * This setting is used to set the remote store blob store path prefix strategy. This setting is effective only for + * remote store enabled cluster. + */ + public static final Setting CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING = new Setting<>( + "cluster.remote_store.index.path.prefix.type", + RemoteStorePathType.FIXED.toString(), + RemoteStorePathType::parseString, + Property.NodeScope, + Property.Dynamic + ); + /** * The node's settings. */ diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index cc605878119a2..cf4de32890a2a 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -71,6 +71,7 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.remote.RemoteStorePathType; import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndexCreationException; import org.opensearch.indices.IndicesService; @@ -1563,13 +1564,102 @@ public void testBuildIndexMetadata() { .put(SETTING_NUMBER_OF_SHARDS, 1) .build(); List aliases = singletonList(AliasMetadata.builder("alias1").build()); - IndexMetadata indexMetadata = buildIndexMetadata("test", aliases, () -> null, indexSettings, 4, sourceIndexMetadata, false); + IndexMetadata indexMetadata = buildIndexMetadata( + "test", + aliases, + () -> null, + indexSettings, + 4, + sourceIndexMetadata, + false, + new HashMap<>() + ); assertThat(indexMetadata.getAliases().size(), is(1)); assertThat(indexMetadata.getAliases().keySet().iterator().next(), is("alias1")); assertThat("The source index primary term must be used", indexMetadata.primaryTerm(0), is(3L)); } + /** + * This test checks if the cluster is a remote store cluster then we populate custom data for remote settings in + * index metadata of the underlying index. This captures information around the resolution pattern of the path for + * remote segments and translog. 
+ */ + public void testRemoteCustomData() { + // Case 1 - Remote store is not enabled + IndexMetadata indexMetadata = testRemoteCustomData(false, randomFrom(RemoteStorePathType.values())); + assertNull(indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY)); + + // Case 2 - cluster.remote_store.index.path.prefix.optimised=fixed (default value) + indexMetadata = testRemoteCustomData(true, RemoteStorePathType.FIXED); + validateRemoteCustomData( + indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY), + RemoteStorePathType.NAME, + RemoteStorePathType.FIXED.toString() + ); + + // Case 3 - cluster.remote_store.index.path.prefix.optimised=hashed_prefix + indexMetadata = testRemoteCustomData(true, RemoteStorePathType.HASHED_PREFIX); + validateRemoteCustomData( + indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY), + RemoteStorePathType.NAME, + RemoteStorePathType.HASHED_PREFIX.toString() + ); + } + + private IndexMetadata testRemoteCustomData(boolean remoteStoreEnabled, RemoteStorePathType remoteStorePathType) { + Settings.Builder settingsBuilder = Settings.builder(); + if (remoteStoreEnabled) { + settingsBuilder.put(NODE_ATTRIBUTES.getKey() + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, "test"); + } + settingsBuilder.put(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), remoteStorePathType.toString()); + Settings settings = settingsBuilder.build(); + + ClusterService clusterService = mock(ClusterService.class); + Metadata metadata = Metadata.builder() + .transientSettings(Settings.builder().put(Metadata.DEFAULT_REPLICA_COUNT_SETTING.getKey(), 1).build()) + .build(); + ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + when(clusterService.getSettings()).thenReturn(settings); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + when(clusterService.state()).thenReturn(clusterState); + + ThreadPool threadPool = new TestThreadPool(getTestName()); + MetadataCreateIndexService metadataCreateIndexService = new MetadataCreateIndexService( + settings, + clusterService, + null, + null, + null, + createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), + new Environment(Settings.builder().put("path.home", "dummy").build(), null), + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + threadPool, + null, + new SystemIndices(Collections.emptyMap()), + true, + new AwarenessReplicaBalance(settings, clusterService.getClusterSettings()) + ); + CreateIndexClusterStateUpdateRequest request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + Settings indexSettings = Settings.builder() + .put("index.version.created", Version.CURRENT) + .put(INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 3) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + + IndexMetadata indexMetadata = metadataCreateIndexService.buildAndValidateTemporaryIndexMetadata(indexSettings, request, 0); + threadPool.shutdown(); + return indexMetadata; + } + + private void validateRemoteCustomData(Map customData, String expectedKey, String expectedValue) { + assertTrue(customData.containsKey(expectedKey)); + assertEquals(expectedValue, customData.get(expectedKey)); + } + public void testGetIndexNumberOfRoutingShardsWithNullSourceIndex() { Settings indexSettings = Settings.builder() 
.put("index.version.created", Version.CURRENT) From 7a6f8b0eead5371fa18d49534ac76edf199e9c4c Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 19 Mar 2024 07:16:07 -0400 Subject: [PATCH 02/74] Bump mockito & bytebuddy dependencies (#12720) Signed-off-by: Andriy Redko --- buildSrc/version.properties | 4 ++-- .../org/opensearch/transport/RemoteClusterClientTests.java | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 6da095473b520..8705588babe97 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -55,9 +55,9 @@ bouncycastle=1.77 randomizedrunner = 2.7.1 junit = 4.13.2 hamcrest = 2.1 -mockito = 5.10.0 +mockito = 5.11.0 objenesis = 3.2 -bytebuddy = 1.14.7 +bytebuddy = 1.14.9 # benchmark dependencies jmh = 1.35 diff --git a/server/src/test/java/org/opensearch/transport/RemoteClusterClientTests.java b/server/src/test/java/org/opensearch/transport/RemoteClusterClientTests.java index f3b7f9916d460..59c0206a87fb3 100644 --- a/server/src/test/java/org/opensearch/transport/RemoteClusterClientTests.java +++ b/server/src/test/java/org/opensearch/transport/RemoteClusterClientTests.java @@ -63,6 +63,7 @@ public void tearDown() throws Exception { ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); } + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/12338") public void testConnectAndExecuteRequest() throws Exception { Settings remoteSettings = Settings.builder().put(ClusterName.CLUSTER_NAME_SETTING.getKey(), "foo_bar_cluster").build(); try ( From d4e1ab135dfcb058b476fce09a669f4be5854702 Mon Sep 17 00:00:00 2001 From: Sooraj Sinha <81695996+soosinha@users.noreply.github.com> Date: Tue, 19 Mar 2024 21:52:43 +0530 Subject: [PATCH 03/74] Election scheduler should be cancelled after cluster state publication (#11699) Signed-off-by: Sooraj Sinha --- CHANGELOG.md | 1 + .../cluster/coordination/Coordinator.java | 20 +++++++++++++++---- .../coordination/CoordinatorTests.java | 4 ++-- .../AbstractCoordinatorTestCase.java | 7 +++++++ 4 files changed, 26 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c4558cf3fe251..3fb141e189bd3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -146,6 +146,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Allow composite aggregation to run under a parent filter aggregation ([#11499](https://github.com/opensearch-project/OpenSearch/pull/11499)) - Quickly compute terms aggregations when the top-level query is functionally match-all for a segment ([#11643](https://github.com/opensearch-project/OpenSearch/pull/11643)) - Mark fuzzy filter GA and remove experimental setting ([12631](https://github.com/opensearch-project/OpenSearch/pull/12631)) +- Keep the election scheduler open until cluster state has been applied ([#11699](https://github.com/opensearch-project/OpenSearch/pull/11699)) ### Deprecated diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index 5a07f964f94a4..3d74feddfa261 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -386,6 +386,7 @@ public void onFailure(String source, Exception e) { @Override public void onSuccess(String source) { + closePrevotingAndElectionScheduler(); applyListener.onResponse(null); } }); @@ -472,17 +473,29 @@ private 
static Optional joinWithDestination(Optional lastJoin, Disco } private void closePrevotingAndElectionScheduler() { + closePrevoting(); + closeElectionScheduler(); + } + + private void closePrevoting() { if (prevotingRound != null) { prevotingRound.close(); prevotingRound = null; } + } + private void closeElectionScheduler() { if (electionScheduler != null) { electionScheduler.close(); electionScheduler = null; } } + // package-visible for testing + boolean isElectionSchedulerRunning() { + return electionScheduler != null; + } + private void updateMaxTermSeen(final long term) { synchronized (mutex) { maxTermSeen = Math.max(maxTermSeen, term); @@ -724,7 +737,7 @@ void becomeLeader(String method) { lastKnownLeader = Optional.of(getLocalNode()); peerFinder.deactivate(getLocalNode()); clusterFormationFailureHelper.stop(); - closePrevotingAndElectionScheduler(); + closePrevoting(); preVoteCollector.update(getPreVoteResponse(), getLocalNode()); assert leaderChecker.leader() == null : leaderChecker.leader(); @@ -761,7 +774,7 @@ void becomeFollower(String method, DiscoveryNode leaderNode) { lastKnownLeader = Optional.of(leaderNode); peerFinder.deactivate(leaderNode); clusterFormationFailureHelper.stop(); - closePrevotingAndElectionScheduler(); + closePrevoting(); cancelActivePublication("become follower: " + method); preVoteCollector.update(getPreVoteResponse(), leaderNode); @@ -927,7 +940,6 @@ public void invariant() { assert lastKnownLeader.isPresent() && lastKnownLeader.get().equals(getLocalNode()); assert joinAccumulator instanceof JoinHelper.LeaderJoinAccumulator; assert peerFinderLeader.equals(lastKnownLeader) : peerFinderLeader; - assert electionScheduler == null : electionScheduler; assert prevotingRound == null : prevotingRound; assert becomingClusterManager || getStateForClusterManagerService().nodes().getClusterManagerNodeId() != null : getStateForClusterManagerService(); @@ -972,7 +984,6 @@ assert getLocalNode().equals(applierState.nodes().getClusterManagerNode()) assert lastKnownLeader.isPresent() && (lastKnownLeader.get().equals(getLocalNode()) == false); assert joinAccumulator instanceof JoinHelper.FollowerJoinAccumulator; assert peerFinderLeader.equals(lastKnownLeader) : peerFinderLeader; - assert electionScheduler == null : electionScheduler; assert prevotingRound == null : prevotingRound; assert getStateForClusterManagerService().nodes().getClusterManagerNodeId() == null : getStateForClusterManagerService(); assert leaderChecker.currentNodeIsClusterManager() == false; @@ -1693,6 +1704,7 @@ public void onSuccess(String source) { updateMaxTermSeen(getCurrentTerm()); if (mode == Mode.LEADER) { + closePrevotingAndElectionScheduler(); // if necessary, abdicate to another node or improve the voting configuration boolean attemptReconfiguration = true; final ClusterState state = getLastAcceptedState(); // committed state diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java index a3129655148ab..5eeebd2588416 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java @@ -270,7 +270,7 @@ public void testNodesJoinAfterStableCluster() { public void testExpandsConfigurationWhenGrowingFromOneNodeToThreeButDoesNotShrink() { try (Cluster cluster = new Cluster(1)) { cluster.runRandomly(); - cluster.stabilise(); + cluster.stabilise(DEFAULT_STABILISATION_TIME * 2); 
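+ // allow extra stabilisation time: the election scheduler is now closed only once the cluster state has been applied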
final ClusterNode leader = cluster.getAnyLeader(); @@ -1750,7 +1750,7 @@ public void testDoesNotPerformElectionWhenRestartingFollower() { public void testImproveConfigurationPerformsVotingConfigExclusionStateCheck() { try (Cluster cluster = new Cluster(1)) { cluster.runRandomly(); - cluster.stabilise(); + cluster.stabilise(DEFAULT_STABILISATION_TIME * 2); final Coordinator coordinator = cluster.getAnyLeader().coordinator; final ClusterState currentState = coordinator.getLastAcceptedState(); diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java index 28d7706fb1493..0754cc1793dc8 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -47,6 +47,7 @@ import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.coordination.AbstractCoordinatorTestCase.Cluster.ClusterNode; import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfiguration; +import org.opensearch.cluster.coordination.Coordinator.Mode; import org.opensearch.cluster.coordination.LinearizabilityChecker.History; import org.opensearch.cluster.coordination.LinearizabilityChecker.SequentialSpec; import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType; @@ -653,6 +654,12 @@ void stabilise(long stabilisationDurationMillis) { leader.getLastAppliedClusterState().getNodes().nodeExists(nodeId) ); } + if (clusterNode.coordinator.getMode() == Mode.LEADER || clusterNode.coordinator.getMode() == Mode.FOLLOWER) { + assertFalse( + "Election scheduler should stop after cluster has stabilised", + clusterNode.coordinator.isElectionSchedulerRunning() + ); + } } final Set connectedNodeIds = clusterNodes.stream() From 479b65bfd4337ede96105739f5611ae9e8dbf971 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Mar 2024 21:16:50 -0500 Subject: [PATCH 04/74] Bump peter-evans/create-pull-request from 5 to 6 (#12724) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- .github/workflows/version.yml | 6 +++--- CHANGELOG.md | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml index be2a89ac931e9..7f120b65d7c2e 100644 --- a/.github/workflows/version.yml +++ b/.github/workflows/version.yml @@ -62,7 +62,7 @@ jobs: - name: Create PR for BASE id: base_pr - uses: peter-evans/create-pull-request@v5 + uses: peter-evans/create-pull-request@v6 with: base: ${{ env.BASE }} branch: 'create-pull-request/patch-${{ env.BASE }}' @@ -88,7 +88,7 @@ jobs: - name: Create PR for BASE_X id: base_x_pr - uses: peter-evans/create-pull-request@v5 + uses: peter-evans/create-pull-request@v6 with: base: ${{ env.BASE_X }} branch: 'create-pull-request/patch-${{ env.BASE_X }}' @@ -114,7 +114,7 @@ jobs: - name: Create PR for main id: main_pr - uses: peter-evans/create-pull-request@v5 + uses: peter-evans/create-pull-request@v6 with: base: main branch: 'create-pull-request/patch-main' diff --git a/CHANGELOG.md b/CHANGELOG.md index 3fb141e189bd3..4e5426797c69f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -141,6 +141,7 @@ The format is based on [Keep a 
Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `aws-sdk-java` from 2.20.55 to 2.20.86 ([#12251](https://github.com/opensearch-project/OpenSearch/pull/12251)) - Bump `reactor-netty` from 1.1.15 to 1.1.17 ([#12633](https://github.com/opensearch-project/OpenSearch/pull/12633)) - Bump `reactor` from 3.5.14 to 3.5.15 ([#12633](https://github.com/opensearch-project/OpenSearch/pull/12633)) +- Bump `peter-evans/create-pull-request` from 5 to 6 ([#12724](https://github.com/opensearch-project/OpenSearch/pull/12724)) ### Changed - Allow composite aggregation to run under a parent filter aggregation ([#11499](https://github.com/opensearch-project/OpenSearch/pull/11499)) From f4a8d2b752fd2b444cc3ab35fc463300cf31f870 Mon Sep 17 00:00:00 2001 From: Sagar <99425694+sgup432@users.noreply.github.com> Date: Tue, 19 Mar 2024 19:41:59 -0700 Subject: [PATCH 05/74] Fixing ehcache flaky test (#12764) * Fixing ehcache flaky test Signed-off-by: Sagar Upadhyaya * Adding a ehcache issue reference for thread leak issue Signed-off-by: Sagar Upadhyaya * Updating comment Signed-off-by: Sagar Upadhyaya --------- Signed-off-by: Sagar Upadhyaya --- .../store/disk/EhCacheDiskCacheTests.java | 44 +++++++++++++++++++ .../store/disk/EhcacheThreadLeakFilter.java | 29 ++++++++++++ 2 files changed, 73 insertions(+) create mode 100644 plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhcacheThreadLeakFilter.java diff --git a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java index 8c0ab62baee54..3a98ad2fef6b1 100644 --- a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java +++ b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java @@ -8,6 +8,8 @@ package org.opensearch.cache.store.disk; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + import org.opensearch.cache.EhcacheDiskCacheSettings; import org.opensearch.common.Randomness; import org.opensearch.common.cache.CacheType; @@ -47,6 +49,7 @@ import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_STORAGE_PATH_KEY; import static org.hamcrest.CoreMatchers.instanceOf; +@ThreadLeakFilters(filters = { EhcacheThreadLeakFilter.class }) public class EhCacheDiskCacheTests extends OpenSearchSingleNodeTestCase { private static final int CACHE_SIZE_IN_BYTES = 1024 * 101; @@ -633,6 +636,47 @@ public void testBasicGetAndPutBytesReference() throws Exception { } } + public void testInvalidate() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener removalListener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache ehcacheTest = new EhcacheDiskCache.Builder().setThreadPoolAlias("ehcacheTest") + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setIsEventListenerModeSync(true) + .setKeyType(String.class) + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new StringSerializer()) + .setValueType(String.class) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .build(); + int randomKeys = randomIntBetween(10, 100); + Map keyValueMap = new HashMap<>(); + for (int i = 0; i < randomKeys; i++) { + 
keyValueMap.put(UUID.randomUUID().toString(), UUID.randomUUID().toString());
+ }
+ for (Map.Entry<String, String> entry : keyValueMap.entrySet()) {
+ ehcacheTest.put(entry.getKey(), entry.getValue());
+ }
+ assertEquals(keyValueMap.size(), ehcacheTest.count());
+ List<String> removedKeyList = new ArrayList<>();
+ for (Map.Entry<String, String> entry : keyValueMap.entrySet()) {
+ if (randomBoolean()) {
+ removedKeyList.add(entry.getKey());
+ ehcacheTest.invalidate(entry.getKey());
+ }
+ }
+ for (String removedKey : removedKeyList) {
+ assertNull(ehcacheTest.get(removedKey));
+ }
+ assertEquals(keyValueMap.size() - removedKeyList.size(), ehcacheTest.count());
+ ehcacheTest.close();
+ }
+ }
+
 private static String generateRandomString(int length) {
 String characters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
 StringBuilder randomString = new StringBuilder(length);
diff --git a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhcacheThreadLeakFilter.java b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhcacheThreadLeakFilter.java
new file mode 100644
index 0000000000000..6b54c3be10466
--- /dev/null
+++ b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhcacheThreadLeakFilter.java
@@ -0,0 +1,29 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.cache.store.disk;
+
+import com.carrotsearch.randomizedtesting.ThreadFilter;
+
+/**
+ * In Ehcache (as of 3.10.8), calling remove/invalidate() on entries starts a daemon thread in the
+ * background to clean up the stale offheap memory associated with the disk cache. This thread is not closed even
+ * after we try to close the cache or cache manager. Considering that it requires a node restart to switch between
+ * different cache plugins, this shouldn't be a problem for now.
+ * + * See: https://github.com/ehcache/ehcache3/issues/3204 + */ +public class EhcacheThreadLeakFilter implements ThreadFilter { + + private static final String OFFENDING_THREAD_NAME = "MappedByteBufferSource"; + + @Override + public boolean reject(Thread t) { + return t.getName().startsWith(OFFENDING_THREAD_NAME); + } +} From 1b4c76ddd9a4363312d5950655ecbc36c530bb96 Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Wed, 20 Mar 2024 11:06:16 +0800 Subject: [PATCH 06/74] Update supported version for the primary_only parameter in force-merge API (#12657) * Update supported version for adding primary_only parameter to force-merge API Signed-off-by: Gao Binlong * Modify change log Signed-off-by: Gao Binlong * Remove some unused code Signed-off-by: Gao Binlong * Remove change log Signed-off-by: Gao Binlong * Fix test issue Signed-off-by: Gao Binlong --------- Signed-off-by: Gao Binlong --- .../20_wait_for_completion.yml | 33 ++++++++++-- .../indices/forcemerge/ForceMergeRequest.java | 4 +- .../forcemerge/ForceMergeRequestTests.java | 53 ++++++++++--------- 3 files changed, 60 insertions(+), 30 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/20_wait_for_completion.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/20_wait_for_completion.yml index efa239547e84a..a0bddd1cbd13f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/20_wait_for_completion.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/20_wait_for_completion.yml @@ -4,15 +4,17 @@ # will return a task immediately and the merge process will run in background. - skip: - version: " - 2.99.99" - reason: "only available in 3.0+" - features: allowed_warnings + version: " - 2.6.99, 2.13.0 - " + reason: "wait_for_completion was introduced in 2.7.0 and task description was changed in 2.13.0" + features: allowed_warnings, node_selector - do: indices.create: index: test_index - do: + node_selector: + version: " 2.7.0 - 2.12.99" indices.forcemerge: index: test_index wait_for_completion: false @@ -25,8 +27,31 @@ wait_for_completion: true task_id: $taskId - match: { task.action: "indices:admin/forcemerge" } - - match: { task.description: "Force-merge indices [test_index], maxSegments[1], onlyExpungeDeletes[false], flush[true], primaryOnly[false]" } + - match: { task.description: "Force-merge indices [test_index], maxSegments[1], onlyExpungeDeletes[false], flush[true]" } + +--- +"Force merge index with wait_for_completion after task description changed": + - skip: + version: " - 2.12.99 " + reason: "task description was changed in 2.13.0" + features: allowed_warnings, node_selector + + - do: + node_selector: + version: " 2.13.0 - " + indices.forcemerge: + index: test_index + wait_for_completion: false + max_num_segments: 1 + - match: { task: /^.+$/ } + - set: { task: taskId } + - do: + tasks.get: + wait_for_completion: true + task_id: $taskId + - match: { task.action: "indices:admin/forcemerge" } + - match: { task.description: "Force-merge indices [test_index], maxSegments[1], onlyExpungeDeletes[false], flush[true], primaryOnly[false]" } # .tasks index is created when the force-merge operation completes, so we should delete .tasks index finally, # if not, the .tasks index may introduce unexpected warnings and then cause other test cases to fail. 
# Delete the .tasks index directly will also introduce warning, but currently we don't have such APIs which can delete one diff --git a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java index bf6ee9ca43755..3efc4db21afbc 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java @@ -102,7 +102,7 @@ public ForceMergeRequest(StreamInput in) throws IOException { maxNumSegments = in.readInt(); onlyExpungeDeletes = in.readBoolean(); flush = in.readBoolean(); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_13_0)) { primaryOnly = in.readBoolean(); } if (in.getVersion().onOrAfter(FORCE_MERGE_UUID_VERSION)) { @@ -219,7 +219,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeInt(maxNumSegments); out.writeBoolean(onlyExpungeDeletes); out.writeBoolean(flush); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_13_0)) { out.writeBoolean(primaryOnly); } if (out.getVersion().onOrAfter(FORCE_MERGE_UUID_VERSION)) { diff --git a/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestTests.java index a80141c52b6b4..03cf38548a8cd 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestTests.java @@ -32,8 +32,10 @@ package org.opensearch.action.admin.indices.forcemerge; import org.opensearch.Version; +import org.opensearch.action.support.IndicesOptions; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.tasks.TaskId; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; @@ -95,42 +97,48 @@ public void testSerialization() throws Exception { public void testBwcSerialization() throws Exception { { final ForceMergeRequest sample = randomRequest(); - final Version compatibleVersion = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); + final Version version = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); try (BytesStreamOutput out = new BytesStreamOutput()) { - out.setVersion(compatibleVersion); + out.setVersion(version); sample.writeTo(out); - final ForceMergeRequest deserializedRequest; try (StreamInput in = out.bytes().streamInput()) { - in.setVersion(Version.CURRENT); - deserializedRequest = new ForceMergeRequest(in); - } - - assertEquals(sample.maxNumSegments(), deserializedRequest.maxNumSegments()); - assertEquals(sample.onlyExpungeDeletes(), deserializedRequest.onlyExpungeDeletes()); - assertEquals(sample.flush(), deserializedRequest.flush()); - if (compatibleVersion.onOrAfter(Version.V_3_0_0)) { - assertEquals(sample.primaryOnly(), deserializedRequest.primaryOnly()); - assertEquals(sample.forceMergeUUID(), deserializedRequest.forceMergeUUID()); + in.setVersion(version); + TaskId.readFromStream(in); + in.readStringArray(); + IndicesOptions.readIndicesOptions(in); + int maxNumSegments = in.readInt(); + boolean onlyExpungeDeletes = in.readBoolean(); + boolean flush = in.readBoolean(); + boolean 
primaryOnly = in.readBoolean(); + String forceMergeUUID; + if (version.onOrAfter(Version.V_3_0_0)) { + forceMergeUUID = in.readString(); + } else { + forceMergeUUID = in.readOptionalString(); + } + assertEquals(sample.maxNumSegments(), maxNumSegments); + assertEquals(sample.onlyExpungeDeletes(), onlyExpungeDeletes); + assertEquals(sample.flush(), flush); + assertEquals(sample.primaryOnly(), primaryOnly); + assertEquals(sample.forceMergeUUID(), forceMergeUUID); } } } { final ForceMergeRequest sample = randomRequest(); - final Version compatibleVersion = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); + final Version version = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); try (BytesStreamOutput out = new BytesStreamOutput()) { - out.setVersion(Version.CURRENT); + out.setVersion(version); sample.getParentTask().writeTo(out); out.writeStringArray(sample.indices()); sample.indicesOptions().writeIndicesOptions(out); out.writeInt(sample.maxNumSegments()); out.writeBoolean(sample.onlyExpungeDeletes()); out.writeBoolean(sample.flush()); - if (compatibleVersion.onOrAfter(Version.V_3_0_0)) { - out.writeBoolean(sample.primaryOnly()); - } - if (compatibleVersion.onOrAfter(Version.V_3_0_0)) { + out.writeBoolean(sample.primaryOnly()); + if (version.onOrAfter(Version.V_3_0_0)) { out.writeString(sample.forceMergeUUID()); } else { out.writeOptionalString(sample.forceMergeUUID()); @@ -138,18 +146,15 @@ public void testBwcSerialization() throws Exception { final ForceMergeRequest deserializedRequest; try (StreamInput in = out.bytes().streamInput()) { - in.setVersion(compatibleVersion); + in.setVersion(version); deserializedRequest = new ForceMergeRequest(in); } assertEquals(sample.maxNumSegments(), deserializedRequest.maxNumSegments()); assertEquals(sample.onlyExpungeDeletes(), deserializedRequest.onlyExpungeDeletes()); assertEquals(sample.flush(), deserializedRequest.flush()); - if (compatibleVersion.onOrAfter(Version.V_3_0_0)) { - assertEquals(sample.primaryOnly(), deserializedRequest.primaryOnly()); - } + assertEquals(sample.primaryOnly(), deserializedRequest.primaryOnly()); assertEquals(sample.forceMergeUUID(), deserializedRequest.forceMergeUUID()); - } } } From ad6b53834229da724312ad6a64faa2dc41a8d4c5 Mon Sep 17 00:00:00 2001 From: shwetathareja Date: Wed, 20 Mar 2024 16:28:42 +0530 Subject: [PATCH 07/74] Fixing flaky test GeoPointShapeQueryTests.testQueryLinearRing (#12783) Signed-off-by: Shweta Thareja Co-authored-by: Shweta Thareja --- .../java/org/opensearch/search/geo/GeoPointShapeQueryTests.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/server/src/test/java/org/opensearch/search/geo/GeoPointShapeQueryTests.java b/server/src/test/java/org/opensearch/search/geo/GeoPointShapeQueryTests.java index b6b2a86ac7549..b5d34a78ab5a4 100644 --- a/server/src/test/java/org/opensearch/search/geo/GeoPointShapeQueryTests.java +++ b/server/src/test/java/org/opensearch/search/geo/GeoPointShapeQueryTests.java @@ -143,6 +143,8 @@ public void testQueryLinearRing() throws Exception { e.getCause().getMessage(), containsString("Field [" + defaultGeoFieldName + "] does not support LINEARRING queries") ); + } catch (UnsupportedOperationException e) { + assertThat(e.getMessage(), containsString("line ring cannot be serialized using GeoJson")); } } From c369ec44e381ddb85db647d89389842952a1c06a Mon Sep 17 00:00:00 2001 From: Prabhat <20185657+CaptainDredge@users.noreply.github.com> Date: Wed, 20 Mar 2024 04:50:28 -0700 Subject: [PATCH 08/74] Added static setting for 
checkPendingFlushUpdate functionality of lucene writer (#12710) Signed-off-by: Prabhat Sharma Co-authored-by: Prabhat Sharma --- CHANGELOG.md | 1 + .../common/settings/IndexScopedSettings.java | 1 + .../org/opensearch/index/IndexSettings.java | 20 ++++++++++++++++++- .../index/engine/InternalEngine.java | 1 + 4 files changed, 22 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4e5426797c69f..3cfdcd86f6bf0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -118,6 +118,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Tiered caching] Add Stale keys Management and CacheCleaner to IndicesRequestCache ([#12625](https://github.com/opensearch-project/OpenSearch/pull/12625)) - [Tiered caching] Add serializer integration to allow ehcache disk cache to use non-primitive values ([#12709](https://github.com/opensearch-project/OpenSearch/pull/12709)) - [Admission Control] Integrated IO Based AdmissionController to AdmissionControl Framework ([#12583](https://github.com/opensearch-project/OpenSearch/pull/12583)) +- Introduce a new setting `index.check_pending_flush.enabled` to expose the ability to disable the check for pending flushes by write threads ([#12710](https://github.com/opensearch-project/OpenSearch/pull/12710)) ### Dependencies - Bump `peter-evans/find-comment` from 2 to 3 ([#12288](https://github.com/opensearch-project/OpenSearch/pull/12288)) diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 49bb3abf1decd..c6c312d6b6eea 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -207,6 +207,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME, IndexSettings.INDEX_MERGE_ON_FLUSH_POLICY, IndexSettings.INDEX_MERGE_POLICY, + IndexSettings.INDEX_CHECK_PENDING_FLUSH_ENABLED, LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING, LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING, LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_SETTING, diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index d750a13dece64..5aaea2c498701 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -624,6 +624,16 @@ public static IndexMergePolicy fromString(String text) { Property.IndexScope ); + /** + * Expert: Makes indexing threads check for pending flushes on update in order to help out + * flushing indexing buffers to disk. This is an experimental Apache Lucene feature. 
+ */ + public static final Setting INDEX_CHECK_PENDING_FLUSH_ENABLED = Setting.boolSetting( + "index.check_pending_flush.enabled", + true, + Property.IndexScope + ); + public static final Setting TIME_SERIES_INDEX_MERGE_POLICY = Setting.simpleString( "indices.time_series_index.default_index_merge_policy", DEFAULT_POLICY, @@ -816,7 +826,10 @@ private void setRetentionLeaseMillis(final TimeValue retentionLease) { * Specialized merge-on-flush policy if provided */ private volatile UnaryOperator mergeOnFlushPolicy; - + /** + * Is flush check by write threads enabled or not + */ + private final boolean checkPendingFlushEnabled; /** * Is fuzzy set enabled for doc id */ @@ -959,6 +972,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti maxFullFlushMergeWaitTime = scopedSettings.get(INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME); mergeOnFlushEnabled = scopedSettings.get(INDEX_MERGE_ON_FLUSH_ENABLED); setMergeOnFlushPolicy(scopedSettings.get(INDEX_MERGE_ON_FLUSH_POLICY)); + checkPendingFlushEnabled = scopedSettings.get(INDEX_CHECK_PENDING_FLUSH_ENABLED); defaultSearchPipeline = scopedSettings.get(DEFAULT_SEARCH_PIPELINE); /* There was unintentional breaking change got introduced with [OpenSearch-6424](https://github.com/opensearch-project/OpenSearch/pull/6424) (version 2.7). * For indices created prior version (prior to 2.7) which has IndexSort type, they used to type cast the SortField.Type @@ -1844,6 +1858,10 @@ private void setMergeOnFlushPolicy(String policy) { } } + public boolean isCheckPendingFlushEnabled() { + return checkPendingFlushEnabled; + } + public Optional> getMergeOnFlushPolicy() { return Optional.ofNullable(mergeOnFlushPolicy); } diff --git a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java index e204656d3f106..a25ec95f58e05 100644 --- a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java @@ -2336,6 +2336,7 @@ private IndexWriterConfig getIndexWriterConfig() { iwc.setMaxFullFlushMergeWaitMillis(0); } + iwc.setCheckPendingFlushUpdate(config().getIndexSettings().isCheckPendingFlushEnabled()); iwc.setMergePolicy(new OpenSearchMergePolicy(mergePolicy)); iwc.setSimilarity(engineConfig.getSimilarity()); iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().getMbFrac()); From 773a93945429d99e3551fad6e4746668778aa563 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Wed, 20 Mar 2024 10:01:59 -0400 Subject: [PATCH 09/74] [FEATURE] Built-in secure transports support (#12435) * [FEATURE] Built-in secure transports support Signed-off-by: Andriy Redko * Added more tests, addressing code review comments Signed-off-by: Andriy Redko * Address code review comments Signed-off-by: Andriy Redko * Address code review comments Signed-off-by: Andriy Redko * Address code review comments Signed-off-by: Andriy Redko --------- Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + .../ssl/SecureNetty4HttpServerTransport.java | 169 +++++ .../transport/Netty4ModulePlugin.java | 64 ++ .../netty4/ssl/DualModeSslHandler.java | 106 +++ .../netty4/ssl/SecureConnectionTestUtil.java | 214 +++++++ .../netty4/ssl/SecureNetty4Transport.java | 316 +++++++++ .../transport/netty4/ssl/SslUtils.java | 107 ++++ .../http/netty4/Netty4HttpClient.java | 60 +- .../SecureNetty4HttpServerTransportTests.java | 603 ++++++++++++++++++ .../ssl/SimpleSecureNetty4TransportTests.java | 234 +++++++ 
.../transport/netty4/ssl/TrustAllManager.java | 28 + .../src/test/resources/README.txt | 17 + .../src/test/resources/certificate.crt | 22 + .../src/test/resources/certificate.key | 28 + .../src/test/resources/netty4-secure.jks | Bin 0 -> 2790 bytes .../common/network/NetworkModule.java | 69 +- .../common/settings/ClusterSettings.java | 3 + .../main/java/org/opensearch/node/Node.java | 11 +- .../org/opensearch/plugins/NetworkPlugin.java | 37 ++ .../java/org/opensearch/plugins/Plugin.java | 9 + .../plugins/SecureSettingsFactory.java | 29 + .../SecureTransportSettingsProvider.java | 90 +++ .../common/network/NetworkModuleTests.java | 95 ++- 23 files changed, 2296 insertions(+), 16 deletions(-) create mode 100644 modules/transport-netty4/src/main/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransport.java create mode 100644 modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/DualModeSslHandler.java create mode 100644 modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SecureConnectionTestUtil.java create mode 100644 modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SecureNetty4Transport.java create mode 100644 modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SslUtils.java create mode 100644 modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java create mode 100644 modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java create mode 100644 modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/TrustAllManager.java create mode 100644 modules/transport-netty4/src/test/resources/README.txt create mode 100644 modules/transport-netty4/src/test/resources/certificate.crt create mode 100644 modules/transport-netty4/src/test/resources/certificate.key create mode 100644 modules/transport-netty4/src/test/resources/netty4-secure.jks create mode 100644 server/src/main/java/org/opensearch/plugins/SecureSettingsFactory.java create mode 100644 server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 3cfdcd86f6bf0..0b5a7cc705a79 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -119,6 +119,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Tiered caching] Add serializer integration to allow ehcache disk cache to use non-primitive values ([#12709](https://github.com/opensearch-project/OpenSearch/pull/12709)) - [Admission Control] Integrated IO Based AdmissionController to AdmissionControl Framework ([#12583](https://github.com/opensearch-project/OpenSearch/pull/12583)) - Introduce a new setting `index.check_pending_flush.enabled` to expose the ability to disable the check for pending flushes by write threads ([#12710](https://github.com/opensearch-project/OpenSearch/pull/12710)) +- Built-in secure transports support ([#12435](https://github.com/opensearch-project/OpenSearch/pull/12435)) ### Dependencies - Bump `peter-evans/find-comment` from 2 to 3 ([#12288](https://github.com/opensearch-project/OpenSearch/pull/12288)) diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransport.java new file mode 100644 index 0000000000000..51a76903e284d --- /dev/null +++ 
b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransport.java @@ -0,0 +1,169 @@ +/* + * Copyright 2015-2017 floragunn GmbH + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.http.netty4.ssl; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.BigArrays; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.http.HttpChannel; +import org.opensearch.http.HttpHandlingSettings; +import org.opensearch.http.netty4.Netty4HttpChannel; +import org.opensearch.http.netty4.Netty4HttpServerTransport; +import org.opensearch.plugins.SecureTransportSettingsProvider; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.SharedGroupFactory; +import org.opensearch.transport.netty4.ssl.SslUtils; + +import javax.net.ssl.SSLEngine; + +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.DecoderException; +import io.netty.handler.ssl.ApplicationProtocolNames; +import io.netty.handler.ssl.ApplicationProtocolNegotiationHandler; +import io.netty.handler.ssl.SslHandler; + +/** + * @see SecuritySSLNettyHttpServerTransport + */ +public class SecureNetty4HttpServerTransport extends Netty4HttpServerTransport { + private static final Logger logger = LogManager.getLogger(SecureNetty4HttpServerTransport.class); + private final SecureTransportSettingsProvider secureTransportSettingsProvider; + private final SecureTransportSettingsProvider.ServerExceptionHandler exceptionHandler; + + public SecureNetty4HttpServerTransport( + final Settings settings, + final NetworkService networkService, + final BigArrays bigArrays, + final ThreadPool threadPool, + final NamedXContentRegistry namedXContentRegistry, + final Dispatcher dispatcher, + final ClusterSettings clusterSettings, + final SharedGroupFactory sharedGroupFactory, + final SecureTransportSettingsProvider secureTransportSettingsProvider, + final Tracer tracer + ) { + super( + settings, + networkService, + bigArrays, + threadPool, + namedXContentRegistry, + dispatcher, + clusterSettings, + sharedGroupFactory, + tracer + ); + this.secureTransportSettingsProvider = secureTransportSettingsProvider; + this.exceptionHandler = secureTransportSettingsProvider.buildHttpServerExceptionHandler(settings, this) + 
.orElse(SecureTransportSettingsProvider.ServerExceptionHandler.NOOP);
+    }
+
+    @Override
+    public ChannelHandler configureServerChannelHandler() {
+        return new SslHttpChannelHandler(this, handlingSettings);
+    }
+
+    @Override
+    public void onException(HttpChannel channel, Exception cause0) {
+        Throwable cause = cause0;
+
+        if (cause0 instanceof DecoderException) {
+            cause = cause0.getCause();
+        }
+
+        exceptionHandler.onError(cause);
+        logger.error("Exception while establishing an SSL connection: " + cause, cause);
+        super.onException(channel, cause0);
+    }
+
+    protected class SslHttpChannelHandler extends Netty4HttpServerTransport.HttpChannelHandler {
+        /**
+         * Application protocol negotiation handler that selects either HTTP/1.1 or HTTP/2,
+         * based on the outcome of the client/server ALPN negotiation.
+         */
+        private class Http2OrHttpHandler extends ApplicationProtocolNegotiationHandler {
+            protected Http2OrHttpHandler() {
+                super(ApplicationProtocolNames.HTTP_1_1);
+            }
+
+            @Override
+            protected void configurePipeline(ChannelHandlerContext ctx, String protocol) throws Exception {
+                if (ApplicationProtocolNames.HTTP_2.equals(protocol)) {
+                    configureDefaultHttp2Pipeline(ctx.pipeline());
+                } else if (ApplicationProtocolNames.HTTP_1_1.equals(protocol)) {
+                    configureDefaultHttpPipeline(ctx.pipeline());
+                } else {
+                    throw new IllegalStateException("Unknown application protocol: " + protocol);
+                }
+            }
+
+            @Override
+            public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
+                super.exceptionCaught(ctx, cause);
+                Netty4HttpChannel channel = ctx.channel().attr(HTTP_CHANNEL_KEY).get();
+                if (channel != null) {
+                    if (cause instanceof Error) {
+                        onException(channel, new Exception(cause));
+                    } else {
+                        onException(channel, (Exception) cause);
+                    }
+                }
+            }
+        }
+
+        protected SslHttpChannelHandler(final Netty4HttpServerTransport transport, final HttpHandlingSettings handlingSettings) {
+            super(transport, handlingSettings);
+        }
+
+        @Override
+        protected void initChannel(Channel ch) throws Exception {
+            super.initChannel(ch);
+
+            final SSLEngine sslEngine = secureTransportSettingsProvider.buildSecureHttpServerEngine(
+                settings,
+                SecureNetty4HttpServerTransport.this
+            ).orElseGet(SslUtils::createDefaultServerSSLEngine);
+
+            final SslHandler sslHandler = new SslHandler(sslEngine);
+            ch.pipeline().addFirst("ssl_http", sslHandler);
+        }
+
+        @Override
+        protected void configurePipeline(Channel ch) {
+            ch.pipeline().addLast(new Http2OrHttpHandler());
+        }
+    }
+}
diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java
index 2bc795d11ed5d..56163c18949a4 100644
--- a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java
+++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java
@@ -46,11 +46,14 @@
 import org.opensearch.core.xcontent.NamedXContentRegistry;
 import org.opensearch.http.HttpServerTransport;
 import org.opensearch.http.netty4.Netty4HttpServerTransport;
+import org.opensearch.http.netty4.ssl.SecureNetty4HttpServerTransport;
 import org.opensearch.plugins.NetworkPlugin;
 import org.opensearch.plugins.Plugin;
+import org.opensearch.plugins.SecureTransportSettingsProvider;
 import org.opensearch.telemetry.tracing.Tracer;
 import org.opensearch.threadpool.ThreadPool;
 import org.opensearch.transport.netty4.Netty4Transport;
+import org.opensearch.transport.netty4.ssl.SecureNetty4Transport;
 import java.util.Arrays;
 import java.util.Collections;
@@ -61,7 +64,9 @@ public class Netty4ModulePlugin extends Plugin implements NetworkPlugin {
 
     public static final String NETTY_TRANSPORT_NAME = "netty4";
+    public static final String NETTY_SECURE_TRANSPORT_NAME = "netty4-secure";
     public static final String NETTY_HTTP_TRANSPORT_NAME = "netty4";
+    public static final String NETTY_SECURE_HTTP_TRANSPORT_NAME = "netty4-secure";
 
     private final SetOnce<SharedGroupFactory> groupFactory = new SetOnce<>();
 
@@ -144,6 +149,65 @@ public Map<String, Supplier<HttpServerTransport>> getHttpTransports(
         );
     }
 
+    @Override
+    public Map<String, Supplier<HttpServerTransport>> getSecureHttpTransports(
+        Settings settings,
+        ThreadPool threadPool,
+        BigArrays bigArrays,
+        PageCacheRecycler pageCacheRecycler,
+        CircuitBreakerService circuitBreakerService,
+        NamedXContentRegistry xContentRegistry,
+        NetworkService networkService,
+        HttpServerTransport.Dispatcher dispatcher,
+        ClusterSettings clusterSettings,
+        SecureTransportSettingsProvider secureTransportSettingsProvider,
+        Tracer tracer
+    ) {
+        return Collections.singletonMap(
+            NETTY_SECURE_HTTP_TRANSPORT_NAME,
+            () -> new SecureNetty4HttpServerTransport(
+                settings,
+                networkService,
+                bigArrays,
+                threadPool,
+                xContentRegistry,
+                dispatcher,
+                clusterSettings,
+                getSharedGroupFactory(settings),
+                secureTransportSettingsProvider,
+                tracer
+            )
+        );
+    }
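+
+    // Like the HTTP variant above, the secure transport is registered under the "netty4-secure"
+    // name; NetworkModule wires it in through the usual transport.type setting once a plugin
+    // supplies a SecureTransportSettingsProvider.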
+    @Override
+    public Map<String, Supplier<Transport>> getSecureTransports(
+        Settings settings,
+        ThreadPool threadPool,
+        PageCacheRecycler pageCacheRecycler,
+        CircuitBreakerService circuitBreakerService,
+        NamedWriteableRegistry namedWriteableRegistry,
+        NetworkService networkService,
+        SecureTransportSettingsProvider secureTransportSettingsProvider,
+        Tracer tracer
+    ) {
+        return Collections.singletonMap(
+            NETTY_SECURE_TRANSPORT_NAME,
+            () -> new SecureNetty4Transport(
+                settings,
+                Version.CURRENT,
+                threadPool,
+                networkService,
+                pageCacheRecycler,
+                namedWriteableRegistry,
+                circuitBreakerService,
+                getSharedGroupFactory(settings),
+                secureTransportSettingsProvider,
+                tracer
+            )
+        );
+    }
+
     SharedGroupFactory getSharedGroupFactory(Settings settings) {
         SharedGroupFactory groupFactory = this.groupFactory.get();
         if (groupFactory != null) {
diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/DualModeSslHandler.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/DualModeSslHandler.java
new file mode 100644
index 0000000000000..1bf4cdb0eb438
--- /dev/null
+++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/DualModeSslHandler.java
@@ -0,0 +1,106 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+package org.opensearch.transport.netty4.ssl;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.plugins.SecureTransportSettingsProvider;
+import org.opensearch.transport.TcpTransport;
+
+import javax.net.ssl.SSLEngine;
+import javax.net.ssl.SSLException;
+
+import java.nio.charset.StandardCharsets;
+import java.security.NoSuchAlgorithmException;
+import java.util.List;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import io.netty.channel.ChannelFutureListener;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelPipeline;
+import io.netty.handler.codec.ByteToMessageDecoder;
+import io.netty.handler.ssl.SslHandler;
+
+/**
+ * Modifies the current pipeline dynamically to enable TLS.
+ *
+ * @see DualModeSSLHandler
+ */
+public class DualModeSslHandler extends ByteToMessageDecoder {
+
+    private static final Logger logger = LogManager.getLogger(DualModeSslHandler.class);
+    private final Settings settings;
+    private final SecureTransportSettingsProvider secureTransportSettingsProvider;
+    private final TcpTransport transport;
+    private final SslHandler providedSSLHandler;
+
+    public DualModeSslHandler(
+        final Settings settings,
+        final SecureTransportSettingsProvider secureTransportSettingsProvider,
+        final TcpTransport transport
+    ) {
+        this(settings, secureTransportSettingsProvider, transport, null);
+    }
+
+    protected DualModeSslHandler(
+        final Settings settings,
+        final SecureTransportSettingsProvider secureTransportSettingsProvider,
+        final TcpTransport transport,
+        SslHandler providedSSLHandler
+    ) {
+        this.settings = settings;
+        this.secureTransportSettingsProvider = secureTransportSettingsProvider;
+        this.transport = transport;
+        this.providedSSLHandler = providedSSLHandler;
+    }
+
+    @Override
+    protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
+        // Will use the first six bytes to detect a protocol.
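+        // Six readable bytes cover both checks below: the dual-mode client hello "DUALCM" is
+        // exactly six characters long, and recognizing a TLS record only needs the five-byte
+        // record header.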
+        if (in.readableBytes() < 6) {
+            return;
+        }
+        int offset = in.readerIndex();
+        if (in.getCharSequence(offset, 6, StandardCharsets.UTF_8).equals(SecureConnectionTestUtil.DUAL_MODE_CLIENT_HELLO_MSG)) {
+            logger.debug("Received DualSSL Client Hello message");
+            ByteBuf responseBuffer = Unpooled.buffer(6);
+            responseBuffer.writeCharSequence(SecureConnectionTestUtil.DUAL_MODE_SERVER_HELLO_MSG, StandardCharsets.UTF_8);
+            ctx.writeAndFlush(responseBuffer).addListener(ChannelFutureListener.CLOSE);
+            return;
+        }
+
+        if (SslUtils.isTLS(in)) {
+            logger.debug("Identified request as SSL request");
+            enableSsl(ctx);
+        } else {
+            logger.debug("Identified request as non-SSL request, running in plaintext mode as dual mode is enabled");
+            ctx.pipeline().remove(this);
+        }
+    }
+
+    private void enableSsl(ChannelHandlerContext ctx) throws SSLException, NoSuchAlgorithmException {
+        final SSLEngine sslEngine = secureTransportSettingsProvider.buildSecureServerTransportEngine(settings, transport)
+            .orElseGet(SslUtils::createDefaultServerSSLEngine);
+
+        SslHandler sslHandler;
+        if (providedSSLHandler != null) {
+            sslHandler = providedSSLHandler;
+        } else {
+            sslHandler = new SslHandler(sslEngine);
+        }
+        ChannelPipeline p = ctx.pipeline();
+        p.addAfter("port_unification_handler", "ssl_server", sslHandler);
+        p.remove(this);
+        logger.debug("Removed port unification handler and added SSL handler as incoming request is SSL");
+    }
+}
diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SecureConnectionTestUtil.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SecureConnectionTestUtil.java
new file mode 100644
index 0000000000000..d5667475ea007
--- /dev/null
+++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SecureConnectionTestUtil.java
@@ -0,0 +1,214 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+package org.opensearch.transport.netty4.ssl;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.net.InetAddress;
+import java.net.Socket;
+import java.nio.charset.StandardCharsets;
+
+/**
+ * Utility class to test whether the server supports SSL connections.
+ * A custom dual-mode client hello message is sent first; if the server side runs the port
+ * unification handler, it replies with a server hello message, which means SSL is available.
+ * Failing that, an OpenSearch Ping is sent to check whether the server is reachable without TLS.
+ *
+ * @see SSLConnectionTestUtil
+ */
+class SecureConnectionTestUtil {
+    private static final Logger logger = LogManager.getLogger(SecureConnectionTestUtil.class);
+
+    /**
+     * Return codes for SecureConnectionTestUtil.testConnection()
+     */
+    enum SSLConnectionTestResult {
+        /**
+         * OpenSearch Ping to the server failed.
+         */
+        OPENSEARCH_PING_FAILED,
+        /**
+         * Server does not support SSL.
+         */
+        SSL_NOT_AVAILABLE,
+        /**
+         * Server supports SSL.
+         */
+        SSL_AVAILABLE
+    }
+
+    public static final byte[] OPENSEARCH_PING_MSG = new byte[] {
+        (byte) 'E',
+        (byte) 'S',
+        (byte) 0xFF,
+        (byte) 0xFF,
+        (byte) 0xFF,
+        (byte) 0xFF };
+    public static final String DUAL_MODE_CLIENT_HELLO_MSG = "DUALCM";
+    public static final String DUAL_MODE_SERVER_HELLO_MSG = "DUALSM";
+    private static final int SOCKET_TIMEOUT_MILLIS = 10 * 1000;
+    private final String host;
+    private final int port;
+    private Socket overriddenSocket = null;
+    private OutputStreamWriter testOutputStreamWriter = null;
+    private InputStreamReader testInputStreamReader = null;
+
+    public SecureConnectionTestUtil(final String host, final int port) {
+        this.host = host;
+        this.port = port;
+    }
+
+    protected SecureConnectionTestUtil(
+        final String host,
+        final int port,
+        final Socket overriddenSocket,
+        final OutputStreamWriter testOutputStreamWriter,
+        final InputStreamReader testInputStreamReader
+    ) {
+        this.overriddenSocket = overriddenSocket;
+        this.testOutputStreamWriter = testOutputStreamWriter;
+        this.testInputStreamReader = testInputStreamReader;
+
+        this.host = host;
+        this.port = port;
+    }
+
+    /**
+     * Test connection to server by performing the below steps:
+     * - Send Client Hello to check if the server replies with Server Hello, which indicates that the server understands SSL
+     * - Send OpenSearch Ping to check if the server replies to the OpenSearch Ping message
+     *
+     * @return SSLConnectionTestResult i.e. OPENSEARCH_PING_FAILED or SSL_NOT_AVAILABLE or SSL_AVAILABLE
+     */
+    public SSLConnectionTestResult testConnection() {
+        if (sendDualSSLClientHello()) {
+            return SSLConnectionTestResult.SSL_AVAILABLE;
+        }
+
+        if (sendOpenSearchPing()) {
+            return SSLConnectionTestResult.SSL_NOT_AVAILABLE;
+        }
+
+        return SSLConnectionTestResult.OPENSEARCH_PING_FAILED;
+    }
+
+    private boolean sendDualSSLClientHello() {
+        boolean dualSslSupported = false;
+        Socket socket = null;
+        try {
+            OutputStreamWriter outputStreamWriter;
+            InputStreamReader inputStreamReader;
+            if (overriddenSocket != null) {
+                socket = overriddenSocket;
+                outputStreamWriter = testOutputStreamWriter;
+                inputStreamReader = testInputStreamReader;
+            } else {
+                socket = new Socket(InetAddress.getByName(host), port);
+                outputStreamWriter = new OutputStreamWriter(socket.getOutputStream(), StandardCharsets.UTF_8);
+                inputStreamReader = new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8);
+            }
+
+            socket.setSoTimeout(SOCKET_TIMEOUT_MILLIS);
+            outputStreamWriter.write(DUAL_MODE_CLIENT_HELLO_MSG);
+            outputStreamWriter.flush();
+            logger.debug("Sent DualSSL Client Hello msg to {}", host);
+
+            StringBuilder sb = new StringBuilder();
+            int currentChar;
+            while ((currentChar = inputStreamReader.read()) != -1) {
+                sb.append((char) currentChar);
+            }
+
+            if (sb.toString().equals(DUAL_MODE_SERVER_HELLO_MSG)) {
+                logger.debug("Received DualSSL Server Hello msg from {}", host);
+                dualSslSupported = true;
+            }
+        } catch (IOException e) {
+            logger.debug("DualSSL client check failed for {}, exception: {}", host, e.getMessage());
+        } finally {
+            logger.debug("Closing DualSSL check client socket for {}", host);
+            if (socket != null) {
+                try {
+                    socket.close();
+                } catch (IOException e) {
+                    logger.error(
+                        "Exception occurred while closing DualSSL check client socket for {}. Exception: {}",
+                        host,
+                        e.getMessage()
+                    );
+                }
+            }
+        }
+        logger.debug("dualSslClient check with server {}, server supports SSL = {}", host, dualSslSupported);
+        return dualSslSupported;
+    }
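+
+    // The ping below is the transport-level ping frame: the 'E','S' marker followed by 0xFFFFFFFF
+    // (a length of -1), which the receiving transport answers by echoing the same six bytes.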
Exception: {}", + host, + e.getMessage() + ); + } + } + } + logger.debug("dualSslClient check with server {}, server supports ssl = {}", host, dualSslSupported); + return dualSslSupported; + } + + private boolean sendOpenSearchPing() { + boolean pingSucceeded = false; + Socket socket = null; + try { + if (overriddenSocket != null) { + socket = overriddenSocket; + } else { + socket = new Socket(InetAddress.getByName(host), port); + } + + socket.setSoTimeout(SOCKET_TIMEOUT_MILLIS); + OutputStream outputStream = socket.getOutputStream(); + InputStream inputStream = socket.getInputStream(); + + logger.debug("Sending OpenSearch Ping to {}", host); + outputStream.write(OPENSEARCH_PING_MSG); + outputStream.flush(); + + int currentByte; + int byteBufIndex = 0; + byte[] response = new byte[6]; + while ((byteBufIndex < 6) && ((currentByte = inputStream.read()) != -1)) { + response[byteBufIndex] = (byte) currentByte; + byteBufIndex++; + } + if (byteBufIndex == 6) { + logger.debug("Received reply for OpenSearch Ping. from {}", host); + pingSucceeded = true; + for (int i = 0; i < 6; i++) { + if (response[i] != OPENSEARCH_PING_MSG[i]) { + // Unexpected byte in response + logger.error("Received unexpected byte in OpenSearch Ping reply from {}", host); + pingSucceeded = false; + break; + } + } + } + } catch (IOException ex) { + logger.error("OpenSearch Ping failed for {}, exception: {}", host, ex.getMessage()); + } finally { + logger.debug("Closing OpenSearch Ping client socket for connection to {}", host); + if (socket != null) { + try { + socket.close(); + } catch (IOException e) { + logger.error("Exception occurred while closing socket for {}. Exception: {}", host, e.getMessage()); + } + } + } + + logger.debug("OpenSearch Ping check to server {} result = {}", host, pingSucceeded); + return pingSucceeded; + } +} diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SecureNetty4Transport.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SecureNetty4Transport.java new file mode 100644 index 0000000000000..9c63a1ab9161b --- /dev/null +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SecureNetty4Transport.java @@ -0,0 +1,316 @@ +/* + * Copyright 2015-2017 floragunn GmbH + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */
+
+package org.opensearch.transport.netty4.ssl;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.ExceptionsHelper;
+import org.opensearch.OpenSearchSecurityException;
+import org.opensearch.Version;
+import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.common.SuppressForbidden;
+import org.opensearch.common.network.NetworkModule;
+import org.opensearch.common.network.NetworkService;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.PageCacheRecycler;
+import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
+import org.opensearch.core.indices.breaker.CircuitBreakerService;
+import org.opensearch.plugins.SecureTransportSettingsProvider;
+import org.opensearch.telemetry.tracing.Tracer;
+import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.SharedGroupFactory;
+import org.opensearch.transport.TcpChannel;
+import org.opensearch.transport.netty4.Netty4Transport;
+import org.opensearch.transport.netty4.ssl.SecureConnectionTestUtil.SSLConnectionTestResult;
+
+import javax.net.ssl.SSLEngine;
+import javax.net.ssl.SSLException;
+
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandler;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelOutboundHandlerAdapter;
+import io.netty.channel.ChannelPromise;
+import io.netty.handler.codec.DecoderException;
+import io.netty.handler.ssl.SslHandler;
+
+/**
+ * @see SecuritySSLNettyTransport
+ */
+public class SecureNetty4Transport extends Netty4Transport {
+
+    private static final Logger logger = LogManager.getLogger(SecureNetty4Transport.class);
+    private final SecureTransportSettingsProvider secureTransportSettingsProvider;
+    private final SecureTransportSettingsProvider.ServerExceptionHandler exceptionHandler;
+
+    public SecureNetty4Transport(
+        final Settings settings,
+        final Version version,
+        final ThreadPool threadPool,
+        final NetworkService networkService,
+        final PageCacheRecycler pageCacheRecycler,
+        final NamedWriteableRegistry namedWriteableRegistry,
+        final CircuitBreakerService circuitBreakerService,
+        final SharedGroupFactory sharedGroupFactory,
+        final SecureTransportSettingsProvider secureTransportSettingsProvider,
+        final Tracer tracer
+    ) {
+        super(
+            settings,
+            version,
+            threadPool,
+            networkService,
+            pageCacheRecycler,
+            namedWriteableRegistry,
+            circuitBreakerService,
+            sharedGroupFactory,
+            tracer
+        );
+
+        this.secureTransportSettingsProvider = secureTransportSettingsProvider;
+        this.exceptionHandler = secureTransportSettingsProvider.buildServerTransportExceptionHandler(settings, this)
+            .orElse(SecureTransportSettingsProvider.ServerExceptionHandler.NOOP);
+    }
+
+    @Override
+    public void onException(TcpChannel channel, Exception e) {
+
+        Throwable cause = e;
+
+        if (e instanceof DecoderException) {
+            cause = e.getCause();
+        }
+
+        exceptionHandler.onError(cause);
+        logger.error("Exception while establishing an SSL connection: " + cause, cause);
+
+        if (channel == null || !channel.isOpen()) {
+            throw new OpenSearchSecurityException("The provided TCP channel is invalid.", e);
+        }
+        super.onException(channel, e);
+    }
+
+    @Override
+    protected ChannelHandler getServerChannelInitializer(String name) {
+        return new SSLServerChannelInitializer(name);
+    }
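+
+    // Server side: the initializer below installs either the dual-mode port unification handler
+    // or a plain SslHandler. Client side: when dual mode is enabled, the peer is probed first
+    // (see SSLClientChannelInitializer) so plaintext servers can still be reached.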
+    @Override
+    protected ChannelHandler getClientChannelInitializer(DiscoveryNode node) {
+        return new SSLClientChannelInitializer(node);
+    }
+
+    protected class SSLServerChannelInitializer extends Netty4Transport.ServerChannelInitializer {
+
+        public SSLServerChannelInitializer(String name) {
+            super(name);
+        }
+
+        @Override
+        protected void initChannel(Channel ch) throws Exception {
+            super.initChannel(ch);
+
+            final boolean dualModeEnabled = NetworkModule.TRANSPORT_SSL_DUAL_MODE_ENABLED.get(settings);
+            if (dualModeEnabled) {
+                logger.info("SSL Dual mode enabled, using port unification handler");
+                final ChannelHandler portUnificationHandler = new DualModeSslHandler(
+                    settings,
+                    secureTransportSettingsProvider,
+                    SecureNetty4Transport.this
+                );
+                ch.pipeline().addFirst("port_unification_handler", portUnificationHandler);
+            } else {
+                final SSLEngine sslEngine = secureTransportSettingsProvider.buildSecureServerTransportEngine(
+                    settings,
+                    SecureNetty4Transport.this
+                ).orElseGet(SslUtils::createDefaultServerSSLEngine);
+                final SslHandler sslHandler = new SslHandler(sslEngine);
+                ch.pipeline().addFirst("ssl_server", sslHandler);
+            }
+        }
+
+        @Override
+        public final void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
+            if (cause instanceof DecoderException) {
+                cause = cause.getCause();
+            }
+
+            logger.error("Exception while establishing an SSL connection: " + cause, cause);
+
+            super.exceptionCaught(ctx, cause);
+        }
+    }
+
+    protected static class ClientSSLHandler extends ChannelOutboundHandlerAdapter {
+        private final Logger log = LogManager.getLogger(this.getClass());
+        private final Settings settings;
+        private final SecureTransportSettingsProvider secureTransportSettingsProvider;
+        private final boolean hostnameVerificationEnabled;
+        private final boolean hostnameVerificationResolveHostName;
+
+        private ClientSSLHandler(
+            final Settings settings,
+            final SecureTransportSettingsProvider secureTransportSettingsProvider,
+            final boolean hostnameVerificationEnabled,
+            final boolean hostnameVerificationResolveHostName
+        ) {
+            this.settings = settings;
+            this.secureTransportSettingsProvider = secureTransportSettingsProvider;
+            this.hostnameVerificationEnabled = hostnameVerificationEnabled;
+            this.hostnameVerificationResolveHostName = hostnameVerificationResolveHostName;
+        }
+
+        @Override
+        public final void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
+            if (cause instanceof DecoderException) {
+                cause = cause.getCause();
+            }
+
+            logger.error("Exception while establishing an SSL connection: " + cause, cause);
+
+            super.exceptionCaught(ctx, cause);
+        }
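+
+        // connect() picks the peer name handed to the provider when building the client engine:
+        // with hostnameVerificationResolveHostName enabled the address is reverse-resolved via
+        // getHostName(); otherwise the literal host string is used (for example "10.0.0.5"
+        // versus "node-1.internal"; names are illustrative).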
+        @SuppressForbidden(reason = "The java.net.InetSocketAddress#getHostName() needs to be used")
+        @Override
+        public void connect(ChannelHandlerContext ctx, SocketAddress remoteAddress, SocketAddress localAddress, ChannelPromise promise)
+            throws Exception {
+            SSLEngine sslEngine = null;
+            try {
+                if (hostnameVerificationEnabled) {
+                    final InetSocketAddress inetSocketAddress = (InetSocketAddress) remoteAddress;
+                    final String hostname = hostnameVerificationResolveHostName
+                        ? inetSocketAddress.getHostName()
+                        : inetSocketAddress.getHostString();
+
+                    if (log.isDebugEnabled()) {
+                        log.debug(
+                            "Hostname of peer is {} ({}/{}) with hostnameVerificationResolveHostName: {}",
+                            hostname,
+                            inetSocketAddress.getHostName(),
+                            inetSocketAddress.getHostString(),
+                            hostnameVerificationResolveHostName
+                        );
+                    }
+
+                    sslEngine = secureTransportSettingsProvider.buildSecureClientTransportEngine(
+                        settings,
+                        hostname,
+                        inetSocketAddress.getPort()
+                    ).orElse(null);
+
+                } else {
+                    sslEngine = secureTransportSettingsProvider.buildSecureClientTransportEngine(settings, null, -1).orElse(null);
+                }
+
+                if (sslEngine == null) {
+                    sslEngine = SslUtils.createDefaultClientSSLEngine();
+                }
+            } catch (final SSLException e) {
+                throw ExceptionsHelper.convertToOpenSearchException(e);
+            }
+
+            final SslHandler sslHandler = new SslHandler(sslEngine);
+            ctx.pipeline().replace(this, "ssl_client", sslHandler);
+            super.connect(ctx, remoteAddress, localAddress, promise);
+        }
+    }
+
+    protected class SSLClientChannelInitializer extends Netty4Transport.ClientChannelInitializer {
+        private final boolean hostnameVerificationEnabled;
+        private final boolean hostnameVerificationResolveHostName;
+        private final DiscoveryNode node;
+        private SSLConnectionTestResult connectionTestResult;
+
+        @SuppressWarnings("removal")
+        public SSLClientChannelInitializer(DiscoveryNode node) {
+            this.node = node;
+
+            final boolean dualModeEnabled = NetworkModule.TRANSPORT_SSL_DUAL_MODE_ENABLED.get(settings);
+            hostnameVerificationEnabled = NetworkModule.TRANSPORT_SSL_ENFORCE_HOSTNAME_VERIFICATION.get(settings);
+            hostnameVerificationResolveHostName = NetworkModule.TRANSPORT_SSL_ENFORCE_HOSTNAME_VERIFICATION_RESOLVE_HOST_NAME.get(settings);
+
+            connectionTestResult = SSLConnectionTestResult.SSL_AVAILABLE;
+            if (dualModeEnabled) {
+                SecureConnectionTestUtil sslConnectionTestUtil = new SecureConnectionTestUtil(
+                    node.getAddress().getAddress(),
+                    node.getAddress().getPort()
+                );
+                connectionTestResult = AccessController.doPrivileged(
+                    (PrivilegedAction<SSLConnectionTestResult>) sslConnectionTestUtil::testConnection
+                );
+            }
+        }
+
+        @Override
+        protected void initChannel(Channel ch) throws Exception {
+            super.initChannel(ch);
+
+            if (connectionTestResult == SSLConnectionTestResult.OPENSEARCH_PING_FAILED) {
+                logger.error(
+                    "SSL dual mode is enabled but the dual mode handshake and OpenSearch ping have failed during client connection setup, closing channel"
+                );
+                ch.close();
+                return;
+            }
+
+            if (connectionTestResult == SSLConnectionTestResult.SSL_AVAILABLE) {
+                logger.debug("Connection to {} needs to be SSL, adding SSL handler to the client channel", node.getHostName());
+                ch.pipeline()
+                    .addFirst(
+                        "client_ssl_handler",
+                        new ClientSSLHandler(
+                            settings,
+                            secureTransportSettingsProvider,
+                            hostnameVerificationEnabled,
+                            hostnameVerificationResolveHostName
+                        )
+                    );
+            } else {
+                logger.debug("Connection to {} needs to be non-SSL", node.getHostName());
+            }
+        }
+
+        @Override
+        public final void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
+            if (cause instanceof DecoderException) {
+                cause = cause.getCause();
+            }
+
+            logger.error("Exception while establishing an SSL connection: " + cause, cause);
+
+            super.exceptionCaught(ctx, cause);
+        }
+    }
+}
diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SslUtils.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SslUtils.java
new file mode 100644
index 0000000000000..8b8223da70c08
--- /dev/null
+++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SslUtils.java @@ -0,0 +1,107 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ +package org.opensearch.transport.netty4.ssl; + +import org.opensearch.OpenSearchSecurityException; + +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; + +import java.nio.ByteOrder; +import java.security.NoSuchAlgorithmException; + +import io.netty.buffer.ByteBuf; + +/** + * @see TLSUtil + */ +public class SslUtils { + private static final String[] DEFAULT_SSL_PROTOCOLS = { "TLSv1.3", "TLSv1.2", "TLSv1.1" }; + + private static final int SSL_CONTENT_TYPE_CHANGE_CIPHER_SPEC = 20; + private static final int SSL_CONTENT_TYPE_ALERT = 21; + private static final int SSL_CONTENT_TYPE_HANDSHAKE = 22; + private static final int SSL_CONTENT_TYPE_APPLICATION_DATA = 23; + // CS-SUPPRESS-SINGLE: RegexpSingleline Extensions heartbeat needs special handling by security extension + private static final int SSL_CONTENT_TYPE_EXTENSION_HEARTBEAT = 24; + // CS-ENFORCE-SINGLE + private static final int SSL_RECORD_HEADER_LENGTH = 5; + + private SslUtils() { + + } + + public static SSLEngine createDefaultServerSSLEngine() { + try { + final SSLEngine engine = SSLContext.getDefault().createSSLEngine(); + engine.setEnabledProtocols(DEFAULT_SSL_PROTOCOLS); + engine.setUseClientMode(false); + return engine; + } catch (final NoSuchAlgorithmException ex) { + throw new OpenSearchSecurityException("Unable to initialize default server SSL engine", ex); + } + } + + public static SSLEngine createDefaultClientSSLEngine() { + try { + final SSLEngine engine = SSLContext.getDefault().createSSLEngine(); + engine.setEnabledProtocols(DEFAULT_SSL_PROTOCOLS); + engine.setUseClientMode(true); + return engine; + } catch (final NoSuchAlgorithmException ex) { + throw new OpenSearchSecurityException("Unable to initialize default client SSL engine", ex); + } + } + + static boolean isTLS(ByteBuf buffer) { + int packetLength = 0; + int offset = buffer.readerIndex(); + + // SSLv3 or TLS - Check ContentType + boolean tls; + switch (buffer.getUnsignedByte(offset)) { + case SSL_CONTENT_TYPE_CHANGE_CIPHER_SPEC: + case SSL_CONTENT_TYPE_ALERT: + case SSL_CONTENT_TYPE_HANDSHAKE: + case SSL_CONTENT_TYPE_APPLICATION_DATA: + // CS-SUPPRESS-SINGLE: RegexpSingleline Extensions heartbeat needs special handling by security extension + case SSL_CONTENT_TYPE_EXTENSION_HEARTBEAT: + tls = true; + break; + // CS-ENFORCE-SINGLE + default: + // SSLv2 or bad data + tls = false; + } + + if (tls) { + // SSLv3 or TLS - Check ProtocolVersion + int majorVersion = buffer.getUnsignedByte(offset + 1); + if (majorVersion == 3) { + // SSLv3 or TLS + packetLength = unsignedShortBE(buffer, offset + 3) + SSL_RECORD_HEADER_LENGTH; + if (packetLength <= SSL_RECORD_HEADER_LENGTH) { + // Neither SSLv3 or TLSv1 (i.e. SSLv2 or bad data) + tls = false; + } + } else { + // Neither SSLv3 or TLSv1 (i.e. SSLv2 or bad data) + tls = false; + } + } + + return tls; + } + + private static int unsignedShortBE(ByteBuf buffer, int offset) { + return buffer.order() == ByteOrder.BIG_ENDIAN ? 
buffer.getUnsignedShort(offset) : buffer.getUnsignedShortLE(offset);
+    }
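+
+    // Worked example: a TLS ClientHello record begins 0x16 0x03 0x01 (content type 22, handshake;
+    // major version 3), so isTLS(...) returns true. A plaintext HTTP request beginning "GET "
+    // (first byte 0x47) matches none of the SSL content types above and is treated as non-TLS.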
+}
diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java
index 1c381e8000f6b..7cc1a47a5d2a4 100644
--- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java
+++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java
@@ -32,11 +32,13 @@ package org.opensearch.http.netty4;
 
+import org.opensearch.common.TriFunction;
 import org.opensearch.common.collect.Tuple;
 import org.opensearch.core.common.unit.ByteSizeUnit;
 import org.opensearch.core.common.unit.ByteSizeValue;
 import org.opensearch.tasks.Task;
 import org.opensearch.transport.NettyAllocator;
+import org.opensearch.transport.netty4.ssl.TrustAllManager;
 
 import java.io.Closeable;
 import java.net.SocketAddress;
@@ -47,7 +49,6 @@
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
-import java.util.function.BiFunction;
 
 import io.netty.bootstrap.Bootstrap;
 import io.netty.buffer.ByteBuf;
@@ -86,6 +87,9 @@
 import io.netty.handler.codec.http2.HttpToHttp2ConnectionHandler;
 import io.netty.handler.codec.http2.HttpToHttp2ConnectionHandlerBuilder;
 import io.netty.handler.codec.http2.InboundHttp2ToHttpAdapterBuilder;
+import io.netty.handler.ssl.ClientAuth;
+import io.netty.handler.ssl.SslContextBuilder;
+import io.netty.handler.ssl.SslHandler;
 import io.netty.util.AttributeKey;
 
 import static io.netty.handler.codec.http.HttpHeaderNames.HOST;
@@ -95,7 +99,7 @@
 /**
  * Tiny helper to send http requests over netty.
  */
-class Netty4HttpClient implements Closeable {
+public class Netty4HttpClient implements Closeable {
 
     static Collection<String> returnHttpResponseBodies(Collection<FullHttpResponse> responses) {
         List<String> list = new ArrayList<>(responses.size());
@@ -114,31 +118,46 @@ static Collection<String> returnOpaqueIds(Collection<FullHttpResponse> responses
     }
 
     private final Bootstrap clientBootstrap;
-    private final BiFunction<CountDownLatch, Collection<FullHttpResponse>, AwaitableChannelInitializer> handlerFactory;
+    private final TriFunction<CountDownLatch, Collection<FullHttpResponse>, Boolean, AwaitableChannelInitializer> handlerFactory;
+    private final boolean secure;
 
     Netty4HttpClient(
         Bootstrap clientBootstrap,
-        BiFunction<CountDownLatch, Collection<FullHttpResponse>, AwaitableChannelInitializer> handlerFactory
+        TriFunction<CountDownLatch, Collection<FullHttpResponse>, Boolean, AwaitableChannelInitializer> handlerFactory,
+        boolean secure
     ) {
         this.clientBootstrap = clientBootstrap;
         this.handlerFactory = handlerFactory;
+        this.secure = secure;
     }
 
-    static Netty4HttpClient http() {
+    public static Netty4HttpClient https() {
         return new Netty4HttpClient(
             new Bootstrap().channel(NettyAllocator.getChannelType())
                 .option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator())
                 .group(new NioEventLoopGroup(1)),
-            CountDownLatchHandlerHttp::new
+            CountDownLatchHandlerHttp::new,
+            true
         );
     }
 
-    static Netty4HttpClient http2() {
+    public static Netty4HttpClient http() {
         return new Netty4HttpClient(
             new Bootstrap().channel(NettyAllocator.getChannelType())
                 .option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator())
                 .group(new NioEventLoopGroup(1)),
-            CountDownLatchHandlerHttp2::new
+            CountDownLatchHandlerHttp::new,
+            false
+        );
+    }
+
+    public static Netty4HttpClient http2() {
+        return new Netty4HttpClient(
+            new Bootstrap().channel(NettyAllocator.getChannelType())
+                .option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator())
+                .group(new NioEventLoopGroup(1)),
+            CountDownLatchHandlerHttp2::new,
+            false
         );
     }
 
@@ -148,7 +167,7 @@ public List<FullHttpResponse> get(SocketAddress remoteAddress, String... uris) t
             final HttpRequest httpRequest = new DefaultFullHttpRequest(HTTP_1_1, HttpMethod.GET, uris[i]);
             httpRequest.headers().add(HOST, "localhost");
             httpRequest.headers().add("X-Opaque-ID", String.valueOf(i));
-            httpRequest.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), "http");
+            httpRequest.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), secure ? "https" : "http");
             requests.add(httpRequest);
         }
         return sendRequests(remoteAddress, requests);
@@ -195,7 +214,7 @@ private synchronized List<FullHttpResponse> sendRequests(final SocketAddress rem
         final CountDownLatch latch = new CountDownLatch(requests.size());
         final List<FullHttpResponse> content = Collections.synchronizedList(new ArrayList<>(requests.size()));
 
-        final AwaitableChannelInitializer handler = handlerFactory.apply(latch, content);
+        final AwaitableChannelInitializer handler = handlerFactory.apply(latch, content, secure);
         clientBootstrap.handler(handler);
 
         ChannelFuture channelFuture = null;
@@ -232,19 +251,32 @@ private static class CountDownLatchHandlerHttp extends AwaitableChannelInitializ
 
         private final CountDownLatch latch;
         private final Collection<FullHttpResponse> content;
+        private final boolean secure;
 
-        CountDownLatchHandlerHttp(final CountDownLatch latch, final Collection<FullHttpResponse> content) {
+        CountDownLatchHandlerHttp(final CountDownLatch latch, final Collection<FullHttpResponse> content, final boolean secure) {
             this.latch = latch;
             this.content = content;
+            this.secure = secure;
         }
 
         @Override
-        protected void initChannel(SocketChannel ch) {
+        protected void initChannel(SocketChannel ch) throws Exception {
             final int maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB).bytesAsInt();
             ch.pipeline().addLast(new HttpResponseDecoder());
             ch.pipeline().addLast(new HttpRequestEncoder());
             ch.pipeline().addLast(new HttpContentDecompressor());
             ch.pipeline().addLast(new HttpObjectAggregator(maxContentLength));
+            if (secure) {
+                final SslHandler sslHandler = new SslHandler(
+                    SslContextBuilder.forClient()
+                        .clientAuth(ClientAuth.NONE)
+                        .trustManager(TrustAllManager.INSTANCE)
+                        .build()
+                        .newEngine(ch.alloc())
+                );
+                ch.pipeline().addFirst("client_ssl_handler", sslHandler);
+            }
+
             ch.pipeline().addLast(new SimpleChannelInboundHandler<HttpObject>() {
                 @Override
                 protected void channelRead0(ChannelHandlerContext ctx, HttpObject msg) {
@@ -283,11 +315,13 @@ private static class CountDownLatchHandlerHttp2 extends AwaitableChannelInitiali
 
         private final CountDownLatch latch;
         private final Collection<FullHttpResponse> content;
+        private final boolean secure;
         private Http2SettingsHandler settingsHandler;
 
-        CountDownLatchHandlerHttp2(final CountDownLatch latch, final Collection<FullHttpResponse> content) {
+        CountDownLatchHandlerHttp2(final CountDownLatch latch, final Collection<FullHttpResponse> content, final boolean secure) {
             this.latch = latch;
             this.content = content;
+            this.secure = secure;
         }
 
         @Override
diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java
new file mode 100644
index 0000000000000..9ea49d0b24d44
--- /dev/null
+++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java
@@ -0,0 +1,603 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.http.netty4.ssl; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.OpenSearchException; +import org.opensearch.common.network.NetworkAddress; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.MockBigArrays; +import org.opensearch.common.util.MockPageCacheRecycler; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.http.BindHttpException; +import org.opensearch.http.CorsHandler; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.http.HttpTransportSettings; +import org.opensearch.http.NullDispatcher; +import org.opensearch.http.netty4.Netty4HttpClient; +import org.opensearch.plugins.SecureTransportSettingsProvider; +import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.rest.FakeRestRequest; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.NettyAllocator; +import org.opensearch.transport.SharedGroupFactory; +import org.opensearch.transport.TcpTransport; +import org.opensearch.transport.netty4.ssl.TrustAllManager; +import org.junit.After; +import org.junit.Before; + +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLException; + +import java.io.IOException; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.UnrecoverableKeyException; +import java.security.cert.CertificateException; +import java.util.Collections; +import java.util.Optional; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import io.netty.bootstrap.Bootstrap; +import io.netty.buffer.ByteBufUtil; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelHandlerAdapter; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioSocketChannel; +import io.netty.handler.codec.TooLongFrameException; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaderValues; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpUtil; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.ssl.ClientAuth; +import io.netty.handler.ssl.SslContextBuilder; + 
+import static org.opensearch.core.rest.RestStatus.BAD_REQUEST; +import static org.opensearch.core.rest.RestStatus.OK; +import static org.opensearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; +import static org.opensearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; + +/** + * Tests for the {@link SecureNetty4HttpServerTransport} class. + */ +public class SecureNetty4HttpServerTransportTests extends OpenSearchTestCase { + + private NetworkService networkService; + private ThreadPool threadPool; + private MockBigArrays bigArrays; + private ClusterSettings clusterSettings; + private SecureTransportSettingsProvider secureTransportSettingsProvider; + + @Before + public void setup() throws Exception { + networkService = new NetworkService(Collections.emptyList()); + threadPool = new TestThreadPool("test"); + bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + + secureTransportSettingsProvider = new SecureTransportSettingsProvider() { + @Override + public Optional buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport) { + return Optional.empty(); + } + + @Override + public Optional buildServerTransportExceptionHandler(Settings settings, TcpTransport transport) { + return Optional.empty(); + } + + @Override + public Optional buildSecureHttpServerEngine(Settings settings, HttpServerTransport transport) throws SSLException { + try { + final KeyStore keyStore = KeyStore.getInstance("PKCS12"); + keyStore.load( + SecureNetty4HttpServerTransportTests.class.getResourceAsStream("/netty4-secure.jks"), + "password".toCharArray() + ); + + final KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance("SunX509"); + keyManagerFactory.init(keyStore, "password".toCharArray()); + + SSLEngine engine = SslContextBuilder.forServer(keyManagerFactory) + .trustManager(TrustAllManager.INSTANCE) + .build() + .newEngine(NettyAllocator.getAllocator()); + return Optional.of(engine); + } catch (final IOException | NoSuchAlgorithmException | UnrecoverableKeyException | KeyStoreException + | CertificateException ex) { + throw new SSLException(ex); + } + } + + @Override + public Optional buildSecureServerTransportEngine(Settings settings, TcpTransport transport) throws SSLException { + return Optional.empty(); + } + + @Override + public Optional buildSecureClientTransportEngine(Settings settings, String hostname, int port) throws SSLException { + return Optional.of( + SslContextBuilder.forClient() + .clientAuth(ClientAuth.NONE) + .trustManager(TrustAllManager.INSTANCE) + .build() + .newEngine(NettyAllocator.getAllocator()) + ); + } + }; + } + + @After + public void shutdown() throws Exception { + if (threadPool != null) { + threadPool.shutdownNow(); + } + threadPool = null; + networkService = null; + bigArrays = null; + clusterSettings = null; + } + + /** + * Test that {@link SecureNetty4HttpServerTransport} supports the "Expect: 100-continue" HTTP header + * @throws InterruptedException if the client communication with the server is interrupted + */ + public void testExpectContinueHeader() throws InterruptedException { + final Settings settings = createSettings(); + final int contentLength = randomIntBetween(1, 
HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings).bytesAsInt()); + runExpectHeaderTest(settings, HttpHeaderValues.CONTINUE.toString(), contentLength, HttpResponseStatus.CONTINUE); + } + + /** + * Test that {@link SecureNetty4HttpServerTransport} responds to a + * 100-continue expectation with too large a content-length + * with a 413 status. + * @throws InterruptedException if the client communication with the server is interrupted + */ + public void testExpectContinueHeaderContentLengthTooLong() throws InterruptedException { + final String key = HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.getKey(); + final int maxContentLength = randomIntBetween(1, 104857600); + final Settings settings = createBuilderWithPort().put(key, maxContentLength + "b").build(); + final int contentLength = randomIntBetween(maxContentLength + 1, Integer.MAX_VALUE); + runExpectHeaderTest(settings, HttpHeaderValues.CONTINUE.toString(), contentLength, HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE); + } + + /** + * Test that {@link SecureNetty4HttpServerTransport} responds to an unsupported expectation with a 417 status. + * @throws InterruptedException if the client communication with the server is interrupted + */ + public void testExpectUnsupportedExpectation() throws InterruptedException { + Settings settings = createSettings(); + runExpectHeaderTest(settings, "chocolate=yummy", 0, HttpResponseStatus.EXPECTATION_FAILED); + } + + private void runExpectHeaderTest( + final Settings settings, + final String expectation, + final int contentLength, + final HttpResponseStatus expectedStatus + ) throws InterruptedException { + + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + @Override + public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) { + channel.sendResponse(new BytesRestResponse(OK, BytesRestResponse.TEXT_CONTENT_TYPE, new BytesArray("done"))); + } + + @Override + public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, Throwable cause) { + logger.error( + new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())), + cause + ); + throw new AssertionError(); + } + }; + try ( + SecureNetty4HttpServerTransport transport = new SecureNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + dispatcher, + clusterSettings, + new SharedGroupFactory(settings), + secureTransportSettingsProvider, + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + try (Netty4HttpClient client = Netty4HttpClient.https()) { + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/"); + request.headers().set(HttpHeaderNames.EXPECT, expectation); + HttpUtil.setContentLength(request, contentLength); + + final FullHttpResponse response = client.send(remoteAddress.address(), request); + try { + assertThat(response.status(), equalTo(expectedStatus)); + if (expectedStatus.equals(HttpResponseStatus.CONTINUE)) { + final FullHttpRequest continuationRequest = new DefaultFullHttpRequest( + HttpVersion.HTTP_1_1, + HttpMethod.POST, + "/", + Unpooled.EMPTY_BUFFER + ); + final FullHttpResponse continuationResponse = client.send(remoteAddress.address(), continuationRequest); + try { + assertThat(continuationResponse.status(), is(HttpResponseStatus.OK)); + assertThat( + new 
String(ByteBufUtil.getBytes(continuationResponse.content()), StandardCharsets.UTF_8), + is("done") + ); + } finally { + continuationResponse.release(); + } + } + } finally { + response.release(); + } + } + } + } + + public void testBindUnavailableAddress() { + Settings initialSettings = createSettings(); + try ( + SecureNetty4HttpServerTransport transport = new SecureNetty4HttpServerTransport( + initialSettings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + new NullDispatcher(), + clusterSettings, + new SharedGroupFactory(Settings.EMPTY), + secureTransportSettingsProvider, + NoopTracer.INSTANCE + ) + ) { + transport.start(); + TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + Settings settings = Settings.builder() + .put("http.port", remoteAddress.getPort()) + .put("network.host", remoteAddress.getAddress()) + .build(); + try ( + SecureNetty4HttpServerTransport otherTransport = new SecureNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + new NullDispatcher(), + clusterSettings, + new SharedGroupFactory(settings), + secureTransportSettingsProvider, + NoopTracer.INSTANCE + ) + ) { + BindHttpException bindHttpException = expectThrows(BindHttpException.class, otherTransport::start); + assertEquals("Failed to bind to " + NetworkAddress.format(remoteAddress.address()), bindHttpException.getMessage()); + } + } + } + + public void testBadRequest() throws InterruptedException { + final AtomicReference causeReference = new AtomicReference<>(); + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + logger.error("--> Unexpected successful request [{}]", FakeRestRequest.requestToString(request)); + throw new AssertionError(); + } + + @Override + public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) { + causeReference.set(cause); + try { + final OpenSearchException e = new OpenSearchException("you sent a bad request and you should feel bad"); + channel.sendResponse(new BytesRestResponse(channel, BAD_REQUEST, e)); + } catch (final IOException e) { + throw new AssertionError(e); + } + } + + }; + + final Settings settings; + final int maxInitialLineLength; + final Setting httpMaxInitialLineLengthSetting = HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH; + if (randomBoolean()) { + maxInitialLineLength = httpMaxInitialLineLengthSetting.getDefault(Settings.EMPTY).bytesAsInt(); + settings = createSettings(); + } else { + maxInitialLineLength = randomIntBetween(1, 8192); + settings = createBuilderWithPort().put(httpMaxInitialLineLengthSetting.getKey(), maxInitialLineLength + "b").build(); + } + + try ( + SecureNetty4HttpServerTransport transport = new SecureNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + dispatcher, + clusterSettings, + new SharedGroupFactory(settings), + secureTransportSettingsProvider, + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + + try (Netty4HttpClient client = Netty4HttpClient.https()) { + final String url = "/" + new String(new byte[maxInitialLineLength], Charset.forName("UTF-8")); + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, 
HttpMethod.GET, url); + + final FullHttpResponse response = client.send(remoteAddress.address(), request); + try { + assertThat(response.status(), equalTo(HttpResponseStatus.BAD_REQUEST)); + assertThat( + new String(response.content().array(), Charset.forName("UTF-8")), + containsString("you sent a bad request and you should feel bad") + ); + } finally { + response.release(); + } + } + } + + assertNotNull(causeReference.get()); + assertThat(causeReference.get(), instanceOf(TooLongFrameException.class)); + } + + public void testLargeCompressedResponse() throws InterruptedException { + final String responseString = randomAlphaOfLength(4 * 1024 * 1024); + final String url = "/thing"; + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + if (url.equals(request.uri())) { + channel.sendResponse(new BytesRestResponse(OK, responseString)); + } else { + logger.error("--> Unexpected successful uri [{}]", request.uri()); + throw new AssertionError(); + } + } + + @Override + public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) { + logger.error( + new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())), + cause + ); + throw new AssertionError(); + } + + }; + + try ( + SecureNetty4HttpServerTransport transport = new SecureNetty4HttpServerTransport( + Settings.EMPTY, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + dispatcher, + clusterSettings, + new SharedGroupFactory(Settings.EMPTY), + secureTransportSettingsProvider, + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + + try (Netty4HttpClient client = Netty4HttpClient.https()) { + DefaultFullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url); + request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, randomFrom("deflate", "gzip")); + final FullHttpResponse response = client.send(remoteAddress.address(), request); + try { + assertThat(response.status(), equalTo(HttpResponseStatus.OK)); + byte[] bytes = new byte[response.content().readableBytes()]; + response.content().readBytes(bytes); + assertThat(new String(bytes, StandardCharsets.UTF_8), equalTo(responseString)); + } finally { + response.release(); + } + } + } + } + + public void testCorsRequest() throws InterruptedException { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + logger.error("--> Unexpected successful request [{}]", FakeRestRequest.requestToString(request)); + throw new AssertionError(); + } + + @Override + public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) { + logger.error( + new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())), + cause + ); + throw new AssertionError(); + } + + }; + + final Settings settings = createBuilderWithPort().put(SETTING_CORS_ENABLED.getKey(), true) + .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), "test-cors.org") + .build(); + + try ( + SecureNetty4HttpServerTransport transport = new SecureNetty4HttpServerTransport( + 
settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + dispatcher, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + new SharedGroupFactory(settings), + secureTransportSettingsProvider, + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + + // Test pre-flight request + try (Netty4HttpClient client = Netty4HttpClient.https()) { + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.OPTIONS, "/"); + request.headers().add(CorsHandler.ORIGIN, "test-cors.org"); + request.headers().add(CorsHandler.ACCESS_CONTROL_REQUEST_METHOD, "POST"); + + final FullHttpResponse response = client.send(remoteAddress.address(), request); + try { + assertThat(response.status(), equalTo(HttpResponseStatus.OK)); + assertThat(response.headers().get(CorsHandler.ACCESS_CONTROL_ALLOW_ORIGIN), equalTo("test-cors.org")); + assertThat(response.headers().get(CorsHandler.VARY), equalTo(CorsHandler.ORIGIN)); + assertTrue(response.headers().contains(CorsHandler.DATE)); + } finally { + response.release(); + } + } + + // Test short-circuited request + try (Netty4HttpClient client = Netty4HttpClient.https()) { + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + request.headers().add(CorsHandler.ORIGIN, "google.com"); + + final FullHttpResponse response = client.send(remoteAddress.address(), request); + try { + assertThat(response.status(), equalTo(HttpResponseStatus.FORBIDDEN)); + } finally { + response.release(); + } + } + } + } + + public void testReadTimeout() throws Exception { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + logger.error("--> Unexpected successful request [{}]", FakeRestRequest.requestToString(request)); + throw new AssertionError("Should not have received a dispatched request"); + } + + @Override + public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) { + logger.error( + new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())), + cause + ); + throw new AssertionError("Should not have received a dispatched request"); + } + + }; + + Settings settings = createBuilderWithPort().put( + HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT.getKey(), + new TimeValue(randomIntBetween(100, 300)) + ).build(); + + NioEventLoopGroup group = new NioEventLoopGroup(); + try ( + SecureNetty4HttpServerTransport transport = new SecureNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + dispatcher, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + new SharedGroupFactory(settings), + secureTransportSettingsProvider, + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + + CountDownLatch channelClosedLatch = new CountDownLatch(1); + + Bootstrap clientBootstrap = new Bootstrap().option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()) + .channel(NioSocketChannel.class) + .handler(new ChannelInitializer() { + + @Override + protected void initChannel(SocketChannel ch) { + ch.pipeline().addLast(new 
ChannelHandlerAdapter() { + }); + + } + }) + .group(group); + ChannelFuture connect = clientBootstrap.connect(remoteAddress.address()); + connect.channel().closeFuture().addListener(future -> channelClosedLatch.countDown()); + + assertTrue("Channel should be closed due to read timeout", channelClosedLatch.await(1, TimeUnit.MINUTES)); + + } finally { + group.shutdownGracefully().await(); + } + } + + private Settings createSettings() { + return createBuilderWithPort().build(); + } + + private Settings.Builder createBuilderWithPort() { + return Settings.builder().put(HttpTransportSettings.SETTING_HTTP_PORT.getKey(), getPortRange()); + } +} diff --git a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java new file mode 100644 index 0000000000000..0cae58b8efa2a --- /dev/null +++ b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java @@ -0,0 +1,234 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.transport.netty4.ssl; + +import org.opensearch.Version; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.common.util.net.NetUtils; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.plugins.SecureTransportSettingsProvider; +import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.test.transport.StubbableTransport; +import org.opensearch.transport.AbstractSimpleTransportTestCase; +import org.opensearch.transport.ConnectTransportException; +import org.opensearch.transport.ConnectionProfile; +import org.opensearch.transport.Netty4NioSocketChannel; +import org.opensearch.transport.NettyAllocator; +import org.opensearch.transport.SharedGroupFactory; +import org.opensearch.transport.TcpChannel; +import org.opensearch.transport.TcpTransport; +import org.opensearch.transport.TestProfiles; +import org.opensearch.transport.Transport; +import org.opensearch.transport.netty4.Netty4TcpChannel; + +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLException; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.nio.channels.SocketChannel; +import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.UnrecoverableKeyException; +import java.security.cert.CertificateException; +import java.util.Collections; +import java.util.Optional; + +import io.netty.handler.ssl.ClientAuth; +import io.netty.handler.ssl.SslContextBuilder; + +import static java.util.Collections.emptyMap; +import static 
java.util.Collections.emptySet; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class SimpleSecureNetty4TransportTests extends AbstractSimpleTransportTestCase { + @Override + protected Transport build(Settings settings, final Version version, ClusterSettings clusterSettings, boolean doHandshake) { + NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); + final SecureTransportSettingsProvider secureTransportSettingsProvider = new SecureTransportSettingsProvider() { + @Override + public Optional buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport) { + return Optional.empty(); + } + + @Override + public Optional buildServerTransportExceptionHandler(Settings settings, TcpTransport transport) { + return Optional.empty(); + } + + @Override + public Optional buildSecureHttpServerEngine(Settings settings, HttpServerTransport transport) throws SSLException { + try { + final KeyStore keyStore = KeyStore.getInstance("PKCS12"); + keyStore.load( + SimpleSecureNetty4TransportTests.class.getResourceAsStream("/netty4-secure.jks"), + "password".toCharArray() + ); + + final KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance("SunX509"); + keyManagerFactory.init(keyStore, "password".toCharArray()); + + SSLEngine engine = SslContextBuilder.forServer(keyManagerFactory) + .trustManager(TrustAllManager.INSTANCE) + .build() + .newEngine(NettyAllocator.getAllocator()); + return Optional.of(engine); + } catch (final IOException | NoSuchAlgorithmException | UnrecoverableKeyException | KeyStoreException + | CertificateException ex) { + throw new SSLException(ex); + } + } + + @Override + public Optional buildSecureServerTransportEngine(Settings settings, TcpTransport transport) throws SSLException { + try { + final KeyStore keyStore = KeyStore.getInstance("PKCS12"); + keyStore.load( + SimpleSecureNetty4TransportTests.class.getResourceAsStream("/netty4-secure.jks"), + "password".toCharArray() + ); + + final KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance("SunX509"); + keyManagerFactory.init(keyStore, "password".toCharArray()); + + SSLEngine engine = SslContextBuilder.forServer(keyManagerFactory) + .clientAuth(ClientAuth.NONE) + .trustManager(TrustAllManager.INSTANCE) + .build() + .newEngine(NettyAllocator.getAllocator()); + return Optional.of(engine); + } catch (final IOException | NoSuchAlgorithmException | UnrecoverableKeyException | KeyStoreException + | CertificateException ex) { + throw new SSLException(ex); + } + + } + + @Override + public Optional buildSecureClientTransportEngine(Settings settings, String hostname, int port) throws SSLException { + return Optional.of( + SslContextBuilder.forClient() + .clientAuth(ClientAuth.NONE) + .trustManager(TrustAllManager.INSTANCE) + .build() + .newEngine(NettyAllocator.getAllocator()) + ); + } + }; + + return new SecureNetty4Transport( + settings, + version, + threadPool, + new NetworkService(Collections.emptyList()), + PageCacheRecycler.NON_RECYCLING_INSTANCE, + namedWriteableRegistry, + new NoneCircuitBreakerService(), + new SharedGroupFactory(settings), + secureTransportSettingsProvider, + NoopTracer.INSTANCE + ) { + + @Override + public void executeHandshake( + DiscoveryNode node, + TcpChannel channel, + ConnectionProfile profile, + ActionListener listener + ) { + if (doHandshake) { + 
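+                    // Perform the real handshake over the TLS channel; the else branch below
+                    // short-circuits by answering with the minimum compatible version instead.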
super.executeHandshake(node, channel, profile, listener); + } else { + listener.onResponse(version.minimumCompatibilityVersion()); + } + } + }; + } + + public void testConnectException() throws UnknownHostException { + try { + serviceA.connectToNode( + new DiscoveryNode( + "C", + new TransportAddress(InetAddress.getByName("localhost"), 9876), + emptyMap(), + emptySet(), + Version.CURRENT + ) + ); + fail("Expected ConnectTransportException"); + } catch (ConnectTransportException e) { + assertThat(e.getMessage(), containsString("connect_exception")); + assertThat(e.getMessage(), containsString("[127.0.0.1:9876]")); + } + } + + public void testDefaultKeepAliveSettings() throws IOException { + assumeTrue("setting default keepalive options not supported on this platform", (IOUtils.LINUX || IOUtils.MAC_OS_X)); + try ( + MockTransportService serviceC = buildService("TS_C", Version.CURRENT, Settings.EMPTY); + MockTransportService serviceD = buildService("TS_D", Version.CURRENT, Settings.EMPTY) + ) { + serviceC.start(); + serviceC.acceptIncomingRequests(); + serviceD.start(); + serviceD.acceptIncomingRequests(); + + try (Transport.Connection connection = serviceC.openConnection(serviceD.getLocalDiscoNode(), TestProfiles.LIGHT_PROFILE)) { + assertThat(connection, instanceOf(StubbableTransport.WrappedConnection.class)); + Transport.Connection conn = ((StubbableTransport.WrappedConnection) connection).getConnection(); + assertThat(conn, instanceOf(TcpTransport.NodeChannels.class)); + TcpTransport.NodeChannels nodeChannels = (TcpTransport.NodeChannels) conn; + for (TcpChannel channel : nodeChannels.getChannels()) { + assertFalse(channel.isServerChannel()); + checkDefaultKeepAliveOptions(channel); + } + + assertThat(serviceD.getOriginalTransport(), instanceOf(TcpTransport.class)); + for (TcpChannel channel : getAcceptedChannels((TcpTransport) serviceD.getOriginalTransport())) { + assertTrue(channel.isServerChannel()); + checkDefaultKeepAliveOptions(channel); + } + } + } + } + + private void checkDefaultKeepAliveOptions(TcpChannel channel) throws IOException { + assertThat(channel, instanceOf(Netty4TcpChannel.class)); + Netty4TcpChannel nettyChannel = (Netty4TcpChannel) channel; + assertThat(nettyChannel.getNettyChannel(), instanceOf(Netty4NioSocketChannel.class)); + Netty4NioSocketChannel netty4NioSocketChannel = (Netty4NioSocketChannel) nettyChannel.getNettyChannel(); + SocketChannel socketChannel = netty4NioSocketChannel.javaChannel(); + assertThat(socketChannel.supportedOptions(), hasItem(NetUtils.getTcpKeepIdleSocketOptionOrNull())); + Integer keepIdle = socketChannel.getOption(NetUtils.getTcpKeepIdleSocketOptionOrNull()); + assertNotNull(keepIdle); + assertThat(keepIdle, lessThanOrEqualTo(500)); + assertThat(socketChannel.supportedOptions(), hasItem(NetUtils.getTcpKeepIntervalSocketOptionOrNull())); + Integer keepInterval = socketChannel.getOption(NetUtils.getTcpKeepIntervalSocketOptionOrNull()); + assertNotNull(keepInterval); + assertThat(keepInterval, lessThanOrEqualTo(500)); + } + +} diff --git a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/TrustAllManager.java b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/TrustAllManager.java new file mode 100644 index 0000000000000..a38c542b5780e --- /dev/null +++ b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/TrustAllManager.java @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file 
be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.transport.netty4.ssl; + +import javax.net.ssl.X509TrustManager; + +import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; + +public class TrustAllManager implements X509TrustManager { + public static final X509TrustManager INSTANCE = new TrustAllManager(); + + private TrustAllManager() {} + + public void checkClientTrusted(X509Certificate[] chain, String authType) throws CertificateException {} + + public void checkServerTrusted(X509Certificate[] chain, String authType) throws CertificateException {} + + public X509Certificate[] getAcceptedIssuers() { + return new X509Certificate[0]; + } +} diff --git a/modules/transport-netty4/src/test/resources/README.txt b/modules/transport-netty4/src/test/resources/README.txt new file mode 100644 index 0000000000000..c8cec5d3803a4 --- /dev/null +++ b/modules/transport-netty4/src/test/resources/README.txt @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +# +# This is README describes how the certificates in this directory were created. +# This file can also be executed as a script +# + +# 1. Create certificate key + +openssl req -x509 -sha256 -newkey rsa:2048 -keyout certificate.key -out certificate.crt -days 1024 -nodes + +# 2. Export the certificate in pkcs12 format + +openssl pkcs12 -export -in certificate.crt -inkey certificate.key -out server.p12 -name netty4-secure -password pass:password + +# 3. Import the certificate into JDK keystore (PKCS12 type) + +keytool -importkeystore -srcstorepass password -destkeystore netty4-secure.jks -srckeystore server.p12 -srcstoretype PKCS12 -alias netty4-secure -deststorepass password \ No newline at end of file diff --git a/modules/transport-netty4/src/test/resources/certificate.crt b/modules/transport-netty4/src/test/resources/certificate.crt new file mode 100644 index 0000000000000..54c78fdbcf6de --- /dev/null +++ b/modules/transport-netty4/src/test/resources/certificate.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDkzCCAnugAwIBAgIUddAawr5zygcd+Dcn9WVDpO4BJ7YwDQYJKoZIhvcNAQEL +BQAwWTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM +GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDESMBAGA1UEAwwJbG9jYWxob3N0MB4X +DTI0MDMxNDE5NDQzOVoXDTI3MDEwMjE5NDQzOVowWTELMAkGA1UEBhMCQVUxEzAR +BgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5 +IEx0ZDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAzjOKkg6Iba5zfZ8b/RYw+PGmGEfbdGuuF10Wz4Jmx/Nk4VfDLxdh +TW8VllUL2JD7uPkjABj7pW3awAbvIJ+VGbKqfBr1Nsz0mPPzhT8cfuMH/FDZgQs3 +4HuqDKr0LfC1Kw5E3WF0GVMBDNu0U+nKoeqySeYjGdxDnd3W4cqK5AnUxL0RnIny +Bw7ZuhcU55XndH/Xauro/2EpvJduDsWMdqt7ZfIf1TOmaiQHK+82yb/drVaJbczK +uTpn1Kv2bnzkQEckgq+z1dLNOOyvP2xf+nsziw5ilJe92e5GJOUJYFAlEgUAGpfD +dv6j/gTRYvdJCJItOQEQtektNCAZsoc0wwIDAQABo1MwUTAdBgNVHQ4EFgQUzHts +wIt+zhB/R4U4Do2P6rr0YhkwHwYDVR0jBBgwFoAUzHtswIt+zhB/R4U4Do2P6rr0 +YhkwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAveh870jJX7vt +oLCrdugsyo79pR4f7Nr1kUy3jJrfoaoUmrjiiiHWgT22fGwp7j1GZF2mVfo8YVaK +63YNn5gB2NNZhguPOFC4AdvHRYOKRBOaOvWK8oq7BcJ//18JYI/pPnpgkYvJjqv4 +gFKaZX9qWtujHpAmKiVGs7pwYGNXfixPHRNV4owcfHMIH5dhbbqT49j94xVpjbXs +OymKtFl4kpCE/0LzKFrFcuu55Am1VLBHx2cPpHLOipgUcF5BHFlQ8AXiCMOwfPAw +d22mLB6Gt1oVEpyvQHYd3e04FetEXQ9E8T+NKWZx/8Ucf+IWBYmZBRxch6O83xgk +bAbGzqkbzQ== +-----END CERTIFICATE----- diff --git a/modules/transport-netty4/src/test/resources/certificate.key b/modules/transport-netty4/src/test/resources/certificate.key new file mode 100644 index 
0000000000000..228350180935d --- /dev/null +++ b/modules/transport-netty4/src/test/resources/certificate.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDOM4qSDohtrnN9 +nxv9FjD48aYYR9t0a64XXRbPgmbH82ThV8MvF2FNbxWWVQvYkPu4+SMAGPulbdrA +Bu8gn5UZsqp8GvU2zPSY8/OFPxx+4wf8UNmBCzfge6oMqvQt8LUrDkTdYXQZUwEM +27RT6cqh6rJJ5iMZ3EOd3dbhyorkCdTEvRGcifIHDtm6FxTnled0f9dq6uj/YSm8 +l24OxYx2q3tl8h/VM6ZqJAcr7zbJv92tVoltzMq5OmfUq/ZufORARySCr7PV0s04 +7K8/bF/6ezOLDmKUl73Z7kYk5QlgUCUSBQAal8N2/qP+BNFi90kIki05ARC16S00 +IBmyhzTDAgMBAAECggEAVOdiElvLjyX6xeoC00YU6hxOIMdNtHU2HMamwtDV01UD +38mMQ9KjrQelYt4n34drLrHe2IZw75/5J4JzagJrmUY47psHBwaDXItuZRokeJaw +zhLYTEs7OcKRtV+a5WOspUrdzi33aQoFb67zZG3qkpsZyFXrdBV+/fy/Iv+MCvLH +xR0jQ5mzE3cw20R7S4nddChBA/y8oKGOo6QRf2SznC1jL/+yolHvJPEn1v8AUxYm +BMPHxj1O0c4M4IxnJQ3Y5Jy9OaFMyMsFlF1hVhc/3LDDxDyOuBsVsFDicojyrRea +GKngIke0yezy7Wo4NUcp8YQhafonpWVsSJJdOUotcQKBgQD0rihFBXVtcG1d/Vy7 +FvLHrmccD56JNV744LSn2CDM7W1IulNbDUZINdCFqL91u5LpxozeE1FPY1nhwncJ +N7V7XYCaSLCuV1YJzRmUCjnzk2RyopGpzWog3f9uUFGgrk1HGbNAv99k/REya6Iu +IRSkuQhaJOj3bRXzonh0K4GjewKBgQDXvamtCioOUMSP8vq919YMkBw7F+z/fr0p +pamO8HL9eewAUg6N92JQ9kobSo/GptdmdHIjs8LqnS5C3H13GX5Qlf5GskOlCpla +V55ElaSp0gvKwWE168U7gQH4etPQAXXJrOGFaGbPj9W81hTUud7HVE88KYdfWTBo +I7TuE25tWQKBgBRjcr2Vn9xXsvVTCGgamG5lLPhcoNREGz7X0pXt34XT/vhBdnKu +331i5pZMom+YCrzqK5DRwUPBPpseTjb5amj2OKIijn5ojqXQbmI0m/GdBZC71TF2 +CXLlrMQvcy3VeGEFVjd+BYpvwAAYkfIQFZ1IQdbpHnSHpX2guzLK8UmDAoGBANUy +PIcf0EetUVHfkCIjNQfdMcjD8BTcLhsF9vWmcDxFTA9VB8ULf0D64mjt2f85yQsa +b+EQN8KZ6alxMxuLOeRxFYLPj0F9o+Y/R8wHBV48kCKhz2r1v0b6SfQ/jSm1B61x +BrxLW64qOdIOzS8bLyhUDKkrcPesr8V548aRtUKhAoGBAKlNJFd8BCGKD9Td+3dE +oP1iHTX5XZ+cQIqL0e+GMQlK4HnQP566DFZU5/GHNNAfmyxd5iSRwhTqPMHRAmOb +pqQwsyufx0dFeIBxeSO3Z6jW5h2sl4nBipZpw9bzv6EBL1xRr0SfMNZzdnf4JFzc +0htGo/VO93Z2pv8w7uGUz1nN +-----END PRIVATE KEY----- diff --git a/modules/transport-netty4/src/test/resources/netty4-secure.jks b/modules/transport-netty4/src/test/resources/netty4-secure.jks new file mode 100644 index 0000000000000000000000000000000000000000..59dfd31c2a1567c6fbae386aa8f15c563bc66ae0 GIT binary patch literal 2790 zcma)8XEYm(8crgiLaZ7k_^3S_Tdh)5I#8=SvDFOy$b^} z0rT-tEEo?BJ)r{;cu4DiNstOK9`fphE~rNH!5#!!9K_}1H^u_V z=usLU2dZ9W+tu&&HP&rr0Gj`32Xv({(IBcgf--PNjPP?Wl@c*PRlG~?w#y{$$E7Ac zx~e{WW#twe_l9Rk?!G}q_9k~-h|^Nd;KI<0tW*OJCTydW5K?zK18ZR;_&+8)QRWs&D)HUeQu z9uJg@R_z>@?>MI)N$iZs?C70IBKa$HMTmwtRZM2#Eoh>qRjH=pY|48!KPy$z*ryQVZ6bbD&&t4TF&?ZKHwA0q_P!85hGFhoG?V+nrEvu|YdG zNrL!x5hXK9XG!dHbKAhpa*yK2X+&`U9}I8`ON-SR2AaO}$md&6HEJY?sp&0?74^a& zVGN_jRIazY8Xhs~^9fS?Fi-gwcl8p=<-=HX>-`)aSrru|SJ3LA(S5lX(T8(;a;TEk zjAO5JZ;+6Ri7Fk+ojzl_j>FsNxien-@nif$*x?i|H~&{et(-zmOA(y|b9$qWo>F~6 zki+c@3Ez0iXk}m9oTR;sSPhbZ=SK~bRJ`f>5rk8+I0fjlFBn~o1e<4(6(c5WFg`wy)X+ZN{sOnlx=ETJarFP_~xRzcNqSZ7%QHR!VzITeKkh$31~ zImEggf>lp$FXnU=Ts#bWExW*Y9;7%4Oe?((yn8vIeg2W0PfQBbZg?H7S6jR7_Ab`i zFt+ORFZV!dxP5(el{8;w<@pHgN@T-AM|1c0R#EBHJ|EF9tDR?*`HCSSS-NChYvG5g86>yVnk$g?6B#Mb(2mG-cf!r7rvHyVnf8E8TJzgSB$KPWTl~g|#-Rw(h>E!kcXTQ3Q6z`cP z)fS*)cgRrG{5|EtiN~1M7vJ;J2n&CXaT*Pb--Bc$Zi!bnJlbwCvj3*7ebDIBSRV6) zJ(Pz1F*1RFZ#H99b! 
zFAr=1?f}O-Ds0l4sX~>=qET8%wZFXa-H{oyX`N12eqs0c0QyH-0D8xlm&3z#uIe`1 zrI9EslWc5kB75)nwQj9;_~TDgCgpG=C=?yr`DQL%YWUdLLKt~BF_>(ozPEDYfmq5| z0JoEQ)AV!wN+o_w*8sM82I;8gUaTdYv=QY$QCfcLv(IxGSQ+S> z%RxCJI*K1%Di#OIIA!I`gNrV;_(=luy&a6iR5jIAl$8;9aP{vcCdPa`xZs4!1pxskBjXCrY< zOcTKv`EhagL&xGTrIIUt2v?a2%l?bgMOV&&+|WU;e$=RpZpz1|dNV>*m~d#i)d2TYZVO)~XMlNxBrxoWuJ<66oc1(`c8LIHSqMFI@oVB5 zCTi-Btx2^*IwaoUOI*HGf^JQ#GU_^GnY6KL3-W;<6D`7ahRD>wHVTL3(9$baE*+Uh zI?Qh*6kS`O(EC~T8Xj=&-O3)}wcfX~t$yft z0#%1s3udIR?zCsDgD^((_g@(9316i|*L)iFc0tc}Oh8!c)=u|})&8|LYjG)q`SIr(O$~<-27o6EP z#<~$zeByPj(`59rPL~;qQy3J_7#W=XF0H@rb!E#Ub}O0BW;u}u1Ml#*1v|V#XY%_PSfaE#-vVR6s9p13SJS(vq?@w&x z1io}4NWAA0VtOK@D-a#+({tI}OZlv_g9^ZdL~a$%7K051T_}A1FpPpLUh51%m?D%A zu;0HQ2nYdyMN)q`vbBY0Z04-Vi-TU!{Ws&)J05FHCn{A|`7rGNj0AIUHubc-71}?2 T^$whsN!%rkGx+QOE291eC9)6< literal 0 HcmV?d00001 diff --git a/server/src/main/java/org/opensearch/common/network/NetworkModule.java b/server/src/main/java/org/opensearch/common/network/NetworkModule.java index f97d5b2f80eeb..d0f5dd9e4581d 100644 --- a/server/src/main/java/org/opensearch/common/network/NetworkModule.java +++ b/server/src/main/java/org/opensearch/common/network/NetworkModule.java @@ -55,6 +55,7 @@ import org.opensearch.http.HttpServerTransport; import org.opensearch.index.shard.PrimaryReplicaSyncer.ResyncTask; import org.opensearch.plugins.NetworkPlugin; +import org.opensearch.plugins.SecureTransportSettingsProvider; import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.tasks.RawTaskStatus; import org.opensearch.tasks.Task; @@ -67,6 +68,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -85,6 +87,9 @@ public final class NetworkModule { public static final String HTTP_TYPE_KEY = "http.type"; public static final String HTTP_TYPE_DEFAULT_KEY = "http.type.default"; public static final String TRANSPORT_TYPE_DEFAULT_KEY = "transport.type.default"; + public static final String TRANSPORT_SSL_ENFORCE_HOSTNAME_VERIFICATION_KEY = "transport.ssl.enforce_hostname_verification"; + public static final String TRANSPORT_SSL_ENFORCE_HOSTNAME_VERIFICATION_RESOLVE_HOST_NAME_KEY = "transport.ssl.resolve_hostname"; + public static final String TRANSPORT_SSL_DUAL_MODE_ENABLED_KEY = "transport.ssl.dual_mode.enabled"; public static final Setting TRANSPORT_DEFAULT_TYPE_SETTING = Setting.simpleString( TRANSPORT_TYPE_DEFAULT_KEY, @@ -94,6 +99,22 @@ public final class NetworkModule { public static final Setting HTTP_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_KEY, Property.NodeScope); public static final Setting TRANSPORT_TYPE_SETTING = Setting.simpleString(TRANSPORT_TYPE_KEY, Property.NodeScope); + public static final Setting TRANSPORT_SSL_ENFORCE_HOSTNAME_VERIFICATION = Setting.boolSetting( + TRANSPORT_SSL_ENFORCE_HOSTNAME_VERIFICATION_KEY, + true, + Property.NodeScope + ); + public static final Setting TRANSPORT_SSL_ENFORCE_HOSTNAME_VERIFICATION_RESOLVE_HOST_NAME = Setting.boolSetting( + TRANSPORT_SSL_ENFORCE_HOSTNAME_VERIFICATION_RESOLVE_HOST_NAME_KEY, + true, + Property.NodeScope + ); + public static final Setting TRANSPORT_SSL_DUAL_MODE_ENABLED = Setting.boolSetting( + TRANSPORT_SSL_DUAL_MODE_ENABLED_KEY, + false, + Property.NodeScope + ); + private final Settings settings; private static final List namedWriteables = new ArrayList<>(); @@ -151,9 +172,17 @@ public NetworkModule( HttpServerTransport.Dispatcher dispatcher, ClusterSettings clusterSettings, Tracer 
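+        // Note: at most one SecureTransportSettingsProvider may be supplied; the constructor
+        // body below rejects multiple providers with an IllegalArgumentException.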
tracer, - List transportInterceptors + List transportInterceptors, + Collection secureTransportSettingsProvider ) { this.settings = settings; + + if (secureTransportSettingsProvider.size() > 1) { + throw new IllegalArgumentException( + "there is more than one secure transport settings provider: " + secureTransportSettingsProvider + ); + } + for (NetworkPlugin plugin : plugins) { Map> httpTransportFactory = plugin.getHttpTransports( settings, @@ -170,6 +199,7 @@ public NetworkModule( for (Map.Entry> entry : httpTransportFactory.entrySet()) { registerHttpTransport(entry.getKey(), entry.getValue()); } + Map> transportFactory = plugin.getTransports( settings, threadPool, @@ -182,6 +212,43 @@ public NetworkModule( for (Map.Entry> entry : transportFactory.entrySet()) { registerTransport(entry.getKey(), entry.getValue()); } + + // Register any secure transports if available + if (secureTransportSettingsProvider.isEmpty() == false) { + final SecureTransportSettingsProvider secureSettingProvider = secureTransportSettingsProvider.iterator().next(); + + final Map> secureHttpTransportFactory = plugin.getSecureHttpTransports( + settings, + threadPool, + bigArrays, + pageCacheRecycler, + circuitBreakerService, + xContentRegistry, + networkService, + dispatcher, + clusterSettings, + secureSettingProvider, + tracer + ); + for (Map.Entry> entry : secureHttpTransportFactory.entrySet()) { + registerHttpTransport(entry.getKey(), entry.getValue()); + } + + final Map> secureTransportFactory = plugin.getSecureTransports( + settings, + threadPool, + pageCacheRecycler, + circuitBreakerService, + namedWriteableRegistry, + networkService, + secureSettingProvider, + tracer + ); + for (Map.Entry> entry : secureTransportFactory.entrySet()) { + registerTransport(entry.getKey(), entry.getValue()); + } + } + List pluginTransportInterceptors = plugin.getTransportInterceptors( namedWriteableRegistry, threadPool.getThreadContext() diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 730a330b6ca60..7c8afb6b5c1b5 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -331,6 +331,9 @@ public void apply(Settings value, Settings current, Settings previous) { NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING, NetworkModule.HTTP_TYPE_SETTING, NetworkModule.TRANSPORT_TYPE_SETTING, + NetworkModule.TRANSPORT_SSL_DUAL_MODE_ENABLED, + NetworkModule.TRANSPORT_SSL_ENFORCE_HOSTNAME_VERIFICATION, + NetworkModule.TRANSPORT_SSL_ENFORCE_HOSTNAME_VERIFICATION_RESOLVE_HOST_NAME, HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS, HttpTransportSettings.SETTING_CORS_ENABLED, HttpTransportSettings.SETTING_CORS_MAX_AGE, diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 3ef3ae4f6230e..ea449afe1c811 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -202,6 +202,7 @@ import org.opensearch.plugins.ScriptPlugin; import org.opensearch.plugins.SearchPipelinePlugin; import org.opensearch.plugins.SearchPlugin; +import org.opensearch.plugins.SecureTransportSettingsProvider; import org.opensearch.plugins.SystemIndexPlugin; import org.opensearch.plugins.TelemetryPlugin; import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlService; @@ -945,6 +946,13 @@ protected Node( 
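+        // Secure transport settings providers are discovered from installed plugins via
+        // Plugin#getSecureSettingFactory and handed to NetworkModule, which accepts at most one.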
admissionControlService ); + final Collection secureTransportSettingsProviders = pluginsService.filterPlugins(Plugin.class) + .stream() + .map(p -> p.getSecureSettingFactory(settings).flatMap(f -> f.getSecureTransportSettingsProvider(settings))) + .filter(Optional::isPresent) + .map(Optional::get) + .collect(Collectors.toList()); + List transportInterceptors = List.of(admissionControlTransportInterceptor); final NetworkModule networkModule = new NetworkModule( settings, @@ -959,7 +967,8 @@ protected Node( restController, clusterService.getClusterSettings(), tracer, - transportInterceptors + transportInterceptors, + secureTransportSettingsProviders ); Collection>> indexTemplateMetadataUpgraders = pluginsService.filterPlugins( diff --git a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java index 07df40bafe6a1..679833c9f6e0d 100644 --- a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java @@ -107,4 +107,41 @@ default Map> getHttpTransports( ) { return Collections.emptyMap(); } + + /** + * Returns a map of secure {@link Transport} suppliers. + * See {@link org.opensearch.common.network.NetworkModule#TRANSPORT_TYPE_KEY} to configure a specific implementation. + */ + default Map> getSecureTransports( + Settings settings, + ThreadPool threadPool, + PageCacheRecycler pageCacheRecycler, + CircuitBreakerService circuitBreakerService, + NamedWriteableRegistry namedWriteableRegistry, + NetworkService networkService, + SecureTransportSettingsProvider secureTransportSettingsProvider, + Tracer tracer + ) { + return Collections.emptyMap(); + } + + /** + * Returns a map of secure {@link HttpServerTransport} suppliers. + * See {@link org.opensearch.common.network.NetworkModule#HTTP_TYPE_SETTING} to configure a specific implementation. + */ + default Map> getSecureHttpTransports( + Settings settings, + ThreadPool threadPool, + BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, + CircuitBreakerService circuitBreakerService, + NamedXContentRegistry xContentRegistry, + NetworkService networkService, + HttpServerTransport.Dispatcher dispatcher, + ClusterSettings clusterSettings, + SecureTransportSettingsProvider secureTransportSettingsProvider, + Tracer tracer + ) { + return Collections.emptyMap(); + } } diff --git a/server/src/main/java/org/opensearch/plugins/Plugin.java b/server/src/main/java/org/opensearch/plugins/Plugin.java index 48486a6b55dfd..33c4155d12c25 100644 --- a/server/src/main/java/org/opensearch/plugins/Plugin.java +++ b/server/src/main/java/org/opensearch/plugins/Plugin.java @@ -269,4 +269,13 @@ public void close() throws IOException { public Collection getAdditionalIndexSettingProviders() { return Collections.emptyList(); } + + /** + * Returns the {@link SecureSettingsFactory} instance that could be used to configure the + * security related components (fe. 
transports) + * @return the {@link SecureSettingsFactory} instance + */ + public Optional getSecureSettingFactory(Settings settings) { + return Optional.empty(); + } } diff --git a/server/src/main/java/org/opensearch/plugins/SecureSettingsFactory.java b/server/src/main/java/org/opensearch/plugins/SecureSettingsFactory.java new file mode 100644 index 0000000000000..b98d9cf51c129 --- /dev/null +++ b/server/src/main/java/org/opensearch/plugins/SecureSettingsFactory.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugins; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.settings.Settings; + +import java.util.Optional; + +/** + * A factory for creating the instance of the {@link SecureTransportSettingsProvider}, taking into account current settings. + * + * @opensearch.experimental + */ +@ExperimentalApi +public interface SecureSettingsFactory { + /** + * Creates (or provides pre-created) instance of the {@link SecureTransportSettingsProvider} + * @param settings settings + * @return optionally, the instance of the {@link SecureTransportSettingsProvider} + */ + Optional getSecureTransportSettingsProvider(Settings settings); +} diff --git a/server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java b/server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java new file mode 100644 index 0000000000000..6d038ed30c8ff --- /dev/null +++ b/server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java @@ -0,0 +1,90 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugins; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.settings.Settings; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.transport.TcpTransport; + +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLException; + +import java.util.Optional; + +/** + * A provider for security related settings for transports. + * + * @opensearch.experimental + */ +@ExperimentalApi +public interface SecureTransportSettingsProvider { + /** + * An exception handler for errors that might happen while secure transport handle the requests. 
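+ * <p>
+ * A minimal sketch of a handler that only logs the failure (illustrative; assumes a
+ * log4j-style {@code logger} is in scope):
+ * <pre>{@code
+ * ServerExceptionHandler handler = t -> logger.error("secure transport failure", t);
+ * }</pre>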
+ * + * @see SslExceptionHandler + * + * @opensearch.experimental + */ + @ExperimentalApi + @FunctionalInterface + interface ServerExceptionHandler { + static ServerExceptionHandler NOOP = t -> {}; + + /** + * Handler for errors happening during the server side processing of the requests + * @param t the error + */ + void onError(Throwable t); + } + + /** + * If supported, builds the {@link ServerExceptionHandler} instance for {@link HttpServerTransport} instance + * @param settings settings + * @param transport {@link HttpServerTransport} instance + * @return if supported, builds the {@link ServerExceptionHandler} instance + */ + Optional buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport); + + /** + * If supported, builds the {@link ServerExceptionHandler} instance for {@link TcpTransport} instance + * @param settings settings + * @param transport {@link TcpTransport} instance + * @return if supported, builds the {@link ServerExceptionHandler} instance + */ + Optional buildServerTransportExceptionHandler(Settings settings, TcpTransport transport); + + /** + * If supported, builds the {@link SSLEngine} instance for {@link HttpServerTransport} instance + * @param settings settings + * @param transport {@link HttpServerTransport} instance + * @return if supported, builds the {@link SSLEngine} instance + * @throws SSLException throws SSLException if the {@link SSLEngine} instance cannot be built + */ + Optional buildSecureHttpServerEngine(Settings settings, HttpServerTransport transport) throws SSLException; + + /** + * If supported, builds the {@link SSLEngine} instance for {@link TcpTransport} instance + * @param settings settings + * @param transport {@link TcpTransport} instance + * @return if supported, builds the {@link SSLEngine} instance + * @throws SSLException throws SSLException if the {@link SSLEngine} instance cannot be built + */ + Optional buildSecureServerTransportEngine(Settings settings, TcpTransport transport) throws SSLException; + + /** + * If supported, builds the {@link SSLEngine} instance for client transport instance + * @param settings settings + * @param hostname host name + * @param port port + * @return if supported, builds the {@link SSLEngine} instance + * @throws SSLException throws SSLException if the {@link SSLEngine} instance cannot be built + */ + Optional buildSecureClientTransportEngine(Settings settings, String hostname, int port) throws SSLException; +} diff --git a/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java b/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java index de4bdcac6c2b2..1c607ca0dc98b 100644 --- a/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java +++ b/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java @@ -47,33 +47,66 @@ import org.opensearch.http.HttpStats; import org.opensearch.http.NullDispatcher; import org.opensearch.plugins.NetworkPlugin; +import org.opensearch.plugins.SecureTransportSettingsProvider; import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TcpTransport; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportInterceptor; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; +import 
javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLException; + import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Supplier; public class NetworkModuleTests extends OpenSearchTestCase { private ThreadPool threadPool; + private SecureTransportSettingsProvider secureTransportSettingsProvider; @Override public void setUp() throws Exception { super.setUp(); threadPool = new TestThreadPool(NetworkModuleTests.class.getName()); + secureTransportSettingsProvider = new SecureTransportSettingsProvider() { + @Override + public Optional buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport) { + return Optional.empty(); + } + + @Override + public Optional buildServerTransportExceptionHandler(Settings settings, TcpTransport transport) { + return Optional.empty(); + } + + @Override + public Optional buildSecureHttpServerEngine(Settings settings, HttpServerTransport transport) throws SSLException { + return Optional.empty(); + } + + @Override + public Optional buildSecureServerTransportEngine(Settings settings, TcpTransport transport) throws SSLException { + return Optional.empty(); + } + + @Override + public Optional buildSecureClientTransportEngine(Settings settings, String hostname, int port) throws SSLException { + return Optional.empty(); + } + }; } @Override @@ -160,6 +193,56 @@ public Map> getHttpTransports( expectThrows(IllegalStateException.class, () -> newModule.getHttpServerTransportSupplier()); } + public void testRegisterSecureTransport() { + Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "custom-secure").build(); + Supplier custom = () -> null; // content doesn't matter we check reference equality + NetworkPlugin plugin = new NetworkPlugin() { + @Override + public Map> getSecureTransports( + Settings settings, + ThreadPool threadPool, + PageCacheRecycler pageCacheRecycler, + CircuitBreakerService circuitBreakerService, + NamedWriteableRegistry namedWriteableRegistry, + NetworkService networkService, + SecureTransportSettingsProvider secureTransportSettingsProvider, + Tracer tracer + ) { + return Collections.singletonMap("custom-secure", custom); + } + }; + NetworkModule module = newNetworkModule(settings, null, List.of(secureTransportSettingsProvider), plugin); + assertSame(custom, module.getTransportSupplier()); + } + + public void testRegisterSecureHttpTransport() { + Settings settings = Settings.builder() + .put(NetworkModule.HTTP_TYPE_SETTING.getKey(), "custom-secure") + .put(NetworkModule.TRANSPORT_TYPE_KEY, "local") + .build(); + Supplier custom = FakeHttpTransport::new; + + NetworkModule module = newNetworkModule(settings, null, List.of(secureTransportSettingsProvider), new NetworkPlugin() { + @Override + public Map> getSecureHttpTransports( + Settings settings, + ThreadPool threadPool, + BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, + CircuitBreakerService circuitBreakerService, + NamedXContentRegistry xContentRegistry, + NetworkService networkService, + HttpServerTransport.Dispatcher requestDispatcher, + ClusterSettings clusterSettings, + SecureTransportSettingsProvider secureTransportSettingsProvider, + Tracer tracer + ) { + return Collections.singletonMap("custom-secure", custom); + } + }); + assertSame(custom, module.getHttpServerTransportSupplier()); + } + public void 
testOverrideDefault() { Settings settings = Settings.builder() .put(NetworkModule.HTTP_TYPE_SETTING.getKey(), "custom") @@ -505,6 +588,15 @@ private NetworkModule newNetworkModule( Settings settings, List coreTransportInterceptors, NetworkPlugin... plugins + ) { + return newNetworkModule(settings, coreTransportInterceptors, List.of(), plugins); + } + + private NetworkModule newNetworkModule( + Settings settings, + List coreTransportInterceptors, + List secureTransportSettingsProviders, + NetworkPlugin... plugins ) { return new NetworkModule( settings, @@ -519,7 +611,8 @@ private NetworkModule newNetworkModule( new NullDispatcher(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), NoopTracer.INSTANCE, - coreTransportInterceptors + coreTransportInterceptors, + secureTransportSettingsProviders ); } } From 0510c5bc80d708d096d167474cbac7cead7b43db Mon Sep 17 00:00:00 2001 From: Ashish Date: Wed, 20 Mar 2024 21:55:26 +0530 Subject: [PATCH 10/74] Use cluster default remote store path type during snapshot restore (#12753) * Use cluster default remote store path type as fallback during snapshot restore --------- Signed-off-by: Ashish Singh --- .../remotestore/RemoteRestoreSnapshotIT.java | 84 ++++++++++++++++++- .../snapshots/RestoreSnapshotIT.java | 57 +++++++++++++ .../metadata/MetadataCreateIndexService.java | 22 +++-- .../opensearch/snapshots/RestoreService.java | 2 +- 4 files changed, 158 insertions(+), 7 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index 21ce4be9981fb..fff99e65054dc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -18,6 +18,7 @@ import org.opensearch.action.support.PlainActionFuture; import org.opensearch.client.Client; import org.opensearch.client.Requests; +import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; @@ -26,6 +27,7 @@ import org.opensearch.core.rest.RestStatus; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; +import org.opensearch.index.remote.RemoteStorePathType; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.common.ReplicationType; @@ -43,6 +45,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Map; import java.util.Optional; import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; @@ -50,7 +53,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; -import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -257,6 +260,85 @@ public void testRestoreOperationsShallowCopyEnabled() throws IOException, Execut assertDocsPresentInIndex(client, 
restoredIndexName1Doc, numDocsInIndex1 + 2); } + /** + * In this test, we validate presence of remote_store custom data in index metadata for standard index creation and + * on snapshot restore. + */ + public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { + String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNode(); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String snapshotRepoName = "test-restore-snapshot-repo"; + String snapshotName1 = "test-restore-snapshot1"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + String restoredIndexName1version1 = indexName1 + "-restored-1"; + String restoredIndexName1version2 = indexName1 + "-restored-2"; + + createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, true)); + Client client = client(); + Settings indexSettings = getIndexSettings(1, 0).build(); + createIndex(indexName1, indexSettings); + + indexDocuments(client, indexName1, randomIntBetween(5, 10)); + ensureGreen(indexName1); + validateRemoteStorePathType(indexName1, RemoteStorePathType.FIXED); + + logger.info("--> snapshot"); + SnapshotInfo snapshotInfo = createSnapshot(snapshotRepoName, snapshotName1, new ArrayList<>(Arrays.asList(indexName1))); + assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); + assertTrue(snapshotInfo.successfulShards() > 0); + assertEquals(snapshotInfo.totalShards(), snapshotInfo.successfulShards()); + + RestoreSnapshotResponse restoreSnapshotResponse = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1version1) + .get(); + assertEquals(RestStatus.ACCEPTED, restoreSnapshotResponse.status()); + ensureGreen(restoredIndexName1version1); + validateRemoteStorePathType(restoredIndexName1version1, RemoteStorePathType.FIXED); + + client(clusterManagerNode).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), RemoteStorePathType.HASHED_PREFIX) + ) + .get(); + + restoreSnapshotResponse = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1version2) + .get(); + assertEquals(RestStatus.ACCEPTED, restoreSnapshotResponse.status()); + ensureGreen(restoredIndexName1version2); + validateRemoteStorePathType(restoredIndexName1version2, RemoteStorePathType.HASHED_PREFIX); + + // Create index with cluster setting cluster.remote_store.index.path.prefix.type as hashed_prefix. + indexSettings = getIndexSettings(1, 0).build(); + createIndex(indexName2, indexSettings); + ensureGreen(indexName2); + validateRemoteStorePathType(indexName2, RemoteStorePathType.HASHED_PREFIX); + + // Validating that custom data has not changed for indexes which were created before the cluster setting got updated + validateRemoteStorePathType(indexName1, RemoteStorePathType.FIXED); + } + + private void validateRemoteStorePathType(String index, RemoteStorePathType pathType) { + ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState(); + // Validate that the remote_store custom data is present in index metadata for the created index. 
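+        // The map is keyed by RemoteStorePathType.NAME and holds the enum's string form,
+        // e.g. FIXED or HASHED_PREFIX (the two values exercised by this test).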
+ Map remoteCustomData = state.metadata().index(index).getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY); + assertNotNull(remoteCustomData); + assertEquals(pathType.toString(), remoteCustomData.get(RemoteStorePathType.NAME)); + } + public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); String primary = internalCluster().startDataOnlyNode(); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java index 7117818451e14..e76587653e99a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java @@ -39,6 +39,7 @@ import org.opensearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.client.Client; +import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MappingMetadata; @@ -151,6 +152,62 @@ public void testParallelRestoreOperations() { assertThat(client.prepareGet(restoredIndexName2, docId2).get().isExists(), equalTo(true)); } + /** + * In this test, we test that an index created does not have any remote_store custom data in index metadata at the + * time of index creation and after snapshot restore. + */ + public void testNoRemoteStoreCustomDataOnIndexCreationAndRestore() { + String indexName1 = "testindex1"; + String repoName = "test-restore-snapshot-repo"; + String snapshotName1 = "test-restore-snapshot1"; + Path absolutePath = randomRepoPath().toAbsolutePath(); + logger.info("Path [{}]", absolutePath); + String restoredIndexName1 = indexName1 + "-restored"; + String expectedValue = "expected"; + + Client client = client(); + // Write a document + String docId = Integer.toString(randomInt()); + index(indexName1, "_doc", docId, "value", expectedValue); + + createRepository(repoName, "fs", absolutePath); + + logger.info("--> snapshot"); + CreateSnapshotResponse createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot(repoName, snapshotName1) + .setWaitForCompletion(true) + .setIndices(indexName1) + .get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + + ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState(); + + // Validate that the remote_store custom data is not present in index metadata for the created index. 
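+        // Without remote store configured, MetadataCreateIndexService#addRemoteCustomData is a
+        // no-op (its remoteStorePathResolver is null), so no custom entry is ever written.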
+ assertNull(state.metadata().index(indexName1).getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY)); + + RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin() + .cluster() + .prepareRestoreSnapshot(repoName, snapshotName1) + .setWaitForCompletion(false) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1) + .get(); + assertThat(restoreSnapshotResponse1.status(), equalTo(RestStatus.ACCEPTED)); + ensureGreen(restoredIndexName1); + assertThat(client.prepareGet(restoredIndexName1, docId).get().isExists(), equalTo(true)); + + state = client().admin().cluster().prepareState().execute().actionGet().getState(); + + // Validate that the remote_store custom data is not present in index metadata for the restored index. + assertNull(state.metadata().index(restoredIndexName1).getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY)); + } + public void testParallelRestoreOperationsFromSingleSnapshot() throws Exception { String indexName1 = "testindex1"; String indexName2 = "testindex2"; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index f117d1a4a11a2..f6a14d8ec9d63 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -553,11 +553,7 @@ IndexMetadata buildAndValidateTemporaryIndexMetadata( tmpImdBuilder.setRoutingNumShards(routingNumShards); tmpImdBuilder.settings(indexSettings); tmpImdBuilder.system(isSystem); - - if (remoteStorePathResolver != null) { - String pathType = remoteStorePathResolver.resolveType().toString(); - tmpImdBuilder.putCustom(IndexMetadata.REMOTE_STORE_CUSTOM_KEY, Map.of(RemoteStorePathType.NAME, pathType)); - } + addRemoteCustomData(tmpImdBuilder); // Set up everything, now locally create the index to see that things are ok, and apply IndexMetadata tempMetadata = tmpImdBuilder.build(); @@ -566,6 +562,22 @@ IndexMetadata buildAndValidateTemporaryIndexMetadata( return tempMetadata; } + public void addRemoteCustomData(IndexMetadata.Builder tmpImdBuilder) { + if (remoteStorePathResolver != null) { + // It is possible that remote custom data exists already. In such cases, we need to only update the path type + // in the remote store custom data map. + Map existingRemoteCustomData = tmpImdBuilder.removeCustom(IndexMetadata.REMOTE_STORE_CUSTOM_KEY); + Map remoteCustomData = existingRemoteCustomData == null + ? new HashMap<>() + : new HashMap<>(existingRemoteCustomData); + // Determine the path type for use using the remoteStorePathResolver. 
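+            // resolveType() consults the cluster-level path prefix type setting
+            // (cluster.remote_store.index.path.prefix.type); the integration tests above show
+            // FIXED as the default and HASHED_PREFIX once the setting is updated.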
+ String newPathType = remoteStorePathResolver.resolveType().toString(); + String oldPathType = remoteCustomData.put(RemoteStorePathType.NAME, newPathType); + logger.trace(() -> new ParameterizedMessage("Added new path type {}, replaced old path type {}", newPathType, oldPathType)); + tmpImdBuilder.putCustom(IndexMetadata.REMOTE_STORE_CUSTOM_KEY, remoteCustomData); + } + } + private ClusterState applyCreateIndexRequestWithV1Templates( final ClusterState currentState, final CreateIndexClusterStateUpdateRequest request, diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index bf2c7fc74be92..e5ac604e0a5e3 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -216,7 +216,6 @@ public RestoreService( // Task is onboarded for throttling, it will get retried from associated TransportClusterManagerNodeAction. restoreSnapshotTaskKey = clusterService.registerClusterManagerTask(ClusterManagerTaskKeys.RESTORE_SNAPSHOT_KEY, true); - } /** @@ -452,6 +451,7 @@ public ClusterState execute(ClusterState currentState) { .put(snapshotIndexMetadata.getSettings()) .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) ); + createIndexService.addRemoteCustomData(indexMdBuilder); shardLimitValidator.validateShardLimit( renamedIndexName, snapshotIndexMetadata.getSettings(), From e6975e412b09a8d82675edd9a43c20f7c325c0f9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Mar 2024 12:58:33 -0400 Subject: [PATCH 11/74] Bump org.apache.commons:commons-configuration2 from 2.9.0 to 2.10.0 in /plugins/repository-hdfs (#12721) * Bump org.apache.commons:commons-configuration2 Bumps org.apache.commons:commons-configuration2 from 2.9.0 to 2.10.0. --- updated-dependencies: - dependency-name: org.apache.commons:commons-configuration2 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + plugins/repository-hdfs/build.gradle | 2 +- .../licenses/commons-configuration2-2.10.0.jar.sha1 | 1 + .../licenses/commons-configuration2-2.9.0.jar.sha1 | 1 - 4 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 plugins/repository-hdfs/licenses/commons-configuration2-2.10.0.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/commons-configuration2-2.9.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 0b5a7cc705a79..4ba3dbcdc8d9b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -144,6 +144,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `reactor-netty` from 1.1.15 to 1.1.17 ([#12633](https://github.com/opensearch-project/OpenSearch/pull/12633)) - Bump `reactor` from 3.5.14 to 3.5.15 ([#12633](https://github.com/opensearch-project/OpenSearch/pull/12633)) - Bump `peter-evans/create-pull-request` from 5 to 6 ([#12724](https://github.com/opensearch-project/OpenSearch/pull/12724)) +- Bump `org.apache.commons:commons-configuration2` from 2.9.0 to 2.10.0 ([#12721](https://github.com/opensearch-project/OpenSearch/pull/12721)) ### Changed - Allow composite aggregation to run under a parent filter aggregation ([#11499](https://github.com/opensearch-project/OpenSearch/pull/11499)) diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 36843e3bc8700..c1f94320f2681 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -74,7 +74,7 @@ dependencies { api "commons-codec:commons-codec:${versions.commonscodec}" api 'commons-collections:commons-collections:3.2.2' api "org.apache.commons:commons-compress:${versions.commonscompress}" - api 'org.apache.commons:commons-configuration2:2.9.0' + api 'org.apache.commons:commons-configuration2:2.10.0' api 'commons-io:commons-io:2.15.1' api 'org.apache.commons:commons-lang3:3.14.0' implementation 'com.google.re2j:re2j:1.7' diff --git a/plugins/repository-hdfs/licenses/commons-configuration2-2.10.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-configuration2-2.10.0.jar.sha1 new file mode 100644 index 0000000000000..17d1b64781e5b --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-configuration2-2.10.0.jar.sha1 @@ -0,0 +1 @@ +2b93eff3c83e5372262ed4996b609336305a810f \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-configuration2-2.9.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-configuration2-2.9.0.jar.sha1 deleted file mode 100644 index 086c769fe600c..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-configuration2-2.9.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -adc3ee6b84fc62a6e75e901d080adacb72aac61e \ No newline at end of file From 4dbd6fa6c35f7a3d0c5e2ee284c2f8b3cc829a32 Mon Sep 17 00:00:00 2001 From: Shivansh Arora <31575408+shiv0408@users.noreply.github.com> Date: Wed, 20 Mar 2024 22:59:30 +0530 Subject: [PATCH 12/74] Created new ReplicaShardBatchAllocator (#8992) * Created new ReplicaShardBatchAllocator to be used instead of ReplicaShardAllocator for batch calls Signed-off-by: Shivansh Arora --- .../gateway/ReplicaShardBatchAllocator.java | 188 ++++ .../ReplicaShardBatchAllocatorTests.java | 849 ++++++++++++++++++ 2 files changed, 1037 
insertions(+) create mode 100644 server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java create mode 100644 server/src/test/java/org/opensearch/gateway/ReplicaShardBatchAllocatorTests.java diff --git a/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java b/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java new file mode 100644 index 0000000000000..3459f1591b633 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java @@ -0,0 +1,188 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway; + +import org.apache.logging.log4j.Logger; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.cluster.routing.allocation.AllocateUnassignedDecision; +import org.opensearch.cluster.routing.allocation.NodeAllocationResult; +import org.opensearch.cluster.routing.allocation.RoutingAllocation; +import org.opensearch.cluster.routing.allocation.decider.Decision; +import org.opensearch.common.collect.Tuple; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.gateway.AsyncShardFetch.FetchResult; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadata; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadata; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadataBatch; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper.StoreFilesMetadata; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Allocates replica shards in a batch mode + * + * @opensearch.internal + */ +public abstract class ReplicaShardBatchAllocator extends ReplicaShardAllocator { + + /** + * Process existing recoveries of replicas and see if we need to cancel them if we find a better + * match. Today, a better match is one that can perform a no-op recovery while the previous recovery + * has to copy segment files. 
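+ * Shards are grouped into batches so that a single fetch call can be issued for all
+ * shards of the same batch instead of one call per shard.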
+     *
+     * @param allocation the overall routing allocation
+     * @param shardBatches a list of shard batches to check for existing recoveries
+     */
+    public void processExistingRecoveries(RoutingAllocation allocation, List<List<ShardRouting>> shardBatches) {
+        List<Runnable> shardCancellationActions = new ArrayList<>();
+        // iterate through the batches, each batch needs to be processed together as fetch call should be made for shards from same batch
+        for (List<ShardRouting> shardBatch : shardBatches) {
+            List<ShardRouting> eligibleShards = new ArrayList<>();
+            List<ShardRouting> ineligibleShards = new ArrayList<>();
+            // iterate over shards to check for match for each of those
+            for (ShardRouting shard : shardBatch) {
+                if (shard != null && !shard.primary()) {
+                    // need to iterate over all the nodes to find matching shard
+                    if (shouldSkipFetchForRecovery(shard)) {
+                        ineligibleShards.add(shard);
+                        continue;
+                    }
+                    eligibleShards.add(shard);
+                }
+            }
+            AsyncShardFetch.FetchResult<NodeStoreFilesMetadataBatch> shardState = fetchData(eligibleShards, ineligibleShards, allocation);
+            if (!shardState.hasData()) {
+                logger.trace("{}: fetching new stores for initializing shard batch", eligibleShards);
+                continue; // still fetching
+            }
+            for (ShardRouting shard : eligibleShards) {
+                Map<DiscoveryNode, StoreFilesMetadata> nodeShardStores = convertToNodeStoreFilesMetadataMap(shard, shardState);
+
+                Runnable cancellationAction = cancelExistingRecoveryForBetterMatch(shard, allocation, nodeShardStores);
+                if (cancellationAction != null) {
+                    shardCancellationActions.add(cancellationAction);
+                }
+            }
+        }
+        for (Runnable action : shardCancellationActions) {
+            action.run();
+        }
+    }
+
+    abstract protected FetchResult<NodeStoreFilesMetadataBatch> fetchData(
+        List<ShardRouting> eligibleShards,
+        List<ShardRouting> ineligibleShards,
+        RoutingAllocation allocation
+    );
+
+    @Override
+    protected FetchResult<TransportNodesListShardStoreMetadata.NodeStoreFilesMetadata> fetchData(
+        ShardRouting shard,
+        RoutingAllocation allocation
+    ) {
+        logger.error("fetchData for single shard called via batch allocator");
+        throw new IllegalStateException("ReplicaShardBatchAllocator should only be used for a batch of shards");
+    }
+
+    @Override
+    public AllocateUnassignedDecision makeAllocationDecision(ShardRouting unassignedShard, RoutingAllocation allocation, Logger logger) {
+        return makeAllocationDecision(Collections.singletonList(unassignedShard), allocation, logger).get(unassignedShard);
+    }
+
+    @Override
+    public HashMap<ShardRouting, AllocateUnassignedDecision> makeAllocationDecision(
+        List<ShardRouting> shards,
+        RoutingAllocation allocation,
+        Logger logger
+    ) {
+        HashMap<ShardRouting, AllocateUnassignedDecision> shardAllocationDecisions = new HashMap<>();
+        final boolean explain = allocation.debugDecision();
+        List<ShardRouting> eligibleShards = new ArrayList<>();
+        List<ShardRouting> ineligibleShards = new ArrayList<>();
+        HashMap<ShardRouting, Tuple<Decision, Map<String, NodeAllocationResult>>> nodeAllocationDecisions = new HashMap<>();
+        for (ShardRouting shard : shards) {
+            if (!isResponsibleFor(shard)) {
+                // this allocator is not responsible for allocating this shard
+                ineligibleShards.add(shard);
+                shardAllocationDecisions.put(shard, AllocateUnassignedDecision.NOT_TAKEN);
+                continue;
+            }
+
+            Tuple<Decision, Map<String, NodeAllocationResult>> result = canBeAllocatedToAtLeastOneNode(shard, allocation);
+            Decision allocationDecision = result.v1();
+            if (allocationDecision.type() != Decision.Type.YES && (!explain || !hasInitiatedFetching(shard))) {
+                // only return early if we are not in explain mode, or we are in explain mode but we have not
+                // yet attempted to fetch any shard data
+                logger.trace("{}: ignoring allocation, can't be allocated on any node", shard);
+                shardAllocationDecisions.put(
+                    shard,
+                    AllocateUnassignedDecision.no(
+                        UnassignedInfo.AllocationStatus.fromDecision(allocationDecision.type()),
+                        result.v2() != null ? new ArrayList<>(result.v2().values()) : null
+                    )
+                );
+                continue;
+            }
+            // storing the nodeDecisions in nodeAllocationDecisions if the decision is not YES
+            // so that we don't have to compute the decisions again
+            nodeAllocationDecisions.put(shard, result);
+
+            eligibleShards.add(shard);
+        }
+
+        // Do not call fetchData if there are no eligible shards
+        if (eligibleShards.isEmpty()) {
+            return shardAllocationDecisions;
+        }
+        // only fetch data for eligible shards
+        final FetchResult<NodeStoreFilesMetadataBatch> shardsState = fetchData(eligibleShards, ineligibleShards, allocation);
+
+        for (ShardRouting unassignedShard : eligibleShards) {
+            Tuple<Decision, Map<String, NodeAllocationResult>> result = nodeAllocationDecisions.get(unassignedShard);
+            shardAllocationDecisions.put(
+                unassignedShard,
+                getAllocationDecision(
+                    unassignedShard,
+                    allocation,
+                    convertToNodeStoreFilesMetadataMap(unassignedShard, shardsState),
+                    result,
+                    logger
+                )
+            );
+        }
+        return shardAllocationDecisions;
+    }
+
+    private Map<DiscoveryNode, StoreFilesMetadata> convertToNodeStoreFilesMetadataMap(
+        ShardRouting unassignedShard,
+        FetchResult<NodeStoreFilesMetadataBatch> data
+    ) {
+        if (!data.hasData()) {
+            return null;
+        }
+
+        Map<DiscoveryNode, StoreFilesMetadata> map = new HashMap<>();
+
+        data.getData().forEach((discoveryNode, value) -> {
+            Map<ShardId, NodeStoreFilesMetadata> batch = value.getNodeStoreFilesMetadataBatch();
+            NodeStoreFilesMetadata metadata = batch.get(unassignedShard.shardId());
+            if (metadata != null) {
+                map.put(discoveryNode, metadata.storeFilesMetadata());
+            }
+        });
+
+        return map;
+    }
+}
diff --git a/server/src/test/java/org/opensearch/gateway/ReplicaShardBatchAllocatorTests.java b/server/src/test/java/org/opensearch/gateway/ReplicaShardBatchAllocatorTests.java
new file mode 100644
index 0000000000000..464038c93228b
--- /dev/null
+++ b/server/src/test/java/org/opensearch/gateway/ReplicaShardBatchAllocatorTests.java
@@ -0,0 +1,849 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.gateway; + +import com.carrotsearch.randomizedtesting.generators.RandomPicks; + +import org.opensearch.Version; +import org.opensearch.cluster.ClusterInfo; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.OpenSearchAllocationTestCase; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.RecoverySource; +import org.opensearch.cluster.routing.RoutingNode; +import org.opensearch.cluster.routing.RoutingNodes; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.TestShardRouting; +import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.cluster.routing.allocation.RoutingAllocation; +import org.opensearch.cluster.routing.allocation.decider.AllocationDecider; +import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.opensearch.cluster.routing.allocation.decider.Decision; +import org.opensearch.cluster.routing.allocation.decider.SameShardAllocationDecider; +import org.opensearch.common.Nullable; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.set.Sets; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.engine.Engine; +import org.opensearch.index.seqno.ReplicationTracker; +import org.opensearch.index.seqno.RetentionLease; +import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataBatch; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadataBatch; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper; +import org.opensearch.snapshots.SnapshotShardSizeInfo; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import static java.util.Collections.unmodifiableMap; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class ReplicaShardBatchAllocatorTests extends OpenSearchAllocationTestCase { + private static final org.apache.lucene.util.Version MIN_SUPPORTED_LUCENE_VERSION = org.opensearch.Version.CURRENT + .minimumIndexCompatibilityVersion().luceneVersion; + private final ShardId shardId = new ShardId("test", "_na_", 0); + private final DiscoveryNode node1 = newNode("node1"); + private final DiscoveryNode node2 = newNode("node2"); + private final DiscoveryNode node3 = newNode("node3"); + + private TestBatchAllocator testBatchAllocator; + + @Before + public void buildTestAllocator() { + this.testBatchAllocator = new TestBatchAllocator(); + } + + private void 
allocateAllUnassignedBatch(final RoutingAllocation allocation) { + final RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); + List shardToBatch = new ArrayList<>(); + while (iterator.hasNext()) { + shardToBatch.add(iterator.next()); + } + testBatchAllocator.allocateUnassignedBatch(shardToBatch, allocation); + } + + /** + * Verifies that when we are still fetching data in an async manner, the replica shard moves to ignore unassigned. + */ + public void testNoAsyncFetchData() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); + testBatchAllocator.clean(); + allocateAllUnassignedBatch(allocation); + assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); + assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); + } + + /** + * Verifies that on index creation, we don't fetch data for any shards, but keep the replica shard unassigned to let + * the shard allocator to allocate it. There isn't a copy around to find anyhow. + */ + public void testAsyncFetchWithNoShardOnIndexCreation() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica( + yesAllocationDeciders(), + Settings.EMPTY, + UnassignedInfo.Reason.INDEX_CREATED + ); + testBatchAllocator.clean(); + allocateAllUnassignedBatch(allocation); + assertThat(testBatchAllocator.getFetchDataCalledAndClean(), equalTo(false)); + assertThat(testBatchAllocator.getShardEligibleFetchDataCountAndClean(), equalTo(0)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId)); + } + + /** + * Verifies that for anything but index creation, fetch data ends up being called, since we need to go and try + * and find a better copy for the shard. + */ + public void testAsyncFetchOnAnythingButIndexCreation() { + UnassignedInfo.Reason reason = RandomPicks.randomFrom( + random(), + EnumSet.complementOf(EnumSet.of(UnassignedInfo.Reason.INDEX_CREATED)) + ); + RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders(), Settings.EMPTY, reason); + testBatchAllocator.clean(); + allocateAllUnassignedBatch(allocation); + assertThat("failed with reason " + reason, testBatchAllocator.getFetchDataCalledAndClean(), equalTo(true)); + assertThat("failed with reason" + reason, testBatchAllocator.getShardEligibleFetchDataCountAndClean(), equalTo(1)); + } + + /** + * Verifies that when there is a full match (syncId and files) we allocate it to matching node. + */ + public void testSimpleFullMatchAllocation() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); + DiscoveryNode nodeToMatch = randomBoolean() ? node2 : node3; + testBatchAllocator.addData(node1, "MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(nodeToMatch, "MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); + allocateAllUnassignedBatch(allocation); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo(nodeToMatch.getId()) + ); + } + + /** + * Verifies that when there is a sync id match but no files match, we allocate it to matching node. 
+ */ + public void testSyncIdMatch() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); + DiscoveryNode nodeToMatch = randomBoolean() ? node2 : node3; + testBatchAllocator.addData(node1, "MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(nodeToMatch, "MATCH", null, new StoreFileMetadata("file1", 10, "NO_MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); + allocateAllUnassignedBatch(allocation); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo(nodeToMatch.getId()) + ); + } + + /** + * Verifies that when there is no sync id match but files match, we allocate it to matching node. + */ + public void testFileChecksumMatch() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); + DiscoveryNode nodeToMatch = randomBoolean() ? node2 : node3; + testBatchAllocator.addData(node1, "MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(nodeToMatch, "NO_MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); + allocateAllUnassignedBatch(allocation); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo(nodeToMatch.getId()) + ); + } + + public void testPreferCopyWithHighestMatchingOperations() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); + long retainingSeqNoOnPrimary = randomLongBetween(1, Integer.MAX_VALUE); + long retainingSeqNoForNode2 = randomLongBetween(0, retainingSeqNoOnPrimary - 1); + // Rarely use a seqNo above retainingSeqNoOnPrimary, which could in theory happen when primary fails and comes back quickly. 
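+ // node3's lease below always retains strictly more operations than node2's, so the allocator is expected to prefer node3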
+ long retainingSeqNoForNode3 = randomLongBetween(retainingSeqNoForNode2 + 1, retainingSeqNoOnPrimary + 100); + List retentionLeases = Arrays.asList( + newRetentionLease(node1, retainingSeqNoOnPrimary), + newRetentionLease(node2, retainingSeqNoForNode2), + newRetentionLease(node3, retainingSeqNoForNode3) + ); + testBatchAllocator.addData( + node1, + retentionLeases, + "MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.addData( + node2, + "NOT_MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.addData( + node3, + randomSyncId(), + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + allocateAllUnassignedBatch(allocation); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo(node3.getId()) + ); + } + + public void testCancelRecoveryIfFoundCopyWithNoopRetentionLease() { + final UnassignedInfo unassignedInfo; + final Set failedNodes; + if (randomBoolean()) { + failedNodes = Collections.emptySet(); + unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null); + } else { + failedNodes = new HashSet<>(randomSubsetOf(Arrays.asList("node-4", "node-5", "node-6"))); + unassignedInfo = new UnassignedInfo( + UnassignedInfo.Reason.ALLOCATION_FAILED, + null, + null, + randomIntBetween(1, 10), + System.nanoTime(), + System.currentTimeMillis(), + false, + UnassignedInfo.AllocationStatus.NO_ATTEMPT, + failedNodes + ); + } + RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders(), unassignedInfo); + long retainingSeqNo = randomLongBetween(1, Long.MAX_VALUE); + testBatchAllocator.addData( + node1, + Arrays.asList(newRetentionLease(node1, retainingSeqNo), newRetentionLease(node3, retainingSeqNo)), + "MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.addData( + node2, + "NO_MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.addData( + node3, + randomSyncId(), + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + Collection replicaShards = allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED); + List shardRoutingBatch = new ArrayList<>(replicaShards); + List> shardBatchList = Collections.singletonList( + new ArrayList<>(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING)) + ); + + testBatchAllocator.processExistingRecoveries(allocation, shardBatchList); + assertThat(allocation.routingNodesChanged(), equalTo(true)); + List unassignedShards = allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED); + assertThat(unassignedShards, hasSize(1)); + assertThat(unassignedShards.get(0).shardId(), equalTo(shardId)); + assertThat(unassignedShards.get(0).unassignedInfo().getNumFailedAllocations(), equalTo(0)); + assertThat(unassignedShards.get(0).unassignedInfo().getFailedNodeIds(), equalTo(failedNodes)); + } + + public void testNotCancellingRecoveryIfCurrentRecoveryHasRetentionLease() { + RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders()); + List peerRecoveryRetentionLeasesOnPrimary = new 
ArrayList<>(); + long retainingSeqNo = randomLongBetween(1, Long.MAX_VALUE); + peerRecoveryRetentionLeasesOnPrimary.add(newRetentionLease(node1, retainingSeqNo)); + peerRecoveryRetentionLeasesOnPrimary.add(newRetentionLease(node2, randomLongBetween(1, retainingSeqNo))); + if (randomBoolean()) { + peerRecoveryRetentionLeasesOnPrimary.add(newRetentionLease(node3, randomLongBetween(0, retainingSeqNo))); + } + testBatchAllocator.addData( + node1, + peerRecoveryRetentionLeasesOnPrimary, + "MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.addData( + node2, + randomSyncId(), + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.addData( + node3, + randomSyncId(), + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.processExistingRecoveries( + allocation, + Collections.singletonList(new ArrayList<>(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING))) + ); + assertThat(allocation.routingNodesChanged(), equalTo(false)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(0)); + } + + public void testNotCancelIfPrimaryDoesNotHaveValidRetentionLease() { + RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders()); + testBatchAllocator.addData( + node1, + Collections.singletonList(newRetentionLease(node3, randomNonNegativeLong())), + "MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.addData( + node2, + "NOT_MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.addData( + node3, + "NOT_MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.processExistingRecoveries( + allocation, + Collections.singletonList(new ArrayList<>(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING))) + ); + assertThat(allocation.routingNodesChanged(), equalTo(false)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(0)); + } + + public void testIgnoreRetentionLeaseIfCopyIsEmpty() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); + long retainingSeqNo = randomLongBetween(1, Long.MAX_VALUE); + List retentionLeases = new ArrayList<>(); + retentionLeases.add(newRetentionLease(node1, retainingSeqNo)); + retentionLeases.add(newRetentionLease(node2, randomLongBetween(0, retainingSeqNo))); + if (randomBoolean()) { + retentionLeases.add(newRetentionLease(node3, randomLongBetween(0, retainingSeqNo))); + } + testBatchAllocator.addData( + node1, + retentionLeases, + randomSyncId(), + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.addData(node2, null, null); // has retention lease but store is empty + testBatchAllocator.addData( + node3, + randomSyncId(), + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + allocateAllUnassignedBatch(allocation); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + 
equalTo(node3.getId()) + ); + } + + /** + * When we can't find primary data, but still find replica data, we go ahead and keep it unassigned + * to be allocated. This is today behavior, which relies on a primary corruption identified with + * adding a replica and having that replica actually recover and cause the corruption to be identified + * See CorruptFileTest# + */ + public void testNoPrimaryData() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); + testBatchAllocator.addData( + node2, + "MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + allocateAllUnassignedBatch(allocation); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId)); + } + + /** + * Verifies that when there is primary data, but no data at all on other nodes, the shard keeps + * unassigned to be allocated later on. + */ + public void testNoDataForReplicaOnAnyNode() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); + testBatchAllocator.addData( + node1, + "MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + allocateAllUnassignedBatch(allocation); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId)); + } + + /** + * Verifies that when there is primary data, but no matching data at all on other nodes, the shard keeps + * unassigned to be allocated later on. + */ + public void testNoMatchingFilesForReplicaOnAnyNode() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); + testBatchAllocator.addData(node1, "MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node2, "NO_MATCH", null, new StoreFileMetadata("file1", 10, "NO_MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); + allocateAllUnassignedBatch(allocation); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId)); + } + + /** + * When there is no decision or throttle decision across all nodes for the shard, make sure the shard + * moves to the ignore unassigned list. + */ + public void testNoOrThrottleDecidersRemainsInUnassigned() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica( + randomBoolean() ? noAllocationDeciders() : throttleAllocationDeciders() + ); + testBatchAllocator.addData(node1, "MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node2, "MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); + allocateAllUnassignedBatch(allocation); + assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); + assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); + } + + /** + * Tests when the node to allocate to due to matching is being throttled, we move the shard to ignored + * to wait till throttling on it is done. 
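+ * The shard should land on the ignore list instead of being forced onto a non-matching node.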
+ */ + public void testThrottleWhenAllocatingToMatchingNode() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica( + new AllocationDeciders( + Arrays.asList( + new TestAllocateDecision(Decision.YES), + new SameShardAllocationDecider( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ), + new AllocationDecider() { + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + if (node.node().equals(node2)) { + return Decision.THROTTLE; + } + return Decision.YES; + } + } + ) + ) + ); + testBatchAllocator.addData(node1, "MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node2, "MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); + allocateAllUnassignedBatch(allocation); + assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); + assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); + } + + public void testDelayedAllocation() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica( + yesAllocationDeciders(), + Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueHours(1)).build(), + UnassignedInfo.Reason.NODE_LEFT + ); + testBatchAllocator.addData( + node1, + "MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + if (randomBoolean()) { + // we sometime return empty list of files, make sure we test this as well + testBatchAllocator.addData(node2, null, null); + } + allocateAllUnassignedBatch(allocation); + assertThat(allocation.routingNodesChanged(), equalTo(false)); + assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); + assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); + + allocation = onePrimaryOnNode1And1Replica( + yesAllocationDeciders(), + Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueHours(1)).build(), + UnassignedInfo.Reason.NODE_LEFT + ); + testBatchAllocator.addData( + node2, + "MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + allocateAllUnassignedBatch(allocation); + assertThat(allocation.routingNodesChanged(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo(node2.getId()) + ); + } + + public void testCancelRecoveryBetterSyncId() { + RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders()); + testBatchAllocator.addData(node1, "MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node2, "NO_MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node3, "MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); + testBatchAllocator.processExistingRecoveries( + allocation, + Collections.singletonList(new ArrayList<>(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING))) + ); + assertThat(allocation.routingNodesChanged(), equalTo(true)); + 
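+ // cancelling the ongoing recovery on node2 returns the replica to UNASSIGNED so it can be re-routed to node3, whose sync id matches the primary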
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId)); + } + + public void testNotCancellingRecoveryIfSyncedOnExistingRecovery() { + final UnassignedInfo unassignedInfo; + if (randomBoolean()) { + unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null); + } else { + unassignedInfo = new UnassignedInfo( + UnassignedInfo.Reason.ALLOCATION_FAILED, + null, + null, + randomIntBetween(1, 10), + System.nanoTime(), + System.currentTimeMillis(), + false, + UnassignedInfo.AllocationStatus.NO_ATTEMPT, + Collections.singleton("node-4") + ); + } + RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders(), unassignedInfo); + List retentionLeases = new ArrayList<>(); + if (randomBoolean()) { + long retainingSeqNoOnPrimary = randomLongBetween(0, Long.MAX_VALUE); + retentionLeases.add(newRetentionLease(node1, retainingSeqNoOnPrimary)); + if (randomBoolean()) { + retentionLeases.add(newRetentionLease(node2, randomLongBetween(0, retainingSeqNoOnPrimary))); + } + if (randomBoolean()) { + retentionLeases.add(newRetentionLease(node3, randomLongBetween(0, retainingSeqNoOnPrimary))); + } + } + testBatchAllocator.addData( + node1, + retentionLeases, + "MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.addData( + node2, + "MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.addData( + node3, + randomSyncId(), + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ); + testBatchAllocator.processExistingRecoveries( + allocation, + Collections.singletonList(new ArrayList<>(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING))) + ); + assertThat(allocation.routingNodesChanged(), equalTo(false)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(0)); + } + + public void testNotCancellingRecovery() { + RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders()); + testBatchAllocator.addData(node1, "MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node2, "MATCH", null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); + testBatchAllocator.processExistingRecoveries( + allocation, + Collections.singletonList(new ArrayList<>(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING))) + ); + assertThat(allocation.routingNodesChanged(), equalTo(false)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(0)); + } + + public void testDoNotCancelForBrokenNode() { + Set failedNodes = new HashSet<>(); + failedNodes.add(node3.getId()); + if (randomBoolean()) { + failedNodes.add("node4"); + } + UnassignedInfo unassignedInfo = new UnassignedInfo( + UnassignedInfo.Reason.ALLOCATION_FAILED, + null, + null, + randomIntBetween(failedNodes.size(), 10), + System.nanoTime(), + System.currentTimeMillis(), + false, + UnassignedInfo.AllocationStatus.NO_ATTEMPT, + failedNodes + ); + RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders(), unassignedInfo); + long retainingSeqNoOnPrimary = randomLongBetween(0, Long.MAX_VALUE); + 
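+ // node3 holds a lease below that would permit a no-op recovery, but it is in the failed-nodes set, so no cancellation should happen in its favour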
List retentionLeases = Arrays.asList( + newRetentionLease(node1, retainingSeqNoOnPrimary), + newRetentionLease(node3, retainingSeqNoOnPrimary) + ); + testBatchAllocator.addData( + node1, + retentionLeases, + "MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ) + .addData(node2, randomSyncId(), null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node3, randomSyncId(), null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); + testBatchAllocator.processExistingRecoveries( + allocation, + Collections.singletonList(new ArrayList<>(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING))) + ); + assertThat(allocation.routingNodesChanged(), equalTo(false)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED), empty()); + } + + private RoutingAllocation onePrimaryOnNode1And1Replica(AllocationDeciders deciders) { + return onePrimaryOnNode1And1Replica(deciders, Settings.EMPTY, UnassignedInfo.Reason.CLUSTER_RECOVERED); + } + + private RoutingAllocation onePrimaryOnNode1And1Replica(AllocationDeciders deciders, Settings settings, UnassignedInfo.Reason reason) { + ShardRouting primaryShard = TestShardRouting.newShardRouting(shardId, node1.getId(), true, ShardRoutingState.STARTED); + IndexMetadata.Builder indexMetadata = IndexMetadata.builder(shardId.getIndexName()) + .settings(settings(Version.CURRENT).put(settings)) + .numberOfShards(1) + .numberOfReplicas(1) + .putInSyncAllocationIds(0, Sets.newHashSet(primaryShard.allocationId().getId())); + Metadata metadata = Metadata.builder().put(indexMetadata).build(); + // mark shard as delayed if reason is NODE_LEFT + boolean delayed = reason == UnassignedInfo.Reason.NODE_LEFT + && UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(settings).nanos() > 0; + int failedAllocations = reason == UnassignedInfo.Reason.ALLOCATION_FAILED ? 
1 : 0; + RoutingTable routingTable = RoutingTable.builder() + .add( + IndexRoutingTable.builder(shardId.getIndex()) + .addIndexShard( + new IndexShardRoutingTable.Builder(shardId).addShard(primaryShard) + .addShard( + ShardRouting.newUnassigned( + shardId, + false, + RecoverySource.PeerRecoverySource.INSTANCE, + new UnassignedInfo( + reason, + null, + null, + failedAllocations, + System.nanoTime(), + System.currentTimeMillis(), + delayed, + UnassignedInfo.AllocationStatus.NO_ATTEMPT, + Collections.emptySet() + ) + ) + ) + .build() + ) + ) + .build(); + ClusterState state = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)) + .build(); + return new RoutingAllocation( + deciders, + new RoutingNodes(state, false), + state, + ClusterInfo.EMPTY, + SnapshotShardSizeInfo.EMPTY, + System.nanoTime() + ); + } + + private RoutingAllocation onePrimaryOnNode1And1ReplicaRecovering(AllocationDeciders deciders, UnassignedInfo unassignedInfo) { + ShardRouting primaryShard = TestShardRouting.newShardRouting(shardId, node1.getId(), true, ShardRoutingState.STARTED); + Metadata metadata = Metadata.builder() + .put( + IndexMetadata.builder(shardId.getIndexName()) + .settings(settings(Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(1) + .putInSyncAllocationIds(0, Sets.newHashSet(primaryShard.allocationId().getId())) + ) + .build(); + RoutingTable routingTable = RoutingTable.builder() + .add( + IndexRoutingTable.builder(shardId.getIndex()) + .addIndexShard( + new IndexShardRoutingTable.Builder(shardId).addShard(primaryShard) + .addShard( + TestShardRouting.newShardRouting( + shardId, + node2.getId(), + null, + false, + ShardRoutingState.INITIALIZING, + unassignedInfo + ) + ) + .build() + ) + ) + .build(); + ClusterState state = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)) + .build(); + return new RoutingAllocation( + deciders, + new RoutingNodes(state, false), + state, + ClusterInfo.EMPTY, + SnapshotShardSizeInfo.EMPTY, + System.nanoTime() + ); + } + + private RoutingAllocation onePrimaryOnNode1And1ReplicaRecovering(AllocationDeciders deciders) { + return onePrimaryOnNode1And1ReplicaRecovering(deciders, new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null)); + } + + static RetentionLease newRetentionLease(DiscoveryNode node, long retainingSeqNo) { + return new RetentionLease( + ReplicationTracker.getPeerRecoveryRetentionLeaseId(node.getId()), + retainingSeqNo, + randomNonNegativeLong(), + ReplicationTracker.PEER_RECOVERY_RETENTION_LEASE_SOURCE + ); + } + + static String randomSyncId() { + return randomFrom("MATCH", "NOT_MATCH", null); + } + + class TestBatchAllocator extends ReplicaShardBatchAllocator { + private Map data = null; + private AtomicBoolean fetchDataCalled = new AtomicBoolean(false); + private AtomicInteger eligibleShardFetchDataCount = new AtomicInteger(0); + + public void clean() { + data = null; + } + + public boolean getFetchDataCalledAndClean() { + return fetchDataCalled.getAndSet(false); + } + + public int getShardEligibleFetchDataCountAndClean() { + return eligibleShardFetchDataCount.getAndSet(0); + } + + public TestBatchAllocator addData( + DiscoveryNode node, + String syncId, + @Nullable Exception storeFileFetchException, 
+ StoreFileMetadata... files + ) { + return addData(node, Collections.emptyList(), syncId, storeFileFetchException, files); + } + + public TestBatchAllocator addData( + DiscoveryNode node, + List peerRecoveryRetentionLeases, + String syncId, + @Nullable Exception storeFileFetchException, + StoreFileMetadata... files + ) { + if (data == null) { + data = new HashMap<>(); + } + Map filesAsMap = new HashMap<>(); + for (StoreFileMetadata file : files) { + filesAsMap.put(file.name(), file); + } + Map commitData = new HashMap<>(); + if (syncId != null) { + commitData.put(Engine.SYNC_COMMIT_ID, syncId); + } + data.put( + node, + new TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadata( + new TransportNodesListShardStoreMetadataHelper.StoreFilesMetadata( + shardId, + new Store.MetadataSnapshot(unmodifiableMap(filesAsMap), unmodifiableMap(commitData), randomInt()), + peerRecoveryRetentionLeases + ), + storeFileFetchException + ) + ); + return this; + } + + @Override + protected AsyncShardFetch.FetchResult fetchData( + List eligibleShards, + List ineligibleShards, + RoutingAllocation allocation + ) { + fetchDataCalled.set(true); + eligibleShardFetchDataCount.set(eligibleShards.size()); + Map tData = null; + if (data != null) { + tData = new HashMap<>(); + for (Map.Entry entry : data.entrySet()) { + Map shardData = Map.of( + shardId, + entry.getValue() + ); + tData.put( + entry.getKey(), + new TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadataBatch(entry.getKey(), shardData) + ); + } + } + return new AsyncShardFetch.FetchResult<>(tData, new HashMap<>() { + { + put(shardId, Collections.emptySet()); + } + }); + } + + @Override + protected boolean hasInitiatedFetching(ShardRouting shard) { + return fetchDataCalled.get(); + } + } +} From d5b27b120705857f29fdc3e5ada47e2b087a050a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Mar 2024 15:46:06 -0400 Subject: [PATCH 13/74] Bump com.azure:azure-json from 1.0.1 to 1.1.0 in /plugins/repository-azure (#12723) * Bump com.azure:azure-json in /plugins/repository-azure Bumps [com.azure:azure-json](https://github.com/Azure/azure-sdk-for-java) from 1.0.1 to 1.1.0. - [Release notes](https://github.com/Azure/azure-sdk-for-java/releases) - [Commits](https://github.com/Azure/azure-sdk-for-java/compare/azure-json_1.0.1...v1.1.0) --- updated-dependencies: - dependency-name: com.azure:azure-json dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + plugins/repository-azure/build.gradle | 2 +- plugins/repository-azure/licenses/azure-json-1.0.1.jar.sha1 | 1 - plugins/repository-azure/licenses/azure-json-1.1.0.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-azure/licenses/azure-json-1.0.1.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-json-1.1.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 4ba3dbcdc8d9b..026783a0d6089 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -145,6 +145,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `reactor` from 3.5.14 to 3.5.15 ([#12633](https://github.com/opensearch-project/OpenSearch/pull/12633)) - Bump `peter-evans/create-pull-request` from 5 to 6 ([#12724](https://github.com/opensearch-project/OpenSearch/pull/12724)) - Bump `org.apache.commons:commons-configuration2` from 2.9.0 to 2.10.0 ([#12721](https://github.com/opensearch-project/OpenSearch/pull/12721)) +- Bump `com.azure:azure-json` from 1.0.1 to 1.1.0 ([#12723](https://github.com/opensearch-project/OpenSearch/pull/12723)) ### Changed - Allow composite aggregation to run under a parent filter aggregation ([#11499](https://github.com/opensearch-project/OpenSearch/pull/11499)) diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 31db767b2c68e..c7836170d658f 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -45,7 +45,7 @@ opensearchplugin { dependencies { api 'com.azure:azure-core:1.47.0' - api 'com.azure:azure-json:1.0.1' + api 'com.azure:azure-json:1.1.0' api 'com.azure:azure-storage-common:12.21.2' api 'com.azure:azure-core-http-netty:1.12.8' api "io.netty:netty-codec-dns:${versions.netty}" diff --git a/plugins/repository-azure/licenses/azure-json-1.0.1.jar.sha1 b/plugins/repository-azure/licenses/azure-json-1.0.1.jar.sha1 deleted file mode 100644 index 128a82717fef9..0000000000000 --- a/plugins/repository-azure/licenses/azure-json-1.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -abdfdb0c49eebe75ed8532d047dea0c9f13c30ac \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-json-1.1.0.jar.sha1 b/plugins/repository-azure/licenses/azure-json-1.1.0.jar.sha1 new file mode 100644 index 0000000000000..e44ee47c40253 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-json-1.1.0.jar.sha1 @@ -0,0 +1 @@ +1f21cea72f54a6af3b0bb6831eb3874bd4afd213 \ No newline at end of file From 29086216427c26f0a82bb692a854dea988aa1193 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Mar 2024 16:07:09 -0400 Subject: [PATCH 14/74] Bump com.google.api.grpc:proto-google-common-protos from 2.33.0 to 2.34.0 in /plugins/repository-gcs (#12365) * Bump com.google.api.grpc:proto-google-common-protos Bumps [com.google.api.grpc:proto-google-common-protos](https://github.com/googleapis/sdk-platform-java) from 2.33.0 to 2.34.0. 
- [Release notes](https://github.com/googleapis/sdk-platform-java/releases) - [Changelog](https://github.com/googleapis/sdk-platform-java/blob/main/CHANGELOG.md) - [Commits](https://github.com/googleapis/sdk-platform-java/compare/v2.33.0...v2.34.0) --- updated-dependencies: - dependency-name: com.google.api.grpc:proto-google-common-protos dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] Signed-off-by: Andriy Redko --------- Signed-off-by: dependabot[bot] Signed-off-by: Andriy Redko Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 2 +- plugins/repository-gcs/build.gradle | 5 ++++- .../licenses/proto-google-common-protos-2.33.0.jar.sha1 | 1 - .../licenses/proto-google-common-protos-2.37.1.jar.sha1 | 1 + 4 files changed, 6 insertions(+), 3 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/proto-google-common-protos-2.33.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/proto-google-common-protos-2.37.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 026783a0d6089..504eba04993a2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -123,7 +123,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Dependencies - Bump `peter-evans/find-comment` from 2 to 3 ([#12288](https://github.com/opensearch-project/OpenSearch/pull/12288)) -- Bump `com.google.api.grpc:proto-google-common-protos` from 2.25.1 to 2.33.0 ([#12289](https://github.com/opensearch-project/OpenSearch/pull/12289)) +- Bump `com.google.api.grpc:proto-google-common-protos` from 2.25.1 to 2.37.1 ([#12289](https://github.com/opensearch-project/OpenSearch/pull/12289), [#12365](https://github.com/opensearch-project/OpenSearch/pull/12365)) - Bump `com.squareup.okio:okio` from 3.7.0 to 3.8.0 ([#12290](https://github.com/opensearch-project/OpenSearch/pull/12290)) - Bump `gradle/wrapper-validation-action` from 1 to 2 ([#12367](https://github.com/opensearch-project/OpenSearch/pull/12367)) - Bump `netty` from 4.1.106.Final to 4.1.107.Final ([#12372](https://github.com/opensearch-project/OpenSearch/pull/12372)) diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 0ddcf0f6dddca..1dfc64e19601c 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -60,7 +60,7 @@ dependencies { api 'com.google.api-client:google-api-client:2.2.0' - api 'com.google.api.grpc:proto-google-common-protos:2.33.0' + api 'com.google.api.grpc:proto-google-common-protos:2.37.1' api 'com.google.api.grpc:proto-google-iam-v1:0.12.0' api "com.google.auth:google-auth-library-credentials:${versions.google_auth}" @@ -149,6 +149,9 @@ thirdPartyAudit { 'com.google.appengine.api.urlfetch.URLFetchService', 'com.google.appengine.api.urlfetch.URLFetchServiceFactory', 'com.google.auth.oauth2.GdchCredentials', + 'com.google.protobuf.MapFieldBuilder', + 'com.google.protobuf.MapFieldBuilder$Converter', + 'com.google.protobuf.MapFieldReflectionAccessor', 'com.google.protobuf.util.JsonFormat', 'com.google.protobuf.util.JsonFormat$Parser', 'com.google.protobuf.util.JsonFormat$Printer', diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.33.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.33.0.jar.sha1 deleted file mode 100644 index 746e4e99fd881..0000000000000 --- 
a/plugins/repository-gcs/licenses/proto-google-common-protos-2.33.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -644e11df1cec6d38a63a9a06a701e48c398b87d0 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.37.1.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.37.1.jar.sha1 new file mode 100644 index 0000000000000..92f991778ccc3 --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-common-protos-2.37.1.jar.sha1 @@ -0,0 +1 @@ +3b8759ef0468cced72f8f0d4fc3cc57aeb8139f8 \ No newline at end of file From 7ad30171a8869f659c1feeb6185acf023ebaa203 Mon Sep 17 00:00:00 2001 From: rajiv-kv <157019998+rajiv-kv@users.noreply.github.com> Date: Thu, 21 Mar 2024 03:01:15 +0530 Subject: [PATCH 15/74] Light weight Transport action to verify local term before fetching cluster-state from remote (#12252) Signed-off-by: Rajiv Kumar Vaidyanathan --- CHANGELOG.md | 1 + .../cluster/state/FetchByTermVersionIT.java | 161 +++++++++ .../org/opensearch/action/ActionModule.java | 3 + .../state/TransportClusterStateAction.java | 11 +- .../state/term/GetTermVersionAction.java | 26 ++ .../state/term/GetTermVersionRequest.java | 34 ++ .../state/term/GetTermVersionResponse.java | 50 +++ .../term/TransportGetTermVersionAction.java | 85 +++++ .../cluster/state/term/package-info.java | 10 + .../TransportClusterManagerNodeAction.java | 183 +++++++--- .../coordination/ClusterStateTermVersion.java | 110 ++++++ .../state/term/ClusterTermVersionIT.java | 121 +++++++ .../state/term/ClusterTermVersionTests.java | 26 ++ ...ransportClusterManagerNodeActionTests.java | 18 - ...TransportClusterManagerTermCheckTests.java | 320 ++++++++++++++++++ .../snapshots/SnapshotResiliencyTests.java | 14 + 16 files changed, 1111 insertions(+), 62 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/cluster/state/FetchByTermVersionIT.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/state/term/GetTermVersionAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/state/term/GetTermVersionRequest.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/state/term/GetTermVersionResponse.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/state/term/TransportGetTermVersionAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/state/term/package-info.java create mode 100644 server/src/main/java/org/opensearch/cluster/coordination/ClusterStateTermVersion.java create mode 100644 server/src/test/java/org/opensearch/action/admin/cluster/state/term/ClusterTermVersionIT.java create mode 100644 server/src/test/java/org/opensearch/action/admin/cluster/state/term/ClusterTermVersionTests.java create mode 100644 server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerTermCheckTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 504eba04993a2..c65b20b99bc25 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -120,6 +120,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Admission Control] Integrated IO Based AdmissionController to AdmissionControl Framework ([#12583](https://github.com/opensearch-project/OpenSearch/pull/12583)) - Introduce a new setting `index.check_pending_flush.enabled` to expose the ability to disable the check for pending flushes by write threads ([#12710](https://github.com/opensearch-project/OpenSearch/pull/12710)) - Built-in 
secure transports support ([#12435](https://github.com/opensearch-project/OpenSearch/pull/12435)) +- Lightweight Transport action to verify local term before fetching cluster-state from remote ([#12252](https://github.com/opensearch-project/OpenSearch/pull/12252/)) ### Dependencies - Bump `peter-evans/find-comment` from 2 to 3 ([#12288](https://github.com/opensearch-project/OpenSearch/pull/12288)) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/state/FetchByTermVersionIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/state/FetchByTermVersionIT.java new file mode 100644 index 0000000000000..cef184b3fddf9 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/state/FetchByTermVersionIT.java @@ -0,0 +1,161 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.state; + +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.action.admin.cluster.state.term.GetTermVersionAction; +import org.opensearch.action.admin.cluster.state.term.GetTermVersionResponse; +import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.coordination.ClusterStateTermVersion; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.transport.TransportService; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.IntStream; + +import static org.hamcrest.Matchers.is; + +@SuppressWarnings("unchecked") +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class FetchByTermVersionIT extends OpenSearchIntegTestCase { + + AtomicBoolean isTermVersionCheckEnabled = new AtomicBoolean(); + + protected Collection> nodePlugins() { + return List.of(MockTransportService.TestPlugin.class); + } + + AtomicBoolean forceFetchFromCM = new AtomicBoolean(); + + public void testClusterStateResponseFromDataNode() throws Exception { + String cm = internalCluster().startClusterManagerOnlyNode(); + List dns = internalCluster().startDataOnlyNodes(5); + int numberOfShards = dns.size(); + stubClusterTermResponse(cm); + + ensureClusterSizeConsistency(); + ensureGreen(); + + List indices = new ArrayList<>(); + + // Create a large sized cluster-state by creating field mappings + IntStream.range(0, 20).forEachOrdered(n -> { + String index = "index_" + n; + createIndex( + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), Long.MAX_VALUE) + .build() + ); + indices.add(index); + }); + IntStream.range(0, 5).forEachOrdered(n -> { + List mappings = new ArrayList<>(); + for (int i = 0; i < 2000; i++) { + mappings.add("t-123456789-123456789-" + n + "-" + i); + 
mappings.add("type=keyword"); + } + PutMappingRequest request = new PutMappingRequest().source(mappings.toArray(new String[0])) + .indices(indices.toArray(new String[0])); + internalCluster().dataNodeClient().admin().indices().putMapping(request).actionGet(); + }); + ensureGreen(); + + ClusterStateResponse stateResponseM = internalCluster().clusterManagerClient() + .admin() + .cluster() + .state(new ClusterStateRequest()) + .actionGet(); + + waitUntil(() -> { + ClusterStateResponse stateResponseD = internalCluster().dataNodeClient() + .admin() + .cluster() + .state(new ClusterStateRequest()) + .actionGet(); + return stateResponseD.getState().stateUUID().equals(stateResponseM.getState().stateUUID()); + }); + // cluster state response time with term check enabled on datanode + isTermVersionCheckEnabled.set(true); + { + List latencies = new ArrayList<>(); + IntStream.range(0, 50).forEachOrdered(n1 -> { + ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + long start = System.currentTimeMillis(); + ClusterStateResponse stateResponse = dataNodeClient().admin().cluster().state(clusterStateRequest).actionGet(); + latencies.add(System.currentTimeMillis() - start); + assertThat(stateResponse.getClusterName().value(), is(internalCluster().getClusterName())); + assertThat(stateResponse.getState().nodes().getSize(), is(internalCluster().getNodeNames().length)); + assertThat(stateResponse.getState().metadata().indices().size(), is(indices.size())); + Map fieldMappings = (Map) stateResponse.getState() + .metadata() + .index(indices.get(0)) + .mapping() + .sourceAsMap() + .get("properties"); + + assertThat(fieldMappings.size(), is(10000)); + }); + Collections.sort(latencies); + + logger.info("cluster().state() fetch with Term Version enabled took {} milliseconds", (latencies.get(latencies.size() / 2))); + } + // cluster state response time with term check disabled on datanode + isTermVersionCheckEnabled.set(false); + { + List latencies = new ArrayList<>(); + IntStream.range(0, 50).forEachOrdered(n1 -> { + ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + long start = System.currentTimeMillis(); + ClusterStateResponse stateResponse = dataNodeClient().admin().cluster().state(clusterStateRequest).actionGet(); + latencies.add(System.currentTimeMillis() - start); + assertThat(stateResponse.getClusterName().value(), is(internalCluster().getClusterName())); + assertThat(stateResponse.getState().nodes().getSize(), is(internalCluster().getNodeNames().length)); + assertThat(stateResponse.getState().metadata().indices().size(), is(indices.size())); + Map typeProperties = (Map) stateResponse.getState() + .metadata() + .index(indices.get(0)) + .mapping() + .sourceAsMap() + .get("properties"); + assertThat(typeProperties.size(), is(10000)); + + }); + Collections.sort(latencies); + logger.info("cluster().state() fetch with Term Version disabled took {} milliseconds", (latencies.get(latencies.size() / 2))); + } + + } + + private void stubClusterTermResponse(String master) { + MockTransportService primaryService = (MockTransportService) internalCluster().getInstance(TransportService.class, master); + primaryService.addRequestHandlingBehavior(GetTermVersionAction.NAME, (handler, request, channel, task) -> { + if (isTermVersionCheckEnabled.get()) { + handler.messageReceived(request, channel, task); + } else { + // always return response that does not match + channel.sendResponse(new GetTermVersionResponse(new ClusterStateTermVersion(new ClusterName("test"), "1", -1, -1))); + } + }); 
+ } +} diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index b19bf9590f43b..f827b7f3f0097 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -107,6 +107,8 @@ import org.opensearch.action.admin.cluster.snapshots.status.TransportSnapshotsStatusAction; import org.opensearch.action.admin.cluster.state.ClusterStateAction; import org.opensearch.action.admin.cluster.state.TransportClusterStateAction; +import org.opensearch.action.admin.cluster.state.term.GetTermVersionAction; +import org.opensearch.action.admin.cluster.state.term.TransportGetTermVersionAction; import org.opensearch.action.admin.cluster.stats.ClusterStatsAction; import org.opensearch.action.admin.cluster.stats.TransportClusterStatsAction; import org.opensearch.action.admin.cluster.storedscripts.DeleteStoredScriptAction; @@ -614,6 +616,7 @@ public void reg actions.register(ClusterAllocationExplainAction.INSTANCE, TransportClusterAllocationExplainAction.class); actions.register(ClusterStatsAction.INSTANCE, TransportClusterStatsAction.class); actions.register(ClusterStateAction.INSTANCE, TransportClusterStateAction.class); + actions.register(GetTermVersionAction.INSTANCE, TransportGetTermVersionAction.class); actions.register(ClusterHealthAction.INSTANCE, TransportClusterHealthAction.class); actions.register(ClusterUpdateSettingsAction.INSTANCE, TransportClusterUpdateSettingsAction.class); actions.register(ClusterRerouteAction.INSTANCE, TransportClusterRerouteAction.class); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java index 4aaa7f1950823..cae465a90446e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -125,9 +125,12 @@ protected void clusterManagerOperation( ? clusterState -> true : clusterState -> clusterState.metadata().version() >= request.waitForMetadataVersion(); + // action will be executed on local node, if either the request is local only (or) the local node has the same cluster-state as + // ClusterManager final Predicate acceptableClusterStateOrNotMasterPredicate = request.local() - ? acceptableClusterStatePredicate - : acceptableClusterStatePredicate.or(clusterState -> clusterState.nodes().isLocalNodeElectedClusterManager() == false); + || !state.nodes().isLocalNodeElectedClusterManager() + ? 
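+        // In effect: a local-only request, or a node that is not the elected cluster-manager, keeps
+        // the original predicate; on the elected cluster-manager the predicate additionally passes
+        // once the node is no longer elected, so a wait cannot hang on a stale election. Sketched:
+        //
+        //   request.local() | local node elected CM | predicate applied
+        //   ----------------+-----------------------+--------------------------------------------
+        //   true            | any                   | acceptableClusterStatePredicate
+        //   false           | false                 | acceptableClusterStatePredicate
+        //   false           | true                  | ...or(local node no longer elected CM)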
acceptableClusterStatePredicate
+                : acceptableClusterStatePredicate.or(clusterState -> clusterState.nodes().isLocalNodeElectedClusterManager() == false);
 
         if (acceptableClusterStatePredicate.test(state)) {
             ActionListener.completeWith(listener, () -> buildResponse(request, state));
@@ -231,4 +234,8 @@ private ClusterStateResponse buildResponse(final ClusterStateRequest request, fi
         return new ClusterStateResponse(currentState.getClusterName(), builder.build(), false);
     }
 
+    @Override
+    protected boolean localExecuteSupportedByAction() {
+        return true;
+    }
 }
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/term/GetTermVersionAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/term/GetTermVersionAction.java
new file mode 100644
index 0000000000000..3344fd549b23f
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/term/GetTermVersionAction.java
@@ -0,0 +1,26 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.admin.cluster.state.term;
+
+import org.opensearch.action.ActionType;
+
+/**
+ * Transport action for fetching cluster term and version
+ *
+ * @opensearch.internal
+ */
+public class GetTermVersionAction extends ActionType<GetTermVersionResponse> {
+
+    public static final GetTermVersionAction INSTANCE = new GetTermVersionAction();
+    public static final String NAME = "cluster:monitor/term";
+
+    private GetTermVersionAction() {
+        super(NAME, GetTermVersionResponse::new);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/term/GetTermVersionRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/term/GetTermVersionRequest.java
new file mode 100644
index 0000000000000..b099f8087bd15
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/term/GetTermVersionRequest.java
@@ -0,0 +1,34 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.admin.cluster.state.term;
+
+import org.opensearch.action.ActionRequestValidationException;
+import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest;
+import org.opensearch.core.common.io.stream.StreamInput;
+
+import java.io.IOException;
+
+/**
+ * Request object to get cluster term and version
+ *
+ * @opensearch.internal
+ */
+public class GetTermVersionRequest extends ClusterManagerNodeReadRequest<GetTermVersionRequest> {
+
+    public GetTermVersionRequest() {}
+
+    public GetTermVersionRequest(StreamInput in) throws IOException {
+        super(in);
+    }
+
+    @Override
+    public ActionRequestValidationException validate() {
+        return null;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/term/GetTermVersionResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/term/GetTermVersionResponse.java
new file mode 100644
index 0000000000000..16b355a80d1f2
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/term/GetTermVersionResponse.java
@@ -0,0 +1,50 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.action.admin.cluster.state.term; + +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.coordination.ClusterStateTermVersion; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Response object of cluster term + * + * @opensearch.internal + */ +public class GetTermVersionResponse extends ActionResponse { + + private final ClusterStateTermVersion clusterStateTermVersion; + + public GetTermVersionResponse(ClusterStateTermVersion clusterStateTermVersion) { + this.clusterStateTermVersion = clusterStateTermVersion; + } + + public GetTermVersionResponse(StreamInput in) throws IOException { + super(in); + this.clusterStateTermVersion = new ClusterStateTermVersion(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + clusterStateTermVersion.writeTo(out); + } + + public ClusterStateTermVersion getClusterStateTermVersion() { + return clusterStateTermVersion; + } + + public boolean matches(ClusterState clusterState) { + return clusterStateTermVersion != null && clusterStateTermVersion.equals(new ClusterStateTermVersion(clusterState)); + } + +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/term/TransportGetTermVersionAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/term/TransportGetTermVersionAction.java new file mode 100644 index 0000000000000..88305252aa99c --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/term/TransportGetTermVersionAction.java @@ -0,0 +1,85 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.cluster.state.term; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.coordination.ClusterStateTermVersion; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; + +/** + * Transport action for obtaining cluster term and version from cluster-manager + * + * @opensearch.internal + */ +public class TransportGetTermVersionAction extends TransportClusterManagerNodeReadAction { + + private final Logger logger = LogManager.getLogger(getClass()); + + @Inject + public TransportGetTermVersionAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + GetTermVersionAction.NAME, + false, + transportService, + clusterService, + threadPool, + actionFilters, + GetTermVersionRequest::new, + indexNameExpressionResolver + ); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + public GetTermVersionResponse read(StreamInput in) throws IOException { + return new GetTermVersionResponse(in); + } + + @Override + protected ClusterBlockException checkBlock(GetTermVersionRequest request, ClusterState state) { + // cluster state term and version needs to be retrieved even on a fully blocked cluster + return null; + } + + @Override + protected void clusterManagerOperation( + GetTermVersionRequest request, + ClusterState state, + ActionListener listener + ) throws Exception { + ActionListener.completeWith(listener, () -> buildResponse(request, state)); + } + + private GetTermVersionResponse buildResponse(GetTermVersionRequest request, ClusterState state) { + return new GetTermVersionResponse(new ClusterStateTermVersion(state)); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/term/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/term/package-info.java new file mode 100644 index 0000000000000..0ee559c527d7d --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/term/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Cluster Term transport handler. 
*/ +package org.opensearch.action.admin.cluster.state.term; diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java index 536ddcdd402e2..6a081f9dfecde 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java @@ -37,6 +37,9 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.action.ActionListenerResponseHandler; import org.opensearch.action.ActionRunnable; +import org.opensearch.action.admin.cluster.state.term.GetTermVersionAction; +import org.opensearch.action.admin.cluster.state.term.GetTermVersionRequest; +import org.opensearch.action.admin.cluster.state.term.GetTermVersionResponse; import org.opensearch.action.bulk.BackoffPolicy; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; @@ -66,11 +69,16 @@ import org.opensearch.transport.ConnectTransportException; import org.opensearch.transport.RemoteTransportException; import org.opensearch.transport.TransportException; +import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; import java.io.IOException; +import java.util.function.BiConsumer; +import java.util.function.Consumer; import java.util.function.Predicate; +import static org.opensearch.Version.V_3_0_0; + /** * A base class for operations that needs to be performed on the cluster-manager node. * @@ -252,23 +260,13 @@ protected void doStart(ClusterState clusterState) { }); } } else { - ActionListener delegate = ActionListener.delegateResponse(listener, (delegatedListener, t) -> { - if (t instanceof FailedToCommitClusterStateException || t instanceof NotClusterManagerException) { - logger.debug( - () -> new ParameterizedMessage( - "master could not publish cluster state or " - + "stepped down before publishing action [{}], scheduling a retry", - actionName - ), - t - ); - retryOnMasterChange(clusterState, t); - } else { - delegatedListener.onFailure(t); - } - }); threadPool.executor(executor) - .execute(ActionRunnable.wrap(delegate, l -> clusterManagerOperation(task, request, clusterState, l))); + .execute( + ActionRunnable.wrap( + getDelegateForLocalExecute(clusterState), + l -> clusterManagerOperation(task, request, clusterState, l) + ) + ); } } else { if (nodes.getClusterManagerNode() == null) { @@ -276,32 +274,15 @@ protected void doStart(ClusterState clusterState) { retryOnMasterChange(clusterState, null); } else { DiscoveryNode clusterManagerNode = nodes.getClusterManagerNode(); - final String actionName = getClusterManagerActionName(clusterManagerNode); - transportService.sendRequest( - clusterManagerNode, - actionName, - request, - new ActionListenerResponseHandler(listener, TransportClusterManagerNodeAction.this::read) { - @Override - public void handleException(final TransportException exp) { - Throwable cause = exp.unwrapCause(); - if (cause instanceof ConnectTransportException - || (exp instanceof RemoteTransportException && cause instanceof NodeClosedException)) { - // we want to retry here a bit to see if a new cluster-manager is elected - logger.debug( - "connection exception while trying to forward request with action name [{}] to " - + "master node [{}], scheduling a retry. 
Error: [{}]", - actionName, - nodes.getClusterManagerNode(), - exp.getDetailedMessage() - ); - retryOnMasterChange(clusterState, cause); - } else { - listener.onFailure(exp); - } - } - } - ); + if (clusterManagerNode.getVersion().onOrAfter(V_3_0_0) && localExecuteSupportedByAction()) { + BiConsumer executeOnLocalOrClusterManager = clusterStateLatestChecker( + this::executeOnLocalNode, + this::executeOnClusterManager + ); + executeOnLocalOrClusterManager.accept(clusterManagerNode, clusterState); + } else { + executeOnClusterManager(clusterManagerNode, clusterState); + } } } } catch (Exception e) { @@ -351,6 +332,114 @@ public void onTimeout(TimeValue timeout) { } }, statePredicate); } + + private ActionListener getDelegateForLocalExecute(ClusterState clusterState) { + return ActionListener.delegateResponse(listener, (delegatedListener, t) -> { + if (t instanceof FailedToCommitClusterStateException || t instanceof NotClusterManagerException) { + logger.debug( + () -> new ParameterizedMessage( + "cluster-manager could not publish cluster state or " + + "stepped down before publishing action [{}], scheduling a retry", + actionName + ), + t + ); + + retryOnMasterChange(clusterState, t); + } else { + delegatedListener.onFailure(t); + } + }); + } + + protected BiConsumer clusterStateLatestChecker( + Consumer onLatestLocalState, + BiConsumer onStaleLocalState + ) { + return (clusterManagerNode, clusterState) -> { + transportService.sendRequest( + clusterManagerNode, + GetTermVersionAction.NAME, + new GetTermVersionRequest(), + new TransportResponseHandler() { + @Override + public void handleResponse(GetTermVersionResponse response) { + boolean isLatestClusterStatePresentOnLocalNode = response.matches(clusterState); + logger.trace( + "Received GetTermVersionResponse response : ClusterStateTermVersion {}, latest-on-local {}", + response.getClusterStateTermVersion(), + isLatestClusterStatePresentOnLocalNode + ); + if (isLatestClusterStatePresentOnLocalNode) { + onLatestLocalState.accept(clusterState); + } else { + onStaleLocalState.accept(clusterManagerNode, clusterState); + } + } + + @Override + public void handleException(TransportException exp) { + handleTransportException(clusterManagerNode, clusterState, exp); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + + @Override + public GetTermVersionResponse read(StreamInput in) throws IOException { + return new GetTermVersionResponse(in); + } + + } + ); + }; + } + + private void executeOnLocalNode(ClusterState localClusterState) { + Runnable runTask = ActionRunnable.wrap( + getDelegateForLocalExecute(localClusterState), + l -> clusterManagerOperation(task, request, localClusterState, l) + ); + threadPool.executor(executor).execute(runTask); + } + + private void executeOnClusterManager(DiscoveryNode clusterManagerNode, ClusterState clusterState) { + final String actionName = getClusterManagerActionName(clusterManagerNode); + + transportService.sendRequest( + clusterManagerNode, + actionName, + request, + new ActionListenerResponseHandler(listener, TransportClusterManagerNodeAction.this::read) { + @Override + public void handleException(final TransportException exp) { + handleTransportException(clusterManagerNode, clusterState, exp); + } + } + ); + } + + private void handleTransportException(DiscoveryNode clusterManagerNode, ClusterState clusterState, final TransportException exp) { + Throwable cause = exp.unwrapCause(); + if (cause instanceof ConnectTransportException + || (exp instanceof RemoteTransportException && 
cause instanceof NodeClosedException)) {
+            // we want to retry here a bit to see if a new cluster-manager is elected
+
+            logger.debug(
+                "connection exception while trying to forward request with action name [{}] to "
+                    + "cluster-manager node [{}], scheduling a retry. Error: [{}]",
+                actionName,
+                clusterManagerNode,
+                exp.getDetailedMessage()
+            );
+
+            retryOnMasterChange(clusterState, cause);
+        } else {
+            listener.onFailure(exp);
+        }
+    }
 }
 
 /**
@@ -372,4 +461,14 @@ protected String getMasterActionName(DiscoveryNode node) {
         return getClusterManagerActionName(node);
     }
 
+    /**
+     * Override to return {@code true} if the transport action can be executed locally instead of
+     * always on the cluster-manager node (read-only actions).
+     * The action is executed locally only if this method returns true AND
+     * the ClusterState on the local node is in sync with the cluster-manager's.
+     *
+     * @return true if the action can be executed on the local node
+     */
+    protected boolean localExecuteSupportedByAction() {
+        return false;
+    }
 }
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ClusterStateTermVersion.java b/server/src/main/java/org/opensearch/cluster/coordination/ClusterStateTermVersion.java
new file mode 100644
index 0000000000000..b317b0d362825
--- /dev/null
+++ b/server/src/main/java/org/opensearch/cluster/coordination/ClusterStateTermVersion.java
@@ -0,0 +1,110 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.cluster.coordination;
+
+import org.opensearch.cluster.ClusterName;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.common.io.stream.Writeable;
+
+import java.io.IOException;
+
+/**
+ * Identifies a specific version of ClusterState at a node.
+ */ +public class ClusterStateTermVersion implements Writeable { + + private final ClusterName clusterName; + private final String clusterUUID; + private final long term; + private final long version; + + public ClusterStateTermVersion(ClusterName clusterName, String clusterUUID, long term, long version) { + this.clusterName = clusterName; + this.clusterUUID = clusterUUID; + this.term = term; + this.version = version; + } + + public ClusterStateTermVersion(StreamInput in) throws IOException { + this.clusterName = new ClusterName(in); + this.clusterUUID = in.readString(); + this.term = in.readLong(); + this.version = in.readLong(); + } + + public ClusterStateTermVersion(ClusterState state) { + this.clusterName = state.getClusterName(); + this.clusterUUID = state.metadata().clusterUUID(); + this.term = state.term(); + this.version = state.version(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + clusterName.writeTo(out); + out.writeString(clusterUUID); + out.writeLong(term); + out.writeLong(version); + } + + public ClusterName getClusterName() { + return clusterName; + } + + public String getClusterUUID() { + return clusterUUID; + } + + public long getTerm() { + return term; + } + + public long getVersion() { + return version; + } + + @Override + public String toString() { + return "ClusterStateTermVersion{" + + "clusterName=" + + clusterName + + ", clusterUUID='" + + clusterUUID + + '\'' + + ", term=" + + term + + ", version=" + + version + + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + ClusterStateTermVersion that = (ClusterStateTermVersion) o; + + if (term != that.term) return false; + if (version != that.version) return false; + if (!clusterName.equals(that.clusterName)) return false; + return clusterUUID.equals(that.clusterUUID); + } + + @Override + public int hashCode() { + int result = clusterName.hashCode(); + result = 31 * result + clusterUUID.hashCode(); + result = 31 * result + (int) (term ^ (term >>> 32)); + result = 31 * result + (int) (version ^ (version >>> 32)); + return result; + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/state/term/ClusterTermVersionIT.java b/server/src/test/java/org/opensearch/action/admin/cluster/state/term/ClusterTermVersionIT.java new file mode 100644 index 0000000000000..fa2a6121af349 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/cluster/state/term/ClusterTermVersionIT.java @@ -0,0 +1,121 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.cluster.state.term; + +import org.opensearch.action.admin.cluster.state.ClusterStateAction; +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.coordination.ClusterStateTermVersion; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.transport.TransportService; + +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.hamcrest.Matchers.is; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class ClusterTermVersionIT extends OpenSearchIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return List.of(MockTransportService.TestPlugin.class); + } + + public void testClusterStateResponseFromDataNode() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNode(); + + ensureClusterSizeConsistency(); + ensureGreen(); + + ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest.waitForTimeout(TimeValue.timeValueHours(1)); + ClusterStateResponse stateResponse = dataNodeClient().admin().cluster().state(clusterStateRequest).get(); + assertThat(stateResponse.getClusterName().value(), is(internalCluster().getClusterName())); + assertThat(stateResponse.getState().nodes().getSize(), is(internalCluster().getNodeNames().length)); + assertThat(stateResponse.isWaitForTimedOut(), is(false)); + + } + + public void testClusterStateResponseFromClusterManagerNode() throws Exception { + String master = internalCluster().startClusterManagerOnlyNode(); + String data = internalCluster().startDataOnlyNode(); + ensureClusterSizeConsistency(); + ensureGreen(); + Map callCounters = Map.ofEntries( + Map.entry(ClusterStateAction.NAME, new AtomicInteger()), + Map.entry(GetTermVersionAction.NAME, new AtomicInteger()) + ); + + addCallCountInterceptor(master, callCounters); + + ClusterStateResponse stateResponse = dataNodeClient().admin().cluster().state(new ClusterStateRequest()).get(); + + AtomicInteger clusterStateCallsOnMaster = callCounters.get(ClusterStateAction.NAME); + AtomicInteger termCallsOnMaster = callCounters.get(GetTermVersionAction.NAME); + + assertThat(clusterStateCallsOnMaster.get(), is(0)); + assertThat(termCallsOnMaster.get(), is(1)); + + assertThat(stateResponse.getClusterName().value(), is(internalCluster().getClusterName())); + assertThat(stateResponse.getState().nodes().getSize(), is(internalCluster().getNodeNames().length)); + + } + + public void testDatanodeOutOfSync() throws Exception { + String master = internalCluster().startClusterManagerOnlyNode(); + String data = internalCluster().startDataOnlyNode(); + ensureClusterSizeConsistency(); + ensureGreen(); + Map callCounters = Map.ofEntries( + Map.entry(ClusterStateAction.NAME, new AtomicInteger()), + Map.entry(GetTermVersionAction.NAME, new AtomicInteger()) + ); + + stubClusterTermResponse(master); + addCallCountInterceptor(master, callCounters); + + ClusterStateResponse stateResponse = dataNodeClient().admin().cluster().state(new ClusterStateRequest()).get(); + + AtomicInteger clusterStateCallsOnMaster = 
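+        // Expected accounting when the data node is out of sync: the stubbed term check (see
+        // stubClusterTermResponse below) never matches, so unlike the in-sync test above (0 state
+        // calls, 1 term call), the request falls back to a full fetch -- 1 term call plus
+        // 1 ClusterStateAction call on the cluster-manager.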
callCounters.get(ClusterStateAction.NAME);
+        AtomicInteger termCallsOnMaster = callCounters.get(GetTermVersionAction.NAME);
+
+        assertThat(clusterStateCallsOnMaster.get(), is(1));
+        assertThat(termCallsOnMaster.get(), is(1));
+
+        assertThat(stateResponse.getClusterName().value(), is(internalCluster().getClusterName()));
+        assertThat(stateResponse.getState().nodes().getSize(), is(internalCluster().getNodeNames().length));
+    }
+
+    private void addCallCountInterceptor(String nodeName, Map<String, AtomicInteger> callCounters) {
+        MockTransportService primaryService = (MockTransportService) internalCluster().getInstance(TransportService.class, nodeName);
+        for (var ctrEntry : callCounters.entrySet()) {
+            primaryService.addRequestHandlingBehavior(ctrEntry.getKey(), (handler, request, channel, task) -> {
+                ctrEntry.getValue().incrementAndGet();
+                logger.info("--> {} response redirect", ctrEntry.getKey());
+                handler.messageReceived(request, channel, task);
+            });
+        }
+    }
+
+    private void stubClusterTermResponse(String master) {
+        MockTransportService primaryService = (MockTransportService) internalCluster().getInstance(TransportService.class, master);
+        primaryService.addRequestHandlingBehavior(GetTermVersionAction.NAME, (handler, request, channel, task) -> {
+            channel.sendResponse(new GetTermVersionResponse(new ClusterStateTermVersion(new ClusterName("test"), "1", -1, -1)));
+        });
+    }
+
+}
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/state/term/ClusterTermVersionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/state/term/ClusterTermVersionTests.java
new file mode 100644
index 0000000000000..22d9623eebdbe
--- /dev/null
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/state/term/ClusterTermVersionTests.java
@@ -0,0 +1,26 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.admin.cluster.state.term;
+
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.test.OpenSearchSingleNodeTestCase;
+
+import java.util.concurrent.ExecutionException;
+
+public class ClusterTermVersionTests extends OpenSearchSingleNodeTestCase {
+
+    public void testTransportTermResponse() throws ExecutionException, InterruptedException {
+        GetTermVersionRequest request = new GetTermVersionRequest();
+        GetTermVersionResponse resp = client().execute(GetTermVersionAction.INSTANCE, request).get();
+
+        final ClusterService clusterService = getInstanceFromNode(ClusterService.class);
+
+        assertTrue(resp.matches(clusterService.state()));
+    }
+}
diff --git a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java
index 9ae1310a8b15c..538416e1137f5 100644
--- a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java
+++ b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java
@@ -6,24 +6,6 @@
  * compatible open source license.
  */
 
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership.
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ /* * Modifications Copyright OpenSearch Contributors. See * GitHub history for details. diff --git a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerTermCheckTests.java b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerTermCheckTests.java new file mode 100644 index 0000000000000..8c7b7a0940c82 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerTermCheckTests.java @@ -0,0 +1,320 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.action.support.clustermanager; + +import org.opensearch.Version; +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.admin.cluster.state.term.GetTermVersionResponse; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.action.support.ThreadedActionListener; +import org.opensearch.action.support.replication.ClusterStateCreationUtils; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.coordination.ClusterStateTermVersion; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.transport.CapturingTransport; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashSet; +import java.util.Objects; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.test.ClusterServiceUtils.createClusterService; +import static org.opensearch.test.ClusterServiceUtils.setState; +import static org.hamcrest.Matchers.equalTo; + +public class TransportClusterManagerTermCheckTests extends OpenSearchTestCase { + private static ThreadPool threadPool; + + private ClusterService clusterService; + private TransportService transportService; + private CapturingTransport transport; + private DiscoveryNode localNode; + private DiscoveryNode remoteNode; + private DiscoveryNode[] allNodes; + + @BeforeClass + public static void beforeClass() { + threadPool = new TestThreadPool("TransportMasterNodeActionTests"); + } + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + transport = new CapturingTransport(); + clusterService = createClusterService(threadPool); + transportService = transport.createTransportService( + clusterService.getSettings(), + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> clusterService.localNode(), + null, + Collections.emptySet(), + NoopTracer.INSTANCE + ); + transportService.start(); + transportService.acceptIncomingRequests(); + + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + clusterService.close(); + transportService.close(); + } + + @AfterClass + public static void afterClass() { + ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); + threadPool = null; + } + + public static class Request extends ClusterManagerNodeRequest { + Request() {} + + Request(StreamInput in) throws IOException { + super(in); + } + + @Override + public ActionRequestValidationException validate() { + return 
null; + } + } + + class Response extends ActionResponse { + private long identity = randomLong(); + + Response() {} + + Response(StreamInput in) throws IOException { + super(in); + identity = in.readLong(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return identity == response.identity; + } + + @Override + public int hashCode() { + return Objects.hash(identity); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(identity); + } + } + + class Action extends TransportClusterManagerNodeAction { + Action(String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) { + super( + actionName, + transportService, + clusterService, + threadPool, + new ActionFilters(new HashSet<>()), + Request::new, + new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY)) + ); + } + + @Override + protected void doExecute(Task task, final Request request, ActionListener listener) { + // remove unneeded threading by wrapping listener with SAME to prevent super.doExecute from wrapping it with LISTENER + super.doExecute(task, request, new ThreadedActionListener<>(logger, threadPool, ThreadPool.Names.SAME, listener, false)); + } + + @Override + protected String executor() { + // very lightweight operation in memory, no need to fork to a thread + return ThreadPool.Names.SAME; + } + + @Override + protected boolean localExecuteSupportedByAction() { + return true; + } + + @Override + protected Response read(StreamInput in) throws IOException { + return new Response(in); + } + + @Override + protected void clusterManagerOperation(Request request, ClusterState state, ActionListener listener) throws Exception { + listener.onResponse(new Response()); // default implementation, overridden in specific tests + } + + @Override + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + return null; // default implementation, overridden in specific tests + } + } + + public void testTermCheckMatchWithClusterManager() throws ExecutionException, InterruptedException { + setUpCluster(Version.CURRENT); + + TransportClusterManagerTermCheckTests.Request request = new TransportClusterManagerTermCheckTests.Request(); + PlainActionFuture listener = new PlainActionFuture<>(); + new TransportClusterManagerTermCheckTests.Action("internal:testAction", transportService, clusterService, threadPool).execute( + request, + listener + ); + + assertThat(transport.capturedRequests().length, equalTo(1)); + CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0]; + assertTrue(capturedRequest.node.isClusterManagerNode()); + assertThat(capturedRequest.action, equalTo("cluster:monitor/term")); + GetTermVersionResponse response = new GetTermVersionResponse( + new ClusterStateTermVersion( + clusterService.state().getClusterName(), + clusterService.state().metadata().clusterUUID(), + clusterService.state().term(), + clusterService.state().version() + ) + ); + transport.handleResponse(capturedRequest.requestId, response); + assertTrue(listener.isDone()); + } + + public void testTermCheckNoMatchWithClusterManager() throws ExecutionException, InterruptedException { + setUpCluster(Version.CURRENT); + TransportClusterManagerTermCheckTests.Request request = new TransportClusterManagerTermCheckTests.Request(); + + PlainActionFuture listener = new PlainActionFuture<>(); + new 
TransportClusterManagerTermCheckTests.Action("internal:testAction", transportService, clusterService, threadPool).execute( + request, + listener + ); + + assertThat(transport.capturedRequests().length, equalTo(1)); + CapturingTransport.CapturedRequest termCheckRequest = transport.capturedRequests()[0]; + assertTrue(termCheckRequest.node.isClusterManagerNode()); + assertThat(termCheckRequest.action, equalTo("cluster:monitor/term")); + GetTermVersionResponse noMatchResponse = new GetTermVersionResponse( + new ClusterStateTermVersion( + clusterService.state().getClusterName(), + clusterService.state().metadata().clusterUUID(), + clusterService.state().term(), + clusterService.state().version() - 1 + ) + ); + transport.handleResponse(termCheckRequest.requestId, noMatchResponse); + assertFalse(listener.isDone()); + + assertThat(transport.capturedRequests().length, equalTo(2)); + CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[1]; + assertTrue(capturedRequest.node.isClusterManagerNode()); + assertThat(capturedRequest.request, equalTo(request)); + assertThat(capturedRequest.action, equalTo("internal:testAction")); + + TransportClusterManagerTermCheckTests.Response response = new TransportClusterManagerTermCheckTests.Response(); + transport.handleResponse(capturedRequest.requestId, response); + assertTrue(listener.isDone()); + assertThat(listener.get(), equalTo(response)); + + } + + public void testTermCheckOnOldVersionClusterManager() throws ExecutionException, InterruptedException { + + setUpCluster(Version.V_2_12_0); + TransportClusterManagerTermCheckTests.Request request = new TransportClusterManagerTermCheckTests.Request(); + + PlainActionFuture listener = new PlainActionFuture<>(); + new TransportClusterManagerTermCheckTests.Action("internal:testAction", transportService, clusterService, threadPool).execute( + request, + listener + ); + + assertThat(transport.capturedRequests().length, equalTo(1)); + CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0]; + assertTrue(capturedRequest.node.isClusterManagerNode()); + assertThat(capturedRequest.request, equalTo(request)); + assertThat(capturedRequest.action, equalTo("internal:testAction")); + + TransportClusterManagerTermCheckTests.Response response = new TransportClusterManagerTermCheckTests.Response(); + transport.handleResponse(capturedRequest.requestId, response); + assertTrue(listener.isDone()); + assertThat(listener.get(), equalTo(response)); + + } + + private void setUpCluster(Version clusterManagerVersion) { + localNode = new DiscoveryNode( + "local_node", + buildNewFakeTransportAddress(), + Collections.emptyMap(), + Collections.singleton(DiscoveryNodeRole.DATA_ROLE), + Version.CURRENT + ); + remoteNode = new DiscoveryNode( + "remote_node", + buildNewFakeTransportAddress(), + Collections.emptyMap(), + Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE), + clusterManagerVersion + ); + allNodes = new DiscoveryNode[] { localNode, remoteNode }; + setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes)); + + } +} diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 635939e68de71..58315ba031b84 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -66,6 +66,8 @@ import 
org.opensearch.action.admin.cluster.state.ClusterStateRequest; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; import org.opensearch.action.admin.cluster.state.TransportClusterStateAction; +import org.opensearch.action.admin.cluster.state.term.GetTermVersionAction; +import org.opensearch.action.admin.cluster.state.term.TransportGetTermVersionAction; import org.opensearch.action.admin.indices.create.CreateIndexAction; import org.opensearch.action.admin.indices.create.CreateIndexRequest; import org.opensearch.action.admin.indices.create.CreateIndexResponse; @@ -2437,6 +2439,18 @@ public void onFailure(final Exception e) { indexNameExpressionResolver ) ); + + actions.put( + GetTermVersionAction.INSTANCE, + new TransportGetTermVersionAction( + transportService, + clusterService, + threadPool, + actionFilters, + indexNameExpressionResolver + ) + ); + DynamicActionRegistry dynamicActionRegistry = new DynamicActionRegistry(); dynamicActionRegistry.registerUnmodifiableActionMap(actions); client.initialize( From f3d2beee637f63e38c8f26dbcee9f2a82f9c87b6 Mon Sep 17 00:00:00 2001 From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Date: Thu, 21 Mar 2024 10:35:24 +0530 Subject: [PATCH 16/74] [Remote Migration] Changes for Primary Relocation during migration (#12494) Changes for Primary Relocation during migration --------- Signed-off-by: Gaurav Bafna --- .../opensearch/index/shard/IndexShardIT.java | 3 +- .../SegmentReplicationClusterSettingIT.java | 8 +- .../MigrationBaseTestCase.java | 23 +- .../RemotePrimaryRelocationIT.java | 223 ++++++++++++++++++ .../RemoteIndexPrimaryRelocationIT.java | 1 - .../RemoteStoreBaseIntegTestCase.java | 121 +++++++++- .../SegmentReplicationSnapshotIT.java | 2 +- ...ransportSegmentReplicationStatsAction.java | 2 +- .../coordination/JoinTaskExecutor.java | 2 +- .../org/opensearch/index/IndexService.java | 35 ++- .../org/opensearch/index/IndexSettings.java | 16 +- .../SegmentReplicationPressureService.java | 7 +- .../index/SegmentReplicationStatsTracker.java | 2 +- .../opensearch/index/engine/EngineConfig.java | 4 +- .../index/engine/InternalEngine.java | 8 +- .../index/engine/NRTReplicationEngine.java | 3 +- .../RemoteStoreStatsTrackerFactory.java | 2 +- .../index/seqno/ReplicationTracker.java | 10 +- .../opensearch/index/shard/IndexShard.java | 198 +++++++++++++--- .../shard/RemoteStoreRefreshListener.java | 2 +- .../opensearch/index/shard/StoreRecovery.java | 35 +-- .../org/opensearch/index/store/Store.java | 4 +- .../index/translog/RemoteFsTranslog.java | 2 +- .../opensearch/index/translog/Translog.java | 2 +- .../opensearch/indices/IndicesService.java | 26 +- .../indices/recovery/MultiFileWriter.java | 2 +- .../recovery/PeerRecoverySourceService.java | 2 +- .../recovery/PeerRecoveryTargetService.java | 7 +- .../recovery/RecoverySourceHandler.java | 10 +- .../RecoverySourceHandlerFactory.java | 3 +- .../indices/recovery/RecoveryTarget.java | 26 +- .../SegmentReplicationSourceService.java | 9 +- .../SegmentReplicationTargetService.java | 12 +- .../remotestore/RemoteStoreNodeAttribute.java | 8 + .../snapshots/SnapshotShardsService.java | 2 +- .../IndexLevelReplicationTests.java | 23 +- .../RecoveryDuringReplicationTests.java | 13 +- .../index/shard/IndexShardTests.java | 6 +- ...overyWithRemoteTranslogOnPrimaryTests.java | 2 +- .../SegmentReplicationIndexShardTests.java | 2 +- .../index/translog/RemoteFsTranslogTests.java | 5 +- ...dicesLifecycleListenerSingleNodeTests.java | 18 +- .../PeerRecoveryTargetServiceTests.java | 6 +- 
.../indices/recovery/RecoveryTests.java | 7 +- .../recovery/ReplicationCollectionTests.java | 2 +- ...enSearchIndexLevelReplicationTestCase.java | 2 +- .../index/shard/IndexShardTestCase.java | 22 +- 47 files changed, 745 insertions(+), 185 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java index c394a1f631690..7e0c1630a76e4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java @@ -713,7 +713,8 @@ public static final IndexShard newIndexShard( null, () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, nodeId, - null + null, + false ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java index f2cb7c9c6bfc8..d2f1e6313db07 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java @@ -89,8 +89,8 @@ public void testIndexReplicationSettingOverridesSegRepClusterSetting() throws Ex Index index = resolveIndex(INDEX_NAME); Index anotherIndex = resolveIndex(ANOTHER_INDEX); IndicesService indicesService = internalCluster().getInstance(IndicesService.class, primaryNode); - assertEquals(indicesService.indexService(index).getIndexSettings().isSegRepEnabled(), false); - assertEquals(indicesService.indexService(anotherIndex).getIndexSettings().isSegRepEnabled(), true); + assertEquals(indicesService.indexService(index).getIndexSettings().isSegRepEnabledOrRemoteNode(), false); + assertEquals(indicesService.indexService(anotherIndex).getIndexSettings().isSegRepEnabledOrRemoteNode(), true); } public void testIndexReplicationSettingOverridesDocRepClusterSetting() throws Exception { @@ -119,8 +119,8 @@ public void testIndexReplicationSettingOverridesDocRepClusterSetting() throws Ex Index index = resolveIndex(INDEX_NAME); Index anotherIndex = resolveIndex(ANOTHER_INDEX); IndicesService indicesService = internalCluster().getInstance(IndicesService.class, primaryNode); - assertEquals(indicesService.indexService(index).getIndexSettings().isSegRepEnabled(), true); - assertEquals(indicesService.indexService(anotherIndex).getIndexSettings().isSegRepEnabled(), false); + assertEquals(indicesService.indexService(index).getIndexSettings().isSegRepEnabledOrRemoteNode(), true); + assertEquals(indicesService.indexService(anotherIndex).getIndexSettings().isSegRepEnabledOrRemoteNode(), false); } public void testReplicationTypesOverrideNotAllowed_IndexAPI() { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java index 88d6f6897ee68..19da668c432cf 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java @@ -8,13 +8,19 @@ package org.opensearch.remotemigration; +import 
org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.repositories.fs.ReloadableFsRepository; import org.opensearch.test.OpenSearchIntegTestCase; import java.nio.file.Path; +import java.util.concurrent.ExecutionException; -import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; +import static org.opensearch.repositories.fs.ReloadableFsRepository.REPOSITORIES_FAILRATE_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; public class MigrationBaseTestCase extends OpenSearchIntegTestCase { protected static final String REPOSITORY_NAME = "test-remote-store-repo"; @@ -35,11 +41,10 @@ protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) .put(remoteStoreClusterSettings(REPOSITORY_NAME, segmentRepoPath, REPOSITORY_2_NAME, translogRepoPath)) - .put("discovery.initial_state_timeout", "500ms") .build(); } else { logger.info("Adding docrep node"); - return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put("discovery.initial_state_timeout", "500ms").build(); + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).build(); } } @@ -47,4 +52,16 @@ protected Settings nodeSettings(int nodeOrdinal) { protected Settings featureFlagSettings() { return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); } + + protected void setFailRate(String repoName, int value) throws ExecutionException, InterruptedException { + GetRepositoriesRequest gr = new GetRepositoriesRequest(new String[] { repoName }); + GetRepositoriesResponse res = client().admin().cluster().getRepositories(gr).get(); + RepositoryMetadata rmd = res.repositories().get(0); + Settings.Builder settings = Settings.builder() + .put("location", rmd.settings().get("location")) + .put(REPOSITORIES_FAILRATE_SETTING.getKey(), value); + assertAcked( + client().admin().cluster().preparePutRepository(repoName).setType(ReloadableFsRepository.TYPE).setSettings(settings).get() + ); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java new file mode 100644 index 0000000000000..b1c429a45a1a1 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java @@ -0,0 +1,223 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.remotemigration; + +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; + +import org.opensearch.action.DocWriteResponse; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.delete.DeleteResponse; +import org.opensearch.action.index.IndexResponse; +import org.opensearch.client.Client; +import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.opensearch.common.Priority; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.hamcrest.OpenSearchAssertions; +import org.opensearch.test.transport.MockTransportService; + +import java.util.Collection; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import static java.util.Arrays.asList; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemotePrimaryRelocationIT extends MigrationBaseTestCase { + protected int maximumNumberOfShards() { + return 1; + } + + // ToDo : Fix me when we support migration of replicas + protected int maximumNumberOfReplicas() { + return 0; + } + + protected Collection> nodePlugins() { + return asList(MockTransportService.TestPlugin.class); + } + + public void testMixedModeRelocation() throws Exception { + String docRepNode = internalCluster().startNode(); + Client client = internalCluster().client(docRepNode); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings(Settings.builder().put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed")); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + // create shard with 0 replica and 1 shard + client().admin().indices().prepareCreate("test").setSettings(indexSettings()).setMapping("field", "type=text").get(); + ensureGreen("test"); + + AtomicInteger numAutoGenDocs = new AtomicInteger(); + final AtomicBoolean finished = new AtomicBoolean(false); + Thread indexingThread = getIndexingThread(finished, numAutoGenDocs); + + refresh("test"); + + // add remote node in mixed mode cluster + addRemote = true; + String remoteNode = internalCluster().startNode(); + internalCluster().validateClusterFormed(); + + String remoteNode2 = internalCluster().startNode(); + internalCluster().validateClusterFormed(); + + // assert repo gets registered + GetRepositoriesRequest gr = new GetRepositoriesRequest(new String[] { REPOSITORY_NAME }); + GetRepositoriesResponse getRepositoriesResponse = client.admin().cluster().getRepositories(gr).actionGet(); + assertEquals(1, getRepositoriesResponse.repositories().size()); + + // Index some more docs + int currentDoc = numAutoGenDocs.get(); + int finalCurrentDoc1 = currentDoc; + waitUntil(() -> numAutoGenDocs.get() > finalCurrentDoc1 + 5); + + 
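+        // getIndexingThread (defined at the bottom of this class) keeps indexing and deleting
+        // documents in the background throughout the test; the hit-count assertions at the end
+        // verify that no acknowledged write is lost across the docrep -> remote and
+        // remote -> remote relocations below.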
logger.info("--> relocating from {} to {} ", docRepNode, remoteNode); + client().admin().cluster().prepareReroute().add(new MoveAllocationCommand("test", 0, docRepNode, remoteNode)).execute().actionGet(); + ClusterHealthResponse clusterHealthResponse = client().admin() + .cluster() + .prepareHealth() + .setTimeout(TimeValue.timeValueSeconds(60)) + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .execute() + .actionGet(); + + assertEquals(0, clusterHealthResponse.getRelocatingShards()); + assertEquals(remoteNode, primaryNodeName("test")); + logger.info("--> relocation from docrep to remote complete"); + + // Index some more docs + currentDoc = numAutoGenDocs.get(); + int finalCurrentDoc = currentDoc; + waitUntil(() -> numAutoGenDocs.get() > finalCurrentDoc + 5); + + client().admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand("test", 0, remoteNode, remoteNode2)) + .execute() + .actionGet(); + clusterHealthResponse = client().admin() + .cluster() + .prepareHealth() + .setTimeout(TimeValue.timeValueSeconds(60)) + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .execute() + .actionGet(); + + assertEquals(0, clusterHealthResponse.getRelocatingShards()); + assertEquals(remoteNode2, primaryNodeName("test")); + + logger.info("--> relocation from remote to remote complete"); + + finished.set(true); + indexingThread.join(); + refresh("test"); + OpenSearchAssertions.assertHitCount(client().prepareSearch("test").setTrackTotalHits(true).get(), numAutoGenDocs.get()); + OpenSearchAssertions.assertHitCount( + client().prepareSearch("test") + .setTrackTotalHits(true)// extra paranoia ;) + .setQuery(QueryBuilders.termQuery("auto", true)) + .get(), + numAutoGenDocs.get() + ); + + } + + public void testMixedModeRelocation_RemoteSeedingFail() throws Exception { + String docRepNode = internalCluster().startNode(); + Client client = internalCluster().client(docRepNode); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings(Settings.builder().put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed")); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + // create shard with 0 replica and 1 shard + client().admin().indices().prepareCreate("test").setSettings(indexSettings()).setMapping("field", "type=text").get(); + ensureGreen("test"); + + AtomicInteger numAutoGenDocs = new AtomicInteger(); + final AtomicBoolean finished = new AtomicBoolean(false); + Thread indexingThread = getIndexingThread(finished, numAutoGenDocs); + + refresh("test"); + + // add remote node in mixed mode cluster + addRemote = true; + String remoteNode = internalCluster().startNode(); + internalCluster().validateClusterFormed(); + + // assert repo gets registered + GetRepositoriesRequest gr = new GetRepositoriesRequest(new String[] { REPOSITORY_NAME }); + GetRepositoriesResponse getRepositoriesResponse = client.admin().cluster().getRepositories(gr).actionGet(); + assertEquals(1, getRepositoriesResponse.repositories().size()); + + setFailRate(REPOSITORY_NAME, 100); + + logger.info("--> relocating from {} to {} ", docRepNode, remoteNode); + client().admin().cluster().prepareReroute().add(new MoveAllocationCommand("test", 0, docRepNode, remoteNode)).execute().actionGet(); + ClusterHealthResponse clusterHealthResponse = client().admin() + .cluster() + .prepareHealth() + .setTimeout(TimeValue.timeValueSeconds(5)) + 
.setWaitForEvents(Priority.LANGUID)
+            .setWaitForNoRelocatingShards(true)
+            .execute()
+            .actionGet();
+
+        assertTrue(clusterHealthResponse.getRelocatingShards() == 1);
+        setFailRate(REPOSITORY_NAME, 0);
+        Thread.sleep(RandomNumbers.randomIntBetween(random(), 0, 2000));
+        clusterHealthResponse = client().admin()
+            .cluster()
+            .prepareHealth()
+            .setTimeout(TimeValue.timeValueSeconds(45))
+            .setWaitForEvents(Priority.LANGUID)
+            .setWaitForNoRelocatingShards(true)
+            .execute()
+            .actionGet();
+        assertTrue(clusterHealthResponse.getRelocatingShards() == 0);
+        logger.info("--> docrep to remote relocation complete");
+        finished.set(true);
+        indexingThread.join();
+        refresh("test");
+        OpenSearchAssertions.assertHitCount(client().prepareSearch("test").setTrackTotalHits(true).get(), numAutoGenDocs.get());
+        OpenSearchAssertions.assertHitCount(
+            client().prepareSearch("test")
+                .setTrackTotalHits(true) // extra paranoia ;)
+                .setQuery(QueryBuilders.termQuery("auto", true))
+                .get(),
+            numAutoGenDocs.get()
+        );
+    }
+
+    private static Thread getIndexingThread(AtomicBoolean finished, AtomicInteger numAutoGenDocs) {
+        Thread indexingThread = new Thread(() -> {
+            while (finished.get() == false && numAutoGenDocs.get() < 10_000) {
+                IndexResponse indexResponse = client().prepareIndex("test").setId("id").setSource("field", "value").get();
+                assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult());
+                DeleteResponse deleteResponse = client().prepareDelete("test", "id").get();
+                assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult());
+                client().prepareIndex("test").setSource("auto", true).get();
+                numAutoGenDocs.incrementAndGet();
+            }
+        });
+        indexingThread.start();
+        return indexingThread;
+    }
+}
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java
index 869032a84c2c2..67316ed0e6e6b 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java
@@ -44,7 +44,6 @@ public Settings indexSettings() {
             .build();
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9191")
     public void testPrimaryRelocationWhileIndexing() throws Exception {
         internalCluster().startClusterManagerOnlyNode();
         super.testPrimaryRelocationWhileIndexing();
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java
index 3899c8a80f442..ba90cbe96e157 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java
@@ -28,6 +28,7 @@
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.UUIDs;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.core.common.unit.ByteSizeUnit;
 import org.opensearch.core.index.Index;
 import org.opensearch.index.IndexModule;
 import org.opensearch.index.IndexService;
@@ -56,8 +57,11 @@
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.Collectors;
 
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY;
 import
static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.repositories.fs.ReloadableFsRepository.REPOSITORIES_FAILRATE_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -187,7 +191,122 @@ protected BulkResponse indexBulk(String indexName, int numDocs) { return client().bulk(bulkRequest).actionGet(); } - private Settings defaultIndexSettings() { + public static Settings remoteStoreClusterSettings(String name, Path path) { + return remoteStoreClusterSettings(name, path, name, path); + } + + public static Settings remoteStoreClusterSettings( + String segmentRepoName, + Path segmentRepoPath, + String segmentRepoType, + String translogRepoName, + Path translogRepoPath, + String translogRepoType + ) { + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put( + buildRemoteStoreNodeAttributes( + segmentRepoName, + segmentRepoPath, + segmentRepoType, + translogRepoName, + translogRepoPath, + translogRepoType, + false + ) + ); + return settingsBuilder.build(); + } + + public static Settings remoteStoreClusterSettings( + String segmentRepoName, + Path segmentRepoPath, + String translogRepoName, + Path translogRepoPath + ) { + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put(buildRemoteStoreNodeAttributes(segmentRepoName, segmentRepoPath, translogRepoName, translogRepoPath, false)); + return settingsBuilder.build(); + } + + public static Settings buildRemoteStoreNodeAttributes( + String segmentRepoName, + Path segmentRepoPath, + String translogRepoName, + Path translogRepoPath, + boolean withRateLimiterAttributes + ) { + return buildRemoteStoreNodeAttributes( + segmentRepoName, + segmentRepoPath, + ReloadableFsRepository.TYPE, + translogRepoName, + translogRepoPath, + ReloadableFsRepository.TYPE, + withRateLimiterAttributes + ); + } + + public static Settings buildRemoteStoreNodeAttributes( + String segmentRepoName, + Path segmentRepoPath, + String segmentRepoType, + String translogRepoName, + Path translogRepoPath, + String translogRepoType, + boolean withRateLimiterAttributes + ) { + String segmentRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + segmentRepoName + ); + String segmentRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + segmentRepoName + ); + String translogRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + translogRepoName + ); + String translogRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + translogRepoName + ); + String stateRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + segmentRepoName + ); + String stateRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." 
+ REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + segmentRepoName + ); + + Settings.Builder settings = Settings.builder() + .put("node.attr." + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName) + .put(segmentRepoTypeAttributeKey, segmentRepoType) + .put(segmentRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath) + .put("node.attr." + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, translogRepoName) + .put(translogRepoTypeAttributeKey, translogRepoType) + .put(translogRepoSettingsAttributeKeyPrefix + "location", translogRepoPath) + .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName) + .put(stateRepoTypeAttributeKey, segmentRepoType) + .put(stateRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath); + + if (withRateLimiterAttributes) { + settings.put(segmentRepoSettingsAttributeKeyPrefix + "compress", randomBoolean()) + .put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, ByteSizeUnit.BYTES); + } + + return settings.build(); + } + + Settings defaultIndexSettings() { return Settings.builder() .put(super.indexSettings()) .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java index c649c4ab13e7e..b019bb57743c9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java @@ -303,7 +303,7 @@ public void testSnapshotRestoreOnIndexWithSegRepClusterSetting() throws Exceptio // Verify index setting isSegRepEnabled. 
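         // (isSegRepEnabled is renamed in this change: a shard on a remote-backed node now reports
         // segment replication as enabled even without the index-level setting, hence the new getter.)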
Index index = resolveIndex(RESTORED_INDEX_NAME);
         IndicesService indicesService = internalCluster().getInstance(IndicesService.class);
-        assertEquals(indicesService.indexService(index).getIndexSettings().isSegRepEnabled(), false);
+        assertEquals(indicesService.indexService(index).getIndexSettings().isSegRepEnabledOrRemoteNode(), false);
     }
 
     /**
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java
index 1b912518d7e04..fc97d67c6c3af 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java
@@ -148,7 +148,7 @@ protected SegmentReplicationShardStatsResponse shardOperation(SegmentReplication
         IndexShard indexShard = indexService.getShard(shardRouting.shardId().id());
         ShardId shardId = shardRouting.shardId();
 
-        if (indexShard.indexSettings().isSegRepEnabled() == false) {
+        if (indexShard.indexSettings().isSegRepEnabledOrRemoteNode() == false) {
             return null;
         }
 
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java
index bc365b9872037..5d896e392e6bc 100644
--- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java
+++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java
@@ -215,7 +215,7 @@ public ClusterTasksResult<Task> execute(ClusterState currentState, List<Task> jo
                 ensureNodeCommissioned(node, currentState.metadata());
                 nodesBuilder.add(node);
 
-                if (remoteDN.isEmpty()) {
+                if (remoteDN.isEmpty() && node.isRemoteStoreNode()) {
                     // This is hit only on cases where we encounter first remote node
                     logger.info("Updating system repository now for remote store");
                     repositoriesMetadata = remoteStoreNodeService.updateRepositoriesMetadata(
diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java
index 0909e2d5c8ff0..11dc4474cfa42 100644
--- a/server/src/main/java/org/opensearch/index/IndexService.java
+++ b/server/src/main/java/org/opensearch/index/IndexService.java
@@ -90,6 +90,7 @@
 import org.opensearch.index.shard.ShardNotInPrimaryModeException;
 import org.opensearch.index.shard.ShardPath;
 import org.opensearch.index.similarity.SimilarityService;
+import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory;
 import org.opensearch.index.store.Store;
 import org.opensearch.index.translog.Translog;
 import org.opensearch.index.translog.TranslogFactory;
@@ -99,7 +100,9 @@
 import org.opensearch.indices.recovery.RecoverySettings;
 import org.opensearch.indices.recovery.RecoveryState;
 import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher;
+import org.opensearch.node.remotestore.RemoteStoreNodeAttribute;
 import org.opensearch.plugins.IndexStorePlugin;
+import org.opensearch.repositories.RepositoriesService;
 import org.opensearch.script.ScriptService;
 import org.opensearch.search.aggregations.support.ValuesSourceRegistry;
 import org.opensearch.threadpool.ThreadPool;
@@ -455,7 +458,10 @@ public synchronized IndexShard createShard(
         final Consumer<ShardId> globalCheckpointSyncer,
         final RetentionLeaseSyncer retentionLeaseSyncer,
         final SegmentReplicationCheckpointPublisher
checkpointPublisher,
-        final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory
+        final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory,
+        final RepositoriesService repositoriesService,
+        final DiscoveryNode targetNode,
+        @Nullable DiscoveryNode sourceNode
     ) throws IOException {
         Objects.requireNonNull(retentionLeaseSyncer);
         /*
@@ -484,10 +490,26 @@ public synchronized IndexShard createShard(
                     warmer.warm(reader, shard, IndexService.this.indexSettings);
                 }
             };
-        Store remoteStore = null;
-        if (this.indexSettings.isRemoteStoreEnabled()) {
-            Directory remoteDirectory = remoteDirectoryFactory.newDirectory(this.indexSettings, path);
+        Store remoteStore = null;
+        boolean seedRemote = false;
+        if (targetNode.isRemoteStoreNode()) {
+            final Directory remoteDirectory;
+            if (this.indexSettings.isRemoteStoreEnabled()) {
+                remoteDirectory = remoteDirectoryFactory.newDirectory(this.indexSettings, path);
+            } else {
+                if (sourceNode != null && sourceNode.isRemoteStoreNode() == false) {
+                    if (routing.primary() == false) {
+                        throw new IllegalStateException("Can't migrate a remote shard to replica before primary " + routing.shardId());
+                    }
+                    logger.info("DocRep shard {} is migrating to remote", shardId);
+                    seedRemote = true;
+                }
+                remoteDirectory = ((RemoteSegmentStoreDirectoryFactory) remoteDirectoryFactory).newDirectory(
+                    RemoteStoreNodeAttribute.getRemoteStoreSegmentRepo(this.indexSettings.getNodeSettings()),
+                    this.indexSettings.getUUID(),
+                    shardId
+                );
+            }
             remoteStore = new Store(shardId, this.indexSettings, remoteDirectory, lock, Store.OnClose.EMPTY, path);
         }
 
@@ -523,12 +545,13 @@ public synchronized IndexShard createShard(
             retentionLeaseSyncer,
             circuitBreakerService,
             translogFactorySupplier,
-            this.indexSettings.isSegRepEnabled() ? checkpointPublisher : null,
+            this.indexSettings.isSegRepEnabledOrRemoteNode() ? checkpointPublisher : null,
             remoteStore,
             remoteStoreStatsTrackerFactory,
             clusterRemoteTranslogBufferIntervalSupplier,
             nodeEnv.nodeId(),
-            recoverySettings
+            recoverySettings,
+            seedRemote
         );
         eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created");
         eventListener.afterIndexShardCreated(indexShard);
diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java
index 5aaea2c498701..7e49726c259cb 100644
--- a/server/src/main/java/org/opensearch/index/IndexSettings.java
+++ b/server/src/main/java/org/opensearch/index/IndexSettings.java
@@ -52,6 +52,7 @@
 import org.opensearch.indices.replication.common.ReplicationType;
 import org.opensearch.ingest.IngestService;
 import org.opensearch.node.Node;
+import org.opensearch.node.remotestore.RemoteStoreNodeAttribute;
 import org.opensearch.search.pipeline.SearchPipelineService;
 
 import java.util.Arrays;
@@ -1221,17 +1222,20 @@ public int getNumberOfReplicas() {
 
     /**
      * Returns true if segment replication is enabled on the index.
+     *
+     * Every shard on a remote node also has SegRep enabled, even without the
+     * proper index setting, during the migration.
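+     * (That keeps migrating shards on the segment replication code paths without having to
+     * rewrite the index settings mid-migration.)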
     */
-    public boolean isSegRepEnabled() {
-        return ReplicationType.SEGMENT.equals(replicationType);
+    public boolean isSegRepEnabledOrRemoteNode() {
+        return ReplicationType.SEGMENT.equals(replicationType) || isRemoteNode();
     }
 
     public boolean isSegRepLocalEnabled() {
-        return isSegRepEnabled() && !isRemoteStoreEnabled();
+        return isSegRepEnabledOrRemoteNode() && !isRemoteStoreEnabled();
     }
 
     public boolean isSegRepWithRemoteEnabled() {
-        return isSegRepEnabled() && isRemoteStoreEnabled();
+        return isSegRepEnabledOrRemoteNode() && isRemoteStoreEnabled();
     }
 
     /**
@@ -1241,6 +1245,10 @@ public boolean isRemoteStoreEnabled() {
         return isRemoteStoreEnabled;
     }
 
+    public boolean isRemoteNode() {
+        return RemoteStoreNodeAttribute.isRemoteDataAttributePresent(this.getNodeSettings());
+    }
+
     /**
      * Returns if remote translog store is enabled for this index.
      */
diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java b/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java
index ce38dd3bb236c..297fe093f7f4e 100644
--- a/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java
+++ b/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java
@@ -145,7 +145,9 @@ public void isSegrepLimitBreached(ShardId shardId) {
         final IndexService indexService = indicesService.indexService(shardId.getIndex());
         if (indexService != null) {
             final IndexShard shard = indexService.getShard(shardId.id());
-            if (isSegmentReplicationBackpressureEnabled && shard.indexSettings().isSegRepEnabled() && shard.routingEntry().primary()) {
+            if (isSegmentReplicationBackpressureEnabled
+                && shard.indexSettings().isSegRepEnabledOrRemoteNode()
+                && shard.routingEntry().primary()) {
                 validateReplicationGroup(shard);
             }
         }
@@ -264,7 +266,8 @@ protected void runInternal() {
                     stats.getShardStats().get(shardId).getReplicaStats()
                 );
                 final IndexService indexService = pressureService.indicesService.indexService(shardId.getIndex());
-                if (indexService.getIndexSettings() != null && indexService.getIndexSettings().isSegRepEnabled() == false) {
+                if (indexService.getIndexSettings() != null
+                    && indexService.getIndexSettings().isSegRepEnabledOrRemoteNode() == false) {
                     return;
                 }
                 final IndexShard primaryShard = indexService.getShard(shardId.getId());
diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationStatsTracker.java b/server/src/main/java/org/opensearch/index/SegmentReplicationStatsTracker.java
index f5fc8aa1c1eea..e48a76c438057 100644
--- a/server/src/main/java/org/opensearch/index/SegmentReplicationStatsTracker.java
+++ b/server/src/main/java/org/opensearch/index/SegmentReplicationStatsTracker.java
@@ -45,7 +45,7 @@ public SegmentReplicationStats getStats() {
         Map<ShardId, SegmentReplicationPerGroupStats> stats = new HashMap<>();
         for (IndexService indexService : indicesService) {
             for (IndexShard indexShard : indexService) {
-                if (indexShard.indexSettings().isSegRepEnabled() && indexShard.routingEntry().primary()) {
+                if (indexShard.indexSettings().isSegRepEnabledOrRemoteNode() && indexShard.routingEntry().primary()) {
                     stats.putIfAbsent(indexShard.shardId(), getStatsForShard(indexShard));
                 }
             }
diff --git a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java
index bf3e10d684c94..8106b65bddeec 100644
--- a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java
+++ b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java
@@ -244,7 +244,7 @@ private static void
doValidateCodecSettings(final String codec) {
      * Creates a new {@link org.opensearch.index.engine.EngineConfig}
      */
     private EngineConfig(Builder builder) {
-        if (builder.isReadOnlyReplica && builder.indexSettings.isSegRepEnabled() == false) {
+        if (builder.isReadOnlyReplica && builder.indexSettings.isSegRepEnabledOrRemoteNode() == false) {
             throw new IllegalArgumentException("Shard can only be wired as a read only replica with Segment Replication enabled");
         }
         this.shardId = builder.shardId;
@@ -491,7 +491,7 @@ public LongSupplier getPrimaryTermSupplier() {
      * @return true if this engine should be wired as read only.
      */
     public boolean isReadOnlyReplica() {
-        return indexSettings.isSegRepEnabled() && isReadOnlyReplica;
+        return indexSettings.isSegRepEnabledOrRemoteNode() && isReadOnlyReplica;
     }
 
     /**
diff --git a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java
index a25ec95f58e05..7bacec22fc850 100644
--- a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java
+++ b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java
@@ -710,7 +710,7 @@ private OpVsLuceneDocStatus compareOpToLuceneDocBasedOnSeqNo(final Operation op)
         final OpVsLuceneDocStatus status;
         VersionValue versionValue = getVersionFromMap(op.uid().bytes());
         assert incrementVersionLookup();
-        boolean segRepEnabled = engineConfig.getIndexSettings().isSegRepEnabled();
+        boolean segRepEnabled = engineConfig.getIndexSettings().isSegRepEnabledOrRemoteNode();
         if (versionValue != null) {
             status = compareOpToVersionMapOnSeqNo(op.id(), op.seqNo(), op.primaryTerm(), versionValue);
         } else {
@@ -1005,7 +1005,7 @@ protected final IndexingStrategy planIndexingAsNonPrimary(Index index) throws IO
                 assert maxSeqNoOfUpdatesOrDeletes < index.seqNo() : index.seqNo() + ">=" + maxSeqNoOfUpdatesOrDeletes;
                 plan = IndexingStrategy.optimizedAppendOnly(index.version(), 0);
             } else {
-                boolean segRepEnabled = engineConfig.getIndexSettings().isSegRepEnabled();
+                boolean segRepEnabled = engineConfig.getIndexSettings().isSegRepEnabledOrRemoteNode();
                 versionMap.enforceSafeAccess();
                 final OpVsLuceneDocStatus opVsLucene = compareOpToLuceneDocBasedOnSeqNo(index);
                 if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) {
@@ -1452,7 +1452,7 @@ protected final DeletionStrategy planDeletionAsNonPrimary(Delete delete) throws
                 // See testRecoveryWithOutOfOrderDelete for an example of peer recovery
                 plan = DeletionStrategy.processButSkipLucene(false, delete.version());
             } else {
-                boolean segRepEnabled = engineConfig.getIndexSettings().isSegRepEnabled();
+                boolean segRepEnabled = engineConfig.getIndexSettings().isSegRepEnabledOrRemoteNode();
                 final OpVsLuceneDocStatus opVsLucene = compareOpToLuceneDocBasedOnSeqNo(delete);
                 if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) {
                     if (segRepEnabled) {
@@ -1868,7 +1868,7 @@ public void flush(boolean force, boolean waitIfOngoing) throws EngineException {
             // only after the active reader is updated. This ensures that a flush does not wipe out a required commit point file
             // while we are
             // in refresh listeners.
-            final GatedCloseable<IndexCommit> latestCommit = engineConfig.getIndexSettings().isSegRepEnabled()
+            final GatedCloseable<IndexCommit> latestCommit = engineConfig.getIndexSettings().isSegRepEnabledOrRemoteNode()
                 ?
acquireLastIndexCommit(false) : null; commitIndexWriter(indexWriter, translogManager.getTranslogUUID()); diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java index ed8dba2f8902d..1e1825e1f8ace 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ -436,7 +436,8 @@ protected final void closeNoLock(String reason, CountDownLatch closedLatch) { This is not required for remote store implementations given on failover the replica re-syncs with the store during promotion. */ - if (engineConfig.getIndexSettings().isRemoteStoreEnabled() == false) { + if (engineConfig.getIndexSettings().isRemoteStoreEnabled() == false + && engineConfig.getIndexSettings().isRemoteNode() == false) { latestSegmentInfos.counter = latestSegmentInfos.counter + SI_COUNTER_INCREMENT; latestSegmentInfos.changed(); } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactory.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactory.java index 9a146be96c9de..e4c7eb56d02c6 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactory.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactory.java @@ -68,7 +68,7 @@ public RemoteStoreStatsTrackerFactory(ClusterService clusterService, Settings se @Override public void afterIndexShardCreated(IndexShard indexShard) { - if (indexShard.indexSettings().isRemoteStoreEnabled() == false) { + if (indexShard.indexSettings().isRemoteStoreEnabled() == false && indexShard.indexSettings().isRemoteNode() == false) { return; } ShardId shardId = indexShard.shardId(); diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 7b9c1d3aa548f..0e625e9f30320 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -1059,7 +1059,7 @@ public ReplicationTracker( this.fileBasedRecoveryThreshold = IndexSettings.FILE_BASED_RECOVERY_THRESHOLD_SETTING.get(indexSettings.getSettings()); this.safeCommitInfoSupplier = safeCommitInfoSupplier; this.onReplicationGroupUpdated = onReplicationGroupUpdated; - this.latestReplicationCheckpoint = indexSettings.isSegRepEnabled() ? ReplicationCheckpoint.empty(shardId) : null; + this.latestReplicationCheckpoint = indexSettings.isSegRepEnabledOrRemoteNode() ? 
ReplicationCheckpoint.empty(shardId) : null;
         assert Version.V_EMPTY.equals(indexSettings.getIndexVersionCreated()) == false;
         assert invariant();
     }
@@ -1173,7 +1173,7 @@ public synchronized void updateGlobalCheckpointForShard(final String allocationI
      * @param visibleCheckpoint the visible checkpoint
      */
     public synchronized void updateVisibleCheckpointForShard(final String allocationId, final ReplicationCheckpoint visibleCheckpoint) {
-        assert indexSettings.isSegRepEnabled();
+        assert indexSettings.isSegRepEnabledOrRemoteNode();
         assert primaryMode;
         assert handoffInProgress == false;
         assert invariant();
@@ -1217,7 +1217,7 @@ public synchronized void updateVisibleCheckpointForShard(final String allocation
      * @param checkpoint {@link ReplicationCheckpoint}
      */
     public synchronized void setLatestReplicationCheckpoint(ReplicationCheckpoint checkpoint) {
-        assert indexSettings.isSegRepEnabled();
+        assert indexSettings.isSegRepEnabledOrRemoteNode();
         if (checkpoint.equals(latestReplicationCheckpoint) == false) {
             this.latestReplicationCheckpoint = checkpoint;
         }
@@ -1269,7 +1269,7 @@ && isPrimaryRelocation(allocationId) == false
      * @param checkpoint {@link ReplicationCheckpoint}
      */
     public synchronized void startReplicationLagTimers(ReplicationCheckpoint checkpoint) {
-        assert indexSettings.isSegRepEnabled();
+        assert indexSettings.isSegRepEnabledOrRemoteNode();
         if (checkpoint.equals(latestReplicationCheckpoint) == false) {
             this.latestReplicationCheckpoint = checkpoint;
         }
@@ -1294,7 +1294,7 @@ && isPrimaryRelocation(e.getKey()) == false
      * V2 - Set of {@link SegmentReplicationShardStats} per shard in this primary's replication group.
      */
     public synchronized Set<SegmentReplicationShardStats> getSegmentReplicationStats() {
-        assert indexSettings.isSegRepEnabled();
+        assert indexSettings.isSegRepEnabledOrRemoteNode();
         if (primaryMode) {
             return this.checkpoints.entrySet()
                 .stream()
diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java
index 977155a1cbb72..72ce858661031 100644
--- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java
+++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java
@@ -234,6 +234,9 @@
 import static org.opensearch.index.seqno.SequenceNumbers.LOCAL_CHECKPOINT_KEY;
 import static org.opensearch.index.seqno.SequenceNumbers.MAX_SEQ_NO;
 import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;
+import static org.opensearch.index.shard.IndexShard.ShardMigrationState.REMOTE_MIGRATING_SEEDED;
+import static org.opensearch.index.shard.IndexShard.ShardMigrationState.REMOTE_MIGRATING_UNSEEDED;
+import static org.opensearch.index.shard.IndexShard.ShardMigrationState.REMOTE_NON_MIGRATING;
 import static org.opensearch.index.translog.Translog.Durability;
 import static org.opensearch.index.translog.Translog.TRANSLOG_UUID_KEY;
 
@@ -346,6 +349,12 @@ Runnable getGlobalCheckpointSyncer() {
     private final List<ReferenceManager.RefreshListener> internalRefreshListener = new ArrayList<>();
     private final RemoteStoreFileDownloader fileDownloader;
     private final RecoverySettings recoverySettings;
+    /*
+    On source doc rep node, it will be DOCREP_NON_MIGRATING.
+    On source remote node, it will be REMOTE_MIGRATING_SEEDED when relocating from a remote node
+    On source remote node, it will be REMOTE_MIGRATING_UNSEEDED when relocating from a docrep node
+    */
+    private final ShardMigrationState shardMigrationState;
 
     public IndexShard(
         final ShardRouting shardRouting,
@@ -374,7 +383,8 @@ public IndexShard(
         final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory,
         final Supplier<TimeValue> clusterRemoteTranslogBufferIntervalSupplier,
         final String nodeId,
-        final RecoverySettings recoverySettings
+        final RecoverySettings recoverySettings,
+        boolean seedRemote
     ) throws IOException {
         super(shardRouting.shardId(), indexSettings);
         assert shardRouting.initializing();
@@ -394,7 +404,7 @@ public IndexShard(
             logger,
             threadPool,
             this::getEngine,
-            indexSettings.isRemoteTranslogStoreEnabled(),
+            indexSettings.isRemoteNode(),
             () -> getRemoteTranslogUploadBufferInterval(clusterRemoteTranslogBufferIntervalSupplier)
         );
         this.mapperService = mapperService;
@@ -472,6 +482,7 @@ public boolean shouldCache(Query query) {
         this.remoteStoreStatsTrackerFactory = remoteStoreStatsTrackerFactory;
         this.recoverySettings = recoverySettings;
         this.fileDownloader = new RemoteStoreFileDownloader(shardRouting.shardId(), threadPool, recoverySettings);
+        this.shardMigrationState = getShardMigrationState(indexSettings, seedRemote);
     }
 
     public ThreadPool getThreadPool() {
@@ -482,6 +493,20 @@ public Store store() {
         return this.store;
     }
 
+    public boolean isMigratingToRemote() {
+        // true only when the shard is on a remote node but the index setting does not yet say so
+        return shardMigrationState == REMOTE_MIGRATING_UNSEEDED || shardMigrationState == REMOTE_MIGRATING_SEEDED;
+    }
+
+    public boolean shouldSeedRemoteStore() {
+        // true only when relocating from docrep to the remote store
+        return shardMigrationState == REMOTE_MIGRATING_UNSEEDED;
+    }
+
+    public boolean isRemoteSeeded() {
+        return shardMigrationState == REMOTE_MIGRATING_SEEDED;
+    }
+
     public Store remoteStore() {
         return this.remoteStore;
     }
@@ -625,7 +650,7 @@ public void updateShardState(
 
                 // Flush here after relocation of primary, so that replica get all changes from new primary rather than waiting for more
                 // docs to get indexed.
-                if (indexSettings.isSegRepEnabled()) {
+                if (indexSettings.isSegRepEnabledOrRemoteNode()) {
                     flush(new FlushRequest().waitIfOngoing(true).force(true));
                 }
             } else if (currentRouting.primary()
@@ -705,7 +730,7 @@ public void updateShardState(
                     + newRouting;
                 assert getOperationPrimaryTerm() == newPrimaryTerm;
                 try {
-                    if (indexSettings.isSegRepEnabled()) {
+                    if (indexSettings.isSegRepEnabledOrRemoteNode()) {
                         // this Shard's engine was read only, we need to update its engine before restoring local history from xlog.
                         assert newRouting.primary() && currentRouting.primary() == false;
                         ReplicationTimer timer = new ReplicationTimer();
@@ -725,7 +750,7 @@ public void updateShardState(
                 }
                 replicationTracker.activatePrimaryMode(getLocalCheckpoint());
-                if (indexSettings.isSegRepEnabled()) {
+                if (indexSettings.isSegRepEnabledOrRemoteNode()) {
                     // force publish a checkpoint once in primary mode so that replicas not caught up to previous primary
                     // are brought up to date.
                     checkpointPublisher.publish(this, getLatestReplicationCheckpoint());
@@ -839,8 +864,8 @@ public IndexShardState markAsRecovering(String reason, RecoveryState recoverySta
      * relocated. After all operations are successfully blocked, performSegRep is executed followed by target relocation
      * handoff.
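      * (The same handoff path is used while migrating to remote: the translog is force-synced
      * before handoff for a migrating shard, see the syncTranslog change below.)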
* + * @param consumer a {@link Runnable} that is executed after performSegRep * @param performSegRep a {@link Runnable} that is executed after operations are blocked - * @param consumer a {@link Runnable} that is executed after performSegRep * @throws IllegalIndexShardStateException if the shard is not relocating due to concurrent cancellation * @throws IllegalStateException if the relocation target is no longer part of the replication group * @throws InterruptedException if blocking operations is interrupted @@ -858,7 +883,8 @@ public void relocated( indexShardOperationPermits.blockOperations(30, TimeUnit.MINUTES, () -> { forceRefreshes.close(); - boolean syncTranslog = isRemoteTranslogEnabled() && Durability.ASYNC == indexSettings.getTranslogDurability(); + boolean syncTranslog = (isRemoteTranslogEnabled() || this.isMigratingToRemote()) + && Durability.ASYNC == indexSettings.getTranslogDurability(); // Since all the index permits are acquired at this point, the translog buffer will not change. // It is safe to perform sync of translogs now as this will ensure for remote-backed indexes, the // translogs has been uploaded to the remote store. @@ -881,6 +907,7 @@ public void relocated( : "in-flight operations in progress while moving shard state to relocated"; performSegRep.run(); + /* * We should not invoke the runnable under the mutex as the expected implementation is to handoff the primary context via a * network operation. Doing this under the mutex can implicitly block the cluster state update thread on network operations. @@ -1041,7 +1068,7 @@ private Engine.IndexResult applyIndexOperation( // For Segment Replication enabled replica shards we can be skip parsing the documents as we directly copy segments from primary // shard. - if (indexSettings.isSegRepEnabled() && routingEntry().primary() == false) { + if (indexSettings.isSegRepEnabledOrRemoteNode() && routingEntry().primary() == false) { Engine.Index index = new Engine.Index( new Term(IdFieldMapper.NAME, Uid.encodeId(id)), new ParsedDocument(null, null, id, null, null, sourceToParse.source(), sourceToParse.getMediaType(), null), @@ -1240,7 +1267,7 @@ public Engine.DeleteResult applyDeleteOperationOnPrimary( } public Engine.DeleteResult applyDeleteOperationOnReplica(long seqNo, long opPrimaryTerm, long version, String id) throws IOException { - if (indexSettings.isSegRepEnabled()) { + if (indexSettings.isSegRepEnabledOrRemoteNode()) { final Engine.Delete delete = new Engine.Delete( id, new Term(IdFieldMapper.NAME, Uid.encodeId(id)), @@ -1435,12 +1462,12 @@ public SegmentsStats segmentStats(boolean includeSegmentFileSizes, boolean inclu SegmentsStats segmentsStats = getEngine().segmentsStats(includeSegmentFileSizes, includeUnloadedSegments); segmentsStats.addBitsetMemoryInBytes(shardBitsetFilterCache.getMemorySizeInBytes()); // Populate remote_store stats only if the index is remote store backed - if (indexSettings.isRemoteStoreEnabled()) { + if (indexSettings().isRemoteNode()) { segmentsStats.addRemoteSegmentStats( new RemoteSegmentStats(remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(shardId).stats()) ); } - if (indexSettings.isSegRepEnabled()) { + if (indexSettings.isSegRepEnabledOrRemoteNode()) { segmentsStats.addReplicationStats(getReplicationStats()); } return segmentsStats; @@ -1457,7 +1484,7 @@ public FieldDataStats fieldDataStats(String... 
fields) {
     public TranslogStats translogStats() {
         TranslogStats translogStats = getEngine().translogManager().getTranslogStats();
         // Populate remote_store stats only if the index is remote store backed
-        if (indexSettings.isRemoteStoreEnabled()) {
+        if (indexSettings.isRemoteNode()) {
             translogStats.addRemoteTranslogStats(
                 new RemoteTranslogStats(remoteStoreStatsTrackerFactory.getRemoteTranslogTransferTracker(shardId).stats())
             );
@@ -1496,7 +1523,7 @@ public void flush(FlushRequest request) {
      * {@link org.opensearch.index.translog.TranslogDeletionPolicy} for details
      */
     public void trimTranslog() {
-        if (isRemoteTranslogEnabled()) {
+        if (indexSettings.isRemoteNode()) {
             return;
         }
         verifyNotClosed();
@@ -1661,7 +1688,7 @@ public ReplicationCheckpoint getLatestReplicationCheckpoint() {
      *
      */
    public Tuple<GatedCloseable<SegmentInfos>, ReplicationCheckpoint> getLatestSegmentInfosAndCheckpoint() {
-        assert indexSettings.isSegRepEnabled();
+        assert indexSettings.isSegRepEnabledOrRemoteNode();
 
         // do not close the snapshot - caller will close it.
         GatedCloseable<SegmentInfos> snapshot = null;
@@ -1720,7 +1747,7 @@ ReplicationCheckpoint computeReplicationCheckpoint(SegmentInfos segmentInfos) th
      * @return - True if the shard is able to perform segment replication.
      */
     public boolean isSegmentReplicationAllowed() {
-        if (indexSettings.isSegRepEnabled() == false) {
+        if (indexSettings.isSegRepEnabledOrRemoteNode() == false) {
             logger.trace("Attempting to perform segment replication when it is not enabled on the index");
             return false;
         }
@@ -2016,7 +2043,7 @@ public void close(String reason, boolean flushEngine, boolean deleted) throws IO
     ToDo : Fix this https://github.com/opensearch-project/OpenSearch/issues/8003
      */
     public RemoteSegmentStoreDirectory getRemoteDirectory() {
-        assert indexSettings.isRemoteStoreEnabled();
+        assert indexSettings.isRemoteNode();
         assert remoteStore.directory() instanceof FilterDirectory : "Store.directory is not an instance of FilterDirectory";
         FilterDirectory remoteStoreDirectory = (FilterDirectory) remoteStore.directory();
         FilterDirectory byteSizeCachingStoreDirectory = (FilterDirectory) remoteStoreDirectory.getDelegate();
@@ -2028,8 +2055,8 @@ public RemoteSegmentStoreDirectory getRemoteDirectory() {
      * Returns true iff it is able to verify that remote segment store
      * is in sync with local
      */
-    boolean isRemoteSegmentStoreInSync() {
-        assert indexSettings.isRemoteStoreEnabled();
+    public boolean isRemoteSegmentStoreInSync() {
+        assert indexSettings.isRemoteNode();
         try {
             RemoteSegmentStoreDirectory directory = getRemoteDirectory();
             if (directory.readLatestMetadataFile() != null) {
@@ -2059,6 +2086,46 @@ boolean isRemoteSegmentStoreInSync() {
         return false;
     }
 
+    public void waitForRemoteStoreSync() {
+        waitForRemoteStoreSync(() -> {});
+    }
+
+    /*
+    Blocks the calling thread and waits for the remote store to sync, up to the internal remote upload timeout.
+    Invokes onProgress whenever the uploaded file count on the remote increases.
+    */
+    public void waitForRemoteStoreSync(Runnable onProgress) {
+        assert indexSettings.isRemoteNode();
+        RemoteSegmentStoreDirectory directory = getRemoteDirectory();
+        int segmentUploadedCount = 0;
+        if (shardRouting.primary() == false) {
+            return;
+        }
+        long startNanos = System.nanoTime();
+
+        while (System.nanoTime() - startNanos < getRecoverySettings().internalRemoteUploadTimeout().nanos()) {
+            try {
+                if (isRemoteSegmentStoreInSync()) {
+                    break;
+                } else {
+                    if (directory.getSegmentsUploadedToRemoteStore().size() > segmentUploadedCount) {
+                        onProgress.run();
+                        logger.debug("Uploaded segment count {}",
directory.getSegmentsUploadedToRemoteStore().size());
+                        segmentUploadedCount = directory.getSegmentsUploadedToRemoteStore().size();
+                    }
+                    try {
+                        Thread.sleep(TimeValue.timeValueSeconds(30).millis());
+                    } catch (InterruptedException ie) {
+                        throw new OpenSearchException("Interrupted waiting for completion of [{}]", ie);
+                    }
+                }
+            } catch (AlreadyClosedException e) {
+                // There is no point in waiting as the shard is now closed.
+                return;
+            }
+        }
+    }
+
     public void preRecovery() {
         final IndexShardState currentState = this.state; // single volatile read
         if (currentState == IndexShardState.CLOSED) {
@@ -2203,7 +2270,7 @@ public long recoverLocallyAndFetchStartSeqNo(boolean localTranslog) {
      * @return the starting sequence number from which the recovery should start.
      */
     private long recoverLocallyUptoLastCommit() {
-        assert isRemoteTranslogEnabled() : "Remote translog store is not enabled";
+        assert indexSettings.isRemoteNode() : "Remote translog store is not enabled";
         long seqNo;
         validateLocalRecoveryState();
 
@@ -2449,7 +2516,7 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier, b
         synchronized (engineMutex) {
             assert currentEngineReference.get() == null : "engine is running";
             verifyNotClosed();
-            if (indexSettings.isRemoteStoreEnabled()) {
+            if (indexSettings.isRemoteStoreEnabled() || this.isRemoteSeeded()) {
                 // Download missing segments from remote segment store.
                 if (syncFromRemote) {
                     syncSegmentsFromRemoteSegmentStore(false);
@@ -2488,7 +2555,7 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier, b
             onNewEngine(newEngine);
             currentEngineReference.set(newEngine);
-            if (indexSettings.isSegRepEnabled()) {
+            if (indexSettings.isSegRepEnabledOrRemoteNode()) {
                 // set initial replication checkpoints into tracker.
                 updateReplicationCheckpoint();
             }
@@ -2900,7 +2967,7 @@ public Translog.Snapshot getHistoryOperations(String reason, long startingSeqNo,
      * This method should only be invoked if Segment Replication or Remote Store is not enabled.
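      * (Both conditions now collapse into the single isSegRepEnabledOrRemoteNode() check below.)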
     */
     public Translog.Snapshot getHistoryOperationsFromTranslog(long startingSeqNo, long endSeqNo) throws IOException {
-        assert (indexSettings.isSegRepEnabled() || indexSettings.isRemoteStoreEnabled()) == false
+        assert indexSettings.isSegRepEnabledOrRemoteNode() == false
             : "unsupported operation for segment replication enabled indices or remote store backed indices";
         return getEngine().translogManager().newChangesSnapshot(startingSeqNo, endSeqNo, true);
     }
 
@@ -3067,7 +3134,7 @@ public Set<SegmentReplicationShardStats> getReplicationStatsForTrackedReplicas()
     }
 
     public ReplicationStats getReplicationStats() {
-        if (indexSettings.isSegRepEnabled() && routingEntry().primary()) {
+        if (indexSettings.isSegRepEnabledOrRemoteNode() && routingEntry().primary()) {
             final Set<SegmentReplicationShardStats> stats = getReplicationStatsForTrackedReplicas();
             long maxBytesBehind = stats.stream().mapToLong(SegmentReplicationShardStats::getBytesBehindCount).max().orElse(0L);
             long totalBytesBehind = stats.stream().mapToLong(SegmentReplicationShardStats::getBytesBehindCount).sum();
@@ -3446,7 +3513,14 @@ public void activateWithPrimaryContext(final ReplicationTracker.PrimaryContext p
             + "] does not contain relocation target ["
             + routingEntry()
             + "]";
-        assert getLocalCheckpoint() == primaryContext.getCheckpointStates().get(routingEntry().allocationId().getId()).getLocalCheckpoint()
+        String allocationId = routingEntry().allocationId().getId();
+        if (isRemoteStoreEnabled() || isMigratingToRemote()) {
+            // For remote backed indexes, the old primary may not have an updated value of the new primary's local checkpoint.
+            // But the new primary is always updated with the data in the remote store and is on par with the old primary.
+            // So we can use a stricter check where the new primary's local checkpoint is checked against that of the old primary.
+            allocationId = primaryContext.getRoutingTable().primaryShard().allocationId().getId();
+        }
+        assert getLocalCheckpoint() == primaryContext.getCheckpointStates().get(allocationId).getLocalCheckpoint()
             || indexSettings().getTranslogDurability() == Durability.ASYNC : "local checkpoint ["
                 + getLocalCheckpoint()
                 + "] does not match checkpoint from primary context ["
@@ -3459,7 +3533,7 @@ assert getLocalCheckpoint() == primaryContext.getCheckpointStates().get(routingE
         }
 
     private void postActivatePrimaryMode() {
-        if (indexSettings.isRemoteStoreEnabled()) {
+        if (indexSettings.isRemoteNode()) {
             // We make sure to upload translog (even if it does not contain any operations) to remote translog.
             // This helps to get a consistent state in remote store where both remote segment store and remote
             // translog contain data.
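
Taken together, the IndexShard changes in this file reduce to a small state machine. The following is a rough, self-contained sketch, not part of the patch; the class and method names are illustrative, and it simply mirrors the getShardMigrationState helper added near the end of this file:

    class ShardMigrationStateSketch {
        // How a shard classifies itself at construction time (mirrors IndexShard.getShardMigrationState).
        enum ShardMigrationState { REMOTE_NON_MIGRATING, REMOTE_MIGRATING_SEEDED, REMOTE_MIGRATING_UNSEEDED, DOCREP_NON_MIGRATING }

        static ShardMigrationState classify(boolean onRemoteNode, boolean remoteStoreIndexSetting, boolean seedRemote) {
            if (onRemoteNode && remoteStoreIndexSetting) {
                return ShardMigrationState.REMOTE_NON_MIGRATING;     // steady-state remote-backed shard
            } else if (onRemoteNode) {
                return seedRemote
                    ? ShardMigrationState.REMOTE_MIGRATING_UNSEEDED  // docrep -> remote: the remote store must be seeded first
                    : ShardMigrationState.REMOTE_MIGRATING_SEEDED;   // remote -> remote: the store is already seeded
            }
            return ShardMigrationState.DOCREP_NON_MIGRATING;         // the shard stays on a docrep node
        }
    }

isMigratingToRemote() is true for exactly the two REMOTE_MIGRATING_* states, and shouldSeedRemoteStore() only for REMOTE_MIGRATING_UNSEEDED.
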
@@ -3846,14 +3920,14 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro internalRefreshListener.clear(); internalRefreshListener.add(new RefreshMetricUpdater(refreshMetric)); - if (indexSettings.isSegRepEnabled()) { + if (indexSettings.isSegRepEnabledOrRemoteNode()) { internalRefreshListener.add(new ReplicationCheckpointUpdater()); } if (this.checkpointPublisher != null && shardRouting.primary() && indexSettings.isSegRepLocalEnabled()) { internalRefreshListener.add(new CheckpointRefreshListener(this, this.checkpointPublisher)); } - if (isRemoteStoreEnabled()) { + if (isRemoteStoreEnabled() || isMigratingToRemote()) { internalRefreshListener.add( new RemoteStoreRefreshListener( this, @@ -3867,10 +3941,15 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro With segment replication enabled for primary relocation, recover replica shard initially as read only and change to a writeable engine during relocation handoff after a round of segment replication. */ - boolean isReadOnlyReplica = indexSettings.isSegRepEnabled() + boolean isReadOnlyReplica = indexSettings.isSegRepEnabledOrRemoteNode() && (shardRouting.primary() == false || (shardRouting.isRelocationTarget() && recoveryState.getStage() != RecoveryState.Stage.FINALIZE)); + // For mixed mode, when relocating from doc rep to remote node, we use a writeable engine + if (shouldSeedRemoteStore()) { + isReadOnlyReplica = false; + } + return this.engineConfigFactory.newEngineConfig( shardId, threadPool, @@ -3895,7 +3974,7 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro this::getOperationPrimaryTerm, tombstoneDocSupplier(), isReadOnlyReplica, - this::isStartedPrimary, + this::enableUploadToRemoteTranslog, translogFactorySupplier.apply(indexSettings, shardRouting), isTimeSeriesDescSortOptimizationEnabled() ? DataStream.TIMESERIES_LEAF_SORTER : null // DESC @timestamp default order for // timeseries @@ -3916,7 +3995,24 @@ public boolean isRemoteTranslogEnabled() { * translog uploads. */ public boolean isStartedPrimary() { - return getReplicationTracker().isPrimaryMode() && state() == IndexShardState.STARTED; + return (getReplicationTracker().isPrimaryMode() && state() == IndexShardState.STARTED); + } + + public boolean enableUploadToRemoteTranslog() { + return isStartedPrimary() || (shouldSeedRemoteStore() && hasOneRemoteSegmentSyncHappened()); + } + + private boolean hasOneRemoteSegmentSyncHappened() { + assert indexSettings.isRemoteNode(); + // We upload remote translog only after one remote segment upload in case of migration + RemoteSegmentStoreDirectory rd = getRemoteDirectory(); + AtomicBoolean segment_n_uploaded = new AtomicBoolean(false); + rd.getSegmentsUploadedToRemoteStore().forEach((key, value) -> { + if (key.startsWith("segments")) { + segment_n_uploaded.set(true); + } + }); + return segment_n_uploaded.get(); } /** @@ -4229,7 +4325,7 @@ private void innerAcquireReplicaOperationPermit( ); // With Segment Replication enabled, we never want to reset a replica's engine unless // it is promoted to primary. 
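             // (After the rename below, the same guarantee extends to remote-node shards during migration.)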
- if (currentGlobalCheckpoint < maxSeqNo && indexSettings.isSegRepEnabled() == false) { + if (currentGlobalCheckpoint < maxSeqNo && indexSettings.isSegRepEnabledOrRemoteNode() == false) { resetEngineToGlobalCheckpoint(); } else { getEngine().translogManager().rollTranslogGeneration(); @@ -4521,10 +4617,10 @@ public final boolean isSearchIdle() { public final boolean isSearchIdleSupported() { // If the index is remote store backed, then search idle is not supported. This is to ensure that async refresh // task continues to upload to remote store periodically. - if (isRemoteTranslogEnabled()) { + if (isRemoteTranslogEnabled() || indexSettings.isRemoteNode()) { return false; } - return indexSettings.isSegRepEnabled() == false || indexSettings.getNumberOfReplicas() == 0; + return indexSettings.isSegRepEnabledOrRemoteNode() == false || indexSettings.getNumberOfReplicas() == 0; } /** @@ -4786,10 +4882,10 @@ public void close() throws IOException { } }; IOUtils.close(currentEngineReference.getAndSet(readOnlyEngine)); - if (indexSettings.isRemoteStoreEnabled()) { + if (indexSettings.isRemoteStoreEnabled() || this.isRemoteSeeded()) { syncSegmentsFromRemoteSegmentStore(false); } - if (indexSettings.isRemoteTranslogStoreEnabled() && shardRouting.primary()) { + if ((indexSettings.isRemoteTranslogStoreEnabled() || this.isRemoteSeeded()) && shardRouting.primary()) { syncRemoteTranslogAndUpdateGlobalCheckpoint(); } newEngineReference.set(engineFactory.newReadWriteEngine(newEngineConfig(replicationTracker))); @@ -4808,7 +4904,9 @@ public void close() throws IOException { // of truth for translog, we play all translogs that exists locally. Otherwise, the recoverUpto happens upto global checkpoint. // We also replay all local translog ops with Segment replication, because on engine swap our local translog may // hold more ops than the global checkpoint. - long recoverUpto = this.isRemoteTranslogEnabled() || indexSettings().isSegRepEnabled() ? Long.MAX_VALUE : globalCheckpoint; + long recoverUpto = this.isRemoteTranslogEnabled() || indexSettings().isSegRepEnabledOrRemoteNode() + ? Long.MAX_VALUE + : globalCheckpoint; newEngineReference.get() .translogManager() .recoverFromTranslog(translogRunner, newEngineReference.get().getProcessedLocalCheckpoint(), recoverUpto); @@ -4837,6 +4935,16 @@ public void deleteTranslogFilesFromRemoteTranslog() throws IOException { RemoteFsTranslog.cleanup(repository, shardId, getThreadPool()); } + /* + Cleans up remote store and remote translog contents. 
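+    It deletes the shard's uploaded translog files and treats every remote segment metadata file as stale.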
+ This is used in remote store migration, where we want to clean up all stale segment and translog data + and seed the remote store afresh + */ + public void deleteRemoteStoreContents() throws IOException { + deleteTranslogFilesFromRemoteTranslog(); + getRemoteDirectory().deleteStaleSegments(0); + } + public void syncTranslogFilesFromRemoteTranslog() throws IOException { TranslogFactory translogFactory = translogFactorySupplier.apply(indexSettings, shardRouting); assert translogFactory instanceof RemoteBlobStoreInternalTranslogFactory; @@ -4862,7 +4970,7 @@ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal) throws IOE public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, final Runnable onFileSync) throws IOException { boolean syncSegmentSuccess = false; long startTimeMs = System.currentTimeMillis(); - assert indexSettings.isRemoteStoreEnabled(); + assert indexSettings.isRemoteStoreEnabled() || this.isRemoteSeeded(); logger.trace("Downloading segments from remote segment store"); RemoteSegmentStoreDirectory remoteDirectory = getRemoteDirectory(); // We need to call RemoteSegmentStoreDirectory.init() in order to get latest metadata of the files that @@ -5127,4 +5235,20 @@ private TimeValue getRemoteTranslogUploadBufferInterval(Supplier clus public AsyncIOProcessor getTranslogSyncProcessor() { return translogSyncProcessor; } + + enum ShardMigrationState { + REMOTE_NON_MIGRATING, + REMOTE_MIGRATING_SEEDED, + REMOTE_MIGRATING_UNSEEDED, + DOCREP_NON_MIGRATING + } + + static ShardMigrationState getShardMigrationState(IndexSettings indexSettings, boolean shouldSeed) { + if (indexSettings.isRemoteNode() && indexSettings.isRemoteStoreEnabled()) { + return REMOTE_NON_MIGRATING; + } else if (indexSettings.isRemoteNode()) { + return shouldSeed ? REMOTE_MIGRATING_UNSEEDED : REMOTE_MIGRATING_SEEDED; + } + return ShardMigrationState.DOCREP_NON_MIGRATING; + } } diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index 7bb80b736693f..fb96102bc6094 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -528,7 +528,7 @@ private void initializeRemoteDirectoryOnTermUpdate() throws IOException { * @return true iff the shard is a started with primary mode true or it is local or snapshot recovery. 
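      * (A shard that is seeding the remote store during docrep-to-remote migration is also ready;
      * see the shouldSeedRemoteStore() clause added below.)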
*/ private boolean isReadyForUpload() { - boolean isReady = indexShard.isStartedPrimary() || isLocalOrSnapshotRecovery(); + boolean isReady = indexShard.isStartedPrimary() || isLocalOrSnapshotRecovery() || indexShard.shouldSeedRemoteStore(); if (isReady == false) { StringBuilder sb = new StringBuilder("Skipped syncing segments with"); diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index 3faef2da05320..5f09b1a0802f3 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -38,13 +38,11 @@ import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.search.Sort; -import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.opensearch.ExceptionsHelper; -import org.opensearch.OpenSearchException; import org.opensearch.action.StepListener; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MappingMetadata; @@ -194,7 +192,7 @@ void recoverFromLocalShards( // copied segments - we will also see them in stats etc. indexShard.getEngine().forceMerge(false, -1, false, false, false, UUIDs.randomBase64UUID()); if (indexShard.isRemoteTranslogEnabled() && indexShard.shardRouting.primary()) { - waitForRemoteStoreSync(indexShard); + indexShard.waitForRemoteStoreSync(); if (indexShard.isRemoteSegmentStoreInSync() == false) { throw new IndexShardRecoveryException( indexShard.shardId(), @@ -436,7 +434,7 @@ void recoverFromSnapshotAndRemoteStore( indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); indexShard.finalizeRecovery(); if (indexShard.isRemoteTranslogEnabled() && indexShard.shardRouting.primary()) { - waitForRemoteStoreSync(indexShard); + indexShard.waitForRemoteStoreSync(); if (indexShard.isRemoteSegmentStoreInSync() == false) { listener.onFailure(new IndexShardRestoreFailedException(shardId, "Failed to upload to remote segment store")); return; @@ -722,7 +720,7 @@ private void restore( indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); indexShard.finalizeRecovery(); if (indexShard.isRemoteTranslogEnabled() && indexShard.shardRouting.primary()) { - waitForRemoteStoreSync(indexShard); + indexShard.waitForRemoteStoreSync(); if (indexShard.isRemoteSegmentStoreInSync() == false) { listener.onFailure(new IndexShardRestoreFailedException(shardId, "Failed to upload to remote segment store")); return; @@ -796,31 +794,4 @@ private void bootstrap(final IndexShard indexShard, final Store store) throws IO ); store.associateIndexWithNewTranslog(translogUUID); } - - /* - Blocks the calling thread, waiting for the remote store to get synced till internal Remote Upload Timeout - */ - private void waitForRemoteStoreSync(IndexShard indexShard) { - if (indexShard.shardRouting.primary() == false) { - return; - } - long startNanos = System.nanoTime(); - - while (System.nanoTime() - startNanos < indexShard.getRecoverySettings().internalRemoteUploadTimeout().nanos()) { - try { - if (indexShard.isRemoteSegmentStoreInSync()) { - break; - } else { - try { - Thread.sleep(TimeValue.timeValueMinutes(1).seconds()); - } catch (InterruptedException ie) { - throw new OpenSearchException("Interrupted waiting for completion 
of [{}]", ie); - } - } - } catch (AlreadyClosedException e) { - // There is no point in waiting as shard is now closed . - return; - } - } - } } diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java index 1930a37daa400..0992d86d6f0aa 100644 --- a/server/src/main/java/org/opensearch/index/store/Store.java +++ b/server/src/main/java/org/opensearch/index/store/Store.java @@ -385,7 +385,7 @@ public MetadataSnapshot getMetadata(SegmentInfos segmentInfos) throws IOExceptio * @return {@link Map} map file name to {@link StoreFileMetadata}. */ public Map getSegmentMetadataMap(SegmentInfos segmentInfos) throws IOException { - assert indexSettings.isSegRepEnabled(); + assert indexSettings.isSegRepEnabledOrRemoteNode(); failIfCorrupted(); try { return loadMetadata(segmentInfos, directory, logger, true).fileMetadata; @@ -893,7 +893,7 @@ public void beforeClose() { * @throws IOException when there is an IO error committing. */ public void commitSegmentInfos(SegmentInfos latestSegmentInfos, long maxSeqNo, long processedCheckpoint) throws IOException { - assert indexSettings.isSegRepEnabled(); + assert indexSettings.isSegRepEnabledOrRemoteNode() || indexSettings.isRemoteNode(); metadataLock.writeLock().lock(); try { final Map userData = new HashMap<>(latestSegmentInfos.getUserData()); diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index 7b969a37e4aa6..43eec01b2d365 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -334,7 +334,7 @@ private boolean prepareAndUpload(Long primaryTerm, Long generation) throws IOExc } private boolean upload(long primaryTerm, long generation, long maxSeqNo) throws IOException { - logger.trace("uploading translog for {} {}", primaryTerm, generation); + logger.trace("uploading translog for primary term {} generation {}", primaryTerm, generation); try ( TranslogCheckpointTransferSnapshot transferSnapshotProvider = new TranslogCheckpointTransferSnapshot.Builder( primaryTerm, diff --git a/server/src/main/java/org/opensearch/index/translog/Translog.java b/server/src/main/java/org/opensearch/index/translog/Translog.java index 9f877e87415dd..e78300e368099 100644 --- a/server/src/main/java/org/opensearch/index/translog/Translog.java +++ b/server/src/main/java/org/opensearch/index/translog/Translog.java @@ -525,7 +525,7 @@ TranslogWriter createWriter( tragedy, persistedSequenceNumberConsumer, bigArrays, - indexSettings.isRemoteTranslogStoreEnabled() + indexSettings.isRemoteNode() ); } catch (final IOException e) { throw new TranslogException(shardId, "failed to create new translog file", e); diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 0e64894e6f708..9bc81c1826c2d 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -150,6 +150,7 @@ import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.node.Node; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.plugins.IndexStorePlugin; import org.opensearch.plugins.PluginsService; import 
org.opensearch.repositories.RepositoriesService; @@ -201,6 +202,7 @@ import static org.opensearch.index.IndexService.IndexCreationContext.CREATE_INDEX; import static org.opensearch.index.IndexService.IndexCreationContext.METADATA_VERIFICATION; import static org.opensearch.index.query.AbstractQueryBuilder.parseInnerQueryBuilder; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteDataAttributePresent; import static org.opensearch.search.SearchService.ALLOW_EXPENSIVE_QUERIES; /** @@ -503,7 +505,12 @@ protected void closeInternal() { this.allowExpensiveQueries = ALLOW_EXPENSIVE_QUERIES.get(clusterService.getSettings()); clusterService.getClusterSettings().addSettingsUpdateConsumer(ALLOW_EXPENSIVE_QUERIES, this::setAllowExpensiveQueries); this.remoteDirectoryFactory = remoteDirectoryFactory; - this.translogFactorySupplier = getTranslogFactorySupplier(repositoriesServiceSupplier, threadPool, remoteStoreStatsTrackerFactory); + this.translogFactorySupplier = getTranslogFactorySupplier( + repositoriesServiceSupplier, + threadPool, + remoteStoreStatsTrackerFactory, + settings + ); this.searchRequestStats = searchRequestStats; this.clusterDefaultRefreshInterval = CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.get(clusterService.getSettings()); clusterService.getClusterSettings() @@ -533,7 +540,8 @@ private void onRefreshIntervalUpdate(TimeValue clusterDefaultRefreshInterval) { private static BiFunction getTranslogFactorySupplier( Supplier repositoriesServiceSupplier, ThreadPool threadPool, - RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory + RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, + Settings settings ) { return (indexSettings, shardRouting) -> { if (indexSettings.isRemoteTranslogStoreEnabled() && shardRouting.primary()) { @@ -543,6 +551,13 @@ private static BiFunction getTrans indexSettings.getRemoteStoreTranslogRepository(), remoteStoreStatsTrackerFactory.getRemoteTranslogTransferTracker(shardRouting.shardId()) ); + } else if (isRemoteDataAttributePresent(settings) && shardRouting.primary()) { + return new RemoteBlobStoreInternalTranslogFactory( + repositoriesServiceSupplier, + threadPool, + RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo(indexSettings.getNodeSettings()), + remoteStoreStatsTrackerFactory.getRemoteTranslogTransferTracker(shardRouting.shardId()) + ); } return new InternalTranslogFactory(); }; @@ -932,7 +947,7 @@ private EngineFactory getEngineFactory(final IndexSettings idxSettings) { if (idxSettings.isRemoteSnapshot()) { return config -> new ReadOnlyEngine(config, new SeqNoStats(0, 0, 0), new TranslogStats(), true, Function.identity(), false); } - if (idxSettings.isSegRepEnabled()) { + if (idxSettings.isSegRepEnabledOrRemoteNode() || idxSettings.isRemoteNode()) { return new NRTReplicationEngineFactory(); } return new InternalEngineFactory(); @@ -1032,7 +1047,10 @@ public IndexShard createShard( globalCheckpointSyncer, retentionLeaseSyncer, checkpointPublisher, - remoteStoreStatsTrackerFactory + remoteStoreStatsTrackerFactory, + repositoriesService, + targetNode, + sourceNode ); indexShard.addShardFailureCallback(onShardFailure); indexShard.startRecovery(recoveryState, recoveryTargetService, recoveryListener, repositoriesService, mapping -> { diff --git a/server/src/main/java/org/opensearch/indices/recovery/MultiFileWriter.java b/server/src/main/java/org/opensearch/indices/recovery/MultiFileWriter.java index 29ee097d36cac..fac6924435cf3 100644 --- 
a/server/src/main/java/org/opensearch/indices/recovery/MultiFileWriter.java +++ b/server/src/main/java/org/opensearch/indices/recovery/MultiFileWriter.java @@ -161,7 +161,7 @@ private void innerWriteFileChunk(StoreFileMetadata fileMetadata, long position, + "] in " + Arrays.toString(store.directory().listAll()); // With Segment Replication, we will fsync after a full commit has been received. - if (store.indexSettings().isSegRepEnabled() == false) { + if (store.indexSettings().isSegRepEnabledOrRemoteNode() == false) { store.directory().sync(Collections.singleton(temporaryFileName)); } IndexOutput remove = removeOpenIndexOutputs(name); diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java index cb2bedf00de99..30f517fda9931 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java @@ -377,7 +377,7 @@ private Tuple createRecovery request.targetNode(), recoverySettings, throttleTime -> shard.recoveryStats().addThrottleTime(throttleTime), - shard.isRemoteTranslogEnabled() + shard.isRemoteTranslogEnabled() || request.targetNode().isRemoteStoreNode() ); handler = RecoverySourceHandlerFactory.create(shard, recoveryTarget, request, recoverySettings); return Tuple.tuple(handler, recoveryTarget); diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java index 4232d32987e86..227496f72f83d 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java @@ -189,7 +189,7 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh public void startRecovery(final IndexShard indexShard, final DiscoveryNode sourceNode, final RecoveryListener listener) { // create a new recovery status, and process... final long recoveryId = onGoingRecoveries.start( - new RecoveryTarget(indexShard, sourceNode, listener), + new RecoveryTarget(indexShard, sourceNode, listener, threadPool), recoverySettings.activityTimeout() ); // we fork off quickly here and go async but this is called from the cluster state applier thread too and that can cause @@ -246,7 +246,7 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi logger.trace("{} preparing shard for peer recovery", recoveryTarget.shardId()); indexShard.prepareForIndexRecovery(); final boolean hasRemoteSegmentStore = indexShard.indexSettings().isRemoteStoreEnabled(); - if (hasRemoteSegmentStore) { + if (hasRemoteSegmentStore || indexShard.isRemoteSeeded()) { // ToDo: This is a temporary mitigation to not fail the peer recovery flow in case there is // an exception while downloading segments from remote store. For remote backed indexes, we // plan to revamp this flow so that node-node segment copy will not happen. 
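// Illustrative sketch, not part of the patch: the translog-factory selection added to
// IndicesService above reduces to roughly the following decision. newRemoteFactory(...)
// is a hypothetical stand-in for constructing RemoteBlobStoreInternalTranslogFactory;
// the other names are taken from the hunks above.
private TranslogFactory selectTranslogFactory(IndexSettings indexSettings, ShardRouting shardRouting, Settings nodeSettings) {
    if (shardRouting.primary() && indexSettings.isRemoteTranslogStoreEnabled()) {
        // remote-store-backed index: the repository name comes from the index settings
        return newRemoteFactory(indexSettings.getRemoteStoreTranslogRepository());
    }
    if (shardRouting.primary() && isRemoteDataAttributePresent(nodeSettings)) {
        // docrep index on a remote-backed node (the migration/seeding case): the repository
        // name comes from the node attribute read by getRemoteStoreTranslogRepo(...)
        return newRemoteFactory(RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo(nodeSettings));
    }
    // replicas and plain document-replication shards keep the local translog
    return new InternalTranslogFactory();
}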
@@ -260,7 +260,8 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi ); } } - final boolean hasRemoteTranslog = recoveryTarget.state().getPrimary() == false && indexShard.isRemoteTranslogEnabled(); + final boolean hasRemoteTranslog = recoveryTarget.state().getPrimary() == false + && indexShard.indexSettings().isRemoteNode(); final boolean hasNoTranslog = indexShard.indexSettings().isRemoteSnapshot(); final boolean verifyTranslog = (hasRemoteTranslog || hasNoTranslog || hasRemoteSegmentStore) == false; final long startingSeqNo = indexShard.recoverLocallyAndFetchStartSeqNo(!hasRemoteTranslog); diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java index 7996c48b2b04b..abf9b1aaeb2cc 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java @@ -841,9 +841,11 @@ void finalizeRecovery(long targetLocalCheckpoint, long trimAboveSeqNo, ActionLis if (request.isPrimaryRelocation()) { logger.trace("performing relocation hand-off"); - final Runnable forceSegRepRunnable = shard.indexSettings().isSegRepEnabled() - ? recoveryTarget::forceSegmentFileSync - : () -> {}; + final Runnable forceSegRepRunnable = shard.indexSettings().isSegRepEnabledOrRemoteNode() + || (request.sourceNode().isRemoteStoreNode() && request.targetNode().isRemoteStoreNode()) + ? recoveryTarget::forceSegmentFileSync + : () -> {}; + // TODO: make relocated async // this acquires all IndexShard operation permits and will thus delay new recoveries until it is done cancellableThreads.execute( @@ -855,7 +857,7 @@ void finalizeRecovery(long targetLocalCheckpoint, long trimAboveSeqNo, ActionLis */ } else { // Force round of segment replication to update its checkpoint to primary's - if (shard.indexSettings().isSegRepEnabled()) { + if (shard.indexSettings().isSegRepEnabledOrRemoteNode()) { cancellableThreads.execute(recoveryTarget::forceSegmentFileSync); } } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandlerFactory.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandlerFactory.java index ea13ca18bbfca..0ccb1ac2133cf 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandlerFactory.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandlerFactory.java @@ -23,7 +23,8 @@ public static RecoverySourceHandler create( StartRecoveryRequest request, RecoverySettings recoverySettings ) { - boolean isReplicaRecoveryWithRemoteTranslog = request.isPrimaryRelocation() == false && shard.isRemoteTranslogEnabled(); + boolean isReplicaRecoveryWithRemoteTranslog = request.isPrimaryRelocation() == false + && (shard.isRemoteTranslogEnabled() || shard.isMigratingToRemote()); if (isReplicaRecoveryWithRemoteTranslog) { return new RemoteStorePeerRecoverySourceHandler( shard, diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java index f3b5d0d790f83..16311d5d2cfb7 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java @@ -61,6 +61,7 @@ import org.opensearch.indices.replication.common.ReplicationListener; import 
org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.indices.replication.common.ReplicationTarget; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.nio.channels.FileChannel; @@ -87,16 +88,20 @@ public class RecoveryTarget extends ReplicationTarget implements RecoveryTargetH // latch that can be used to blockingly wait for RecoveryTarget to be closed private final CountDownLatch closedLatch = new CountDownLatch(1); + private final ThreadPool threadPool; + /** * Creates a new recovery target object that represents a recovery to the provided shard. * - * @param indexShard local shard where we want to recover to - * @param sourceNode source node of the recovery where we recover from - * @param listener called when recovery is completed/failed + * @param indexShard local shard where we want to recover to + * @param sourceNode source node of the recovery where we recover from + * @param listener called when recovery is completed/failed + * @param threadPool threadpool instance */ - public RecoveryTarget(IndexShard indexShard, DiscoveryNode sourceNode, ReplicationListener listener) { + public RecoveryTarget(IndexShard indexShard, DiscoveryNode sourceNode, ReplicationListener listener, ThreadPool threadPool) { super("recovery_status", indexShard, indexShard.recoveryState().getIndex(), listener); this.sourceNode = sourceNode; + this.threadPool = threadPool; indexShard.recoveryStats().incCurrentAsTarget(); final String tempFilePrefix = getPrefix() + UUIDs.randomBase64UUID() + "."; this.multiFileWriter = new MultiFileWriter(indexShard.store(), stateIndex, tempFilePrefix, logger, this::ensureRefCount); @@ -108,7 +113,7 @@ public RecoveryTarget(IndexShard indexShard, DiscoveryNode sourceNode, Replicati * @return a copy of this recovery target */ public RecoveryTarget retryCopy() { - return new RecoveryTarget(indexShard, sourceNode, listener); + return new RecoveryTarget(indexShard, sourceNode, listener, threadPool); } public String source() { @@ -209,6 +214,15 @@ public void prepareForTranslogOperations(int totalTranslogOps, ActionListener { indexShard.refresh("remote store migration"); }); + indexShard.waitForRemoteStoreSync(this::setLastAccessTime); + logger.info("Remote Store is now seeded for {}", indexShard.shardId()); + } return null; }); } @@ -360,7 +374,7 @@ public void cleanFiles( // Replicas for segment replication or remote snapshot indices do not create // their own commit points and therefore do not modify the commit user data // in their store. In these cases, reuse the primary's translog UUID. - final boolean reuseTranslogUUID = indexShard.indexSettings().isSegRepEnabled() + final boolean reuseTranslogUUID = indexShard.indexSettings().isSegRepEnabledOrRemoteNode() || indexShard.indexSettings().isRemoteSnapshot(); if (reuseTranslogUUID) { final String translogUUID = store.getMetadata().getCommitUserData().get(TRANSLOG_UUID_KEY); diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java index 4062f9702fb3a..a393faabae0ea 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java @@ -175,7 +175,7 @@ public void clusterChanged(ClusterChangedEvent event) { // we need to ensure its state has cleared up in ongoing replications. 
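// Hedged sketch of the seeding wait used by the RecoveryTarget changes above: a plausible
// shape for IndexShard#waitForRemoteStoreSync(Runnable), modeled on the polling helper this
// series removed from StoreRecovery, not the actual implementation. Note that Thread.sleep
// takes milliseconds; the removed helper passed a value in seconds by mistake.
void waitForRemoteStoreSyncSketch(IndexShard shard, Runnable onProgress, TimeValue timeout) throws InterruptedException {
    final long deadlineNanos = System.nanoTime() + timeout.nanos();
    while (System.nanoTime() < deadlineNanos) {
        onProgress.run(); // e.g. this::setLastAccessTime, so the recovery is not reaped as idle
        if (shard.isRemoteSegmentStoreInSync()) {
            return; // every local segment file has been uploaded to the remote store
        }
        Thread.sleep(TimeValue.timeValueSeconds(30).millis()); // poll interval is an assumption
    }
}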
if (event.routingTableChanged()) { for (IndexService indexService : indicesService) { - if (indexService.getIndexSettings().isSegRepEnabled()) { + if (indexService.getIndexSettings().isSegRepEnabledOrRemoteNode()) { for (IndexShard indexShard : indexService) { if (indexShard.routingEntry().primary()) { final IndexMetadata indexMetadata = indexService.getIndexSettings().getIndexMetadata(); @@ -221,7 +221,7 @@ protected void doClose() throws IOException { */ @Override public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { - if (indexShard != null && indexShard.indexSettings().isSegRepEnabled()) { + if (indexShard != null && indexShard.indexSettings().isSegRepEnabledOrRemoteNode()) { ongoingSegmentReplications.cancel(indexShard, "shard is closed"); } } @@ -231,7 +231,10 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh */ @Override public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting oldRouting, ShardRouting newRouting) { - if (indexShard != null && indexShard.indexSettings().isSegRepEnabled() && oldRouting.primary() == false && newRouting.primary()) { + if (indexShard != null + && indexShard.indexSettings().isSegRepEnabledOrRemoteNode() + && oldRouting.primary() == false + && newRouting.primary()) { ongoingSegmentReplications.cancel(indexShard.routingEntry().allocationId().getId(), "Relocating primary shard."); } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index f28f829545d59..4942d39cfa48a 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -168,7 +168,8 @@ protected void doClose() throws IOException { public void clusterChanged(ClusterChangedEvent event) { if (event.routingTableChanged()) { for (IndexService indexService : indicesService) { - if (indexService.getIndexSettings().isSegRepEnabled() && event.indexRoutingTableChanged(indexService.index().getName())) { + if (indexService.getIndexSettings().isSegRepEnabledOrRemoteNode() + && event.indexRoutingTableChanged(indexService.index().getName())) { for (IndexShard shard : indexService) { if (shard.routingEntry().primary() == false) { // for this shard look up its primary routing, if it has completed a relocation trigger replication @@ -197,7 +198,7 @@ public void clusterChanged(ClusterChangedEvent event) { */ @Override public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { - if (indexShard != null && indexShard.indexSettings().isSegRepEnabled()) { + if (indexShard != null && indexShard.indexSettings().isSegRepEnabledOrRemoteNode()) { onGoingReplications.cancelForShard(indexShard.shardId(), "Shard closing"); latestReceivedCheckpoint.remove(shardId); } @@ -209,7 +210,7 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh */ @Override public void afterIndexShardStarted(IndexShard indexShard) { - if (indexShard.indexSettings().isSegRepEnabled() && indexShard.routingEntry().primary() == false) { + if (indexShard.indexSettings().isSegRepEnabledOrRemoteNode() && indexShard.routingEntry().primary() == false) { processLatestReceivedCheckpoint(indexShard, Thread.currentThread()); } } @@ -219,7 +220,10 @@ public void 
afterIndexShardStarted(IndexShard indexShard) { */ @Override public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting oldRouting, ShardRouting newRouting) { - if (oldRouting != null && indexShard.indexSettings().isSegRepEnabled() && oldRouting.primary() == false && newRouting.primary()) { + if (oldRouting != null + && indexShard.indexSettings().isSegRepEnabledOrRemoteNode() + && oldRouting.primary() == false + && newRouting.primary()) { onGoingReplications.cancelForShard(indexShard.shardId(), "Shard has been promoted to primary"); latestReceivedCheckpoint.remove(indexShard.shardId()); } diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java index 7575c6ff5fb34..a3bfe1195d8cc 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java @@ -168,6 +168,14 @@ public static boolean isRemoteClusterStateAttributePresent(Settings settings) { .isEmpty() == false; } + public static String getRemoteStoreSegmentRepo(Settings settings) { + return settings.get(Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY); + } + + public static String getRemoteStoreTranslogRepo(Settings settings) { + return settings.get(Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY); + } + public static boolean isRemoteStoreClusterStateEnabled(Settings settings) { return RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.get(settings) && isRemoteClusterStateAttributePresent(settings); diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java index 1c25d8c71f948..89f1ea142336e 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java @@ -381,7 +381,7 @@ private void snapshot( if (indexShard.routingEntry().primary() == false) { throw new IndexShardSnapshotFailedException(shardId, "snapshot should be performed only on primary"); } - if (indexShard.indexSettings().isSegRepEnabled() && indexShard.isPrimaryMode() == false) { + if (indexShard.indexSettings().isSegRepEnabledOrRemoteNode() && indexShard.isPrimaryMode() == false) { throw new IndexShardSnapshotFailedException( shardId, "snapshot triggered on a new primary following failover and cannot proceed until promotion is complete" diff --git a/server/src/test/java/org/opensearch/index/replication/IndexLevelReplicationTests.java b/server/src/test/java/org/opensearch/index/replication/IndexLevelReplicationTests.java index 33e08a482b9c3..ec1600094084a 100644 --- a/server/src/test/java/org/opensearch/index/replication/IndexLevelReplicationTests.java +++ b/server/src/test/java/org/opensearch/index/replication/IndexLevelReplicationTests.java @@ -142,7 +142,7 @@ public void run() { IndexShard replica = shards.addReplica(); Future future = shards.asyncRecoverReplica( replica, - (indexShard, node) -> new RecoveryTarget(indexShard, node, recoveryListener) { + (indexShard, node) -> new RecoveryTarget(indexShard, node, recoveryListener, threadPool) { @Override public void cleanFiles( int totalTranslogOps, @@ -223,17 +223,20 @@ public IndexResult index(Index op) throws IOException { }); 
thread.start(); IndexShard replica = shards.addReplica(); - Future fut = shards.asyncRecoverReplica(replica, (shard, node) -> new RecoveryTarget(shard, node, recoveryListener) { - @Override - public void prepareForTranslogOperations(int totalTranslogOps, ActionListener listener) { - try { - indexedOnPrimary.await(); - } catch (InterruptedException e) { - throw new AssertionError(e); + Future fut = shards.asyncRecoverReplica( + replica, + (shard, node) -> new RecoveryTarget(shard, node, recoveryListener, threadPool) { + @Override + public void prepareForTranslogOperations(int totalTranslogOps, ActionListener listener) { + try { + indexedOnPrimary.await(); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + super.prepareForTranslogOperations(totalTranslogOps, listener); } - super.prepareForTranslogOperations(totalTranslogOps, listener); } - }); + ); fut.get(); recoveryDone.countDown(); thread.join(); diff --git a/server/src/test/java/org/opensearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/opensearch/index/replication/RecoveryDuringReplicationTests.java index 17b5440ab5424..b891ac63378ac 100644 --- a/server/src/test/java/org/opensearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/opensearch/index/replication/RecoveryDuringReplicationTests.java @@ -72,6 +72,7 @@ import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.recovery.RecoveryTarget; import org.opensearch.indices.replication.common.ReplicationListener; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.util.ArrayList; @@ -118,7 +119,8 @@ public void testIndexingDuringFileRecovery() throws Exception { indexShard, node, recoveryListener, - logger + logger, + threadPool ) ); @@ -482,7 +484,7 @@ protected EngineFactory getEngineFactory(ShardRouting routing) { AtomicBoolean recoveryDone = new AtomicBoolean(false); final Future recoveryFuture = shards.asyncRecoverReplica(newReplica, (indexShard, node) -> { recoveryStart.countDown(); - return new RecoveryTarget(indexShard, node, recoveryListener) { + return new RecoveryTarget(indexShard, node, recoveryListener, threadPool) { @Override public void finalizeRecovery(long globalCheckpoint, long trimAboveSeqNo, ActionListener listener) { recoveryDone.set(true); @@ -536,7 +538,7 @@ protected EngineFactory getEngineFactory(final ShardRouting routing) { final IndexShard replica = shards.addReplica(); final Future recoveryFuture = shards.asyncRecoverReplica( replica, - (indexShard, node) -> new RecoveryTarget(indexShard, node, recoveryListener) { + (indexShard, node) -> new RecoveryTarget(indexShard, node, recoveryListener, threadPool) { @Override public void indexTranslogOperations( final List operations, @@ -812,9 +814,10 @@ public BlockingTarget( IndexShard shard, DiscoveryNode sourceNode, ReplicationListener listener, - Logger logger + Logger logger, + ThreadPool threadPool ) { - super(shard, sourceNode, listener); + super(shard, sourceNode, listener, threadPool); this.recoveryBlocked = recoveryBlocked; this.releaseRecovery = releaseRecovery; this.stageToBlock = stageToBlock; diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 46be10ce62840..537bfcf8f8a6b 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -3208,7 +3208,7 @@ 
public void testTranslogRecoverySyncsTranslog() throws IOException { indexDoc(primary, "_doc", "0", "{\"foo\" : \"bar\"}"); IndexShard replica = newShard(primary.shardId(), false, "n2", metadata, null); - recoverReplica(replica, primary, (shard, discoveryNode) -> new RecoveryTarget(shard, discoveryNode, recoveryListener) { + recoverReplica(replica, primary, (shard, discoveryNode) -> new RecoveryTarget(shard, discoveryNode, recoveryListener, threadPool) { @Override public void indexTranslogOperations( final List operations, @@ -3340,7 +3340,7 @@ public void testShardActiveDuringPeerRecovery() throws IOException { replica.markAsRecovering("for testing", new RecoveryState(replica.routingEntry(), localNode, localNode)); // Shard is still inactive since we haven't started recovering yet assertFalse(replica.isActive()); - recoverReplica(replica, primary, (shard, discoveryNode) -> new RecoveryTarget(shard, discoveryNode, recoveryListener) { + recoverReplica(replica, primary, (shard, discoveryNode) -> new RecoveryTarget(shard, discoveryNode, recoveryListener, threadPool) { @Override public void indexTranslogOperations( final List operations, @@ -3397,7 +3397,7 @@ public void testRefreshListenersDuringPeerRecovery() throws IOException { DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); replica.markAsRecovering("for testing", new RecoveryState(replica.routingEntry(), localNode, localNode)); assertListenerCalled.accept(replica); - recoverReplica(replica, primary, (shard, discoveryNode) -> new RecoveryTarget(shard, discoveryNode, recoveryListener) { + recoverReplica(replica, primary, (shard, discoveryNode) -> new RecoveryTarget(shard, discoveryNode, recoveryListener, threadPool) { // we're only checking that listeners are called when the engine is open, before there is no point @Override public void prepareForTranslogOperations(int totalTranslogOps, ActionListener listener) { diff --git a/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java b/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java index 4f5cad70fd643..85864eebd6d0d 100644 --- a/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java +++ b/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java @@ -86,7 +86,7 @@ public void testStartSequenceForReplicaRecovery() throws Exception { ); shards.addReplica(newReplicaShard); AtomicBoolean assertDone = new AtomicBoolean(false); - shards.recoverReplica(newReplicaShard, (r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener) { + shards.recoverReplica(newReplicaShard, (r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener, threadPool) { @Override public IndexShard indexShard() { IndexShard idxShard = super.indexShard(); diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index 7caff3e5f5479..e93d266dcab4c 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -256,7 +256,7 @@ public void onDone(ReplicationState state) { public void onFailure(ReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { 
assertEquals(ExceptionsHelper.unwrap(e, IOException.class).getMessage(), "Expected failure"); } - }), + }, threadPool), true, true, replicatePrimaryFunction diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java index a83e737dc25c1..7ff4c3ecf5236 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java @@ -219,8 +219,9 @@ private TranslogConfig getTranslogConfig(final Path path, final Settings setting new ByteSizeValue(8, ByteSizeUnit.KB), new ByteSizeValue(10 + randomInt(128 * 1024), ByteSizeUnit.BYTES) ); - - final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings); + // To simulate that the node is remote backed + Settings nodeSettings = Settings.builder().put("node.attr.remote_store.translog.repository", "my-repo-1").build(); + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings, nodeSettings); return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize, ""); } diff --git a/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java b/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java index 5e6398da6fa1b..0e16e81b1bb70 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java @@ -149,22 +149,26 @@ public void afterIndexRemoved(Index index, IndexSettings indexSettings, IndexRem newRouting = newRouting.moveToUnassigned(unassignedInfo) .updateUnassigned(unassignedInfo, RecoverySource.EmptyStoreRecoverySource.INSTANCE); newRouting = ShardRoutingHelper.initialize(newRouting, nodeId); + final DiscoveryNode localNode = new DiscoveryNode( + "foo", + buildNewFakeTransportAddress(), + emptyMap(), + emptySet(), + Version.CURRENT + ); IndexShard shard = index.createShard( newRouting, s -> {}, RetentionLeaseSyncer.EMPTY, SegmentReplicationCheckpointPublisher.EMPTY, + null, + null, + localNode, null ); IndexShardTestCase.updateRoutingEntry(shard, newRouting); assertEquals(5, counter.get()); - final DiscoveryNode localNode = new DiscoveryNode( - "foo", - buildNewFakeTransportAddress(), - emptyMap(), - emptySet(), - Version.CURRENT - ); + shard.markAsRecovering("store", new RecoveryState(newRouting, localNode, null)); IndexShardTestCase.recoverFromStore(shard); newRouting = ShardRoutingHelper.moveToStarted(newRouting); diff --git a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java index 34f854cae56ba..1e6cc43703672 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -95,7 +95,7 @@ public void testWriteFileChunksConcurrently() throws Exception { final DiscoveryNode pNode = getFakeDiscoNode(sourceShard.routingEntry().currentNodeId()); final DiscoveryNode rNode = getFakeDiscoNode(targetShard.routingEntry().currentNodeId()); targetShard.markAsRecovering("test-peer-recovery", new RecoveryState(targetShard.routingEntry(), rNode, pNode)); - final RecoveryTarget recoveryTarget = 
new RecoveryTarget(targetShard, null, null); + final RecoveryTarget recoveryTarget = new RecoveryTarget(targetShard, null, null, threadPool); final PlainActionFuture receiveFileInfoFuture = new PlainActionFuture<>(); recoveryTarget.receiveFileInfo( mdFiles.stream().map(StoreFileMetadata::name).collect(Collectors.toList()), @@ -355,7 +355,7 @@ public void testResetStartingSeqNoIfLastCommitCorrupted() throws Exception { shard.prepareForIndexRecovery(); long startingSeqNo = shard.recoverLocallyAndFetchStartSeqNo(true); shard.store().markStoreCorrupted(new IOException("simulated")); - RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, null); + RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, null, threadPool); StartRecoveryRequest request = PeerRecoveryTargetService.getStartRecoveryRequest(logger, rNode, recoveryTarget, startingSeqNo); assertThat(request.startingSeqNo(), equalTo(UNASSIGNED_SEQ_NO)); assertThat(request.metadataSnapshot().size(), equalTo(0)); @@ -396,7 +396,7 @@ public void testResetStartRequestIfTranslogIsCorrupted() throws Exception { shard = reinitShard(shard, ShardRoutingHelper.initWithSameId(shard.routingEntry(), RecoverySource.PeerRecoverySource.INSTANCE)); shard.markAsRecovering("peer recovery", new RecoveryState(shard.routingEntry(), pNode, rNode)); shard.prepareForIndexRecovery(); - RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, null); + RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, null, threadPool); StartRecoveryRequest request = PeerRecoveryTargetService.getStartRecoveryRequest( logger, rNode, diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java index ad90255a3cc3f..71d89e2856c6e 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java @@ -137,7 +137,8 @@ public void testRetentionPolicyChangeDuringRecovery() throws Exception { indexShard, node, recoveryListener, - logger + logger, + threadPool ) ); recoveryBlocked.await(); @@ -348,7 +349,7 @@ public void testPeerRecoverySendSafeCommitInFileBased() throws Exception { } IndexShard replicaShard = newShard(primaryShard.shardId(), false); updateMappings(replicaShard, primaryShard.indexSettings().getIndexMetadata()); - recoverReplica(replicaShard, primaryShard, (r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener) { + recoverReplica(replicaShard, primaryShard, (r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener, threadPool) { @Override public void prepareForTranslogOperations(int totalTranslogOps, ActionListener listener) { super.prepareForTranslogOperations(totalTranslogOps, listener); @@ -480,7 +481,7 @@ public void onDone(ReplicationState state) { public void onFailure(ReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { assertThat(ExceptionsHelper.unwrap(e, IOException.class).getMessage(), equalTo("simulated")); } - })) + }, threadPool)) ); expectThrows(AlreadyClosedException.class, () -> replica.refresh("test")); group.removeReplica(replica); diff --git a/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java b/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java index fb4dc97435512..4ce4e28690697 100644 --- a/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java +++ 
b/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java @@ -225,6 +225,6 @@ long startRecovery( final DiscoveryNode rNode = getDiscoveryNode(indexShard.routingEntry().currentNodeId()); indexShard.markAsRecovering("remote", new RecoveryState(indexShard.routingEntry(), sourceNode, rNode)); indexShard.prepareForIndexRecovery(); - return collection.start(new RecoveryTarget(indexShard, sourceNode, listener), timeValue); + return collection.start(new RecoveryTarget(indexShard, sourceNode, listener, threadPool), timeValue); } }
diff --git a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java index 9800782272ede..e6e20ce8f8566 100644 --- a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java @@ -520,7 +520,7 @@ public synchronized boolean removeReplica(IndexShard replica) throws IOException } public void recoverReplica(IndexShard replica) throws IOException { - recoverReplica(replica, (r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener)); + recoverReplica(replica, (r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener, threadPool)); } public void recoverReplica(IndexShard replica, BiFunction<IndexShard, DiscoveryNode, RecoveryTarget> targetSupplier)
diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index bf1c4d4c94e04..a2f9eb677c0ac 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -617,7 +617,14 @@ protected IndexShard newShard( @Nullable Path remotePath, IndexingOperationListener... listeners ) throws IOException { - final Settings nodeSettings = Settings.builder().put("node.name", routing.currentNodeId()).build(); + Settings nodeSettings = Settings.builder().put("node.name", routing.currentNodeId()).build(); + // To simulate that the node is remote backed + if ("true".equals(indexMetadata.getSettings().get(IndexMetadata.SETTING_REMOTE_STORE_ENABLED))) { + nodeSettings = Settings.builder() + .put("node.name", routing.currentNodeId()) + .put("node.attr.remote_store.translog.repository", "seg_repo") + .build(); + } final IndexSettings indexSettings = new IndexSettings(indexMetadata, nodeSettings); final IndexShard indexShard; if (storeProvider == null) { @@ -646,7 +653,7 @@ protected IndexShard newShard( RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory = null; RepositoriesService mockRepoSvc = mock(RepositoriesService.class); - if (indexSettings.isRemoteStoreEnabled()) { + if (indexSettings.isRemoteStoreEnabled() || indexSettings.isRemoteNode()) { String remoteStoreRepository = indexSettings.getRemoteStoreRepository(); // remote path via setting a repository. This is a hack used when shards are created using reset, // since we can't get the remote path from IndexShard directly; we use the repository to store it.
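// Side note as an illustrative sketch: the "remote backed" simulation above hinges entirely
// on node attributes. A test can flip the remote branches in these hunks with settings like
// the following; repository names such as "seg_repo" are arbitrary test stand-ins, and the
// segment attribute key is an assumption mirroring the translog one.
Settings remoteBackedNodeSettings = Settings.builder()
    .put("node.name", "test-node")
    .put("node.attr.remote_store.segment.repository", "seg_repo")
    .put("node.attr.remote_store.translog.repository", "seg_repo")
    .build();
// With these set, RemoteStoreNodeAttribute.isRemoteDataAttributePresent(remoteBackedNodeSettings)
// reports true and getRemoteStoreTranslogRepo(remoteBackedNodeSettings) resolves to "seg_repo".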
@@ -703,7 +710,8 @@ protected IndexShard newShard( remoteStoreStatsTrackerFactory, () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, "dummy-node", - DefaultRecoverySettings.INSTANCE + DefaultRecoverySettings.INSTANCE, + false ); indexShard.addShardFailureCallback(DEFAULT_SHARD_FAILURE_HANDLER); if (remoteStoreStatsTrackerFactory != null) { @@ -1001,7 +1009,7 @@ public static void updateRoutingEntry(IndexShard shard, ShardRouting shardRoutin protected void recoveryEmptyReplica(IndexShard replica, boolean startReplica) throws IOException { IndexShard primary = null; try { - primary = newStartedShard(true); + primary = newStartedShard(true, replica.indexSettings.getSettings()); recoverReplica(replica, primary, startReplica); } finally { closeShards(primary); @@ -1033,7 +1041,7 @@ protected void recoverReplica( recoverReplica( replica, primary, - (r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener), + (r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener, threadPool), true, startReplica, replicatePrimaryFunction @@ -1051,7 +1059,7 @@ protected void recoverReplica( } public Function<List<IndexShard>, List<SegmentReplicationTarget>> getReplicationFunc(final IndexShard target) { - return target.indexSettings().isSegRepEnabled() ? (shardList) -> { + return target.indexSettings().isSegRepEnabledOrRemoteNode() ? (shardList) -> { try { assert shardList.size() >= 2; final IndexShard primary = shardList.get(0); @@ -1489,7 +1497,7 @@ private SegmentReplicationTargetService prepareForReplication( SegmentReplicationSourceFactory sourceFactory = null; SegmentReplicationTargetService targetService; - if (primaryShard.indexSettings.isRemoteStoreEnabled()) { + if (primaryShard.indexSettings.isRemoteStoreEnabled() || primaryShard.indexSettings.isRemoteNode()) { RecoverySettings recoverySettings = new RecoverySettings( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
From 2069bd805804dd93bd69695a4c6521cc6f2b9bb6 Mon Sep 17 00:00:00 2001
From: rajiv-kv <157019998+rajiv-kv@users.noreply.github.com>
Date: Thu, 21 Mar 2024 14:42:58 +0530
Subject: [PATCH 17/74] Integrate with CPU admission controller for cluster-manager Read APIs. (#12496)

* Integrate with CPU admission controller for cluster-manager Read APIs. The admission control is enforced at the transport layer.
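Concretely, the enforcement path can be read off the integration test added below: with the
transport-layer mode set to ENFORCED and the cluster-manager CPU threshold crossed, read
actions registered under the CLUSTER_ADMIN action type are rejected with an
OpenSearchRejectedExecutionException, which surfaces over REST as 429 TOO_MANY_REQUESTS. As a
minimal sketch (using the test's 50% threshold, not a default):

    Settings enforceClusterManagerAdmissionControl = Settings.builder()
        .put(AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.ENFORCED.getMode())
        .put(CpuBasedAdmissionControllerSettings.CLUSTER_ADMIN_CPU_USAGE_LIMIT.getKey(), 50)
        .build();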
Signed-off-by: Rajiv Kumar Vaidyanathan --- CHANGELOG.md | 1 + .../AdmissionForClusterManagerIT.java | 198 ++++++++++++++++++ .../support/HandledTransportAction.java | 37 +++- .../TransportClusterManagerNodeAction.java | 29 ++- ...TransportClusterManagerNodeReadAction.java | 38 +++- .../common/settings/ClusterSettings.java | 7 +- .../CpuBasedAdmissionController.java | 5 +- .../IoBasedAdmissionController.java | 5 +- .../enums/AdmissionControlActionType.java | 5 +- .../CpuBasedAdmissionControllerSettings.java | 21 ++ .../IoBasedAdmissionControllerSettings.java | 18 ++ .../transport/TransportService.java | 6 +- ...BasedAdmissionControllerSettingsTests.java | 32 ++- ...BasedAdmissionControllerSettingsTests.java | 16 ++ 14 files changed, 405 insertions(+), 13 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionForClusterManagerIT.java diff --git a/CHANGELOG.md b/CHANGELOG.md index c65b20b99bc25..9dced730cbcad 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -121,6 +121,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Introduce a new setting `index.check_pending_flush.enabled` to expose the ability to disable the check for pending flushes by write threads ([#12710](https://github.com/opensearch-project/OpenSearch/pull/12710)) - Built-in secure transports support ([#12435](https://github.com/opensearch-project/OpenSearch/pull/12435)) - Lightweight Transport action to verify local term before fetching cluster-state from remote ([#12252](https://github.com/opensearch-project/OpenSearch/pull/12252/)) +- Integrate with admission controller for cluster-manager Read API. ([#12496](https://github.com/opensearch-project/OpenSearch/pull/12496)) ### Dependencies - Bump `peter-evans/find-comment` from 2 to 3 ([#12288](https://github.com/opensearch-project/OpenSearch/pull/12288)) diff --git a/server/src/internalClusterTest/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionForClusterManagerIT.java b/server/src/internalClusterTest/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionForClusterManagerIT.java new file mode 100644 index 0000000000000..4d1964326820e --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionForClusterManagerIT.java @@ -0,0 +1,198 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ratelimitting.admissioncontrol; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.admin.indices.alias.get.GetAliasesRequest; +import org.opensearch.action.admin.indices.alias.get.GetAliasesResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.node.IoUsageStats; +import org.opensearch.node.ResourceUsageCollectorService; +import org.opensearch.node.resource.tracker.ResourceTrackerSettings; +import org.opensearch.ratelimitting.admissioncontrol.controllers.CpuBasedAdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.ratelimitting.admissioncontrol.stats.AdmissionControllerStats; +import org.opensearch.rest.AbstractRestChannel; +import org.opensearch.rest.RestResponse; +import org.opensearch.rest.action.admin.indices.RestGetAliasesAction; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.rest.FakeRestRequest; +import org.junit.Before; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; + +import static org.opensearch.ratelimitting.admissioncontrol.AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE; +import static org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings.CLUSTER_ADMIN_CPU_USAGE_LIMIT; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class AdmissionForClusterManagerIT extends OpenSearchIntegTestCase { + + private static final Logger LOGGER = LogManager.getLogger(AdmissionForClusterManagerIT.class); + + public static final String INDEX_NAME = "test_index"; + + private String clusterManagerNodeId; + private String datanode; + private ResourceUsageCollectorService cMResourceCollector; + + private static final Settings DISABLE_ADMISSION_CONTROL = Settings.builder() + .put(ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.DISABLED.getMode()) + .build(); + + private static final Settings ENFORCE_ADMISSION_CONTROL = Settings.builder() + .put(ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), TimeValue.timeValueMillis(500)) + .put(ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.ENFORCED) + .put(CLUSTER_ADMIN_CPU_USAGE_LIMIT.getKey(), 50) + .build(); + + @Before + public void init() { + String clusterManagerNode = internalCluster().startClusterManagerOnlyNode( + Settings.builder().put(DISABLE_ADMISSION_CONTROL).build() + ); + datanode = internalCluster().startDataOnlyNode(Settings.builder().put(DISABLE_ADMISSION_CONTROL).build()); + + ensureClusterSizeConsistency(); + ensureGreen(); + + // Disable the automatic resource collection + clusterManagerNodeId = internalCluster().clusterService(clusterManagerNode).localNode().getId(); + cMResourceCollector = internalCluster().getClusterManagerNodeInstance(ResourceUsageCollectorService.class); + cMResourceCollector.stop(); + + // 
Enable admission control + client().admin().cluster().prepareUpdateSettings().setTransientSettings(ENFORCE_ADMISSION_CONTROL).execute().actionGet(); + } + + public void testAdmissionControlEnforced() throws Exception { + cMResourceCollector.collectNodeResourceUsageStats(clusterManagerNodeId, System.currentTimeMillis(), 97, 99, new IoUsageStats(98)); + + // Write API on ClusterManager + assertAcked(prepareCreate("test").setMapping("field", "type=text").setAliases("{\"alias1\" : {}}")); + + // Read API on ClusterManager + GetAliasesRequest aliasesRequest = new GetAliasesRequest(); + aliasesRequest.aliases("alias1"); + try { + dataNodeClient().admin().indices().getAliases(aliasesRequest).actionGet(); + fail("expected failure"); + } catch (Exception e) { + assertTrue(e instanceof OpenSearchRejectedExecutionException); + assertTrue(e.getMessage().contains("CPU usage admission controller rejected the request")); + assertTrue(e.getMessage().contains("[indices:admin/aliases/get]")); + assertTrue(e.getMessage().contains("action-type [CLUSTER_ADMIN]")); + } + + client().admin().cluster().prepareUpdateSettings().setTransientSettings(DISABLE_ADMISSION_CONTROL).execute().actionGet(); + GetAliasesResponse getAliasesResponse = dataNodeClient().admin().indices().getAliases(aliasesRequest).actionGet(); + assertThat(getAliasesResponse.getAliases().get("test").size(), equalTo(1)); + + AdmissionControlService admissionControlServiceCM = internalCluster().getClusterManagerNodeInstance(AdmissionControlService.class); + + AdmissionControllerStats admissionStats = getAdmissionControlStats(admissionControlServiceCM).get( + CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER + ); + + assertEquals(admissionStats.rejectionCount.get(AdmissionControlActionType.CLUSTER_ADMIN.getType()).longValue(), 1); + assertNull(admissionStats.rejectionCount.get(AdmissionControlActionType.SEARCH.getType())); + assertNull(admissionStats.rejectionCount.get(AdmissionControlActionType.INDEXING.getType())); + } + + public void testAdmissionControlEnabledOnNoBreach() throws InterruptedException { + // CPU usage is less than threshold 50% + cMResourceCollector.collectNodeResourceUsageStats(clusterManagerNodeId, System.currentTimeMillis(), 97, 35, new IoUsageStats(98)); + + // Write API on ClusterManager + assertAcked(prepareCreate("test").setMapping("field", "type=text").setAliases("{\"alias1\" : {}}").execute().actionGet()); + + // Read API on ClusterManager + GetAliasesRequest aliasesRequest = new GetAliasesRequest(); + aliasesRequest.aliases("alias1"); + GetAliasesResponse getAliasesResponse = dataNodeClient().admin().indices().getAliases(aliasesRequest).actionGet(); + assertThat(getAliasesResponse.getAliases().get("test").size(), equalTo(1)); + } + + public void testAdmissionControlMonitorOnBreach() throws InterruptedException { + admissionControlDisabledOnBreach( + Settings.builder().put(ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.MONITOR.getMode()).build() + ); + } + + public void testAdmissionControlDisabledOnBreach() throws InterruptedException { + admissionControlDisabledOnBreach(DISABLE_ADMISSION_CONTROL); + } + + public void admissionControlDisabledOnBreach(Settings admission) throws InterruptedException { + client().admin().cluster().prepareUpdateSettings().setTransientSettings(admission).execute().actionGet(); + + cMResourceCollector.collectNodeResourceUsageStats(clusterManagerNodeId, System.currentTimeMillis(), 97, 97, new IoUsageStats(98)); + + // Write API on ClusterManager + 
assertAcked(prepareCreate("test").setMapping("field", "type=text").setAliases("{\"alias1\" : {}}").execute().actionGet()); + + // Read API on ClusterManager + GetAliasesRequest aliasesRequest = new GetAliasesRequest(); + aliasesRequest.aliases("alias1"); + GetAliasesResponse getAliasesResponse = dataNodeClient().admin().indices().getAliases(aliasesRequest).actionGet(); + assertThat(getAliasesResponse.getAliases().get("test").size(), equalTo(1)); + + } + + public void testAdmissionControlResponseStatus() throws Exception { + cMResourceCollector.collectNodeResourceUsageStats(clusterManagerNodeId, System.currentTimeMillis(), 97, 99, new IoUsageStats(98)); + + // Write API on ClusterManager + assertAcked(prepareCreate("test").setMapping("field", "type=text").setAliases("{\"alias1\" : {}}")); + + // Read API on ClusterManager + FakeRestRequest aliasesRequest = new FakeRestRequest(); + aliasesRequest.params().put("name", "alias1"); + CountDownLatch waitForResponse = new CountDownLatch(1); + AtomicReference aliasResponse = new AtomicReference<>(); + AbstractRestChannel channel = new AbstractRestChannel(aliasesRequest, true) { + + @Override + public void sendResponse(RestResponse response) { + waitForResponse.countDown(); + aliasResponse.set(response); + } + }; + + RestGetAliasesAction restHandler = internalCluster().getInstance(RestGetAliasesAction.class, datanode); + restHandler.handleRequest(aliasesRequest, channel, internalCluster().getInstance(NodeClient.class, datanode)); + + waitForResponse.await(); + assertEquals(RestStatus.TOO_MANY_REQUESTS, aliasResponse.get().status()); + } + + @Override + public void tearDown() throws Exception { + client().admin().cluster().prepareUpdateSettings().setTransientSettings(DISABLE_ADMISSION_CONTROL).execute().actionGet(); + super.tearDown(); + } + + Map getAdmissionControlStats(AdmissionControlService admissionControlService) { + Map acStats = new HashMap<>(); + for (AdmissionControllerStats admissionControllerStats : admissionControlService.stats().getAdmissionControllerStatsList()) { + acStats.put(admissionControllerStats.getAdmissionControllerName(), admissionControllerStats); + } + return acStats; + } +} diff --git a/server/src/main/java/org/opensearch/action/support/HandledTransportAction.java b/server/src/main/java/org/opensearch/action/support/HandledTransportAction.java index 786d8cfb6fa1d..a5054b966b2f9 100644 --- a/server/src/main/java/org/opensearch/action/support/HandledTransportAction.java +++ b/server/src/main/java/org/opensearch/action/support/HandledTransportAction.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequest; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportChannel; @@ -65,7 +66,7 @@ protected HandledTransportAction( Writeable.Reader requestReader, String executor ) { - this(actionName, true, transportService, actionFilters, requestReader, executor); + this(actionName, true, null, transportService, actionFilters, requestReader, executor); } protected HandledTransportAction( @@ -75,19 +76,49 @@ protected HandledTransportAction( ActionFilters actionFilters, Writeable.Reader requestReader ) { - this(actionName, canTripCircuitBreaker, transportService, actionFilters, requestReader, ThreadPool.Names.SAME); + this(actionName, canTripCircuitBreaker, null, 
transportService, actionFilters, requestReader, ThreadPool.Names.SAME); } protected HandledTransportAction( String actionName, boolean canTripCircuitBreaker, + AdmissionControlActionType admissionControlActionType, + TransportService transportService, + ActionFilters actionFilters, + Writeable.Reader requestReader + ) { + this( + actionName, + canTripCircuitBreaker, + admissionControlActionType, + transportService, + actionFilters, + requestReader, + ThreadPool.Names.SAME + ); + } + + protected HandledTransportAction( + String actionName, + boolean canTripCircuitBreaker, + AdmissionControlActionType admissionControlActionType, TransportService transportService, ActionFilters actionFilters, Writeable.Reader requestReader, String executor ) { super(actionName, actionFilters, transportService.getTaskManager()); - transportService.registerRequestHandler(actionName, executor, false, canTripCircuitBreaker, requestReader, new TransportHandler()); + + transportService.registerRequestHandler( + actionName, + executor, + false, + canTripCircuitBreaker, + admissionControlActionType, + requestReader, + new TransportHandler() + ); + } /** diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java index 6a081f9dfecde..12d572b30272a 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java @@ -64,6 +64,7 @@ import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.discovery.ClusterManagerNotDiscoveredException; import org.opensearch.node.NodeClosedException; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.ConnectTransportException; @@ -105,7 +106,7 @@ protected TransportClusterManagerNodeAction( Writeable.Reader request, IndexNameExpressionResolver indexNameExpressionResolver ) { - this(actionName, true, transportService, clusterService, threadPool, actionFilters, request, indexNameExpressionResolver); + this(actionName, true, null, transportService, clusterService, threadPool, actionFilters, request, indexNameExpressionResolver); } protected TransportClusterManagerNodeAction( @@ -118,7 +119,31 @@ protected TransportClusterManagerNodeAction( Writeable.Reader request, IndexNameExpressionResolver indexNameExpressionResolver ) { - super(actionName, canTripCircuitBreaker, transportService, actionFilters, request); + this( + actionName, + canTripCircuitBreaker, + null, + transportService, + clusterService, + threadPool, + actionFilters, + request, + indexNameExpressionResolver + ); + } + + protected TransportClusterManagerNodeAction( + String actionName, + boolean canTripCircuitBreaker, + AdmissionControlActionType admissionControlActionType, + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + Writeable.Reader request, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super(actionName, canTripCircuitBreaker, admissionControlActionType, transportService, actionFilters, request); this.transportService = transportService; this.clusterService = clusterService; this.threadPool = threadPool; diff --git 
a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeReadAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeReadAction.java index d8cd5af992028..d58487a475bcf 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeReadAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeReadAction.java @@ -37,6 +37,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -59,12 +60,46 @@ protected TransportClusterManagerNodeReadAction( Writeable.Reader request, IndexNameExpressionResolver indexNameExpressionResolver ) { - this(actionName, true, transportService, clusterService, threadPool, actionFilters, request, indexNameExpressionResolver); + this( + actionName, + true, + AdmissionControlActionType.CLUSTER_ADMIN, + transportService, + clusterService, + threadPool, + actionFilters, + request, + indexNameExpressionResolver + ); + } + + protected TransportClusterManagerNodeReadAction( + String actionName, + boolean checkSizeLimit, + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + Writeable.Reader request, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + actionName, + checkSizeLimit, + null, + transportService, + clusterService, + threadPool, + actionFilters, + request, + indexNameExpressionResolver + ); } protected TransportClusterManagerNodeReadAction( String actionName, boolean checkSizeLimit, + AdmissionControlActionType admissionControlActionType, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, @@ -75,6 +110,7 @@ protected TransportClusterManagerNodeReadAction( super( actionName, checkSizeLimit, + admissionControlActionType, transportService, clusterService, threadPool, diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 7c8afb6b5c1b5..4f1815de224db 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -708,15 +708,18 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING, IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING, + IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING, + IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING, + + // Admission Control Settings AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE, CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, CpuBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT, CpuBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT, + CpuBasedAdmissionControllerSettings.CLUSTER_ADMIN_CPU_USAGE_LIMIT, IoBasedAdmissionControllerSettings.IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, IoBasedAdmissionControllerSettings.SEARCH_IO_USAGE_LIMIT, 
IoBasedAdmissionControllerSettings.INDEXING_IO_USAGE_LIMIT, - IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING, - IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING, // Concurrent segment search settings SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING, diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionController.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionController.java index 5c180346c05e1..7ad0715a2a38e 100644 --- a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionController.java +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionController.java @@ -67,7 +67,8 @@ private void applyForTransportLayer(String actionName, AdmissionControlActionTyp throw new OpenSearchRejectedExecutionException( String.format( Locale.ROOT, - "CPU usage admission controller rejected the request for action [%s] as CPU limit reached", + "CPU usage admission controller rejected the request for action [%s] as CPU limit reached for action-type [%s]", + actionName, admissionControlActionType.name() ) ); @@ -112,6 +113,8 @@ private long getCpuRejectionThreshold(AdmissionControlActionType admissionContro return this.settings.getSearchCPULimit(); case INDEXING: return this.settings.getIndexingCPULimit(); + case CLUSTER_ADMIN: + return this.settings.getClusterAdminCPULimit(); default: throw new IllegalArgumentException( String.format( diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionController.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionController.java index ad6cc3ff378f0..d03b2050cd5f3 100644 --- a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionController.java +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionController.java @@ -68,7 +68,8 @@ private void applyForTransportLayer(String actionName, AdmissionControlActionTyp throw new OpenSearchRejectedExecutionException( String.format( Locale.ROOT, - "Io usage admission controller rejected the request for action [%s] as IO limit reached", + "IO usage admission controller rejected the request for action [%s] as IO limit reached for action-type [%s]", + actionName, admissionControlActionType.name() ) ); @@ -113,6 +114,8 @@ private long getIoRejectionThreshold(AdmissionControlActionType admissionControl return this.settings.getSearchIOUsageLimit(); case INDEXING: return this.settings.getIndexingIOUsageLimit(); + case CLUSTER_ADMIN: + return this.settings.getClusterAdminIOUsageLimit(); default: throw new IllegalArgumentException( String.format( diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlActionType.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlActionType.java index 8cf6e973ceb64..6acc440180281 100644 --- a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlActionType.java +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlActionType.java @@ -15,7 +15,8 @@ */ public enum AdmissionControlActionType { INDEXING("indexing"), - SEARCH("search"); + SEARCH("search"), + CLUSTER_ADMIN("cluster_admin"); private final String type; @@ -38,6 +39,8 
@@ public static AdmissionControlActionType fromName(String name) { return INDEXING; case "search": return SEARCH; + case "cluster_admin": + return CLUSTER_ADMIN; default: throw new IllegalArgumentException("Not Supported TransportAction Type: " + name); } diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/CpuBasedAdmissionControllerSettings.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/CpuBasedAdmissionControllerSettings.java index 1bddd1446a4c4..30012176d59af 100644 --- a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/CpuBasedAdmissionControllerSettings.java +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/CpuBasedAdmissionControllerSettings.java @@ -30,6 +30,8 @@ public static class Defaults { private AdmissionControlMode transportLayerMode; private Long searchCPULimit; private Long indexingCPULimit; + private Long clusterInfoCPULimit; + /** * Feature level setting to operate in shadow-mode or in enforced-mode. If enforced field is set * rejection will be performed, otherwise only rejection metrics will be populated. @@ -62,14 +64,24 @@ public static class Defaults { Setting.Property.NodeScope ); + public static final Setting<Long> CLUSTER_ADMIN_CPU_USAGE_LIMIT = Setting.longSetting( + "admission_control.cluster.admin.cpu_usage.limit", + Defaults.CPU_USAGE_LIMIT, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + // currently limited to one setting; more settings will be added in follow-up PRs public CpuBasedAdmissionControllerSettings(ClusterSettings clusterSettings, Settings settings) { this.transportLayerMode = CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.get(settings); clusterSettings.addSettingsUpdateConsumer(CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, this::setTransportLayerMode); this.searchCPULimit = SEARCH_CPU_USAGE_LIMIT.get(settings); this.indexingCPULimit = INDEXING_CPU_USAGE_LIMIT.get(settings); + this.clusterInfoCPULimit = CLUSTER_ADMIN_CPU_USAGE_LIMIT.get(settings); clusterSettings.addSettingsUpdateConsumer(INDEXING_CPU_USAGE_LIMIT, this::setIndexingCPULimit); clusterSettings.addSettingsUpdateConsumer(SEARCH_CPU_USAGE_LIMIT, this::setSearchCPULimit); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ADMIN_CPU_USAGE_LIMIT, this::setClusterInfoCPULimit); + } private void setTransportLayerMode(AdmissionControlMode admissionControlMode) { @@ -88,6 +100,10 @@ public Long getIndexingCPULimit() { return indexingCPULimit; } + public Long getClusterAdminCPULimit() { + return clusterInfoCPULimit; + } + public void setIndexingCPULimit(Long indexingCPULimit) { this.indexingCPULimit = indexingCPULimit; } @@ -95,4 +111,9 @@ public void setIndexingCPULimit(Long indexingCPULimit) { public void setSearchCPULimit(Long searchCPULimit) { this.searchCPULimit = searchCPULimit; } + + public void setClusterInfoCPULimit(Long clusterInfoCPULimit) { + this.clusterInfoCPULimit = clusterInfoCPULimit; + } + } diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettings.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettings.java index e58ed28d21605..e442906ea77d7 100644 --- a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettings.java +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettings.java @@
-25,11 +25,14 @@ public class IoBasedAdmissionControllerSettings { */ public static class Defaults { public static final long IO_USAGE_LIMIT = 95; + public static final long CLUSTER_ADMIN_IO_USAGE_LIMIT = 100; + } private AdmissionControlMode transportLayerMode; private Long searchIOUsageLimit; private Long indexingIOUsageLimit; + private Long clusterAdminIOUsageLimit; /** * Feature level setting to operate in shadow-mode or in enforced-mode. If enforced field is set @@ -63,11 +66,22 @@ public static class Defaults { Setting.Property.NodeScope ); + /** + * This setting is used to set the limit for cluster admin requests; by default it uses the default cluster_admin IO usage limit + */ + public static final Setting<Long> CLUSTER_ADMIN_IO_USAGE_LIMIT = Setting.longSetting( + "admission_control.cluster_admin.io_usage.limit", + Defaults.CLUSTER_ADMIN_IO_USAGE_LIMIT, + Setting.Property.Final, + Setting.Property.NodeScope + ); + public IoBasedAdmissionControllerSettings(ClusterSettings clusterSettings, Settings settings) { this.transportLayerMode = IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.get(settings); clusterSettings.addSettingsUpdateConsumer(IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, this::setTransportLayerMode); this.searchIOUsageLimit = SEARCH_IO_USAGE_LIMIT.get(settings); this.indexingIOUsageLimit = INDEXING_IO_USAGE_LIMIT.get(settings); + this.clusterAdminIOUsageLimit = CLUSTER_ADMIN_IO_USAGE_LIMIT.get(settings); clusterSettings.addSettingsUpdateConsumer(INDEXING_IO_USAGE_LIMIT, this::setIndexingIOUsageLimit); clusterSettings.addSettingsUpdateConsumer(SEARCH_IO_USAGE_LIMIT, this::setSearchIOUsageLimit); } @@ -95,4 +109,8 @@ public Long getIndexingIOUsageLimit() { public Long getSearchIOUsageLimit() { return searchIOUsageLimit; } + + public Long getClusterAdminIOUsageLimit() { + return clusterAdminIOUsageLimit; + } } diff --git a/server/src/main/java/org/opensearch/transport/TransportService.java b/server/src/main/java/org/opensearch/transport/TransportService.java index 652d57f4c5348..d08b28730d417 100644 --- a/server/src/main/java/org/opensearch/transport/TransportService.java +++ b/server/src/main/java/org/opensearch/transport/TransportService.java @@ -1214,7 +1214,11 @@ public void registerRequestHandler( TransportRequestHandler<Request> handler ) { validateActionName(action); - handler = interceptor.interceptHandler(action, executor, forceExecution, handler, admissionControlActionType); + if (admissionControlActionType != null) { + handler = interceptor.interceptHandler(action, executor, forceExecution, handler, admissionControlActionType); + } else { + handler = interceptor.interceptHandler(action, executor, forceExecution, handler); + } RequestHandlerRegistry<Request> reg = new RequestHandlerRegistry<>( action, requestReader, diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettingsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettingsTests.java index 9ce28bc7fdb40..6836ecb3d615f 100644 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettingsTests.java +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettingsTests.java @@ -49,7 +49,8 @@ public void testSettingsExists() { Arrays.asList( CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, CpuBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT, -
CpuBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT + CpuBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT, + CpuBasedAdmissionControllerSettings.CLUSTER_ADMIN_CPU_USAGE_LIMIT ) ) ); @@ -149,4 +150,33 @@ public void testUpdateAfterGetConfiguredSettings() { assertEquals(cpuBasedAdmissionControllerSettings.getSearchCPULimit().longValue(), searchPercent); assertEquals(cpuBasedAdmissionControllerSettings.getIndexingCPULimit().longValue(), indexingPercent); } + + public void testConfiguredSettingsForAdmin() { + Settings settings = Settings.builder() + .put( + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .put(CpuBasedAdmissionControllerSettings.CLUSTER_ADMIN_CPU_USAGE_LIMIT.getKey(), 50) + .build(); + + CpuBasedAdmissionControllerSettings cpuBasedAdmissionControllerSettings = new CpuBasedAdmissionControllerSettings( + clusterService.getClusterSettings(), + settings + ); + assertEquals(cpuBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.ENFORCED); + assertEquals(cpuBasedAdmissionControllerSettings.getClusterAdminCPULimit().longValue(), 50); + + Settings updatedSettings = Settings.builder() + .put( + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.MONITOR.getMode() + ) + .put(CpuBasedAdmissionControllerSettings.CLUSTER_ADMIN_CPU_USAGE_LIMIT.getKey(), 90) + .build(); + clusterService.getClusterSettings().applySettings(updatedSettings); + assertEquals(cpuBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.MONITOR); + assertEquals(cpuBasedAdmissionControllerSettings.getClusterAdminCPULimit().longValue(), 90); + + } } diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettingsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettingsTests.java index ff777c175ec0e..c462f9700264d 100644 --- a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettingsTests.java +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettingsTests.java @@ -72,6 +72,10 @@ public void testDefaultSettings() { assertEquals(ioBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.DISABLED); assertEquals(ioBasedAdmissionControllerSettings.getIndexingIOUsageLimit().longValue(), percent); assertEquals(ioBasedAdmissionControllerSettings.getSearchIOUsageLimit().longValue(), percent); + assertEquals( + ioBasedAdmissionControllerSettings.getClusterAdminIOUsageLimit().longValue(), + IoBasedAdmissionControllerSettings.Defaults.CLUSTER_ADMIN_IO_USAGE_LIMIT + ); } public void testGetConfiguredSettings() { @@ -134,6 +138,10 @@ public void testUpdateAfterGetConfiguredSettings() { assertEquals(ioBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.ENFORCED); assertEquals(ioBasedAdmissionControllerSettings.getSearchIOUsageLimit().longValue(), searchPercent); assertEquals(ioBasedAdmissionControllerSettings.getIndexingIOUsageLimit().longValue(), percent); + assertEquals( + ioBasedAdmissionControllerSettings.getClusterAdminIOUsageLimit().longValue(), + IoBasedAdmissionControllerSettings.Defaults.CLUSTER_ADMIN_IO_USAGE_LIMIT + ); Settings 
updatedSettings = Settings.builder() .put( @@ -146,6 +154,10 @@ public void testUpdateAfterGetConfiguredSettings() { assertEquals(ioBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.MONITOR); assertEquals(ioBasedAdmissionControllerSettings.getSearchIOUsageLimit().longValue(), searchPercent); assertEquals(ioBasedAdmissionControllerSettings.getIndexingIOUsageLimit().longValue(), indexingPercent); + assertEquals( + ioBasedAdmissionControllerSettings.getClusterAdminIOUsageLimit().longValue(), + IoBasedAdmissionControllerSettings.Defaults.CLUSTER_ADMIN_IO_USAGE_LIMIT + ); searchPercent = 70; updatedSettings = Settings.builder() @@ -156,5 +168,9 @@ public void testUpdateAfterGetConfiguredSettings() { clusterService.getClusterSettings().applySettings(updatedSettings); assertEquals(ioBasedAdmissionControllerSettings.getSearchIOUsageLimit().longValue(), searchPercent); assertEquals(ioBasedAdmissionControllerSettings.getIndexingIOUsageLimit().longValue(), indexingPercent); + assertEquals( + ioBasedAdmissionControllerSettings.getClusterAdminIOUsageLimit().longValue(), + IoBasedAdmissionControllerSettings.Defaults.CLUSTER_ADMIN_IO_USAGE_LIMIT + ); } } From 53c0ce38b26569c90f8980f1c2b0b70e5ebbf6fc Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Thu, 21 Mar 2024 20:40:05 +0800 Subject: [PATCH 18/74] Update supported version for the wait_for_completion parameter in open&clone&shrink&split APIs (#12819) Signed-off-by: Gao Binlong --- .../test/indices.clone/40_wait_for_completion.yml | 4 ++-- .../test/indices.open/30_wait_for_completion.yml | 4 ++-- .../test/indices.shrink/50_wait_for_completion.yml | 4 ++-- .../test/indices.split/40_wait_for_completion.yml | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/40_wait_for_completion.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/40_wait_for_completion.yml index b298575d15410..c9c1558797a35 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/40_wait_for_completion.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/40_wait_for_completion.yml @@ -4,8 +4,8 @@ # will return a task immediately and the clone operation will run in background. - skip: - version: " - 2.99.99" - reason: "only available in 3.0+" + version: " - 2.6.99" + reason: "wait_for_completion was introduced in 2.7.0" features: allowed_warnings - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/30_wait_for_completion.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/30_wait_for_completion.yml index 2caf604eb4296..b93c75f6819c7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/30_wait_for_completion.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/30_wait_for_completion.yml @@ -4,8 +4,8 @@ # will return a task immediately and the open operation will run in background. 
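# The skip range below is inclusive: a version of " - 2.6.99" skips every release up
# to and including 2.6.99, so the test only runs against 2.7.0 and later, the release
# in which the wait_for_completion parameter was introduced.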
- skip: - version: " - 2.99.99" - reason: "only available in 3.0+" + version: " - 2.6.99" + reason: "wait_for_completion was introduced in 2.7.0" features: allowed_warnings - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/50_wait_for_completion.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/50_wait_for_completion.yml index f7568b1446967..53df9f61700cd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/50_wait_for_completion.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/50_wait_for_completion.yml @@ -4,8 +4,8 @@ # will return a task immediately and the shrink operation will run in background. - skip: - version: " - 2.99.99" - reason: "only available in 3.0+" + version: " - 2.6.99" + reason: "wait_for_completion was introduced in 2.7.0" features: allowed_warnings - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/40_wait_for_completion.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/40_wait_for_completion.yml index 2ce4fc620742a..9d56cc0800b09 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/40_wait_for_completion.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/40_wait_for_completion.yml @@ -4,8 +4,8 @@ # will return a task immediately and the split operation will run in background. - skip: - version: " - 2.99.99" - reason: "only available in 3.0+" + version: " - 2.6.99" + reason: "wait_for_completion was introduced in 2.7.0" features: allowed_warnings - do: From fd458d62faa0a264d3e7378d9a0a8ffd90f6f235 Mon Sep 17 00:00:00 2001 From: kkewwei Date: Thu, 21 Mar 2024 21:35:58 +0800 Subject: [PATCH 19/74] Fix Flaky SimpleQueryStringIT Tests (#12575) * Fix Flaky SimpleQueryStringIT Tests Signed-off-by: kkewwei * add the comment to unit test Signed-off-by: kkewwei --------- Signed-off-by: kkewwei --- .../org/opensearch/search/query/SimpleQueryStringIT.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java index 31678d3f018a1..cae543506f919 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java @@ -112,7 +112,10 @@ public static Collection<Object[]> parameters() { @BeforeClass public static void createRandomClusterSetting() { - CLUSTER_MAX_CLAUSE_COUNT = randomIntBetween(60, 100); + // The lower bound can't be small (such as 60): simpleQueryStringQuery("foo Bar 19 127.0.0.1") in testDocWithAllTypes + // will create many BooleanClause clauses, which would throw a too_many_nested_clauses exception. + // So we need to set a higher bound (such as 80) to avoid failures.
+ CLUSTER_MAX_CLAUSE_COUNT = randomIntBetween(80, 100); } @Override From e673b6194fab6f672b62dfdaf8f082bc8dec403f Mon Sep 17 00:00:00 2001 From: rajiv-kv <157019998+rajiv-kv@users.noreply.github.com> Date: Thu, 21 Mar 2024 22:36:47 +0530 Subject: [PATCH 20/74] Update the support version to 2.13 as term-check is merged to 2.x (#12830) Signed-off-by: Rajiv Kumar Vaidyanathan --- .../clustermanager/TransportClusterManagerNodeAction.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java index 12d572b30272a..5f57658e33924 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java @@ -78,7 +78,7 @@ import java.util.function.Consumer; import java.util.function.Predicate; -import static org.opensearch.Version.V_3_0_0; +import static org.opensearch.Version.V_2_13_0; /** * A base class for operations that needs to be performed on the cluster-manager node. @@ -299,7 +299,7 @@ protected void doStart(ClusterState clusterState) { retryOnMasterChange(clusterState, null); } else { DiscoveryNode clusterManagerNode = nodes.getClusterManagerNode(); - if (clusterManagerNode.getVersion().onOrAfter(V_3_0_0) && localExecuteSupportedByAction()) { + if (clusterManagerNode.getVersion().onOrAfter(V_2_13_0) && localExecuteSupportedByAction()) { BiConsumer executeOnLocalOrClusterManager = clusterStateLatestChecker( this::executeOnLocalNode, this::executeOnClusterManager From 7bd3715c81acdaa3c3eaf2c50032db139718f485 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 21 Mar 2024 17:56:02 -0400 Subject: [PATCH 21/74] Add 2.14.0 to version & BWC (#12842) Signed-off-by: Andriy Redko --- .ci/bwcVersions | 1 + libs/core/src/main/java/org/opensearch/Version.java | 1 + 2 files changed, 2 insertions(+) diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 1e3b913c5cb5a..78d8796c624d7 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -30,3 +30,4 @@ BWC_VERSION: - "2.12.0" - "2.12.1" - "2.13.0" + - "2.14.0" diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index 66ba446d4fc54..56df46ae94d44 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++ b/libs/core/src/main/java/org/opensearch/Version.java @@ -101,6 +101,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_12_0 = new Version(2120099, org.apache.lucene.util.Version.LUCENE_9_9_2); public static final Version V_2_12_1 = new Version(2120199, org.apache.lucene.util.Version.LUCENE_9_9_2); public static final Version V_2_13_0 = new Version(2130099, org.apache.lucene.util.Version.LUCENE_9_10_0); + public static final Version V_2_14_0 = new Version(2140099, org.apache.lucene.util.Version.LUCENE_9_10_0); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_11_0); public static final Version CURRENT = V_3_0_0; From 0c0bcd9ffa10ba7999418f2f7d400ba3a1d446dd Mon Sep 17 00:00:00 2001 From: Varun Bansal Date: Fri, 22 Mar 2024 05:16:45 +0530 Subject: [PATCH 22/74] Add release-notes for 2.13.0 (#12841) Signed-off-by: Varun Bansal --- .../opensearch.release-notes-2.13.0.md | 71 +++++++++++++++++++ 1 file changed, 
71 insertions(+) create mode 100644 release-notes/opensearch.release-notes-2.13.0.md diff --git a/release-notes/opensearch.release-notes-2.13.0.md b/release-notes/opensearch.release-notes-2.13.0.md new file mode 100644 index 0000000000000..517092564545d --- /dev/null +++ b/release-notes/opensearch.release-notes-2.13.0.md @@ -0,0 +1,71 @@ +## 2024-03-21 Version 2.13.0 Release Notes + +## [2.13.0] +### Added +- [Tiered caching] Introducing cache plugins and exposing Ehcache as one of the pluggable disk cache option ([#11874](https://github.com/opensearch-project/OpenSearch/pull/11874)) +- Add support for dependencies in plugin descriptor properties with semver range ([#11441](https://github.com/opensearch-project/OpenSearch/pull/11441)) +- Add community_id ingest processor ([#12121](https://github.com/opensearch-project/OpenSearch/pull/12121)) +- Introduce query level setting `index.query.max_nested_depth` limiting nested queries ([#3268](https://github.com/opensearch-project/OpenSearch/issues/3268)) +- Add toString methods to MultiSearchRequest, MultiGetRequest and CreateIndexRequest ([#12163](https://github.com/opensearch-project/OpenSearch/pull/12163)) +- Fix error in RemoteSegmentStoreDirectory when debug logging is enabled ([#12328](https://github.com/opensearch-project/OpenSearch/pull/12328)) +- Support for returning scores in matched queries ([#11626](https://github.com/opensearch-project/OpenSearch/pull/11626)) +- Add shard id property to SearchLookup for use in field types provided by plugins ([#1063](https://github.com/opensearch-project/OpenSearch/pull/1063)) +- [Tiered caching] Make IndicesRequestCache implementation configurable [EXPERIMENTAL] ([#12533](https://github.com/opensearch-project/OpenSearch/pull/12533)) +- Force merge API supports performing on primary shards only ([#11269](https://github.com/opensearch-project/OpenSearch/pull/11269)) +- Add kuromoji_completion analyzer and filter ([#4835](https://github.com/opensearch-project/OpenSearch/issues/4835)) +- [Admission Control] Integrate IO Usage Tracker to the Resource Usage Collector Service and Emit IO Usage Stats ([#11880](https://github.com/opensearch-project/OpenSearch/pull/11880)) +- The org.opensearch.bootstrap.Security should support codebase for JAR files with classifiers ([#12586](https://github.com/opensearch-project/OpenSearch/issues/12586)) +- Remote reindex: Add support for configurable retry mechanism ([#12561](https://github.com/opensearch-project/OpenSearch/pull/12561)) +- Tracing for deep search path ([#12103](https://github.com/opensearch-project/OpenSearch/pull/12103)) +- [Metrics Framework] Adds support for asynchronous gauge metric type.
([#12642](https://github.com/opensearch-project/OpenSearch/issues/12642)) +- [Tiered caching] Add Stale keys Management and CacheCleaner to IndicesRequestCache ([#12625](https://github.com/opensearch-project/OpenSearch/pull/12625)) +- Make search query counters dynamic to support all query types ([#12601](https://github.com/opensearch-project/OpenSearch/pull/12601)) +- [Tiered caching] Add policies controlling which values can enter pluggable caches [EXPERIMENTAL] ([#12542](https://github.com/opensearch-project/OpenSearch/pull/12542)) +- [Tiered caching] Add serializer integration to allow ehcache disk cache to use non-primitive values ([#12709](https://github.com/opensearch-project/OpenSearch/pull/12709)) +- [Admission Control] Integrated IO Based AdmissionController to AdmissionControl Framework ([#12583](https://github.com/opensearch-project/OpenSearch/pull/12583)) +- Add Remote Store Migration Experimental flag and allow mixed mode clusters under same ([#11986](https://github.com/opensearch-project/OpenSearch/pull/11986)) +- Built-in secure transports support ([#12435](https://github.com/opensearch-project/OpenSearch/pull/12435)) +- Lightweight Transport action to verify local term before fetching cluster-state from remote ([#12252](https://github.com/opensearch-project/OpenSearch/pull/12252/)) + +### Dependencies +- Bump `com.squareup.okio:okio` from 3.7.0 to 3.8.0 ([#12290](https://github.com/opensearch-project/OpenSearch/pull/12290)) +- Bump `org.bouncycastle:bcprov-jdk15to18` to `org.bouncycastle:bcprov-jdk18on` version 1.77 ([#12326](https://github.com/opensearch-project/OpenSearch/pull/12326)) +- Bump `org.bouncycastle:bcmail-jdk15to18` to `org.bouncycastle:bcmail-jdk18on` version 1.77 ([#12326](https://github.com/opensearch-project/OpenSearch/pull/12326)) +- Bump `org.bouncycastle:bcpkix-jdk15to18` to `org.bouncycastle:bcpkix-jdk18on` version 1.77 ([#12326](https://github.com/opensearch-project/OpenSearch/pull/12326)) +- Bump `gradle/wrapper-validation-action` from 1 to 2 ([#12367](https://github.com/opensearch-project/OpenSearch/pull/12367)) +- Bump `netty` from 4.1.106.Final to 4.1.107.Final ([#12372](https://github.com/opensearch-project/OpenSearch/pull/12372)) +- Bump `opentelemetry` from 1.34.1 to 1.36.0 ([#12388](https://github.com/opensearch-project/OpenSearch/pull/12388), [#12618](https://github.com/opensearch-project/OpenSearch/pull/12618)) +- Bump Apache Lucene from 9.9.2 to 9.10.0 ([#12392](https://github.com/opensearch-project/OpenSearch/pull/12392)) +- Bump `org.apache.logging.log4j:log4j-core` from 2.22.1 to 2.23.1 ([#12464](https://github.com/opensearch-project/OpenSearch/pull/12464), [#12587](https://github.com/opensearch-project/OpenSearch/pull/12587)) +- Bump `antlr4` from 4.11.1 to 4.13.1 ([#12445](https://github.com/opensearch-project/OpenSearch/pull/12445)) +- Bump `com.netflix.nebula.ospackage-base` from 11.8.0 to 11.8.1 ([#12461](https://github.com/opensearch-project/OpenSearch/pull/12461)) +- Bump `peter-evans/create-or-update-comment` from 3 to 4 ([#12462](https://github.com/opensearch-project/OpenSearch/pull/12462)) +- Bump `lycheeverse/lychee-action` from 1.9.1 to 1.9.3 ([#12521](https://github.com/opensearch-project/OpenSearch/pull/12521)) +- Bump `com.azure:azure-core` from 1.39.0 to 1.47.0 ([#12520](https://github.com/opensearch-project/OpenSearch/pull/12520)) +- Bump `ch.qos.logback:logback-core` from 1.2.13 to 1.5.3 ([#12519](https://github.com/opensearch-project/OpenSearch/pull/12519)) +- Bump `codecov/codecov-action` from 3 to 4 
([#12585](https://github.com/opensearch-project/OpenSearch/pull/12585)) +- Bump `org.apache.zookeeper:zookeeper` from 3.9.1 to 3.9.2 ([#12580](https://github.com/opensearch-project/OpenSearch/pull/12580)) +- Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#12579](https://github.com/opensearch-project/OpenSearch/pull/12579)) +- Bump Jackson version from 2.16.1 to 2.17.0 ([#12611](https://github.com/opensearch-project/OpenSearch/pull/12611), [#12662](https://github.com/opensearch-project/OpenSearch/pull/12662)) +- Bump `reactor-netty` from 1.1.15 to 1.1.17 ([#12633](https://github.com/opensearch-project/OpenSearch/pull/12633)) +- Bump `reactor` from 3.5.14 to 3.5.15 ([#12633](https://github.com/opensearch-project/OpenSearch/pull/12633)) +- Bump `aws-sdk-java` from 2.20.55 to 2.20.86 ([#12251](https://github.com/opensearch-project/OpenSearch/pull/12251)) + +### Changed +- Allow composite aggregation to run under a parent filter aggregation ([#11499](https://github.com/opensearch-project/OpenSearch/pull/11499)) +- Quickly compute terms aggregations when the top-level query is functionally match-all for a segment ([#11643](https://github.com/opensearch-project/OpenSearch/pull/11643)) +- Mark fuzzy filter GA and remove experimental setting ([#12631](https://github.com/opensearch-project/OpenSearch/pull/12631)) +- Keep the election scheduler open until cluster state has been applied ([#11699](https://github.com/opensearch-project/OpenSearch/pull/11699)) + +### Fixed +- [Revert] [Bug] Check phase name before SearchRequestOperationsListener onPhaseStart ([#12035](https://github.com/opensearch-project/OpenSearch/pull/12035)) +- Add support of special WrappingSearchAsyncActionPhase so the onPhaseStart() will always be followed by onPhaseEnd() within AbstractSearchAsyncAction ([#12293](https://github.com/opensearch-project/OpenSearch/pull/12293)) +- Add a system property to configure YamlParser codepoint limits ([#12298](https://github.com/opensearch-project/OpenSearch/pull/12298)) +- Prevent read beyond slice boundary in ByteArrayIndexInput ([#10481](https://github.com/opensearch-project/OpenSearch/issues/10481)) +- Fix the "highlight.max_analyzer_offset" request parameter with "plain" highlighter ([#10919](https://github.com/opensearch-project/OpenSearch/pull/10919)) +- Prevent unnecessary fetch sub phase processor initialization during fetch phase execution ([#12503](https://github.com/opensearch-project/OpenSearch/pull/12503)) +- Fix `terms` query on `float` field when `doc_values` are turned off by reverting back to `FloatPoint` from `FloatField` ([#12499](https://github.com/opensearch-project/OpenSearch/pull/12499)) +- Fix get task API does not refresh resource stats ([#11531](https://github.com/opensearch-project/OpenSearch/pull/11531)) +- Fix for deserialization bug in weighted round-robin metadata ([#11679](https://github.com/opensearch-project/OpenSearch/pull/11679)) +- onShardResult and onShardFailure being executed on one shard caused the OpenSearch JVM to crash ([#12158](https://github.com/opensearch-project/OpenSearch/pull/12158)) +- Avoid overflow when sorting missing last on `epoch_millis` datetime field ([#12676](https://github.com/opensearch-project/OpenSearch/pull/12676)) From 0c0bcd9ffa10ba7999418f2f7d400ba3a1d446dd Mon Sep 17 00:00:00 2001 From: "Daniel (dB.) Doubrovkine" Date: Thu, 21 Mar 2024 20:35:21 -0400 Subject: [PATCH 23/74] Catch task description error (#12834) * Always return a task description even when it cannot be serialized.
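The guard this change adds can be sketched in isolation. Below is a minimal, self-contained illustration of the same degrade-gracefully idea; the class and method names are hypothetical, not OpenSearch API:

    // Hypothetical sketch: building a description must not propagate a toString()
    // (serialization) failure; fall back to an empty source marker instead.
    final class TaskDescriptionSketch {
        static String describeSource(Object source) {
            StringBuilder sb = new StringBuilder("source[");
            try {
                sb.append(source); // StringBuilder.append(Object) invokes toString(), which may throw
            } catch (RuntimeException e) {
                // Swallow and degrade gracefully, mirroring the fallback in the diff below.
            }
            return sb.append(']').toString();
        }

        public static void main(String[] args) {
            Object unserializable = new Object() {
                @Override
                public String toString() {
                    throw new UnsupportedOperationException("cannot be serialized");
                }
            };
            System.out.println(describeSource(unserializable)); // prints: source[]
        }
    }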
Signed-off-by: dblock * Expect tasks to fail. Signed-off-by: dblock * Only catch exceptions when getting description. Signed-off-by: dblock * Added to mark error more clearly. Signed-off-by: dblock --------- Signed-off-by: dblock --- .../opensearch/action/search/SearchRequest.java | 9 ++++++++- .../search/builder/SearchSourceBuilder.java | 2 +- .../action/search/SearchRequestTests.java | 15 +++++++++++++++ .../search/geo/GeoPointShapeQueryTests.java | 8 ++++++-- 4 files changed, 30 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequest.java b/server/src/main/java/org/opensearch/action/search/SearchRequest.java index f738c182c06da..3b8a6937815aa 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequest.java @@ -32,6 +32,7 @@ package org.opensearch.action.search; +import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; @@ -712,7 +713,13 @@ public final String buildDescription() { sb.append("scroll[").append(scroll.keepAlive()).append("], "); } if (source != null) { - sb.append("source[").append(source.toString(FORMAT_PARAMS)).append("]"); + sb.append("source["); + try { + sb.append(source.toString(FORMAT_PARAMS)); + } catch (final OpenSearchException ex) { + sb.append(""); + } + sb.append("]"); } else { sb.append("source[]"); } diff --git a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java index 1a5a9dc6d1f03..bf7045d43ba67 100644 --- a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java @@ -1842,7 +1842,7 @@ public String toString() { public String toString(Params params) { try { return XContentHelper.toXContent(this, MediaTypeRegistry.JSON, params, true).utf8ToString(); - } catch (IOException e) { + } catch (IOException | UnsupportedOperationException e) { throw new OpenSearchException(e); } } diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java index f025e3a63b9bf..9ee314e77ca7e 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java @@ -39,6 +39,8 @@ import org.opensearch.common.util.ArrayUtils; import org.opensearch.core.common.Strings; import org.opensearch.core.tasks.TaskId; +import org.opensearch.geometry.LinearRing; +import org.opensearch.index.query.GeoShapeQueryBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.AbstractSearchTestCase; import org.opensearch.search.Scroll; @@ -269,6 +271,19 @@ public void testDescriptionIncludesScroll() { ); } + public void testDescriptionOnSourceError() { + LinearRing linearRing = new LinearRing(new double[] { -25, -35, -25 }, new double[] { -25, -35, -25 }); + GeoShapeQueryBuilder queryBuilder = new GeoShapeQueryBuilder("geo", linearRing); + SearchRequest request = new SearchRequest(); + request.source(new SearchSourceBuilder().query(queryBuilder)); + assertThat( + toDescription(request), + equalTo( + "indices[], search_type[QUERY_THEN_FETCH], source[]" + ) + ); + } + private String toDescription(SearchRequest request) { 
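// Creates a throwaway task (arbitrary id, empty headers) solely to read back its
// description; SearchRequest.createTask embeds the serialized source in that description.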
return request.createTask(0, "test", SearchAction.NAME, TaskId.EMPTY_TASK_ID, emptyMap()).getDescription(); } diff --git a/server/src/test/java/org/opensearch/search/geo/GeoPointShapeQueryTests.java b/server/src/test/java/org/opensearch/search/geo/GeoPointShapeQueryTests.java index b5d34a78ab5a4..b00f36ef52d4a 100644 --- a/server/src/test/java/org/opensearch/search/geo/GeoPointShapeQueryTests.java +++ b/server/src/test/java/org/opensearch/search/geo/GeoPointShapeQueryTests.java @@ -100,6 +100,7 @@ public void testProcessRelationSupport() throws Exception { client().prepareSearch("test") .setQuery(QueryBuilders.geoShapeQuery(defaultGeoFieldName, rectangle).relation(shapeRelation)) .get(); + fail("Expected " + shapeRelation + " query relation not supported for Field [" + defaultGeoFieldName + "]"); } catch (SearchPhaseExecutionException e) { assertThat( e.getCause().getMessage(), @@ -119,6 +120,7 @@ public void testQueryLine() throws Exception { try { client().prepareSearch("test").setQuery(QueryBuilders.geoShapeQuery(defaultGeoFieldName, line)).get(); + fail("Expected field [" + defaultGeoFieldName + "] does not support LINEARRING queries"); } catch (SearchPhaseExecutionException e) { assertThat(e.getCause().getMessage(), containsString("does not support " + GeoShapeType.LINESTRING + " queries")); } @@ -138,13 +140,12 @@ public void testQueryLinearRing() throws Exception { searchRequestBuilder.setQuery(queryBuilder); searchRequestBuilder.setIndices("test"); searchRequestBuilder.get(); + fail("Expected field [" + defaultGeoFieldName + "] does not support LINEARRING queries"); } catch (SearchPhaseExecutionException e) { assertThat( e.getCause().getMessage(), containsString("Field [" + defaultGeoFieldName + "] does not support LINEARRING queries") ); - } catch (UnsupportedOperationException e) { - assertThat(e.getMessage(), containsString("line ring cannot be serialized using GeoJson")); } } @@ -162,6 +163,7 @@ public void testQueryMultiLine() throws Exception { try { client().prepareSearch("test").setQuery(QueryBuilders.geoShapeQuery(defaultGeoFieldName, multiline)).get(); + fail("Expected field [" + defaultGeoFieldName + "] does not support " + GeoShapeType.MULTILINESTRING + " queries"); } catch (Exception e) { assertThat(e.getCause().getMessage(), containsString("does not support " + GeoShapeType.MULTILINESTRING + " queries")); } @@ -177,6 +179,7 @@ public void testQueryMultiPoint() throws Exception { try { client().prepareSearch("test").setQuery(QueryBuilders.geoShapeQuery(defaultGeoFieldName, multiPoint)).get(); + fail("Expected field [" + defaultGeoFieldName + "] does not support " + GeoShapeType.MULTIPOINT + " queries"); } catch (Exception e) { assertThat(e.getCause().getMessage(), containsString("does not support " + GeoShapeType.MULTIPOINT + " queries")); } @@ -192,6 +195,7 @@ public void testQueryPoint() throws Exception { try { client().prepareSearch("test").setQuery(QueryBuilders.geoShapeQuery(defaultGeoFieldName, point)).get(); + fail("Expected field [" + defaultGeoFieldName + "] does not support " + GeoShapeType.POINT + " queries"); } catch (Exception e) { assertThat(e.getCause().getMessage(), containsString("does not support " + GeoShapeType.POINT + " queries")); } From 13604c88cf31a402e3c771ed4ec90466b594fa1b Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Thu, 21 Mar 2024 20:49:15 -0500 Subject: [PATCH 24/74] Clear out 2.x section of changelog (#12847) The 2.13 release notes have been created. 
Signed-off-by: Andrew Ross --- CHANGELOG.md | 60 ---------------------------------------------------- 1 file changed, 60 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9dced730cbcad..af20332c61146 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -101,76 +101,16 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added -- [Tiered caching] Introducing cache plugins and exposing Ehcache as one of the pluggable disk cache option ([#11874](https://github.com/opensearch-project/OpenSearch/pull/11874)) -- Add support for dependencies in plugin descriptor properties with semver range ([#11441](https://github.com/opensearch-project/OpenSearch/pull/11441)) -- Add community_id ingest processor ([#12121](https://github.com/opensearch-project/OpenSearch/pull/12121)) -- Introduce query level setting `index.query.max_nested_depth` limiting nested queries ([#3268](https://github.com/opensearch-project/OpenSearch/issues/3268) -- Add toString methods to MultiSearchRequest, MultiGetRequest and CreateIndexRequest ([#12163](https://github.com/opensearch-project/OpenSearch/pull/12163)) -- Support for returning scores in matched queries ([#11626](https://github.com/opensearch-project/OpenSearch/pull/11626)) -- Add shard id property to SearchLookup for use in field types provided by plugins ([#1063](https://github.com/opensearch-project/OpenSearch/pull/1063)) -- Force merge API supports performing on primary shards only ([#11269](https://github.com/opensearch-project/OpenSearch/pull/11269)) -- [Tiered caching] Make IndicesRequestCache implementation configurable [EXPERIMENTAL] ([#12533](https://github.com/opensearch-project/OpenSearch/pull/12533)) -- Add kuromoji_completion analyzer and filter ([#4835](https://github.com/opensearch-project/OpenSearch/issues/4835)) -- The org.opensearch.bootstrap.Security should support codebase for JAR files with classifiers ([#12586](https://github.com/opensearch-project/OpenSearch/issues/12586)) -- [Metrics Framework] Adds support for asynchronous gauge metric type. ([#12642](https://github.com/opensearch-project/OpenSearch/issues/12642)) -- Make search query counters dynamic to support all query types ([#12601](https://github.com/opensearch-project/OpenSearch/pull/12601)) -- [Tiered caching] Add policies controlling which values can enter pluggable caches [EXPERIMENTAL] ([#12542](https://github.com/opensearch-project/OpenSearch/pull/12542)) -- [Tiered caching] Add Stale keys Management and CacheCleaner to IndicesRequestCache ([#12625](https://github.com/opensearch-project/OpenSearch/pull/12625)) -- [Tiered caching] Add serializer integration to allow ehcache disk cache to use non-primitive values ([#12709](https://github.com/opensearch-project/OpenSearch/pull/12709)) -- [Admission Control] Integrated IO Based AdmissionController to AdmissionControl Framework ([#12583](https://github.com/opensearch-project/OpenSearch/pull/12583)) -- Introduce a new setting `index.check_pending_flush.enabled` to expose the ability to disable the check for pending flushes by write threads ([#12710](https://github.com/opensearch-project/OpenSearch/pull/12710)) -- Built-in secure transports support ([#12435](https://github.com/opensearch-project/OpenSearch/pull/12435)) -- Lightweight Transport action to verify local term before fetching cluster-state from remote ([#12252](https://github.com/opensearch-project/OpenSearch/pull/12252/)) -- Integrate with admission controller for cluster-manager Read API. 
([#12496](https://github.com/opensearch-project/OpenSearch/pull/12496)) ### Dependencies -- Bump `peter-evans/find-comment` from 2 to 3 ([#12288](https://github.com/opensearch-project/OpenSearch/pull/12288)) -- Bump `com.google.api.grpc:proto-google-common-protos` from 2.25.1 to 2.37.1 ([#12289](https://github.com/opensearch-project/OpenSearch/pull/12289), [#12365](https://github.com/opensearch-project/OpenSearch/pull/12365)) -- Bump `com.squareup.okio:okio` from 3.7.0 to 3.8.0 ([#12290](https://github.com/opensearch-project/OpenSearch/pull/12290)) -- Bump `gradle/wrapper-validation-action` from 1 to 2 ([#12367](https://github.com/opensearch-project/OpenSearch/pull/12367)) -- Bump `netty` from 4.1.106.Final to 4.1.107.Final ([#12372](https://github.com/opensearch-project/OpenSearch/pull/12372)) -- Bump `opentelemetry` from 1.34.1 to 1.36.0 ([#12388](https://github.com/opensearch-project/OpenSearch/pull/12388), [#12618](https://github.com/opensearch-project/OpenSearch/pull/12618)) -- Bump Apache Lucene from 9.9.2 to 9.10.0 ([#12392](https://github.com/opensearch-project/OpenSearch/pull/12392)) -- Bump `org.apache.logging.log4j:log4j-core` from 2.22.1 to 2.23.1 ([#12464](https://github.com/opensearch-project/OpenSearch/pull/12464), [#12587](https://github.com/opensearch-project/OpenSearch/pull/12587)) -- Bump `antlr4` from 4.11.1 to 4.13.1 ([#12445](https://github.com/opensearch-project/OpenSearch/pull/12445)) -- Bump `com.netflix.nebula.ospackage-base` from 11.8.0 to 11.8.1 ([#12461](https://github.com/opensearch-project/OpenSearch/pull/12461)) -- Bump `peter-evans/create-or-update-comment` from 3 to 4 ([#12462](https://github.com/opensearch-project/OpenSearch/pull/12462)) -- Bump `lycheeverse/lychee-action` from 1.9.1 to 1.9.3 ([#12521](https://github.com/opensearch-project/OpenSearch/pull/12521)) -- Bump `com.azure:azure-core` from 1.39.0 to 1.47.0 ([#12520](https://github.com/opensearch-project/OpenSearch/pull/12520)) -- Bump `ch.qos.logback:logback-core` from 1.2.13 to 1.5.3 ([#12519](https://github.com/opensearch-project/OpenSearch/pull/12519)) -- Bump `codecov/codecov-action` from 3 to 4 ([#12585](https://github.com/opensearch-project/OpenSearch/pull/12585)) -- Bump `org.apache.zookeeper:zookeeper` from 3.9.1 to 3.9.2 ([#12580](https://github.com/opensearch-project/OpenSearch/pull/12580)) -- Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#12579](https://github.com/opensearch-project/OpenSearch/pull/12579)) -- Bump Jackson version from 2.16.1 to 2.17.0 ([#12611](https://github.com/opensearch-project/OpenSearch/pull/12611), [#12662](https://github.com/opensearch-project/OpenSearch/pull/12662)) -- Bump `aws-sdk-java` from 2.20.55 to 2.20.86 ([#12251](https://github.com/opensearch-project/OpenSearch/pull/12251)) -- Bump `reactor-netty` from 1.1.15 to 1.1.17 ([#12633](https://github.com/opensearch-project/OpenSearch/pull/12633)) -- Bump `reactor` from 3.5.14 to 3.5.15 ([#12633](https://github.com/opensearch-project/OpenSearch/pull/12633)) -- Bump `peter-evans/create-pull-request` from 5 to 6 ([#12724](https://github.com/opensearch-project/OpenSearch/pull/12724)) -- Bump `org.apache.commons:commons-configuration2` from 2.9.0 to 2.10.0 ([#12721](https://github.com/opensearch-project/OpenSearch/pull/12721)) -- Bump `com.azure:azure-json` from 1.0.1 to 1.1.0 ([#12723](https://github.com/opensearch-project/OpenSearch/pull/12723)) ### Changed -- Allow composite aggregation to run under a parent filter aggregation 
([#11499](https://github.com/opensearch-project/OpenSearch/pull/11499)) -- Quickly compute terms aggregations when the top-level query is functionally match-all for a segment ([#11643](https://github.com/opensearch-project/OpenSearch/pull/11643)) -- Mark fuzzy filter GA and remove experimental setting ([12631](https://github.com/opensearch-project/OpenSearch/pull/12631)) -- Keep the election scheduler open until cluster state has been applied ([#11699](https://github.com/opensearch-project/OpenSearch/pull/11699)) ### Deprecated ### Removed ### Fixed -- Fix for deserilization bug in weighted round-robin metadata ([#11679](https://github.com/opensearch-project/OpenSearch/pull/11679)) -- [Revert] [Bug] Check phase name before SearchRequestOperationsListener onPhaseStart ([#12035](https://github.com/opensearch-project/OpenSearch/pull/12035)) -- Add support of special WrappingSearchAsyncActionPhase so the onPhaseStart() will always be followed by onPhaseEnd() within AbstractSearchAsyncAction ([#12293](https://github.com/opensearch-project/OpenSearch/pull/12293)) -- Add a system property to configure YamlParser codepoint limits ([#12298](https://github.com/opensearch-project/OpenSearch/pull/12298)) -- Prevent read beyond slice boundary in ByteArrayIndexInput ([#10481](https://github.com/opensearch-project/OpenSearch/issues/10481)) -- Fix the "highlight.max_analyzer_offset" request parameter with "plain" highlighter ([#10919](https://github.com/opensearch-project/OpenSearch/pull/10919)) -- Prevent unnecessary fetch sub phase processor initialization during fetch phase execution ([#12503](https://github.com/opensearch-project/OpenSearch/pull/12503)) -- Warn about deprecated and ignored index.mapper.dynamic index setting ([#11193](https://github.com/opensearch-project/OpenSearch/pull/11193)) -- Fix `terms` query on `float` field when `doc_values` are turned off by reverting back to `FloatPoint` from `FloatField` ([#12499](https://github.com/opensearch-project/OpenSearch/pull/12499)) -- Fix get task API does not refresh resource stats ([#11531](https://github.com/opensearch-project/OpenSearch/pull/11531)) -- onShardResult and onShardFailure are executed on one shard causes opensearch jvm crashed ([#12158](https://github.com/opensearch-project/OpenSearch/pull/12158)) ### Security From 208520e2062bc675fd3a890a2b097e29b99d6479 Mon Sep 17 00:00:00 2001 From: Vikas Bansal <43470111+vikasvb90@users.noreply.github.com> Date: Fri, 22 Mar 2024 14:47:54 +0530 Subject: [PATCH 25/74] Implementing reload in encrypted blob store (#12826) Signed-off-by: vikasvb90 --- .../common/blobstore/EncryptedBlobStore.java | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java index a18ca8b9d5c39..c41641921c822 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java +++ b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java @@ -9,6 +9,7 @@ package org.opensearch.common.blobstore; import org.opensearch.cluster.metadata.CryptoMetadata; +import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.common.crypto.CryptoHandler; import org.opensearch.crypto.CryptoHandlerRegistry; import org.opensearch.crypto.CryptoRegistryException; @@ -65,6 +66,15 @@ public BlobContainer blobContainer(BlobPath path) { return new EncryptedBlobContainer<>(blobContainer, cryptoHandler); } + /** + * Reload
blobstore metadata + * @param repositoryMetadata new repository metadata + */ + @Override + public void reload(RepositoryMetadata repositoryMetadata) { + blobStore.reload(repositoryMetadata); + } + /** * Retrieves statistics about the BlobStore. Delegates the call to the underlying BlobStore's stats() method. * From 122e944f1e8f2a774a783dc49db8ae73c1e33f9b Mon Sep 17 00:00:00 2001 From: Varun Bansal Date: Fri, 22 Mar 2024 15:50:42 +0530 Subject: [PATCH 26/74] add missing changelog entry to 2.13 release nodes (#12851) Signed-off-by: Varun Bansal --- release-notes/opensearch.release-notes-2.13.0.md | 1 + 1 file changed, 1 insertion(+) diff --git a/release-notes/opensearch.release-notes-2.13.0.md b/release-notes/opensearch.release-notes-2.13.0.md index 517092564545d..e55c22d6b851d 100644 --- a/release-notes/opensearch.release-notes-2.13.0.md +++ b/release-notes/opensearch.release-notes-2.13.0.md @@ -26,6 +26,7 @@ - Add Remote Store Migration Experimental flag and allow mixed mode clusters under same ([#11986](https://github.com/opensearch-project/OpenSearch/pull/11986)) - Built-in secure transports support ([#12435](https://github.com/opensearch-project/OpenSearch/pull/12435)) - Lightweight Transport action to verify local term before fetching cluster-state from remote ([#12252](https://github.com/opensearch-project/OpenSearch/pull/12252/)) +- Integrate with admission controller for cluster-manager Read API. ([#12496](https://github.com/opensearch-project/OpenSearch/pull/12496)) ### Dependencies - Bump `com.squareup.okio:okio` from 3.7.0 to 3.8.0 ([#12290](https://github.com/opensearch-project/OpenSearch/pull/12290)) From 4010ff129310eb84addd3ac07ff769ea8cc15f4e Mon Sep 17 00:00:00 2001 From: Neetika Singhal Date: Fri, 22 Mar 2024 05:41:57 -0700 Subject: [PATCH 27/74] Fix SimpleNestedExplainIT.testExplainMultipleDocs flakiness (#12776) Signed-off-by: Neetika Singhal --- .../search/nested/SimpleNestedExplainIT.java | 23 ++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedExplainIT.java b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedExplainIT.java index a6554271a0bc5..2efec6a63e6c1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedExplainIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedExplainIT.java @@ -30,6 +30,11 @@ */ public class SimpleNestedExplainIT extends OpenSearchIntegTestCase { + @Override + protected int numberOfShards() { + return 1; + } + /* * Tests the explain output for multiple docs. 
Concurrent search with multiple slices is tested * here as a call to indexRandomForMultipleSlices is made and compared with explain output for * non-concurrent search use-case. Separate test class is created to test explain for * 1 slice search use-case. * */ @@ -70,7 +75,23 @@ public void testExplainMultipleDocs() throws Exception { .setRefreshPolicy(IMMEDIATE) .get(); - indexRandomForMultipleSlices("test"); + client().prepareIndex("test") + .setId("2") + .setSource( + jsonBuilder().startObject() + .field("field1", "value2") + .startArray("nested1") + .startObject() + .field("n_field1", "n_value2") + .endObject() + .startObject() + .field("n_field1", "n_value2") + .endObject() + .endArray() + .endObject() + ) + .setRefreshPolicy(IMMEDIATE) + .get(); // Turn off the concurrent search setting to test search with non-concurrent search client().admin() From c04dad58bbc941b8d5f18326d5c5cfb7ac312239 Mon Sep 17 00:00:00 2001 From: Zelin Hao Date: Fri, 22 Mar 2024 06:31:59 -0700 Subject: [PATCH 28/74] Add explicit dependency to validatePom and generatePom tasks. (#12807) * Add explicit dependency to PomValidation Signed-off-by: Zelin Hao * Update CHANGELOG Signed-off-by: Zelin Hao --------- Signed-off-by: Zelin Hao --- CHANGELOG.md | 1 + .../opensearch/gradle/pluginzip/Publish.java | 9 +++++---- .../precommit/PomValidationPrecommitPlugin.java | 17 ++++++++++++----- .../gradle/precommit/PomValidationTask.java | 1 + 4 files changed, 19 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index af20332c61146..74042a06c73c3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Remote reindex: Add support for configurable retry mechanism ([#12561](https://github.com/opensearch-project/OpenSearch/pull/12561)) - [Admission Control] Integrate IO Usage Tracker to the Resource Usage Collector Service and Emit IO Usage Stats ([#11880](https://github.com/opensearch-project/OpenSearch/pull/11880)) - Tracing for deep search path ([#12103](https://github.com/opensearch-project/OpenSearch/pull/12103)) +- Add explicit dependency to validatePom and generatePom tasks ([#12807](https://github.com/opensearch-project/OpenSearch/pull/12807)) ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 diff --git a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java index 5d7e78589306f..599beb8649fcd 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java @@ -65,9 +65,6 @@ public void apply(Project project) { addLocalMavenRepo(project); addZipArtifact(project); Task validatePluginZipPom = project.getTasks().findByName("validatePluginZipPom"); - if (validatePluginZipPom != null) { - validatePluginZipPom.dependsOn("generatePomFileForNebulaPublication"); - } // There are a number of tasks prefixed by 'publishPluginZipPublication', f.e.: // publishPluginZipPublicationToZipStagingRepository, publishPluginZipPublicationToMavenLocal final Set<Task> publishPluginZipPublicationToTasks = project.getTasks() .stream() .filter(t -> t.getName().startsWith("publishPluginZipPublicationTo")) .collect(Collectors.toSet()); if (!publishPluginZipPublicationToTasks.isEmpty()) { + if (validatePluginZipPom != null) { + publishPluginZipPublicationToTasks.forEach(t -> t.dependsOn(validatePluginZipPom)); + } else { + publishPluginZipPublicationToTasks.forEach(t -> 
t.dependsOn("generatePomFileForNebulaPublication")); + } } } else { project.getLogger() diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationPrecommitPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationPrecommitPlugin.java index 0e7a357dd5d18..62194af3c3350 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationPrecommitPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationPrecommitPlugin.java @@ -48,17 +48,24 @@ public class PomValidationPrecommitPlugin extends PrecommitPlugin { public TaskProvider createTask(Project project) { TaskProvider validatePom = project.getTasks().register("validatePom"); PublishingExtension publishing = project.getExtensions().getByType(PublishingExtension.class); - publishing.getPublications().all(publication -> { + publishing.getPublications().configureEach(publication -> { String publicationName = Util.capitalize(publication.getName()); TaskProvider validateTask = project.getTasks() .register("validate" + publicationName + "Pom", PomValidationTask.class); validatePom.configure(t -> t.dependsOn(validateTask)); + TaskProvider generateMavenPom = project.getTasks() + .withType(GenerateMavenPom.class) + .named("generatePomFileFor" + publicationName + "Publication"); validateTask.configure(task -> { - GenerateMavenPom generateMavenPom = project.getTasks() - .withType(GenerateMavenPom.class) - .getByName("generatePomFileFor" + publicationName + "Publication"); task.dependsOn(generateMavenPom); - task.getPomFile().fileValue(generateMavenPom.getDestination()); + task.getPomFile().fileProvider(generateMavenPom.map(GenerateMavenPom::getDestination)); + publishing.getPublications().configureEach(publicationForPomGen -> { + task.mustRunAfter( + project.getTasks() + .withType(GenerateMavenPom.class) + .getByName("generatePomFileFor" + Util.capitalize(publicationForPomGen.getName()) + "Publication") + ); + }); }); }); diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationTask.java index aca882fbb6477..b76e0d6dd93cf 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationTask.java @@ -106,6 +106,7 @@ private void validateNonNull(String element, T value, Runnable validator) { private void validateString(String element, String value) { validateNonNull(element, value, () -> validateNonEmpty(element, value, s -> s.trim().isEmpty())); + getLogger().info(element + " with value " + value + " is validated."); } private void validateCollection(String element, Collection value, Consumer validator) { From ac0bb8344a2dcd0b1c251e450710801c78b6249f Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 22 Mar 2024 12:42:50 -0400 Subject: [PATCH 29/74] Added missed API visibility annotations for public / experimental APIs (#12864) Signed-off-by: Andriy Redko --- .../java/org/opensearch/OpenSearchException.java | 4 +++- .../core/common/breaker/CircuitBreaker.java | 9 +++++++-- .../core/common/transport/BoundTransportAddress.java | 4 +++- .../opensearch/core/transport/TransportResponse.java | 4 +++- .../main/java/org/opensearch/semver/SemverRange.java | 4 ++++ .../clustermanager/ClusterManagerNodeRequest.java | 4 +++- .../org/opensearch/common/cache/RemovalListener.java | 5 ++++- .../common/cache/policy/CachedQueryResult.java | 4 ++++ 
.../common/cache/serializer/Serializer.java | 5 +++++ .../lucene/index/OpenSearchDirectoryReader.java | 7 ++++++- .../org/opensearch/common/metrics/MeanMetric.java | 5 ++++- .../src/main/java/org/opensearch/http/HttpInfo.java | 4 +++- .../org/opensearch/http/HttpServerTransport.java | 4 +++- .../src/main/java/org/opensearch/http/HttpStats.java | 4 +++- .../index/codec/fuzzy/LongArrayBackedBitSet.java | 2 +- .../store/RemoteSegmentStoreDirectoryFactory.java | 4 +++- .../java/org/opensearch/rest/BaseRestHandler.java | 5 +++++ .../main/java/org/opensearch/rest/RestChannel.java | 2 ++ .../main/java/org/opensearch/rest/RestHandler.java | 4 +++- .../main/java/org/opensearch/rest/RestResponse.java | 2 ++ .../org/opensearch/transport/ConnectionProfile.java | 4 +++- .../main/java/org/opensearch/transport/Header.java | 4 +++- .../org/opensearch/transport/InboundMessage.java | 4 +++- .../opensearch/transport/RequestHandlerRegistry.java | 4 +++- .../java/org/opensearch/transport/StatsTracker.java | 4 +++- .../java/org/opensearch/transport/TcpChannel.java | 7 +++++-- .../java/org/opensearch/transport/TcpTransport.java | 7 ++++++- .../java/org/opensearch/transport/Transport.java | 12 ++++++++++-- .../org/opensearch/transport/TransportChannel.java | 2 ++ .../transport/TransportMessageListener.java | 4 +++- .../transport/TransportRequestHandler.java | 4 +++- .../transport/TransportResponseHandler.java | 4 +++- .../org/opensearch/transport/TransportStats.java | 4 +++- 33 files changed, 121 insertions(+), 29 deletions(-) diff --git a/libs/core/src/main/java/org/opensearch/OpenSearchException.java b/libs/core/src/main/java/org/opensearch/OpenSearchException.java index cce86b452f698..dda3983fbb4d1 100644 --- a/libs/core/src/main/java/org/opensearch/OpenSearchException.java +++ b/libs/core/src/main/java/org/opensearch/OpenSearchException.java @@ -33,6 +33,7 @@ import org.opensearch.common.CheckedFunction; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; @@ -69,8 +70,9 @@ /** * A core library base class for all opensearch exceptions. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class OpenSearchException extends RuntimeException implements Writeable, ToXContentFragment { protected static final Version UNKNOWN_VERSION_ADDED = Version.fromId(0); diff --git a/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreaker.java b/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreaker.java index 846950ff17c63..9a09b3b38a5f2 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreaker.java +++ b/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreaker.java @@ -32,14 +32,17 @@ package org.opensearch.core.common.breaker; +import org.opensearch.common.annotation.PublicApi; + import java.util.Locale; /** * Interface for an object that can be incremented, breaking after some * configured limit has been reached. 
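The breaker contract above is what downstream consumers now code against. As a rough sketch of the usual reserve/release idiom (the label and the byte estimate below are illustrative assumptions, not taken from the patch):

    import org.opensearch.core.common.breaker.CircuitBreaker;

    final class BreakerUsageSketch {
        // Reserve an estimate up front; release it when the tracked memory is freed.
        static void withReservation(CircuitBreaker breaker) {
            final long estimateBytes = 1024; // assumed cost of the structure being built
            // Accounts the estimate against the breaker's limit and throws
            // CircuitBreakingException (accounting nothing) if the limit would be exceeded.
            breaker.addEstimateBytesAndMaybeBreak(estimateBytes, "example_label");
            try {
                // ... allocate and use the tracked structure ...
            } finally {
                // Hand the reservation back once the memory is released.
                breaker.addWithoutBreaking(-estimateBytes);
            }
        }
    }
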
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface CircuitBreaker { /** @@ -72,8 +75,10 @@ public interface CircuitBreaker { /** * The type of breaker * can be {@link #MEMORY}, {@link #PARENT}, or {@link #NOOP} - * @opensearch.internal + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") enum Type { /** A regular or ChildMemoryCircuitBreaker */ MEMORY, diff --git a/libs/core/src/main/java/org/opensearch/core/common/transport/BoundTransportAddress.java b/libs/core/src/main/java/org/opensearch/core/common/transport/BoundTransportAddress.java index 8908a172395f2..e2266339c058f 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/transport/BoundTransportAddress.java +++ b/libs/core/src/main/java/org/opensearch/core/common/transport/BoundTransportAddress.java @@ -32,6 +32,7 @@ package org.opensearch.core.common.transport; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.network.InetAddresses; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -44,8 +45,9 @@ * the addresses the transport is bound to, and the other is the published one that represents the address clients * should communicate on. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class BoundTransportAddress implements Writeable { private TransportAddress[] boundAddresses; diff --git a/libs/core/src/main/java/org/opensearch/core/transport/TransportResponse.java b/libs/core/src/main/java/org/opensearch/core/transport/TransportResponse.java index 038069e93a51b..4ae01e140a89c 100644 --- a/libs/core/src/main/java/org/opensearch/core/transport/TransportResponse.java +++ b/libs/core/src/main/java/org/opensearch/core/transport/TransportResponse.java @@ -32,6 +32,7 @@ package org.opensearch.core.transport; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -40,8 +41,9 @@ /** * Response over the transport interface * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class TransportResponse extends TransportMessage { /** diff --git a/libs/core/src/main/java/org/opensearch/semver/SemverRange.java b/libs/core/src/main/java/org/opensearch/semver/SemverRange.java index da87acc7124aa..da8c06c07d8e5 100644 --- a/libs/core/src/main/java/org/opensearch/semver/SemverRange.java +++ b/libs/core/src/main/java/org/opensearch/semver/SemverRange.java @@ -10,6 +10,7 @@ import org.opensearch.Version; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.semver.expr.Caret; @@ -31,7 +32,10 @@ *
* <li>'~' Allows for patch version variability starting from the range version. For example, ~1.2.3 range would match versions greater than or equal to 1.2.3 but less than 1.3.0</li>
* <li>'^' Allows for patch and minor version variability starting from the range version. For example, ^1.2.3 range would match versions greater than or equal to 1.2.3 but less than 2.0.0</li>
* </ul> + * + * @opensearch.api */ +@PublicApi(since = "2.13.0") public class SemverRange implements ToXContentFragment { private final Version rangeVersion; diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeRequest.java b/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeRequest.java index a43d6fb0b1e7a..03fc41e829e3d 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeRequest.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.support.clustermanager; import org.opensearch.action.ActionRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -42,8 +43,9 @@ /** * A base request for cluster-manager based operations. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class ClusterManagerNodeRequest<Request extends ClusterManagerNodeRequest<Request>> extends ActionRequest { public static final TimeValue DEFAULT_CLUSTER_MANAGER_NODE_TIMEOUT = TimeValue.timeValueSeconds(30); diff --git a/server/src/main/java/org/opensearch/common/cache/RemovalListener.java b/server/src/main/java/org/opensearch/common/cache/RemovalListener.java index 369313f9f93f4..68e1cdf6139e2 100644 --- a/server/src/main/java/org/opensearch/common/cache/RemovalListener.java +++ b/server/src/main/java/org/opensearch/common/cache/RemovalListener.java @@ -32,11 +32,14 @@ package org.opensearch.common.cache; +import org.opensearch.common.annotation.ExperimentalApi; + /** * Listener for removing an element from the cache * - * @opensearch.internal + * @opensearch.experimental */ +@ExperimentalApi @FunctionalInterface public interface RemovalListener<K, V> { void onRemoval(RemovalNotification<K, V> notification); diff --git a/server/src/main/java/org/opensearch/common/cache/policy/CachedQueryResult.java b/server/src/main/java/org/opensearch/common/cache/policy/CachedQueryResult.java index 0a98542a05bb7..df698112c60d1 100644 --- a/server/src/main/java/org/opensearch/common/cache/policy/CachedQueryResult.java +++ b/server/src/main/java/org/opensearch/common/cache/policy/CachedQueryResult.java @@ -8,6 +8,7 @@ package org.opensearch.common.cache.policy; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; @@ -62,7 +63,10 @@ public void writeToNoId(StreamOutput out) throws IOException { /** * A class containing information needed for all cache policies * to decide whether to admit a given value. 
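To make the role of PolicyValues concrete, here is a hedged sketch of a took-time admission policy built on it; the class name, threshold, extractor, and the getTookTimeNanos accessor (implied by the tookTimeNanos field above) are assumptions for illustration:

    import java.util.function.Function;
    import java.util.function.Predicate;

    import org.opensearch.common.cache.policy.CachedQueryResult;

    // Admits only values that were expensive enough to compute to be worth caching.
    final class TookTimeAdmissionPolicy<V> implements Predicate<V> {
        private final long thresholdNanos;
        private final Function<V, CachedQueryResult.PolicyValues> extractor;

        TookTimeAdmissionPolicy(long thresholdNanos, Function<V, CachedQueryResult.PolicyValues> extractor) {
            this.thresholdNanos = thresholdNanos;
            this.extractor = extractor;
        }

        @Override
        public boolean test(V value) {
            // Cheap-to-recompute results are not worth the cache space.
            return extractor.apply(value).getTookTimeNanos() >= thresholdNanos;
        }
    }
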
+ * + * @opensearch.experimental */ + @ExperimentalApi public static class PolicyValues implements Writeable { final long tookTimeNanos; // More values can be added here as they're needed for future policies diff --git a/server/src/main/java/org/opensearch/common/cache/serializer/Serializer.java b/server/src/main/java/org/opensearch/common/cache/serializer/Serializer.java index 35e28707d1ca3..46a8ed5a72ccf 100644 --- a/server/src/main/java/org/opensearch/common/cache/serializer/Serializer.java +++ b/server/src/main/java/org/opensearch/common/cache/serializer/Serializer.java @@ -8,10 +8,15 @@ package org.opensearch.common.cache.serializer; +import org.opensearch.common.annotation.ExperimentalApi; + /** * Defines an interface for serializers, to be used by pluggable caches. * T is the class of the original object, and U is the serialized class. + * + * @opensearch.experimental */ +@ExperimentalApi public interface Serializer<T, U> { /** * Serializes an object. diff --git a/server/src/main/java/org/opensearch/common/lucene/index/OpenSearchDirectoryReader.java b/server/src/main/java/org/opensearch/common/lucene/index/OpenSearchDirectoryReader.java index f9a87b9e74214..ec2cfde84ca5f 100644 --- a/server/src/main/java/org/opensearch/common/lucene/index/OpenSearchDirectoryReader.java +++ b/server/src/main/java/org/opensearch/common/lucene/index/OpenSearchDirectoryReader.java @@ -84,8 +84,10 @@ public DelegatingCacheHelper getDelegatingCacheHelper() { /** * Wraps existing IndexReader cache helper which internally provides a way to wrap CacheKey. - * @opensearch.internal + * + * @opensearch.api */ + @PublicApi(since = "2.13.0") public class DelegatingCacheHelper implements CacheHelper { private final CacheHelper cacheHelper; private final DelegatingCacheKey serializableCacheKey; @@ -113,7 +115,10 @@ public void addClosedListener(ClosedListener listener) { /** * Wraps internal IndexReader.CacheKey and attaches a uniqueId to it which can eventually be used instead of * object itself for serialization purposes. + * + * @opensearch.api */ + @PublicApi(since = "2.13.0") public class DelegatingCacheKey { private final CacheKey cacheKey; private final String uniqueId; diff --git a/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java b/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java index 359facdce633b..94d44d5b35d74 100644 --- a/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java +++ b/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java @@ -32,13 +32,16 @@ package org.opensearch.common.metrics; +import org.opensearch.common.annotation.PublicApi; + import java.util.concurrent.atomic.LongAdder; /** * An average metric for tracking. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MeanMetric implements Metric { private final LongAdder counter = new LongAdder(); diff --git a/server/src/main/java/org/opensearch/http/HttpInfo.java b/server/src/main/java/org/opensearch/http/HttpInfo.java index 10f2d50dacb14..4a39e40c471b1 100644 --- a/server/src/main/java/org/opensearch/http/HttpInfo.java +++ b/server/src/main/java/org/opensearch/http/HttpInfo.java @@ -32,6 +32,7 @@ package org.opensearch.http; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.network.InetAddresses; import org.opensearch.core.common.io.stream.StreamInput; @@ -47,8 +48,9 @@ /** * Information about an http connection * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class HttpInfo implements ReportingService.Info { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(HttpInfo.class); diff --git a/server/src/main/java/org/opensearch/http/HttpServerTransport.java b/server/src/main/java/org/opensearch/http/HttpServerTransport.java index 890136cb67e60..012b69c29c1d4 100644 --- a/server/src/main/java/org/opensearch/http/HttpServerTransport.java +++ b/server/src/main/java/org/opensearch/http/HttpServerTransport.java @@ -32,6 +32,7 @@ package org.opensearch.http; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lifecycle.LifecycleComponent; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.common.transport.BoundTransportAddress; @@ -42,8 +43,9 @@ /** * HTTP Transport server * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface HttpServerTransport extends LifecycleComponent, ReportingService { String HTTP_SERVER_WORKER_THREAD_NAME_PREFIX = "http_server_worker"; diff --git a/server/src/main/java/org/opensearch/http/HttpStats.java b/server/src/main/java/org/opensearch/http/HttpStats.java index 078b84b7bc563..f69eff59e830d 100644 --- a/server/src/main/java/org/opensearch/http/HttpStats.java +++ b/server/src/main/java/org/opensearch/http/HttpStats.java @@ -32,6 +32,7 @@ package org.opensearch.http; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -43,8 +44,9 @@ /** * Stats for HTTP connections * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class HttpStats implements Writeable, ToXContentFragment { private final long serverOpen; diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/LongArrayBackedBitSet.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/LongArrayBackedBitSet.java index bd4936aeec366..392a925c21143 100644 --- a/server/src/main/java/org/opensearch/index/codec/fuzzy/LongArrayBackedBitSet.java +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/LongArrayBackedBitSet.java @@ -39,7 +39,7 @@ class LongArrayBackedBitSet implements Accountable, Closeable { /** * Constructor which uses Lucene's IndexInput to read the bitset into a read-only buffer. * @param in IndexInput containing the serialized bitset. 
- * @throws IOException + * @throws IOException I/O exception */ LongArrayBackedBitSet(IndexInput in) throws IOException { underlyingArrayLength = in.readLong(); diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java index eca8d9ec702e1..d6d3f1fca833c 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java @@ -9,6 +9,7 @@ package org.opensearch.index.store; import org.apache.lucene.store.Directory; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; @@ -28,8 +29,9 @@ /** * Factory for a remote store directory * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.3.0") public class RemoteSegmentStoreDirectoryFactory implements IndexStorePlugin.DirectoryFactory { private static final String SEGMENTS = "segments"; diff --git a/server/src/main/java/org/opensearch/rest/BaseRestHandler.java b/server/src/main/java/org/opensearch/rest/BaseRestHandler.java index e18a594236fc8..3552e32022b2c 100644 --- a/server/src/main/java/org/opensearch/rest/BaseRestHandler.java +++ b/server/src/main/java/org/opensearch/rest/BaseRestHandler.java @@ -40,6 +40,7 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.client.node.NodeClient; import org.opensearch.common.CheckedConsumer; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Setting; @@ -73,6 +74,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class BaseRestHandler implements RestHandler { public static final Setting MULTI_ALLOW_EXPLICIT_INDEX = Setting.boolSetting( @@ -195,8 +197,11 @@ protected final String unrecognized( /** * REST requests are handled by preparing a channel consumer that represents the execution of * the request against a channel. 
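To ground the RestChannelConsumer contract just described, a minimal handler sketch follows; the route, handler name, and request parameter are invented for the example, not part of the patch:

    import java.io.IOException;
    import java.util.List;

    import org.opensearch.client.node.NodeClient;
    import org.opensearch.core.rest.RestStatus;
    import org.opensearch.core.xcontent.XContentBuilder;
    import org.opensearch.rest.BaseRestHandler;
    import org.opensearch.rest.BytesRestResponse;
    import org.opensearch.rest.RestRequest;

    public class EchoRestHandler extends BaseRestHandler {
        @Override
        public String getName() {
            return "echo_handler";
        }

        @Override
        public List<Route> routes() {
            return List.of(new Route(RestRequest.Method.GET, "/_echo"));
        }

        @Override
        protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
            final String message = request.param("message", "hello");
            // The returned consumer runs later, writing the response to the channel.
            return channel -> {
                try (XContentBuilder builder = channel.newBuilder()) {
                    builder.startObject().field("echo", message).endObject();
                    channel.sendResponse(new BytesRestResponse(RestStatus.OK, builder));
                }
            };
        }
    }
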
+ * + * @opensearch.api */ @FunctionalInterface + @PublicApi(since = "1.0.0") protected interface RestChannelConsumer extends CheckedConsumer<RestChannel, Exception> {} /** diff --git a/server/src/main/java/org/opensearch/rest/RestChannel.java b/server/src/main/java/org/opensearch/rest/RestChannel.java index b8ce3e92e0098..b3ded1389f754 100644 --- a/server/src/main/java/org/opensearch/rest/RestChannel.java +++ b/server/src/main/java/org/opensearch/rest/RestChannel.java @@ -33,6 +33,7 @@ package org.opensearch.rest; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.XContentBuilder; @@ -44,6 +45,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface RestChannel { XContentBuilder newBuilder() throws IOException; diff --git a/server/src/main/java/org/opensearch/rest/RestHandler.java b/server/src/main/java/org/opensearch/rest/RestHandler.java index 294dc3ffbe329..387e8b25a8274 100644 --- a/server/src/main/java/org/opensearch/rest/RestHandler.java +++ b/server/src/main/java/org/opensearch/rest/RestHandler.java @@ -33,6 +33,7 @@ package org.opensearch.rest; import org.opensearch.client.node.NodeClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.xcontent.XContent; import org.opensearch.rest.RestRequest.Method; @@ -180,8 +181,9 @@ public boolean allowSystemIndexAccessByDefault() { /** * Route for the request. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") class Route { protected final String path; diff --git a/server/src/main/java/org/opensearch/rest/RestResponse.java b/server/src/main/java/org/opensearch/rest/RestResponse.java index 2eff746e8508c..482eb6b052e9b 100644 --- a/server/src/main/java/org/opensearch/rest/RestResponse.java +++ b/server/src/main/java/org/opensearch/rest/RestResponse.java @@ -33,6 +33,7 @@ package org.opensearch.rest; import org.opensearch.OpenSearchException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lease.Releasable; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.rest.RestStatus; @@ -49,6 +50,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class RestResponse { private Map<String, List<String>> customHeaders; diff --git a/server/src/main/java/org/opensearch/transport/ConnectionProfile.java b/server/src/main/java/org/opensearch/transport/ConnectionProfile.java index b9764c0c53f4a..931707e4a1cdc 100644 --- a/server/src/main/java/org/opensearch/transport/ConnectionProfile.java +++ b/server/src/main/java/org/opensearch/transport/ConnectionProfile.java @@ -33,6 +33,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -49,8 +50,9 @@ * A connection profile describes how many connections are established to a specific node for each of the available request types. ({@link org.opensearch.transport.TransportRequestOptions.Type}). This allows tailoring a connection towards a specific usage. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ConnectionProfile { /** diff --git a/server/src/main/java/org/opensearch/transport/Header.java b/server/src/main/java/org/opensearch/transport/Header.java index a179cfb35288e..57c1da6f46aec 100644 --- a/server/src/main/java/org/opensearch/transport/Header.java +++ b/server/src/main/java/org/opensearch/transport/Header.java @@ -33,6 +33,7 @@ package org.opensearch.transport; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.common.io.stream.StreamInput; @@ -47,8 +48,9 @@ /** * Transport Header * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Header { private static final String RESPONSE_NAME = "NO_ACTION_NAME_FOR_RESPONSES"; diff --git a/server/src/main/java/org/opensearch/transport/InboundMessage.java b/server/src/main/java/org/opensearch/transport/InboundMessage.java index a1ed682ff7d7f..71c4d6973505d 100644 --- a/server/src/main/java/org/opensearch/transport/InboundMessage.java +++ b/server/src/main/java/org/opensearch/transport/InboundMessage.java @@ -32,6 +32,7 @@ package org.opensearch.transport; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.bytes.ReleasableBytesReference; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; @@ -43,8 +44,9 @@ /** * Inbound data as a message * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class InboundMessage implements Releasable { private final Header header; diff --git a/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java b/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java index 98c182c562928..4368dbdece6cf 100644 --- a/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java +++ b/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java @@ -32,6 +32,7 @@ package org.opensearch.transport; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.concurrent.ThreadContext; @@ -47,8 +48,9 @@ /** * Registry for OpenSearch RequestHandlers * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class RequestHandlerRegistry { private final String action; diff --git a/server/src/main/java/org/opensearch/transport/StatsTracker.java b/server/src/main/java/org/opensearch/transport/StatsTracker.java index 5548d2d558ae2..02bc0a51c9330 100644 --- a/server/src/main/java/org/opensearch/transport/StatsTracker.java +++ b/server/src/main/java/org/opensearch/transport/StatsTracker.java @@ -32,6 +32,7 @@ package org.opensearch.transport; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.metrics.MeanMetric; import java.util.concurrent.atomic.LongAdder; @@ -39,8 +40,9 @@ /** * Tracks transport statistics * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class StatsTracker { private final LongAdder bytesRead = new LongAdder(); diff --git a/server/src/main/java/org/opensearch/transport/TcpChannel.java b/server/src/main/java/org/opensearch/transport/TcpChannel.java index f98b65d0a4df1..7d4515de85d80 100644 --- 
a/server/src/main/java/org/opensearch/transport/TcpChannel.java +++ b/server/src/main/java/org/opensearch/transport/TcpChannel.java @@ -32,6 +32,7 @@ package org.opensearch.transport; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.network.CloseableChannel; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; @@ -45,8 +46,9 @@ * abstraction used by the {@link TcpTransport} and {@link TransportService}. All tcp transport * implementations must return channels that adhere to the required method contracts. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface TcpChannel extends CloseableChannel { /** @@ -114,8 +116,9 @@ default Optional get(String name, Class clazz) { /** * Channel statistics * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") class ChannelStats { private volatile long lastAccessedTime; diff --git a/server/src/main/java/org/opensearch/transport/TcpTransport.java b/server/src/main/java/org/opensearch/transport/TcpTransport.java index d0e6516973382..7d45152089f37 100644 --- a/server/src/main/java/org/opensearch/transport/TcpTransport.java +++ b/server/src/main/java/org/opensearch/transport/TcpTransport.java @@ -39,6 +39,7 @@ import org.opensearch.action.support.ThreadedActionListener; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Booleans; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.lifecycle.Lifecycle; import org.opensearch.common.metrics.MeanMetric; @@ -111,8 +112,9 @@ /** * The TCP Transport layer * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class TcpTransport extends AbstractLifecycleComponent implements Transport { private static final Logger logger = LogManager.getLogger(TcpTransport.class); @@ -966,7 +968,10 @@ public static Set getProfileSettings(Settings settings) { /** * Representation of a transport profile settings for a {@code transport.profiles.$profilename.*} + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class ProfileSettings { public final String profileName; public final boolean tcpNoDelay; diff --git a/server/src/main/java/org/opensearch/transport/Transport.java b/server/src/main/java/org/opensearch/transport/Transport.java index 8abedff37db14..b89393615c95f 100644 --- a/server/src/main/java/org/opensearch/transport/Transport.java +++ b/server/src/main/java/org/opensearch/transport/Transport.java @@ -58,8 +58,9 @@ /** * OpenSearch Transport Interface * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Transport extends LifecycleComponent { /** @@ -166,7 +167,10 @@ default Object getCacheKey() { /** * This class represents a response context that encapsulates the actual response handler, the action and the connection it was * executed on. 
+ * + * @opensearch.api */ + @PublicApi(since = "1.0.0") final class ResponseContext<T extends TransportResponse> { private final TransportResponseHandler<T> handler; @@ -196,7 +200,10 @@ public String action() { /** * This class is a registry that allows response handlers to be registered and looked up by request id. + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") final class ResponseHandlers { private final ConcurrentMapLong<ResponseContext<? extends TransportResponse>> handlers = ConcurrentCollections .newConcurrentMapLongWithAggressiveConcurrency(); @@ -276,8 +283,9 @@ public TransportResponseHandler onResponseReceived( /** * Request handler implementations * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") final class RequestHandlers { private volatile Map<String, RequestHandlerRegistry<? extends TransportRequest>> requestHandlers = Collections.emptyMap(); diff --git a/server/src/main/java/org/opensearch/transport/TransportChannel.java b/server/src/main/java/org/opensearch/transport/TransportChannel.java index f84ee5dc745c3..7b6715ff2c73d 100644 --- a/server/src/main/java/org/opensearch/transport/TransportChannel.java +++ b/server/src/main/java/org/opensearch/transport/TransportChannel.java @@ -36,6 +36,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.transport.TransportResponse; import java.io.IOException; @@ -46,6 +47,7 @@ * * @opensearch.internal */ +@PublicApi(since = "1.0.0") public interface TransportChannel { Logger logger = LogManager.getLogger(TransportChannel.class); diff --git a/server/src/main/java/org/opensearch/transport/TransportMessageListener.java b/server/src/main/java/org/opensearch/transport/TransportMessageListener.java index dfcd7acce3706..284c4646655c5 100644 --- a/server/src/main/java/org/opensearch/transport/TransportMessageListener.java +++ b/server/src/main/java/org/opensearch/transport/TransportMessageListener.java @@ -32,13 +32,15 @@ package org.opensearch.transport; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.transport.TransportResponse; /** * Listens for transport messages * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface TransportMessageListener { TransportMessageListener NOOP_LISTENER = new TransportMessageListener() { diff --git a/server/src/main/java/org/opensearch/transport/TransportRequestHandler.java b/server/src/main/java/org/opensearch/transport/TransportRequestHandler.java index 54ee1b68fc9aa..0419c0b82029b 100644 --- a/server/src/main/java/org/opensearch/transport/TransportRequestHandler.java +++ b/server/src/main/java/org/opensearch/transport/TransportRequestHandler.java @@ -32,13 +32,15 @@ package org.opensearch.transport; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.tasks.Task; /** * Handles transport requests * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface TransportRequestHandler<T extends TransportRequest> { void messageReceived(T request, TransportChannel channel, Task task) throws Exception; diff --git a/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java b/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java index 8992af18edb48..748d2a4d867ec 100644 --- a/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java +++ b/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java @@ -32,6 +32,7 @@ package org.opensearch.transport; +import 
org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.transport.TransportResponse; @@ -42,8 +43,9 @@ /** * Handles transport responses * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface TransportResponseHandler extends Writeable.Reader { void handleResponse(T response); diff --git a/server/src/main/java/org/opensearch/transport/TransportStats.java b/server/src/main/java/org/opensearch/transport/TransportStats.java index e3c4773f4a472..01980ce529caa 100644 --- a/server/src/main/java/org/opensearch/transport/TransportStats.java +++ b/server/src/main/java/org/opensearch/transport/TransportStats.java @@ -32,6 +32,7 @@ package org.opensearch.transport; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -44,8 +45,9 @@ /** * Stats for transport activity * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TransportStats implements Writeable, ToXContentFragment { private final long serverOpen; From 8e332b6cfee70221a62426d4ae282df8dc205981 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 22 Mar 2024 16:36:17 -0400 Subject: [PATCH 30/74] Added missed API visibility annotations for public APIs and enable the check at the build time (#12872) * Added missed API visibility annotations for public APIs and enable the check at the build time Signed-off-by: Andriy Redko * Address code review comments Signed-off-by: Andriy Redko --------- Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + server/build.gradle | 2 +- .../main/java/org/opensearch/action/ActionModule.java | 4 +++- .../main/java/org/opensearch/action/ActionRequest.java | 2 ++ .../src/main/java/org/opensearch/action/ActionType.java | 2 ++ .../org/opensearch/action/support/TransportAction.java | 4 +++- .../main/java/org/opensearch/client/node/NodeClient.java | 4 +++- .../main/java/org/opensearch/common/inject/Binder.java | 4 +++- .../main/java/org/opensearch/common/inject/Binding.java | 4 +++- .../opensearch/common/inject/ConfigurationException.java | 4 +++- .../main/java/org/opensearch/common/inject/Injector.java | 5 ++++- .../src/main/java/org/opensearch/common/inject/Key.java | 4 +++- .../org/opensearch/common/inject/MembersInjector.java | 5 ++++- .../main/java/org/opensearch/common/inject/Module.java | 5 ++++- .../java/org/opensearch/common/inject/PrivateBinder.java | 4 +++- .../main/java/org/opensearch/common/inject/Provider.java | 5 ++++- .../main/java/org/opensearch/common/inject/Scope.java | 5 ++++- .../main/java/org/opensearch/common/inject/Stage.java | 5 ++++- .../java/org/opensearch/common/inject/TypeLiteral.java | 4 +++- .../common/inject/binder/AnnotatedBindingBuilder.java | 5 ++++- .../inject/binder/AnnotatedConstantBindingBuilder.java | 5 ++++- .../common/inject/binder/AnnotatedElementBuilder.java | 5 ++++- .../common/inject/binder/ConstantBindingBuilder.java | 5 ++++- .../common/inject/binder/LinkedBindingBuilder.java | 4 +++- .../common/inject/binder/ScopedBindingBuilder.java | 4 +++- .../org/opensearch/common/inject/matcher/Matcher.java | 5 ++++- .../common/inject/spi/BindingScopingVisitor.java | 4 +++- .../common/inject/spi/BindingTargetVisitor.java | 5 ++++- .../opensearch/common/inject/spi/ConstructorBinding.java | 4 +++- 
.../common/inject/spi/ConvertedConstantBinding.java | 4 +++- .../org/opensearch/common/inject/spi/Dependency.java | 4 +++- .../java/org/opensearch/common/inject/spi/Element.java | 4 +++- .../org/opensearch/common/inject/spi/ElementVisitor.java | 4 +++- .../org/opensearch/common/inject/spi/ExposedBinding.java | 4 +++- .../opensearch/common/inject/spi/InjectionListener.java | 5 ++++- .../org/opensearch/common/inject/spi/InjectionPoint.java | 4 +++- .../opensearch/common/inject/spi/InjectionRequest.java | 4 +++- .../opensearch/common/inject/spi/InstanceBinding.java | 4 +++- .../opensearch/common/inject/spi/LinkedKeyBinding.java | 4 +++- .../common/inject/spi/MembersInjectorLookup.java | 4 +++- .../java/org/opensearch/common/inject/spi/Message.java | 4 +++- .../opensearch/common/inject/spi/PrivateElements.java | 4 +++- .../opensearch/common/inject/spi/ProviderBinding.java | 4 +++- .../common/inject/spi/ProviderInstanceBinding.java | 4 +++- .../opensearch/common/inject/spi/ProviderKeyBinding.java | 4 +++- .../org/opensearch/common/inject/spi/ProviderLookup.java | 4 +++- .../org/opensearch/common/inject/spi/ScopeBinding.java | 4 +++- .../common/inject/spi/StaticInjectionRequest.java | 4 +++- .../org/opensearch/common/inject/spi/TypeConverter.java | 4 +++- .../common/inject/spi/TypeConverterBinding.java | 4 +++- .../org/opensearch/common/inject/spi/TypeEncounter.java | 4 +++- .../org/opensearch/common/inject/spi/TypeListener.java | 4 +++- .../common/inject/spi/TypeListenerBinding.java | 4 +++- .../opensearch/common/inject/spi/UntargettedBinding.java | 4 +++- .../extensions/rest/RestSendToExtensionAction.java | 4 ++++ server/src/main/java/org/opensearch/rest/NamedRoute.java | 4 +++- .../src/main/java/org/opensearch/rest/RestHandler.java | 7 +++++++ .../src/main/java/org/opensearch/tasks/TaskListener.java | 5 ++++- .../org/opensearch/transport/RemoteClusterService.java | 4 +++- .../org/opensearch/transport/RemoteConnectionInfo.java | 7 +++++-- .../opensearch/transport/RemoteConnectionStrategy.java | 9 ++++++++- 61 files changed, 203 insertions(+), 57 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 74042a06c73c3..346913f025f4d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -106,6 +106,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Dependencies ### Changed +- [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872)) ### Deprecated diff --git a/server/build.gradle b/server/build.gradle index e36498bf1038b..bbbe93bd6e517 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -141,7 +141,7 @@ tasks.withType(JavaCompile).configureEach { compileJava { options.compilerArgs += ['-processor', ['org.apache.logging.log4j.core.config.plugins.processor.PluginProcessor', - 'org.opensearch.common.annotation.processor.ApiAnnotationProcessor'].join(','), '-AcontinueOnFailingChecks'] + 'org.opensearch.common.annotation.processor.ApiAnnotationProcessor'].join(',')] } tasks.named("internalClusterTest").configure { diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index f827b7f3f0097..716e248b09692 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -294,6 +294,7 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNodes; 
import org.opensearch.common.NamedRegistry; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.AbstractModule; import org.opensearch.common.inject.TypeLiteral; import org.opensearch.common.inject.multibindings.MapBinder; @@ -1052,8 +1053,9 @@ public RestController getRestController() { *

    * This class is modeled after {@link NamedRegistry} but provides both register and unregister capabilities. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "2.7.0") public static class DynamicActionRegistry { // This is the unmodifiable actions map created during node bootstrap, which // will continue to link ActionType and TransportAction pairs from core and plugin diff --git a/server/src/main/java/org/opensearch/action/ActionRequest.java b/server/src/main/java/org/opensearch/action/ActionRequest.java index 5313a05ad6fae..7ab87065bef7e 100644 --- a/server/src/main/java/org/opensearch/action/ActionRequest.java +++ b/server/src/main/java/org/opensearch/action/ActionRequest.java @@ -32,6 +32,7 @@ package org.opensearch.action; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.transport.TransportRequest; @@ -43,6 +44,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class ActionRequest extends TransportRequest { public ActionRequest() { diff --git a/server/src/main/java/org/opensearch/action/ActionType.java b/server/src/main/java/org/opensearch/action/ActionType.java index dae931bdd1891..559dad73536e1 100644 --- a/server/src/main/java/org/opensearch/action/ActionType.java +++ b/server/src/main/java/org/opensearch/action/ActionType.java @@ -32,6 +32,7 @@ package org.opensearch.action; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; @@ -43,6 +44,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ActionType { private final String name; diff --git a/server/src/main/java/org/opensearch/action/support/TransportAction.java b/server/src/main/java/org/opensearch/action/support/TransportAction.java index 72aae210d61ae..f71347f6f1d07 100644 --- a/server/src/main/java/org/opensearch/action/support/TransportAction.java +++ b/server/src/main/java/org/opensearch/action/support/TransportAction.java @@ -36,6 +36,7 @@ import org.apache.logging.log4j.Logger; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.concurrent.ThreadContext; @@ -52,8 +53,9 @@ /** * Base class for a transport action * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class TransportAction { public final String actionName; diff --git a/server/src/main/java/org/opensearch/client/node/NodeClient.java b/server/src/main/java/org/opensearch/client/node/NodeClient.java index 6e1bb6ce79349..5780e4c1e648a 100644 --- a/server/src/main/java/org/opensearch/client/node/NodeClient.java +++ b/server/src/main/java/org/opensearch/client/node/NodeClient.java @@ -39,6 +39,7 @@ import org.opensearch.client.Client; import org.opensearch.client.support.AbstractClient; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionListener; import org.opensearch.core.action.ActionResponse; @@ -53,8 +54,9 @@ /** * Client that executes actions on the local node. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodeClient extends AbstractClient { private DynamicActionRegistry actionRegistry; diff --git a/server/src/main/java/org/opensearch/common/inject/Binder.java b/server/src/main/java/org/opensearch/common/inject/Binder.java index a733a19608ac1..a9d16becfb5ab 100644 --- a/server/src/main/java/org/opensearch/common/inject/Binder.java +++ b/server/src/main/java/org/opensearch/common/inject/Binder.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.binder.AnnotatedBindingBuilder; import org.opensearch.common.inject.binder.AnnotatedConstantBindingBuilder; import org.opensearch.common.inject.binder.LinkedBindingBuilder; @@ -198,8 +199,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @author kevinb@google.com (Kevin Bourrillion) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Binder { /** diff --git a/server/src/main/java/org/opensearch/common/inject/Binding.java b/server/src/main/java/org/opensearch/common/inject/Binding.java index 53d02e37502af..a42237697a1d2 100644 --- a/server/src/main/java/org/opensearch/common/inject/Binding.java +++ b/server/src/main/java/org/opensearch/common/inject/Binding.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.spi.BindingScopingVisitor; import org.opensearch.common.inject.spi.BindingTargetVisitor; import org.opensearch.common.inject.spi.Element; @@ -69,8 +70,9 @@ * @author crazybob@google.com (Bob Lee) * @author jessewilson@google.com (Jesse Wilson) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Binding extends Element { /** diff --git a/server/src/main/java/org/opensearch/common/inject/ConfigurationException.java b/server/src/main/java/org/opensearch/common/inject/ConfigurationException.java index 4379a93482560..e3a32754a1bdb 100644 --- a/server/src/main/java/org/opensearch/common/inject/ConfigurationException.java +++ b/server/src/main/java/org/opensearch/common/inject/ConfigurationException.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.internal.Errors; import org.opensearch.common.inject.spi.Message; @@ -46,8 +47,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ConfigurationException extends RuntimeException { private final Set messages; private Object partialValue = null; diff --git a/server/src/main/java/org/opensearch/common/inject/Injector.java b/server/src/main/java/org/opensearch/common/inject/Injector.java index ff212c6313371..772578dd6bb2c 100644 --- a/server/src/main/java/org/opensearch/common/inject/Injector.java +++ b/server/src/main/java/org/opensearch/common/inject/Injector.java @@ -29,6 +29,8 @@ package org.opensearch.common.inject; +import org.opensearch.common.annotation.PublicApi; + import java.util.List; /** @@ -54,8 +56,9 @@ * @author crazybob@google.com (Bob Lee) * @author jessewilson@google.com (Jesse Wilson) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Injector { /** diff --git a/server/src/main/java/org/opensearch/common/inject/Key.java 
b/server/src/main/java/org/opensearch/common/inject/Key.java index cd305353a555d..32f168d18e523 100644 --- a/server/src/main/java/org/opensearch/common/inject/Key.java +++ b/server/src/main/java/org/opensearch/common/inject/Key.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.internal.Annotations; import org.opensearch.common.inject.internal.MoreTypes; import org.opensearch.common.inject.internal.ToStringBuilder; @@ -59,8 +60,9 @@ * * @author crazybob@google.com (Bob Lee) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Key { private final AnnotationStrategy annotationStrategy; diff --git a/server/src/main/java/org/opensearch/common/inject/MembersInjector.java b/server/src/main/java/org/opensearch/common/inject/MembersInjector.java index 891762375d5a2..872ae883e246b 100644 --- a/server/src/main/java/org/opensearch/common/inject/MembersInjector.java +++ b/server/src/main/java/org/opensearch/common/inject/MembersInjector.java @@ -29,6 +29,8 @@ package org.opensearch.common.inject; +import org.opensearch.common.annotation.PublicApi; + /** * Injects dependencies into the fields and methods on instances of type {@code T}. Ignores the * presence or absence of an injectable constructor. @@ -38,8 +40,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface MembersInjector { /** diff --git a/server/src/main/java/org/opensearch/common/inject/Module.java b/server/src/main/java/org/opensearch/common/inject/Module.java index b1fc031192ea0..e66044ff26c40 100644 --- a/server/src/main/java/org/opensearch/common/inject/Module.java +++ b/server/src/main/java/org/opensearch/common/inject/Module.java @@ -29,6 +29,8 @@ package org.opensearch.common.inject; +import org.opensearch.common.annotation.PublicApi; + /** * A module contributes configuration information, typically interface * bindings, which will be used to create an {@link Injector}. A Guice-based @@ -43,8 +45,9 @@ * Use scope and binding annotations on these methods to configure the * bindings. 
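Since the Module contract is being promoted to a public API here, a minimal module written by third-party code might look like the following sketch; the Service types are invented for the example:

    import org.opensearch.common.inject.Binder;
    import org.opensearch.common.inject.Module;

    // Wires an interface to an implementation, in the Guice style described above.
    final class ExampleModule implements Module {
        @Override
        public void configure(Binder binder) {
            // Bind the interface to a concrete type, instantiated eagerly as a singleton.
            binder.bind(Service.class).to(DefaultService.class).asEagerSingleton();
        }

        interface Service {}

        static final class DefaultService implements Service {}
    }

An Injector built from such modules then resolves Service to DefaultService wherever it is injected.
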
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Module { /** diff --git a/server/src/main/java/org/opensearch/common/inject/PrivateBinder.java b/server/src/main/java/org/opensearch/common/inject/PrivateBinder.java index 87635880e29d8..2b6b2e0aad146 100644 --- a/server/src/main/java/org/opensearch/common/inject/PrivateBinder.java +++ b/server/src/main/java/org/opensearch/common/inject/PrivateBinder.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.binder.AnnotatedElementBuilder; /** @@ -38,8 +39,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface PrivateBinder extends Binder { /** diff --git a/server/src/main/java/org/opensearch/common/inject/Provider.java b/server/src/main/java/org/opensearch/common/inject/Provider.java index 97f9e9ae503cd..988143b328828 100644 --- a/server/src/main/java/org/opensearch/common/inject/Provider.java +++ b/server/src/main/java/org/opensearch/common/inject/Provider.java @@ -29,6 +29,8 @@ package org.opensearch.common.inject; +import org.opensearch.common.annotation.PublicApi; + /** * An object capable of providing instances of type {@code T}. Providers are used in numerous ways * by Guice: @@ -50,8 +52,9 @@ * @param the type of object this provides * @author crazybob@google.com (Bob Lee) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Provider { /** diff --git a/server/src/main/java/org/opensearch/common/inject/Scope.java b/server/src/main/java/org/opensearch/common/inject/Scope.java index a21495f522d5e..6fb9f560981ef 100644 --- a/server/src/main/java/org/opensearch/common/inject/Scope.java +++ b/server/src/main/java/org/opensearch/common/inject/Scope.java @@ -29,6 +29,8 @@ package org.opensearch.common.inject; +import org.opensearch.common.annotation.PublicApi; + /** * A scope is a level of visibility that instances provided by Guice may have. * By default, an instance created by the {@link Injector} has no scope, @@ -42,8 +44,9 @@ * * @author crazybob@google.com (Bob Lee) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Scope { /** diff --git a/server/src/main/java/org/opensearch/common/inject/Stage.java b/server/src/main/java/org/opensearch/common/inject/Stage.java index d5996bd1363e9..fbb6e389ef43f 100644 --- a/server/src/main/java/org/opensearch/common/inject/Stage.java +++ b/server/src/main/java/org/opensearch/common/inject/Stage.java @@ -29,13 +29,16 @@ package org.opensearch.common.inject; +import org.opensearch.common.annotation.PublicApi; + /** * The stage we're running in. 
* * @author crazybob@google.com (Bob Lee) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum Stage { /** diff --git a/server/src/main/java/org/opensearch/common/inject/TypeLiteral.java b/server/src/main/java/org/opensearch/common/inject/TypeLiteral.java index f0cca2990b407..8ac04e5d0ac1d 100644 --- a/server/src/main/java/org/opensearch/common/inject/TypeLiteral.java +++ b/server/src/main/java/org/opensearch/common/inject/TypeLiteral.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.internal.MoreTypes; import org.opensearch.common.inject.util.Types; @@ -77,8 +78,9 @@ * @author crazybob@google.com (Bob Lee) * @author jessewilson@google.com (Jesse Wilson) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TypeLiteral { final Class rawType; diff --git a/server/src/main/java/org/opensearch/common/inject/binder/AnnotatedBindingBuilder.java b/server/src/main/java/org/opensearch/common/inject/binder/AnnotatedBindingBuilder.java index bcd593a8cbf7b..5c3c6eac9bd3a 100644 --- a/server/src/main/java/org/opensearch/common/inject/binder/AnnotatedBindingBuilder.java +++ b/server/src/main/java/org/opensearch/common/inject/binder/AnnotatedBindingBuilder.java @@ -29,6 +29,8 @@ package org.opensearch.common.inject.binder; +import org.opensearch.common.annotation.PublicApi; + import java.lang.annotation.Annotation; /** @@ -36,8 +38,9 @@ * * @author crazybob@google.com (Bob Lee) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface AnnotatedBindingBuilder extends LinkedBindingBuilder { /** diff --git a/server/src/main/java/org/opensearch/common/inject/binder/AnnotatedConstantBindingBuilder.java b/server/src/main/java/org/opensearch/common/inject/binder/AnnotatedConstantBindingBuilder.java index 42c208a2b37ea..71ea1ba0a5207 100644 --- a/server/src/main/java/org/opensearch/common/inject/binder/AnnotatedConstantBindingBuilder.java +++ b/server/src/main/java/org/opensearch/common/inject/binder/AnnotatedConstantBindingBuilder.java @@ -29,6 +29,8 @@ package org.opensearch.common.inject.binder; +import org.opensearch.common.annotation.PublicApi; + import java.lang.annotation.Annotation; /** @@ -36,8 +38,9 @@ * * @author crazybob@google.com (Bob Lee) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface AnnotatedConstantBindingBuilder { /** diff --git a/server/src/main/java/org/opensearch/common/inject/binder/AnnotatedElementBuilder.java b/server/src/main/java/org/opensearch/common/inject/binder/AnnotatedElementBuilder.java index f2d0916790b6b..54fcb915d83c9 100644 --- a/server/src/main/java/org/opensearch/common/inject/binder/AnnotatedElementBuilder.java +++ b/server/src/main/java/org/opensearch/common/inject/binder/AnnotatedElementBuilder.java @@ -29,6 +29,8 @@ package org.opensearch.common.inject.binder; +import org.opensearch.common.annotation.PublicApi; + import java.lang.annotation.Annotation; /** @@ -37,8 +39,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface AnnotatedElementBuilder { /** diff --git a/server/src/main/java/org/opensearch/common/inject/binder/ConstantBindingBuilder.java b/server/src/main/java/org/opensearch/common/inject/binder/ConstantBindingBuilder.java index 595c477d3e28b..feaee3ed59f46 100644 --- 
a/server/src/main/java/org/opensearch/common/inject/binder/ConstantBindingBuilder.java +++ b/server/src/main/java/org/opensearch/common/inject/binder/ConstantBindingBuilder.java @@ -29,11 +29,14 @@ package org.opensearch.common.inject.binder; +import org.opensearch.common.annotation.PublicApi; + /** * Binds to a constant value. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ConstantBindingBuilder { /** diff --git a/server/src/main/java/org/opensearch/common/inject/binder/LinkedBindingBuilder.java b/server/src/main/java/org/opensearch/common/inject/binder/LinkedBindingBuilder.java index 2368fef16471c..e8c4b197253b5 100644 --- a/server/src/main/java/org/opensearch/common/inject/binder/LinkedBindingBuilder.java +++ b/server/src/main/java/org/opensearch/common/inject/binder/LinkedBindingBuilder.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.binder; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Key; import org.opensearch.common.inject.Provider; import org.opensearch.common.inject.TypeLiteral; @@ -38,8 +39,9 @@ * * @author crazybob@google.com (Bob Lee) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface LinkedBindingBuilder extends ScopedBindingBuilder { /** diff --git a/server/src/main/java/org/opensearch/common/inject/binder/ScopedBindingBuilder.java b/server/src/main/java/org/opensearch/common/inject/binder/ScopedBindingBuilder.java index 73dd4414f17a2..c360b9571bc4a 100644 --- a/server/src/main/java/org/opensearch/common/inject/binder/ScopedBindingBuilder.java +++ b/server/src/main/java/org/opensearch/common/inject/binder/ScopedBindingBuilder.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.binder; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Scope; import java.lang.annotation.Annotation; @@ -38,8 +39,9 @@ * * @author crazybob@google.com (Bob Lee) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ScopedBindingBuilder { /** diff --git a/server/src/main/java/org/opensearch/common/inject/matcher/Matcher.java b/server/src/main/java/org/opensearch/common/inject/matcher/Matcher.java index 21bb63cfef097..4e254f8641350 100644 --- a/server/src/main/java/org/opensearch/common/inject/matcher/Matcher.java +++ b/server/src/main/java/org/opensearch/common/inject/matcher/Matcher.java @@ -29,13 +29,16 @@ package org.opensearch.common.inject.matcher; +import org.opensearch.common.annotation.PublicApi; + /** * Returns {@code true} or {@code false} for a given input. * * @author crazybob@google.com (Bob Lee) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Matcher { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/BindingScopingVisitor.java b/server/src/main/java/org/opensearch/common/inject/spi/BindingScopingVisitor.java index d7c7d9d65051d..b4fbdf2fdb72b 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/BindingScopingVisitor.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/BindingScopingVisitor.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Scope; import java.lang.annotation.Annotation; @@ -40,8 +41,9 @@ * {@code return null} if no return type is needed. 
* @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface BindingScopingVisitor { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/BindingTargetVisitor.java b/server/src/main/java/org/opensearch/common/inject/spi/BindingTargetVisitor.java index 91df812b58ac4..9543e731308bd 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/BindingTargetVisitor.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/BindingTargetVisitor.java @@ -29,6 +29,8 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; + /** * Visits each of the strategies used to find an instance to satisfy an injection. * @@ -36,8 +38,9 @@ * {@code return null} if no return type is needed. * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface BindingTargetVisitor { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/ConstructorBinding.java b/server/src/main/java/org/opensearch/common/inject/spi/ConstructorBinding.java index 997bf78234fd1..8eec6cefe53c7 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/ConstructorBinding.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/ConstructorBinding.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binding; import java.util.Set; @@ -40,8 +41,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ConstructorBinding extends Binding, HasDependencies { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/ConvertedConstantBinding.java b/server/src/main/java/org/opensearch/common/inject/spi/ConvertedConstantBinding.java index e8d6b346f8596..a07da68a88931 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/ConvertedConstantBinding.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/ConvertedConstantBinding.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binding; import org.opensearch.common.inject.Key; @@ -41,8 +42,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ConvertedConstantBinding extends Binding, HasDependencies { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/Dependency.java b/server/src/main/java/org/opensearch/common/inject/spi/Dependency.java index be1336ad0f297..e541ba0b73bf5 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/Dependency.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/Dependency.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Key; import java.util.HashSet; @@ -47,8 +48,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class Dependency { private final InjectionPoint injectionPoint; private final Key key; diff --git a/server/src/main/java/org/opensearch/common/inject/spi/Element.java b/server/src/main/java/org/opensearch/common/inject/spi/Element.java index 660aca1bd45ab..58a696fb7ffa9 100644 --- 
a/server/src/main/java/org/opensearch/common/inject/spi/Element.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/Element.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binder; /** @@ -43,8 +44,9 @@ * @author crazybob@google.com (Bob Lee) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Element { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/ElementVisitor.java b/server/src/main/java/org/opensearch/common/inject/spi/ElementVisitor.java index d415560fc03c8..b88f11b9378aa 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/ElementVisitor.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/ElementVisitor.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binding; /** @@ -38,8 +39,9 @@ * {@code return null} if no return type is needed. * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ElementVisitor { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/ExposedBinding.java b/server/src/main/java/org/opensearch/common/inject/spi/ExposedBinding.java index d2563bc2728cd..6c1679432abe5 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/ExposedBinding.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/ExposedBinding.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binder; import org.opensearch.common.inject.Binding; @@ -38,8 +39,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ExposedBinding extends Binding, HasDependencies { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/InjectionListener.java b/server/src/main/java/org/opensearch/common/inject/spi/InjectionListener.java index 7a760d2b84e9f..878e919cda4cc 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/InjectionListener.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/InjectionListener.java @@ -29,6 +29,8 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; + /** * Listens for injections into instances of type {@code I}. Useful for performing further * injections, post-injection initialization, and more. 
@@ -37,8 +39,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface InjectionListener { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/InjectionPoint.java b/server/src/main/java/org/opensearch/common/inject/spi/InjectionPoint.java index c88b2281107ed..542cbd780a8b6 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/InjectionPoint.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/InjectionPoint.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.ConfigurationException; import org.opensearch.common.inject.Inject; import org.opensearch.common.inject.Key; @@ -66,8 +67,9 @@ * @author crazybob@google.com (Bob Lee) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class InjectionPoint { private final boolean optional; diff --git a/server/src/main/java/org/opensearch/common/inject/spi/InjectionRequest.java b/server/src/main/java/org/opensearch/common/inject/spi/InjectionRequest.java index 6ce5febbb6711..a5faca6264424 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/InjectionRequest.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/InjectionRequest.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binder; import org.opensearch.common.inject.ConfigurationException; import org.opensearch.common.inject.TypeLiteral; @@ -46,8 +47,9 @@ * @author mikeward@google.com (Mike Ward) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class InjectionRequest implements Element { private final Object source; diff --git a/server/src/main/java/org/opensearch/common/inject/spi/InstanceBinding.java b/server/src/main/java/org/opensearch/common/inject/spi/InstanceBinding.java index fd7c1303ed6fc..f73b284ae2e8c 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/InstanceBinding.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/InstanceBinding.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binding; import java.util.Set; @@ -39,8 +40,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface InstanceBinding extends Binding, HasDependencies { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/LinkedKeyBinding.java b/server/src/main/java/org/opensearch/common/inject/spi/LinkedKeyBinding.java index 10b270e499603..01da905f8da47 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/LinkedKeyBinding.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/LinkedKeyBinding.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binding; import org.opensearch.common.inject.Key; @@ -38,8 +39,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface LinkedKeyBinding extends Binding { /** diff --git 
a/server/src/main/java/org/opensearch/common/inject/spi/MembersInjectorLookup.java b/server/src/main/java/org/opensearch/common/inject/spi/MembersInjectorLookup.java index 1f652708de875..b8a07146812c1 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/MembersInjectorLookup.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/MembersInjectorLookup.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binder; import org.opensearch.common.inject.MembersInjector; import org.opensearch.common.inject.TypeLiteral; @@ -45,8 +46,9 @@ * @author crazybob@google.com (Bob Lee) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class MembersInjectorLookup implements Element { private final Object source; diff --git a/server/src/main/java/org/opensearch/common/inject/spi/Message.java b/server/src/main/java/org/opensearch/common/inject/spi/Message.java index 78829e82c150e..13184a7d82f0c 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/Message.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/Message.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binder; import org.opensearch.common.inject.internal.Errors; import org.opensearch.common.inject.internal.SourceProvider; @@ -50,8 +51,9 @@ * * @author crazybob@google.com (Bob Lee) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class Message implements Element { private final String message; private final Throwable cause; diff --git a/server/src/main/java/org/opensearch/common/inject/spi/PrivateElements.java b/server/src/main/java/org/opensearch/common/inject/spi/PrivateElements.java index e4d86a356cd53..6330cbe33de58 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/PrivateElements.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/PrivateElements.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Injector; import org.opensearch.common.inject.Key; @@ -42,8 +43,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface PrivateElements extends Element { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/ProviderBinding.java b/server/src/main/java/org/opensearch/common/inject/spi/ProviderBinding.java index 0a63fefc0a9e9..dd55e9805843f 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/ProviderBinding.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/ProviderBinding.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binding; import org.opensearch.common.inject.Key; import org.opensearch.common.inject.Provider; @@ -40,8 +41,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ProviderBinding> extends Binding { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/ProviderInstanceBinding.java b/server/src/main/java/org/opensearch/common/inject/spi/ProviderInstanceBinding.java index 
654f40e627e4b..25bac3b5df34c 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/ProviderInstanceBinding.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/ProviderInstanceBinding.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binding; import org.opensearch.common.inject.Provider; @@ -41,8 +42,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ProviderInstanceBinding extends Binding, HasDependencies { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/ProviderKeyBinding.java b/server/src/main/java/org/opensearch/common/inject/spi/ProviderKeyBinding.java index 6f1ae8f2b9a03..f68e1662ad124 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/ProviderKeyBinding.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/ProviderKeyBinding.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binding; import org.opensearch.common.inject.Key; import org.opensearch.common.inject.Provider; @@ -40,8 +41,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ProviderKeyBinding extends Binding { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/ProviderLookup.java b/server/src/main/java/org/opensearch/common/inject/spi/ProviderLookup.java index 16060ddd3e222..6afe7346a1431 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/ProviderLookup.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/ProviderLookup.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binder; import org.opensearch.common.inject.Key; import org.opensearch.common.inject.Provider; @@ -45,8 +46,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ProviderLookup implements Element { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/ScopeBinding.java b/server/src/main/java/org/opensearch/common/inject/spi/ScopeBinding.java index 7a619456e06e3..ca03f4291a062 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/ScopeBinding.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/ScopeBinding.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binder; import org.opensearch.common.inject.Scope; @@ -46,8 +47,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ScopeBinding implements Element { private final Object source; private final Class annotationType; diff --git a/server/src/main/java/org/opensearch/common/inject/spi/StaticInjectionRequest.java b/server/src/main/java/org/opensearch/common/inject/spi/StaticInjectionRequest.java index 494e35e6c4490..c426639d85cab 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/StaticInjectionRequest.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/StaticInjectionRequest.java @@ 
-29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binder; import org.opensearch.common.inject.ConfigurationException; @@ -45,8 +46,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class StaticInjectionRequest implements Element { private final Object source; private final Class type; diff --git a/server/src/main/java/org/opensearch/common/inject/spi/TypeConverter.java b/server/src/main/java/org/opensearch/common/inject/spi/TypeConverter.java index 93a0f607ddc27..2386c1e528db6 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/TypeConverter.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/TypeConverter.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.TypeLiteral; /** @@ -37,8 +38,9 @@ * @author crazybob@google.com (Bob Lee) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface TypeConverter { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/TypeConverterBinding.java b/server/src/main/java/org/opensearch/common/inject/spi/TypeConverterBinding.java index 00b8c7c013b5a..59311de0fb3f5 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/TypeConverterBinding.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/TypeConverterBinding.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binder; import org.opensearch.common.inject.TypeLiteral; import org.opensearch.common.inject.matcher.Matcher; @@ -45,8 +46,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class TypeConverterBinding implements Element { private final Object source; private final Matcher> typeMatcher; diff --git a/server/src/main/java/org/opensearch/common/inject/spi/TypeEncounter.java b/server/src/main/java/org/opensearch/common/inject/spi/TypeEncounter.java index e06751668c0f1..61756a5bcad95 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/TypeEncounter.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/TypeEncounter.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Key; import org.opensearch.common.inject.MembersInjector; import org.opensearch.common.inject.Provider; @@ -43,8 +44,9 @@ * @param the injectable type encountered * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") @SuppressWarnings("overloads") public interface TypeEncounter { diff --git a/server/src/main/java/org/opensearch/common/inject/spi/TypeListener.java b/server/src/main/java/org/opensearch/common/inject/spi/TypeListener.java index fd7004aa80df0..3157fa15f471b 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/TypeListener.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/TypeListener.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.TypeLiteral; /** @@ -43,8 +44,9 @@ * * @since 2.0 * - * @opensearch.internal + * 
@opensearch.api */ +@PublicApi(since = "1.0.0") public interface TypeListener { /** diff --git a/server/src/main/java/org/opensearch/common/inject/spi/TypeListenerBinding.java b/server/src/main/java/org/opensearch/common/inject/spi/TypeListenerBinding.java index 505028f09232d..4ddcf3fc11bc1 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/TypeListenerBinding.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/TypeListenerBinding.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binder; import org.opensearch.common.inject.TypeLiteral; import org.opensearch.common.inject.matcher.Matcher; @@ -42,8 +43,9 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class TypeListenerBinding implements Element { private final Object source; diff --git a/server/src/main/java/org/opensearch/common/inject/spi/UntargettedBinding.java b/server/src/main/java/org/opensearch/common/inject/spi/UntargettedBinding.java index 37e40d45cb5a9..56890efdfcd8d 100644 --- a/server/src/main/java/org/opensearch/common/inject/spi/UntargettedBinding.java +++ b/server/src/main/java/org/opensearch/common/inject/spi/UntargettedBinding.java @@ -29,6 +29,7 @@ package org.opensearch.common.inject.spi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Binding; /** @@ -38,6 +39,7 @@ * @author jessewilson@google.com (Jesse Wilson) * @since 2.0 * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface UntargettedBinding extends Binding {} diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java b/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java index 41783b89ccc69..f4503ce55e6bc 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.Logger; import org.opensearch.action.ActionModule.DynamicActionRegistry; import org.opensearch.client.node.NodeClient; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.rest.RestStatus; @@ -52,7 +53,10 @@ /** * An action that forwards REST requests to an extension + * + * @opensearch.experimental */ +@ExperimentalApi public class RestSendToExtensionAction extends BaseRestHandler { private static final String SEND_TO_EXTENSION_ACTION = "send_to_extension_action"; diff --git a/server/src/main/java/org/opensearch/rest/NamedRoute.java b/server/src/main/java/org/opensearch/rest/NamedRoute.java index 109f688a4924e..c2b3ea5fdeaaf 100644 --- a/server/src/main/java/org/opensearch/rest/NamedRoute.java +++ b/server/src/main/java/org/opensearch/rest/NamedRoute.java @@ -9,6 +9,7 @@ package org.opensearch.rest; import org.opensearch.OpenSearchException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.transport.TransportService; import java.util.HashSet; @@ -20,8 +21,9 @@ /** * A named Route * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.7.0") public class NamedRoute extends RestHandler.Route { private static final String VALID_ACTION_NAME_PATTERN = "^[a-zA-Z0-9:/*_]*$"; 
diff --git a/server/src/main/java/org/opensearch/rest/RestHandler.java b/server/src/main/java/org/opensearch/rest/RestHandler.java index 387e8b25a8274..877afdd951088 100644 --- a/server/src/main/java/org/opensearch/rest/RestHandler.java +++ b/server/src/main/java/org/opensearch/rest/RestHandler.java @@ -47,6 +47,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") @FunctionalInterface public interface RestHandler { @@ -233,7 +234,10 @@ public boolean equals(Object o) { /** * Represents an API that has been deprecated and is slated for removal. + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") class DeprecatedRoute extends Route { private final String deprecationMessage; @@ -251,7 +255,10 @@ public String getDeprecationMessage() { /** * Represents an API that has had its {@code path} or {@code method} changed. Holds both the * new and previous {@code path} and {@code method} combination. + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") class ReplacedRoute extends Route { private final String deprecatedPath; diff --git a/server/src/main/java/org/opensearch/tasks/TaskListener.java b/server/src/main/java/org/opensearch/tasks/TaskListener.java index 97df8eacee584..7ec08984a7f07 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskListener.java +++ b/server/src/main/java/org/opensearch/tasks/TaskListener.java @@ -32,11 +32,14 @@ package org.opensearch.tasks; +import org.opensearch.common.annotation.PublicApi; + /** * Listener for Task success or failure. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface TaskListener { /** * Handle task response. This response may constitute a failure or a success diff --git a/server/src/main/java/org/opensearch/transport/RemoteClusterService.java b/server/src/main/java/org/opensearch/transport/RemoteClusterService.java index 87786fb22f22e..dc729053c2148 100644 --- a/server/src/main/java/org/opensearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/opensearch/transport/RemoteClusterService.java @@ -40,6 +40,7 @@ import org.opensearch.client.Client; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; @@ -73,8 +74,9 @@ /** * Basic service for accessing remote clusters via gateway nodes * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class RemoteClusterService extends RemoteClusterAware implements Closeable { private final Logger logger = LogManager.getLogger(RemoteClusterService.class); diff --git a/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java b/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java index 280dd958358fd..b5f356490d3d4 100644 --- a/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java +++ b/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java @@ -32,6 +32,7 @@ package org.opensearch.transport; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -46,8 +47,9 @@ * This class encapsulates all remote cluster information to be rendered on * {@code _remote/info} requests. 
* - @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class RemoteConnectionInfo implements ToXContentFragment, Writeable { final ModeInfo modeInfo; @@ -132,8 +134,9 @@ public int hashCode() { /** * Mode information * - @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface ModeInfo extends ToXContentFragment, Writeable { boolean isConnected(); diff --git a/server/src/main/java/org/opensearch/transport/RemoteConnectionStrategy.java b/server/src/main/java/org/opensearch/transport/RemoteConnectionStrategy.java index f0b37c617725e..f2c159d1380e8 100644 --- a/server/src/main/java/org/opensearch/transport/RemoteConnectionStrategy.java +++ b/server/src/main/java/org/opensearch/transport/RemoteConnectionStrategy.java @@ -38,6 +38,7 @@ import org.apache.lucene.store.AlreadyClosedException; import org.opensearch.action.support.ContextPreservingActionListener; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -75,7 +76,13 @@ */ public abstract class RemoteConnectionStrategy implements TransportConnectionListener, Closeable { - enum ConnectionStrategy { + /** + * Strategy to connect to remote nodes + * + * @opensearch.api + */ + @PublicApi(since = "1.0.0") + public enum ConnectionStrategy { SNIFF( SniffConnectionStrategy.CHANNELS_PER_CONNECTION, SniffConnectionStrategy::enablementSettings, From 5b4b4aa4c282d06a93de72a5c07a54a1524b04ff Mon Sep 17 00:00:00 2001 From: Peter Nied Date: Fri, 22 Mar 2024 20:19:03 -0500 Subject: [PATCH 31/74] Make GetTermVersionAction internal (#12866) * Make GetTermVersionAction internal The GetTermVersion action was added to check whether the cluster state needs to be updated, as a performance improvement for all cluster state checks. The authorization systems in the Security Plugin check permissions for all actions that do not start with `internal:`, causing users without `cluster:monitor/*` permissions to start getting 403 exceptions. This change signals that this action does not require an authorization check by any security system, since it cannot be called via REST APIs.
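For illustration, a minimal sketch of the naming convention this change relies on. The helper below is hypothetical (not the Security Plugin's actual code); it only shows the effect of the rename under that convention.

// Hypothetical sketch -- not the Security Plugin's actual implementation.
// Transport actions named with the "internal:" prefix cannot be invoked via
// REST APIs, so authorization layers conventionally skip permission checks for them.
public final class ActionAuthorizationSketch {
    private static final String INTERNAL_ACTION_PREFIX = "internal:";

    static boolean requiresPermissionCheck(String actionName) {
        return actionName.startsWith(INTERNAL_ACTION_PREFIX) == false;
    }

    public static void main(String[] args) {
        // Before this change, the term check was authorized like any monitor action:
        System.out.println(requiresPermissionCheck("cluster:monitor/term"));  // true
        // After the rename in this patch, it is skipped by convention:
        System.out.println(requiresPermissionCheck("internal:monitor/term")); // false
    }
}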
Signed-off-by: Peter Nied * Move TermVersion action outside of admin/cluster namespace Signed-off-by: Peter Nied --------- Signed-off-by: Peter Nied --- .../support/clustermanager/term}/FetchByTermVersionIT.java | 4 +--- .../src/main/java/org/opensearch/action/ActionModule.java | 4 ++-- .../clustermanager/TransportClusterManagerNodeAction.java | 6 +++--- .../clustermanager}/term/GetTermVersionAction.java | 4 ++-- .../clustermanager}/term/GetTermVersionRequest.java | 2 +- .../clustermanager}/term/GetTermVersionResponse.java | 2 +- .../clustermanager}/term/TransportGetTermVersionAction.java | 2 +- .../state => support/clustermanager}/term/package-info.java | 2 +- .../TransportClusterManagerTermCheckTests.java | 6 +++--- .../clustermanager}/term/ClusterTermVersionIT.java | 2 +- .../clustermanager}/term/ClusterTermVersionTests.java | 2 +- .../org/opensearch/snapshots/SnapshotResiliencyTests.java | 4 ++-- 12 files changed, 19 insertions(+), 21 deletions(-) rename server/src/internalClusterTest/java/org/opensearch/{cluster/state => action/support/clustermanager/term}/FetchByTermVersionIT.java (97%) rename server/src/main/java/org/opensearch/action/{admin/cluster/state => support/clustermanager}/term/GetTermVersionAction.java (83%) rename server/src/main/java/org/opensearch/action/{admin/cluster/state => support/clustermanager}/term/GetTermVersionRequest.java (93%) rename server/src/main/java/org/opensearch/action/{admin/cluster/state => support/clustermanager}/term/GetTermVersionResponse.java (96%) rename server/src/main/java/org/opensearch/action/{admin/cluster/state => support/clustermanager}/term/TransportGetTermVersionAction.java (97%) rename server/src/main/java/org/opensearch/action/{admin/cluster/state => support/clustermanager}/term/package-info.java (80%) rename server/src/test/java/org/opensearch/action/{admin/cluster/state => support/clustermanager}/term/ClusterTermVersionIT.java (98%) rename server/src/test/java/org/opensearch/action/{admin/cluster/state => support/clustermanager}/term/ClusterTermVersionTests.java (93%) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/state/FetchByTermVersionIT.java b/server/src/internalClusterTest/java/org/opensearch/action/support/clustermanager/term/FetchByTermVersionIT.java similarity index 97% rename from server/src/internalClusterTest/java/org/opensearch/cluster/state/FetchByTermVersionIT.java rename to server/src/internalClusterTest/java/org/opensearch/action/support/clustermanager/term/FetchByTermVersionIT.java index cef184b3fddf9..72dcc98dcdc12 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/state/FetchByTermVersionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/support/clustermanager/term/FetchByTermVersionIT.java @@ -6,12 +6,10 @@ * compatible open source license.
*/ -package org.opensearch.cluster.state; +package org.opensearch.action.support.clustermanager.term; import org.opensearch.action.admin.cluster.state.ClusterStateRequest; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; -import org.opensearch.action.admin.cluster.state.term.GetTermVersionAction; -import org.opensearch.action.admin.cluster.state.term.GetTermVersionResponse; import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.coordination.ClusterStateTermVersion; diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 716e248b09692..5e2b62614fc47 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -107,8 +107,6 @@ import org.opensearch.action.admin.cluster.snapshots.status.TransportSnapshotsStatusAction; import org.opensearch.action.admin.cluster.state.ClusterStateAction; import org.opensearch.action.admin.cluster.state.TransportClusterStateAction; -import org.opensearch.action.admin.cluster.state.term.GetTermVersionAction; -import org.opensearch.action.admin.cluster.state.term.TransportGetTermVersionAction; import org.opensearch.action.admin.cluster.stats.ClusterStatsAction; import org.opensearch.action.admin.cluster.stats.TransportClusterStatsAction; import org.opensearch.action.admin.cluster.storedscripts.DeleteStoredScriptAction; @@ -283,6 +281,8 @@ import org.opensearch.action.support.AutoCreateIndex; import org.opensearch.action.support.DestructiveOperations; import org.opensearch.action.support.TransportAction; +import org.opensearch.action.support.clustermanager.term.GetTermVersionAction; +import org.opensearch.action.support.clustermanager.term.TransportGetTermVersionAction; import org.opensearch.action.termvectors.MultiTermVectorsAction; import org.opensearch.action.termvectors.TermVectorsAction; import org.opensearch.action.termvectors.TransportMultiTermVectorsAction; diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java index 5f57658e33924..080b0d607e991 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java @@ -37,13 +37,13 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.action.ActionListenerResponseHandler; import org.opensearch.action.ActionRunnable; -import org.opensearch.action.admin.cluster.state.term.GetTermVersionAction; -import org.opensearch.action.admin.cluster.state.term.GetTermVersionRequest; -import org.opensearch.action.admin.cluster.state.term.GetTermVersionResponse; import org.opensearch.action.bulk.BackoffPolicy; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; import org.opensearch.action.support.RetryableAction; +import org.opensearch.action.support.clustermanager.term.GetTermVersionAction; +import org.opensearch.action.support.clustermanager.term.GetTermVersionRequest; +import org.opensearch.action.support.clustermanager.term.GetTermVersionResponse; import org.opensearch.cluster.ClusterManagerNodeChangePredicate; import 
org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/term/GetTermVersionAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/term/GetTermVersionAction.java similarity index 83% rename from server/src/main/java/org/opensearch/action/admin/cluster/state/term/GetTermVersionAction.java rename to server/src/main/java/org/opensearch/action/support/clustermanager/term/GetTermVersionAction.java index 3344fd549b23f..2401dddd0cab3 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/term/GetTermVersionAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/term/GetTermVersionAction.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.action.admin.cluster.state.term; +package org.opensearch.action.support.clustermanager.term; import org.opensearch.action.ActionType; @@ -18,7 +18,7 @@ public class GetTermVersionAction extends ActionType { public static final GetTermVersionAction INSTANCE = new GetTermVersionAction(); - public static final String NAME = "cluster:monitor/term"; + public static final String NAME = "internal:monitor/term"; private GetTermVersionAction() { super(NAME, GetTermVersionResponse::new); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/term/GetTermVersionRequest.java b/server/src/main/java/org/opensearch/action/support/clustermanager/term/GetTermVersionRequest.java similarity index 93% rename from server/src/main/java/org/opensearch/action/admin/cluster/state/term/GetTermVersionRequest.java rename to server/src/main/java/org/opensearch/action/support/clustermanager/term/GetTermVersionRequest.java index b099f8087bd15..507997a1f7e7a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/term/GetTermVersionRequest.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/term/GetTermVersionRequest.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.action.admin.cluster.state.term; +package org.opensearch.action.support.clustermanager.term; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/term/GetTermVersionResponse.java b/server/src/main/java/org/opensearch/action/support/clustermanager/term/GetTermVersionResponse.java similarity index 96% rename from server/src/main/java/org/opensearch/action/admin/cluster/state/term/GetTermVersionResponse.java rename to server/src/main/java/org/opensearch/action/support/clustermanager/term/GetTermVersionResponse.java index 16b355a80d1f2..0906abe57d547 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/term/GetTermVersionResponse.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/term/GetTermVersionResponse.java @@ -6,7 +6,7 @@ * compatible open source license. 
*/ -package org.opensearch.action.admin.cluster.state.term; +package org.opensearch.action.support.clustermanager.term; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.ClusterStateTermVersion; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/term/TransportGetTermVersionAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/term/TransportGetTermVersionAction.java similarity index 97% rename from server/src/main/java/org/opensearch/action/admin/cluster/state/term/TransportGetTermVersionAction.java rename to server/src/main/java/org/opensearch/action/support/clustermanager/term/TransportGetTermVersionAction.java index 88305252aa99c..4752a99c910e4 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/term/TransportGetTermVersionAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/term/TransportGetTermVersionAction.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.action.admin.cluster.state.term; +package org.opensearch.action.support.clustermanager.term; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/term/package-info.java b/server/src/main/java/org/opensearch/action/support/clustermanager/term/package-info.java similarity index 80% rename from server/src/main/java/org/opensearch/action/admin/cluster/state/term/package-info.java rename to server/src/main/java/org/opensearch/action/support/clustermanager/term/package-info.java index 0ee559c527d7d..229c405df2d7c 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/term/package-info.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/term/package-info.java @@ -7,4 +7,4 @@ */ /** Cluster Term transport handler. 
*/ -package org.opensearch.action.admin.cluster.state.term; +package org.opensearch.action.support.clustermanager.term; diff --git a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerTermCheckTests.java b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerTermCheckTests.java index 8c7b7a0940c82..6d118cbccb42d 100644 --- a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerTermCheckTests.java +++ b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerTermCheckTests.java @@ -33,10 +33,10 @@ import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.admin.cluster.state.term.GetTermVersionResponse; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.ThreadedActionListener; +import org.opensearch.action.support.clustermanager.term.GetTermVersionResponse; import org.opensearch.action.support.replication.ClusterStateCreationUtils; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; @@ -223,7 +223,7 @@ public void testTermCheckMatchWithClusterManager() throws ExecutionException, In assertThat(transport.capturedRequests().length, equalTo(1)); CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0]; assertTrue(capturedRequest.node.isClusterManagerNode()); - assertThat(capturedRequest.action, equalTo("cluster:monitor/term")); + assertThat(capturedRequest.action, equalTo("internal:monitor/term")); GetTermVersionResponse response = new GetTermVersionResponse( new ClusterStateTermVersion( clusterService.state().getClusterName(), @@ -249,7 +249,7 @@ public void testTermCheckNoMatchWithClusterManager() throws ExecutionException, assertThat(transport.capturedRequests().length, equalTo(1)); CapturingTransport.CapturedRequest termCheckRequest = transport.capturedRequests()[0]; assertTrue(termCheckRequest.node.isClusterManagerNode()); - assertThat(termCheckRequest.action, equalTo("cluster:monitor/term")); + assertThat(termCheckRequest.action, equalTo("internal:monitor/term")); GetTermVersionResponse noMatchResponse = new GetTermVersionResponse( new ClusterStateTermVersion( clusterService.state().getClusterName(), diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/state/term/ClusterTermVersionIT.java b/server/src/test/java/org/opensearch/action/support/clustermanager/term/ClusterTermVersionIT.java similarity index 98% rename from server/src/test/java/org/opensearch/action/admin/cluster/state/term/ClusterTermVersionIT.java rename to server/src/test/java/org/opensearch/action/support/clustermanager/term/ClusterTermVersionIT.java index fa2a6121af349..7b783e025a575 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/state/term/ClusterTermVersionIT.java +++ b/server/src/test/java/org/opensearch/action/support/clustermanager/term/ClusterTermVersionIT.java @@ -6,7 +6,7 @@ * compatible open source license. 
*/ -package org.opensearch.action.admin.cluster.state.term; +package org.opensearch.action.support.clustermanager.term; import org.opensearch.action.admin.cluster.state.ClusterStateAction; import org.opensearch.action.admin.cluster.state.ClusterStateRequest; diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/state/term/ClusterTermVersionTests.java b/server/src/test/java/org/opensearch/action/support/clustermanager/term/ClusterTermVersionTests.java similarity index 93% rename from server/src/test/java/org/opensearch/action/admin/cluster/state/term/ClusterTermVersionTests.java rename to server/src/test/java/org/opensearch/action/support/clustermanager/term/ClusterTermVersionTests.java index 22d9623eebdbe..23ae8c6a58776 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/state/term/ClusterTermVersionTests.java +++ b/server/src/test/java/org/opensearch/action/support/clustermanager/term/ClusterTermVersionTests.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.action.admin.cluster.state.term; +package org.opensearch.action.support.clustermanager.term; import org.opensearch.cluster.service.ClusterService; import org.opensearch.test.OpenSearchSingleNodeTestCase; diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 58315ba031b84..e9bbf3d861408 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -66,8 +66,6 @@ import org.opensearch.action.admin.cluster.state.ClusterStateRequest; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; import org.opensearch.action.admin.cluster.state.TransportClusterStateAction; -import org.opensearch.action.admin.cluster.state.term.GetTermVersionAction; -import org.opensearch.action.admin.cluster.state.term.TransportGetTermVersionAction; import org.opensearch.action.admin.indices.create.CreateIndexAction; import org.opensearch.action.admin.indices.create.CreateIndexRequest; import org.opensearch.action.admin.indices.create.CreateIndexResponse; @@ -105,6 +103,8 @@ import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.TransportAction; import org.opensearch.action.support.WriteRequest; +import org.opensearch.action.support.clustermanager.term.GetTermVersionAction; +import org.opensearch.action.support.clustermanager.term.TransportGetTermVersionAction; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.update.UpdateHelper; import org.opensearch.client.AdminClient; From 9267170e77c3e523a877fae8d7688be72ebccffe Mon Sep 17 00:00:00 2001 From: Zelin Hao Date: Mon, 25 Mar 2024 06:03:54 -0700 Subject: [PATCH 32/74] Replace configureEach with all for publication iteration (#12876) * Change from configureEach to all to resolve incompatibility Signed-off-by: Zelin Hao * Update Changelog Signed-off-by: Zelin Hao --------- Signed-off-by: Zelin Hao --- CHANGELOG.md | 3 ++- .../gradle/precommit/PomValidationPrecommitPlugin.java | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 346913f025f4d..882a46d60a191 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,7 +19,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Remote reindex: Add support for configurable retry mechanism 
([#12561](https://github.com/opensearch-project/OpenSearch/pull/12561)) - [Admission Control] Integrate IO Usage Tracker to the Resource Usage Collector Service and Emit IO Usage Stats ([#11880](https://github.com/opensearch-project/OpenSearch/pull/11880)) - Tracing for deep search path ([#12103](https://github.com/opensearch-project/OpenSearch/pull/12103)) -- Add explicit dependency to validatePom and generatePom tasks ([#12103](https://github.com/opensearch-project/OpenSearch/pull/12807)) +- Add explicit dependency to validatePom and generatePom tasks ([#12807](https://github.com/opensearch-project/OpenSearch/pull/12807)) +- Replace configureEach with all for publication iteration ([#12876](https://github.com/opensearch-project/OpenSearch/pull/12876)) ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationPrecommitPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationPrecommitPlugin.java index 62194af3c3350..d3f173c9c02ea 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationPrecommitPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationPrecommitPlugin.java @@ -48,7 +48,7 @@ public class PomValidationPrecommitPlugin extends PrecommitPlugin { public TaskProvider createTask(Project project) { TaskProvider validatePom = project.getTasks().register("validatePom"); PublishingExtension publishing = project.getExtensions().getByType(PublishingExtension.class); - publishing.getPublications().configureEach(publication -> { + publishing.getPublications().all(publication -> { String publicationName = Util.capitalize(publication.getName()); TaskProvider validateTask = project.getTasks() .register("validate" + publicationName + "Pom", PomValidationTask.class); @@ -59,7 +59,7 @@ public TaskProvider createTask(Project project) { validateTask.configure(task -> { task.dependsOn(generateMavenPom); task.getPomFile().fileProvider(generateMavenPom.map(GenerateMavenPom::getDestination)); - publishing.getPublications().configureEach(publicationForPomGen -> { + publishing.getPublications().all(publicationForPomGen -> { task.mustRunAfter( project.getTasks() .withType(GenerateMavenPom.class) From 968f99752a313edb241cd64162b456e7042784bb Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Mon, 25 Mar 2024 10:56:29 -0400 Subject: [PATCH 33/74] Bump OpenSearch version in README for open issues (#12895) Signed-off-by: Craig Perkins --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index b5fc45509b002..748f8a366ecc8 100644 --- a/README.md +++ b/README.md @@ -7,8 +7,8 @@ [![Security Vulnerabilities](https://img.shields.io/github/issues/opensearch-project/OpenSearch/security%20vulnerability?labelColor=red)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"security%20vulnerability") [![Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch)](https://github.com/opensearch-project/OpenSearch/issues) [![Open Pull Requests](https://img.shields.io/github/issues-pr/opensearch-project/OpenSearch)](https://github.com/opensearch-project/OpenSearch/pulls) -[![2.10 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v2.10.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v2.10.0") -[![3.0 Open 
Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v3.0.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v3.0.0") +[![2.14.0 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v2.14.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v2.14.0") +[![3.0.0 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v3.0.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v3.0.0") [![GHA gradle check](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml) [![GHA validate pull request](https://github.com/opensearch-project/OpenSearch/actions/workflows/wrapper.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/wrapper.yml) [![GHA precommit](https://github.com/opensearch-project/OpenSearch/actions/workflows/precommit.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/precommit.yml) From b98573787a270b5168705c8c00c3600d8f698ecc Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 25 Mar 2024 12:18:24 -0400 Subject: [PATCH 34/74] Update to Gradle 8.7 (#12444) Signed-off-by: Andriy Redko --- .../java/org/opensearch/gradle/test/GradleThreadsFilter.java | 4 +++- gradle/wrapper/gradle-wrapper.properties | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java index def5248c1f255..0ede465439400 100644 --- a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java +++ b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java @@ -45,6 +45,8 @@ public class GradleThreadsFilter implements ThreadFilter { public boolean reject(Thread t) { return t.getName().startsWith("Exec process") || t.getName().startsWith("Memory manager") - || t.getName().startsWith("File watcher consumer"); + || t.getName().startsWith("File watcher consumer") + || t.getName().startsWith("sshd-SshClient") /* Started by SshClient (sshd-core), part of SftpFileSystemProvider */ + || t.getName().startsWith("Thread-"); /* Started by AbstractFactoryManager (sshd-core), part of SftpFileSystemProvider */ } } diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 82a4add334a7d..9b0d73222260e 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -11,7 +11,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-8.6-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.7-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=85719317abd2112f021d4f41f09ec370534ba288432065f4b477b6a3b652910d +distributionSha256Sum=194717442575a6f96e1c1befa2c30e9a4fc90f701d7aee33eb879b79e7ff05c0 From 52ae79d017a8e961cc1bc81fa7eafcc59689a447 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Mon, 25 Mar 2024 09:37:33 -0700 Subject: [PATCH 35/74] Disable MockTelemetryPlugin by default for integ tests (#12877) With this plugin enabled we see significant increase in memory usage. 
This is due to a static map that collects Span entries per write/replication request. Tests with higher counts of independent writes are at risk of running OOM or experiencing slowdowns / warnings related to this. Disabled the plugin by default until this is resolved, given it is still behind a feature flag. Signed-off-by: Marc Handalian --- .../main/java/org/opensearch/test/OpenSearchIntegTestCase.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 7cb1b3f4fe0d8..664314245530e 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -2070,7 +2070,8 @@ protected boolean addMockGeoShapeFieldMapper() { * @return boolean. */ protected boolean addMockTelemetryPlugin() { - return true; + // setting to false until https://github.com/opensearch-project/OpenSearch/issues/12615 is resolved + return false; } /** From 70711cfde74a00249e323ccc4198db657b9d9303 Mon Sep 17 00:00:00 2001 From: Rishabh Maurya Date: Mon, 25 Mar 2024 11:32:01 -0700 Subject: [PATCH 36/74] [DerivedFields] DerivedFieldScript and query execution logic (#12746) First in a series of commits to support derived fields, a form of schema-on-read. This commit adds: 1. DerivedFieldScript factory: This script factory will be used to execute scripts defined against derived fields of any type. 2. DerivedFieldValueFetcher: The value fetcher contains the logic to execute the script and fetch the value in the form of a List. It expects DerivedFieldScript.LeafFactory as an input and sets the contract with the consumer to call setNextReader() whenever a segment is switched. 3. DerivedFieldQuery: This query will be used by any of the derived fields. It expects an input query and a DerivedFieldValueFetcher. It uses a two-phase iterator approach with the approximation iterator set to match all docs. On a match, it creates a Lucene MemoryIndex for a given doc, fetches the value of the derived field from _source using DerivedFieldValueFetcher, and executes the input query against it.
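For illustration only, the pieces compose roughly as exercised by the new DerivedFieldQueryTests added in this commit (the term query and the keyword field generator below are example inputs, not fixed API):

    DerivedFieldScript.LeafFactory leafFactory = factory.newFactory(script.getParams(), searchLookup);
    DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(leafFactory);
    Query query = new DerivedFieldQuery(
        new TermQuery(new Term("ip_from_raw_request", "247.37.0.0")), // lucene query to run against the derived values
        valueFetcher,
        searchLookup,
        o -> new KeywordField("ip_from_raw_request", (String) o, Field.Store.NO), // derived value -> IndexableField
        Lucene.STANDARD_ANALYZER
    );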
--------- Signed-off-by: Rishabh Maurya --- .../test/painless/71_context_api.yml | 2 +- .../mapper/DerivedFieldValueFetcher.java | 45 ++++++ .../index/query/DerivedFieldQuery.java | 146 ++++++++++++++++++ .../opensearch/script/DerivedFieldScript.java | 104 +++++++++++++ .../org/opensearch/script/ScriptModule.java | 3 +- .../index/query/DerivedFieldQueryTests.java | 104 +++++++++++++ .../opensearch/script/MockScriptEngine.java | 15 ++ 7 files changed, 417 insertions(+), 2 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/mapper/DerivedFieldValueFetcher.java create mode 100644 server/src/main/java/org/opensearch/index/query/DerivedFieldQuery.java create mode 100644 server/src/main/java/org/opensearch/script/DerivedFieldScript.java create mode 100644 server/src/test/java/org/opensearch/index/query/DerivedFieldQueryTests.java diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/71_context_api.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/71_context_api.yml index 478ca9ae8abf4..20e6fd351a4b9 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/71_context_api.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/71_context_api.yml @@ -2,7 +2,7 @@ - do: scripts_painless_context: {} - match: { contexts.0: aggregation_selector} - - match: { contexts.23: update} + - match: { contexts.24: update} --- "Action to get all API values for score context": diff --git a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldValueFetcher.java b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldValueFetcher.java new file mode 100644 index 0000000000000..f3bf0c613415a --- /dev/null +++ b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldValueFetcher.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.index.LeafReaderContext; +import org.opensearch.script.DerivedFieldScript; +import org.opensearch.search.lookup.SourceLookup; + +import java.io.IOException; +import java.util.List; + +/** + * The value fetcher contains logic to execute script and fetch the value in form of list of object. + * It expects DerivedFieldScript.LeafFactory as an input and sets the contract with consumer to call + * {@link #setNextReader(LeafReaderContext)} whenever a segment is switched. + */ +public final class DerivedFieldValueFetcher implements ValueFetcher { + private DerivedFieldScript derivedFieldScript; + private final DerivedFieldScript.LeafFactory derivedFieldScriptFactory; + + public DerivedFieldValueFetcher(DerivedFieldScript.LeafFactory derivedFieldScriptFactory) { + this.derivedFieldScriptFactory = derivedFieldScriptFactory; + } + + @Override + public List fetchValues(SourceLookup lookup) { + derivedFieldScript.setDocument(lookup.docId()); + // TODO: remove List.of() when derivedFieldScript.execute() returns list of objects. 
+ return List.of(derivedFieldScript.execute()); + } + + public void setNextReader(LeafReaderContext context) { + try { + derivedFieldScript = derivedFieldScriptFactory.newInstance(context); + } catch (IOException e) { + throw new RuntimeException(e); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/query/DerivedFieldQuery.java b/server/src/main/java/org/opensearch/index/query/DerivedFieldQuery.java new file mode 100644 index 0000000000000..8beef0bf46be0 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/query/DerivedFieldQuery.java @@ -0,0 +1,146 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.query; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.memory.MemoryIndex; +import org.apache.lucene.search.ConstantScoreScorer; +import org.apache.lucene.search.ConstantScoreWeight; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryVisitor; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.TwoPhaseIterator; +import org.apache.lucene.search.Weight; +import org.opensearch.index.mapper.DerivedFieldValueFetcher; +import org.opensearch.search.lookup.LeafSearchLookup; +import org.opensearch.search.lookup.SearchLookup; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; +import java.util.function.Function; + +/** + * DerivedFieldQuery used for querying derived fields. It contains the logic to execute an input lucene query against + * DerivedField. It also accepts DerivedFieldValueFetcher and SearchLookup as an input. + */ +public final class DerivedFieldQuery extends Query { + private final Query query; + private final DerivedFieldValueFetcher valueFetcher; + private final SearchLookup searchLookup; + private final Function indexableFieldGenerator; + private final Analyzer indexAnalyzer; + + /** + * @param query lucene query to be executed against the derived field + * @param valueFetcher DerivedFieldValueFetcher ValueFetcher to fetch the value of a derived field from _source + * using LeafSearchLookup + * @param searchLookup SearchLookup to get the LeafSearchLookup look used by valueFetcher to fetch the _source + * @param indexableFieldGenerator used to generate lucene IndexableField from a given object fetched by valueFetcher + * to be used in lucene memory index. 
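+ * @param indexAnalyzer analyzer used to index the fetched values into the in-memory lucene index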
+ */ + public DerivedFieldQuery( + Query query, + DerivedFieldValueFetcher valueFetcher, + SearchLookup searchLookup, + Function indexableFieldGenerator, + Analyzer indexAnalyzer + ) { + this.query = query; + this.valueFetcher = valueFetcher; + this.searchLookup = searchLookup; + this.indexableFieldGenerator = indexableFieldGenerator; + this.indexAnalyzer = indexAnalyzer; + } + + @Override + public void visit(QueryVisitor visitor) { + query.visit(visitor); + } + + @Override + public Query rewrite(IndexSearcher indexSearcher) throws IOException { + Query rewritten = indexSearcher.rewrite(query); + if (rewritten == query) { + return this; + } + return new DerivedFieldQuery(rewritten, valueFetcher, searchLookup, indexableFieldGenerator, indexAnalyzer); + } + + @Override + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + + return new ConstantScoreWeight(this, boost) { + @Override + public Scorer scorer(LeafReaderContext context) { + DocIdSetIterator approximation = DocIdSetIterator.all(context.reader().maxDoc()); + valueFetcher.setNextReader(context); + LeafSearchLookup leafSearchLookup = searchLookup.getLeafSearchLookup(context); + TwoPhaseIterator twoPhase = new TwoPhaseIterator(approximation) { + @Override + public boolean matches() { + leafSearchLookup.source().setSegmentAndDocument(context, approximation.docID()); + List values = valueFetcher.fetchValues(leafSearchLookup.source()); + // TODO: in case of errors from script, should it be ignored and treated as missing field + // by using a configurable setting? + MemoryIndex memoryIndex = new MemoryIndex(); + for (Object value : values) { + memoryIndex.addField(indexableFieldGenerator.apply(value), indexAnalyzer); + } + float score = memoryIndex.search(query); + return score > 0.0f; + } + + @Override + public float matchCost() { + // TODO: how can we compute this? + return 1000f; + } + }; + return new ConstantScoreScorer(this, score(), scoreMode, twoPhase); + } + + @Override + public boolean isCacheable(LeafReaderContext ctx) { + return false; + } + }; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (sameClassAs(o) == false) { + return false; + } + DerivedFieldQuery other = (DerivedFieldQuery) o; + return Objects.equals(this.query, other.query) + && Objects.equals(this.valueFetcher, other.valueFetcher) + && Objects.equals(this.searchLookup, other.searchLookup) + && Objects.equals(this.indexableFieldGenerator, other.indexableFieldGenerator) + && Objects.equals(this.indexAnalyzer, other.indexAnalyzer); + } + + @Override + public int hashCode() { + return Objects.hash(classHash(), query, valueFetcher, searchLookup, indexableFieldGenerator, indexableFieldGenerator); + } + + @Override + public String toString(String f) { + return "DerivedFieldQuery (Query: [ " + query.toString(f) + "])"; + } +} diff --git a/server/src/main/java/org/opensearch/script/DerivedFieldScript.java b/server/src/main/java/org/opensearch/script/DerivedFieldScript.java new file mode 100644 index 0000000000000..7f5b991950ec6 --- /dev/null +++ b/server/src/main/java/org/opensearch/script/DerivedFieldScript.java @@ -0,0 +1,104 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.script; + +import org.apache.lucene.index.LeafReaderContext; +import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.index.fielddata.ScriptDocValues; +import org.opensearch.search.lookup.LeafSearchLookup; +import org.opensearch.search.lookup.SearchLookup; +import org.opensearch.search.lookup.SourceLookup; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Function; + +/** + * Definition of Script for DerivedField. + * It will be used to execute scripts defined against derived fields of any type + * + * @opensearch.internal + */ +public abstract class DerivedFieldScript { + + public static final String[] PARAMETERS = {}; + public static final ScriptContext CONTEXT = new ScriptContext<>("derived_field", Factory.class); + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(DynamicMap.class); + + private static final Map> PARAMS_FUNCTIONS = Map.of( + "doc", + value -> value, + "_source", + value -> ((SourceLookup) value).loadSourceIfNeeded() + ); + + /** + * The generic runtime parameters for the script. + */ + private final Map params; + + /** + * A leaf lookup for the bound segment this script will operate on. + */ + private final LeafSearchLookup leafLookup; + + public DerivedFieldScript(Map params, SearchLookup lookup, LeafReaderContext leafContext) { + Map parameters = new HashMap<>(params); + this.leafLookup = lookup.getLeafSearchLookup(leafContext); + parameters.putAll(leafLookup.asMap()); + this.params = new DynamicMap(parameters, PARAMS_FUNCTIONS); + } + + protected DerivedFieldScript() { + params = null; + leafLookup = null; + } + + /** + * Return the parameters for this script. + */ + public Map getParams() { + return params; + } + + /** + * The doc lookup for the Lucene segment this script was created for. + */ + public Map> getDoc() { + return leafLookup.doc(); + } + + /** + * Set the current document to run the script on next. + */ + public void setDocument(int docid) { + leafLookup.setDocument(docid); + } + + public abstract Object execute(); + + /** + * A factory to construct {@link DerivedFieldScript} instances. + * + * @opensearch.internal + */ + public interface LeafFactory { + DerivedFieldScript newInstance(LeafReaderContext ctx) throws IOException; + } + + /** + * A factory to construct stateful {@link DerivedFieldScript} factories for a specific index. 
+ * + * @opensearch.internal + */ + public interface Factory extends ScriptFactory { + LeafFactory newFactory(Map params, SearchLookup lookup); + } +} diff --git a/server/src/main/java/org/opensearch/script/ScriptModule.java b/server/src/main/java/org/opensearch/script/ScriptModule.java index a192e9553016b..c83a6e64d53eb 100644 --- a/server/src/main/java/org/opensearch/script/ScriptModule.java +++ b/server/src/main/java/org/opensearch/script/ScriptModule.java @@ -78,7 +78,8 @@ public class ScriptModule { ScriptedMetricAggContexts.MapScript.CONTEXT, ScriptedMetricAggContexts.CombineScript.CONTEXT, ScriptedMetricAggContexts.ReduceScript.CONTEXT, - IntervalFilterScript.CONTEXT + IntervalFilterScript.CONTEXT, + DerivedFieldScript.CONTEXT ).collect(Collectors.toMap(c -> c.name, Function.identity())); } diff --git a/server/src/test/java/org/opensearch/index/query/DerivedFieldQueryTests.java b/server/src/test/java/org/opensearch/index/query/DerivedFieldQueryTests.java new file mode 100644 index 0000000000000..18d117fa8c0f5 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/query/DerivedFieldQueryTests.java @@ -0,0 +1,104 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.query; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.KeywordField; +import org.apache.lucene.document.TextField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.store.Directory; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.index.mapper.DerivedFieldValueFetcher; +import org.opensearch.script.DerivedFieldScript; +import org.opensearch.script.Script; +import org.opensearch.search.lookup.LeafSearchLookup; +import org.opensearch.search.lookup.SearchLookup; +import org.opensearch.search.lookup.SourceLookup; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class DerivedFieldQueryTests extends OpenSearchTestCase { + + private static final String[][] raw_requests = new String[][] { + { "40.135.0.0 GET /images/hm_bg.jpg HTTP/1.0", "200", "40.135.0.0" }, + { "232.0.0.0 GET /images/hm_bg.jpg HTTP/1.0", "400", "232.0.0.0" }, + { "26.1.0.0 GET /images/hm_bg.jpg HTTP/1.0", "200", "26.1.0.0" }, + { "247.37.0.0 GET /french/splash_inet.html HTTP/1.0", "400", "247.37.0.0" }, + { "247.37.0.0 GET /french/splash_inet.html HTTP/1.0", "400", "247.37.0.0" } }; + + public void testDerivedField() throws IOException { + // Create lucene documents + List docs = new ArrayList<>(); + for (String[] request : raw_requests) { + Document document = new Document(); + document.add(new TextField("raw_request", request[0], Field.Store.YES)); + document.add(new KeywordField("status", request[1], Field.Store.YES)); + docs.add(document); + } + + // Mock SearchLookup + SearchLookup searchLookup = mock(SearchLookup.class); + SourceLookup sourceLookup = new 
SourceLookup(); + LeafSearchLookup leafLookup = mock(LeafSearchLookup.class); + when(leafLookup.source()).thenReturn(sourceLookup); + + // Mock DerivedFieldScript.Factory + DerivedFieldScript.Factory factory = (params, lookup) -> (DerivedFieldScript.LeafFactory) ctx -> { + when(searchLookup.getLeafSearchLookup(ctx)).thenReturn(leafLookup); + return new DerivedFieldScript(params, lookup, ctx) { + @Override + public Object execute() { + return raw_requests[sourceLookup.docId()][2]; + } + }; + }; + + // Create ValueFetcher from mocked DerivedFieldScript.Factory + DerivedFieldScript.LeafFactory leafFactory = factory.newFactory((new Script("")).getParams(), searchLookup); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(leafFactory); + + // Create DerivedFieldQuery + DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( + new TermQuery(new Term("ip_from_raw_request", "247.37.0.0")), + valueFetcher, + searchLookup, + (o -> new KeywordField("ip_from_raw_request", (String) o, Field.Store.NO)), + Lucene.STANDARD_ANALYZER + ); + + // Index and Search + + try (Directory dir = newDirectory()) { + IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); + for (Document d : docs) { + iw.addDocument(d); + } + try (IndexReader reader = DirectoryReader.open(iw)) { + iw.close(); + IndexSearcher searcher = new IndexSearcher(reader); + TopDocs topDocs = searcher.search(derivedFieldQuery, 10); + assertEquals(2, topDocs.totalHits.value); + } + } + } +} diff --git a/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java b/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java index 83b245a1bcecb..8c7e9718eb0cd 100644 --- a/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java +++ b/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java @@ -281,7 +281,22 @@ public double execute(Map params1, double[] values) { } else if (context.instanceClazz.equals(IntervalFilterScript.class)) { IntervalFilterScript.Factory factory = mockCompiled::createIntervalFilterScript; return context.factoryClazz.cast(factory); + } else if (context.instanceClazz.equals(DerivedFieldScript.class)) { + DerivedFieldScript.Factory factory = (derivedFieldsParams, lookup) -> ctx -> new DerivedFieldScript( + derivedFieldsParams, + lookup, + ctx + ) { + @Override + public Object execute() { + Map vars = new HashMap<>(derivedFieldsParams); + vars.put("params", derivedFieldsParams); + return script.apply(vars); + } + }; + return context.factoryClazz.cast(factory); } + ContextCompiler compiler = contexts.get(context); if (compiler != null) { return context.factoryClazz.cast(compiler.compile(script::apply, params)); From 12f1487cf8e3d28fa0ec99aba435099aeba904ad Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Mon, 25 Mar 2024 16:06:45 -0400 Subject: [PATCH 37/74] Fix issue with feature flags where default value may not be honored (#12849) * Fix issue with feature flags passed as system props where default value was not being honored Signed-off-by: Craig Perkins * Add CHANGELOG entry Signed-off-by: Craig Perkins * Add test for default value of false Signed-off-by: Craig Perkins * Fix issue when empty settings passed in initialization Signed-off-by: Craig Perkins * Get actual value from settings and default from ff setting Signed-off-by: Craig Perkins * Add test with non-empty setting initialization Signed-off-by: Craig Perkins --------- Signed-off-by: Craig Perkins Signed-off-by: Craig Perkins --- CHANGELOG.md | 1 + 
.../opensearch/common/util/FeatureFlags.java | 82 ++++++++++++------- .../common/util/FeatureFlagTests.java | 34 ++++++++ 3 files changed, 89 insertions(+), 28 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 882a46d60a191..21b66d3efc5bd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -114,6 +114,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Removed ### Fixed +- Fix issue with feature flags where default value may not be honored ([#12849](https://github.com/opensearch-project/OpenSearch/pull/12849)) ### Security diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index 8633cf1fe25ea..bdfce72d106d3 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -12,9 +12,11 @@ import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; +import java.util.List; + /** * Utility class to manage feature flags. Feature flags are system properties that must be set on the JVM. - * These are used to gate the visibility/availability of incomplete features. Fore more information, see + * These are used to gate the visibility/availability of incomplete features. For more information, see * https://featureflags.io/feature-flag-introduction/ * * @opensearch.internal @@ -65,11 +67,54 @@ public class FeatureFlags { */ public static final String PLUGGABLE_CACHE = "opensearch.experimental.feature.pluggable.caching.enabled"; + public static final Setting REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING = Setting.boolSetting( + REMOTE_STORE_MIGRATION_EXPERIMENTAL, + false, + Property.NodeScope + ); + + public static final Setting EXTENSIONS_SETTING = Setting.boolSetting(EXTENSIONS, false, Property.NodeScope); + + public static final Setting IDENTITY_SETTING = Setting.boolSetting(IDENTITY, false, Property.NodeScope); + + public static final Setting TELEMETRY_SETTING = Setting.boolSetting(TELEMETRY, false, Property.NodeScope); + + public static final Setting DATETIME_FORMATTER_CACHING_SETTING = Setting.boolSetting( + DATETIME_FORMATTER_CACHING, + true, + Property.NodeScope + ); + + public static final Setting WRITEABLE_REMOTE_INDEX_SETTING = Setting.boolSetting( + WRITEABLE_REMOTE_INDEX, + false, + Property.NodeScope + ); + + public static final Setting PLUGGABLE_CACHE_SETTING = Setting.boolSetting(PLUGGABLE_CACHE, false, Property.NodeScope); + + private static final List> ALL_FEATURE_FLAG_SETTINGS = List.of( + REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING, + EXTENSIONS_SETTING, + IDENTITY_SETTING, + TELEMETRY_SETTING, + DATETIME_FORMATTER_CACHING_SETTING, + WRITEABLE_REMOTE_INDEX_SETTING, + PLUGGABLE_CACHE_SETTING + ); /** * Should store the settings from opensearch.yml. */ private static Settings settings; + static { + Settings.Builder settingsBuilder = Settings.builder(); + for (Setting ffSetting : ALL_FEATURE_FLAG_SETTINGS) { + settingsBuilder = settingsBuilder.put(ffSetting.getKey(), ffSetting.getDefault(Settings.EMPTY)); + } + settings = settingsBuilder.build(); + } + /** * This method is responsible to map settings from opensearch.yml to local stored * settings value. That is used for the existing isEnabled method. @@ -77,7 +122,14 @@ public class FeatureFlags { * @param openSearchSettings The settings stored in opensearch.yml. 
*/ public static void initializeFeatureFlags(Settings openSearchSettings) { - settings = openSearchSettings; + Settings.Builder settingsBuilder = Settings.builder(); + for (Setting ffSetting : ALL_FEATURE_FLAG_SETTINGS) { + settingsBuilder = settingsBuilder.put( + ffSetting.getKey(), + openSearchSettings.getAsBoolean(ffSetting.getKey(), ffSetting.getDefault(openSearchSettings)) + ); + } + settings = settingsBuilder.build(); } /** @@ -103,30 +155,4 @@ public static boolean isEnabled(Setting featureFlag) { return featureFlag.getDefault(Settings.EMPTY); } } - - public static final Setting REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING = Setting.boolSetting( - REMOTE_STORE_MIGRATION_EXPERIMENTAL, - false, - Property.NodeScope - ); - - public static final Setting EXTENSIONS_SETTING = Setting.boolSetting(EXTENSIONS, false, Property.NodeScope); - - public static final Setting IDENTITY_SETTING = Setting.boolSetting(IDENTITY, false, Property.NodeScope); - - public static final Setting TELEMETRY_SETTING = Setting.boolSetting(TELEMETRY, false, Property.NodeScope); - - public static final Setting DATETIME_FORMATTER_CACHING_SETTING = Setting.boolSetting( - DATETIME_FORMATTER_CACHING, - true, - Property.NodeScope - ); - - public static final Setting WRITEABLE_REMOTE_INDEX_SETTING = Setting.boolSetting( - WRITEABLE_REMOTE_INDEX, - false, - Property.NodeScope - ); - - public static final Setting PLUGGABLE_CACHE_SETTING = Setting.boolSetting(PLUGGABLE_CACHE, false, Property.NodeScope); } diff --git a/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java b/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java index f175308482b15..88cb3782252b7 100644 --- a/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java +++ b/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java @@ -8,9 +8,14 @@ package org.opensearch.common.util; +import org.opensearch.common.settings.Settings; import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchTestCase; +import static org.opensearch.common.util.FeatureFlags.DATETIME_FORMATTER_CACHING; +import static org.opensearch.common.util.FeatureFlags.EXTENSIONS; +import static org.opensearch.common.util.FeatureFlags.IDENTITY; + public class FeatureFlagTests extends OpenSearchTestCase { private final String FLAG_PREFIX = "opensearch.experimental.feature."; @@ -33,4 +38,33 @@ public void testNonBooleanFeatureFlag() { assertNotNull(System.getProperty(javaVersionProperty)); assertFalse(FeatureFlags.isEnabled(javaVersionProperty)); } + + public void testBooleanFeatureFlagWithDefaultSetToTrue() { + final String testFlag = DATETIME_FORMATTER_CACHING; + assertNotNull(testFlag); + assertTrue(FeatureFlags.isEnabled(testFlag)); + } + + public void testBooleanFeatureFlagWithDefaultSetToFalse() { + final String testFlag = IDENTITY; + FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + assertNotNull(testFlag); + assertFalse(FeatureFlags.isEnabled(testFlag)); + } + + public void testBooleanFeatureFlagInitializedWithEmptySettingsAndDefaultSetToTrue() { + final String testFlag = DATETIME_FORMATTER_CACHING; + FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + assertNotNull(testFlag); + assertTrue(FeatureFlags.isEnabled(testFlag)); + } + + public void testInitializeFeatureFlagsWithExperimentalSettings() { + FeatureFlags.initializeFeatureFlags(Settings.builder().put(IDENTITY, true).build()); + assertTrue(FeatureFlags.isEnabled(IDENTITY)); + assertTrue(FeatureFlags.isEnabled(DATETIME_FORMATTER_CACHING)); + 
assertFalse(FeatureFlags.isEnabled(EXTENSIONS)); + // reset FeatureFlags to defaults + FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + } } From 7e99cac03872a5b0d8a8f68a572e34f55edb349f Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 25 Mar 2024 16:31:40 -0400 Subject: [PATCH 38/74] Bump asm from 9.6 to 9.7 (#12908) Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + buildSrc/version.properties | 2 +- modules/lang-expression/licenses/asm-9.6.jar.sha1 | 1 - modules/lang-expression/licenses/asm-9.7.jar.sha1 | 1 + modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 | 1 - modules/lang-expression/licenses/asm-commons-9.7.jar.sha1 | 1 + modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 | 1 - modules/lang-expression/licenses/asm-tree-9.7.jar.sha1 | 1 + modules/lang-painless/licenses/asm-9.6.jar.sha1 | 1 - modules/lang-painless/licenses/asm-9.7.jar.sha1 | 1 + modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 | 1 - modules/lang-painless/licenses/asm-analysis-9.7.jar.sha1 | 1 + modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 | 1 - modules/lang-painless/licenses/asm-commons-9.7.jar.sha1 | 1 + modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 | 1 - modules/lang-painless/licenses/asm-tree-9.7.jar.sha1 | 1 + modules/lang-painless/licenses/asm-util-9.6.jar.sha1 | 1 - modules/lang-painless/licenses/asm-util-9.7.jar.sha1 | 1 + 18 files changed, 10 insertions(+), 9 deletions(-) delete mode 100644 modules/lang-expression/licenses/asm-9.6.jar.sha1 create mode 100644 modules/lang-expression/licenses/asm-9.7.jar.sha1 delete mode 100644 modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 create mode 100644 modules/lang-expression/licenses/asm-commons-9.7.jar.sha1 delete mode 100644 modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 create mode 100644 modules/lang-expression/licenses/asm-tree-9.7.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-9.6.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-9.7.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-analysis-9.7.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-commons-9.7.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-tree-9.7.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-util-9.6.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-util-9.7.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 21b66d3efc5bd..11772b7b79ef7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -105,6 +105,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Added ### Dependencies +- Bump `asm` from 9.6 to 9.7 ([#12908](https://github.com/opensearch-project/OpenSearch/pull/12908)) ### Changed - [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 8705588babe97..2a9fdb7cdddaf 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -14,7 +14,7 @@ icu4j = 70.1 supercsv = 2.4.0 log4j = 2.21.0 slf4j = 1.7.36 -asm = 9.6 +asm = 9.7 jettison = 1.5.4 woodstox = 6.4.0 kotlin = 1.7.10 diff --git a/modules/lang-expression/licenses/asm-9.6.jar.sha1 b/modules/lang-expression/licenses/asm-9.6.jar.sha1 
deleted file mode 100644 index 2d9e6a9d3cfd6..0000000000000 --- a/modules/lang-expression/licenses/asm-9.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -aa205cf0a06dbd8e04ece91c0b37c3f5d567546a \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-9.7.jar.sha1 b/modules/lang-expression/licenses/asm-9.7.jar.sha1 new file mode 100644 index 0000000000000..84c9a9703af6d --- /dev/null +++ b/modules/lang-expression/licenses/asm-9.7.jar.sha1 @@ -0,0 +1 @@ +073d7b3086e14beb604ced229c302feff6449723 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 b/modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 deleted file mode 100644 index a0814f495771f..0000000000000 --- a/modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f1a9e5508eff490744144565c47326c8648be309 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-commons-9.7.jar.sha1 b/modules/lang-expression/licenses/asm-commons-9.7.jar.sha1 new file mode 100644 index 0000000000000..1de4404e7d5d0 --- /dev/null +++ b/modules/lang-expression/licenses/asm-commons-9.7.jar.sha1 @@ -0,0 +1 @@ +e86dda4696d3c185fcc95d8d311904e7ce38a53f \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 b/modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 deleted file mode 100644 index 101eb03b4b736..0000000000000 --- a/modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c0cdda9d211e965d2a4448aa3fd86110f2f8c2de \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-tree-9.7.jar.sha1 b/modules/lang-expression/licenses/asm-tree-9.7.jar.sha1 new file mode 100644 index 0000000000000..d4eeef6151272 --- /dev/null +++ b/modules/lang-expression/licenses/asm-tree-9.7.jar.sha1 @@ -0,0 +1 @@ +e446a17b175bfb733b87c5c2560ccb4e57d69f1a \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-9.6.jar.sha1 deleted file mode 100644 index 2d9e6a9d3cfd6..0000000000000 --- a/modules/lang-painless/licenses/asm-9.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -aa205cf0a06dbd8e04ece91c0b37c3f5d567546a \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-9.7.jar.sha1 b/modules/lang-painless/licenses/asm-9.7.jar.sha1 new file mode 100644 index 0000000000000..84c9a9703af6d --- /dev/null +++ b/modules/lang-painless/licenses/asm-9.7.jar.sha1 @@ -0,0 +1 @@ +073d7b3086e14beb604ced229c302feff6449723 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 deleted file mode 100644 index fa42ea1198165..0000000000000 --- a/modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9ce6c7b174bd997fc2552dff47964546bd7a5ec3 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-analysis-9.7.jar.sha1 b/modules/lang-painless/licenses/asm-analysis-9.7.jar.sha1 new file mode 100644 index 0000000000000..c7687adfeb990 --- /dev/null +++ b/modules/lang-painless/licenses/asm-analysis-9.7.jar.sha1 @@ -0,0 +1 @@ +e4a258b7eb96107106c0599f0061cfc1832fe07a \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 deleted file mode 100644 index a0814f495771f..0000000000000 --- a/modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-f1a9e5508eff490744144565c47326c8648be309 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-commons-9.7.jar.sha1 b/modules/lang-painless/licenses/asm-commons-9.7.jar.sha1 new file mode 100644 index 0000000000000..1de4404e7d5d0 --- /dev/null +++ b/modules/lang-painless/licenses/asm-commons-9.7.jar.sha1 @@ -0,0 +1 @@ +e86dda4696d3c185fcc95d8d311904e7ce38a53f \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 deleted file mode 100644 index 101eb03b4b736..0000000000000 --- a/modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c0cdda9d211e965d2a4448aa3fd86110f2f8c2de \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-tree-9.7.jar.sha1 b/modules/lang-painless/licenses/asm-tree-9.7.jar.sha1 new file mode 100644 index 0000000000000..d4eeef6151272 --- /dev/null +++ b/modules/lang-painless/licenses/asm-tree-9.7.jar.sha1 @@ -0,0 +1 @@ +e446a17b175bfb733b87c5c2560ccb4e57d69f1a \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-util-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-util-9.6.jar.sha1 deleted file mode 100644 index 1f42ac62dc69c..0000000000000 --- a/modules/lang-painless/licenses/asm-util-9.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f77caf84eb93786a749b2baa40865b9613e3eaee \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-util-9.7.jar.sha1 b/modules/lang-painless/licenses/asm-util-9.7.jar.sha1 new file mode 100644 index 0000000000000..37c0d27efe46f --- /dev/null +++ b/modules/lang-painless/licenses/asm-util-9.7.jar.sha1 @@ -0,0 +1 @@ +c0655519f24d92af2202cb681cd7c1569df6ead6 \ No newline at end of file From 380ba0ebaade0e69fb1cf16306fede0213c60862 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Mar 2024 19:57:33 -0400 Subject: [PATCH 39/74] Bump org.apache.commons:commons-configuration2 from 2.10.0 to 2.10.1 in /plugins/repository-hdfs (#12896) * Bump org.apache.commons:commons-configuration2 Bumps org.apache.commons:commons-configuration2 from 2.10.0 to 2.10.1. --- updated-dependencies: - dependency-name: org.apache.commons:commons-configuration2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Signed-off-by: Peter Nied Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Peter Nied --- CHANGELOG.md | 1 + plugins/repository-hdfs/build.gradle | 2 +- .../licenses/commons-configuration2-2.10.0.jar.sha1 | 1 - .../licenses/commons-configuration2-2.10.1.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/commons-configuration2-2.10.0.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/commons-configuration2-2.10.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 11772b7b79ef7..17a5c2b538bc8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -105,6 +105,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Added ### Dependencies +- Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896)) - Bump `asm` from 9.6 to 9.7 ([#12908](https://github.com/opensearch-project/OpenSearch/pull/12908)) ### Changed diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index c1f94320f2681..af07fa9e5d80b 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -74,7 +74,7 @@ dependencies { api "commons-codec:commons-codec:${versions.commonscodec}" api 'commons-collections:commons-collections:3.2.2' api "org.apache.commons:commons-compress:${versions.commonscompress}" - api 'org.apache.commons:commons-configuration2:2.10.0' + api 'org.apache.commons:commons-configuration2:2.10.1' api 'commons-io:commons-io:2.15.1' api 'org.apache.commons:commons-lang3:3.14.0' implementation 'com.google.re2j:re2j:1.7' diff --git a/plugins/repository-hdfs/licenses/commons-configuration2-2.10.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-configuration2-2.10.0.jar.sha1 deleted file mode 100644 index 17d1b64781e5b..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-configuration2-2.10.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2b93eff3c83e5372262ed4996b609336305a810f \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-configuration2-2.10.1.jar.sha1 b/plugins/repository-hdfs/licenses/commons-configuration2-2.10.1.jar.sha1 new file mode 100644 index 0000000000000..d4c0f8417d357 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-configuration2-2.10.1.jar.sha1 @@ -0,0 +1 @@ +2b681b3bcddeaa5bf5c2a2939cd77e2f9ad6efda \ No newline at end of file From 9aa7018690aefcac7588d8b0947a1c0f11be7a5a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Mar 2024 19:58:05 -0400 Subject: [PATCH 40/74] Bump net.minidev:json-smart from 2.5.0 to 2.5.1 in /test/fixtures/hdfs-fixture (#12893) * Bump net.minidev:json-smart in /test/fixtures/hdfs-fixture Bumps [net.minidev:json-smart](https://github.com/netplex/json-smart-v2) from 2.5.0 to 2.5.1. - [Release notes](https://github.com/netplex/json-smart-v2/releases) - [Commits](https://github.com/netplex/json-smart-v2/compare/2.5.0...2.5.1) --- updated-dependencies: - dependency-name: net.minidev:json-smart dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Signed-off-by: Peter Nied Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Peter Nied --- CHANGELOG.md | 1 + test/fixtures/hdfs-fixture/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 17a5c2b538bc8..a60786b7cbcd7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -107,6 +107,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Dependencies - Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896)) - Bump `asm` from 9.6 to 9.7 ([#12908](https://github.com/opensearch-project/OpenSearch/pull/12908)) +- Bump `net.minidev:json-smart` from 2.5.0 to 2.5.1 ([#12893](https://github.com/opensearch-project/OpenSearch/pull/12893)) ### Changed - [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872)) diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 0ca4797bfeff1..a6275f200217a 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -62,7 +62,7 @@ dependencies { api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" - api 'net.minidev:json-smart:2.5.0' + api 'net.minidev:json-smart:2.5.1' api "org.mockito:mockito-core:${versions.mockito}" api "com.google.protobuf:protobuf-java:${versions.protobuf}" api "org.jetbrains.kotlin:kotlin-stdlib:${versions.kotlin}" From 3907ec9ba25cf620e0a7c465d351d21a4ea1c76a Mon Sep 17 00:00:00 2001 From: Ashish Date: Tue, 26 Mar 2024 09:03:44 +0530 Subject: [PATCH 41/74] Refactor remote store flow to support any path type (#12822) Signed-off-by: Ashish Singh --- .../metadata/MetadataCreateIndexService.java | 24 ++-- .../org/opensearch/index/IndexService.java | 3 +- .../org/opensearch/index/IndexSettings.java | 9 ++ .../index/remote/RemoteStoreDataEnums.java | 65 ++++++++++ .../index/remote/RemoteStorePathType.java | 43 ++++++- ....java => RemoteStorePathTypeResolver.java} | 17 ++- .../opensearch/index/shard/IndexShard.java | 11 +- .../opensearch/index/shard/StoreRecovery.java | 4 +- .../store/RemoteSegmentStoreDirectory.java | 7 +- .../RemoteSegmentStoreDirectoryFactory.java | 35 ++++-- .../RemoteStoreLockManagerFactory.java | 38 ++---- .../index/translog/RemoteFsTranslog.java | 47 +++++--- .../transfer/TranslogTransferManager.java | 29 +++-- .../blobstore/BlobStoreRepository.java | 16 ++- .../opensearch/snapshots/RestoreService.java | 2 +- .../remote/RemoteStorePathTypeTests.java | 111 ++++++++++++++++++ .../RemoteSegmentStoreDirectoryTests.java | 16 ++- .../RemoteStoreLockManagerFactoryTests.java | 9 +- .../index/translog/RemoteFsTranslogTests.java | 4 +- .../TranslogTransferManagerTests.java | 38 ++++-- 20 files changed, 418 insertions(+), 110 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/remote/RemoteStoreDataEnums.java rename server/src/main/java/org/opensearch/index/remote/{RemoteStorePathResolver.java => RemoteStorePathTypeResolver.java} (50%) create mode 100644 
server/src/test/java/org/opensearch/index/remote/RemoteStorePathTypeTests.java diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index f6a14d8ec9d63..aee0473be95eb 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -88,8 +88,8 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.MapperService.MergeReason; import org.opensearch.index.query.QueryShardContext; -import org.opensearch.index.remote.RemoteStorePathResolver; import org.opensearch.index.remote.RemoteStorePathType; +import org.opensearch.index.remote.RemoteStorePathTypeResolver; import org.opensearch.index.shard.IndexSettingProvider; import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndexCreationException; @@ -113,6 +113,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; @@ -170,7 +171,7 @@ public class MetadataCreateIndexService { private AwarenessReplicaBalance awarenessReplicaBalance; @Nullable - private final RemoteStorePathResolver remoteStorePathResolver; + private final RemoteStorePathTypeResolver remoteStorePathTypeResolver; public MetadataCreateIndexService( final Settings settings, @@ -203,8 +204,8 @@ public MetadataCreateIndexService( // Task is onboarded for throttling, it will get retried from associated TransportClusterManagerNodeAction. createIndexTaskKey = clusterService.registerClusterManagerTask(ClusterManagerTaskKeys.CREATE_INDEX_KEY, true); - remoteStorePathResolver = isRemoteDataAttributePresent(settings) - ? new RemoteStorePathResolver(clusterService.getClusterSettings()) + remoteStorePathTypeResolver = isRemoteDataAttributePresent(settings) + ? new RemoteStorePathTypeResolver(clusterService.getClusterSettings()) : null; } @@ -553,7 +554,7 @@ IndexMetadata buildAndValidateTemporaryIndexMetadata( tmpImdBuilder.setRoutingNumShards(routingNumShards); tmpImdBuilder.settings(indexSettings); tmpImdBuilder.system(isSystem); - addRemoteCustomData(tmpImdBuilder); + addRemoteStorePathTypeInCustomData(tmpImdBuilder, true); // Set up everything, now locally create the index to see that things are ok, and apply IndexMetadata tempMetadata = tmpImdBuilder.build(); @@ -562,8 +563,14 @@ IndexMetadata buildAndValidateTemporaryIndexMetadata( return tempMetadata; } - public void addRemoteCustomData(IndexMetadata.Builder tmpImdBuilder) { - if (remoteStorePathResolver != null) { + /** + * Adds the remote store path type information in custom data of index metadata. + * + * @param tmpImdBuilder index metadata builder. + * @param assertNullOldType flag to verify that the old remote store path type is null + */ + public void addRemoteStorePathTypeInCustomData(IndexMetadata.Builder tmpImdBuilder, boolean assertNullOldType) { + if (remoteStorePathTypeResolver != null) { // It is possible that remote custom data exists already. In such cases, we need to only update the path type // in the remote store custom data map. Map existingRemoteCustomData = tmpImdBuilder.removeCustom(IndexMetadata.REMOTE_STORE_CUSTOM_KEY); @@ -571,8 +578,9 @@ public void addRemoteCustomData(IndexMetadata.Builder tmpImdBuilder) { ? 
new HashMap<>() : new HashMap<>(existingRemoteCustomData); // Determine the path type for use using the remoteStorePathResolver. - String newPathType = remoteStorePathResolver.resolveType().toString(); + String newPathType = remoteStorePathTypeResolver.getType().toString(); String oldPathType = remoteCustomData.put(RemoteStorePathType.NAME, newPathType); + assert !assertNullOldType || Objects.isNull(oldPathType); logger.trace(() -> new ParameterizedMessage("Added new path type {}, replaced old path type {}", newPathType, oldPathType)); tmpImdBuilder.putCustom(IndexMetadata.REMOTE_STORE_CUSTOM_KEY, remoteCustomData); } diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index 11dc4474cfa42..109bda65b1fd8 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -507,7 +507,8 @@ public synchronized IndexShard createShard( remoteDirectory = ((RemoteSegmentStoreDirectoryFactory) remoteDirectoryFactory).newDirectory( RemoteStoreNodeAttribute.getRemoteStoreSegmentRepo(this.indexSettings.getNodeSettings()), this.indexSettings.getUUID(), - shardId + shardId, + this.indexSettings.getRemoteStorePathType() ); } remoteStore = new Store(shardId, this.indexSettings, remoteDirectory, lock, Store.OnClose.EMPTY, path); diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 7e49726c259cb..7e3d812974c79 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -48,6 +48,7 @@ import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.Index; +import org.opensearch.index.remote.RemoteStorePathType; import org.opensearch.index.translog.Translog; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.ingest.IngestService; @@ -59,6 +60,7 @@ import java.util.Collections; import java.util.List; import java.util.Locale; +import java.util.Map; import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; @@ -1905,4 +1907,11 @@ public double getDocIdFuzzySetFalsePositiveProbability() { public void setDocIdFuzzySetFalsePositiveProbability(double docIdFuzzySetFalsePositiveProbability) { this.docIdFuzzySetFalsePositiveProbability = docIdFuzzySetFalsePositiveProbability; } + + public RemoteStorePathType getRemoteStorePathType() { + Map remoteCustomData = indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY); + return remoteCustomData != null && remoteCustomData.containsKey(RemoteStorePathType.NAME) + ? RemoteStorePathType.parseString(remoteCustomData.get(RemoteStorePathType.NAME)) + : RemoteStorePathType.FIXED; + } } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreDataEnums.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreDataEnums.java new file mode 100644 index 0000000000000..1afd73bf1f1b3 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreDataEnums.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.remote; + +import java.util.Set; + +import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.DATA; +import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.METADATA; + +/** + * This class contains the different enums related to remote store data categories and types. + * + * @opensearch.internal + */ +public class RemoteStoreDataEnums { + + /** + * Categories of the data in Remote store. + */ + public enum DataCategory { + SEGMENTS("segments", Set.of(DataType.values())), + TRANSLOG("translog", Set.of(DATA, METADATA)); + + private final String name; + private final Set supportedDataTypes; + + DataCategory(String name, Set supportedDataTypes) { + this.name = name; + this.supportedDataTypes = supportedDataTypes; + } + + public boolean isSupportedDataType(DataType dataType) { + return supportedDataTypes.contains(dataType); + } + + public String getName() { + return name; + } + } + + /** + * Types of data in remote store. + */ + public enum DataType { + DATA("data"), + METADATA("metadata"), + LOCK_FILES("lock_files"); + + private final String name; + + DataType(String name) { + this.name = name; + } + + public String getName() { + return name; + } + } +} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java index a64e07ab1f66f..d7d7a8cdfd644 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java @@ -8,6 +8,10 @@ package org.opensearch.index.remote; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory; +import org.opensearch.index.remote.RemoteStoreDataEnums.DataType; + import java.util.Locale; /** @@ -18,13 +22,46 @@ */ public enum RemoteStorePathType { - FIXED, - HASHED_PREFIX; + FIXED { + @Override + public BlobPath generatePath(BlobPath basePath, String indexUUID, String shardId, String dataCategory, String dataType) { + return basePath.add(indexUUID).add(shardId).add(dataCategory).add(dataType); + } + }, + HASHED_PREFIX { + @Override + public BlobPath generatePath(BlobPath basePath, String indexUUID, String shardId, String dataCategory, String dataType) { + // TODO - We need to implement this, keeping the same path as Fixed for sake of multiple tests that can fail otherwise. + // throw new UnsupportedOperationException("Not implemented"); --> Not using this for unblocking couple of tests. + return basePath.add(indexUUID).add(shardId).add(dataCategory).add(dataType); + } + }; + + /** + * @param basePath base path of the underlying blob store repository + * @param indexUUID of the index + * @param shardId shard id + * @param dataCategory is either translog or segment + * @param dataType can be one of data, metadata or lock_files. + * @return the blob path for the underlying remote store path type. 
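+ * For the FIXED type this resolves to basePath/indexUUID/shardId/dataCategory/dataType.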
+ */ + public BlobPath path(BlobPath basePath, String indexUUID, String shardId, DataCategory dataCategory, DataType dataType) { + assert dataCategory.isSupportedDataType(dataType) : "category:" + + dataCategory + + " type:" + + dataType + + " are not supported together"; + return generatePath(basePath, indexUUID, shardId, dataCategory.getName(), dataType.getName()); + } + + abstract BlobPath generatePath(BlobPath basePath, String indexUUID, String shardId, String dataCategory, String dataType); public static RemoteStorePathType parseString(String remoteStoreBlobPathType) { try { return RemoteStorePathType.valueOf(remoteStoreBlobPathType.toUpperCase(Locale.ROOT)); - } catch (IllegalArgumentException e) { + } catch (IllegalArgumentException | NullPointerException e) { + // IllegalArgumentException is thrown when the input does not match any enum name + // NullPointerException is thrown when the input is null throw new IllegalArgumentException("Could not parse RemoteStorePathType for [" + remoteStoreBlobPathType + "]"); } } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathResolver.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathTypeResolver.java similarity index 50% rename from server/src/main/java/org/opensearch/index/remote/RemoteStorePathResolver.java rename to server/src/main/java/org/opensearch/index/remote/RemoteStorePathTypeResolver.java index 6e8126fcce0ca..5d014c9862d45 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathResolver.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathTypeResolver.java @@ -16,15 +16,20 @@ * * @opensearch.internal */ -public class RemoteStorePathResolver { +public class RemoteStorePathTypeResolver { - private final ClusterSettings clusterSettings; + private volatile RemoteStorePathType type; - public RemoteStorePathResolver(ClusterSettings clusterSettings) { - this.clusterSettings = clusterSettings; + public RemoteStorePathTypeResolver(ClusterSettings clusterSettings) { + type = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING); + clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING, this::setType); } - public RemoteStorePathType resolveType() { - return clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING); + public RemoteStorePathType getType() { + return type; + } + + public void setType(RemoteStorePathType type) { + this.type = type; } } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 72ce858661031..1d7aa6ac4958b 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -4932,7 +4932,7 @@ public void deleteTranslogFilesFromRemoteTranslog() throws IOException { TranslogFactory translogFactory = translogFactorySupplier.apply(indexSettings, shardRouting); assert translogFactory instanceof RemoteBlobStoreInternalTranslogFactory; Repository repository = ((RemoteBlobStoreInternalTranslogFactory) translogFactory).getRepository(); - RemoteFsTranslog.cleanup(repository, shardId, getThreadPool()); + RemoteFsTranslog.cleanup(repository, shardId, getThreadPool(), indexSettings.getRemoteStorePathType()); } /* @@ -4949,7 +4949,14 @@ public void syncTranslogFilesFromRemoteTranslog() throws IOException { TranslogFactory translogFactory = 
translogFactorySupplier.apply(indexSettings, shardRouting); assert translogFactory instanceof RemoteBlobStoreInternalTranslogFactory; Repository repository = ((RemoteBlobStoreInternalTranslogFactory) translogFactory).getRepository(); - RemoteFsTranslog.download(repository, shardId, getThreadPool(), shardPath().resolveTranslog(), logger); + RemoteFsTranslog.download( + repository, + shardId, + getThreadPool(), + shardPath().resolveTranslog(), + indexSettings.getRemoteStorePathType(), + logger + ); } /** diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index 5f09b1a0802f3..9779a2320d79f 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -58,6 +58,7 @@ import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.EngineException; import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.remote.RemoteStorePathType; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.snapshots.IndexShardRestoreFailedException; import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; @@ -409,7 +410,8 @@ void recoverFromSnapshotAndRemoteStore( RemoteSegmentStoreDirectory sourceRemoteDirectory = (RemoteSegmentStoreDirectory) directoryFactory.newDirectory( remoteStoreRepository, indexUUID, - shardId + shardId, + RemoteStorePathType.FIXED // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot ); sourceRemoteDirectory.initializeToSpecificCommit( primaryTerm, diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index c9a238c6e3350..999625e0e579f 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -31,6 +31,7 @@ import org.opensearch.common.lucene.store.ByteArrayIndexInput; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.remote.RemoteStorePathType; import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.index.store.lockmanager.FileLockInfo; import org.opensearch.index.store.lockmanager.RemoteStoreCommitLevelLockManager; @@ -897,13 +898,15 @@ public static void remoteDirectoryCleanup( RemoteSegmentStoreDirectoryFactory remoteDirectoryFactory, String remoteStoreRepoForIndex, String indexUUID, - ShardId shardId + ShardId shardId, + RemoteStorePathType pathType ) { try { RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = (RemoteSegmentStoreDirectory) remoteDirectoryFactory.newDirectory( remoteStoreRepoForIndex, indexUUID, - shardId + shardId, + pathType ); remoteSegmentStoreDirectory.deleteStaleSegments(0); remoteSegmentStoreDirectory.deleteIfEmpty(); diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java index d6d3f1fca833c..f0ecd96bcf1f7 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java @@ -13,6 +13,7 @@ import org.opensearch.common.blobstore.BlobPath; import 
org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; +import org.opensearch.index.remote.RemoteStorePathType; import org.opensearch.index.shard.ShardPath; import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; import org.opensearch.index.store.lockmanager.RemoteStoreLockManagerFactory; @@ -24,8 +25,13 @@ import org.opensearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.Objects; import java.util.function.Supplier; +import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.DATA; +import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.METADATA; + /** * Factory for a remote store directory * @@ -33,8 +39,6 @@ */ @PublicApi(since = "2.3.0") public class RemoteSegmentStoreDirectoryFactory implements IndexStorePlugin.DirectoryFactory { - private static final String SEGMENTS = "segments"; - private final Supplier repositoriesService; private final ThreadPool threadPool; @@ -48,29 +52,38 @@ public RemoteSegmentStoreDirectoryFactory(Supplier reposito public Directory newDirectory(IndexSettings indexSettings, ShardPath path) throws IOException { String repositoryName = indexSettings.getRemoteStoreRepository(); String indexUUID = indexSettings.getIndex().getUUID(); - return newDirectory(repositoryName, indexUUID, path.getShardId()); + return newDirectory(repositoryName, indexUUID, path.getShardId(), indexSettings.getRemoteStorePathType()); } - public Directory newDirectory(String repositoryName, String indexUUID, ShardId shardId) throws IOException { + public Directory newDirectory(String repositoryName, String indexUUID, ShardId shardId, RemoteStorePathType pathType) + throws IOException { + assert Objects.nonNull(pathType); try (Repository repository = repositoriesService.get().repository(repositoryName)) { + assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; BlobStoreRepository blobStoreRepository = ((BlobStoreRepository) repository); - BlobPath commonBlobPath = blobStoreRepository.basePath(); - commonBlobPath = commonBlobPath.add(indexUUID).add(String.valueOf(shardId.id())).add(SEGMENTS); + BlobPath repositoryBasePath = blobStoreRepository.basePath(); + String shardIdStr = String.valueOf(shardId.id()); + // Derive the path for data directory of SEGMENTS + BlobPath dataPath = pathType.path(repositoryBasePath, indexUUID, shardIdStr, SEGMENTS, DATA); RemoteDirectory dataDirectory = new RemoteDirectory( - blobStoreRepository.blobStore().blobContainer(commonBlobPath.add("data")), + blobStoreRepository.blobStore().blobContainer(dataPath), blobStoreRepository::maybeRateLimitRemoteUploadTransfers, blobStoreRepository::maybeRateLimitRemoteDownloadTransfers ); - RemoteDirectory metadataDirectory = new RemoteDirectory( - blobStoreRepository.blobStore().blobContainer(commonBlobPath.add("metadata")) - ); + + // Derive the path for metadata directory of SEGMENTS + BlobPath mdPath = pathType.path(repositoryBasePath, indexUUID, shardIdStr, SEGMENTS, METADATA); + RemoteDirectory metadataDirectory = new RemoteDirectory(blobStoreRepository.blobStore().blobContainer(mdPath)); + + // The path for lock is derived within the RemoteStoreLockManagerFactory RemoteStoreLockManager mdLockManager = RemoteStoreLockManagerFactory.newLockManager( repositoriesService.get(), repositoryName, indexUUID, - String.valueOf(shardId.id()) + shardIdStr, + pathType ); return new 
RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, mdLockManager, threadPool, shardId); diff --git a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java index 00666ada11983..c033e4f7ad0aa 100644 --- a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java +++ b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java @@ -11,15 +11,18 @@ import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.index.remote.RemoteStorePathType; import org.opensearch.index.store.RemoteBufferedOutputDirectory; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.repositories.RepositoryMissingException; import org.opensearch.repositories.blobstore.BlobStoreRepository; -import java.io.IOException; import java.util.function.Supplier; +import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.LOCK_FILES; + /** * Factory for remote store lock manager * @@ -27,34 +30,29 @@ */ @PublicApi(since = "2.8.0") public class RemoteStoreLockManagerFactory { - private static final String SEGMENTS = "segments"; - private static final String LOCK_FILES = "lock_files"; private final Supplier repositoriesService; public RemoteStoreLockManagerFactory(Supplier repositoriesService) { this.repositoriesService = repositoriesService; } - public RemoteStoreLockManager newLockManager(String repositoryName, String indexUUID, String shardId) throws IOException { - return newLockManager(repositoriesService.get(), repositoryName, indexUUID, shardId); + public RemoteStoreLockManager newLockManager(String repositoryName, String indexUUID, String shardId, RemoteStorePathType pathType) { + return newLockManager(repositoriesService.get(), repositoryName, indexUUID, shardId, pathType); } public static RemoteStoreMetadataLockManager newLockManager( RepositoriesService repositoriesService, String repositoryName, String indexUUID, - String shardId - ) throws IOException { + String shardId, + RemoteStorePathType pathType + ) { try (Repository repository = repositoriesService.repository(repositoryName)) { assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; - BlobPath shardLevelBlobPath = ((BlobStoreRepository) repository).basePath().add(indexUUID).add(shardId).add(SEGMENTS); - RemoteBufferedOutputDirectory shardMDLockDirectory = createRemoteBufferedOutputDirectory( - repository, - shardLevelBlobPath, - LOCK_FILES - ); - - return new RemoteStoreMetadataLockManager(shardMDLockDirectory); + BlobPath repositoryBasePath = ((BlobStoreRepository) repository).basePath(); + BlobPath lockDirectoryPath = pathType.path(repositoryBasePath, indexUUID, shardId, SEGMENTS, LOCK_FILES); + BlobContainer lockDirectoryBlobContainer = ((BlobStoreRepository) repository).blobStore().blobContainer(lockDirectoryPath); + return new RemoteStoreMetadataLockManager(new RemoteBufferedOutputDirectory(lockDirectoryBlobContainer)); } catch (RepositoryMissingException e) { throw new IllegalArgumentException("Repository should be present to acquire/release lock", e); } @@ -65,14 +63,4 @@ public static 
RemoteStoreMetadataLockManager newLockManager( public Supplier getRepositoriesService() { return repositoriesService; } - - private static RemoteBufferedOutputDirectory createRemoteBufferedOutputDirectory( - Repository repository, - BlobPath commonBlobPath, - String extention - ) { - BlobPath extendedPath = commonBlobPath.add(extention); - BlobContainer dataBlobContainer = ((BlobStoreRepository) repository).blobStore().blobContainer(extendedPath); - return new RemoteBufferedOutputDirectory(dataBlobContainer); - } } diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index 43eec01b2d365..f0fb03cc905a4 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.Logger; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SetOnce; +import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; import org.opensearch.common.logging.Loggers; @@ -18,6 +19,7 @@ import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.util.FileSystemUtils; +import org.opensearch.index.remote.RemoteStorePathType; import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.translog.transfer.BlobStoreTransferService; import org.opensearch.index.translog.transfer.FileTransferTracker; @@ -38,6 +40,7 @@ import java.util.HashSet; import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; @@ -47,6 +50,10 @@ import java.util.function.LongConsumer; import java.util.function.LongSupplier; +import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.TRANSLOG; +import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.DATA; +import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.METADATA; + /** * A Translog implementation which syncs local FS with a remote store * The current impl uploads translog , ckp and metadata to remote store @@ -74,7 +81,6 @@ public class RemoteFsTranslog extends Translog { private static final int REMOTE_DELETION_PERMITS = 2; private static final int DOWNLOAD_RETRIES = 2; - public static final String TRANSLOG = "translog"; // Semaphore used to allow only single remote generation to happen at a time private final Semaphore remoteGenerationDeletionPermits = new Semaphore(REMOTE_DELETION_PERMITS); @@ -106,7 +112,8 @@ public RemoteFsTranslog( threadPool, shardId, fileTransferTracker, - remoteTranslogTransferTracker + remoteTranslogTransferTracker, + indexSettings().getRemoteStorePathType() ); try { download(translogTransferManager, location, logger); @@ -150,8 +157,14 @@ RemoteTranslogTransferTracker getRemoteTranslogTracker() { return remoteTranslogTransferTracker; } - public static void download(Repository repository, ShardId shardId, ThreadPool threadPool, Path location, Logger logger) - throws IOException { + public static void download( + Repository repository, + ShardId shardId, + ThreadPool threadPool, + Path location, + RemoteStorePathType pathType, + Logger logger + ) throws IOException { assert repository instanceof BlobStoreRepository : String.format( Locale.ROOT, "%s 
repository should be instance of BlobStoreRepository", @@ -167,7 +180,8 @@ public static void download(Repository repository, ShardId shardId, ThreadPool t threadPool, shardId, fileTransferTracker, - remoteTranslogTransferTracker + remoteTranslogTransferTracker, + pathType ); RemoteFsTranslog.download(translogTransferManager, location, logger); logger.trace(remoteTranslogTransferTracker.toString()); @@ -244,15 +258,16 @@ public static TranslogTransferManager buildTranslogTransferManager( ThreadPool threadPool, ShardId shardId, FileTransferTracker fileTransferTracker, - RemoteTranslogTransferTracker remoteTranslogTransferTracker + RemoteTranslogTransferTracker tracker, + RemoteStorePathType pathType ) { - return new TranslogTransferManager( - shardId, - new BlobStoreTransferService(blobStoreRepository.blobStore(), threadPool), - blobStoreRepository.basePath().add(shardId.getIndex().getUUID()).add(String.valueOf(shardId.id())).add(TRANSLOG), - fileTransferTracker, - remoteTranslogTransferTracker - ); + assert Objects.nonNull(pathType); + String indexUUID = shardId.getIndex().getUUID(); + String shardIdStr = String.valueOf(shardId.id()); + BlobPath dataPath = pathType.path(blobStoreRepository.basePath(), indexUUID, shardIdStr, TRANSLOG, DATA); + BlobPath mdPath = pathType.path(blobStoreRepository.basePath(), indexUUID, shardIdStr, TRANSLOG, METADATA); + BlobStoreTransferService transferService = new BlobStoreTransferService(blobStoreRepository.blobStore(), threadPool); + return new TranslogTransferManager(shardId, transferService, dataPath, mdPath, fileTransferTracker, tracker); } @Override @@ -524,7 +539,8 @@ private void deleteStaleRemotePrimaryTerms() { } } - public static void cleanup(Repository repository, ShardId shardId, ThreadPool threadPool) throws IOException { + public static void cleanup(Repository repository, ShardId shardId, ThreadPool threadPool, RemoteStorePathType pathType) + throws IOException { assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; BlobStoreRepository blobStoreRepository = (BlobStoreRepository) repository; // We use a dummy stats tracker to ensure the flow doesn't break. 
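The change to `buildTranslogTransferManager` inverts the old ownership of path construction: rather than handing the manager one base path and letting it append `data`/`metadata` itself, both leaf paths are resolved from the path type up front and the manager receives them ready-made. A small sketch of that wiring, with hypothetical names and `Path` in place of `BlobPath`:

    import java.nio.file.Path;

    // Sketch of the new construction order: both leaf paths are resolved from the
    // path layout first, and the manager never sees the base path at all.
    public class TransferManagerWiringSketch {
        static final class TransferManager {
            final Path dataPath;
            final Path metadataPath;
            TransferManager(Path dataPath, Path metadataPath) {
                this.dataPath = dataPath;
                this.metadataPath = metadataPath;
            }
        }

        static TransferManager build(Path base, String indexUUID, String shardId) {
            Path shardTranslog = base.resolve(indexUUID).resolve(shardId).resolve("translog");
            return new TransferManager(shardTranslog.resolve("data"), shardTranslog.resolve("metadata"));
        }

        public static void main(String[] args) {
            TransferManager tm = build(Path.of("base"), "idx-uuid", "0");
            System.out.println(tm.dataPath);     // base/idx-uuid/0/translog/data
            System.out.println(tm.metadataPath); // base/idx-uuid/0/translog/metadata
        }
    }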
@@ -536,7 +552,8 @@ public static void cleanup(Repository repository, ShardId shardId, ThreadPool th threadPool, shardId, fileTransferTracker, - remoteTranslogTransferTracker + remoteTranslogTransferTracker, + pathType ); // clean up all remote translog files translogTransferManager.deleteTranslogFiles(); diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java index 2f6055df87804..c9e07ca3ef8c1 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java @@ -58,7 +58,6 @@ public class TranslogTransferManager { private final TransferService transferService; private final BlobPath remoteDataTransferPath; private final BlobPath remoteMetadataTransferPath; - private final BlobPath remoteBaseTransferPath; private final FileTransferTracker fileTransferTracker; private final RemoteTranslogTransferTracker remoteTranslogTransferTracker; @@ -67,8 +66,6 @@ public class TranslogTransferManager { private static final int METADATA_FILES_TO_FETCH = 10; private final Logger logger; - private final static String METADATA_DIR = "metadata"; - private final static String DATA_DIR = "data"; private static final VersionedCodecStreamWrapper<TranslogTransferMetadata> metadataStreamWrapper = new VersionedCodecStreamWrapper<>( new TranslogTransferMetadataHandler(), @@ -79,15 +76,15 @@ public class TranslogTransferManager { public TranslogTransferManager( ShardId shardId, TransferService transferService, - BlobPath remoteBaseTransferPath, + BlobPath remoteDataTransferPath, + BlobPath remoteMetadataTransferPath, FileTransferTracker fileTransferTracker, RemoteTranslogTransferTracker remoteTranslogTransferTracker ) { this.shardId = shardId; this.transferService = transferService; - this.remoteBaseTransferPath = remoteBaseTransferPath; - this.remoteDataTransferPath = remoteBaseTransferPath.add(DATA_DIR); - this.remoteMetadataTransferPath = remoteBaseTransferPath.add(METADATA_DIR); + this.remoteDataTransferPath = remoteDataTransferPath; + this.remoteMetadataTransferPath = remoteMetadataTransferPath; this.fileTransferTracker = fileTransferTracker; this.logger = Loggers.getLogger(getClass(), shardId); this.remoteTranslogTransferTracker = remoteTranslogTransferTracker; @@ -456,17 +453,27 @@ public void onFailure(Exception e) { ); } + /** + * Deletes all the translog content related to the underlying shard. + */ public void delete() { - // cleans up all the translog contents in async fashion - transferService.deleteAsync(ThreadPool.Names.REMOTE_PURGE, remoteBaseTransferPath, new ActionListener<>() { + // Delete the translog data content from the remote store. + delete(remoteDataTransferPath); + // Delete the translog metadata content from the remote store.
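With the base path removed from the manager, the purge in `delete()` becomes two independent asynchronous deletions, one per leaf path, each logging its own success or failure, which makes partial failures visible. A hedged sketch of that fan-out, using `CompletableFuture` instead of the transfer service's `ActionListener`:

    import java.nio.file.Path;
    import java.util.List;
    import java.util.concurrent.CompletableFuture;

    // Illustrative fan-out: one async deletion per path, each logged separately.
    public class PerPathDeleteSketch {
        static CompletableFuture<Void> deleteAsync(Path path) {
            return CompletableFuture.runAsync(() -> System.out.println("deleted all content at path=" + path))
                .exceptionally(e -> {
                    System.err.println("failed to clean path=" + path + ": " + e);
                    return null;
                });
        }

        public static void main(String[] args) {
            Path shardTranslog = Path.of("base", "idx-uuid", "0", "translog");
            List<CompletableFuture<Void>> deletes = List.of(
                deleteAsync(shardTranslog.resolve("data")),
                deleteAsync(shardTranslog.resolve("metadata"))
            );
            CompletableFuture.allOf(deletes.toArray(new CompletableFuture[0])).join();
        }
    }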
+ delete(remoteMetadataTransferPath); + } + + private void delete(BlobPath path) { + // cleans up all the translog contents in async fashion for the given path + transferService.deleteAsync(ThreadPool.Names.REMOTE_PURGE, path, new ActionListener<>() { @Override public void onResponse(Void unused) { - logger.info("Deleted all remote translog data"); + logger.info("Deleted all remote translog data at path={}", path); } @Override public void onFailure(Exception e) { - logger.error("Exception occurred while cleaning translog", e); + logger.error(new ParameterizedMessage("Exception occurred while cleaning translog at path={}", path), e); } }); } diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index 076173177feee..a7c2fb03285b0 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -108,6 +108,7 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.remote.RemoteStorePathType; import org.opensearch.index.snapshots.IndexShardRestoreFailedException; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; @@ -669,7 +670,8 @@ public void cloneRemoteStoreIndexShardSnapshot( RemoteStoreLockManager remoteStoreMetadataLockManger = remoteStoreLockManagerFactory.newLockManager( remoteStoreRepository, indexUUID, - String.valueOf(shardId.shardId()) + String.valueOf(shardId.shardId()), + RemoteStorePathType.FIXED // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot ); remoteStoreMetadataLockManger.cloneLock( FileLockInfo.getLockInfoBuilder().withAcquirerId(source.getUUID()).build(), @@ -1107,7 +1109,8 @@ public static void remoteDirectoryCleanupAsync( String remoteStoreRepoForIndex, String indexUUID, ShardId shardId, - String threadPoolName + String threadPoolName, + RemoteStorePathType pathType ) { threadpool.executor(threadPoolName) .execute( @@ -1116,7 +1119,8 @@ public static void remoteDirectoryCleanupAsync( remoteDirectoryFactory, remoteStoreRepoForIndex, indexUUID, - shardId + shardId, + pathType ), indexUUID, shardId @@ -1147,7 +1151,8 @@ protected void releaseRemoteStoreLockAndCleanup( RemoteStoreLockManager remoteStoreMetadataLockManager = remoteStoreLockManagerFactory.newLockManager( remoteStoreRepoForIndex, indexUUID, - shardId + shardId, + RemoteStorePathType.HASHED_PREFIX // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot ); remoteStoreMetadataLockManager.release(FileLockInfo.getLockInfoBuilder().withAcquirerId(shallowSnapshotUUID).build()); logger.debug("Successfully released lock for shard {} of index with uuid {}", shardId, indexUUID); @@ -1169,7 +1174,8 @@ protected void releaseRemoteStoreLockAndCleanup( remoteStoreRepoForIndex, indexUUID, new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.parseInt(shardId)), - ThreadPool.Names.REMOTE_PURGE + ThreadPool.Names.REMOTE_PURGE, + RemoteStorePathType.FIXED // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot ); } } diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java 
b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index e5ac604e0a5e3..da2a36cbb0701 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -451,7 +451,7 @@ public ClusterState execute(ClusterState currentState) { .put(snapshotIndexMetadata.getSettings()) .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) ); - createIndexService.addRemoteCustomData(indexMdBuilder); + createIndexService.addRemoteStorePathTypeInCustomData(indexMdBuilder, false); shardLimitValidator.validateShardLimit( renamedIndexName, snapshotIndexMetadata.getSettings(), diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathTypeTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathTypeTests.java new file mode 100644 index 0000000000000..0f108d1b45f5a --- /dev/null +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathTypeTests.java @@ -0,0 +1,111 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.remote; + +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory; +import org.opensearch.index.remote.RemoteStoreDataEnums.DataType; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; + +import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.TRANSLOG; +import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.DATA; +import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.LOCK_FILES; +import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.METADATA; +import static org.opensearch.index.remote.RemoteStorePathType.FIXED; +import static org.opensearch.index.remote.RemoteStorePathType.parseString; + +public class RemoteStorePathTypeTests extends OpenSearchTestCase { + + private static final String SEPARATOR = "/"; + + public void testParseString() { + // Case 1 - Pass values from the enum. 
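A detail worth calling out in `parseString`, which the test below exercises: the input is uppercased with `Locale.ROOT` so the conversion is locale-independent. Under a locale-sensitive mapping such as Turkish, lowercase `i` uppercases to a dotted capital `İ`, and the result would no longer match any enum constant. A standalone demonstration:

    import java.util.Locale;

    // Why case conversion uses Locale.ROOT: locale-sensitive uppercasing can change
    // the characters themselves, breaking Enum.valueOf lookups.
    public class LocaleUppercaseDemo {
        public static void main(String[] args) {
            String s = "fixed";
            System.out.println(s.toUpperCase(Locale.ROOT));                    // FIXED
            System.out.println(s.toUpperCase(Locale.forLanguageTag("tr-TR"))); // FİXED (dotted capital I)
        }
    }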
+ String typeString = FIXED.toString(); + RemoteStorePathType type = parseString(randomFrom(typeString, typeString.toLowerCase(Locale.ROOT))); + assertEquals(FIXED, type); + + typeString = RemoteStorePathType.HASHED_PREFIX.toString(); + type = parseString(randomFrom(typeString, typeString.toLowerCase(Locale.ROOT))); + assertEquals(RemoteStorePathType.HASHED_PREFIX, type); + + // Case 2 - Pass random string + String randomTypeString = randomAlphaOfLength(2); + IllegalArgumentException ex = assertThrows(IllegalArgumentException.class, () -> parseString(randomTypeString)); + assertEquals("Could not parse RemoteStorePathType for [" + randomTypeString + "]", ex.getMessage()); + + // Case 3 - Null string + ex = assertThrows(IllegalArgumentException.class, () -> parseString(null)); + assertEquals("Could not parse RemoteStorePathType for [null]", ex.getMessage()); + } + + public void testGeneratePathForFixedType() { + BlobPath blobPath = new BlobPath(); + List pathList = getPathList(); + for (String path : pathList) { + blobPath = blobPath.add(path); + } + + String indexUUID = randomAlphaOfLength(10); + String shardId = String.valueOf(randomInt(100)); + DataCategory dataCategory = TRANSLOG; + DataType dataType = DATA; + + String basePath = getPath(pathList) + indexUUID + SEPARATOR + shardId + SEPARATOR; + // Translog Data + BlobPath result = FIXED.path(blobPath, indexUUID, shardId, dataCategory, dataType); + assertEquals(basePath + dataCategory.getName() + SEPARATOR + dataType.getName() + SEPARATOR, result.buildAsString()); + + // Translog Metadata + dataType = METADATA; + result = FIXED.path(blobPath, indexUUID, shardId, dataCategory, dataType); + assertEquals(basePath + dataCategory.getName() + SEPARATOR + dataType.getName() + SEPARATOR, result.buildAsString()); + + // Translog Lock files - This is a negative case where the assertion will trip. 
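The negative case that follows trips only because the check inside `path(...)` is a plain Java `assert`, and the OpenSearch test runner enables assertions (`-ea`). On a production JVM with assertions disabled, the unsupported `translog/lock_files` combination would silently produce a path. A minimal illustration of the difference:

    // Run with: java -ea AssertDemo (throws) vs. java AssertDemo (passes through).
    public class AssertDemo {
        static void requireSupported(boolean supported) {
            assert supported : "category and type are not supported together";
        }

        public static void main(String[] args) {
            try {
                requireSupported(false);
                System.out.println("assertions disabled: unsupported combination was accepted");
            } catch (AssertionError e) {
                System.out.println("assertions enabled: " + e.getMessage());
            }
        }
    }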
+ BlobPath finalBlobPath = blobPath; + assertThrows(AssertionError.class, () -> FIXED.path(finalBlobPath, indexUUID, shardId, TRANSLOG, LOCK_FILES)); + + // Segment Data + dataCategory = SEGMENTS; + dataType = DATA; + result = FIXED.path(blobPath, indexUUID, shardId, dataCategory, dataType); + assertEquals(basePath + dataCategory.getName() + SEPARATOR + dataType.getName() + SEPARATOR, result.buildAsString()); + + // Segment Metadata + dataType = METADATA; + result = FIXED.path(blobPath, indexUUID, shardId, dataCategory, dataType); + assertEquals(basePath + dataCategory.getName() + SEPARATOR + dataType.getName() + SEPARATOR, result.buildAsString()); + + // Segment Lock files + dataType = LOCK_FILES; + result = FIXED.path(blobPath, indexUUID, shardId, dataCategory, dataType); + assertEquals(basePath + dataCategory.getName() + SEPARATOR + dataType.getName() + SEPARATOR, result.buildAsString()); + } + + private List<String> getPathList() { + List<String> pathList = new ArrayList<>(); + int length = randomIntBetween(0, 5); + for (int i = 0; i < length; i++) { + pathList.add(randomAlphaOfLength(randomIntBetween(2, 5))); + } + return pathList; + } + + private String getPath(List<String> pathList) { + String p = String.join(SEPARATOR, pathList); + if (p.isEmpty() || p.endsWith(SEPARATOR)) { + return p; + } + return p + SEPARATOR; + } +} diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index 8b69c15dac9d3..59c8a9d92f07b 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -37,6 +37,7 @@ import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.engine.NRTReplicationEngineFactory; +import org.opensearch.index.remote.RemoteStorePathType; import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; @@ -697,13 +698,20 @@ public void testCleanupAsync() throws Exception { threadPool, indexShard.shardId() ); - when(remoteSegmentStoreDirectoryFactory.newDirectory(any(), any(), any())).thenReturn(remoteSegmentDirectory); + when(remoteSegmentStoreDirectoryFactory.newDirectory(any(), any(), any(), any())).thenReturn(remoteSegmentDirectory); String repositoryName = "test-repository"; String indexUUID = "test-idx-uuid"; ShardId shardId = new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.parseInt("0")); - - RemoteSegmentStoreDirectory.remoteDirectoryCleanup(remoteSegmentStoreDirectoryFactory, repositoryName, indexUUID, shardId); - verify(remoteSegmentStoreDirectoryFactory).newDirectory(repositoryName, indexUUID, shardId); + RemoteStorePathType pathType = randomFrom(RemoteStorePathType.values()); + + RemoteSegmentStoreDirectory.remoteDirectoryCleanup( + remoteSegmentStoreDirectoryFactory, + repositoryName, + indexUUID, + shardId, + pathType + ); + verify(remoteSegmentStoreDirectoryFactory).newDirectory(repositoryName, indexUUID, shardId, pathType); verify(threadPool, times(0)).executor(ThreadPool.Names.REMOTE_PURGE); verify(remoteMetadataDirectory).delete(); verify(remoteDataDirectory).delete(); diff --git a/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactoryTests.java b/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactoryTests.java index
897785849cf7b..0fe5557737447 100644 --- a/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactoryTests.java @@ -11,6 +11,7 @@ import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.index.remote.RemoteStorePathType; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.OpenSearchTestCase; @@ -48,6 +49,7 @@ public void testNewLockManager() throws IOException { String testRepository = "testRepository"; String testIndexUUID = "testIndexUUID"; String testShardId = "testShardId"; + RemoteStorePathType pathType = RemoteStorePathType.FIXED; BlobStoreRepository repository = mock(BlobStoreRepository.class); BlobStore blobStore = mock(BlobStore.class); @@ -59,7 +61,12 @@ public void testNewLockManager() throws IOException { when(repositoriesService.repository(testRepository)).thenReturn(repository); - RemoteStoreLockManager lockManager = remoteStoreLockManagerFactory.newLockManager(testRepository, testIndexUUID, testShardId); + RemoteStoreLockManager lockManager = remoteStoreLockManagerFactory.newLockManager( + testRepository, + testIndexUUID, + testShardId, + pathType + ); assertTrue(lockManager != null); ArgumentCaptor blobPathCaptor = ArgumentCaptor.forClass(BlobPath.class); diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java index 7ff4c3ecf5236..9f72d3c7bd825 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java @@ -99,7 +99,7 @@ import static org.opensearch.common.util.BigArrays.NON_RECYCLING_INSTANCE; import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING; -import static org.opensearch.index.translog.RemoteFsTranslog.TRANSLOG; +import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.TRANSLOG; import static org.opensearch.index.translog.SnapshotMatchers.containsOperationsInAnyOrder; import static org.opensearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; import static org.hamcrest.Matchers.contains; @@ -907,7 +907,7 @@ public void testDrainSync() throws Exception { } private BlobPath getTranslogDirectory() { - return repository.basePath().add(shardId.getIndex().getUUID()).add(String.valueOf(shardId.id())).add(TRANSLOG); + return repository.basePath().add(shardId.getIndex().getUUID()).add(String.valueOf(shardId.id())).add(TRANSLOG.getName()); } private Long populateTranslogOps(boolean withMissingOps) throws IOException { diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java index e34bc078896f9..a9502dc051428 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java @@ -48,6 +48,8 @@ import org.mockito.Mockito; +import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.TRANSLOG; +import static 
org.opensearch.index.remote.RemoteStoreDataEnums.DataType.METADATA; import static org.opensearch.index.translog.transfer.TranslogTransferMetadata.METADATA_SEPARATOR; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyMap; @@ -95,7 +97,8 @@ public void setUp() throws Exception { translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), tracker, remoteTranslogTransferTracker ); @@ -159,7 +162,8 @@ public void onFailure(TransferFileSnapshot fileSnapshot, Exception e) { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), fileTransferTracker, remoteTranslogTransferTracker ); @@ -194,7 +198,8 @@ public void testTransferSnapshotOnUploadTimeout() throws Exception { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), fileTransferTracker, remoteTranslogTransferTracker ); @@ -235,7 +240,8 @@ public void testTransferSnapshotOnThreadInterrupt() throws Exception { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), fileTransferTracker, remoteTranslogTransferTracker ); @@ -333,7 +339,8 @@ public void testReadMetadataNoFile() throws IOException { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), null, remoteTranslogTransferTracker ); @@ -354,7 +361,8 @@ public void testReadMetadataFile() throws IOException { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), null, remoteTranslogTransferTracker ); @@ -390,7 +398,8 @@ public void testReadMetadataReadException() throws IOException { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), null, remoteTranslogTransferTracker ); @@ -426,7 +435,8 @@ public void testReadMetadataListException() throws IOException { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), null, remoteTranslogTransferTracker ); @@ -499,7 +509,8 @@ public void testDeleteTranslogSuccess() throws Exception { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, blobStoreTransferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), tracker, remoteTranslogTransferTracker ); @@ -518,7 +529,8 @@ public void testDeleteStaleTranslogMetadata() { 
TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), null, remoteTranslogTransferTracker ); @@ -569,7 +581,8 @@ public void testDeleteTranslogFailure() throws Exception { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, blobStoreTransferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), tracker, remoteTranslogTransferTracker ); @@ -612,7 +625,8 @@ public void testMetadataConflict() throws InterruptedException { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), null, remoteTranslogTransferTracker ); From f6d6fd3564653be3e8f32cfcdb2c3ef5a45de584 Mon Sep 17 00:00:00 2001 From: Ashish Date: Tue, 26 Mar 2024 11:39:45 +0530 Subject: [PATCH 42/74] Add missed API visibility annotations for public APIs (#12920) Signed-off-by: Ashish Singh --- .../main/java/org/opensearch/common/blobstore/BlobPath.java | 4 +++- .../org/opensearch/index/remote/RemoteStoreDataEnums.java | 6 +++++- .../org/opensearch/index/remote/RemoteStorePathType.java | 2 ++ 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java b/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java index c54536e9c46e2..763594ed52977 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java +++ b/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java @@ -33,6 +33,7 @@ package org.opensearch.common.blobstore; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import java.util.ArrayList; import java.util.Collections; @@ -42,8 +43,9 @@ /** * The list of paths where a blob can reside. The contents of the paths are dependent upon the implementation of {@link BlobContainer}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class BlobPath implements Iterable { private static final String SEPARATOR = "/"; diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreDataEnums.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreDataEnums.java index 1afd73bf1f1b3..475e29004ba2e 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreDataEnums.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreDataEnums.java @@ -8,6 +8,8 @@ package org.opensearch.index.remote; +import org.opensearch.common.annotation.PublicApi; + import java.util.Set; import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.DATA; @@ -16,13 +18,14 @@ /** * This class contains the different enums related to remote store data categories and types. * - * @opensearch.internal + * @opensearch.api */ public class RemoteStoreDataEnums { /** * Categories of the data in Remote store. */ + @PublicApi(since = "2.14.0") public enum DataCategory { SEGMENTS("segments", Set.of(DataType.values())), TRANSLOG("translog", Set.of(DATA, METADATA)); @@ -47,6 +50,7 @@ public String getName() { /** * Types of data in remote store. 
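The annotations added by this patch are markers rather than behavior: `@PublicApi(since = ...)` records the version at which a type entered the public surface, and the build-time API-enforcement check introduced earlier in this series can then require the annotation's presence. A hedged sketch of such a marker annotation (the real one lives in `org.opensearch.common.annotation` and supports more targets):

    import java.lang.annotation.Documented;
    import java.lang.annotation.ElementType;
    import java.lang.annotation.Retention;
    import java.lang.annotation.RetentionPolicy;
    import java.lang.annotation.Target;

    // Illustrative marker annotation; retained at runtime so tooling can inspect it.
    @Documented
    @Retention(RetentionPolicy.RUNTIME)
    @Target({ ElementType.TYPE })
    @interface PublicApi {
        String since();
    }

    @PublicApi(since = "2.14.0")
    class ExamplePublicType {
        public static void main(String[] args) {
            PublicApi marker = ExamplePublicType.class.getAnnotation(PublicApi.class);
            System.out.println("public since " + marker.since());
        }
    }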
*/ + @PublicApi(since = "2.14.0") public enum DataType { DATA("data"), METADATA("metadata"), diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java index d7d7a8cdfd644..742d7b501f227 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java @@ -8,6 +8,7 @@ package org.opensearch.index.remote; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory; import org.opensearch.index.remote.RemoteStoreDataEnums.DataType; @@ -20,6 +21,7 @@ * * @opensearch.internal */ +@PublicApi(since = "2.14.0") public enum RemoteStorePathType { FIXED { From 3b10a06276287b493f4713e464fce543f738f4c4 Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Tue, 26 Mar 2024 22:31:57 +0800 Subject: [PATCH 43/74] Fix flaky tests in CopyProcessorTests (#12885) * Fix flaky test CopyProcessorTests#testCopyWithRemoveSource Signed-off-by: Gao Binlong * Remove an existing field to get a non-existing field name Signed-off-by: Gao Binlong * Add a new randomFieldName method Signed-off-by: Gao Binlong --------- Signed-off-by: Gao Binlong --- .../opensearch/ingest/common/CopyProcessorTests.java | 5 +++-- .../org/opensearch/ingest/RandomDocumentPicks.java | 11 +++++++++++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorTests.java index 3259ba85ef340..b53ce2db994a8 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorTests.java @@ -24,7 +24,7 @@ public class CopyProcessorTests extends OpenSearchTestCase { public void testCopyExistingField() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); String sourceFieldName = RandomDocumentPicks.randomExistingFieldName(random(), ingestDocument); - String targetFieldName = RandomDocumentPicks.randomFieldName(random()); + String targetFieldName = RandomDocumentPicks.randomNonExistingFieldName(random(), ingestDocument); Processor processor = createCopyProcessor(sourceFieldName, targetFieldName, false, false, false); processor.execute(ingestDocument); assertThat(ingestDocument.hasField(targetFieldName), equalTo(true)); @@ -71,7 +71,8 @@ public void testCopyWithIgnoreMissing() throws Exception { public void testCopyWithRemoveSource() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); String sourceFieldName = RandomDocumentPicks.randomExistingFieldName(random(), ingestDocument); - String targetFieldName = RandomDocumentPicks.randomFieldName(random()); + String targetFieldName = RandomDocumentPicks.randomNonExistingFieldName(random(), ingestDocument); + Object sourceValue = ingestDocument.getFieldValue(sourceFieldName, Object.class); Processor processor = createCopyProcessor(sourceFieldName, targetFieldName, false, true, false); diff --git a/test/framework/src/main/java/org/opensearch/ingest/RandomDocumentPicks.java b/test/framework/src/main/java/org/opensearch/ingest/RandomDocumentPicks.java index c478bf9239f74..0e42787b16be8 100644 --- 
a/test/framework/src/main/java/org/opensearch/ingest/RandomDocumentPicks.java +++ b/test/framework/src/main/java/org/opensearch/ingest/RandomDocumentPicks.java @@ -71,6 +71,17 @@ public static String randomFieldName(Random random) { return fieldName.toString(); } + /** + * Returns a random field name that doesn't exist in the document. + */ + public static String randomNonExistingFieldName(Random random, IngestDocument ingestDocument) { + String fieldName; + do { + fieldName = randomFieldName(random); + } while (canAddField(fieldName, ingestDocument) == false); + return fieldName; + } + /** * Returns a random leaf field name. */ From a4d6d55ec78cde53c80c3e0626904d594bd518db Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 26 Mar 2024 14:08:49 -0400 Subject: [PATCH 44/74] Bump netty from 4.1.107.Final to 4.1.108.Final (#12924) Signed-off-by: Andriy Redko --- CHANGELOG.md | 3 ++- buildSrc/version.properties | 2 +- .../licenses/netty-buffer-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-codec-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-codec-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.108.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.107.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-codec-dns-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-codec-dns-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-codec-socks-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-codec-socks-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-handler-proxy-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-handler-proxy-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.107.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.108.Final.jar.sha1 | 1 + .../repository-hdfs/licenses/netty-all-4.1.107.Final.jar.sha1 | 1 - .../repository-hdfs/licenses/netty-all-4.1.108.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-buffer-4.1.107.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-buffer-4.1.108.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-codec-4.1.107.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-codec-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.108.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-common-4.1.107.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-common-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.107.Final.jar.sha1 | 1 
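The flaky-test fix above is rejection sampling: keep drawing random candidates until one satisfies the predicate (here, a field name that can still be added to the document). The loop terminates quickly in practice because acceptable names vastly outnumber collisions. A self-contained sketch of the same idea:

    import java.util.Random;
    import java.util.Set;
    import java.util.function.Predicate;

    // Rejection sampling: draw until the predicate accepts. Terminates almost surely
    // when acceptable values vastly outnumber rejected ones, as with random field names.
    public class RejectionSamplingSketch {
        static String randomName(Random random) {
            StringBuilder sb = new StringBuilder();
            for (int i = 0; i < 5; i++) {
                sb.append((char) ('a' + random.nextInt(26)));
            }
            return sb.toString();
        }

        static String randomNameSatisfying(Random random, Predicate<String> accept) {
            String candidate;
            do {
                candidate = randomName(random);
            } while (accept.test(candidate) == false);
            return candidate;
        }

        public static void main(String[] args) {
            Set<String> existing = Set.of("alpha", "beta");
            String fresh = randomNameSatisfying(new Random(42), name -> existing.contains(name) == false);
            System.out.println(fresh);
        }
    }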
- .../licenses/netty-handler-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.108.Final.jar.sha1 | 1 + .../netty-transport-classes-epoll-4.1.107.Final.jar.sha1 | 1 - .../netty-transport-classes-epoll-4.1.108.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.107.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.108.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-buffer-4.1.107.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-buffer-4.1.108.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-codec-4.1.107.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-codec-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.108.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-common-4.1.107.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-common-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-buffer-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-codec-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-codec-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-codec-dns-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-codec-dns-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.108.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.107.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.108.Final.jar.sha1 | 1 + 90 files changed, 47 insertions(+), 46 deletions(-) delete mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 delete mode 100644 
modules/transport-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-common-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-common-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.108.Final.jar.sha1 delete mode 100644 
plugins/repository-s3/licenses/netty-transport-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-common-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-common-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 create mode 100644 
plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1
 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1
 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1
 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1
 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a60786b7cbcd7..03aa8b8ec7f45 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -108,6 +108,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896))
 - Bump `asm` from 9.6 to 9.7 ([#12908](https://github.com/opensearch-project/OpenSearch/pull/12908))
 - Bump `net.minidev:json-smart` from 2.5.0 to 2.5.1 ([#12893](https://github.com/opensearch-project/OpenSearch/pull/12893))
+- Bump `netty` from 4.1.107.Final to 4.1.108.Final ([#12924](https://github.com/opensearch-project/OpenSearch/pull/12924))
 
 ### Changed
 - [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872))
@@ -122,4 +123,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 ### Security
 
 [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD
-[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x
+[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.13...2.x
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index 2a9fdb7cdddaf..5c9cd25bb79ad 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -26,7 +26,7 @@ jakarta_annotation = 1.3.5
 # when updating the JNA version, also update the version in buildSrc/build.gradle
 jna = 5.13.0
 
-netty = 4.1.107.Final
+netty = 4.1.108.Final
 joda = 2.12.2
 
 # project reactor
diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1
deleted file mode 100644
index beb44fc0f4cf9..0000000000000
--- a/modules/transport-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-8509a72b8a5a2d33d611e99254aed39765c3ad82
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1
new file mode 100644
index 0000000000000..1021bfbec06ad
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1
@@ -0,0 +1 @@
+2a9d06026ed251705e6ab52fa6ebe5f4f15aab7a
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1
deleted file mode 100644
index 4c74bb06fd83b..0000000000000
--- a/modules/transport-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-0a1d32debf2ed07c5852ab5b2904c43adb76c39e
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1
new file mode 100644
index 0000000000000..28bef74acca6d
---
/dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +c2ef6018eecde345fcddb96e31f651df16dca4c2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 deleted file mode 100644 index 38eb2e5bad80a..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -04d8e9e51b7254bd26a42fe17bdcae32e4c6ebb3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..82fb94debd45d --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +dd44733e94f3f6237c896f2bbe9927c1eba48543 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 deleted file mode 100644 index 5b3d3311edc9f..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3885ffe7dd05c9773df70c61009f34a5a8a383ec \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..018cf546ca622 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +ed90430e545529a2df7c1db6c94568ea00867a61 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 deleted file mode 100644 index bbe91c6ccfb1d..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4f17a547530d64becd7179507b25f4154bcfba57 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..0f459553b16e0 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +30617b39cc6f850ca3807459fe726fbcd63989f2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 deleted file mode 100644 index ba27b38632622..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4c6b05f4d9aca117981297fb7f02953102ebb5e \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..854891ce4dafe --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +d186a0be320e6a139c42d9b018596ef9d4a0b4ca \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 deleted file mode 100644 index 3bc0f7b3fed09..0000000000000 --- 
a/modules/transport-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dfee84308341a42131dd0f8ac0e1e02d627c19f3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..3a95ebfdbe6a1 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +f3085568e45c2ca74118118f792d0d55968aeb13 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 deleted file mode 100644 index 19419999300dd..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d6a105c621b47d1410e0e09419d7209d2d46e914 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..d1e2ada6f8c84 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +1fd80f714c85ca685a80f32e0a4e8fd3b866e310 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 deleted file mode 100644 index 407ecaffdad30..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d61d4959741109b3eccd7337f11fc89fa90a74a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..978378686b4ad --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +0df31f1cd96df8b2882b1e0faf4409b0bd704541 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 deleted file mode 100644 index d823de7ffadd4..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -945e8ad5ab7ec4f11fb0257d2594af0cfae1d4b7 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..93207338f7db8 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +93cc78652ed836ef950604139bfb4afb45e0bc7b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 deleted file mode 100644 index 5b3d3311edc9f..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3885ffe7dd05c9773df70c61009f34a5a8a383ec \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 
b/plugins/repository-azure/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..018cf546ca622 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +ed90430e545529a2df7c1db6c94568ea00867a61 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.107.Final.jar.sha1 deleted file mode 100644 index 114d77a1bb95f..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3b7070e9acfe262bb0bd936c4051116631796b3b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..e850aad5f3656 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +3ad0af28e408092f0d12994802a9f3fe18d45f8c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.107.Final.jar.sha1 deleted file mode 100644 index 5a4bde479eb38..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ebc495e9b2bc2c9ab60a264b40f62dc0671d9f6e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..d4ae1b7e71661 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +62b6a5dfee2e22ab9015a469cb68e4727596fd4c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 deleted file mode 100644 index a62cb0fefcc40..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d655d09e972dee46f580dbcf41c0d1356aea9e1b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..8d299e265646d --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +84d160a3b20f1de896df0cfafe6638199d49efb8 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 deleted file mode 100644 index 407ecaffdad30..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d61d4959741109b3eccd7337f11fc89fa90a74a \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..978378686b4ad --- /dev/null +++ 
b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +0df31f1cd96df8b2882b1e0faf4409b0bd704541 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.107.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.107.Final.jar.sha1 deleted file mode 100644 index 0e3595fecb0d2..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3019703b67413ef3d6150da1f49753f4010507ce \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.108.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..5f0eed9c5d7e4 --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +ad97680373f9c9f278f597ad6552d44e20418929 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.107.Final.jar.sha1 deleted file mode 100644 index beb44fc0f4cf9..0000000000000 --- a/plugins/repository-s3/licenses/netty-buffer-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8509a72b8a5a2d33d611e99254aed39765c3ad82 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..1021bfbec06ad --- /dev/null +++ b/plugins/repository-s3/licenses/netty-buffer-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +2a9d06026ed251705e6ab52fa6ebe5f4f15aab7a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.107.Final.jar.sha1 deleted file mode 100644 index 4c74bb06fd83b..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a1d32debf2ed07c5852ab5b2904c43adb76c39e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..28bef74acca6d --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +c2ef6018eecde345fcddb96e31f651df16dca4c2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.107.Final.jar.sha1 deleted file mode 100644 index 38eb2e5bad80a..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -04d8e9e51b7254bd26a42fe17bdcae32e4c6ebb3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..82fb94debd45d --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +dd44733e94f3f6237c896f2bbe9927c1eba48543 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 deleted file mode 100644 index 5b3d3311edc9f..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-3885ffe7dd05c9773df70c61009f34a5a8a383ec \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..018cf546ca622 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +ed90430e545529a2df7c1db6c94568ea00867a61 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.107.Final.jar.sha1 deleted file mode 100644 index bbe91c6ccfb1d..0000000000000 --- a/plugins/repository-s3/licenses/netty-common-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4f17a547530d64becd7179507b25f4154bcfba57 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..0f459553b16e0 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-common-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +30617b39cc6f850ca3807459fe726fbcd63989f2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.107.Final.jar.sha1 deleted file mode 100644 index ba27b38632622..0000000000000 --- a/plugins/repository-s3/licenses/netty-handler-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4c6b05f4d9aca117981297fb7f02953102ebb5e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..854891ce4dafe --- /dev/null +++ b/plugins/repository-s3/licenses/netty-handler-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +d186a0be320e6a139c42d9b018596ef9d4a0b4ca \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.107.Final.jar.sha1 deleted file mode 100644 index 3bc0f7b3fed09..0000000000000 --- a/plugins/repository-s3/licenses/netty-resolver-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dfee84308341a42131dd0f8ac0e1e02d627c19f3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..3a95ebfdbe6a1 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-resolver-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +f3085568e45c2ca74118118f792d0d55968aeb13 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.107.Final.jar.sha1 deleted file mode 100644 index 19419999300dd..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d6a105c621b47d1410e0e09419d7209d2d46e914 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..d1e2ada6f8c84 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +1fd80f714c85ca685a80f32e0a4e8fd3b866e310 \ No newline at end of file diff --git 
a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.107.Final.jar.sha1 deleted file mode 100644 index 6b9a35acb2c20..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9234407d6a46745599735765c4d3755c7fc84162 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..6ed00ff79dea9 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +baf7b939ef71b25713cacbe47bef8caf80ce99c6 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 deleted file mode 100644 index 407ecaffdad30..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d61d4959741109b3eccd7337f11fc89fa90a74a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..978378686b4ad --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +0df31f1cd96df8b2882b1e0faf4409b0bd704541 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.107.Final.jar.sha1 deleted file mode 100644 index beb44fc0f4cf9..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8509a72b8a5a2d33d611e99254aed39765c3ad82 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..1021bfbec06ad --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +2a9d06026ed251705e6ab52fa6ebe5f4f15aab7a \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.107.Final.jar.sha1 deleted file mode 100644 index 4c74bb06fd83b..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a1d32debf2ed07c5852ab5b2904c43adb76c39e \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..28bef74acca6d --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +c2ef6018eecde345fcddb96e31f651df16dca4c2 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.107.Final.jar.sha1 deleted file mode 100644 index 38eb2e5bad80a..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 
+0,0 @@ -04d8e9e51b7254bd26a42fe17bdcae32e4c6ebb3 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..82fb94debd45d --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +dd44733e94f3f6237c896f2bbe9927c1eba48543 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.107.Final.jar.sha1 deleted file mode 100644 index bbe91c6ccfb1d..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4f17a547530d64becd7179507b25f4154bcfba57 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..0f459553b16e0 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +30617b39cc6f850ca3807459fe726fbcd63989f2 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.107.Final.jar.sha1 deleted file mode 100644 index ba27b38632622..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4c6b05f4d9aca117981297fb7f02953102ebb5e \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..854891ce4dafe --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +d186a0be320e6a139c42d9b018596ef9d4a0b4ca \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.107.Final.jar.sha1 deleted file mode 100644 index 3bc0f7b3fed09..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dfee84308341a42131dd0f8ac0e1e02d627c19f3 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..3a95ebfdbe6a1 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +f3085568e45c2ca74118118f792d0d55968aeb13 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.107.Final.jar.sha1 deleted file mode 100644 index 19419999300dd..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d6a105c621b47d1410e0e09419d7209d2d46e914 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..d1e2ada6f8c84 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +1fd80f714c85ca685a80f32e0a4e8fd3b866e310 \ No newline at end of file diff --git 
a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 deleted file mode 100644 index beb44fc0f4cf9..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8509a72b8a5a2d33d611e99254aed39765c3ad82 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..1021bfbec06ad --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +2a9d06026ed251705e6ab52fa6ebe5f4f15aab7a \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 deleted file mode 100644 index 4c74bb06fd83b..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a1d32debf2ed07c5852ab5b2904c43adb76c39e \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..28bef74acca6d --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +c2ef6018eecde345fcddb96e31f651df16dca4c2 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 deleted file mode 100644 index d823de7ffadd4..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -945e8ad5ab7ec4f11fb0257d2594af0cfae1d4b7 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..93207338f7db8 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +93cc78652ed836ef950604139bfb4afb45e0bc7b \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 deleted file mode 100644 index 38eb2e5bad80a..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -04d8e9e51b7254bd26a42fe17bdcae32e4c6ebb3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..82fb94debd45d --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +dd44733e94f3f6237c896f2bbe9927c1eba48543 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 deleted file mode 100644 index 5b3d3311edc9f..0000000000000 
--- a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3885ffe7dd05c9773df70c61009f34a5a8a383ec \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..018cf546ca622 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +ed90430e545529a2df7c1db6c94568ea00867a61 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 deleted file mode 100644 index bbe91c6ccfb1d..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4f17a547530d64becd7179507b25f4154bcfba57 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..0f459553b16e0 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +30617b39cc6f850ca3807459fe726fbcd63989f2 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 deleted file mode 100644 index ba27b38632622..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4c6b05f4d9aca117981297fb7f02953102ebb5e \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..854891ce4dafe --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +d186a0be320e6a139c42d9b018596ef9d4a0b4ca \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 deleted file mode 100644 index 3bc0f7b3fed09..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dfee84308341a42131dd0f8ac0e1e02d627c19f3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..3a95ebfdbe6a1 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +f3085568e45c2ca74118118f792d0d55968aeb13 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 deleted file mode 100644 index a62cb0fefcc40..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d655d09e972dee46f580dbcf41c0d1356aea9e1b \ No newline at end of file diff --git 
a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1
new file mode 100644
index 0000000000000..8d299e265646d
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1
@@ -0,0 +1 @@
+84d160a3b20f1de896df0cfafe6638199d49efb8
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1
deleted file mode 100644
index 19419999300dd..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d6a105c621b47d1410e0e09419d7209d2d46e914
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1
new file mode 100644
index 0000000000000..d1e2ada6f8c84
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1
@@ -0,0 +1 @@
+1fd80f714c85ca685a80f32e0a4e8fd3b866e310
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1
deleted file mode 100644
index 407ecaffdad30..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-4d61d4959741109b3eccd7337f11fc89fa90a74a
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1
new file mode 100644
index 0000000000000..978378686b4ad
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1
@@ -0,0 +1 @@
+0df31f1cd96df8b2882b1e0faf4409b0bd704541
\ No newline at end of file

From 10fc755b2df90cb755cf5cb4809392ed904aaa3e Mon Sep 17 00:00:00 2001
From: kkewwei
Date: Wed, 27 Mar 2024 02:27:15 +0800
Subject: [PATCH 45/74] fix unit test (#12817)

Signed-off-by: kkewwei
---
 .../org/opensearch/cluster/service/TaskBatcherTests.java | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/server/src/test/java/org/opensearch/cluster/service/TaskBatcherTests.java b/server/src/test/java/org/opensearch/cluster/service/TaskBatcherTests.java
index 070fde523fe87..b0916ce9236f7 100644
--- a/server/src/test/java/org/opensearch/cluster/service/TaskBatcherTests.java
+++ b/server/src/test/java/org/opensearch/cluster/service/TaskBatcherTests.java
@@ -218,7 +218,8 @@ public void testTasksAreExecutedInOrder() throws BrokenBarrierException, Interru
             executors[i] = new TaskExecutor();
         }
 
-        int tasksSubmittedPerThread = randomIntBetween(2, 1024);
+        // This could create up to 8192 threads and cause a native memory OOM, so we limit the number of threads created.
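+        // (Illustrative arithmetic, not part of the original patch: assuming up to 8 submitting
+        // threads, the old bound of randomIntBetween(2, 1024) permits 8 * 1024 = 8192 concurrent
+        // submissions, while the new bound of 128 keeps the worst case at 8 * 128 = 1024.)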
+ int tasksSubmittedPerThread = randomIntBetween(2, 128); CopyOnWriteArrayList> failures = new CopyOnWriteArrayList<>(); CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread); @@ -286,7 +287,7 @@ public void testNoTasksAreDroppedInParallelSubmission() throws BrokenBarrierExce executors[i] = new TaskExecutor(); } - int tasksSubmittedPerThread = randomIntBetween(2, 1024); + int tasksSubmittedPerThread = randomIntBetween(2, 128); CopyOnWriteArrayList> failures = new CopyOnWriteArrayList<>(); CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread); From f2cc3d8ea314b2d49a8b6ea5f57de5b6aff4faf9 Mon Sep 17 00:00:00 2001 From: Ruirui Zhang Date: Tue, 26 Mar 2024 13:29:05 -0700 Subject: [PATCH 46/74] Add a counter to node stat (and _cat/shards) api to track shard going from idle to non-idle (#12768) --------- Signed-off-by: Ruirui Zhang --- CHANGELOG.md | 1 + .../test/cat.shards/10_basic.yml | 99 ++++++++++++++++++- .../index/search/stats/SearchStats.java | 26 ++++- .../index/search/stats/ShardSearchStats.java | 9 +- .../opensearch/index/shard/IndexShard.java | 5 + .../index/shard/SearchOperationListener.java | 16 +++ .../rest/action/cat/RestShardsAction.java | 5 + .../index/search/stats/SearchStatsTests.java | 7 +- .../shard/SearchOperationListenerTests.java | 31 ++++++ .../action/cat/RestShardsActionTests.java | 8 +- 10 files changed, 196 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 03aa8b8ec7f45..b8ab06ce681b4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -103,6 +103,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added +- Add a counter to node stat api to track shard going from idle to non-idle ([#12768](https://github.com/opensearch-project/OpenSearch/pull/12768)) ### Dependencies - Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml index 29fbf55417961..c309f19b454e7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml @@ -1,13 +1,108 @@ "Help": - skip: - version: " - 2.11.99" + version: " - 2.99.99" + reason: search idle reactivate count total is only added in 3.0.0 + features: node_selector + - do: + cat.shards: + help: true + node_selector: + version: "3.0.0 - " + + - match: + $body: | + /^ index .+ \n + shard .+ \n + prirep .+ \n + state .+ \n + docs .+ \n + store .+ \n + ip .+ \n + id .+ \n + node .+ \n + sync_id .+ \n + unassigned.reason .+ \n + unassigned.at .+ \n + unassigned.for .+ \n + unassigned.details .+ \n + recoverysource.type .+ \n + completion.size .+ \n + fielddata.memory_size .+ \n + fielddata.evictions .+ \n + query_cache.memory_size .+ \n + query_cache.evictions .+ \n + flush.total .+ \n + flush.total_time .+ \n + get.current .+ \n + get.time .+ \n + get.total .+ \n + get.exists_time .+ \n + get.exists_total .+ \n + get.missing_time .+ \n + get.missing_total .+ \n + indexing.delete_current .+ \n + indexing.delete_time .+ \n + indexing.delete_total .+ \n + indexing.index_current .+ \n + indexing.index_time .+ \n + indexing.index_total .+ \n + indexing.index_failed .+ \n + merges.current .+ \n + merges.current_docs .+ \n + 
merges.current_size .+ \n + merges.total .+ \n + merges.total_docs .+ \n + merges.total_size .+ \n + merges.total_time .+ \n + refresh.total .+ \n + refresh.time .+ \n + refresh.external_total .+ \n + refresh.external_time .+ \n + refresh.listeners .+ \n + search.fetch_current .+ \n + search.fetch_time .+ \n + search.fetch_total .+ \n + search.open_contexts .+ \n + search.query_current .+ \n + search.query_time .+ \n + search.query_total .+ \n + search.concurrent_query_current .+ \n + search.concurrent_query_time .+ \n + search.concurrent_query_total .+ \n + search.concurrent_avg_slice_count .+ \n + search.scroll_current .+ \n + search.scroll_time .+ \n + search.scroll_total .+ \n + search.point_in_time_current .+ \n + search.point_in_time_time .+ \n + search.point_in_time_total .+ \n + search.search_idle_reactivate_count_total .+ \n + segments.count .+ \n + segments.memory .+ \n + segments.index_writer_memory .+ \n + segments.version_map_memory .+ \n + segments.fixed_bitset_memory .+ \n + seq_no.max .+ \n + seq_no.local_checkpoint .+ \n + seq_no.global_checkpoint .+ \n + warmer.current .+ \n + warmer.total .+ \n + warmer.total_time .+ \n + path.data .+ \n + path.state .+ \n + docs.deleted .+ \n + $/ +--- +"Help from 2.12.0 to 2.99.99": + - skip: + version: " - 2.11.99 , 3.0.0 - " reason: deleted docs and concurrent search are added in 2.12.0 features: node_selector - do: cat.shards: help: true node_selector: - version: "2.12.0 - " + version: "2.12.0 - 2.99.99" - match: $body: | diff --git a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java index 576e00f8f30d1..d3a53fcc0e2d8 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java @@ -163,6 +163,8 @@ public static class Stats implements Writeable, ToXContentFragment { private long pitTimeInMillis; private long pitCurrent; + private long searchIdleReactivateCount; + @Nullable private RequestStatsLongHolder requestStatsLongHolder; @@ -193,7 +195,8 @@ public Stats( long pitCurrent, long suggestCount, long suggestTimeInMillis, - long suggestCurrent + long suggestCurrent, + long searchIdleReactivateCount ) { this.requestStatsLongHolder = new RequestStatsLongHolder(); this.queryCount = queryCount; @@ -220,6 +223,8 @@ public Stats( this.pitCount = pitCount; this.pitTimeInMillis = pitTimeInMillis; this.pitCurrent = pitCurrent; + + this.searchIdleReactivateCount = searchIdleReactivateCount; } private Stats(StreamInput in) throws IOException { @@ -255,6 +260,10 @@ private Stats(StreamInput in) throws IOException { concurrentQueryCurrent = in.readVLong(); queryConcurrency = in.readVLong(); } + + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + searchIdleReactivateCount = in.readVLong(); + } } public void add(Stats stats) { @@ -282,6 +291,8 @@ public void add(Stats stats) { pitCount += stats.pitCount; pitTimeInMillis += stats.pitTimeInMillis; pitCurrent += stats.pitCurrent; + + searchIdleReactivateCount += stats.searchIdleReactivateCount; } public void addForClosingShard(Stats stats) { @@ -306,6 +317,8 @@ public void addForClosingShard(Stats stats) { pitTimeInMillis += stats.pitTimeInMillis; pitCurrent += stats.pitCurrent; queryConcurrency += stats.queryConcurrency; + + searchIdleReactivateCount += stats.searchIdleReactivateCount; } public long getQueryCount() { @@ -412,6 +425,10 @@ public long getSuggestCurrent() { return suggestCurrent; } + public 
long getSearchIdleReactivateCount() { + return searchIdleReactivateCount; + } + public static Stats readStats(StreamInput in) throws IOException { return new Stats(in); } @@ -457,6 +474,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(concurrentQueryCurrent); out.writeVLong(queryConcurrency); } + + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeVLong(searchIdleReactivateCount); + } } @Override @@ -486,6 +507,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.humanReadableField(Fields.SUGGEST_TIME_IN_MILLIS, Fields.SUGGEST_TIME, getSuggestTime()); builder.field(Fields.SUGGEST_CURRENT, suggestCurrent); + builder.field(Fields.SEARCH_IDLE_REACTIVATE_COUNT_TOTAL, searchIdleReactivateCount); + if (requestStatsLongHolder != null) { builder.startObject(Fields.REQUEST); @@ -654,6 +677,7 @@ static final class Fields { static final String TIME = "time"; static final String CURRENT = "current"; static final String TOTAL = "total"; + static final String SEARCH_IDLE_REACTIVATE_COUNT_TOTAL = "search_idle_reactivate_count_total"; } diff --git a/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java index 99e3f8465c5db..3098986852cc1 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java @@ -213,6 +213,11 @@ public void onFreePitContext(ReaderContext readerContext) { totalStats.pitMetric.inc(TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - readerContext.getStartTimeInNano())); } + @Override + public void onSearchIdleReactivation() { + totalStats.searchIdleMetric.inc(); + } + /** * Holder of statistics values * @@ -239,6 +244,7 @@ static final class StatsHolder { final CounterMetric scrollCurrent = new CounterMetric(); final CounterMetric pitCurrent = new CounterMetric(); final CounterMetric suggestCurrent = new CounterMetric(); + final CounterMetric searchIdleMetric = new CounterMetric(); SearchStats.Stats stats() { return new SearchStats.Stats( @@ -260,7 +266,8 @@ SearchStats.Stats stats() { pitCurrent.count(), suggestMetric.count(), TimeUnit.NANOSECONDS.toMillis(suggestMetric.sum()), - suggestCurrent.count() + suggestCurrent.count(), + searchIdleMetric.count() ); } } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 1d7aa6ac4958b..551dd338eed2a 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -4683,9 +4683,14 @@ public void afterRefresh(boolean didRefresh) { * true if the listener was registered to wait for a refresh. 
*/ public final void awaitShardSearchActive(Consumer listener) { + boolean isSearchIdle = isSearchIdle(); markSearcherAccessed(); // move the shard into non-search idle final Translog.Location location = pendingRefreshLocation.get(); if (location != null) { + if (isSearchIdle) { + SearchOperationListener searchOperationListener = getSearchOperationListener(); + searchOperationListener.onSearchIdleReactivation(); + } addRefreshListener(location, (b) -> { pendingRefreshLocation.compareAndSet(location, null); listener.accept(true); diff --git a/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java b/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java index 849a4f9c15318..94079db468f9c 100644 --- a/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java +++ b/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java @@ -145,6 +145,11 @@ default void onNewPitContext(ReaderContext readerContext) {} */ default void onFreePitContext(ReaderContext readerContext) {} + /** + * Executed when a shard goes from idle to non-idle state + */ + default void onSearchIdleReactivation() {} + /** * A Composite listener that multiplexes calls to each of the listeners methods. */ @@ -310,5 +315,16 @@ public void onFreePitContext(ReaderContext readerContext) { } } } + + @Override + public void onSearchIdleReactivation() { + for (SearchOperationListener listener : listeners) { + try { + listener.onSearchIdleReactivation(); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("onNewSearchIdleReactivation listener [{}] failed", listener), e); + } + } + } } } diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java index 4cd10c6874e0a..4413c8eb370be 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java @@ -253,6 +253,10 @@ protected Table getTableWithHeader(final RestRequest request) { "search.point_in_time_total", "alias:spto,searchPointInTimeTotal;default:false;text-align:right;desc:completed point in time contexts" ); + table.addCell( + "search.search_idle_reactivate_count_total", + "alias:ssirct,searchSearchIdleReactivateCountTotal;default:false;text-align:right;desc:number of times a shard reactivated" + ); table.addCell("segments.count", "alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments"); table.addCell("segments.memory", "alias:sm,segmentsMemory;default:false;text-align:right;desc:memory used by segments"); @@ -427,6 +431,7 @@ Table buildTable(RestRequest request, ClusterStateResponse state, IndicesStatsRe table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getPitCurrent())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getPitTime())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getPitCount())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getSearchIdleReactivateCount())); table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getCount)); table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getZeroMemory)); diff --git a/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java b/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java index 
5656b77445772..594700ea60b3e 100644 --- a/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java +++ b/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java @@ -57,9 +57,9 @@ public void testShardLevelSearchGroupStats() throws Exception { // let's create two dummy search stats with groups Map groupStats1 = new HashMap<>(); Map groupStats2 = new HashMap<>(); - groupStats2.put("group1", new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)); - SearchStats searchStats1 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats1); - SearchStats searchStats2 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats2); + groupStats2.put("group1", new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)); + SearchStats searchStats1 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats1); + SearchStats searchStats2 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats2); // adding these two search stats and checking group stats are correct searchStats1.add(searchStats2); @@ -128,6 +128,7 @@ private static void assertStats(Stats stats, long equalTo) { assertEquals(equalTo, stats.getSuggestCount()); assertEquals(equalTo, stats.getSuggestTimeInMillis()); assertEquals(equalTo, stats.getSuggestCurrent()); + assertEquals(equalTo, stats.getSearchIdleReactivateCount()); // avg_concurrency is not summed up across stats assertEquals(1, stats.getConcurrentAvgSliceCount(), 0); } diff --git a/server/src/test/java/org/opensearch/index/shard/SearchOperationListenerTests.java b/server/src/test/java/org/opensearch/index/shard/SearchOperationListenerTests.java index 98f86758ea2ca..c61c13eecf2c3 100644 --- a/server/src/test/java/org/opensearch/index/shard/SearchOperationListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SearchOperationListenerTests.java @@ -64,6 +64,7 @@ public void testListenersAreExecuted() { AtomicInteger newScrollContext = new AtomicInteger(); AtomicInteger freeScrollContext = new AtomicInteger(); AtomicInteger validateSearchContext = new AtomicInteger(); + AtomicInteger searchIdleReactivateCount = new AtomicInteger(); AtomicInteger timeInNanos = new AtomicInteger(randomIntBetween(0, 10)); SearchOperationListener listener = new SearchOperationListener() { @Override @@ -133,6 +134,11 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertNotNull(readerContext); validateSearchContext.incrementAndGet(); } + + @Override + public void onSearchIdleReactivation() { + searchIdleReactivateCount.incrementAndGet(); + } }; SearchOperationListener throwingListener = (SearchOperationListener) Proxy.newProxyInstance( @@ -169,6 +175,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(0, newScrollContext.get()); assertEquals(0, freeContext.get()); assertEquals(0, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); compositeListener.onFetchPhase(ctx, timeInNanos.get()); @@ -182,6 +189,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(0, newScrollContext.get()); assertEquals(0, freeContext.get()); assertEquals(0, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); 
compositeListener.onPreQueryPhase(ctx); @@ -195,6 +203,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(0, newScrollContext.get()); assertEquals(0, freeContext.get()); assertEquals(0, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); compositeListener.onPreFetchPhase(ctx); @@ -208,6 +217,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(0, newScrollContext.get()); assertEquals(0, freeContext.get()); assertEquals(0, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); compositeListener.onFailedFetchPhase(ctx); @@ -221,6 +231,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(0, newScrollContext.get()); assertEquals(0, freeContext.get()); assertEquals(0, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); compositeListener.onFailedQueryPhase(ctx); @@ -234,6 +245,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(0, newScrollContext.get()); assertEquals(0, freeContext.get()); assertEquals(0, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); compositeListener.onNewReaderContext(mock(ReaderContext.class)); @@ -247,6 +259,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(0, newScrollContext.get()); assertEquals(0, freeContext.get()); assertEquals(0, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); compositeListener.onNewScrollContext(mock(ReaderContext.class)); @@ -260,6 +273,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(2, newScrollContext.get()); assertEquals(0, freeContext.get()); assertEquals(0, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); compositeListener.onFreeReaderContext(mock(ReaderContext.class)); @@ -273,6 +287,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(2, newScrollContext.get()); assertEquals(2, freeContext.get()); assertEquals(0, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); compositeListener.onFreeScrollContext(mock(ReaderContext.class)); @@ -286,6 +301,21 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(2, newScrollContext.get()); assertEquals(2, freeContext.get()); assertEquals(2, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); + assertEquals(0, validateSearchContext.get()); + + compositeListener.onSearchIdleReactivation(); + assertEquals(2, preFetch.get()); + assertEquals(2, preQuery.get()); + assertEquals(2, failedFetch.get()); + assertEquals(2, failedQuery.get()); + assertEquals(2, onQuery.get()); + assertEquals(2, onFetch.get()); + assertEquals(2, newContext.get()); + assertEquals(2, newScrollContext.get()); + assertEquals(2, freeContext.get()); + assertEquals(2, freeScrollContext.get()); + assertEquals(2, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); if (throwingListeners == 0) { @@ -311,6 +341,7 @@ public void 
validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(2, newScrollContext.get()); assertEquals(2, freeContext.get()); assertEquals(2, freeScrollContext.get()); + assertEquals(2, searchIdleReactivateCount.get()); assertEquals(2, validateSearchContext.get()); } } diff --git a/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java b/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java index fa13ec2036797..883df7da5d717 100644 --- a/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java @@ -125,7 +125,7 @@ public void testBuildTable() { assertThat(headers.get(6).value, equalTo("ip")); assertThat(headers.get(7).value, equalTo("id")); assertThat(headers.get(8).value, equalTo("node")); - assertThat(headers.get(78).value, equalTo("docs.deleted")); + assertThat(headers.get(79).value, equalTo("docs.deleted")); final List> rows = table.getRows(); assertThat(rows.size(), equalTo(numShards)); @@ -141,9 +141,9 @@ public void testBuildTable() { assertThat(row.get(4).value, equalTo(shardStats.getStats().getDocs().getCount())); assertThat(row.get(6).value, equalTo(localNode.getHostAddress())); assertThat(row.get(7).value, equalTo(localNode.getId())); - assertThat(row.get(76).value, equalTo(shardStats.getDataPath())); - assertThat(row.get(77).value, equalTo(shardStats.getStatePath())); - assertThat(row.get(78).value, equalTo(shardStats.getStats().getDocs().getDeleted())); + assertThat(row.get(77).value, equalTo(shardStats.getDataPath())); + assertThat(row.get(78).value, equalTo(shardStats.getStatePath())); + assertThat(row.get(79).value, equalTo(shardStats.getStats().getDocs().getDeleted())); } } } From 839ee4c167dae6b9b2352693e16dff7d98f225db Mon Sep 17 00:00:00 2001 From: "Daniel (dB.) 
Doubrovkine" Date: Tue, 26 Mar 2024 18:09:59 -0400 Subject: [PATCH 47/74] Rebased version of #12706 (#12933) * Fix Flaky SettingTests Tests Signed-off-by: kkewwei * add the comment to unit test Signed-off-by: kkewwei --------- Signed-off-by: kkewwei Co-authored-by: kkewwei --- .../java/org/opensearch/common/settings/SettingTests.java | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/server/src/test/java/org/opensearch/common/settings/SettingTests.java b/server/src/test/java/org/opensearch/common/settings/SettingTests.java index c6da96b521276..7ebee680e8e52 100644 --- a/server/src/test/java/org/opensearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/opensearch/common/settings/SettingTests.java @@ -201,7 +201,7 @@ public void testMemorySize() { assertEquals(new ByteSizeValue(12), value.get()); assertTrue(settingUpdater.apply(Settings.builder().put("a.byte.size", "20%").build(), Settings.EMPTY)); - assertEquals(new ByteSizeValue((int) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.2)), value.get()); + assertEquals(new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.2)), value.get()); } public void testMemorySizeWithFallbackValue() { @@ -219,10 +219,12 @@ public void testMemorySizeWithFallbackValue() { assertEquals(memorySizeValue.getBytes(), JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.2, 1.0); assertTrue(settingUpdater.apply(Settings.builder().put("a.byte.size", "30%").build(), Settings.EMPTY)); - assertEquals(new ByteSizeValue((int) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.3)), value.get()); + // If value=getHeapMax()*0.3 is bigger than 2gb, and is bigger than Integer.MAX_VALUE, + // then (long)((int) value) will lose precision. + assertEquals(new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.3)), value.get()); assertTrue(settingUpdater.apply(Settings.builder().put("b.byte.size", "40%").build(), Settings.EMPTY)); - assertEquals(new ByteSizeValue((int) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.4)), value.get()); + assertEquals(new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.4)), value.get()); } public void testSimpleUpdate() { From 618782d8fe8c26298caf912795513b23c33db149 Mon Sep 17 00:00:00 2001 From: Jay Deng Date: Tue, 26 Mar 2024 19:34:30 -0700 Subject: [PATCH 48/74] Perform buildAggregation concurrently and support Composite Aggregations (#12697) Signed-off-by: Jay Deng --- CHANGELOG.md | 1 + .../AggregationsIntegrationIT.java | 23 ++++++++++++ .../aggregations/bucket/CompositeAggIT.java | 29 ++++++++------- .../AggregationCollectorManager.java | 14 ++------ .../search/aggregations/Aggregator.java | 20 ++++++++--- .../BucketCollectorProcessor.java | 36 +++++++++++++++++++ .../GlobalAggCollectorManager.java | 15 ++++++++ .../MultiBucketConsumerService.java | 8 ++--- .../NonGlobalAggCollectorManager.java | 15 ++++++++ .../CompositeAggregationFactory.java | 5 +-- .../GlobalOrdinalsStringTermsAggregator.java | 29 ++++++++++++--- .../search/internal/SearchContext.java | 6 ++++ .../AggregationProcessorTests.java | 6 ++++ .../opensearch/test/OpenSearchTestCase.java | 7 +++- 14 files changed, 173 insertions(+), 41 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b8ab06ce681b4..cf0a38c081392 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -104,6 +104,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added - Add a counter to node stat api to 
track shard going from idle to non-idle ([#12768](https://github.com/opensearch-project/OpenSearch/pull/12768)) +- [Concurrent Segment Search] Perform buildAggregation concurrently and support Composite Aggregations ([#12697](https://github.com/opensearch-project/OpenSearch/pull/12697)) ### Dependencies - Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896)) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java index 6059abce53c8b..4a8b00ea45738 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java @@ -38,6 +38,8 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.WriteRequest; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.search.aggregations.bucket.terms.IncludeExclude; @@ -56,6 +58,8 @@ import java.util.List; import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.aggregations.AggregationBuilders.global; +import static org.opensearch.search.aggregations.AggregationBuilders.stats; import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; @@ -164,4 +168,23 @@ private void runLargeStringAggregationTest(AggregationBuilder aggregation) { } assertTrue("Exception should have been thrown", exceptionThrown); } + + public void testAggsOnEmptyShards() { + // Create index with 5 shards but only 1 doc + assertAcked( + prepareCreate( + "idx", + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 5).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ).setMapping("score", "type=integer") + ); + client().prepareIndex("idx").setId("1").setSource("score", "5").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + + // Validate global agg does not throw an exception + assertSearchResponse( + client().prepareSearch("idx").addAggregation(global("global").subAggregation(stats("value_stats").field("score"))).get() + ); + + // Validate non-global agg does not throw an exception + assertSearchResponse(client().prepareSearch("idx").addAggregation(stats("value_stats").field("score")).get()); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/CompositeAggIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/CompositeAggIT.java index 5a38ba670f1dc..a743f22a2ff77 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/CompositeAggIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/CompositeAggIT.java @@ -26,6 +26,7 @@ import java.util.Collection; import java.util.List; +import static org.opensearch.indices.IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING; import static 
org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; @@ -50,23 +51,25 @@ public void setupSuiteScopeCluster() throws Exception { assertAcked( prepareCreate( "idx", - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), false) ).setMapping("type", "type=keyword", "num", "type=integer", "score", "type=integer") ); waitForRelocation(ClusterHealthStatus.GREEN); - client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "5").get(); - client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "11", "score", "50").get(); - refresh("idx"); - client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "2").get(); - client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "12", "score", "20").get(); - refresh("idx"); - client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "10").get(); - client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "15").get(); - refresh("idx"); - client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "1").get(); - client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "100").get(); - refresh("idx"); + indexRandom( + true, + client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "5"), + client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "11", "score", "50"), + client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "2"), + client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "12", "score", "20"), + client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "10"), + client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "15"), + client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "1"), + client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "100") + ); waitForRelocation(ClusterHealthStatus.GREEN); refresh(); diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregationCollectorManager.java b/server/src/main/java/org/opensearch/search/aggregations/AggregationCollectorManager.java index 0bb2d1d7ca933..94e9ce5063277 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregationCollectorManager.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregationCollectorManager.java @@ -15,9 +15,9 @@ import org.opensearch.search.query.ReduceableSearchResult; import java.io.IOException; -import java.util.ArrayList; import java.util.Collection; import java.util.List; +import java.util.Objects; /** * Common {@link CollectorManager} used by both concurrent and non-concurrent aggregation path and also for global and non-global @@ -56,17 +56,9 @@ public String getCollectorReason() { @Override public ReduceableSearchResult reduce(Collection collectors) throws IOException { - final List aggregators = context.bucketCollectorProcessor().toAggregators(collectors); 
- final List internals = new ArrayList<>(aggregators.size()); + final List internals = context.bucketCollectorProcessor().toInternalAggregations(collectors); + assert internals.stream().noneMatch(Objects::isNull); context.aggregations().resetBucketMultiConsumer(); - for (Aggregator aggregator : aggregators) { - try { - // post collection is called in ContextIndexSearcher after search on leaves are completed - internals.add(aggregator.buildTopLevel()); - } catch (IOException e) { - throw new AggregationExecutionException("Failed to build aggregation [" + aggregator.name() + "]", e); - } - } final InternalAggregations internalAggregations = InternalAggregations.from(internals); return buildAggregationResult(internalAggregations); diff --git a/server/src/main/java/org/opensearch/search/aggregations/Aggregator.java b/server/src/main/java/org/opensearch/search/aggregations/Aggregator.java index 8744d1f6a07d3..f4db8f61bf537 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/Aggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/Aggregator.java @@ -33,6 +33,7 @@ package org.opensearch.search.aggregations; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.SetOnce; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lease.Releasable; import org.opensearch.core.ParseField; @@ -61,6 +62,8 @@ @PublicApi(since = "1.0.0") public abstract class Aggregator extends BucketCollector implements Releasable { + private final SetOnce internalAggregation = new SetOnce<>(); + /** * Parses the aggregation request and creates the appropriate aggregator factory for it. * @@ -83,6 +86,13 @@ public interface Parser { AggregationBuilder parse(String aggregationName, XContentParser parser) throws IOException; } + /** + * Returns the InternalAggregation stored during post collection + */ + public InternalAggregation getPostCollectionAggregation() { + return internalAggregation.get(); + } + /** * Return the name of this aggregator. */ @@ -185,13 +195,15 @@ public interface BucketComparator { /** * Build the result of this aggregation if it is at the "top level" - * of the aggregation tree. If, instead, it is a sub-aggregation of - * another aggregation then the aggregation that contains it will call - * {@link #buildAggregations(long[])}. + * of the aggregation tree and save it. This should get called + * during post collection. If, instead, it is a sub-aggregation + * of another aggregation then the aggregation that contains + * it will call {@link #buildAggregations(long[])}. 
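The round trip this javadoc describes is small; a hedged sketch using only methods added in this patch (the aggregator variable is illustrative):

    // During post-collection, each top-level aggregator builds its result once
    // and caches it in the SetOnce field added above.
    InternalAggregation built = aggregator.buildTopLevel();
    // The reduce path later reads the cached value back instead of rebuilding it.
    InternalAggregation cached = aggregator.getPostCollectionAggregation();
    assert built == cached;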
*/ public final InternalAggregation buildTopLevel() throws IOException { assert parent() == null; - return buildAggregations(new long[] { 0 })[0]; + this.internalAggregation.set(buildAggregations(new long[] { 0 })[0]); + return internalAggregation.get(); } /** diff --git a/server/src/main/java/org/opensearch/search/aggregations/BucketCollectorProcessor.java b/server/src/main/java/org/opensearch/search/aggregations/BucketCollectorProcessor.java index 135fda71a757a..df05ce3f5c049 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/BucketCollectorProcessor.java +++ b/server/src/main/java/org/opensearch/search/aggregations/BucketCollectorProcessor.java @@ -72,6 +72,15 @@ public void processPostCollection(Collector collectorTree) throws IOException { } } else if (currentCollector instanceof BucketCollector) { ((BucketCollector) currentCollector).postCollection(); + + // Perform build aggregation during post collection + if (currentCollector instanceof Aggregator) { + ((Aggregator) currentCollector).buildTopLevel(); + } else if (currentCollector instanceof MultiBucketCollector) { + for (Collector innerCollector : ((MultiBucketCollector) currentCollector).getCollectors()) { + collectors.offer(innerCollector); + } + } } } } @@ -106,4 +115,31 @@ public List toAggregators(Collection collectors) { } return aggregators; } + + /** + * Unwraps the input collection of {@link Collector} to get the list of the {@link InternalAggregation}. The + * input is expected to contain the collectors related to Aggregations only as that is passed to {@link AggregationCollectorManager} + * during the reduce phase. This list of {@link InternalAggregation} is used to optionally perform reduce at shard level before + * returning response to coordinator + * @param collectors collection of aggregation collectors to reduce + * @return list of unwrapped {@link InternalAggregation} + */ + public List toInternalAggregations(Collection collectors) throws IOException { + List internalAggregations = new ArrayList<>(); + + final Deque allCollectors = new LinkedList<>(collectors); + while (!allCollectors.isEmpty()) { + Collector currentCollector = allCollectors.pop(); + if (currentCollector instanceof InternalProfileCollector) { + currentCollector = ((InternalProfileCollector) currentCollector).getCollector(); + } + + if (currentCollector instanceof Aggregator) { + internalAggregations.add(((Aggregator) currentCollector).getPostCollectionAggregation()); + } else if (currentCollector instanceof MultiBucketCollector) { + allCollectors.addAll(Arrays.asList(((MultiBucketCollector) currentCollector).getCollectors())); + } + } + return internalAggregations; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/GlobalAggCollectorManager.java b/server/src/main/java/org/opensearch/search/aggregations/GlobalAggCollectorManager.java index 8814cc3c435e1..db2a11b47fbc3 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/GlobalAggCollectorManager.java +++ b/server/src/main/java/org/opensearch/search/aggregations/GlobalAggCollectorManager.java @@ -12,8 +12,10 @@ import org.apache.lucene.search.CollectorManager; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.profile.query.CollectorResult; +import org.opensearch.search.query.ReduceableSearchResult; import java.io.IOException; +import java.util.Collection; import java.util.Collections; import java.util.Objects; @@ -42,6 +44,19 @@ public Collector newCollector() throws IOException { } } + @Override + public 
ReduceableSearchResult reduce(Collection collectors) throws IOException { + // If there are no leaves then in concurrent search case postCollection, and subsequently buildAggregation, will not be called in + // search path. Since we build the InternalAggregation in postCollection that will not get created in such cases either. Therefore + // we need to manually processPostCollection here to build empty InternalAggregation objects for this collector tree. + if (context.searcher().getLeafContexts().isEmpty()) { + for (Collector c : collectors) { + context.bucketCollectorProcessor().processPostCollection(c); + } + } + return super.reduce(collectors); + } + @Override protected AggregationReduceableSearchResult buildAggregationResult(InternalAggregations internalAggregations) { // Reduce the aggregations across slices before sending to the coordinator. We will perform shard level reduce as long as any slices diff --git a/server/src/main/java/org/opensearch/search/aggregations/MultiBucketConsumerService.java b/server/src/main/java/org/opensearch/search/aggregations/MultiBucketConsumerService.java index 1461dd3009b44..35186422fceaa 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/MultiBucketConsumerService.java +++ b/server/src/main/java/org/opensearch/search/aggregations/MultiBucketConsumerService.java @@ -131,12 +131,10 @@ public static class MultiBucketConsumer implements IntConsumer { private final int limit; private final CircuitBreaker breaker; - // aggregations execute in a single thread for both sequential - // and concurrent search, so no atomic here + // count is currently only updated in final reduce phase which is executed in single thread for both concurrent and non-concurrent + // search private int count; - - // will be updated by multiple threads in concurrent search - // hence making it as LongAdder + // will be updated by multiple threads in concurrent search hence making it as LongAdder private final LongAdder callCount; private volatile boolean circuitBreakerTripped; private final int availProcessors; diff --git a/server/src/main/java/org/opensearch/search/aggregations/NonGlobalAggCollectorManager.java b/server/src/main/java/org/opensearch/search/aggregations/NonGlobalAggCollectorManager.java index 8b0a1530b5505..5408a19c8ca50 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/NonGlobalAggCollectorManager.java +++ b/server/src/main/java/org/opensearch/search/aggregations/NonGlobalAggCollectorManager.java @@ -12,8 +12,10 @@ import org.apache.lucene.search.CollectorManager; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.profile.query.CollectorResult; +import org.opensearch.search.query.ReduceableSearchResult; import java.io.IOException; +import java.util.Collection; import java.util.Collections; import java.util.Objects; @@ -42,6 +44,19 @@ public Collector newCollector() throws IOException { } } + @Override + public ReduceableSearchResult reduce(Collection collectors) throws IOException { + // If there are no leaves then in concurrent search case postCollection, and subsequently buildAggregation, will not be called in + // search path. Since we build the InternalAggregation in postCollection that will not get created in such cases either. Therefore + // we need to manually processPostCollection here to build empty InternalAggregation objects for this collector tree. 
+ if (context.searcher().getLeafContexts().isEmpty()) { + for (Collector c : collectors) { + context.bucketCollectorProcessor().processPostCollection(c); + } + } + return super.reduce(collectors); + } + @Override protected AggregationReduceableSearchResult buildAggregationResult(InternalAggregations internalAggregations) { // Reduce the aggregations across slices before sending to the coordinator. We will perform shard level reduce as long as any slices diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java index 4af14ab014db5..6c5619a843fae 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java @@ -40,6 +40,7 @@ import org.opensearch.search.internal.SearchContext; import java.io.IOException; +import java.util.Arrays; import java.util.Map; /** @@ -80,7 +81,7 @@ protected Aggregator createInternal( @Override protected boolean supportsConcurrentSegmentSearch() { - // See https://github.com/opensearch-project/OpenSearch/issues/12331 for details - return false; + // Disable concurrent search if any scripting is used. See https://github.com/opensearch-project/OpenSearch/issues/12331 for details + return Arrays.stream(sources).noneMatch(CompositeValuesSourceConfig::hasScript); } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index 69fda2f3f6133..9e40f7b4c9b3e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -45,6 +45,7 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.PriorityQueue; +import org.opensearch.common.SetOnce; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.LongArray; @@ -94,8 +95,8 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr private final long valueCount; private final String fieldName; private Weight weight; - private final GlobalOrdLookupFunction lookupGlobalOrd; protected final CollectionStrategy collectionStrategy; + private final SetOnce dvs = new SetOnce<>(); protected int segmentsWithSingleValuedOrds = 0; protected int segmentsWithMultiValuedOrds = 0; @@ -129,11 +130,10 @@ public GlobalOrdinalsStringTermsAggregator( this.resultStrategy = resultStrategy.apply(this); // ResultStrategy needs a reference to the Aggregator to do its job. this.valuesSource = valuesSource; final IndexReader reader = context.searcher().getIndexReader(); - final SortedSetDocValues values = reader.leaves().size() > 0 + final SortedSetDocValues values = !reader.leaves().isEmpty() ? valuesSource.globalOrdinalsValues(context.searcher().getIndexReader().leaves().get(0)) : DocValues.emptySortedSet(); this.valueCount = values.getValueCount(); - this.lookupGlobalOrd = values::lookupOrd; this.acceptedGlobalOrdinals = includeExclude == null ? 
ALWAYS_TRUE : includeExclude.acceptedGlobalOrdinals(values)::get; if (remapGlobalOrds) { this.collectionStrategy = new RemapGlobalOrds(cardinality); @@ -885,7 +885,10 @@ PriorityQueue buildPriorityQueue(int size) { } StringTerms.Bucket convertTempBucketToRealBucket(OrdBucket temp) throws IOException { - BytesRef term = BytesRef.deepCopyOf(lookupGlobalOrd.apply(temp.globalOrd)); + // Recreate DocValues as needed for concurrent segment search + SortedSetDocValues values = getDocValues(); + BytesRef term = BytesRef.deepCopyOf(values.lookupOrd(temp.globalOrd)); + StringTerms.Bucket result = new StringTerms.Bucket(term, temp.docCount, null, showTermDocCountError, 0, format); result.bucketOrd = temp.bucketOrd; result.docCountError = 0; @@ -1001,7 +1004,9 @@ BucketUpdater bucketUpdater(long owningBucketOrd) long subsetSize = subsetSize(owningBucketOrd); return (spare, globalOrd, bucketOrd, docCount) -> { spare.bucketOrd = bucketOrd; - oversizedCopy(lookupGlobalOrd.apply(globalOrd), spare.termBytes); + // Recreate DocValues as needed for concurrent segment search + SortedSetDocValues values = getDocValues(); + oversizedCopy(values.lookupOrd(globalOrd), spare.termBytes); spare.subsetDf = docCount; spare.subsetSize = subsetSize; spare.supersetDf = backgroundFrequencies.freq(spare.termBytes); @@ -1086,4 +1091,18 @@ private void oversizedCopy(BytesRef from, BytesRef to) { * Predicate used for {@link #acceptedGlobalOrdinals} if there is no filter. */ private static final LongPredicate ALWAYS_TRUE = l -> true; + + /** + * If DocValues have not been initialized yet for reduce phase, create and set them. + */ + private SortedSetDocValues getDocValues() throws IOException { + if (dvs.get() == null) { + dvs.set( + !context.searcher().getIndexReader().leaves().isEmpty() + ? 
valuesSource.globalOrdinalsValues(context.searcher().getIndexReader().leaves().get(0)) + : DocValues.emptySortedSet() + ); + } + return dvs.get(); + } } diff --git a/server/src/main/java/org/opensearch/search/internal/SearchContext.java b/server/src/main/java/org/opensearch/search/internal/SearchContext.java index 3d13378e58e5d..8f6001dc06755 100644 --- a/server/src/main/java/org/opensearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/SearchContext.java @@ -113,6 +113,12 @@ public List toAggregators(Collection collectors) { // should not be called when there is no aggregation collector throw new IllegalStateException("Unexpected toAggregators call on NO_OP_BUCKET_COLLECTOR_PROCESSOR"); } + + @Override + public List toInternalAggregations(Collection collectors) { + // should not be called when there is no aggregation collector + throw new IllegalStateException("Unexpected toInternalAggregations call on NO_OP_BUCKET_COLLECTOR_PROCESSOR"); + } }; private final List releasables = new CopyOnWriteArrayList<>(); diff --git a/server/src/test/java/org/opensearch/search/aggregations/AggregationProcessorTests.java b/server/src/test/java/org/opensearch/search/aggregations/AggregationProcessorTests.java index d68b0911d3d01..c10233a72921f 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/AggregationProcessorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/AggregationProcessorTests.java @@ -187,10 +187,16 @@ protected LeafSlice[] slices(List leaves) { AggregationCollectorManager collectorManager; if (expectedNonGlobalAggsPerSlice > 0) { collectorManager = (AggregationCollectorManager) context.queryCollectorManagers().get(NonGlobalAggCollectorManager.class); + for (Collector c : nonGlobalCollectors) { + context.bucketCollectorProcessor().processPostCollection(c); + } collectorManager.reduce(nonGlobalCollectors).reduce(context.queryResult()); } if (expectedGlobalAggs > 0) { collectorManager = (AggregationCollectorManager) context.queryCollectorManagers().get(GlobalAggCollectorManager.class); + for (Collector c : globalCollectors) { + context.bucketCollectorProcessor().processPostCollection(c); + } ReduceableSearchResult result = collectorManager.reduce(globalCollectors); doReturn(result).when(testSearcher) .search(nullable(Query.class), ArgumentMatchers.>any()); diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java index aac3fca9e1e16..f381ebdb64fc2 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java @@ -211,7 +211,12 @@ "LuceneFixedGap", "LuceneVarGapFixedInterval", "LuceneVarGapDocFreqInterval", - "Lucene50" }) + "Lucene50", + "Lucene90", + "Lucene94", + "Lucene90", + "Lucene95", + "Lucene99" }) @LuceneTestCase.SuppressReproduceLine public abstract class OpenSearchTestCase extends LuceneTestCase { From 8d5a1d2ece8c4b2cd93d3fd9bab8e6c3359a03d1 Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Wed, 27 Mar 2024 19:53:07 +0800 Subject: [PATCH 49/74] Convert ingest processor supports ip type (#12818) * Convert ingest processor supports ip type Signed-off-by: Gao Binlong * Modify change log Signed-off-by: Gao Binlong * Add comment Signed-off-by: Gao Binlong --------- Signed-off-by: Gao Binlong --- CHANGELOG.md | 1 + .../ingest/common/ConvertProcessor.java | 14 ++++ 
.../ingest/common/ConvertProcessorTests.java | 25 ++++++ .../test/ingest/330_convert_processor.yml | 83 +++++++++++++++++++ 4 files changed, 123 insertions(+) create mode 100644 modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/330_convert_processor.yml diff --git a/CHANGELOG.md b/CHANGELOG.md index cf0a38c081392..f12f4f33acb5e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -103,6 +103,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added +- Convert ingest processor supports ip type ([#12818](https://github.com/opensearch-project/OpenSearch/pull/12818)) - Add a counter to node stat api to track shard going from idle to non-idle ([#12768](https://github.com/opensearch-project/OpenSearch/pull/12768)) - [Concurrent Segment Search] Perform buildAggregation concurrently and support Composite Aggregations ([#12697](https://github.com/opensearch-project/OpenSearch/pull/12697)) diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ConvertProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ConvertProcessor.java index 2a81fa5f4986e..c7b5a8978188f 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ConvertProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ConvertProcessor.java @@ -32,6 +32,7 @@ package org.opensearch.ingest.common; +import org.opensearch.common.network.InetAddresses; import org.opensearch.ingest.AbstractProcessor; import org.opensearch.ingest.ConfigurationUtils; import org.opensearch.ingest.IngestDocument; @@ -118,6 +119,19 @@ public Object convert(Object value) { return value.toString(); } }, + IP { + @Override + public Object convert(Object value) { + // If the value is a valid ipv4/ipv6 address, return the original value directly, because IpFieldType + // can accept a string value; this is simpler than returning an InetAddress object, which would need + // extra work such as serialization + if (value instanceof String && InetAddresses.isInetAddress(value.toString())) { + return value; + } else { + throw new IllegalArgumentException("[" + value + "] is not a valid ipv4/ipv6 address"); + } + } + }, AUTO { @Override public Object convert(Object value) { diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ConvertProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ConvertProcessorTests.java index 0ba0a39261d00..50ece9282888f 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ConvertProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ConvertProcessorTests.java @@ -550,4 +550,29 @@ public void testTargetField() throws Exception { assertThat(ingestDocument.getFieldValue(fieldName, String.class), equalTo(String.valueOf(randomInt))); assertThat(ingestDocument.getFieldValue(targetField, Integer.class), equalTo(randomInt)); } + + public void testConvertIP() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + String validIPString; + if (randomBoolean()) { + validIPString = "1.2.3.4"; + } else { + validIPString = "::1"; + } + String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, validIPString); + + Processor processor = new ConvertProcessor(randomAlphaOfLength(10), null, fieldName, fieldName, Type.IP, false); + processor.execute(ingestDocument); +
assertThat(ingestDocument.getFieldValue(fieldName, String.class), equalTo(validIPString)); + + String invalidIPString = randomAlphaOfLength(10); + fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, invalidIPString); + Processor processorWithInvalidIP = new ConvertProcessor(randomAlphaOfLength(10), null, fieldName, fieldName, Type.IP, false); + try { + processorWithInvalidIP.execute(ingestDocument); + fail("processor execute should have failed"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("[" + invalidIPString + "] is not a valid ipv4/ipv6 address")); + } + } } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/330_convert_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/330_convert_processor.yml new file mode 100644 index 0000000000000..994ed225dd624 --- /dev/null +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/330_convert_processor.yml @@ -0,0 +1,83 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "1" + ignore: 404 + +--- +"Test convert processor with ip type": + - skip: + version: " - 2.13.99" + reason: "introduced in 2.14.0" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "convert" : { + "field" : "raw_ip", + "type": "ip" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /\[1.1.1.\] is not a valid ipv4\/ipv6 address/ + index: + index: test + id: 1 + pipeline: "1" + body: { + raw_ip: "1.1.1." + } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "convert" : { + "field" : "raw_ip", + "target_field" : "ip_field", + "type" : "ip", + "ignore_failure" : true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + raw_ip: "1.1.1." 
+ } + - do: + get: + index: test + id: 1 + - match: { _source: { raw_ip: "1.1.1."} } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + raw_ip: "1.1.1.1" + } + - do: + get: + index: test + id: 1 + - match: { _source: { raw_ip: "1.1.1.1", ip_field: "1.1.1.1"} } From 1d1d37010e8e1ef70aab87d125de89002a500562 Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Wed, 27 Mar 2024 20:55:31 +0800 Subject: [PATCH 50/74] Update supported version for the bug fix which add more index blocks check for resize APIs (#12921) Signed-off-by: Gao Binlong --- .../resources/rest-api-spec/test/indices.clone/10_basic.yml | 6 +++--- .../rest-api-spec/test/indices.shrink/10_basic.yml | 6 +++--- .../resources/rest-api-spec/test/indices.split/10_basic.yml | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml index fa48820a71a89..07df09225c624 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml @@ -113,12 +113,12 @@ setup: index.number_of_replicas: 0 index.number_of_shards: 6 +# Related issue: https://github.com/opensearch-project/OpenSearch/issues/4845 --- "Returns error if target index's metadata write is blocked": - - skip: - version: " - 2.99.99" - reason: "only available in 3.0+" + version: " - 2.7.99" + reason: "the bug was fixed in 2.8.0" # block source index's write operations - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml index 426729e737978..67b5be7eb0fd5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml @@ -72,12 +72,12 @@ setup: - match: { _id: "1" } - match: { _source: { foo: "hello world" } } +# Related issue: https://github.com/opensearch-project/OpenSearch/issues/4845 --- "Returns error if target index's metadata write is blocked": - - skip: - version: " - 2.99.99" - reason: "only available in 3.0+" + version: " - 2.7.99" + reason: "the bug was fixed in 2.8.0" # block source index's write operations - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml index 50c2819eac9d5..096a61a765288 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml @@ -219,12 +219,12 @@ setup: index.number_of_replicas: 0 index.number_of_shards: 6 +# Related issue: https://github.com/opensearch-project/OpenSearch/issues/4845 --- "Returns error if target index's metadata write is blocked": - - skip: - version: " - 2.99.99" - reason: "only available in 3.0+" + version: " - 2.7.99" + reason: "the bug was fixed in 2.8.0" # block source index's write operations - do: From 5db84d16a96932e4530ff1303bdd6edde52f4caf Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Wed, 27 Mar 2024 12:11:53 -0400 Subject: [PATCH 51/74] Allow setting KEYSTORE_PASSWORD through env variable (#12865) --- CHANGELOG.md | 1 + distribution/src/bin/opensearch | 12 +++++++----- distribution/src/bin/opensearch.bat | 13 ++++++++----- 3 files changed, 
16 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f12f4f33acb5e..8a18660a2d84c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -105,6 +105,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Added - Convert ingest processor supports ip type ([#12818](https://github.com/opensearch-project/OpenSearch/pull/12818)) - Add a counter to node stat api to track shard going from idle to non-idle ([#12768](https://github.com/opensearch-project/OpenSearch/pull/12768)) +- Allow setting KEYSTORE_PASSWORD through env variable ([#12865](https://github.com/opensearch-project/OpenSearch/pull/12865)) - [Concurrent Segment Search] Perform buildAggregation concurrently and support Composite Aggregations ([#12697](https://github.com/opensearch-project/OpenSearch/pull/12697)) ### Dependencies diff --git a/distribution/src/bin/opensearch b/distribution/src/bin/opensearch index 947d1167f79f2..8a3b0a009437f 100755 --- a/distribution/src/bin/opensearch +++ b/distribution/src/bin/opensearch @@ -36,14 +36,16 @@ fi # get keystore password before setting java options to avoid # conflicting GC configurations for the keystore tools -unset KEYSTORE_PASSWORD -KEYSTORE_PASSWORD= if [[ $CHECK_KEYSTORE = true ]] \ && bin/opensearch-keystore has-passwd --silent then - if ! read -s -r -p "OpenSearch keystore password: " KEYSTORE_PASSWORD ; then - echo "Failed to read keystore password on console" 1>&2 - exit 1 + if [[ ! -z "${KEYSTORE_PASSWORD}" ]]; then + echo "Using value of KEYSTORE_PASSWORD from the environment" + else + if ! read -s -r -p "OpenSearch keystore password: " KEYSTORE_PASSWORD ; then + echo "Failed to read keystore password on console" 1>&2 + exit 1 + fi fi fi diff --git a/distribution/src/bin/opensearch.bat b/distribution/src/bin/opensearch.bat index cce21504c55b7..b7ecab24165fa 100644 --- a/distribution/src/bin/opensearch.bat +++ b/distribution/src/bin/opensearch.bat @@ -62,14 +62,17 @@ if not exist "%SERVICE_LOG_DIR%" ( mkdir "%SERVICE_LOG_DIR%" ) -SET KEYSTORE_PASSWORD= IF "%checkpassword%"=="Y" ( CALL "%~dp0opensearch-keystore.bat" has-passwd --silent IF !ERRORLEVEL! EQU 0 ( - SET /P KEYSTORE_PASSWORD=OpenSearch keystore password: - IF !ERRORLEVEL! NEQ 0 ( - ECHO Failed to read keystore password on standard input - EXIT /B !ERRORLEVEL! + if defined KEYSTORE_PASSWORD ( + ECHO Using value of KEYSTORE_PASSWORD from the environment + ) else ( + SET /P KEYSTORE_PASSWORD=OpenSearch keystore password: + IF !ERRORLEVEL! NEQ 0 ( + ECHO Failed to read keystore password on standard input + EXIT /B !ERRORLEVEL! + ) ) ) ) From 6ddbdcdc89507c95ca320b4112075cdf764e5d60 Mon Sep 17 00:00:00 2001 From: Rishabh Maurya Date: Wed, 27 Mar 2024 15:50:30 -0700 Subject: [PATCH 52/74] [DerivedFields] PR2 - Implementation for all supported types and DerivedFieldType (#12808) Implementation for all supported types and DerivedFieldType. 
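Before the type list that follows, a hedged sketch of the lookup surface this patch adds (usable only from within org.opensearch.index.mapper, since the enum is package-private; the field name and value are illustrative):

    import java.util.function.Function;
    import org.apache.lucene.index.IndexableField;

    // Resolve the generator registered for a supported type name...
    Function<Object, IndexableField> generator =
        DerivedFieldSupportedTypes.getIndexableFieldGeneratorType("keyword", "my_derived_field");
    // ...then materialize a script-emitted value as a Lucene field, e.g. for the
    // in-memory index used by DerivedFieldQuery.
    IndexableField field = generator.apply("emitted-value");   // a KeywordField with Store.NO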
We support the following types so far: boolean date geo_point ip keyword long double --------- Signed-off-by: Rishabh Maurya --- .../mapper/DerivedFieldSupportedTypes.java | 156 ++++++++ .../index/mapper/DerivedFieldType.java | 363 ++++++++++++++++++ .../index/mapper/KeywordFieldMapper.java | 2 +- .../index/mapper/DerivedFieldTypeTests.java | 94 +++++ 4 files changed, 614 insertions(+), 1 deletion(-) create mode 100644 server/src/main/java/org/opensearch/index/mapper/DerivedFieldSupportedTypes.java create mode 100644 server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java create mode 100644 server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java diff --git a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldSupportedTypes.java b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldSupportedTypes.java new file mode 100644 index 0000000000000..10b5c4a0f7157 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldSupportedTypes.java @@ -0,0 +1,156 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.document.DoubleField; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.InetAddressPoint; +import org.apache.lucene.document.KeywordField; +import org.apache.lucene.document.LatLonPoint; +import org.apache.lucene.document.LongField; +import org.apache.lucene.document.LongPoint; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexableField; +import org.opensearch.Version; +import org.opensearch.common.Booleans; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.network.InetAddresses; + +import java.net.InetAddress; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * Contains logic to get the FieldMapper for a given type of derived field. Also, for a given type of derived field, + * it is used to create an IndexableField for the provided type and object. It is useful when indexing into + * lucene MemoryIndex in {@link org.opensearch.index.query.DerivedFieldQuery}. + */ +enum DerivedFieldSupportedTypes { + + BOOLEAN("boolean", (name, context) -> { + BooleanFieldMapper.Builder builder = new BooleanFieldMapper.Builder(name); + return builder.build(context); + }, name -> o -> { + // Trying to mimic the logic for parsing source value as used in BooleanFieldMapper valueFetcher + Boolean value; + if (o instanceof Boolean) { + value = (Boolean) o; + } else { + String textValue = o.toString(); + value = Booleans.parseBooleanStrict(textValue, false); + } + return new Field(name, value ? "T" : "F", BooleanFieldMapper.Defaults.FIELD_TYPE); + }), + DATE("date", (name, context) -> { + // TODO: should we support mapping settings exposed by a given field type from derived fields too? + // for example, support `format` for date type? 
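    // Illustrative aside, an assumption rather than part of the patch: a derived
    // "date" script is expected to emit an epoch-millis long, for example
    //     emit(someInstant.toEpochMilli());
    // which the name -> o -> new LongPoint(name, (long) o) generator below turns
    // into a LongPoint for matching.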
+ DateFieldMapper.Builder builder = new DateFieldMapper.Builder( + name, + DateFieldMapper.Resolution.MILLISECONDS, + DateFieldMapper.getDefaultDateTimeFormatter(), + false, + Version.CURRENT + ); + return builder.build(context); + }, name -> o -> new LongPoint(name, (long) o)), + GEO_POINT("geo_point", (name, context) -> { + GeoPointFieldMapper.Builder builder = new GeoPointFieldMapper.Builder(name); + return builder.build(context); + }, name -> o -> { + // convert o to array of double + if (!(o instanceof List) || ((List) o).size() != 2 || !(((List) o).get(0) instanceof Double)) { + throw new ClassCastException("geo_point should be in format emit(double lat, double lon) for derived fields"); + } + return new LatLonPoint(name, (Double) ((List) o).get(0), (Double) ((List) o).get(1)); + }), + IP("ip", (name, context) -> { + IpFieldMapper.Builder builder = new IpFieldMapper.Builder(name, false, Version.CURRENT); + return builder.build(context); + }, name -> o -> { + InetAddress address; + if (o instanceof InetAddress) { + address = (InetAddress) o; + } else { + address = InetAddresses.forString(o.toString()); + } + return new InetAddressPoint(name, address); + }), + KEYWORD("keyword", (name, context) -> { + FieldType dummyFieldType = new FieldType(); + dummyFieldType.setIndexOptions(IndexOptions.DOCS_AND_FREQS); + KeywordFieldMapper.Builder keywordBuilder = new KeywordFieldMapper.Builder(name); + KeywordFieldMapper.KeywordFieldType keywordFieldType = keywordBuilder.buildFieldType(context, dummyFieldType); + keywordFieldType.setIndexAnalyzer(Lucene.KEYWORD_ANALYZER); + return new KeywordFieldMapper( + name, + dummyFieldType, + keywordFieldType, + keywordBuilder.multiFieldsBuilder.build(keywordBuilder, context), + keywordBuilder.copyTo.build(), + keywordBuilder + ); + }, name -> o -> new KeywordField(name, (String) o, Field.Store.NO)), + LONG("long", (name, context) -> { + NumberFieldMapper.Builder longBuilder = new NumberFieldMapper.Builder(name, NumberFieldMapper.NumberType.LONG, false, false); + return longBuilder.build(context); + }, name -> o -> new LongField(name, Long.parseLong(o.toString()), Field.Store.NO)), + DOUBLE("double", (name, context) -> { + NumberFieldMapper.Builder doubleBuilder = new NumberFieldMapper.Builder(name, NumberFieldMapper.NumberType.DOUBLE, false, false); + return doubleBuilder.build(context); + }, name -> o -> new DoubleField(name, Double.parseDouble(o.toString()), Field.Store.NO)); + + final String name; + private final BiFunction builder; + + private final Function> indexableFieldBuilder; + + DerivedFieldSupportedTypes( + String name, + BiFunction builder, + Function> indexableFieldBuilder + ) { + this.name = name; + this.builder = builder; + this.indexableFieldBuilder = indexableFieldBuilder; + } + + public String getName() { + return name; + } + + private FieldMapper getFieldMapper(String name, Mapper.BuilderContext context) { + return builder.apply(name, context); + } + + private Function getIndexableFieldGenerator(String name) { + return indexableFieldBuilder.apply(name); + } + + private static final Map enumMap = Arrays.stream(DerivedFieldSupportedTypes.values()) + .collect(Collectors.toMap(DerivedFieldSupportedTypes::getName, enumValue -> enumValue)); + + public static FieldMapper getFieldMapperFromType(String type, String name, Mapper.BuilderContext context) { + if (!enumMap.containsKey(type)) { + throw new IllegalArgumentException("Type [" + type + "] isn't supported in Derived field context."); + } + return enumMap.get(type).getFieldMapper(name, 
context); + } + + public static Function getIndexableFieldGeneratorType(String type, String name) { + if (!enumMap.containsKey(type)) { + throw new IllegalArgumentException("Type [" + type + "] isn't supported in Derived field context."); + } + return enumMap.get(type).getIndexableFieldGenerator(name); + } +} diff --git a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java new file mode 100644 index 0000000000000..abdca7879cc94 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java @@ -0,0 +1,363 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.queries.spans.SpanMultiTermQueryWrapper; +import org.apache.lucene.queries.spans.SpanQuery; +import org.apache.lucene.search.MultiTermQuery; +import org.apache.lucene.search.Query; +import org.opensearch.common.Nullable; +import org.opensearch.common.geo.ShapeRelation; +import org.opensearch.common.time.DateMathParser; +import org.opensearch.common.unit.Fuzziness; +import org.opensearch.index.query.DerivedFieldQuery; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.script.DerivedFieldScript; +import org.opensearch.script.Script; +import org.opensearch.search.lookup.SearchLookup; + +import java.io.IOException; +import java.time.ZoneId; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.function.Function; + +/** + * MappedFieldType for Derived Fields + * Contains logic to execute different type of queries on a derived field of given type. 
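Every query method in this class follows one recurring shape: build the query with the underlying concrete field type, then wrap it so values are derived from _source at match time. A condensed sketch (the helper method is hypothetical; the real class inlines this pattern in each method):

    // Hypothetical distillation of the pattern repeated in the methods below.
    private Query wrapDerived(Query delegated, QueryShardContext context) {
        DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context));
        return new DerivedFieldQuery(
            delegated,                                    // query built by the underlying field type
            valueFetcher,                                 // runs the script against _source
            context.lookup(),
            indexableFieldGenerator,                      // turns emitted values into Lucene fields
            typeFieldMapper.mappedFieldType.indexAnalyzer()
        );
    }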
+ * @opensearch.internal + */ +public final class DerivedFieldType extends MappedFieldType { + private final String type; + + private final Script script; + + FieldMapper typeFieldMapper; + + final Function indexableFieldGenerator; + + public DerivedFieldType( + String name, + String type, + Script script, + boolean isIndexed, + boolean isStored, + boolean hasDocValues, + Map meta, + FieldMapper typeFieldMapper, + Function fieldFunction + ) { + super(name, isIndexed, isStored, hasDocValues, typeFieldMapper.fieldType().getTextSearchInfo(), meta); + this.type = type; + this.script = script; + this.typeFieldMapper = typeFieldMapper; + this.indexableFieldGenerator = fieldFunction; + } + + public DerivedFieldType( + String name, + String type, + Script script, + FieldMapper typeFieldMapper, + Function fieldFunction + ) { + this(name, type, script, false, false, false, Collections.emptyMap(), typeFieldMapper, fieldFunction); + } + + @Override + public String typeName() { + return "derived"; + } + + public String getType() { + return type; + } + + @Override + public DerivedFieldValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { + if (format != null) { + throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats."); + } + return new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + } + + @Override + public Query termQuery(Object value, QueryShardContext context) { + Query query = typeFieldMapper.mappedFieldType.termQuery(value, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query termQueryCaseInsensitive(Object value, @Nullable QueryShardContext context) { + Query query = typeFieldMapper.mappedFieldType.termQueryCaseInsensitive(value, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query termsQuery(List values, @Nullable QueryShardContext context) { + Query query = typeFieldMapper.mappedFieldType.termsQuery(values, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query rangeQuery( + Object lowerTerm, + Object upperTerm, + boolean includeLower, + boolean includeUpper, + ShapeRelation relation, + ZoneId timeZone, + DateMathParser parser, + QueryShardContext context + ) { + Query query = typeFieldMapper.mappedFieldType.rangeQuery( + lowerTerm, + upperTerm, + includeLower, + includeUpper, + relation, + timeZone, + parser, + context + ); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query fuzzyQuery( + Object value, + Fuzziness fuzziness, + int prefixLength, + int maxExpansions, + boolean 
transpositions, + QueryShardContext context + ) { + Query query = typeFieldMapper.mappedFieldType.fuzzyQuery(value, fuzziness, prefixLength, maxExpansions, transpositions, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query fuzzyQuery( + Object value, + Fuzziness fuzziness, + int prefixLength, + int maxExpansions, + boolean transpositions, + @Nullable MultiTermQuery.RewriteMethod method, + QueryShardContext context + ) { + Query query = typeFieldMapper.mappedFieldType.fuzzyQuery( + value, + fuzziness, + prefixLength, + maxExpansions, + transpositions, + method, + context + ); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query prefixQuery( + String value, + @Nullable MultiTermQuery.RewriteMethod method, + boolean caseInsensitive, + QueryShardContext context + ) { + Query query = typeFieldMapper.mappedFieldType.prefixQuery(value, method, caseInsensitive, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query wildcardQuery( + String value, + @Nullable MultiTermQuery.RewriteMethod method, + boolean caseInsensitive, + QueryShardContext context + ) { + Query query = typeFieldMapper.mappedFieldType.wildcardQuery(value, method, caseInsensitive, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query normalizedWildcardQuery(String value, @Nullable MultiTermQuery.RewriteMethod method, QueryShardContext context) { + Query query = typeFieldMapper.mappedFieldType.normalizedWildcardQuery(value, method, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query regexpQuery( + String value, + int syntaxFlags, + int matchFlags, + int maxDeterminizedStates, + @Nullable MultiTermQuery.RewriteMethod method, + QueryShardContext context + ) { + Query query = typeFieldMapper.mappedFieldType.regexpQuery(value, syntaxFlags, matchFlags, maxDeterminizedStates, method, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query phraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements, QueryShardContext context) throws IOException { + Query query = typeFieldMapper.mappedFieldType.phraseQuery(stream, slop, 
enablePositionIncrements, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query multiPhraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements, QueryShardContext context) + throws IOException { + Query query = typeFieldMapper.mappedFieldType.multiPhraseQuery(stream, slop, enablePositionIncrements, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions, QueryShardContext context) throws IOException { + Query query = typeFieldMapper.mappedFieldType.phrasePrefixQuery(stream, slop, maxExpansions, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public SpanQuery spanPrefixQuery(String value, SpanMultiTermQueryWrapper.SpanRewriteMethod method, QueryShardContext context) { + throw new IllegalArgumentException( + "Can only use span prefix queries on text fields - not on [" + name() + "] which is of type [" + typeName() + "]" + ); + } + + @Override + public Query distanceFeatureQuery(Object origin, String pivot, float boost, QueryShardContext context) { + Query query = typeFieldMapper.mappedFieldType.distanceFeatureQuery(origin, pivot, boost, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query existsQuery(QueryShardContext context) { + throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] does not support exist queries"); + } + + @Override + public boolean isAggregatable() { + return false; + } + + private DerivedFieldScript.LeafFactory getDerivedFieldLeafFactory(QueryShardContext context) { + if (!context.documentMapper("").sourceMapper().enabled()) { + throw new IllegalArgumentException( + "DerivedFieldQuery error: unable to fetch fields from _source field: _source is disabled in the mappings " + + "for index [" + + context.index().getName() + + "]" + ); + } + DerivedFieldScript.Factory factory = context.compile(script, DerivedFieldScript.CONTEXT); + return factory.newFactory(script.getParams(), context.lookup()); + } +} diff --git a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java index c14b2c92c89c3..42b974734e5e7 100644 --- a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java @@ -218,7 +218,7 @@ protected List> getParameters() { ); } - private KeywordFieldType buildFieldType(BuilderContext context, FieldType fieldType) { + protected KeywordFieldType buildFieldType(BuilderContext 
context, FieldType fieldType) { NamedAnalyzer normalizer = Lucene.KEYWORD_ANALYZER; NamedAnalyzer searchAnalyzer = Lucene.KEYWORD_ANALYZER; String normalizerName = this.normalizer.getValue(); diff --git a/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java new file mode 100644 index 0000000000000..72fb7c88cc478 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java @@ -0,0 +1,94 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.document.DoubleField; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.InetAddressPoint; +import org.apache.lucene.document.KeywordField; +import org.apache.lucene.document.LatLonPoint; +import org.apache.lucene.document.LongField; +import org.apache.lucene.document.LongPoint; +import org.opensearch.script.Script; + +import java.util.List; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class DerivedFieldTypeTests extends FieldTypeTestCase { + + private DerivedFieldType createDerivedFieldType(String type) { + Mapper.BuilderContext context = mock(Mapper.BuilderContext.class); + when(context.path()).thenReturn(new ContentPath()); + return new DerivedFieldType( + type + " _derived_field", + type, + new Script(""), + DerivedFieldSupportedTypes.getFieldMapperFromType(type, type + "_derived_field", context), + DerivedFieldSupportedTypes.getIndexableFieldGeneratorType(type, type + "_derived_field") + ); + } + + public void testBooleanType() { + DerivedFieldType dft = createDerivedFieldType("boolean"); + assertTrue(dft.typeFieldMapper instanceof BooleanFieldMapper); + assertTrue(dft.indexableFieldGenerator.apply(true) instanceof Field); + assertTrue(dft.indexableFieldGenerator.apply(false) instanceof Field); + } + + public void testDateType() { + DerivedFieldType dft = createDerivedFieldType("date"); + assertTrue(dft.typeFieldMapper instanceof DateFieldMapper); + assertTrue(dft.indexableFieldGenerator.apply(System.currentTimeMillis()) instanceof LongPoint); + expectThrows(Exception.class, () -> dft.indexableFieldGenerator.apply("blah")); + } + + public void testGeoPointType() { + DerivedFieldType dft = createDerivedFieldType("geo_point"); + assertTrue(dft.typeFieldMapper instanceof GeoPointFieldMapper); + assertTrue(dft.indexableFieldGenerator.apply(List.of(10.0, 20.0)) instanceof LatLonPoint); + expectThrows(ClassCastException.class, () -> dft.indexableFieldGenerator.apply(List.of(10.0))); + expectThrows(ClassCastException.class, () -> dft.indexableFieldGenerator.apply(List.of())); + expectThrows(ClassCastException.class, () -> dft.indexableFieldGenerator.apply(List.of("10"))); + expectThrows(ClassCastException.class, () -> dft.indexableFieldGenerator.apply(List.of(10.0, 20.0, 30.0))); + } + + public void testIPType() { + DerivedFieldType dft = createDerivedFieldType("ip"); + assertTrue(dft.typeFieldMapper instanceof IpFieldMapper); + assertTrue(dft.indexableFieldGenerator.apply("127.0.0.1") instanceof InetAddressPoint); + expectThrows(Exception.class, () -> dft.indexableFieldGenerator.apply("blah")); + } + + public void testKeywordType() { + DerivedFieldType dft = createDerivedFieldType("keyword"); + assertTrue(dft.typeFieldMapper 
instanceof KeywordFieldMapper); + assertTrue(dft.indexableFieldGenerator.apply("test_keyword") instanceof KeywordField); + expectThrows(Exception.class, () -> dft.indexableFieldGenerator.apply(10)); + } + + public void testLongType() { + DerivedFieldType dft = createDerivedFieldType("long"); + assertTrue(dft.typeFieldMapper instanceof NumberFieldMapper); + assertTrue(dft.indexableFieldGenerator.apply(10) instanceof LongField); + expectThrows(Exception.class, () -> dft.indexableFieldGenerator.apply(10.0)); + } + + public void testDoubleType() { + DerivedFieldType dft = createDerivedFieldType("double"); + assertTrue(dft.typeFieldMapper instanceof NumberFieldMapper); + assertTrue(dft.indexableFieldGenerator.apply(10.0) instanceof DoubleField); + expectThrows(Exception.class, () -> dft.indexableFieldGenerator.apply("")); + } + + public void testUnsupportedType() { + expectThrows(IllegalArgumentException.class, () -> createDerivedFieldType("match_only_text")); + } +} From 8ad0dc09d3e852d74cd7eac65882ea7674a282d9 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Thu, 28 Mar 2024 08:44:27 +0530 Subject: [PATCH 53/74] [Remote Store] Add RemoteStoreSettings class to handle remote store related settings (#12838) * Add RemoteStoreSettings class to handle remote store related settings --------- Signed-off-by: Sachin Kale Co-authored-by: Sachin Kale --- .../opensearch/index/shard/IndexShardIT.java | 3 +- .../opensearch/remotestore/RemoteStoreIT.java | 11 +-- .../common/settings/ClusterSettings.java | 8 +- .../org/opensearch/index/IndexModule.java | 9 +- .../org/opensearch/index/IndexService.java | 11 +-- .../opensearch/index/shard/IndexShard.java | 11 ++- .../shard/RemoteStoreRefreshListener.java | 2 +- .../indices/DefaultRemoteStoreSettings.java | 26 ++++++ .../opensearch/indices/IndicesService.java | 35 ++------ .../indices/RemoteStoreSettings.java | 90 +++++++++++++++++++ .../indices/recovery/RecoverySettings.java | 32 ------- .../main/java/org/opensearch/node/Node.java | 6 +- .../opensearch/index/IndexModuleTests.java | 5 +- .../RemoteStoreRefreshListenerTests.java | 10 +-- .../indices/IndicesServiceTests.java | 5 +- ...RemoteStoreSettingsDynamicUpdateTests.java | 69 ++++++++++++++ .../RecoverySettingsDynamicUpdateTests.java | 45 ---------- .../snapshots/SnapshotResiliencyTests.java | 4 +- .../index/shard/IndexShardTestCase.java | 3 +- 19 files changed, 249 insertions(+), 136 deletions(-) create mode 100644 server/src/main/java/org/opensearch/indices/DefaultRemoteStoreSettings.java create mode 100644 server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java create mode 100644 server/src/test/java/org/opensearch/indices/RemoteStoreSettingsDynamicUpdateTests.java diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java index 7e0c1630a76e4..d218f0a985cf3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java @@ -84,6 +84,7 @@ import org.opensearch.index.translog.TestTranslog; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogStats; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; @@ -711,9 +712,9 @@ public 
static final IndexShard newIndexShard( SegmentReplicationCheckpointPublisher.EMPTY, null, null, - () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, nodeId, null, + DefaultRemoteStoreSettings.INSTANCE, false ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index e1997fea3433a..46e5b7aa28318 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -31,6 +31,7 @@ import org.opensearch.index.shard.IndexShardClosedException; import org.opensearch.index.translog.Translog.Durability; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; @@ -56,7 +57,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.comparesEqualTo; @@ -189,7 +190,7 @@ public void testStaleCommitDeletionWithInvokeFlush() throws Exception { Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata"); IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME); - int lastNMetadataFilesToKeep = indexShard.getRecoverySettings().getMinRemoteSegmentMetadataFiles(); + int lastNMetadataFilesToKeep = indexShard.getRemoteStoreSettings().getMinRemoteSegmentMetadataFiles(); // Delete is async. 
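         // so poll with assertBusy until the retention limit is reflected in the metadata file count.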
assertBusy(() -> { int actualFileCount = getFileCount(indexPath); @@ -224,7 +225,7 @@ public void testStaleCommitDeletionWithoutInvokeFlush() throws Exception { public void testStaleCommitDeletionWithMinSegmentFiles_3() throws Exception { Settings.Builder settings = Settings.builder() - .put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "3"); + .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "3"); internalCluster().startNode(settings); createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); @@ -243,7 +244,7 @@ public void testStaleCommitDeletionWithMinSegmentFiles_3() throws Exception { public void testStaleCommitDeletionWithMinSegmentFiles_Disabled() throws Exception { Settings.Builder settings = Settings.builder() - .put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "-1"); + .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "-1"); internalCluster().startNode(settings); createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); @@ -469,7 +470,7 @@ public void testAsyncDurabilityThrowsExceptionWhenRestrictSettingTrue() throws E private void assertClusterRemoteBufferInterval(TimeValue expectedBufferInterval, String dataNode) { IndicesService indicesService = internalCluster().getInstance(IndicesService.class, dataNode); - assertEquals(expectedBufferInterval, indicesService.getClusterRemoteTranslogBufferInterval()); + assertEquals(expectedBufferInterval, indicesService.getRemoteStoreSettings().getClusterRemoteTranslogBufferInterval()); } private void assertBufferInterval(TimeValue expectedBufferInterval, IndexShard indexShard) { diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 4f1815de224db..8760ee3c94309 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -117,6 +117,7 @@ import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.IndicesRequestCache; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.indices.analysis.HunspellService; import org.opensearch.indices.breaker.BreakerSettings; @@ -297,7 +298,6 @@ public void apply(Settings value, Settings current, Settings previous) { RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING, RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING, RecoverySettings.INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT, - RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING, @@ -706,7 +706,6 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteClusterStateService.METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING, RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING, RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING, - IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, 
IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING, IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING, IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING, @@ -723,7 +722,10 @@ public void apply(Settings value, Settings current, Settings previous) { // Concurrent segment search settings SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING, - SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING + SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING, + + RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, + RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING ) ) ); diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java index 6ac10a221d49e..3c4cb4fd596c1 100644 --- a/server/src/main/java/org/opensearch/index/IndexModule.java +++ b/server/src/main/java/org/opensearch/index/IndexModule.java @@ -79,6 +79,7 @@ import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.index.translog.TranslogFactory; import org.opensearch.indices.IndicesQueryCache; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.indices.recovery.RecoverySettings; @@ -604,8 +605,8 @@ public IndexService newIndexService( IndexStorePlugin.DirectoryFactory remoteDirectoryFactory, BiFunction translogFactorySupplier, Supplier clusterDefaultRefreshIntervalSupplier, - Supplier clusterRemoteTranslogBufferIntervalSupplier, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RemoteStoreSettings remoteStoreSettings ) throws IOException { final IndexEventListener eventListener = freeze(); Function> readerWrapperFactory = indexReaderWrapper @@ -663,8 +664,8 @@ public IndexService newIndexService( recoveryStateFactory, translogFactorySupplier, clusterDefaultRefreshIntervalSupplier, - clusterRemoteTranslogBufferIntervalSupplier, - recoverySettings + recoverySettings, + remoteStoreSettings ); success = true; return indexService; diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index 109bda65b1fd8..03cf8f9182211 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -94,6 +94,7 @@ import org.opensearch.index.store.Store; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogFactory; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; @@ -183,8 +184,8 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private final ValuesSourceRegistry valuesSourceRegistry; private final BiFunction translogFactorySupplier; private final Supplier clusterDefaultRefreshIntervalSupplier; - private final Supplier clusterRemoteTranslogBufferIntervalSupplier; private final RecoverySettings recoverySettings; + private final RemoteStoreSettings remoteStoreSettings; public IndexService( IndexSettings indexSettings, @@ -219,8 +220,8 @@ public IndexService( IndexStorePlugin.RecoveryStateFactory recoveryStateFactory, BiFunction translogFactorySupplier, 
Supplier clusterDefaultRefreshIntervalSupplier, - Supplier clusterRemoteTranslogBufferIntervalSupplier, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RemoteStoreSettings remoteStoreSettings ) { super(indexSettings); this.allowExpensiveQueries = allowExpensiveQueries; @@ -296,8 +297,8 @@ public IndexService( this.globalCheckpointTask = new AsyncGlobalCheckpointTask(this); this.retentionLeaseSyncTask = new AsyncRetentionLeaseSyncTask(this); this.translogFactorySupplier = translogFactorySupplier; - this.clusterRemoteTranslogBufferIntervalSupplier = clusterRemoteTranslogBufferIntervalSupplier; this.recoverySettings = recoverySettings; + this.remoteStoreSettings = remoteStoreSettings; updateFsyncTaskIfNecessary(); } @@ -549,9 +550,9 @@ public synchronized IndexShard createShard( this.indexSettings.isSegRepEnabledOrRemoteNode() ? checkpointPublisher : null, remoteStore, remoteStoreStatsTrackerFactory, - clusterRemoteTranslogBufferIntervalSupplier, nodeEnv.nodeId(), recoverySettings, + remoteStoreSettings, seedRemote ); eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 551dd338eed2a..f3a62cdf1436f 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -179,6 +179,7 @@ import org.opensearch.index.warmer.WarmerStats; import org.opensearch.indices.IndexingMemoryController; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryFailedException; @@ -349,6 +350,7 @@ Runnable getGlobalCheckpointSyncer() { private final List internalRefreshListener = new ArrayList<>(); private final RemoteStoreFileDownloader fileDownloader; private final RecoverySettings recoverySettings; + private final RemoteStoreSettings remoteStoreSettings; /* On source doc rep node, It will be DOCREP_NON_MIGRATING. 
On source remote node , it will be REMOTE_MIGRATING_SEEDED when relocating from remote node @@ -381,9 +383,9 @@ public IndexShard( @Nullable final SegmentReplicationCheckpointPublisher checkpointPublisher, @Nullable final Store remoteStore, final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, - final Supplier clusterRemoteTranslogBufferIntervalSupplier, final String nodeId, final RecoverySettings recoverySettings, + final RemoteStoreSettings remoteStoreSettings, boolean seedRemote ) throws IOException { super(shardRouting.shardId(), indexSettings); @@ -405,7 +407,7 @@ public IndexShard( threadPool, this::getEngine, indexSettings.isRemoteNode(), - () -> getRemoteTranslogUploadBufferInterval(clusterRemoteTranslogBufferIntervalSupplier) + () -> getRemoteTranslogUploadBufferInterval(remoteStoreSettings::getClusterRemoteTranslogBufferInterval) ); this.mapperService = mapperService; this.indexCache = indexCache; @@ -481,6 +483,7 @@ public boolean shouldCache(Query query) { : mapperService.documentMapper().mappers().containsTimeStampField(); this.remoteStoreStatsTrackerFactory = remoteStoreStatsTrackerFactory; this.recoverySettings = recoverySettings; + this.remoteStoreSettings = remoteStoreSettings; this.fileDownloader = new RemoteStoreFileDownloader(shardRouting.shardId(), threadPool, recoverySettings); this.shardMigrationState = getShardMigrationState(indexSettings, seedRemote); } @@ -598,6 +601,10 @@ public RecoverySettings getRecoverySettings() { return recoverySettings; } + public RemoteStoreSettings getRemoteStoreSettings() { + return remoteStoreSettings; + } + public RemoteStoreFileDownloader getFileDownloader() { return fileDownloader; } diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index fb96102bc6094..351aec6e3af6c 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -224,7 +224,7 @@ private boolean syncSegments() { // is considered as a first refresh post commit. A cleanup of stale commit files is triggered. // This is done to avoid delete post each refresh. if (isRefreshAfterCommit()) { - remoteDirectory.deleteStaleSegmentsAsync(indexShard.getRecoverySettings().getMinRemoteSegmentMetadataFiles()); + remoteDirectory.deleteStaleSegmentsAsync(indexShard.getRemoteStoreSettings().getMinRemoteSegmentMetadataFiles()); } try (GatedCloseable segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { diff --git a/server/src/main/java/org/opensearch/indices/DefaultRemoteStoreSettings.java b/server/src/main/java/org/opensearch/indices/DefaultRemoteStoreSettings.java new file mode 100644 index 0000000000000..d3937600a848b --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/DefaultRemoteStoreSettings.java @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; + +/** + * Utility to provide a {@link RemoteStoreSettings} instance containing all defaults + * + * @opensearch.internal + */ +public final class DefaultRemoteStoreSettings { + private DefaultRemoteStoreSettings() {} + + public static final RemoteStoreSettings INSTANCE = new RemoteStoreSettings( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); +} diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 9bc81c1826c2d..28ad4b23e319c 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -249,17 +249,6 @@ public class IndicesService extends AbstractLifecycleComponent Property.Final ); - /** - * Used to specify the default translog buffer interval for remote store backed indexes. - */ - public static final Setting CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING = Setting.timeSetting( - "cluster.remote_store.translog.buffer_interval", - IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, - IndexSettings.MINIMUM_REMOTE_TRANSLOG_BUFFER_INTERVAL, - Property.NodeScope, - Property.Dynamic - ); - /** * This setting is used to set the refresh interval when the {@code index.refresh_interval} index setting is not * provided during index creation or when the existing {@code index.refresh_interval} index setting is set as null. @@ -366,7 +355,7 @@ public class IndicesService extends AbstractLifecycleComponent private volatile boolean idFieldDataEnabled; private volatile boolean allowExpensiveQueries; private final RecoverySettings recoverySettings; - + private final RemoteStoreSettings remoteStoreSettings; @Nullable private final OpenSearchThreadPoolExecutor danglingIndicesThreadPoolExecutor; private final Set danglingIndicesToWrite = Sets.newConcurrentHashSet(); @@ -375,8 +364,6 @@ public class IndicesService extends AbstractLifecycleComponent private final IndexStorePlugin.DirectoryFactory remoteDirectoryFactory; private final BiFunction translogFactorySupplier; private volatile TimeValue clusterDefaultRefreshInterval; - private volatile TimeValue clusterRemoteTranslogBufferInterval; - private final SearchRequestStats searchRequestStats; @Override @@ -411,7 +398,8 @@ public IndicesService( SearchRequestStats searchRequestStats, @Nullable RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, RecoverySettings recoverySettings, - CacheService cacheService + CacheService cacheService, + RemoteStoreSettings remoteStoreSettings ) { this.settings = settings; this.threadPool = threadPool; @@ -515,10 +503,8 @@ protected void closeInternal() { this.clusterDefaultRefreshInterval = CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.get(clusterService.getSettings()); clusterService.getClusterSettings() .addSettingsUpdateConsumer(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING, this::onRefreshIntervalUpdate); - this.clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(clusterService.getSettings()); - clusterService.getClusterSettings() - .addSettingsUpdateConsumer(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, this::setClusterRemoteTranslogBufferInterval); this.recoverySettings = recoverySettings; + this.remoteStoreSettings = remoteStoreSettings; } /** @@ -923,8 +909,8 @@ private 
synchronized IndexService createIndexService(
             remoteDirectoryFactory,
             translogFactorySupplier,
             this::getClusterDefaultRefreshInterval,
-            this::getClusterRemoteTranslogBufferInterval,
-            this.recoverySettings
+            this.recoverySettings,
+            this.remoteStoreSettings
         );
     }
 
@@ -2044,12 +2030,7 @@ private TimeValue getClusterDefaultRefreshInterval() {
         return this.clusterDefaultRefreshInterval;
     }
 
-    // Exclusively for testing, please do not use it elsewhere.
-    public TimeValue getClusterRemoteTranslogBufferInterval() {
-        return clusterRemoteTranslogBufferInterval;
-    }
-
-    private void setClusterRemoteTranslogBufferInterval(TimeValue clusterRemoteTranslogBufferInterval) {
-        this.clusterRemoteTranslogBufferInterval = clusterRemoteTranslogBufferInterval;
+    public RemoteStoreSettings getRemoteStoreSettings() {
+        return this.remoteStoreSettings;
     }
 }
diff --git a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java
new file mode 100644
index 0000000000000..5e6dba2b398db
--- /dev/null
+++ b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java
@@ -0,0 +1,90 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.indices;
+
+import org.opensearch.common.annotation.PublicApi;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Setting.Property;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.index.IndexSettings;
+
+/**
+ * Settings for remote store
+ *
+ * @opensearch.api
+ */
+@PublicApi(since = "2.14.0")
+public class RemoteStoreSettings {
+
+    /**
+     * Used to specify the default translog buffer interval for remote store backed indexes.
+     */
+    public static final Setting<TimeValue> CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING = Setting.timeSetting(
+        "cluster.remote_store.translog.buffer_interval",
+        IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL,
+        IndexSettings.MINIMUM_REMOTE_TRANSLOG_BUFFER_INTERVAL,
+        Property.NodeScope,
+        Property.Dynamic
+    );
+
+    /**
+     * Controls minimum number of metadata files to keep in remote segment store.
+     * {@code value < 1} will disable deletion of stale segment metadata files.
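+     * A value of {@code 0} is rejected by the validator below, since keeping zero metadata files
+     * would amount to deleting all data from the remote segment store.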
+     */
+    public static final Setting<Integer> CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING = Setting.intSetting(
+        "cluster.remote_store.index.segment_metadata.retention.max_count",
+        10,
+        -1,
+        v -> {
+            if (v == 0) {
+                throw new IllegalArgumentException(
+                    "Value 0 is not allowed for this setting as it would delete all the data from remote segment store"
+                );
+            }
+        },
+        Property.NodeScope,
+        Property.Dynamic
+    );
+
+    private volatile TimeValue clusterRemoteTranslogBufferInterval;
+    private volatile int minRemoteSegmentMetadataFiles;
+
+    public RemoteStoreSettings(Settings settings, ClusterSettings clusterSettings) {
+        this.clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings);
+        clusterSettings.addSettingsUpdateConsumer(
+            CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING,
+            this::setClusterRemoteTranslogBufferInterval
+        );
+
+        minRemoteSegmentMetadataFiles = CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.get(settings);
+        clusterSettings.addSettingsUpdateConsumer(
+            CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING,
+            this::setMinRemoteSegmentMetadataFiles
+        );
+    }
+
+    // Also used by IndexShard to supply the remote translog upload buffer interval.
+    public TimeValue getClusterRemoteTranslogBufferInterval() {
+        return clusterRemoteTranslogBufferInterval;
+    }
+
+    private void setClusterRemoteTranslogBufferInterval(TimeValue clusterRemoteTranslogBufferInterval) {
+        this.clusterRemoteTranslogBufferInterval = clusterRemoteTranslogBufferInterval;
+    }
+
+    private void setMinRemoteSegmentMetadataFiles(int minRemoteSegmentMetadataFiles) {
+        this.minRemoteSegmentMetadataFiles = minRemoteSegmentMetadataFiles;
+    }
+
+    public int getMinRemoteSegmentMetadataFiles() {
+        return this.minRemoteSegmentMetadataFiles;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java
index 2b41eb125d808..53b42347aa30d 100644
--- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java
+++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java
@@ -159,25 +159,6 @@ public class RecoverySettings {
         Property.NodeScope
     );
 
-    /**
-     * Controls minimum number of metadata files to keep in remote segment store.
-     * {@code value < 1} will disable deletion of stale segment metadata files.
- */ - public static final Setting CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING = Setting.intSetting( - "cluster.remote_store.index.segment_metadata.retention.max_count", - 10, - -1, - v -> { - if (v == 0) { - throw new IllegalArgumentException( - "Value 0 is not allowed for this setting as it would delete all the data from remote segment store" - ); - } - }, - Property.NodeScope, - Property.Dynamic - ); - public static final Setting INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT = Setting.timeSetting( "indices.recovery.internal_remote_upload_timeout", new TimeValue(1, TimeUnit.HOURS), @@ -199,7 +180,6 @@ public class RecoverySettings { private volatile TimeValue internalActionTimeout; private volatile TimeValue internalActionRetryTimeout; private volatile TimeValue internalActionLongTimeout; - private volatile int minRemoteSegmentMetadataFiles; private volatile ByteSizeValue chunkSize = DEFAULT_CHUNK_SIZE; private volatile TimeValue internalRemoteUploadTimeout; @@ -243,11 +223,6 @@ public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { this::setInternalActionLongTimeout ); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, this::setActivityTimeout); - minRemoteSegmentMetadataFiles = CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.get(settings); - clusterSettings.addSettingsUpdateConsumer( - CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, - this::setMinRemoteSegmentMetadataFiles - ); clusterSettings.addSettingsUpdateConsumer(INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT, this::setInternalRemoteUploadTimeout); } @@ -354,11 +329,4 @@ private void setMaxConcurrentRemoteStoreStreams(int maxConcurrentRemoteStoreStre this.maxConcurrentRemoteStoreStreams = maxConcurrentRemoteStoreStreams; } - private void setMinRemoteSegmentMetadataFiles(int minRemoteSegmentMetadataFiles) { - this.minRemoteSegmentMetadataFiles = minRemoteSegmentMetadataFiles; - } - - public int getMinRemoteSegmentMetadataFiles() { - return this.minRemoteSegmentMetadataFiles; - } } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index ea449afe1c811..8348b8f02d342 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -153,6 +153,7 @@ import org.opensearch.index.store.remote.filecache.FileCacheFactory; import org.opensearch.indices.IndicesModule; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.indices.SystemIndexDescriptor; import org.opensearch.indices.SystemIndices; @@ -788,6 +789,8 @@ protected Node( final RecoverySettings recoverySettings = new RecoverySettings(settings, settingsModule.getClusterSettings()); + final RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, settingsModule.getClusterSettings()); + final IndexStorePlugin.DirectoryFactory remoteDirectoryFactory = new RemoteSegmentStoreDirectoryFactory( repositoriesServiceReference::get, threadPool @@ -825,7 +828,8 @@ protected Node( searchRequestStats, remoteStoreStatsTrackerFactory, recoverySettings, - cacheService + cacheService, + remoteStoreSettings ); final IngestService ingestService = new IngestService( diff --git a/server/src/test/java/org/opensearch/index/IndexModuleTests.java b/server/src/test/java/org/opensearch/index/IndexModuleTests.java index 97bc822be7d51..82d7ab06f126b 100644 
--- a/server/src/test/java/org/opensearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/opensearch/index/IndexModuleTests.java @@ -99,6 +99,7 @@ import org.opensearch.index.translog.InternalTranslogFactory; import org.opensearch.index.translog.RemoteBlobStoreInternalTranslogFactory; import org.opensearch.index.translog.TranslogFactory; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndicesModule; import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.analysis.AnalysisModule; @@ -261,8 +262,8 @@ private IndexService newIndexService(IndexModule module) throws IOException { new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService, threadPool), translogFactorySupplier, () -> IndexSettings.DEFAULT_REFRESH_INTERVAL, - () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, - DefaultRecoverySettings.INSTANCE + DefaultRecoverySettings.INSTANCE, + DefaultRemoteStoreSettings.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java index 85878cc2e1c9d..33f6c67b94b3d 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -34,7 +34,7 @@ import org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils; import org.opensearch.index.store.Store; import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; -import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.threadpool.ThreadPool; @@ -249,7 +249,7 @@ public void testAfterMultipleCommits() throws IOException { setup(true, 3); assertDocs(indexShard, "1", "2", "3"); - for (int i = 0; i < indexShard.getRecoverySettings().getMinRemoteSegmentMetadataFiles() + 3; i++) { + for (int i = 0; i < indexShard.getRemoteStoreSettings().getMinRemoteSegmentMetadataFiles() + 3; i++) { indexDocs(4 * (i + 1), 4); flushShard(indexShard); } @@ -635,9 +635,9 @@ private Tuple mockIn RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory = indexShard.getRemoteStoreStatsTrackerFactory(); when(shard.indexSettings()).thenReturn(indexShard.indexSettings()); when(shard.shardId()).thenReturn(indexShard.shardId()); - RecoverySettings recoverySettings = mock(RecoverySettings.class); - when(recoverySettings.getMinRemoteSegmentMetadataFiles()).thenReturn(10); - when(shard.getRecoverySettings()).thenReturn(recoverySettings); + RemoteStoreSettings remoteStoreSettings = mock(RemoteStoreSettings.class); + when(remoteStoreSettings.getMinRemoteSegmentMetadataFiles()).thenReturn(10); + when(shard.getRemoteStoreSettings()).thenReturn(remoteStoreSettings); RemoteStoreRefreshListener refreshListener = new RemoteStoreRefreshListener(shard, emptyCheckpointPublisher, tracker); refreshListener.afterRefresh(true); return Tuple.tuple(refreshListener, remoteStoreStatsTrackerFactory); diff --git a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java index 742dbdeba8c5b..6757dbc184961 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java +++ 
b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java @@ -622,6 +622,9 @@ public void testConflictingEngineFactories() { public void testClusterRemoteTranslogBufferIntervalDefault() { IndicesService indicesService = getIndicesService(); - assertEquals(IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, indicesService.getClusterRemoteTranslogBufferInterval()); + assertEquals( + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, + indicesService.getRemoteStoreSettings().getClusterRemoteTranslogBufferInterval() + ); } } diff --git a/server/src/test/java/org/opensearch/indices/RemoteStoreSettingsDynamicUpdateTests.java b/server/src/test/java/org/opensearch/indices/RemoteStoreSettingsDynamicUpdateTests.java new file mode 100644 index 0000000000000..3809a44e55901 --- /dev/null +++ b/server/src/test/java/org/opensearch/indices/RemoteStoreSettingsDynamicUpdateTests.java @@ -0,0 +1,69 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchTestCase; + +public class RemoteStoreSettingsDynamicUpdateTests extends OpenSearchTestCase { + private final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + private final RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(Settings.EMPTY, clusterSettings); + + public void testSegmentMetadataRetention() { + // Default value + assertEquals(10, remoteStoreSettings.getMinRemoteSegmentMetadataFiles()); + + // Setting value < default (10) + clusterSettings.applySettings( + Settings.builder() + .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 5) + .build() + ); + assertEquals(5, remoteStoreSettings.getMinRemoteSegmentMetadataFiles()); + + // Setting min value + clusterSettings.applySettings( + Settings.builder() + .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), -1) + .build() + ); + assertEquals(-1, remoteStoreSettings.getMinRemoteSegmentMetadataFiles()); + + // Setting value > default (10) + clusterSettings.applySettings( + Settings.builder() + .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 15) + .build() + ); + assertEquals(15, remoteStoreSettings.getMinRemoteSegmentMetadataFiles()); + + // Setting value to 0 should fail and retain the existing value + assertThrows( + IllegalArgumentException.class, + () -> clusterSettings.applySettings( + Settings.builder() + .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 0) + .build() + ) + ); + assertEquals(15, remoteStoreSettings.getMinRemoteSegmentMetadataFiles()); + + // Setting value < -1 should fail and retain the existing value + assertThrows( + IllegalArgumentException.class, + () -> clusterSettings.applySettings( + Settings.builder() + .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), -5) + .build() + ) + ); + assertEquals(15, remoteStoreSettings.getMinRemoteSegmentMetadataFiles()); + } +} diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoverySettingsDynamicUpdateTests.java 
b/server/src/test/java/org/opensearch/indices/recovery/RecoverySettingsDynamicUpdateTests.java index 18e7dfb375132..75639661f539d 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoverySettingsDynamicUpdateTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RecoverySettingsDynamicUpdateTests.java @@ -96,49 +96,4 @@ public void testInternalLongActionTimeout() { ); assertEquals(new TimeValue(duration, timeUnit), recoverySettings.internalActionLongTimeout()); } - - public void testSegmentMetadataRetention() { - // Default value - assertEquals(10, recoverySettings.getMinRemoteSegmentMetadataFiles()); - - // Setting value < default (10) - clusterSettings.applySettings( - Settings.builder().put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 5).build() - ); - assertEquals(5, recoverySettings.getMinRemoteSegmentMetadataFiles()); - - // Setting min value - clusterSettings.applySettings( - Settings.builder().put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), -1).build() - ); - assertEquals(-1, recoverySettings.getMinRemoteSegmentMetadataFiles()); - - // Setting value > default (10) - clusterSettings.applySettings( - Settings.builder().put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 15).build() - ); - assertEquals(15, recoverySettings.getMinRemoteSegmentMetadataFiles()); - - // Setting value to 0 should fail and retain the existing value - assertThrows( - IllegalArgumentException.class, - () -> clusterSettings.applySettings( - Settings.builder() - .put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 0) - .build() - ) - ); - assertEquals(15, recoverySettings.getMinRemoteSegmentMetadataFiles()); - - // Setting value < -1 should fail and retain the existing value - assertThrows( - IllegalArgumentException.class, - () -> clusterSettings.applySettings( - Settings.builder() - .put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), -5) - .build() - ) - ); - assertEquals(15, recoverySettings.getMinRemoteSegmentMetadataFiles()); - } } diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index e9bbf3d861408..4326e5fc63961 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -192,6 +192,7 @@ import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.index.store.remote.filecache.FileCacheStats; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndicesModule; import org.opensearch.indices.IndicesService; import org.opensearch.indices.ShardLimitValidator; @@ -2077,7 +2078,8 @@ public void onFailure(final Exception e) { null, new RemoteStoreStatsTrackerFactory(clusterService, settings), DefaultRecoverySettings.INSTANCE, - new CacheModule(new ArrayList<>(), settings).getCacheService() + new CacheModule(new ArrayList<>(), settings).getCacheService(), + DefaultRemoteStoreSettings.INSTANCE ); final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); snapshotShardsService = new SnapshotShardsService( diff --git 
a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index a2f9eb677c0ac..9baa6110ab54e 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -116,6 +116,7 @@ import org.opensearch.index.translog.RemoteBlobStoreInternalTranslogFactory; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogFactory; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndicesService; import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; import org.opensearch.indices.recovery.AsyncRecoveryTarget; @@ -708,9 +709,9 @@ protected IndexShard newShard( checkpointPublisher, remoteStore, remoteStoreStatsTrackerFactory, - () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, "dummy-node", DefaultRecoverySettings.INSTANCE, + DefaultRemoteStoreSettings.INSTANCE, false ); indexShard.addShardFailureCallback(DEFAULT_SHARD_FAILURE_HANDLER); From a103b84e157a12f8217e34fa41260de7ce13969f Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 28 Mar 2024 10:33:18 -0400 Subject: [PATCH 54/74] [FEATURE] Broaden SecureSettingsFactory to include http transports (#12907) Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + .../ssl/SecureNetty4HttpServerTransport.java | 78 +++++- .../transport/Netty4ModulePlugin.java | 5 +- .../netty4/ssl/SecureNetty4Transport.java | 5 +- ...HttpServerTransportConfigurationTests.java | 242 ++++++++++++++++++ .../SecureNetty4HttpServerTransportTests.java | 46 +--- .../ssl/SimpleSecureNetty4TransportTests.java | 34 +-- .../common/network/NetworkModule.java | 39 ++- .../main/java/org/opensearch/node/Node.java | 8 +- .../org/opensearch/plugins/NetworkPlugin.java | 2 +- .../SecureHttpTransportSettingsProvider.java | 55 ++++ .../plugins/SecureSettingsFactory.java | 7 + .../SecureTransportSettingsProvider.java | 57 ++--- .../plugins/TransportExceptionHandler.java | 30 +++ .../transport/TransportAdapterProvider.java | 40 +++ .../common/network/NetworkModuleTests.java | 102 ++++++-- 16 files changed, 596 insertions(+), 155 deletions(-) create mode 100644 modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportConfigurationTests.java create mode 100644 server/src/main/java/org/opensearch/plugins/SecureHttpTransportSettingsProvider.java create mode 100644 server/src/main/java/org/opensearch/plugins/TransportExceptionHandler.java create mode 100644 server/src/main/java/org/opensearch/transport/TransportAdapterProvider.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 8a18660a2d84c..de832cdc5c52f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -116,6 +116,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Changed - [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872)) +- Improve built-in secure transports support ([#12907](https://github.com/opensearch-project/OpenSearch/pull/12907)) ### Deprecated diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransport.java index 51a76903e284d..978c92870bd75 100644 --- 
a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransport.java @@ -37,19 +37,27 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.http.HttpChannel; import org.opensearch.http.HttpHandlingSettings; +import org.opensearch.http.HttpServerTransport; import org.opensearch.http.netty4.Netty4HttpChannel; import org.opensearch.http.netty4.Netty4HttpServerTransport; -import org.opensearch.plugins.SecureTransportSettingsProvider; +import org.opensearch.plugins.SecureHttpTransportSettingsProvider; +import org.opensearch.plugins.TransportExceptionHandler; import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.SharedGroupFactory; +import org.opensearch.transport.TransportAdapterProvider; import org.opensearch.transport.netty4.ssl.SslUtils; import javax.net.ssl.SSLEngine; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; + import io.netty.channel.Channel; import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.handler.codec.DecoderException; import io.netty.handler.ssl.ApplicationProtocolNames; import io.netty.handler.ssl.ApplicationProtocolNegotiationHandler; @@ -59,9 +67,14 @@ * @see SecuritySSLNettyHttpServerTransport */ public class SecureNetty4HttpServerTransport extends Netty4HttpServerTransport { + public static final String REQUEST_HEADER_VERIFIER = "HeaderVerifier"; + public static final String REQUEST_DECOMPRESSOR = "RequestDecompressor"; + private static final Logger logger = LogManager.getLogger(SecureNetty4HttpServerTransport.class); - private final SecureTransportSettingsProvider secureTransportSettingsProvider; - private final SecureTransportSettingsProvider.ServerExceptionHandler exceptionHandler; + private final SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider; + private final TransportExceptionHandler exceptionHandler; + private final ChannelInboundHandlerAdapter headerVerifier; + private final TransportAdapterProvider decompressorProvider; public SecureNetty4HttpServerTransport( final Settings settings, @@ -72,7 +85,7 @@ public SecureNetty4HttpServerTransport( final Dispatcher dispatcher, final ClusterSettings clusterSettings, final SharedGroupFactory sharedGroupFactory, - final SecureTransportSettingsProvider secureTransportSettingsProvider, + final SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider, final Tracer tracer ) { super( @@ -86,9 +99,45 @@ public SecureNetty4HttpServerTransport( sharedGroupFactory, tracer ); - this.secureTransportSettingsProvider = secureTransportSettingsProvider; - this.exceptionHandler = secureTransportSettingsProvider.buildHttpServerExceptionHandler(settings, this) - .orElse(SecureTransportSettingsProvider.ServerExceptionHandler.NOOP); + + this.secureHttpTransportSettingsProvider = secureHttpTransportSettingsProvider; + this.exceptionHandler = secureHttpTransportSettingsProvider.buildHttpServerExceptionHandler(settings, this) + .orElse(TransportExceptionHandler.NOOP); + + final List headerVerifiers = secureHttpTransportSettingsProvider.getHttpTransportAdapterProviders( + settings + ) + .stream() + .filter(p -> REQUEST_HEADER_VERIFIER.equalsIgnoreCase(p.name())) + .map(p -> p.create(settings, this, 
ChannelInboundHandlerAdapter.class)) + .filter(Optional::isPresent) + .map(Optional::get) + .collect(Collectors.toList()); + + if (headerVerifiers.size() > 1) { + throw new IllegalArgumentException("Cannot have more than one header verifier configured, supplied " + headerVerifiers.size()); + } + + final Optional> decompressorProviderOpt = secureHttpTransportSettingsProvider + .getHttpTransportAdapterProviders(settings) + .stream() + .filter(p -> REQUEST_DECOMPRESSOR.equalsIgnoreCase(p.name())) + .findFirst(); + // There could be multiple request decompressor providers configured, using the first one + decompressorProviderOpt.ifPresent(p -> logger.debug("Using request decompressor provider: {}", p)); + + this.headerVerifier = headerVerifiers.isEmpty() ? null : headerVerifiers.get(0); + this.decompressorProvider = decompressorProviderOpt.orElseGet(() -> new TransportAdapterProvider() { + @Override + public String name() { + return REQUEST_DECOMPRESSOR; + } + + @Override + public Optional create(Settings settings, HttpServerTransport transport, Class adapterClass) { + return Optional.empty(); + } + }); } @Override @@ -152,7 +201,7 @@ protected SslHttpChannelHandler(final Netty4HttpServerTransport transport, final protected void initChannel(Channel ch) throws Exception { super.initChannel(ch); - final SSLEngine sslEngine = secureTransportSettingsProvider.buildSecureHttpServerEngine( + final SSLEngine sslEngine = secureHttpTransportSettingsProvider.buildSecureHttpServerEngine( settings, SecureNetty4HttpServerTransport.this ).orElseGet(SslUtils::createDefaultServerSSLEngine); @@ -166,4 +215,17 @@ protected void configurePipeline(Channel ch) { ch.pipeline().addLast(new Http2OrHttpHandler()); } } + + protected ChannelInboundHandlerAdapter createHeaderVerifier() { + if (headerVerifier != null) { + return headerVerifier; + } else { + return super.createHeaderVerifier(); + } + } + + @Override + protected ChannelInboundHandlerAdapter createDecompressor() { + return decompressorProvider.create(settings, this, ChannelInboundHandlerAdapter.class).orElseGet(super::createDecompressor); + } } diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java index 56163c18949a4..e2c84ab5d339a 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java @@ -49,6 +49,7 @@ import org.opensearch.http.netty4.ssl.SecureNetty4HttpServerTransport; import org.opensearch.plugins.NetworkPlugin; import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.SecureHttpTransportSettingsProvider; import org.opensearch.plugins.SecureTransportSettingsProvider; import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; @@ -160,7 +161,7 @@ public Map> getSecureHttpTransports( NetworkService networkService, HttpServerTransport.Dispatcher dispatcher, ClusterSettings clusterSettings, - SecureTransportSettingsProvider secureTransportSettingsProvider, + SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider, Tracer tracer ) { return Collections.singletonMap( @@ -174,7 +175,7 @@ public Map> getSecureHttpTransports( dispatcher, clusterSettings, getSharedGroupFactory(settings), - secureTransportSettingsProvider, + secureHttpTransportSettingsProvider, tracer ) ); diff --git 
a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SecureNetty4Transport.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SecureNetty4Transport.java index 9c63a1ab9161b..977121346dcc3 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SecureNetty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SecureNetty4Transport.java @@ -42,6 +42,7 @@ import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.plugins.SecureTransportSettingsProvider; +import org.opensearch.plugins.TransportExceptionHandler; import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.SharedGroupFactory; @@ -72,7 +73,7 @@ public class SecureNetty4Transport extends Netty4Transport { private static final Logger logger = LogManager.getLogger(SecureNetty4Transport.class); private final SecureTransportSettingsProvider secureTransportSettingsProvider; - private final SecureTransportSettingsProvider.ServerExceptionHandler exceptionHandler; + private final TransportExceptionHandler exceptionHandler; public SecureNetty4Transport( final Settings settings, @@ -100,7 +101,7 @@ public SecureNetty4Transport( this.secureTransportSettingsProvider = secureTransportSettingsProvider; this.exceptionHandler = secureTransportSettingsProvider.buildServerTransportExceptionHandler(settings, this) - .orElse(SecureTransportSettingsProvider.ServerExceptionHandler.NOOP); + .orElse(TransportExceptionHandler.NOOP); } @Override diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportConfigurationTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportConfigurationTests.java new file mode 100644 index 0000000000000..1ab1ae4f5ddfd --- /dev/null +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportConfigurationTests.java @@ -0,0 +1,242 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.http.netty4.ssl; + +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.MockBigArrays; +import org.opensearch.common.util.MockPageCacheRecycler; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.http.NullDispatcher; +import org.opensearch.plugins.SecureHttpTransportSettingsProvider; +import org.opensearch.plugins.TransportExceptionHandler; +import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.SharedGroupFactory; +import org.opensearch.transport.TransportAdapterProvider; +import org.junit.After; +import org.junit.Before; + +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLException; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Optional; + +import io.netty.channel.ChannelInboundHandlerAdapter; + +import static org.hamcrest.Matchers.equalTo; + +/** + * Tests for the {@link SecureNetty4HttpServerTransport} class. + */ +public class SecureNetty4HttpServerTransportConfigurationTests extends OpenSearchTestCase { + + private NetworkService networkService; + private ThreadPool threadPool; + private MockBigArrays bigArrays; + private ClusterSettings clusterSettings; + + private static class ConfigurableSecureHttpTransportSettingsProvider implements SecureHttpTransportSettingsProvider { + private final List> transportAdapterProviders; + + public ConfigurableSecureHttpTransportSettingsProvider( + List> transportAdapterProviders + ) { + this.transportAdapterProviders = transportAdapterProviders; + } + + @Override + public Collection> getHttpTransportAdapterProviders(Settings settings) { + return transportAdapterProviders; + } + + @Override + public Optional buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport) { + return Optional.empty(); + } + + @Override + public Optional buildSecureHttpServerEngine(Settings settings, HttpServerTransport transport) throws SSLException { + return Optional.empty(); + } + } + + @Before + public void setup() throws Exception { + networkService = new NetworkService(Collections.emptyList()); + threadPool = new TestThreadPool("test"); + bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + } + + @After + public void shutdown() throws Exception { + if (threadPool != null) { + threadPool.shutdownNow(); + } + threadPool = null; + networkService = null; + bigArrays = null; + clusterSettings = null; + } + + public void testRequestHeaderVerifier() throws InterruptedException { + final TransportAdapterProvider transportAdapterProvider = new TransportAdapterProvider() { + @Override + public String name() { + return SecureNetty4HttpServerTransport.REQUEST_HEADER_VERIFIER; + } + + @SuppressWarnings("unchecked") + @Override + public Optional create(Settings settings, HttpServerTransport transport, Class adapterClass) { + return Optional.of((C) new ChannelInboundHandlerAdapter()); + } + + }; + + try ( + final SecureNetty4HttpServerTransport transport = new SecureNetty4HttpServerTransport( + 
Settings.EMPTY, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + new NullDispatcher(), + clusterSettings, + new SharedGroupFactory(Settings.EMPTY), + new ConfigurableSecureHttpTransportSettingsProvider(List.of(transportAdapterProvider)), + NoopTracer.INSTANCE + ) + ) { + + } + } + + public void testMultipleRequestHeaderVerifiers() throws InterruptedException { + final TransportAdapterProvider transportAdapterProvider = new TransportAdapterProvider() { + @Override + public String name() { + return SecureNetty4HttpServerTransport.REQUEST_HEADER_VERIFIER; + } + + @SuppressWarnings("unchecked") + @Override + public Optional create(Settings settings, HttpServerTransport transport, Class adapterClass) { + return Optional.of((C) new ChannelInboundHandlerAdapter()); + } + + }; + + final IllegalArgumentException ex = assertThrows( + IllegalArgumentException.class, + () -> new SecureNetty4HttpServerTransport( + Settings.EMPTY, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + new NullDispatcher(), + clusterSettings, + new SharedGroupFactory(Settings.EMPTY), + new ConfigurableSecureHttpTransportSettingsProvider(List.of(transportAdapterProvider, transportAdapterProvider)), + NoopTracer.INSTANCE + ) + ); + + assertThat(ex.getMessage(), equalTo("Cannot have more than one header verifier configured, supplied 2")); + } + + public void testRequestDecompressor() throws InterruptedException { + final TransportAdapterProvider transportAdapterProvider = new TransportAdapterProvider() { + @Override + public String name() { + return SecureNetty4HttpServerTransport.REQUEST_DECOMPRESSOR; + } + + @SuppressWarnings("unchecked") + @Override + public Optional create(Settings settings, HttpServerTransport transport, Class adapterClass) { + return Optional.of((C) new ChannelInboundHandlerAdapter()); + } + + }; + + try ( + final SecureNetty4HttpServerTransport transport = new SecureNetty4HttpServerTransport( + Settings.EMPTY, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + new NullDispatcher(), + clusterSettings, + new SharedGroupFactory(Settings.EMPTY), + new ConfigurableSecureHttpTransportSettingsProvider(List.of(transportAdapterProvider)), + NoopTracer.INSTANCE + ) + ) { + + } + } + + public void testRequestDecompressorAndRequestHeaderVerifier() throws InterruptedException { + final TransportAdapterProvider requestDecompressor = new TransportAdapterProvider() { + @Override + public String name() { + return SecureNetty4HttpServerTransport.REQUEST_DECOMPRESSOR; + } + + @SuppressWarnings("unchecked") + @Override + public Optional create(Settings settings, HttpServerTransport transport, Class adapterClass) { + return Optional.of((C) new ChannelInboundHandlerAdapter()); + } + + }; + + final TransportAdapterProvider requestHeaderVerifier = new TransportAdapterProvider() { + @Override + public String name() { + return SecureNetty4HttpServerTransport.REQUEST_HEADER_VERIFIER; + } + + @SuppressWarnings("unchecked") + @Override + public Optional create(Settings settings, HttpServerTransport transport, Class adapterClass) { + return Optional.of((C) new ChannelInboundHandlerAdapter()); + } + + }; + + try ( + final SecureNetty4HttpServerTransport transport = new SecureNetty4HttpServerTransport( + Settings.EMPTY, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + new NullDispatcher(), + clusterSettings, + new SharedGroupFactory(Settings.EMPTY), + new ConfigurableSecureHttpTransportSettingsProvider(List.of(requestDecompressor, requestHeaderVerifier)), + 
NoopTracer.INSTANCE + ) + ) { + + } + } +} diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java index 9ea49d0b24d44..e79a066ad8f63 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java @@ -29,7 +29,8 @@ import org.opensearch.http.HttpTransportSettings; import org.opensearch.http.NullDispatcher; import org.opensearch.http.netty4.Netty4HttpClient; -import org.opensearch.plugins.SecureTransportSettingsProvider; +import org.opensearch.plugins.SecureHttpTransportSettingsProvider; +import org.opensearch.plugins.TransportExceptionHandler; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestRequest; @@ -40,7 +41,6 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.NettyAllocator; import org.opensearch.transport.SharedGroupFactory; -import org.opensearch.transport.TcpTransport; import org.opensearch.transport.netty4.ssl.TrustAllManager; import org.junit.After; import org.junit.Before; @@ -83,7 +83,6 @@ import io.netty.handler.codec.http.HttpResponseStatus; import io.netty.handler.codec.http.HttpUtil; import io.netty.handler.codec.http.HttpVersion; -import io.netty.handler.ssl.ClientAuth; import io.netty.handler.ssl.SslContextBuilder; import static org.opensearch.core.rest.RestStatus.BAD_REQUEST; @@ -104,7 +103,7 @@ public class SecureNetty4HttpServerTransportTests extends OpenSearchTestCase { private ThreadPool threadPool; private MockBigArrays bigArrays; private ClusterSettings clusterSettings; - private SecureTransportSettingsProvider secureTransportSettingsProvider; + private SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider; @Before public void setup() throws Exception { @@ -113,14 +112,9 @@ public void setup() throws Exception { bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - secureTransportSettingsProvider = new SecureTransportSettingsProvider() { + secureHttpTransportSettingsProvider = new SecureHttpTransportSettingsProvider() { @Override - public Optional buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport) { - return Optional.empty(); - } - - @Override - public Optional buildServerTransportExceptionHandler(Settings settings, TcpTransport transport) { + public Optional buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport) { return Optional.empty(); } @@ -146,22 +140,6 @@ public Optional buildSecureHttpServerEngine(Settings settings, HttpSe throw new SSLException(ex); } } - - @Override - public Optional buildSecureServerTransportEngine(Settings settings, TcpTransport transport) throws SSLException { - return Optional.empty(); - } - - @Override - public Optional buildSecureClientTransportEngine(Settings settings, String hostname, int port) throws SSLException { - return Optional.of( - SslContextBuilder.forClient() - .clientAuth(ClientAuth.NONE) - .trustManager(TrustAllManager.INSTANCE) - .build() - .newEngine(NettyAllocator.getAllocator()) - ); - } }; } @@ -241,7 +219,7 @@ public void 
dispatchBadRequest(RestChannel channel, ThreadContext threadContext, dispatcher, clusterSettings, new SharedGroupFactory(settings), - secureTransportSettingsProvider, + secureHttpTransportSettingsProvider, NoopTracer.INSTANCE ) ) { @@ -292,7 +270,7 @@ public void testBindUnavailableAddress() { new NullDispatcher(), clusterSettings, new SharedGroupFactory(Settings.EMPTY), - secureTransportSettingsProvider, + secureHttpTransportSettingsProvider, NoopTracer.INSTANCE ) ) { @@ -312,7 +290,7 @@ public void testBindUnavailableAddress() { new NullDispatcher(), clusterSettings, new SharedGroupFactory(settings), - secureTransportSettingsProvider, + secureHttpTransportSettingsProvider, NoopTracer.INSTANCE ) ) { @@ -366,7 +344,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th dispatcher, clusterSettings, new SharedGroupFactory(settings), - secureTransportSettingsProvider, + secureHttpTransportSettingsProvider, NoopTracer.INSTANCE ) ) { @@ -430,7 +408,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th dispatcher, clusterSettings, new SharedGroupFactory(Settings.EMPTY), - secureTransportSettingsProvider, + secureHttpTransportSettingsProvider, NoopTracer.INSTANCE ) ) { @@ -487,7 +465,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th dispatcher, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), new SharedGroupFactory(settings), - secureTransportSettingsProvider, + secureHttpTransportSettingsProvider, NoopTracer.INSTANCE ) ) { @@ -562,7 +540,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th dispatcher, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), new SharedGroupFactory(settings), - secureTransportSettingsProvider, + secureHttpTransportSettingsProvider, NoopTracer.INSTANCE ) ) { diff --git a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java index 0cae58b8efa2a..df3b005f40903 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java @@ -20,8 +20,8 @@ import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; -import org.opensearch.http.HttpServerTransport; import org.opensearch.plugins.SecureTransportSettingsProvider; +import org.opensearch.plugins.TransportExceptionHandler; import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.transport.MockTransportService; import org.opensearch.test.transport.StubbableTransport; @@ -69,40 +69,12 @@ protected Transport build(Settings settings, final Version version, ClusterSetti NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); final SecureTransportSettingsProvider secureTransportSettingsProvider = new SecureTransportSettingsProvider() { @Override - public Optional buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport) { + public Optional buildServerTransportExceptionHandler(Settings settings, Transport transport) { return Optional.empty(); } @Override - public 
Optional buildServerTransportExceptionHandler(Settings settings, TcpTransport transport) { - return Optional.empty(); - } - - @Override - public Optional buildSecureHttpServerEngine(Settings settings, HttpServerTransport transport) throws SSLException { - try { - final KeyStore keyStore = KeyStore.getInstance("PKCS12"); - keyStore.load( - SimpleSecureNetty4TransportTests.class.getResourceAsStream("/netty4-secure.jks"), - "password".toCharArray() - ); - - final KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance("SunX509"); - keyManagerFactory.init(keyStore, "password".toCharArray()); - - SSLEngine engine = SslContextBuilder.forServer(keyManagerFactory) - .trustManager(TrustAllManager.INSTANCE) - .build() - .newEngine(NettyAllocator.getAllocator()); - return Optional.of(engine); - } catch (final IOException | NoSuchAlgorithmException | UnrecoverableKeyException | KeyStoreException - | CertificateException ex) { - throw new SSLException(ex); - } - } - - @Override - public Optional buildSecureServerTransportEngine(Settings settings, TcpTransport transport) throws SSLException { + public Optional buildSecureServerTransportEngine(Settings settings, Transport transport) throws SSLException { try { final KeyStore keyStore = KeyStore.getInstance("PKCS12"); keyStore.load( diff --git a/server/src/main/java/org/opensearch/common/network/NetworkModule.java b/server/src/main/java/org/opensearch/common/network/NetworkModule.java index d0f5dd9e4581d..bb8da190a6f35 100644 --- a/server/src/main/java/org/opensearch/common/network/NetworkModule.java +++ b/server/src/main/java/org/opensearch/common/network/NetworkModule.java @@ -55,6 +55,8 @@ import org.opensearch.http.HttpServerTransport; import org.opensearch.index.shard.PrimaryReplicaSyncer.ResyncTask; import org.opensearch.plugins.NetworkPlugin; +import org.opensearch.plugins.SecureHttpTransportSettingsProvider; +import org.opensearch.plugins.SecureSettingsFactory; import org.opensearch.plugins.SecureTransportSettingsProvider; import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.tasks.RawTaskStatus; @@ -74,7 +76,9 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.function.Supplier; +import java.util.stream.Collectors; /** * A module to handle registering and binding all network related classes. 
@@ -173,13 +177,31 @@ public NetworkModule( ClusterSettings clusterSettings, Tracer tracer, List transportInterceptors, - Collection secureTransportSettingsProvider + Collection secureSettingsFactories ) { this.settings = settings; - if (secureTransportSettingsProvider.size() > 1) { + final Collection secureTransportSettingsProviders = secureSettingsFactories.stream() + .map(p -> p.getSecureTransportSettingsProvider(settings)) + .filter(Optional::isPresent) + .map(Optional::get) + .collect(Collectors.toList()); + + if (secureTransportSettingsProviders.size() > 1) { + throw new IllegalArgumentException( + "there is more than one secure transport settings provider: " + secureTransportSettingsProviders + ); + } + + final Collection secureHttpTransportSettingsProviders = secureSettingsFactories.stream() + .map(p -> p.getSecureHttpTransportSettingsProvider(settings)) + .filter(Optional::isPresent) + .map(Optional::get) + .collect(Collectors.toList()); + + if (secureHttpTransportSettingsProviders.size() > 1) { throw new IllegalArgumentException( - "there is more than one secure transport settings provider: " + secureTransportSettingsProvider + "there is more than one secure HTTP transport settings provider: " + secureHttpTransportSettingsProviders ); } @@ -213,9 +235,9 @@ public NetworkModule( registerTransport(entry.getKey(), entry.getValue()); } - // Register any secure transports if available - if (secureTransportSettingsProvider.isEmpty() == false) { - final SecureTransportSettingsProvider secureSettingProvider = secureTransportSettingsProvider.iterator().next(); + // Register any HTTP secure transports if available + if (secureHttpTransportSettingsProviders.isEmpty() == false) { + final SecureHttpTransportSettingsProvider secureSettingProvider = secureHttpTransportSettingsProviders.iterator().next(); final Map> secureHttpTransportFactory = plugin.getSecureHttpTransports( settings, @@ -233,6 +255,11 @@ public NetworkModule( for (Map.Entry> entry : secureHttpTransportFactory.entrySet()) { registerHttpTransport(entry.getKey(), entry.getValue()); } + } + + // Register any secure transports if available + if (secureTransportSettingsProviders.isEmpty() == false) { + final SecureTransportSettingsProvider secureSettingProvider = secureTransportSettingsProviders.iterator().next(); final Map> secureTransportFactory = plugin.getSecureTransports( settings, diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 8348b8f02d342..7fa2b6c8ff497 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -203,7 +203,7 @@ import org.opensearch.plugins.ScriptPlugin; import org.opensearch.plugins.SearchPipelinePlugin; import org.opensearch.plugins.SearchPlugin; -import org.opensearch.plugins.SecureTransportSettingsProvider; +import org.opensearch.plugins.SecureSettingsFactory; import org.opensearch.plugins.SystemIndexPlugin; import org.opensearch.plugins.TelemetryPlugin; import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlService; @@ -950,9 +950,9 @@ protected Node( admissionControlService ); - final Collection secureTransportSettingsProviders = pluginsService.filterPlugins(Plugin.class) + final Collection secureSettingsFactories = pluginsService.filterPlugins(Plugin.class) .stream() - .map(p -> p.getSecureSettingFactory(settings).flatMap(f -> f.getSecureTransportSettingsProvider(settings))) + .map(p -> p.getSecureSettingFactory(settings)) 
             .filter(Optional::isPresent)
             .map(Optional::get)
             .collect(Collectors.toList());
@@ -972,7 +972,7 @@ protected Node(
             clusterService.getClusterSettings(),
             tracer,
             transportInterceptors,
-            secureTransportSettingsProviders
+            secureSettingsFactories
         );

         Collection<UnaryOperator<Map<String, IndexTemplateMetadata>>> indexTemplateMetadataUpgraders = pluginsService.filterPlugins(
diff --git a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java
index 679833c9f6e0d..138ef6f71280d 100644
--- a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java
+++ b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java
@@ -139,7 +139,7 @@ default Map<String, Supplier<HttpServerTransport>> getSecureHttpTransports(
         NetworkService networkService,
         HttpServerTransport.Dispatcher dispatcher,
         ClusterSettings clusterSettings,
-        SecureTransportSettingsProvider secureTransportSettingsProvider,
+        SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider,
         Tracer tracer
     ) {
         return Collections.emptyMap();
diff --git a/server/src/main/java/org/opensearch/plugins/SecureHttpTransportSettingsProvider.java b/server/src/main/java/org/opensearch/plugins/SecureHttpTransportSettingsProvider.java
new file mode 100644
index 0000000000000..ff86cbc04e240
--- /dev/null
+++ b/server/src/main/java/org/opensearch/plugins/SecureHttpTransportSettingsProvider.java
@@ -0,0 +1,55 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugins;
+
+import org.opensearch.common.annotation.ExperimentalApi;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.http.HttpServerTransport;
+import org.opensearch.transport.TransportAdapterProvider;
+
+import javax.net.ssl.SSLEngine;
+import javax.net.ssl.SSLException;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Optional;
+
+/**
+ * A provider for security related settings for HTTP transports.
+ *
+ * @opensearch.experimental
+ */
+@ExperimentalApi
+public interface SecureHttpTransportSettingsProvider {
+    /**
+     * Collection of additional {@link TransportAdapterProvider}s that are specific to a particular HTTP transport
+     * @param settings settings
+     * @return a collection of additional {@link TransportAdapterProvider}s
+     */
+    default Collection<TransportAdapterProvider<HttpServerTransport>> getHttpTransportAdapterProviders(Settings settings) {
+        return Collections.emptyList();
+    }
+
+    /**
+     * If supported, builds the {@link TransportExceptionHandler} instance for the {@link HttpServerTransport} instance
+     * @param settings settings
+     * @param transport {@link HttpServerTransport} instance
+     * @return if supported, the {@link TransportExceptionHandler} instance
+     */
+    Optional<TransportExceptionHandler> buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport);
+
+    /**
+     * If supported, builds the {@link SSLEngine} instance for the {@link HttpServerTransport} instance
+     * @param settings settings
+     * @param transport {@link HttpServerTransport} instance
+     * @return if supported, the {@link SSLEngine} instance
+     * @throws SSLException if the {@link SSLEngine} instance cannot be built
+     */
+    Optional<SSLEngine> buildSecureHttpServerEngine(Settings settings, HttpServerTransport transport) throws SSLException;
+}
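For illustration, a plugin-side provider can stay minimal. The sketch below is hypothetical and not part of this change; it declines both hooks, so the transport falls back to its defaults (the diff above shows the fallback via SslUtils::createDefaultServerSSLEngine), much like the stub providers used in the tests earlier in this patch. The class name is invented.

    import java.util.Optional;
    import javax.net.ssl.SSLEngine;
    import javax.net.ssl.SSLException;
    import org.opensearch.common.settings.Settings;
    import org.opensearch.http.HttpServerTransport;
    import org.opensearch.plugins.SecureHttpTransportSettingsProvider;
    import org.opensearch.plugins.TransportExceptionHandler;

    // Editorial sketch: a provider that opts out of custom SSL handling entirely.
    final class NoopSecureHttpTransportSettingsProvider implements SecureHttpTransportSettingsProvider {
        @Override
        public Optional<TransportExceptionHandler> buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport) {
            return Optional.empty(); // no custom error handling
        }

        @Override
        public Optional<SSLEngine> buildSecureHttpServerEngine(Settings settings, HttpServerTransport transport) throws SSLException {
            return Optional.empty(); // no custom engine; the transport builds a default one
        }
    }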
diff --git a/server/src/main/java/org/opensearch/plugins/SecureSettingsFactory.java b/server/src/main/java/org/opensearch/plugins/SecureSettingsFactory.java
index b98d9cf51c129..ec2276ecc62ef 100644
--- a/server/src/main/java/org/opensearch/plugins/SecureSettingsFactory.java
+++ b/server/src/main/java/org/opensearch/plugins/SecureSettingsFactory.java
@@ -26,4 +26,11 @@ public interface SecureSettingsFactory {
      * @return optionally, the instance of the {@link SecureTransportSettingsProvider}
      */
     Optional<SecureTransportSettingsProvider> getSecureTransportSettingsProvider(Settings settings);
+
+    /**
+     * Creates (or provides a pre-created) instance of the {@link SecureHttpTransportSettingsProvider}
+     * @param settings settings
+     * @return optionally, the instance of the {@link SecureHttpTransportSettingsProvider}
+     */
+    Optional<SecureHttpTransportSettingsProvider> getSecureHttpTransportSettingsProvider(Settings settings);
 }
diff --git a/server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java b/server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java
index 6d038ed30c8ff..5b7402a01f82d 100644
--- a/server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java
+++ b/server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java
@@ -10,12 +10,14 @@
 import org.opensearch.common.annotation.ExperimentalApi;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.http.HttpServerTransport;
-import org.opensearch.transport.TcpTransport;
+import org.opensearch.transport.Transport;
+import org.opensearch.transport.TransportAdapterProvider;
 
 import javax.net.ssl.SSLEngine;
 import javax.net.ssl.SSLException;
 
+import java.util.Collection;
+import java.util.Collections;
 import java.util.Optional;
 
 /**
@@ -26,57 +28,30 @@
 @ExperimentalApi
 public interface SecureTransportSettingsProvider {
     /**
-     * An exception handler for errors that might happen while secure transport handle the requests.
-     *
-     * @see SslExceptionHandler
-     *
-     * @opensearch.experimental
-     */
-    @ExperimentalApi
-    @FunctionalInterface
-    interface ServerExceptionHandler {
-        static ServerExceptionHandler NOOP = t -> {};
-
-        /**
-         * Handler for errors happening during the server side processing of the requests
-         * @param t the error
-         */
-        void onError(Throwable t);
-    }
-
-    /**
-     * If supported, builds the {@link ServerExceptionHandler} instance for {@link HttpServerTransport} instance
-     * @param settings settings
-     * @param transport {@link HttpServerTransport} instance
-     * @return if supported, builds the {@link ServerExceptionHandler} instance
-     */
-    Optional<ServerExceptionHandler> buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport);
-
-    /**
-     * If supported, builds the {@link ServerExceptionHandler} instance for {@link TcpTransport} instance
+     * Collection of additional {@link TransportAdapterProvider}s that are specific to particular transport
      * @param settings settings
-     * @param transport {@link TcpTransport} instance
-     * @return if supported, builds the {@link ServerExceptionHandler} instance
+     * @return a collection of additional {@link TransportAdapterProvider}s
      */
-    Optional<ServerExceptionHandler> buildServerTransportExceptionHandler(Settings settings, TcpTransport transport);
+    default Collection<TransportAdapterProvider<Transport>> getTransportAdapterProviders(Settings settings) {
+        return Collections.emptyList();
+    }
 
     /**
-     * If supported, builds the {@link SSLEngine} instance for {@link HttpServerTransport} instance
+     * If supported, builds the {@link TransportExceptionHandler} instance for {@link Transport} instance
      * @param settings settings
-     * @param transport {@link HttpServerTransport} instance
-     * @return if supported, builds the {@link SSLEngine} instance
-     * @throws SSLException throws SSLException if the {@link SSLEngine} instance cannot be built
+     * @param transport {@link Transport} instance
+     * @return if supported, builds the {@link TransportExceptionHandler} instance
      */
-    Optional<SSLEngine> buildSecureHttpServerEngine(Settings settings, HttpServerTransport transport) throws SSLException;
+    Optional<TransportExceptionHandler> buildServerTransportExceptionHandler(Settings settings, Transport transport);
 
     /**
-     * If supported, builds the {@link SSLEngine} instance for {@link TcpTransport} instance
+     * If supported, builds the {@link SSLEngine} instance for {@link Transport} instance
      * @param settings settings
-     * @param transport {@link TcpTransport} instance
+     * @param transport {@link Transport} instance
      * @return if supported, builds the {@link SSLEngine} instance
      * @throws SSLException throws SSLException if the {@link SSLEngine} instance cannot be built
      */
-    Optional<SSLEngine> buildSecureServerTransportEngine(Settings settings, TcpTransport transport) throws SSLException;
+    Optional<SSLEngine> buildSecureServerTransportEngine(Settings settings, Transport transport) throws SSLException;
 
     /**
      * If supported, builds the {@link SSLEngine} instance for client transport instance
diff --git a/server/src/main/java/org/opensearch/plugins/TransportExceptionHandler.java b/server/src/main/java/org/opensearch/plugins/TransportExceptionHandler.java
new file mode 100644
index 0000000000000..a6b935a6b97bc
--- /dev/null
+++ b/server/src/main/java/org/opensearch/plugins/TransportExceptionHandler.java
@@ -0,0 +1,30 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugins;
+
+import org.opensearch.common.annotation.ExperimentalApi;
+
+/**
+ * An exception handler for errors that might happen while the secure transport handles requests.
+ *
+ * @see SslExceptionHandler
+ *
+ * @opensearch.experimental
+ */
+@ExperimentalApi
+@FunctionalInterface
+public interface TransportExceptionHandler {
+    static TransportExceptionHandler NOOP = t -> {};
+
+    /**
+     * Handler for errors happening during the server-side processing of the requests
+     * @param t the error
+     */
+    void onError(Throwable t);
+}
diff --git a/server/src/main/java/org/opensearch/transport/TransportAdapterProvider.java b/server/src/main/java/org/opensearch/transport/TransportAdapterProvider.java
new file mode 100644
index 0000000000000..36dbd5a699b40
--- /dev/null
+++ b/server/src/main/java/org/opensearch/transport/TransportAdapterProvider.java
@@ -0,0 +1,40 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.transport;
+
+import org.opensearch.common.annotation.ExperimentalApi;
+import org.opensearch.common.settings.Settings;
+
+import java.util.Optional;
+
+/**
+ * Transport specific adapter providers which could be injected into the transport processing chain. The transport adapters
+ * are transport specific and do not have any common abstraction on top.
+ * @param <T> transport type
+ *
+ * @opensearch.experimental
+ */
+@ExperimentalApi
+public interface TransportAdapterProvider<T> {
+    /**
+     * The name of this transport adapter provider (names are essentially freestyle).
+     * @return the name of this transport adapter provider
+     */
+    String name();
+
+    /**
+     * Provides a new transport adapter of the required transport adapter class for the given transport instance.
+     * @param <C> transport adapter class
+     * @param settings settings
+     * @param transport transport instance
+     * @param adapterClass required transport adapter class
+     * @return the non-empty {@link Optional} if the transport adapter could be created, empty one otherwise
+     */
+    <C> Optional<C> create(Settings settings, T transport, Class<C> adapterClass);
+}
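For illustration, a provider that contributes a request header verifier to the secure HTTP transport might look like the following sketch. It is hypothetical and not part of this change; it mirrors the anonymous providers used in SecureNetty4HttpServerTransportConfigurationTests earlier in this patch, and the adapter-class guard around the unchecked cast is an assumption about defensive style, not prescribed by the API.

    import java.util.Optional;
    import io.netty.channel.ChannelInboundHandlerAdapter;
    import org.opensearch.common.settings.Settings;
    import org.opensearch.http.HttpServerTransport;
    import org.opensearch.http.netty4.ssl.SecureNetty4HttpServerTransport;
    import org.opensearch.transport.TransportAdapterProvider;

    // Editorial sketch: a header-verifier provider for the secure HTTP transport.
    final class HeaderVerifierProviderSketch implements TransportAdapterProvider<HttpServerTransport> {
        @Override
        public String name() {
            // Matched case-insensitively against REQUEST_HEADER_VERIFIER by the transport.
            return SecureNetty4HttpServerTransport.REQUEST_HEADER_VERIFIER;
        }

        @SuppressWarnings("unchecked")
        @Override
        public <C> Optional<C> create(Settings settings, HttpServerTransport transport, Class<C> adapterClass) {
            // Only answer when a Netty inbound handler is requested.
            if (adapterClass.isAssignableFrom(ChannelInboundHandlerAdapter.class)) {
                return Optional.of((C) new ChannelInboundHandlerAdapter());
            }
            return Optional.empty();
        }
    }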
diff --git a/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java b/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java
index 1c607ca0dc98b..447377e372e61 100644
--- a/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java
+++ b/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java
@@ -47,13 +47,15 @@
 import org.opensearch.http.HttpStats;
 import org.opensearch.http.NullDispatcher;
 import org.opensearch.plugins.NetworkPlugin;
+import org.opensearch.plugins.SecureHttpTransportSettingsProvider;
+import org.opensearch.plugins.SecureSettingsFactory;
 import org.opensearch.plugins.SecureTransportSettingsProvider;
+import org.opensearch.plugins.TransportExceptionHandler;
 import org.opensearch.telemetry.tracing.Tracer;
 import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.threadpool.TestThreadPool;
 import org.opensearch.threadpool.ThreadPool;
-import org.opensearch.transport.TcpTransport;
 import org.opensearch.transport.Transport;
 import org.opensearch.transport.TransportInterceptor;
 import org.opensearch.transport.TransportRequest;
@@ -73,38 +75,60 @@
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Supplier;
 
+import static org.hamcrest.CoreMatchers.startsWith;
+
 public class NetworkModuleTests extends OpenSearchTestCase {
     private ThreadPool threadPool;
-    private SecureTransportSettingsProvider secureTransportSettingsProvider;
+    private SecureSettingsFactory secureSettingsFactory;
 
     @Override
     public void setUp() throws Exception {
         super.setUp();
         threadPool = new TestThreadPool(NetworkModuleTests.class.getName());
-        secureTransportSettingsProvider = new SecureTransportSettingsProvider() {
-            @Override
-            public Optional<ServerExceptionHandler> buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport) {
-                return Optional.empty();
-            }
-
-            @Override
-            public Optional<ServerExceptionHandler> buildServerTransportExceptionHandler(Settings settings, TcpTransport transport) {
-                return Optional.empty();
-            }
-
-            @Override
-            public Optional<SSLEngine> buildSecureHttpServerEngine(Settings settings, HttpServerTransport transport) throws SSLException {
-                return Optional.empty();
-            }
+        secureSettingsFactory = new SecureSettingsFactory() {
             @Override
-            public Optional<SSLEngine> buildSecureServerTransportEngine(Settings settings, TcpTransport transport) throws SSLException {
-                return Optional.empty();
+            public Optional<SecureTransportSettingsProvider> getSecureTransportSettingsProvider(Settings settings) {
+                return Optional.of(new SecureTransportSettingsProvider() {
+                    @Override
+                    public Optional<TransportExceptionHandler> buildServerTransportExceptionHandler(
+                        Settings settings,
+                        Transport transport
+                    ) {
+                        return Optional.empty();
+                    }
+
+                    @Override
+                    public Optional<SSLEngine> buildSecureServerTransportEngine(Settings settings, Transport transport)
+                        throws SSLException {
+                        return Optional.empty();
+                    }
+
+                    @Override
+                    public Optional<SSLEngine> buildSecureClientTransportEngine(Settings settings, String hostname, int port)
+                        throws SSLException {
+                        return Optional.empty();
+                    }
+                });
             }
 
             @Override
-            public Optional<SSLEngine> buildSecureClientTransportEngine(Settings settings, String hostname, int port) throws SSLException {
-
return Optional.empty(); + public Optional getSecureHttpTransportSettingsProvider(Settings settings) { + return Optional.of(new SecureHttpTransportSettingsProvider() { + @Override + public Optional buildSecureHttpServerEngine(Settings settings, HttpServerTransport transport) + throws SSLException { + return Optional.empty(); + } + + @Override + public Optional buildHttpServerExceptionHandler( + Settings settings, + HttpServerTransport transport + ) { + return Optional.empty(); + } + }); } }; } @@ -211,7 +235,7 @@ public Map> getSecureTransports( return Collections.singletonMap("custom-secure", custom); } }; - NetworkModule module = newNetworkModule(settings, null, List.of(secureTransportSettingsProvider), plugin); + NetworkModule module = newNetworkModule(settings, null, List.of(secureSettingsFactory), plugin); assertSame(custom, module.getTransportSupplier()); } @@ -222,7 +246,7 @@ public void testRegisterSecureHttpTransport() { .build(); Supplier custom = FakeHttpTransport::new; - NetworkModule module = newNetworkModule(settings, null, List.of(secureTransportSettingsProvider), new NetworkPlugin() { + NetworkModule module = newNetworkModule(settings, null, List.of(secureSettingsFactory), new NetworkPlugin() { @Override public Map> getSecureHttpTransports( Settings settings, @@ -234,7 +258,7 @@ public Map> getSecureHttpTransports( NetworkService networkService, HttpServerTransport.Dispatcher requestDispatcher, ClusterSettings clusterSettings, - SecureTransportSettingsProvider secureTransportSettingsProvider, + SecureHttpTransportSettingsProvider secureTransportSettingsProvider, Tracer tracer ) { return Collections.singletonMap("custom-secure", custom); @@ -595,7 +619,7 @@ private NetworkModule newNetworkModule( private NetworkModule newNetworkModule( Settings settings, List coreTransportInterceptors, - List secureTransportSettingsProviders, + List secureSettingsFactories, NetworkPlugin... 
plugins
     ) {
         return new NetworkModule(
@@ -612,7 +636,33 @@ private NetworkModule newNetworkModule(
             new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
             NoopTracer.INSTANCE,
             coreTransportInterceptors,
-            secureTransportSettingsProviders
+            secureSettingsFactories
         );
     }
+
+    public void testRegisterSecureTransportMultipleProviders() {
+        Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "custom-secure").build();
+        Supplier<Transport> custom = () -> null; // content doesn't matter, we check reference equality
+        NetworkPlugin plugin = new NetworkPlugin() {
+            @Override
+            public Map<String, Supplier<Transport>> getSecureTransports(
+                Settings settings,
+                ThreadPool threadPool,
+                PageCacheRecycler pageCacheRecycler,
+                CircuitBreakerService circuitBreakerService,
+                NamedWriteableRegistry namedWriteableRegistry,
+                NetworkService networkService,
+                SecureTransportSettingsProvider secureTransportSettingsProvider,
+                Tracer tracer
+            ) {
+                return Collections.singletonMap("custom-secure", custom);
+            }
+        };
+
+        final IllegalArgumentException ex = assertThrows(
+            IllegalArgumentException.class,
+            () -> newNetworkModule(settings, null, List.of(secureSettingsFactory, secureSettingsFactory), plugin)
+        );
+        assertThat(ex.getMessage(), startsWith("there is more than one secure transport settings provider"));
+    }
 }

From eba3b572a2ed778ac62cce661a894c7f561ecd48 Mon Sep 17 00:00:00 2001
From: Jay Deng
Date: Thu, 28 Mar 2024 14:52:08 -0700
Subject: [PATCH 55/74] Disable concurrent segment search for system indices and throttled search requests (#12954)

Signed-off-by: Jay Deng
---
 CHANGELOG.md                                  |  1 +
 .../search/DefaultSearchContext.java          |  9 +-
 .../search/DefaultSearchContextTests.java     | 83 ++++++++++++++++++-
 3 files changed, 90 insertions(+), 3 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index de832cdc5c52f..f44c993f6faa0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -107,6 +107,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Add a counter to node stat api to track shard going from idle to non-idle ([#12768](https://github.com/opensearch-project/OpenSearch/pull/12768))
 - Allow setting KEYSTORE_PASSWORD through env variable ([#12865](https://github.com/opensearch-project/OpenSearch/pull/12865))
 - [Concurrent Segment Search] Perform buildAggregation concurrently and support Composite Aggregations ([#12697](https://github.com/opensearch-project/OpenSearch/pull/12697))
+- [Concurrent Segment Search] Disable concurrent segment search for system indices and throttled requests ([#12954](https://github.com/opensearch-project/OpenSearch/pull/12954))
 
 ### Dependencies
 - Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896))
diff --git a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java
index 061aa2f6e5896..c76ea71c0a094 100644
--- a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java
+++ b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java
@@ -962,6 +962,12 @@ public BucketCollectorProcessor bucketCollectorProcessor() {
      *         false: otherwise
      */
    private boolean evaluateConcurrentSegmentSearchSettings(Executor concurrentSearchExecutor) {
+        // Do not use concurrent segment search for system indices or throttled requests.
See: + // https://github.com/opensearch-project/OpenSearch/issues/12951 + if (indexShard.isSystem() || indexShard.indexSettings().isSearchThrottled()) { + return false; + } + if ((clusterService != null) && (concurrentSearchExecutor != null)) { return indexService.getIndexSettings() .getSettings() @@ -969,9 +975,8 @@ private boolean evaluateConcurrentSegmentSearchSettings(Executor concurrentSearc IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), clusterService.getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) ); - } else { - return false; } + return false; } @Override diff --git a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java index 3793249d569f0..a1a808c9faa9b 100644 --- a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java +++ b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java @@ -93,6 +93,7 @@ import java.util.function.Function; import java.util.function.Supplier; +import static org.opensearch.index.IndexSettings.INDEX_SEARCH_THROTTLED; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.any; @@ -168,6 +169,7 @@ public void testPreProcess() throws Exception { IndexSettings indexSettings = new IndexSettings(indexMetadata, Settings.EMPTY); when(indexService.getIndexSettings()).thenReturn(indexSettings); when(mapperService.getIndexSettings()).thenReturn(indexSettings); + when(indexShard.indexSettings()).thenReturn(indexSettings); BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); @@ -486,6 +488,14 @@ public void testClearQueryCancellationsOnClose() throws IOException { when(indexService.newQueryShardContext(eq(shardId.id()), any(), any(), nullable(String.class), anyBoolean())).thenReturn( queryShardContext ); + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + .build(); + IndexMetadata indexMetadata = IndexMetadata.builder("index").settings(settings).build(); + IndexSettings indexSettings = new IndexSettings(indexMetadata, Settings.EMPTY); + when(indexShard.indexSettings()).thenReturn(indexSettings); BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); @@ -551,7 +561,7 @@ protected Engine.Searcher acquireSearcherInternal(String source) { } } - public void testSearchPathEvaluationUsingSortField() throws Exception { + public void testSearchPathEvaluation() throws Exception { ShardSearchRequest shardSearchRequest = mock(ShardSearchRequest.class); when(shardSearchRequest.searchType()).thenReturn(SearchType.DEFAULT); ShardId shardId = new ShardId("index", UUID.randomUUID().toString(), 1); @@ -578,9 +588,24 @@ public void testSearchPathEvaluationUsingSortField() throws Exception { IndexMetadata indexMetadata = IndexMetadata.builder("index").settings(settings).build(); IndexSettings indexSettings = new IndexSettings(indexMetadata, Settings.EMPTY); when(indexService.getIndexSettings()).thenReturn(indexSettings); + when(indexShard.indexSettings()).thenReturn(indexSettings); BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + IndexShard systemIndexShard = mock(IndexShard.class); + 
when(systemIndexShard.getQueryCachingPolicy()).thenReturn(queryCachingPolicy); + when(systemIndexShard.getThreadPool()).thenReturn(threadPool); + when(systemIndexShard.isSystem()).thenReturn(true); + + IndexShard throttledIndexShard = mock(IndexShard.class); + when(throttledIndexShard.getQueryCachingPolicy()).thenReturn(queryCachingPolicy); + when(throttledIndexShard.getThreadPool()).thenReturn(threadPool); + IndexSettings throttledIndexSettings = new IndexSettings( + indexMetadata, + Settings.builder().put(INDEX_SEARCH_THROTTLED.getKey(), true).build() + ); + when(throttledIndexShard.indexSettings()).thenReturn(throttledIndexSettings); + try (Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { final Supplier searcherSupplier = () -> new Engine.SearcherSupplier(Function.identity()) { @@ -697,6 +722,62 @@ protected Engine.Searcher acquireSearcherInternal(String source) { } assertThrows(SetOnce.AlreadySetException.class, context::evaluateRequestShouldUseConcurrentSearch); + // Case 4: With a system index concurrent segment search is not used + readerContext = new ReaderContext( + newContextId(), + indexService, + systemIndexShard, + searcherSupplier.get(), + randomNonNegativeLong(), + false + ); + context = new DefaultSearchContext( + readerContext, + shardSearchRequest, + target, + null, + bigArrays, + null, + null, + null, + false, + Version.CURRENT, + false, + executor, + null + ); + context.evaluateRequestShouldUseConcurrentSearch(); + assertFalse(context.shouldUseConcurrentSearch()); + assertThrows(SetOnce.AlreadySetException.class, context::evaluateRequestShouldUseConcurrentSearch); + + // Case 5: When search is throttled concurrent segment search is not used + readerContext = new ReaderContext( + newContextId(), + indexService, + throttledIndexShard, + searcherSupplier.get(), + randomNonNegativeLong(), + false + ); + context = new DefaultSearchContext( + readerContext, + shardSearchRequest, + target, + null, + bigArrays, + null, + null, + null, + false, + Version.CURRENT, + false, + executor, + null + ); + context.evaluateRequestShouldUseConcurrentSearch(); + assertFalse(context.shouldUseConcurrentSearch()); + assertThrows(SetOnce.AlreadySetException.class, context::evaluateRequestShouldUseConcurrentSearch); + // shutdown the threadpool threadPool.shutdown(); } From 8426e14371ce15a7ecb80ff35c109e5830027888 Mon Sep 17 00:00:00 2001 From: Ruirui Zhang Date: Thu, 28 Mar 2024 20:43:00 -0700 Subject: [PATCH 56/74] Update version checks for shard search idle metric after backport to 2.x (#12972) --- .../rest-api-spec/test/cat.shards/10_basic.yml | 10 +++++----- .../org/opensearch/index/search/stats/SearchStats.java | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml index c309f19b454e7..989ea6b93f47f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml @@ -1,13 +1,13 @@ "Help": - skip: - version: " - 2.99.99" + version: " - 2.13.99" reason: search idle reactivate count total is only added in 3.0.0 features: node_selector - do: cat.shards: help: true node_selector: - version: "3.0.0 - " + version: "2.14.0 - " - match: $body: | @@ -93,16 +93,16 @@ docs.deleted .+ \n $/ --- -"Help from 2.12.0 to 2.99.99": +"Help from 2.12.0 to 2.13.99": - skip: - version: " - 2.11.99 
, 3.0.0 - " + version: " - 2.11.99 , 2.14.0 - " reason: deleted docs and concurrent search are added in 2.12.0 features: node_selector - do: cat.shards: help: true node_selector: - version: "2.12.0 - 2.99.99" + version: "2.12.0 - 2.13.99" - match: $body: | diff --git a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java index d3a53fcc0e2d8..bb61e1afa05f4 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java @@ -261,7 +261,7 @@ private Stats(StreamInput in) throws IOException { queryConcurrency = in.readVLong(); } - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_14_0)) { searchIdleReactivateCount = in.readVLong(); } } @@ -475,7 +475,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(queryConcurrency); } - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_14_0)) { out.writeVLong(searchIdleReactivateCount); } } From 62776d1bd007626f270cc263e1aafabc2c80c063 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Apr 2024 10:41:18 -0400 Subject: [PATCH 57/74] Bump commons-io:commons-io from 2.15.1 to 2.16.0 in /plugins/repository-hdfs (#12996) * Bump commons-io:commons-io in /plugins/repository-hdfs Bumps commons-io:commons-io from 2.15.1 to 2.16.0. --- updated-dependencies: - dependency-name: commons-io:commons-io dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/commons-io-2.15.1.jar.sha1 | 1 - plugins/repository-hdfs/licenses/commons-io-2.16.0.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/commons-io-2.15.1.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/commons-io-2.16.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index f44c993f6faa0..38246f566cf6e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -114,6 +114,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `asm` from 9.6 to 9.7 ([#12908](https://github.com/opensearch-project/OpenSearch/pull/12908)) - Bump `net.minidev:json-smart` from 2.5.0 to 2.5.1 ([#12893](https://github.com/opensearch-project/OpenSearch/pull/12893)) - Bump `netty` from 4.1.107.Final to 4.1.108.Final ([#12924](https://github.com/opensearch-project/OpenSearch/pull/12924)) +- Bump `commons-io:commons-io` from 2.15.1 to 2.16.0 ([#12996](https://github.com/opensearch-project/OpenSearch/pull/12996)) ### Changed - [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872)) diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index af07fa9e5d80b..6faf0383d3ba2 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -75,7 +75,7 @@ dependencies { api 'commons-collections:commons-collections:3.2.2' api 
"org.apache.commons:commons-compress:${versions.commonscompress}" api 'org.apache.commons:commons-configuration2:2.10.1' - api 'commons-io:commons-io:2.15.1' + api 'commons-io:commons-io:2.16.0' api 'org.apache.commons:commons-lang3:3.14.0' implementation 'com.google.re2j:re2j:1.7' api 'javax.servlet:servlet-api:2.5' diff --git a/plugins/repository-hdfs/licenses/commons-io-2.15.1.jar.sha1 b/plugins/repository-hdfs/licenses/commons-io-2.15.1.jar.sha1 deleted file mode 100644 index 47c5d13812a36..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-io-2.15.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f11560da189ab563a5c8e351941415430e9304ea \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-io-2.16.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-io-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..6a7b638719fa3 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-io-2.16.0.jar.sha1 @@ -0,0 +1 @@ +27875a7935f1ddcc13267eb6fae1f719e0409572 \ No newline at end of file From 1ec49bd3aee6cd4463ae91ffb6c09054d1bbc02b Mon Sep 17 00:00:00 2001 From: Mohammad Hasnain Mohsin Rajan Date: Tue, 2 Apr 2024 03:38:49 +0530 Subject: [PATCH 58/74] feat: constant keyword field (#12285) Constant keyword fields behave similarly to regular keyword fields, except that they are defined only in the index mapping, and all documents in the index appear to have the same value for the constant keyword field. --------- Signed-off-by: Mohammad Hasnain --- CHANGELOG.md | 1 + .../mapper/ConstantKeywordFieldMapper.java | 191 ++++++++++++++++++ .../org/opensearch/indices/IndicesModule.java | 2 + .../ConstantKeywordFieldMapperTests.java | 114 +++++++++++ .../mapper/ConstantKeywordFieldTypeTests.java | 93 +++++++++ .../terms/SignificantTextAggregatorTests.java | 4 +- .../aggregations/AggregatorTestCase.java | 5 + 7 files changed, 409 insertions(+), 1 deletion(-) create mode 100644 server/src/main/java/org/opensearch/index/mapper/ConstantKeywordFieldMapper.java create mode 100644 server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldMapperTests.java create mode 100644 server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldTypeTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 38246f566cf6e..2a43b676ce166 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -103,6 +103,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added +- Constant Keyword Field ([#12285](https://github.com/opensearch-project/OpenSearch/pull/12285)) - Convert ingest processor supports ip type ([#12818](https://github.com/opensearch-project/OpenSearch/pull/12818)) - Add a counter to node stat api to track shard going from idle to non-idle ([#12768](https://github.com/opensearch-project/OpenSearch/pull/12768)) - Allow setting KEYSTORE_PASSWORD through env variable ([#12865](https://github.com/opensearch-project/OpenSearch/pull/12865)) diff --git a/server/src/main/java/org/opensearch/index/mapper/ConstantKeywordFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/ConstantKeywordFieldMapper.java new file mode 100644 index 0000000000000..f4730c70362d1 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/mapper/ConstantKeywordFieldMapper.java @@ -0,0 +1,191 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.index.mapper;
+
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.Query;
+import org.opensearch.OpenSearchParseException;
+import org.opensearch.common.annotation.PublicApi;
+import org.opensearch.common.regex.Regex;
+import org.opensearch.index.fielddata.IndexFieldData;
+import org.opensearch.index.fielddata.plain.ConstantIndexFieldData;
+import org.opensearch.index.query.QueryShardContext;
+import org.opensearch.search.aggregations.support.CoreValuesSourceType;
+import org.opensearch.search.lookup.SearchLookup;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Supplier;
+
+/**
+ * Field mapper for the constant_keyword field type
+ *
+ * @opensearch.api
+ */
+@PublicApi(since = "2.14.0")
+public class ConstantKeywordFieldMapper extends ParametrizedFieldMapper {
+
+    public static final String CONTENT_TYPE = "constant_keyword";
+
+    private static final String valuePropertyName = "value";
+
+    /**
+     * A {@link Mapper.TypeParser} for the constant keyword field.
+     *
+     * @opensearch.internal
+     */
+    public static class TypeParser implements Mapper.TypeParser {
+        @Override
+        public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+            if (!node.containsKey(valuePropertyName)) {
+                throw new OpenSearchParseException("Field [" + name + "] is missing required parameter [value]");
+            }
+            Object value = node.remove(valuePropertyName);
+            if (!(value instanceof String)) {
+                throw new OpenSearchParseException("Field [" + name + "] is expected to be a string value");
+            }
+            return new Builder(name, (String) value);
+        }
+    }
+
+    private static ConstantKeywordFieldMapper toType(FieldMapper in) {
+        return (ConstantKeywordFieldMapper) in;
+    }
+
+    /**
+     * Builder for the constant keyword field mapper
+     *
+     * @opensearch.internal
+     */
+    public static class Builder extends ParametrizedFieldMapper.Builder {
+
+        private final Parameter<String> value;
+
+        public Builder(String name, String value) {
+            super(name);
+            this.value = Parameter.stringParam(valuePropertyName, false, m -> toType(m).value, value);
+        }
+
+        @Override
+        public List<Parameter<?>> getParameters() {
+            return Arrays.asList(value);
+        }
+
+        @Override
+        public ConstantKeywordFieldMapper build(BuilderContext context) {
+            return new ConstantKeywordFieldMapper(
+                name,
+                new ConstantKeywordFieldMapper.ConstantKeywordFieldType(buildFullName(context), value.getValue()),
+                multiFieldsBuilder.build(this, context),
+                copyTo.build(),
+                this
+            );
+        }
+    }
+
+    /**
+     * Field type for the constant keyword field mapper
+     *
+     * @opensearch.internal
+     */
+    @PublicApi(since = "2.14.0")
+    protected static final class ConstantKeywordFieldType extends ConstantFieldType {
+
+        protected final String value;
+
+        public ConstantKeywordFieldType(String name, String value) {
+            super(name, Collections.emptyMap());
+            this.value = value;
+        }
+
+        @Override
+        public String typeName() {
+            return CONTENT_TYPE;
+        }
+
+        @Override
+        protected boolean matches(String pattern, boolean caseInsensitive, QueryShardContext context) {
+            return Regex.simpleMatch(pattern, value, caseInsensitive);
+        }
+
+        @Override
+        public Query existsQuery(QueryShardContext context) {
+            return new MatchAllDocsQuery();
+        }
+
+        @Override
+        public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, Supplier<SearchLookup> searchLookup) {
+            return new ConstantIndexFieldData.Builder(fullyQualifiedIndexName, name(), CoreValuesSourceType.BYTES);
+        }
+
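+        // Note on the fetcher below: it reads the field's value back from each document's _source
+        // (indexing guarantees this equals the mapping-defined constant), and it rejects custom
+        // formats because a constant_keyword field carries a single canonical string value.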
+        @Override
+        public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) {
+            if (format != null) {
+                throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't " + "support formats.");
+            }
+
+            return new SourceValueFetcher(name(), context) {
+                @Override
+                protected Object parseSourceValue(Object value) {
+                    String keywordValue = value.toString();
+                    return Collections.singletonList(keywordValue);
+                }
+            };
+        }
+    }
+
+    private final String value;
+
+    protected ConstantKeywordFieldMapper(
+        String simpleName,
+        MappedFieldType mappedFieldType,
+        MultiFields multiFields,
+        CopyTo copyTo,
+        ConstantKeywordFieldMapper.Builder builder
+    ) {
+        super(simpleName, mappedFieldType, multiFields, copyTo);
+        this.value = builder.value.getValue();
+    }
+
+    public ParametrizedFieldMapper.Builder getMergeBuilder() {
+        return new ConstantKeywordFieldMapper.Builder(simpleName(), this.value).init(this);
+    }
+
+    @Override
+    protected void parseCreateField(ParseContext context) throws IOException {
+
+        final String value;
+        if (context.externalValueSet()) {
+            value = context.externalValue().toString();
+        } else {
+            value = context.parser().textOrNull();
+        }
+        if (value == null) {
+            throw new IllegalArgumentException("constant keyword field [" + name() + "] must have a value");
+        }
+
+        if (!value.equals(fieldType().value)) {
+            throw new IllegalArgumentException("constant keyword field [" + name() + "] must have a value of [" + this.value + "]");
+        }
+
+    }
+
+    @Override
+    public ConstantKeywordFieldMapper.ConstantKeywordFieldType fieldType() {
+        return (ConstantKeywordFieldMapper.ConstantKeywordFieldType) super.fieldType();
+    }
+
+    @Override
+    protected String contentType() {
+        return CONTENT_TYPE;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/indices/IndicesModule.java b/server/src/main/java/org/opensearch/indices/IndicesModule.java
index b86e98f4ebcbc..fee2888c7a3fb 100644
--- a/server/src/main/java/org/opensearch/indices/IndicesModule.java
+++ b/server/src/main/java/org/opensearch/indices/IndicesModule.java
@@ -46,6 +46,7 @@
 import org.opensearch.index.mapper.BinaryFieldMapper;
 import org.opensearch.index.mapper.BooleanFieldMapper;
 import org.opensearch.index.mapper.CompletionFieldMapper;
+import org.opensearch.index.mapper.ConstantKeywordFieldMapper;
 import org.opensearch.index.mapper.DataStreamFieldMapper;
 import org.opensearch.index.mapper.DateFieldMapper;
 import org.opensearch.index.mapper.DocCountFieldMapper;
@@ -168,6 +169,7 @@ public static Map<String, Mapper.TypeParser> getMappers(List<MapperPlugin> mappe
         mappers.put(FieldAliasMapper.CONTENT_TYPE, new FieldAliasMapper.TypeParser());
         mappers.put(GeoPointFieldMapper.CONTENT_TYPE, new GeoPointFieldMapper.TypeParser());
         mappers.put(FlatObjectFieldMapper.CONTENT_TYPE, FlatObjectFieldMapper.PARSER);
+        mappers.put(ConstantKeywordFieldMapper.CONTENT_TYPE, new ConstantKeywordFieldMapper.TypeParser());
 
         for (MapperPlugin mapperPlugin : mapperPlugins) {
             for (Map.Entry<String, Mapper.TypeParser> entry : mapperPlugin.getMappers().entrySet()) {
diff --git a/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldMapperTests.java
new file mode 100644
index 0000000000000..65dd3b6447663
--- /dev/null
+++ b/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldMapperTests.java
@@ -0,0 +1,114 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.mapper;
+
+import org.apache.lucene.index.IndexableField;
+import org.opensearch.OpenSearchParseException;
+import org.opensearch.common.CheckedConsumer;
+import org.opensearch.common.compress.CompressedXContent;
+import org.opensearch.common.xcontent.XContentFactory;
+import org.opensearch.common.xcontent.json.JsonXContent;
+import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
+import org.opensearch.core.xcontent.XContentBuilder;
+import org.opensearch.index.IndexService;
+import org.opensearch.plugins.Plugin;
+import org.opensearch.test.InternalSettingsPlugin;
+import org.opensearch.test.OpenSearchSingleNodeTestCase;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.util.Collection;
+
+import static org.hamcrest.Matchers.containsString;
+
+public class ConstantKeywordFieldMapperTests extends OpenSearchSingleNodeTestCase {
+
+    private IndexService indexService;
+    private DocumentMapperParser parser;
+
+    @Override
+    protected Collection<Class<? extends Plugin>> getPlugins() {
+        return pluginList(InternalSettingsPlugin.class);
+    }
+
+    @Before
+    public void setup() {
+        indexService = createIndex("test");
+        parser = indexService.mapperService().documentMapperParser();
+    }
+
+    public void testDefaultDisabledIndexMapper() throws Exception {
+
+        XContentBuilder mapping = XContentFactory.jsonBuilder()
+            .startObject()
+            .startObject("type")
+            .startObject("properties")
+            .startObject("field")
+            .field("type", "constant_keyword")
+            .field("value", "default_value")
+            .endObject()
+            .startObject("field2")
+            .field("type", "keyword")
+            .endObject();
+        mapping = mapping.endObject().endObject().endObject();
+        DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping.toString()));
+
+        MapperParsingException e = expectThrows(MapperParsingException.class, () -> mapper.parse(source(b -> {
+            b.field("field", "sdf");
+            b.field("field2", "szdfvsddf");
+        })));
+        assertThat(
+            e.getMessage(),
+            containsString(
+                "failed to parse field [field] of type [constant_keyword] in document with id '1'. Preview of field's value: 'sdf'"
+            )
+        );
+
+        final ParsedDocument doc = mapper.parse(source(b -> {
+            b.field("field", "default_value");
+            b.field("field2", "field_2_value");
+        }));
+
+        final IndexableField field = doc.rootDoc().getField("field");
+
+        // constantKeywordField should not be stored
+        assertNull(field);
+    }
+
+    public void testMissingDefaultIndexMapper() throws Exception {
+
+        final XContentBuilder mapping = XContentFactory.jsonBuilder()
+            .startObject()
+            .startObject("type")
+            .startObject("properties")
+            .startObject("field")
+            .field("type", "constant_keyword")
+            .endObject()
+            .startObject("field2")
+            .field("type", "keyword")
+            .endObject()
+            .endObject()
+            .endObject()
+            .endObject();
+
+        OpenSearchParseException e = expectThrows(
+            OpenSearchParseException.class,
+            () -> parser.parse("type", new CompressedXContent(mapping.toString()))
+        );
+        assertThat(e.getMessage(), containsString("Field [field] is missing required parameter [value]"));
+    }
+
+    private final SourceToParse source(CheckedConsumer<XContentBuilder, IOException> build) throws IOException {
+        XContentBuilder builder = JsonXContent.contentBuilder().startObject();
+        build.accept(builder);
+        builder.endObject();
+        return new SourceToParse("test", "1", BytesReference.bytes(builder), MediaTypeRegistry.JSON);
+    }
+}
diff --git a/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldTypeTests.java
new file mode 100644
index 0000000000000..235811539a299
--- /dev/null
+++ b/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldTypeTests.java
@@ -0,0 +1,93 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.mapper;
+
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.MatchNoDocsQuery;
+import org.opensearch.Version;
+import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.common.regex.Regex;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.index.IndexSettings;
+import org.opensearch.index.query.QueryShardContext;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.function.Predicate;
+
+public class ConstantKeywordFieldTypeTests extends FieldTypeTestCase {
+
+    final ConstantKeywordFieldMapper.ConstantKeywordFieldType ft = new ConstantKeywordFieldMapper.ConstantKeywordFieldType(
+        "field",
+        "default"
+    );
+
+    public void testTermQuery() {
+        assertEquals(new MatchAllDocsQuery(), ft.termQuery("default", createContext()));
+        assertEquals(new MatchNoDocsQuery(), ft.termQuery("not_default", createContext()));
+    }
+
+    public void testTermsQuery() {
+        assertEquals(new MatchAllDocsQuery(), ft.termsQuery(Arrays.asList("default", "not_default"), createContext()));
+        assertEquals(new MatchNoDocsQuery(), ft.termsQuery(Arrays.asList("no_default", "not_default"), createContext()));
+        assertEquals(new MatchNoDocsQuery(), ft.termsQuery(List.of(), createContext()));
+    }
+
+    public void testInsensitiveTermQuery() {
+        assertEquals(new MatchAllDocsQuery(), ft.termQueryCaseInsensitive("defaUlt", createContext()));
+        assertEquals(new MatchNoDocsQuery(), ft.termQueryCaseInsensitive("not_defaUlt", createContext()));
+    }
+
+    public void testPrefixQuery() {
+        assertEquals(new MatchAllDocsQuery(), ft.prefixQuery("defau", null, createContext()));
+        assertEquals(new MatchNoDocsQuery(), ft.prefixQuery("not_default", null, createContext()));
+    }
+
+    public void testWildcardQuery() {
+        assertEquals(new MatchAllDocsQuery(), ft.wildcardQuery("defa*lt", null, createContext()));
+        assertEquals(new MatchNoDocsQuery(), ft.wildcardQuery("no_defa*lt", null, createContext()));
+        assertEquals(new MatchAllDocsQuery(), ft.wildcardQuery("defa*", null, createContext()));
+        assertEquals(new MatchAllDocsQuery(), ft.wildcardQuery("*ult", null, createContext()));
+
+    }
+
+    public void testExistsQuery() {
+        assertEquals(new MatchAllDocsQuery(), ft.existsQuery(createContext()));
+    }
+
+    private QueryShardContext createContext() {
+        IndexMetadata indexMetadata = IndexMetadata.builder("index")
+            .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT))
+            .numberOfShards(1)
+            .numberOfReplicas(0)
+            .build();
+        IndexSettings indexSettings = new IndexSettings(indexMetadata, Settings.EMPTY);
+
+        Predicate<String> indexNameMatcher = pattern -> Regex.simpleMatch(pattern, "index");
+        return new QueryShardContext(
+            0,
+            indexSettings,
+            null,
+            null,
+            null,
+            null,
+            null,
+            null,
+            xContentRegistry(),
+            writableRegistry(),
+            null,
+            null,
+            System::currentTimeMillis,
+            null,
+            indexNameMatcher,
+            () -> true,
+            null
+        );
+    }
+}
diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorTests.java
index e9b2d40fd4ede..644cee57bd5a4 100644
--- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorTests.java
+++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorTests.java
@@ -50,6 +50,7 @@
 import org.opensearch.index.analysis.AnalyzerScope;
 import org.opensearch.index.analysis.NamedAnalyzer;
 import org.opensearch.index.mapper.BinaryFieldMapper;
+import org.opensearch.index.mapper.ConstantKeywordFieldMapper;
 import org.opensearch.index.mapper.FlatObjectFieldMapper;
 import org.opensearch.index.mapper.GeoPointFieldMapper;
 import org.opensearch.index.mapper.MappedFieldType;
@@ -104,7 +105,8 @@ protected List<String> unsupportedMappedFieldTypes() {
         return Arrays.asList(
             BinaryFieldMapper.CONTENT_TYPE, // binary fields are not supported because they do not have analyzers
             GeoPointFieldMapper.CONTENT_TYPE, // geopoint fields cannot use term queries
-            FlatObjectFieldMapper.CONTENT_TYPE // flat_object fields are not supported aggregations
+            FlatObjectFieldMapper.CONTENT_TYPE, // flat_object fields are not supported in aggregations
+            ConstantKeywordFieldMapper.CONTENT_TYPE // constant_keyword fields are not supported because they do not have analyzers
         );
     }
diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java
index 4eb49ebb42241..f83163bd139cd 100644
--- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java
@@ -95,6 +95,7 @@
 import org.opensearch.index.fielddata.IndexFieldDataService;
 import org.opensearch.index.mapper.BinaryFieldMapper;
 import org.opensearch.index.mapper.CompletionFieldMapper;
+import org.opensearch.index.mapper.ConstantKeywordFieldMapper;
 import org.opensearch.index.mapper.ContentPath;
 import org.opensearch.index.mapper.DateFieldMapper;
 import org.opensearch.index.mapper.FieldAliasMapper;
@@ -778,6 +779,10 @@ public void testSupportedFieldTypes() throws IOException {
             source.put("doc_values", "true");
         }
 
+        if (mappedType.getKey().equals(ConstantKeywordFieldMapper.CONTENT_TYPE) == true) {
+            source.put("value", "default_value");
+        }
+
         Mapper.Builder builder = mappedType.getValue().parse(fieldName, source, new MockParserContext());
         FieldMapper mapper = (FieldMapper) builder.build(new BuilderContext(settings, new ContentPath()));

From 37569bad2665f0455fea39d6efc8f852bcab7cce Mon Sep 17 00:00:00 2001
From: Mrudhul Guda
Date: Tue, 2 Apr 2024 03:53:56 +0530
Subject: [PATCH 59/74] FIX: UOE While building Exists query for nested search_as_you_type field (#12048)

The "exists" query on an object field would fail when a "search_as_you_type" field is nested under that object. It would also fail for a text field with prefixes enabled nested under the object, or any other field with a "hidden" subfield.
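As a rough illustration (a hypothetical sketch assembled from the test added below, not an excerpt from the change itself): with an object field "field" wrapping a search_as_you_type subfield, building an exists-style query over the object used to fail.

    // Expanding "field.*" also matched the hidden prefix subfield
    // (here field.nested_field._index_prefix), whose existsQuery() threw
    // UnsupportedOperationException before this fix.
    QueryShardContext context = createQueryShardContext(mapperService);
    Query query = new QueryStringQueryBuilder("field:*").toQuery(context);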
--------- Signed-off-by: Mrudhul Guda --- CHANGELOG.md | 1 + .../SearchAsYouTypeFieldMapperTests.java | 27 +++++++++++++++++++ .../index/query/ExistsQueryBuilder.java | 5 ++++ .../index/mapper/MapperServiceTestCase.java | 3 +++ 4 files changed, 36 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2a43b676ce166..e8e39d3b5ee00 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -98,6 +98,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Bug] Check phase name before SearchRequestOperationsListener onPhaseStart ([#12035](https://github.com/opensearch-project/OpenSearch/pull/12035)) - Fix Span operation names generated from RestActions ([#12005](https://github.com/opensearch-project/OpenSearch/pull/12005)) - Fix error in RemoteSegmentStoreDirectory when debug logging is enabled ([#12328](https://github.com/opensearch-project/OpenSearch/pull/12328)) +- Fix UOE While building Exists query for nested search_as_you_type field ([#12048](https://github.com/opensearch-project/OpenSearch/pull/12048)) ### Security diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java index b5f687ce34d4b..f55ad2e9d659c 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java @@ -47,6 +47,7 @@ import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.MultiPhraseQuery; +import org.apache.lucene.search.NormsFieldExistsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.SynonymQuery; import org.apache.lucene.search.TermQuery; @@ -68,6 +69,7 @@ import org.opensearch.index.query.MatchPhraseQueryBuilder; import org.opensearch.index.query.MultiMatchQueryBuilder; import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.QueryStringQueryBuilder; import org.opensearch.plugins.Plugin; import java.io.IOException; @@ -541,6 +543,31 @@ public void testMatchPhrase() throws IOException { } } + public void testNestedExistsQuery() throws IOException { + MapperService mapperService = createMapperService(mapping(b -> { + b.startObject("field"); + { + b.field("type", "object"); + b.startObject("properties"); + { + b.startObject("nested_field"); + { + b.field("type", "search_as_you_type"); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + })); + QueryShardContext queryShardContext = createQueryShardContext(mapperService); + Query actual = new QueryStringQueryBuilder("field:*").toQuery(queryShardContext); + Query expected = new ConstantScoreQuery( + new BooleanQuery.Builder().add(new NormsFieldExistsQuery("field.nested_field"), BooleanClause.Occur.SHOULD).build() + ); + assertEquals(expected, actual); + } + private static BooleanQuery buildBoolPrefixQuery(String shingleFieldName, String prefixFieldName, List terms) { final BooleanQuery.Builder builder = new BooleanQuery.Builder(); for (int i = 0; i < terms.size() - 1; i++) { diff --git a/server/src/main/java/org/opensearch/index/query/ExistsQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/ExistsQueryBuilder.java index 7fd83d5753512..3011a48fbb296 100644 --- a/server/src/main/java/org/opensearch/index/query/ExistsQueryBuilder.java +++ 
b/server/src/main/java/org/opensearch/index/query/ExistsQueryBuilder.java @@ -201,6 +201,11 @@ private static Query newObjectFieldExistsQuery(QueryShardContext context, String BooleanQuery.Builder booleanQuery = new BooleanQuery.Builder(); Collection fields = context.simpleMatchToIndexNames(objField + ".*"); for (String field : fields) { + int dotPos = field.lastIndexOf('.'); + if (dotPos > 0 && field.charAt(dotPos + 1) == '_') { + // This is a subfield (e.g. prefix) of a complex field type. Skip it. + continue; + } Query existsQuery = context.getMapperService().fieldType(field).existsQuery(context); booleanQuery.add(existsQuery, Occur.SHOULD); } diff --git a/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java index a65ce3cbdd380..b3d163db8c222 100644 --- a/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java @@ -247,6 +247,9 @@ protected QueryShardContext createQueryShardContext(MapperService mapperService) when(queryShardContext.getSearchQuoteAnalyzer(any())).thenCallRealMethod(); when(queryShardContext.getSearchAnalyzer(any())).thenCallRealMethod(); when(queryShardContext.getIndexSettings()).thenReturn(mapperService.getIndexSettings()); + when(queryShardContext.getObjectMapper(anyString())).thenAnswer( + inv -> mapperService.getObjectMapper(inv.getArguments()[0].toString()) + ); when(queryShardContext.simpleMatchToIndexNames(any())).thenAnswer( inv -> mapperService.simpleMatchToFullName(inv.getArguments()[0].toString()) ); From c25b00c6b62fd6766999c09fd06dd1f7b281b3eb Mon Sep 17 00:00:00 2001 From: Michael Froh Date: Tue, 2 Apr 2024 00:11:11 +0000 Subject: [PATCH 60/74] Move changelog entry down to 2.x block (#13004) We need to update our PR template to suggest that people add things to "unreleased 2.x" unless they definitely want to leave things for 3.0. 
Signed-off-by: Michael Froh --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e8e39d3b5ee00..bc3acfc991e25 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -98,7 +98,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Bug] Check phase name before SearchRequestOperationsListener onPhaseStart ([#12035](https://github.com/opensearch-project/OpenSearch/pull/12035)) - Fix Span operation names generated from RestActions ([#12005](https://github.com/opensearch-project/OpenSearch/pull/12005)) - Fix error in RemoteSegmentStoreDirectory when debug logging is enabled ([#12328](https://github.com/opensearch-project/OpenSearch/pull/12328)) -- Fix UOE While building Exists query for nested search_as_you_type field ([#12048](https://github.com/opensearch-project/OpenSearch/pull/12048)) ### Security @@ -128,6 +127,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Fixed - Fix issue with feature flags where default value may not be honored ([#12849](https://github.com/opensearch-project/OpenSearch/pull/12849)) +- Fix UOE While building Exists query for nested search_as_you_type field ([#12048](https://github.com/opensearch-project/OpenSearch/pull/12048)) ### Security From 2dc071ff2546b0c6483b9e0a382badf9ac353cdc Mon Sep 17 00:00:00 2001 From: Peter Nied Date: Mon, 1 Apr 2024 22:18:37 -0500 Subject: [PATCH 61/74] Detect breaking changes on pull requests (#12974) --- .github/workflows/detect-breaking-change.yml | 24 ++++++ CHANGELOG.md | 1 + server/build.gradle | 79 ++++++++++++++++++++ 3 files changed, 104 insertions(+) create mode 100644 .github/workflows/detect-breaking-change.yml diff --git a/.github/workflows/detect-breaking-change.yml b/.github/workflows/detect-breaking-change.yml new file mode 100644 index 0000000000000..fec605c58f9c7 --- /dev/null +++ b/.github/workflows/detect-breaking-change.yml @@ -0,0 +1,24 @@ +name: "Detect Breaking Changes" +on: [push, pull_request] +jobs: + detect-breaking-change: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-java@v4 + with: + distribution: temurin # Temurin is a distribution of adoptium + java-version: 21 + - uses: gradle/gradle-build-action@v3 + with: + arguments: japicmp + gradle-version: 8.7 + build-root-directory: server + - if: failure() + run: cat server/build/reports/java-compatibility/report.txt + - if: failure() + uses: actions/upload-artifact@v4 + with: + name: java-compatibility-report.html + path: ${{ github.workspace }}/server/build/reports/java-compatibility/report.html + \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index bc3acfc991e25..d2b41e910762b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -109,6 +109,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Allow setting KEYSTORE_PASSWORD through env variable ([#12865](https://github.com/opensearch-project/OpenSearch/pull/12865)) - [Concurrent Segment Search] Perform buildAggregation concurrently and support Composite Aggregations ([#12697](https://github.com/opensearch-project/OpenSearch/pull/12697)) - [Concurrent Segment Search] Disable concurrent segment search for system indices and throttled requests ([#12954](https://github.com/opensearch-project/OpenSearch/pull/12954)) +- Detect breaking changes on pull requests ([#9044](https://github.com/opensearch-project/OpenSearch/pull/9044)) ### Dependencies - Bump 
`org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896))
diff --git a/server/build.gradle b/server/build.gradle
index bbbe93bd6e517..7d52849844aaa 100644
--- a/server/build.gradle
+++ b/server/build.gradle
@@ -36,6 +36,7 @@ plugins {
   id('opensearch.publish')
   id('opensearch.internal-cluster-test')
   id('opensearch.optional-dependencies')
+  id('me.champeau.gradle.japicmp') version '0.4.2'
 }
 
 publishing {
@@ -378,3 +379,81 @@ tasks.named("sourcesJar").configure {
     duplicatesStrategy = DuplicatesStrategy.EXCLUDE
   }
 }
+
+/** Compares the current build against a snapshot build */
+tasks.register("japicmp", me.champeau.gradle.japicmp.JapicmpTask) {
+  oldClasspath.from(files("${buildDir}/snapshot/opensearch-${version}.jar"))
+  newClasspath.from(tasks.named('jar'))
+  onlyModified = true
+  failOnModification = true
+  ignoreMissingClasses = true
+  annotationIncludes = ['@org.opensearch.common.annotation.PublicApi']
+  txtOutputFile = layout.buildDirectory.file("reports/java-compatibility/report.txt")
+  htmlOutputFile = layout.buildDirectory.file("reports/java-compatibility/report.html")
+  dependsOn downloadSnapshot
+}
+
+/** If the Java API comparison task failed, print a hint about whether the change should be merged from its target branch */
+gradle.taskGraph.afterTask { Task task, TaskState state ->
+  if (task.name == 'japicmp' && state.failure != null) {
+    def sha = getGitShaFromJar("${buildDir}/snapshot/opensearch-${version}.jar")
+    logger.info("Incompatible Java API from snapshot jar built off of commit ${sha}")
+
+    if (!inHistory(sha)) {
+      logger.warn('\u001B[33mPlease merge from the target branch and run this task again.\u001B[0m')
+    }
+  }
+}
+
+/** Downloads latest snapshot from maven repository */
+tasks.register("downloadSnapshot", Copy) {
+  def mavenSnapshotRepoUrl = "https://aws.oss.sonatype.org/content/repositories/snapshots/"
+  def groupId = "org.opensearch"
+  def artifactId = "opensearch"
+
+  repositories {
+    maven {
+      url mavenSnapshotRepoUrl
+    }
+  }
+
+  configurations {
+    snapshotArtifact
+  }
+
+  dependencies {
+    snapshotArtifact("${groupId}:${artifactId}:${version}:")
+  }
+
+  from configurations.snapshotArtifact
+  into "$buildDir/snapshot"
+}
+
+/** Check if the sha is in the current history */
+def inHistory(String sha) {
+  try {
+    def commandCheckSha = "git merge-base --is-ancestor ${sha} HEAD"
+    // execute() alone does not fail on a non-zero exit; check the exit code to know whether sha is an ancestor
+    return commandCheckSha.execute().waitFor() == 0
+  } catch (Exception e) {
+    return false
+  }
+}
+
+/** Extracts the Git SHA used to build a jar from its manifest */
+def getGitShaFromJar(String jarPath) {
+  def sha = ''
+  try {
+    // Open the JAR file
+    def jarFile = new java.util.jar.JarFile(jarPath)
+    // Get the manifest from the JAR file
+    def manifest = jarFile.manifest
+    def attributes = manifest.mainAttributes
+    // The Git SHA is stored under the manifest attribute named 'Change'
+    sha = attributes.getValue('Change')
+    jarFile.close()
+  } catch (IOException e) {
+    println "Failed to read the JAR file: $e.message"
+  }
+  return sha
+}

From 3491bcb23d6b398117cfd11c5d273b2e83798d0b Mon Sep 17 00:00:00 2001
From: Arpit-Bandejiya
Date: Tue, 2 Apr 2024 11:50:54 +0530
Subject: [PATCH 62/74] Add cluster primary balance constraint for rebalancing with buffer (#12656)

Signed-off-by: Arpit-Bandejiya
---
 CHANGELOG.md | 1 +
 .../SegmentReplicationAllocationIT.java | 87 ++++++++-
 .../allocation/AllocationConstraints.java | 6 +-
 .../routing/allocation/ConstraintTypes.java | 15 +-
 .../allocation/RebalanceConstraints.java | 10 +-
 .../allocation/RebalanceParameter.java | 24 +++
 .../allocator/BalancedShardsAllocator.java | 81 ++++++++-
 .../allocator/LocalShardsBalancer.java | 14 +-
 .../common/settings/ClusterSettings.java | 2 +
 .../allocation/BalanceConfigurationTests.java | 166 ++++++++++++++++--
 10 files changed, 370 insertions(+), 36 deletions(-)
 create mode 100644 server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceParameter.java

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d2b41e910762b..5371a8372c56d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -110,6 +110,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - [Concurrent Segment Search] Perform buildAggregation concurrently and support Composite Aggregations ([#12697](https://github.com/opensearch-project/OpenSearch/pull/12697))
 - [Concurrent Segment Search] Disable concurrent segment search for system indices and throttled requests ([#12954](https://github.com/opensearch-project/OpenSearch/pull/12954))
 - Detect breaking changes on pull requests ([#9044](https://github.com/opensearch-project/OpenSearch/pull/9044))
+- Add cluster primary balance constraint for rebalancing with buffer ([#12656](https://github.com/opensearch-project/OpenSearch/pull/12656))
 
 ### Dependencies
 - Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896))
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java
index 30edea6551067..669e24f9fb555 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java
@@ -31,6 +31,9 @@
 import java.util.stream.Collectors;
 
 import static org.opensearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.PREFER_PRIMARY_SHARD_BALANCE;
+import static org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.PREFER_PRIMARY_SHARD_REBALANCE;
+import static org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.PRIMARY_SHARD_REBALANCE_BUFFER;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
 
 @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
@@ -58,6 +61,20 @@ public void enablePreferPrimaryBalance() {
         );
     }
 
+    public void setAllocationRelocationStrategy(boolean preferPrimaryBalance, boolean preferPrimaryRebalance, float buffer) {
+        assertAcked(
+            client().admin()
+                .cluster()
+                .prepareUpdateSettings()
+                .setPersistentSettings(
+                    Settings.builder()
+                        .put(PREFER_PRIMARY_SHARD_BALANCE.getKey(), preferPrimaryBalance)
+                        .put(PREFER_PRIMARY_SHARD_REBALANCE.getKey(), preferPrimaryRebalance)
+                        .put(PRIMARY_SHARD_REBALANCE_BUFFER.getKey(), buffer)
+                )
+        );
+    }
+
     /**
      * This test verifies that the overall primary balance is attained during allocation. This test verifies primary
      * balance per index and across all indices is maintained.
@@ -87,7 +104,7 @@ public void testGlobalPrimaryAllocation() throws Exception {
         state = client().admin().cluster().prepareState().execute().actionGet().getState();
         logger.info(ShardAllocations.printShardDistribution(state));
         verifyPerIndexPrimaryBalance();
-        verifyPrimaryBalance();
+        verifyPrimaryBalance(0.0f);
     }
 
     /**
@@ -224,6 +241,70 @@ public void testAllocationWithDisruption() throws Exception {
         verifyPerIndexPrimaryBalance();
     }
 
+    /**
+     * Similar to the testSingleIndexShardAllocation test, but creates multiple indices while multiple nodes are added and
+     * removed. After each such event, the test asserts that primary shard distribution is balanced for each index as well
+     * as across nodes when PREFER_PRIMARY_SHARD_REBALANCE is set to true.
+     */
+    public void testAllocationAndRebalanceWithDisruption() throws Exception {
+        internalCluster().startClusterManagerOnlyNode();
+        final int maxReplicaCount = 2;
+        final int maxShardCount = 2;
+        // Create higher number of nodes than number of shards to reduce chances of SameShardAllocationDecider kicking-in
+        // and preventing primary relocations
+        final int nodeCount = randomIntBetween(5, 10);
+        final int numberOfIndices = randomIntBetween(1, 10);
+        final float buffer = randomIntBetween(1, 4) * 0.10f;
+
+        logger.info("--> Creating {} nodes", nodeCount);
+        final List<String> nodeNames = new ArrayList<>();
+        for (int i = 0; i < nodeCount; i++) {
+            nodeNames.add(internalCluster().startNode());
+        }
+        setAllocationRelocationStrategy(true, true, buffer);
+
+        int shardCount, replicaCount;
+        ClusterState state;
+        for (int i = 0; i < numberOfIndices; i++) {
+            shardCount = randomIntBetween(1, maxShardCount);
+            replicaCount = randomIntBetween(1, maxReplicaCount);
+            logger.info("--> Creating index test{} with primary {} and replica {}", i, shardCount, replicaCount);
+            createIndex("test" + i, shardCount, replicaCount, i % 2 == 0);
+            ensureGreen(TimeValue.timeValueSeconds(60));
+            if (logger.isTraceEnabled()) {
+                state = client().admin().cluster().prepareState().execute().actionGet().getState();
+                logger.info(ShardAllocations.printShardDistribution(state));
+            }
+        }
+        state = client().admin().cluster().prepareState().execute().actionGet().getState();
+        logger.info(ShardAllocations.printShardDistribution(state));
+        verifyPerIndexPrimaryBalance();
+        verifyPrimaryBalance(buffer);
+
+        final int additionalNodeCount = randomIntBetween(1, 5);
+        logger.info("--> Adding {} nodes", additionalNodeCount);
+
+        internalCluster().startNodes(additionalNodeCount);
+        ensureGreen(TimeValue.timeValueSeconds(60));
+        state = client().admin().cluster().prepareState().execute().actionGet().getState();
+        logger.info(ShardAllocations.printShardDistribution(state));
+        verifyPerIndexPrimaryBalance();
+        verifyPrimaryBalance(buffer);
+
+        int nodeCountToStop = additionalNodeCount;
+        while (nodeCountToStop > 0) {
+            internalCluster().stopRandomDataNode();
+            // give replica a chance to promote as primary before terminating node containing the replica
+            ensureGreen(TimeValue.timeValueSeconds(60));
+            nodeCountToStop--;
+        }
+        state = client().admin().cluster().prepareState().execute().actionGet().getState();
+        logger.info("--> Cluster state post nodes stop {}", state);
+        logger.info(ShardAllocations.printShardDistribution(state));
+        verifyPerIndexPrimaryBalance();
+        verifyPrimaryBalance(buffer);
+    }
+
     /**
      * Utility method which ensures cluster has balanced primary shard distribution across a single index.
* @throws Exception exception @@ -263,7 +344,7 @@ private void verifyPerIndexPrimaryBalance() throws Exception { }, 60, TimeUnit.SECONDS); } - private void verifyPrimaryBalance() throws Exception { + private void verifyPrimaryBalance(float buffer) throws Exception { assertBusy(() -> { final ClusterState currentState = client().admin().cluster().prepareState().execute().actionGet().getState(); RoutingNodes nodes = currentState.getRoutingNodes(); @@ -278,7 +359,7 @@ private void verifyPrimaryBalance() throws Exception { .filter(ShardRouting::primary) .collect(Collectors.toList()) .size(); - assertTrue(primaryCount <= avgPrimaryShardsPerNode); + assertTrue(primaryCount <= (avgPrimaryShardsPerNode * (1 + buffer))); } }, 60, TimeUnit.SECONDS); } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java index 5375910c57579..6702db4b43e91 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java @@ -30,9 +30,9 @@ public class AllocationConstraints { public AllocationConstraints() { this.constraints = new HashMap<>(); - this.constraints.putIfAbsent(INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID, new Constraint(isIndexShardsPerNodeBreached())); - this.constraints.putIfAbsent(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, new Constraint(isPerIndexPrimaryShardsPerNodeBreached())); - this.constraints.putIfAbsent(CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, new Constraint(isPrimaryShardsPerNodeBreached())); + this.constraints.put(INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID, new Constraint(isIndexShardsPerNodeBreached())); + this.constraints.put(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, new Constraint(isPerIndexPrimaryShardsPerNodeBreached())); + this.constraints.put(CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, new Constraint(isPrimaryShardsPerNodeBreached(0.0f))); } public void updateAllocationConstraint(String constraint, boolean enable) { diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java index ae2d4a0926194..08fe8f92d1f80 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java @@ -28,6 +28,11 @@ public class ConstraintTypes { */ public final static String CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID = "cluster.primary.shard.balance.constraint"; + /** + * Defines a cluster constraint which is breached when a node contains more than avg primary shards across all indices + */ + public final static String CLUSTER_PRIMARY_SHARD_REBALANCE_CONSTRAINT_ID = "cluster.primary.shard.rebalance.constraint"; + /** * Defines an index constraint which is breached when a node contains more than avg number of shards for an index */ @@ -70,14 +75,14 @@ public static Predicate isPerIndexPrimaryShardsPerN } /** - * Defines a predicate which returns true when a node contains more than average number of primary shards. This - * constraint is used in weight calculation during allocation only. 
When breached a high weight {@link ConstraintTypes#CONSTRAINT_WEIGHT}
- * is assigned to node resulting in lesser chances of node being selected as allocation target
+ * Defines a predicate which returns true when a node contains more than the average number of primary shards, with an added buffer. This
+ * constraint is used in weight calculation during both allocation and rebalance. When breached, a high weight {@link ConstraintTypes#CONSTRAINT_WEIGHT}
+ * is assigned to the node, resulting in lesser chances of the node being selected as an allocation/rebalance target
  */
-    public static Predicate<Constraint.ConstraintParams> isPrimaryShardsPerNodeBreached() {
+    public static Predicate<Constraint.ConstraintParams> isPrimaryShardsPerNodeBreached(float buffer) {
         return (params) -> {
             int primaryShardCount = params.getNode().numPrimaryShards();
-            int allowedPrimaryShardCount = (int) Math.ceil(params.getBalancer().avgPrimaryShardsPerNode());
+            int allowedPrimaryShardCount = (int) Math.ceil(params.getBalancer().avgPrimaryShardsPerNode() * (1 + buffer));
             return primaryShardCount >= allowedPrimaryShardCount;
         };
     }
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceConstraints.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceConstraints.java
index a4036ec47ec0e..2c2138af18abc 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceConstraints.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceConstraints.java
@@ -14,8 +14,10 @@
 import java.util.HashMap;
 import java.util.Map;
 
+import static org.opensearch.cluster.routing.allocation.ConstraintTypes.CLUSTER_PRIMARY_SHARD_REBALANCE_CONSTRAINT_ID;
 import static org.opensearch.cluster.routing.allocation.ConstraintTypes.INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID;
 import static org.opensearch.cluster.routing.allocation.ConstraintTypes.isPerIndexPrimaryShardsPerNodeBreached;
+import static org.opensearch.cluster.routing.allocation.ConstraintTypes.isPrimaryShardsPerNodeBreached;
 
 /**
  * Constraints applied during rebalancing round; specify conditions which, if breached, reduce the
@@ -27,9 +29,13 @@ public class RebalanceConstraints {
 
     private Map<String, Constraint> constraints;
 
-    public RebalanceConstraints() {
+    public RebalanceConstraints(RebalanceParameter rebalanceParameter) {
         this.constraints = new HashMap<>();
-        this.constraints.putIfAbsent(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, new Constraint(isPerIndexPrimaryShardsPerNodeBreached()));
+        this.constraints.put(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, new Constraint(isPerIndexPrimaryShardsPerNodeBreached()));
+        this.constraints.put(
+            CLUSTER_PRIMARY_SHARD_REBALANCE_CONSTRAINT_ID,
+            new Constraint(isPrimaryShardsPerNodeBreached(rebalanceParameter.getPreferPrimaryBalanceBuffer()))
+        );
     }
 
     public void updateRebalanceConstraint(String constraint, boolean enable) {
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceParameter.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceParameter.java
new file mode 100644
index 0000000000000..35fbaede93ba3
--- /dev/null
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceParameter.java
@@ -0,0 +1,24 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.cluster.routing.allocation;
+
+/**
+ * RebalanceConstraint Params
+ */
+public class RebalanceParameter {
+    private float preferPrimaryBalanceBuffer;
+
+    public RebalanceParameter(float preferPrimaryBalanceBuffer) {
+        this.preferPrimaryBalanceBuffer = preferPrimaryBalanceBuffer;
+    }
+
+    public float getPreferPrimaryBalanceBuffer() {
+        return preferPrimaryBalanceBuffer;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
index 41ace0e7661fe..b2443490dd973 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
@@ -46,6 +46,7 @@
 import org.opensearch.cluster.routing.allocation.ConstraintTypes;
 import org.opensearch.cluster.routing.allocation.MoveDecision;
 import org.opensearch.cluster.routing.allocation.RebalanceConstraints;
+import org.opensearch.cluster.routing.allocation.RebalanceParameter;
 import org.opensearch.cluster.routing.allocation.RoutingAllocation;
 import org.opensearch.cluster.routing.allocation.ShardAllocationDecision;
 import org.opensearch.common.inject.Inject;
@@ -61,6 +62,7 @@
 import java.util.Set;
 
 import static org.opensearch.cluster.routing.allocation.ConstraintTypes.CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID;
+import static org.opensearch.cluster.routing.allocation.ConstraintTypes.CLUSTER_PRIMARY_SHARD_REBALANCE_CONSTRAINT_ID;
 import static org.opensearch.cluster.routing.allocation.ConstraintTypes.INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID;
 import static org.opensearch.cluster.routing.allocation.ConstraintTypes.INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID;
 
@@ -145,10 +147,29 @@ public class BalancedShardsAllocator implements ShardsAllocator {
         Property.NodeScope
     );
 
+    public static final Setting<Boolean> PREFER_PRIMARY_SHARD_REBALANCE = Setting.boolSetting(
+        "cluster.routing.allocation.rebalance.primary.enable",
+        false,
+        Property.Dynamic,
+        Property.NodeScope
+    );
+
+    public static final Setting<Float> PRIMARY_SHARD_REBALANCE_BUFFER = Setting.floatSetting(
+        "cluster.routing.allocation.rebalance.primary.buffer",
+        0.10f,
+        0.0f,
+        Property.Dynamic,
+        Property.NodeScope
+    );
+
     private volatile boolean movePrimaryFirst;
     private volatile ShardMovementStrategy shardMovementStrategy;
 
     private volatile boolean preferPrimaryShardBalance;
+    private volatile boolean preferPrimaryShardRebalance;
+    private volatile float preferPrimaryShardRebalanceBuffer;
+
+    private volatile float indexBalanceFactor;
+    private volatile float shardBalanceFactor;
     private volatile WeightFunction weightFunction;
     private volatile float threshold;
 
@@ -158,14 +179,21 @@ public BalancedShardsAllocator(Settings settings) {
 
     @Inject
     public BalancedShardsAllocator(Settings settings, ClusterSettings clusterSettings) {
-        setWeightFunction(INDEX_BALANCE_FACTOR_SETTING.get(settings), SHARD_BALANCE_FACTOR_SETTING.get(settings));
+        setShardBalanceFactor(SHARD_BALANCE_FACTOR_SETTING.get(settings));
+        setIndexBalanceFactor(INDEX_BALANCE_FACTOR_SETTING.get(settings));
+        setPreferPrimaryShardRebalanceBuffer(PRIMARY_SHARD_REBALANCE_BUFFER.get(settings));
+        updateWeightFunction();
         setThreshold(THRESHOLD_SETTING.get(settings));
         setPreferPrimaryShardBalance(PREFER_PRIMARY_SHARD_BALANCE.get(settings));
+        setPreferPrimaryShardRebalance(PREFER_PRIMARY_SHARD_REBALANCE.get(settings));
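+        // The settings consumers registered below keep these knobs dynamic: changes to the balance
+        // factors or the rebalance buffer rebuild the weight function, while the primary
+        // balance/rebalance preferences toggle constraints on the existing one.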
setShardMovementStrategy(SHARD_MOVEMENT_STRATEGY_SETTING.get(settings)); clusterSettings.addSettingsUpdateConsumer(PREFER_PRIMARY_SHARD_BALANCE, this::setPreferPrimaryShardBalance); clusterSettings.addSettingsUpdateConsumer(SHARD_MOVE_PRIMARY_FIRST_SETTING, this::setMovePrimaryFirst); clusterSettings.addSettingsUpdateConsumer(SHARD_MOVEMENT_STRATEGY_SETTING, this::setShardMovementStrategy); - clusterSettings.addSettingsUpdateConsumer(INDEX_BALANCE_FACTOR_SETTING, SHARD_BALANCE_FACTOR_SETTING, this::setWeightFunction); + clusterSettings.addSettingsUpdateConsumer(INDEX_BALANCE_FACTOR_SETTING, this::updateIndexBalanceFactor); + clusterSettings.addSettingsUpdateConsumer(SHARD_BALANCE_FACTOR_SETTING, this::updateShardBalanceFactor); + clusterSettings.addSettingsUpdateConsumer(PRIMARY_SHARD_REBALANCE_BUFFER, this::updatePreferPrimaryShardBalanceBuffer); + clusterSettings.addSettingsUpdateConsumer(PREFER_PRIMARY_SHARD_REBALANCE, this::setPreferPrimaryShardRebalance); clusterSettings.addSettingsUpdateConsumer(THRESHOLD_SETTING, this::setThreshold); } @@ -190,8 +218,35 @@ private void setShardMovementStrategy(ShardMovementStrategy shardMovementStrateg } } - private void setWeightFunction(float indexBalance, float shardBalanceFactor) { - weightFunction = new WeightFunction(indexBalance, shardBalanceFactor); + private void setIndexBalanceFactor(float indexBalanceFactor) { + this.indexBalanceFactor = indexBalanceFactor; + } + + private void setShardBalanceFactor(float shardBalanceFactor) { + this.shardBalanceFactor = shardBalanceFactor; + } + + private void setPreferPrimaryShardRebalanceBuffer(float preferPrimaryShardRebalanceBuffer) { + this.preferPrimaryShardRebalanceBuffer = preferPrimaryShardRebalanceBuffer; + } + + private void updateIndexBalanceFactor(float indexBalanceFactor) { + this.indexBalanceFactor = indexBalanceFactor; + updateWeightFunction(); + } + + private void updateShardBalanceFactor(float shardBalanceFactor) { + this.shardBalanceFactor = shardBalanceFactor; + updateWeightFunction(); + } + + private void updatePreferPrimaryShardBalanceBuffer(float preferPrimaryShardBalanceBuffer) { + this.preferPrimaryShardRebalanceBuffer = preferPrimaryShardBalanceBuffer; + updateWeightFunction(); + } + + private void updateWeightFunction() { + weightFunction = new WeightFunction(this.indexBalanceFactor, this.shardBalanceFactor, this.preferPrimaryShardRebalanceBuffer); } /** @@ -205,6 +260,11 @@ private void setPreferPrimaryShardBalance(boolean preferPrimaryShardBalance) { this.weightFunction.updateRebalanceConstraint(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, preferPrimaryShardBalance); } + private void setPreferPrimaryShardRebalance(boolean preferPrimaryShardRebalance) { + this.preferPrimaryShardRebalance = preferPrimaryShardRebalance; + this.weightFunction.updateRebalanceConstraint(CLUSTER_PRIMARY_SHARD_REBALANCE_CONSTRAINT_ID, preferPrimaryShardRebalance); + } + private void setThreshold(float threshold) { this.threshold = threshold; } @@ -221,7 +281,8 @@ public void allocate(RoutingAllocation allocation) { shardMovementStrategy, weightFunction, threshold, - preferPrimaryShardBalance + preferPrimaryShardBalance, + preferPrimaryShardRebalance ); localShardsBalancer.allocateUnassigned(); localShardsBalancer.moveShards(); @@ -242,7 +303,8 @@ public ShardAllocationDecision decideShardAllocation(final ShardRouting shard, f shardMovementStrategy, weightFunction, threshold, - preferPrimaryShardBalance + preferPrimaryShardBalance, + preferPrimaryShardRebalance ); AllocateUnassignedDecision 
allocateUnassignedDecision = AllocateUnassignedDecision.NOT_TAKEN; MoveDecision moveDecision = MoveDecision.NOT_TAKEN; @@ -348,7 +410,7 @@ static class WeightFunction { private AllocationConstraints constraints; private RebalanceConstraints rebalanceConstraints; - WeightFunction(float indexBalance, float shardBalance) { + WeightFunction(float indexBalance, float shardBalance, float preferPrimaryBalanceBuffer) { float sum = indexBalance + shardBalance; if (sum <= 0.0f) { throw new IllegalArgumentException("Balance factors must sum to a value > 0 but was: " + sum); @@ -357,8 +419,9 @@ static class WeightFunction { theta1 = indexBalance / sum; this.indexBalance = indexBalance; this.shardBalance = shardBalance; + RebalanceParameter rebalanceParameter = new RebalanceParameter(preferPrimaryBalanceBuffer); this.constraints = new AllocationConstraints(); - this.rebalanceConstraints = new RebalanceConstraints(); + this.rebalanceConstraints = new RebalanceConstraints(rebalanceParameter); // Enable index shard per node breach constraint updateAllocationConstraint(INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID, true); } @@ -495,7 +558,7 @@ public Balancer( float threshold, boolean preferPrimaryBalance ) { - super(logger, allocation, shardMovementStrategy, weight, threshold, preferPrimaryBalance); + super(logger, allocation, shardMovementStrategy, weight, threshold, preferPrimaryBalance, false); } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java index 45f64a5b29b04..ec25d041bda43 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java @@ -61,6 +61,7 @@ public class LocalShardsBalancer extends ShardsBalancer { private final ShardMovementStrategy shardMovementStrategy; private final boolean preferPrimaryBalance; + private final boolean preferPrimaryRebalance; private final BalancedShardsAllocator.WeightFunction weight; private final float threshold; @@ -76,7 +77,8 @@ public LocalShardsBalancer( ShardMovementStrategy shardMovementStrategy, BalancedShardsAllocator.WeightFunction weight, float threshold, - boolean preferPrimaryBalance + boolean preferPrimaryBalance, + boolean preferPrimaryRebalance ) { this.logger = logger; this.allocation = allocation; @@ -91,6 +93,7 @@ public LocalShardsBalancer( sorter = newNodeSorter(); inEligibleTargetNode = new HashSet<>(); this.preferPrimaryBalance = preferPrimaryBalance; + this.preferPrimaryRebalance = preferPrimaryRebalance; this.shardMovementStrategy = shardMovementStrategy; } @@ -995,13 +998,18 @@ private boolean tryRelocateShard(BalancedShardsAllocator.ModelNode minNode, Bala continue; } // This is a safety net which prevents un-necessary primary shard relocations from maxNode to minNode when - // doing such relocation wouldn't help in primary balance. + // doing such relocation wouldn't help in primary balance. 
The condition won't be applicable when we enable node level + // primary rebalance if (preferPrimaryBalance == true + && preferPrimaryRebalance == false && shard.primary() && maxNode.numPrimaryShards(shard.getIndexName()) - minNode.numPrimaryShards(shard.getIndexName()) < 2) { continue; } - + // Relax the above condition to per node to allow rebalancing to attain global balance + if (preferPrimaryRebalance == true && shard.primary() && maxNode.numPrimaryShards() - minNode.numPrimaryShards() < 2) { + continue; + } final Decision decision = new Decision.Multi().add(allocationDecision).add(rebalanceDecision); maxNode.removeShard(shard); long shardSize = allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 8760ee3c94309..1b529a3b2bef1 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -252,7 +252,9 @@ public void apply(Settings value, Settings current, Settings previous) { AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING, BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, + BalancedShardsAllocator.PRIMARY_SHARD_REBALANCE_BUFFER, BalancedShardsAllocator.PREFER_PRIMARY_SHARD_BALANCE, + BalancedShardsAllocator.PREFER_PRIMARY_SHARD_REBALANCE, BalancedShardsAllocator.SHARD_MOVE_PRIMARY_FIRST_SETTING, BalancedShardsAllocator.SHARD_MOVEMENT_STRATEGY_SETTING, BalancedShardsAllocator.THRESHOLD_SETTING, diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java index 62dce9c4edeb5..11cbe89645657 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java @@ -72,6 +72,7 @@ import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import static org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_CREATION_DATE; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; @@ -140,10 +141,14 @@ public void testIndexBalance() { } private Settings.Builder getSettingsBuilderForPrimaryBalance() { - return getSettingsBuilderForPrimaryBalance(true); + return getSettingsBuilderForPrimaryBalance(true, false); } - private Settings.Builder getSettingsBuilderForPrimaryBalance(boolean preferPrimaryBalance) { + private Settings.Builder getSettingsBuilderForPrimaryReBalance() { + return getSettingsBuilderForPrimaryBalance(true, true); + } + + private Settings.Builder getSettingsBuilderForPrimaryBalance(boolean preferPrimaryBalance, boolean preferPrimaryRebalance) { final float indexBalance = 0.55f; final float shardBalance = 0.45f; final float balanceThreshold = 1.0f; @@ -155,6 +160,7 @@ private Settings.Builder getSettingsBuilderForPrimaryBalance(boolean preferPrima ); settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), indexBalance); settings.put(BalancedShardsAllocator.PREFER_PRIMARY_SHARD_BALANCE.getKey(), 
preferPrimaryBalance);
+        settings.put(BalancedShardsAllocator.PREFER_PRIMARY_SHARD_REBALANCE.getKey(), preferPrimaryRebalance);
         settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), shardBalance);
         settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), balanceThreshold);
         return settings;
@@ -201,7 +207,7 @@ public void testPrimaryBalanceWithoutPreferPrimaryBalanceSetting() {
         int balanceFailed = 0;
 
         AllocationService strategy = createAllocationService(
-            getSettingsBuilderForPrimaryBalance(false).build(),
+            getSettingsBuilderForPrimaryBalance(false, false).build(),
             new TestGatewayAllocator()
         );
         for (int i = 0; i < numberOfRuns; i++) {
@@ -244,6 +250,60 @@ public void testPrimaryBalanceWithPreferPrimaryBalanceSetting() {
         assertTrue(balanceFailed <= 1);
     }
 
+    /**
+     * This test verifies that primary shard balance is not attained after a node drop when only the
+     * PREFER_PRIMARY_SHARD_BALANCE setting is enabled.
+     */
+    public void testPrimaryBalanceNotSolvedForNodeDropWithPreferPrimaryBalanceSetting() {
+        final int numberOfNodes = 4;
+        final int numberOfIndices = 4;
+        final int numberOfShards = 4;
+        final int numberOfReplicas = 1;
+        final int numberOfRuns = 5;
+        final float buffer = 0.10f;
+        int balanceFailed = 0;
+
+        AllocationService strategy = createAllocationService(getSettingsBuilderForPrimaryBalance().build(), new TestGatewayAllocator());
+        for (int i = 0; i < numberOfRuns; i++) {
+            ClusterState clusterState = initCluster(strategy, numberOfIndices, numberOfNodes, numberOfShards, numberOfReplicas);
+            clusterState = removeOneNode(clusterState, strategy);
+            logger.info(ShardAllocations.printShardDistribution(clusterState));
+            try {
+                verifyPrimaryBalance(clusterState, buffer);
+            } catch (AssertionError | Exception e) {
+                balanceFailed++;
+                logger.info("Expected assertion failure: primary balance not restored");
+            }
+        }
+        assertTrue(balanceFailed >= 4);
+    }
+
+    /**
+     * This test verifies that primary shard balance is attained with the PREFER_PRIMARY_SHARD_REBALANCE setting
+     * after a node drop.
+     */
+    public void testPrimaryBalanceSolvedWithPreferPrimaryRebalanceSetting() {
+        final int numberOfNodes = 4;
+        final int numberOfIndices = 4;
+        final int numberOfShards = 4;
+        final int numberOfReplicas = 1;
+        final int numberOfRuns = 5;
+        final float buffer = 0.10f;
+        int balanceFailed = 0;
+
+        AllocationService strategy = createAllocationService(getSettingsBuilderForPrimaryReBalance().build(), new TestGatewayAllocator());
+        for (int i = 0; i < numberOfRuns; i++) {
+            ClusterState clusterState = initCluster(strategy, numberOfIndices, numberOfNodes, numberOfShards, numberOfReplicas);
+            clusterState = removeOneNode(clusterState, strategy);
+            logger.info(ShardAllocations.printShardDistribution(clusterState));
+            try {
+                verifyPrimaryBalance(clusterState, buffer);
+            } catch (Exception e) {
+                balanceFailed++;
+                logger.info("Unexpected assertion failure");
+            }
+        }
+        assertTrue(balanceFailed <= 1);
+    }
+
     /**
      * This test verifies the allocation logic when nodes breach multiple constraints and ensures the node breaching
      * the minimum number of constraints is chosen for allocation. 
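The two relocation guards exercised by the tests above come from the LocalShardsBalancer.tryRelocateShard hunk earlier in this patch. As a minimal, self-contained sketch of that decision logic (the class, method, and sample counts below are illustrative, not part of the patch):

    // Sketch of the skip conditions in tryRelocateShard: the per-index guard applies while
    // node-level primary rebalance is off; enabling it relaxes the check to per-node totals.
    class PrimaryRebalanceGuardSketch {
        static boolean skipRelocation(
            boolean preferPrimaryBalance,
            boolean preferPrimaryRebalance,
            boolean isPrimary,
            int maxNodeIndexPrimaries,
            int minNodeIndexPrimaries,
            int maxNodeTotalPrimaries,
            int minNodeTotalPrimaries
        ) {
            // Per-index guard: skip when moving a primary would not improve per-index balance.
            if (preferPrimaryBalance && preferPrimaryRebalance == false && isPrimary
                && maxNodeIndexPrimaries - minNodeIndexPrimaries < 2) {
                return true;
            }
            // Node-level guard: the same idea relaxed to total primaries per node, which lets
            // rebalancing work toward global primary balance.
            if (preferPrimaryRebalance && isPrimary && maxNodeTotalPrimaries - minNodeTotalPrimaries < 2) {
                return true;
            }
            return false;
        }

        public static void main(String[] args) {
            // A 3-vs-1 skew in per-node primary totals is eligible for relocation...
            System.out.println(skipRelocation(true, true, true, 1, 1, 3, 1)); // false -> relocate
            // ...while a 2-vs-1 skew is left alone, mirroring the "< 2" buffer in the patch.
            System.out.println(skipRelocation(true, true, true, 1, 1, 2, 1)); // true -> skip
        }
    }
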
@@ -367,8 +427,7 @@ public void testPrimaryBalanceWithContrainstBreaching() {
      */
     public void testGlobalPrimaryBalance() throws Exception {
         AllocationService strategy = createAllocationService(getSettingsBuilderForPrimaryBalance().build(), new TestGatewayAllocator());
-        ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
-            .build();
+        ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).build();
         clusterState = addNode(clusterState, strategy);
         clusterState = addNode(clusterState, strategy);
 
@@ -377,7 +436,30 @@
         clusterState = addIndex(clusterState, strategy, "test-index3", 1, 1);
 
         logger.info(ShardAllocations.printShardDistribution(clusterState));
-        verifyPrimaryBalance(clusterState);
+        verifyPrimaryBalance(clusterState, 0.0f);
+    }
+
+    /**
+     * This test verifies global balance by creating indices iteratively and verifies that primary shards
+     * do not pile up on one node.
+     * @throws Exception generic exception
+     */
+    public void testGlobalPrimaryBalanceWithNodeDrops() throws Exception {
+        final float buffer = 0.10f;
+        AllocationService strategy = createAllocationService(getSettingsBuilderForPrimaryReBalance().build(), new TestGatewayAllocator());
+        ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).build();
+        clusterState = addNodes(clusterState, strategy, 5);
+
+        clusterState = addIndices(clusterState, strategy, 5, 1, 8);
+
+        logger.info(ShardAllocations.printShardDistribution(clusterState));
+        verifyPrimaryBalance(clusterState, buffer);
+
+        clusterState = removeOneNode(clusterState, strategy);
+
+        clusterState = applyAllocationUntilNoChange(clusterState, strategy);
+
+        logger.info(ShardAllocations.printShardDistribution(clusterState));
+        verifyPrimaryBalance(clusterState, buffer);
     }
 
     /**
@@ -537,7 +619,7 @@ private void verifyPerIndexPrimaryBalance(ClusterState currentState) {
         }
     }
 
-    private void verifyPrimaryBalance(ClusterState clusterState) throws Exception {
+    private void verifySkewedPrimaryBalance(ClusterState clusterState, int delta) throws Exception {
         assertBusy(() -> {
             RoutingNodes nodes = clusterState.getRoutingNodes();
             int totalPrimaryShards = 0;
@@ -545,13 +627,36 @@
                 totalPrimaryShards += index.primaryShardsActive();
             }
             final int avgPrimaryShardsPerNode = (int) Math.ceil(totalPrimaryShards * 1f / clusterState.getRoutingNodes().size());
+            int maxPrimaryShardOnNode = Integer.MIN_VALUE;
+            int minPrimaryShardOnNode = Integer.MAX_VALUE;
             for (RoutingNode node : nodes) {
                 final int primaryCount = node.shardsWithState(STARTED)
                     .stream()
                     .filter(ShardRouting::primary)
                     .collect(Collectors.toList())
                     .size();
-                assertTrue(primaryCount <= avgPrimaryShardsPerNode);
+                maxPrimaryShardOnNode = Math.max(maxPrimaryShardOnNode, primaryCount);
+                minPrimaryShardOnNode = Math.min(minPrimaryShardOnNode, primaryCount);
+            }
+            assertTrue(maxPrimaryShardOnNode - minPrimaryShardOnNode < delta);
+        }, 60, TimeUnit.SECONDS);
+    }
+
+    private void verifyPrimaryBalance(ClusterState clusterState, float buffer) throws Exception {
+        assertBusy(() -> {
+            RoutingNodes nodes = clusterState.getRoutingNodes();
+            int totalPrimaryShards = 0;
+            for (final IndexRoutingTable index : clusterState.getRoutingTable().indicesRouting().values()) {
+                totalPrimaryShards += index.primaryShardsActive();
+            }
+            final int avgPrimaryShardsPerNode = (int) 
Math.ceil(totalPrimaryShards * 1f / clusterState.getRoutingNodes().size()); + for (RoutingNode node : nodes) { + final int primaryCount = node.shardsWithState(STARTED) + .stream() + .filter(ShardRouting::primary) + .collect(Collectors.toList()) + .size(); + assertTrue(primaryCount <= (avgPrimaryShardsPerNode * (1 + buffer))); } }, 60, TimeUnit.SECONDS); } @@ -567,8 +672,8 @@ public void testShardBalance() { ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString() ); - settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), indexBalance); settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), shardBalance); + settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), indexBalance); settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), balanceThreshold); AllocationService strategy = createAllocationService(settings.build(), new TestGatewayAllocator()); @@ -634,6 +739,34 @@ private ClusterState addIndex( return applyAllocationUntilNoChange(clusterState, strategy); } + private ClusterState addIndices( + ClusterState clusterState, + AllocationService strategy, + int numberOfShards, + int numberOfReplicas, + int numberOfIndices + ) { + Metadata.Builder metadataBuilder = Metadata.builder(clusterState.getMetadata()); + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(clusterState.routingTable()); + + for (int i = 0; i < numberOfIndices; i++) { + IndexMetadata.Builder index = IndexMetadata.builder("test" + i) + .settings(settings(Version.CURRENT)) + .numberOfShards(numberOfShards) + .numberOfReplicas(numberOfReplicas); + + metadataBuilder = metadataBuilder.put(index); + routingTableBuilder.addAsNew(index.build()); + } + + clusterState = ClusterState.builder(clusterState) + .metadata(metadataBuilder.build()) + .routingTable(routingTableBuilder.build()) + .build(); + clusterState = strategy.reroute(clusterState, "indices-created"); + return applyAllocationUntilNoChange(clusterState, strategy); + } + private ClusterState initCluster( AllocationService strategy, int numberOfIndices, @@ -664,7 +797,7 @@ private ClusterState initCluster( for (int i = 0; i < numberOfNodes; i++) { nodes.add(newNode("node" + i)); } - ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .nodes(nodes) .metadata(metadata) .routingTable(initialRoutingTable) @@ -673,6 +806,17 @@ private ClusterState initCluster( return applyAllocationUntilNoChange(clusterState, strategy); } + private ClusterState addNodes(ClusterState clusterState, AllocationService strategy, int numberOfNodes) { + logger.info("now, start [{}] more node, check that rebalancing will happen because we set it to always", numberOfNodes); + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()); + for (int i = 0; i < numberOfNodes; i++) { + nodes.add(newNode("node" + (clusterState.nodes().getSize() + i))); + } + clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build(); + clusterState = strategy.reroute(clusterState, "reroute"); + return applyStartedShardsUntilNoChange(clusterState, strategy); + } + private ClusterState addNode(ClusterState clusterState, AllocationService strategy) { logger.info("now, start 1 more node, check that rebalancing will happen 
because we set it to always"); clusterState = ClusterState.builder(clusterState) @@ -918,7 +1062,7 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing nodes.add(node); } - ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .nodes(nodes) .metadata(metadata) .routingTable(routingTable) From 170ea7af0ac02057dae18b31e81ae149b001532c Mon Sep 17 00:00:00 2001 From: Ashish Date: Tue, 2 Apr 2024 16:17:07 +0530 Subject: [PATCH 63/74] Introduce remote store hash algo in customData in IndexMetadata (#12986) Signed-off-by: Ashish Singh --- .../remotestore/RemoteRestoreSnapshotIT.java | 22 +-- .../metadata/MetadataCreateIndexService.java | 34 ++-- .../org/opensearch/common/hash/FNV1a.java | 48 +++++ .../org/opensearch/index/IndexService.java | 2 +- .../org/opensearch/index/IndexSettings.java | 17 +- .../index/remote/RemoteStoreDataEnums.java | 69 ------- .../index/remote/RemoteStoreEnums.java | 187 ++++++++++++++++++ .../index/remote/RemoteStorePathStrategy.java | 152 ++++++++++++++ ...a => RemoteStorePathStrategyResolver.java} | 16 +- .../index/remote/RemoteStorePathType.java | 75 ------- .../opensearch/index/shard/IndexShard.java | 4 +- .../opensearch/index/shard/StoreRecovery.java | 6 +- .../store/RemoteSegmentStoreDirectory.java | 6 +- .../RemoteSegmentStoreDirectoryFactory.java | 34 +++- .../RemoteStoreLockManagerFactory.java | 27 ++- .../index/translog/RemoteFsTranslog.java | 40 ++-- .../opensearch/indices/IndicesService.java | 8 +- .../blobstore/BlobStoreRepository.java | 16 +- .../opensearch/snapshots/RestoreService.java | 2 +- .../MetadataCreateIndexServiceTests.java | 27 ++- .../opensearch/common/hashing/FNV1aTests.java | 48 +++++ ...eTests.java => RemoteStoreEnumsTests.java} | 89 ++++++--- .../RemoteSegmentStoreDirectoryTests.java | 13 +- .../RemoteStoreLockManagerFactoryTests.java | 7 +- .../index/translog/RemoteFsTranslogTests.java | 2 +- .../TranslogTransferManagerTests.java | 4 +- 26 files changed, 685 insertions(+), 270 deletions(-) create mode 100644 server/src/main/java/org/opensearch/common/hash/FNV1a.java delete mode 100644 server/src/main/java/org/opensearch/index/remote/RemoteStoreDataEnums.java create mode 100644 server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java create mode 100644 server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategy.java rename server/src/main/java/org/opensearch/index/remote/{RemoteStorePathTypeResolver.java => RemoteStorePathStrategyResolver.java} (54%) delete mode 100644 server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java create mode 100644 server/src/test/java/org/opensearch/common/hashing/FNV1aTests.java rename server/src/test/java/org/opensearch/index/remote/{RemoteStorePathTypeTests.java => RemoteStoreEnumsTests.java} (51%) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index fff99e65054dc..181f242aecd09 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -27,7 +27,8 @@ import org.opensearch.core.rest.RestStatus; import org.opensearch.index.IndexService; import 
org.opensearch.index.IndexSettings; -import org.opensearch.index.remote.RemoteStorePathType; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.common.ReplicationType; @@ -283,7 +284,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { indexDocuments(client, indexName1, randomIntBetween(5, 10)); ensureGreen(indexName1); - validateRemoteStorePathType(indexName1, RemoteStorePathType.FIXED); + validatePathType(indexName1, PathType.FIXED, PathHashAlgorithm.FNV_1A); logger.info("--> snapshot"); SnapshotInfo snapshotInfo = createSnapshot(snapshotRepoName, snapshotName1, new ArrayList<>(Arrays.asList(indexName1))); @@ -300,14 +301,12 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { .get(); assertEquals(RestStatus.ACCEPTED, restoreSnapshotResponse.status()); ensureGreen(restoredIndexName1version1); - validateRemoteStorePathType(restoredIndexName1version1, RemoteStorePathType.FIXED); + validatePathType(restoredIndexName1version1, PathType.FIXED, PathHashAlgorithm.FNV_1A); client(clusterManagerNode).admin() .cluster() .prepareUpdateSettings() - .setTransientSettings( - Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), RemoteStorePathType.HASHED_PREFIX) - ) + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX)) .get(); restoreSnapshotResponse = client.admin() @@ -319,24 +318,25 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { .get(); assertEquals(RestStatus.ACCEPTED, restoreSnapshotResponse.status()); ensureGreen(restoredIndexName1version2); - validateRemoteStorePathType(restoredIndexName1version2, RemoteStorePathType.HASHED_PREFIX); + validatePathType(restoredIndexName1version2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A); // Create index with cluster setting cluster.remote_store.index.path.prefix.type as hashed_prefix. indexSettings = getIndexSettings(1, 0).build(); createIndex(indexName2, indexSettings); ensureGreen(indexName2); - validateRemoteStorePathType(indexName2, RemoteStorePathType.HASHED_PREFIX); + validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A); // Validating that custom data has not changed for indexes which were created before the cluster setting got updated - validateRemoteStorePathType(indexName1, RemoteStorePathType.FIXED); + validatePathType(indexName1, PathType.FIXED, PathHashAlgorithm.FNV_1A); } - private void validateRemoteStorePathType(String index, RemoteStorePathType pathType) { + private void validatePathType(String index, PathType pathType, PathHashAlgorithm pathHashAlgorithm) { ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState(); // Validate that the remote_store custom data is present in index metadata for the created index. 
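        // Illustrative contents, not asserted verbatim here: {path_type=HASHED_PREFIX, path_hash_algorithm=FNV_1A}
        // for an index created under the hashed_prefix cluster setting, or {path_type=FIXED, path_hash_algorithm=FNV_1A} otherwise.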
Map<String, String> remoteCustomData = state.metadata().index(index).getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY);
         assertNotNull(remoteCustomData);
-        assertEquals(pathType.toString(), remoteCustomData.get(RemoteStorePathType.NAME));
+        assertEquals(pathType.name(), remoteCustomData.get(PathType.NAME));
+        assertEquals(pathHashAlgorithm.name(), remoteCustomData.get(PathHashAlgorithm.NAME));
     }
 
     public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException {
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java
index aee0473be95eb..451871b10d5eb 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java
@@ -88,8 +88,10 @@ import org.opensearch.index.mapper.MapperService;
 import org.opensearch.index.mapper.MapperService.MergeReason;
 import org.opensearch.index.query.QueryShardContext;
-import org.opensearch.index.remote.RemoteStorePathType;
-import org.opensearch.index.remote.RemoteStorePathTypeResolver;
+import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm;
+import org.opensearch.index.remote.RemoteStoreEnums.PathType;
+import org.opensearch.index.remote.RemoteStorePathStrategy;
+import org.opensearch.index.remote.RemoteStorePathStrategyResolver;
 import org.opensearch.index.shard.IndexSettingProvider;
 import org.opensearch.index.translog.Translog;
 import org.opensearch.indices.IndexCreationException;
@@ -171,7 +173,7 @@ public class MetadataCreateIndexService {
     private AwarenessReplicaBalance awarenessReplicaBalance;
 
     @Nullable
-    private final RemoteStorePathTypeResolver remoteStorePathTypeResolver;
+    private final RemoteStorePathStrategyResolver remoteStorePathStrategyResolver;
 
     public MetadataCreateIndexService(
         final Settings settings,
@@ -204,8 +206,8 @@ public MetadataCreateIndexService(
         // Task is onboarded for throttling, it will get retried from associated TransportClusterManagerNodeAction.
         createIndexTaskKey = clusterService.registerClusterManagerTask(ClusterManagerTaskKeys.CREATE_INDEX_KEY, true);
-        remoteStorePathTypeResolver = isRemoteDataAttributePresent(settings)
-            ? new RemoteStorePathTypeResolver(clusterService.getClusterSettings())
+        remoteStorePathStrategyResolver = isRemoteDataAttributePresent(settings)
+            ? new RemoteStorePathStrategyResolver(clusterService.getClusterSettings())
             : null;
     }
 
@@ -554,7 +556,7 @@ IndexMetadata buildAndValidateTemporaryIndexMetadata(
         tmpImdBuilder.setRoutingNumShards(routingNumShards);
         tmpImdBuilder.settings(indexSettings);
         tmpImdBuilder.system(isSystem);
-        addRemoteStorePathTypeInCustomData(tmpImdBuilder, true);
+        addRemoteStorePathStrategyInCustomData(tmpImdBuilder, true);
 
         // Set up everything, now locally create the index to see that things are ok, and apply
         IndexMetadata tempMetadata = tmpImdBuilder.build();
@@ -569,8 +571,8 @@ IndexMetadata buildAndValidateTemporaryIndexMetadata(
      * @param tmpImdBuilder index metadata builder. 
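     *                      (any remote custom data already present on the builder is preserved; only the
     *                      path type and hash algorithm keys are rewritten)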
* @param assertNullOldType flag to verify that the old remote store path type is null
     */
-    public void addRemoteStorePathTypeInCustomData(IndexMetadata.Builder tmpImdBuilder, boolean assertNullOldType) {
-        if (remoteStorePathTypeResolver != null) {
+    public void addRemoteStorePathStrategyInCustomData(IndexMetadata.Builder tmpImdBuilder, boolean assertNullOldType) {
+        if (remoteStorePathStrategyResolver != null) {
             // It is possible that remote custom data exists already. In such cases, we need to only update the path type
             // in the remote store custom data map.
             Map<String, String> existingRemoteCustomData = tmpImdBuilder.removeCustom(IndexMetadata.REMOTE_STORE_CUSTOM_KEY);
@@ -578,10 +580,18 @@ public void addRemoteStorePathTypeInCustomData(IndexMetadata.Builder tmpImdBuild
                 ? new HashMap<>()
                 : new HashMap<>(existingRemoteCustomData);
             // Determine the path strategy to use via the resolver.
-            String newPathType = remoteStorePathTypeResolver.getType().toString();
-            String oldPathType = remoteCustomData.put(RemoteStorePathType.NAME, newPathType);
-            assert !assertNullOldType || Objects.isNull(oldPathType);
-            logger.trace(() -> new ParameterizedMessage("Added new path type {}, replaced old path type {}", newPathType, oldPathType));
+            RemoteStorePathStrategy newPathStrategy = remoteStorePathStrategyResolver.get();
+            String oldPathType = remoteCustomData.put(PathType.NAME, newPathStrategy.getType().name());
+            String oldHashAlgorithm = remoteCustomData.put(PathHashAlgorithm.NAME, newPathStrategy.getHashAlgorithm().name());
+            assert !assertNullOldType || (Objects.isNull(oldPathType) && Objects.isNull(oldHashAlgorithm));
+            logger.trace(
+                () -> new ParameterizedMessage(
+                    "Added newPathStrategy={}, replaced oldPathType={} oldHashAlgorithm={}",
+                    newPathStrategy,
+                    oldPathType,
+                    oldHashAlgorithm
+                )
+            );
             tmpImdBuilder.putCustom(IndexMetadata.REMOTE_STORE_CUSTOM_KEY, remoteCustomData);
         }
     }
diff --git a/server/src/main/java/org/opensearch/common/hash/FNV1a.java b/server/src/main/java/org/opensearch/common/hash/FNV1a.java
new file mode 100644
index 0000000000000..cab28d0f2d68f
--- /dev/null
+++ b/server/src/main/java/org/opensearch/common/hash/FNV1a.java
@@ -0,0 +1,48 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.hash;
+
+import java.nio.charset.StandardCharsets;
+
+/**
+ * Provides hashing functions using the FNV-1a hash function. @see FNV author's website. 
+ * 32 bit Java port of http://www.isthe.com/chongo/src/fnv/hash_32a.c + * 64 bit Java port of http://www.isthe.com/chongo/src/fnv/hash_64a.c + * + * @opensearch.internal + */ +public class FNV1a { + private static final long FNV_OFFSET_BASIS_32 = 0x811c9dc5L; + private static final long FNV_PRIME_32 = 0x01000193L; + + private static final long FNV_OFFSET_BASIS_64 = 0xcbf29ce484222325L; + private static final long FNV_PRIME_64 = 0x100000001b3L; + + // FNV-1a hash computation for 32-bit hash + public static long hash32(String input) { + long hash = FNV_OFFSET_BASIS_32; + byte[] bytes = input.getBytes(StandardCharsets.UTF_8); + for (byte b : bytes) { + hash ^= (b & 0xFF); + hash *= FNV_PRIME_32; + } + return hash; + } + + // FNV-1a hash computation for 64-bit hash + public static long hash64(String input) { + long hash = FNV_OFFSET_BASIS_64; + byte[] bytes = input.getBytes(StandardCharsets.UTF_8); + for (byte b : bytes) { + hash ^= (b & 0xFF); + hash *= FNV_PRIME_64; + } + return hash; + } +} diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index 03cf8f9182211..9ded1b174d4c6 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -509,7 +509,7 @@ public synchronized IndexShard createShard( RemoteStoreNodeAttribute.getRemoteStoreSegmentRepo(this.indexSettings.getNodeSettings()), this.indexSettings.getUUID(), shardId, - this.indexSettings.getRemoteStorePathType() + this.indexSettings.getRemoteStorePathStrategy() ); } remoteStore = new Store(shardId, this.indexSettings, remoteDirectory, lock, Store.OnClose.EMPTY, path); diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 7e3d812974c79..f17a254c4d3cd 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -48,7 +48,9 @@ import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.Index; -import org.opensearch.index.remote.RemoteStorePathType; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.translog.Translog; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.ingest.IngestService; @@ -1908,10 +1910,15 @@ public void setDocIdFuzzySetFalsePositiveProbability(double docIdFuzzySetFalsePo this.docIdFuzzySetFalsePositiveProbability = docIdFuzzySetFalsePositiveProbability; } - public RemoteStorePathType getRemoteStorePathType() { + public RemoteStorePathStrategy getRemoteStorePathStrategy() { Map remoteCustomData = indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY); - return remoteCustomData != null && remoteCustomData.containsKey(RemoteStorePathType.NAME) - ? 
RemoteStorePathType.parseString(remoteCustomData.get(RemoteStorePathType.NAME)) - : RemoteStorePathType.FIXED; + if (remoteCustomData != null + && remoteCustomData.containsKey(PathType.NAME) + && remoteCustomData.containsKey(PathHashAlgorithm.NAME)) { + PathType pathType = PathType.parseString(remoteCustomData.get(PathType.NAME)); + PathHashAlgorithm pathHashAlgorithm = PathHashAlgorithm.parseString(remoteCustomData.get(PathHashAlgorithm.NAME)); + return new RemoteStorePathStrategy(pathType, pathHashAlgorithm); + } + return new RemoteStorePathStrategy(PathType.FIXED); } } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreDataEnums.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreDataEnums.java deleted file mode 100644 index 475e29004ba2e..0000000000000 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreDataEnums.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.remote; - -import org.opensearch.common.annotation.PublicApi; - -import java.util.Set; - -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.DATA; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.METADATA; - -/** - * This class contains the different enums related to remote store data categories and types. - * - * @opensearch.api - */ -public class RemoteStoreDataEnums { - - /** - * Categories of the data in Remote store. - */ - @PublicApi(since = "2.14.0") - public enum DataCategory { - SEGMENTS("segments", Set.of(DataType.values())), - TRANSLOG("translog", Set.of(DATA, METADATA)); - - private final String name; - private final Set supportedDataTypes; - - DataCategory(String name, Set supportedDataTypes) { - this.name = name; - this.supportedDataTypes = supportedDataTypes; - } - - public boolean isSupportedDataType(DataType dataType) { - return supportedDataTypes.contains(dataType); - } - - public String getName() { - return name; - } - } - - /** - * Types of data in remote store. - */ - @PublicApi(since = "2.14.0") - public enum DataType { - DATA("data"), - METADATA("metadata"), - LOCK_FILES("lock_files"); - - private final String name; - - DataType(String name) { - this.name = name; - } - - public String getName() { - return name; - } - } -} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java new file mode 100644 index 0000000000000..4e557d8c24431 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java @@ -0,0 +1,187 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.remote; + +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.hash.FNV1a; +import org.opensearch.index.remote.RemoteStorePathStrategy.PathInput; + +import java.util.Locale; +import java.util.Set; + +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; + +/** + * This class contains the different enums related to remote store like data categories and types, path types + * and hashing algorithm. + * + * @opensearch.api + */ +public class RemoteStoreEnums { + + /** + * Categories of the data in Remote store. + */ + @PublicApi(since = "2.14.0") + public enum DataCategory { + SEGMENTS("segments", Set.of(DataType.values())), + TRANSLOG("translog", Set.of(DATA, METADATA)); + + private final String name; + private final Set supportedDataTypes; + + DataCategory(String name, Set supportedDataTypes) { + this.name = name; + this.supportedDataTypes = supportedDataTypes; + } + + public boolean isSupportedDataType(DataType dataType) { + return supportedDataTypes.contains(dataType); + } + + public String getName() { + return name; + } + } + + /** + * Types of data in remote store. + */ + @PublicApi(since = "2.14.0") + public enum DataType { + DATA("data"), + METADATA("metadata"), + LOCK_FILES("lock_files"); + + private final String name; + + DataType(String name) { + this.name = name; + } + + public String getName() { + return name; + } + } + + /** + * Enumerates the types of remote store paths resolution techniques supported by OpenSearch. + * For more information, see Github issue #12567. + */ + @PublicApi(since = "2.14.0") + public enum PathType { + FIXED { + @Override + public BlobPath generatePath(PathInput pathInput, PathHashAlgorithm hashAlgorithm) { + // Hash algorithm is not used in FIXED path type + return pathInput.basePath() + .add(pathInput.indexUUID()) + .add(pathInput.shardId()) + .add(pathInput.dataCategory().getName()) + .add(pathInput.dataType().getName()); + } + + @Override + boolean requiresHashAlgorithm() { + return false; + } + }, + HASHED_PREFIX { + @Override + public BlobPath generatePath(PathInput pathInput, PathHashAlgorithm hashAlgorithm) { + // TODO - We need to implement this, keeping the same path as Fixed for sake of multiple tests that can fail otherwise. + // throw new UnsupportedOperationException("Not implemented"); --> Not using this for unblocking couple of tests. + return pathInput.basePath() + .add(pathInput.indexUUID()) + .add(pathInput.shardId()) + .add(pathInput.dataCategory().getName()) + .add(pathInput.dataType().getName()); + } + + @Override + boolean requiresHashAlgorithm() { + return true; + } + }; + + /** + * This method generates the path for the given path input which constitutes multiple fields and characteristics + * of the data. + * + * @param pathInput input. + * @param hashAlgorithm hashing algorithm. + * @return the blob path for the path input. 
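+     * For the FIXED type the result is basePath/indexUUID/shardId/dataCategory/dataType, so a translog data
+     * file lands under .../indexUUID/0/translog/data; HASHED_PREFIX currently yields the same layout (see the TODO above).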
+ */ + public BlobPath path(PathInput pathInput, PathHashAlgorithm hashAlgorithm) { + DataCategory dataCategory = pathInput.dataCategory(); + DataType dataType = pathInput.dataType(); + assert dataCategory.isSupportedDataType(dataType) : "category:" + + dataCategory + + " type:" + + dataType + + " are not supported together"; + return generatePath(pathInput, hashAlgorithm); + } + + abstract BlobPath generatePath(PathInput pathInput, PathHashAlgorithm hashAlgorithm); + + abstract boolean requiresHashAlgorithm(); + + public static PathType parseString(String pathType) { + try { + return PathType.valueOf(pathType.toUpperCase(Locale.ROOT)); + } catch (IllegalArgumentException | NullPointerException e) { + // IllegalArgumentException is thrown when the input does not match any enum name + // NullPointerException is thrown when the input is null + throw new IllegalArgumentException("Could not parse PathType for [" + pathType + "]"); + } + } + + /** + * This string is used as key for storing information in the custom data in index settings. + */ + public static final String NAME = "path_type"; + + } + + /** + * Type of hashes supported for path types that have hashing. + */ + @PublicApi(since = "2.14.0") + public enum PathHashAlgorithm { + + FNV_1A { + @Override + long hash(PathInput pathInput) { + String input = pathInput.indexUUID() + pathInput.shardId() + pathInput.dataCategory().getName() + pathInput.dataType() + .getName(); + return FNV1a.hash32(input); + } + }; + + abstract long hash(PathInput pathInput); + + public static PathHashAlgorithm parseString(String pathHashAlgorithm) { + try { + return PathHashAlgorithm.valueOf(pathHashAlgorithm.toUpperCase(Locale.ROOT)); + } catch (IllegalArgumentException | NullPointerException e) { + // IllegalArgumentException is thrown when the input does not match any enum name + // NullPointerException is thrown when the input is null + throw new IllegalArgumentException("Could not parse PathHashAlgorithm for [" + pathHashAlgorithm + "]"); + } + } + + /** + * This string is used as key for storing information in the custom data in index settings. + */ + public static final String NAME = "path_hash_algorithm"; + } +} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategy.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategy.java new file mode 100644 index 0000000000000..ce5a6748fd9d4 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategy.java @@ -0,0 +1,152 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.remote; + +import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.index.remote.RemoteStoreEnums.DataCategory; +import org.opensearch.index.remote.RemoteStoreEnums.DataType; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; + +import java.util.Objects; + +/** + * This class wraps internal details on the remote store path for an index. 
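+ * A strategy pairs a {@link PathType} with a {@link PathHashAlgorithm}; the algorithm may be omitted only
+ * for types whose requiresHashAlgorithm() is false (see the constructor assertion below).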
+ * + * @opensearch.internal + */ +@PublicApi(since = "2.14.0") +public class RemoteStorePathStrategy { + + private final PathType type; + + @Nullable + private final PathHashAlgorithm hashAlgorithm; + + public RemoteStorePathStrategy(PathType type) { + this(type, null); + } + + public RemoteStorePathStrategy(PathType type, PathHashAlgorithm hashAlgorithm) { + assert type.requiresHashAlgorithm() == false || Objects.nonNull(hashAlgorithm); + this.type = Objects.requireNonNull(type); + this.hashAlgorithm = hashAlgorithm; + } + + public PathType getType() { + return type; + } + + public PathHashAlgorithm getHashAlgorithm() { + return hashAlgorithm; + } + + @Override + public String toString() { + return "RemoteStorePathStrategy{" + "type=" + type + ", hashAlgorithm=" + hashAlgorithm + '}'; + } + + public BlobPath generatePath(PathInput pathInput) { + return type.generatePath(pathInput, hashAlgorithm); + } + + /** + * Wrapper class for the input required to generate path for remote store uploads. + * @opensearch.internal + */ + @PublicApi(since = "2.14.0") + public static class PathInput { + private final BlobPath basePath; + private final String indexUUID; + private final String shardId; + private final DataCategory dataCategory; + private final DataType dataType; + + public PathInput(BlobPath basePath, String indexUUID, String shardId, DataCategory dataCategory, DataType dataType) { + this.basePath = Objects.requireNonNull(basePath); + this.indexUUID = Objects.requireNonNull(indexUUID); + this.shardId = Objects.requireNonNull(shardId); + this.dataCategory = Objects.requireNonNull(dataCategory); + this.dataType = Objects.requireNonNull(dataType); + } + + BlobPath basePath() { + return basePath; + } + + String indexUUID() { + return indexUUID; + } + + String shardId() { + return shardId; + } + + DataCategory dataCategory() { + return dataCategory; + } + + DataType dataType() { + return dataType; + } + + /** + * Returns a new builder for {@link PathInput}. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Builder for {@link PathInput}. 
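+         * Illustrative usage: {@code PathInput.builder().basePath(basePath).indexUUID(indexUUID)
+         *     .shardId("0").dataCategory(SEGMENTS).dataType(DATA).build()}.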
+ * + * @opensearch.internal + */ + @PublicApi(since = "2.14.0") + public static class Builder { + private BlobPath basePath; + private String indexUUID; + private String shardId; + private DataCategory dataCategory; + private DataType dataType; + + public Builder basePath(BlobPath basePath) { + this.basePath = basePath; + return this; + } + + public Builder indexUUID(String indexUUID) { + this.indexUUID = indexUUID; + return this; + } + + public Builder shardId(String shardId) { + this.shardId = shardId; + return this; + } + + public Builder dataCategory(DataCategory dataCategory) { + this.dataCategory = dataCategory; + return this; + } + + public Builder dataType(DataType dataType) { + this.dataType = dataType; + return this; + } + + public PathInput build() { + return new PathInput(basePath, indexUUID, shardId, dataCategory, dataType); + } + } + } + +} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathTypeResolver.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java similarity index 54% rename from server/src/main/java/org/opensearch/index/remote/RemoteStorePathTypeResolver.java rename to server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java index 5d014c9862d45..20fc516132220 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathTypeResolver.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java @@ -9,27 +9,29 @@ package org.opensearch.index.remote; import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.indices.IndicesService; /** - * Determines the {@link RemoteStorePathType} at the time of index metadata creation. + * Determines the {@link RemoteStorePathStrategy} at the time of index metadata creation. * * @opensearch.internal */ -public class RemoteStorePathTypeResolver { +public class RemoteStorePathStrategyResolver { - private volatile RemoteStorePathType type; + private volatile PathType type; - public RemoteStorePathTypeResolver(ClusterSettings clusterSettings) { + public RemoteStorePathStrategyResolver(ClusterSettings clusterSettings) { type = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING); clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING, this::setType); } - public RemoteStorePathType getType() { - return type; + public RemoteStorePathStrategy get() { + return new RemoteStorePathStrategy(type, PathHashAlgorithm.FNV_1A); } - public void setType(RemoteStorePathType type) { + private void setType(PathType type) { this.type = type; } } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java deleted file mode 100644 index 742d7b501f227..0000000000000 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.index.remote; - -import org.opensearch.common.annotation.PublicApi; -import org.opensearch.common.blobstore.BlobPath; -import org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory; -import org.opensearch.index.remote.RemoteStoreDataEnums.DataType; - -import java.util.Locale; - -/** - * Enumerates the types of remote store paths resolution techniques supported by OpenSearch. - * For more information, see Github issue #12567. - * - * @opensearch.internal - */ -@PublicApi(since = "2.14.0") -public enum RemoteStorePathType { - - FIXED { - @Override - public BlobPath generatePath(BlobPath basePath, String indexUUID, String shardId, String dataCategory, String dataType) { - return basePath.add(indexUUID).add(shardId).add(dataCategory).add(dataType); - } - }, - HASHED_PREFIX { - @Override - public BlobPath generatePath(BlobPath basePath, String indexUUID, String shardId, String dataCategory, String dataType) { - // TODO - We need to implement this, keeping the same path as Fixed for sake of multiple tests that can fail otherwise. - // throw new UnsupportedOperationException("Not implemented"); --> Not using this for unblocking couple of tests. - return basePath.add(indexUUID).add(shardId).add(dataCategory).add(dataType); - } - }; - - /** - * @param basePath base path of the underlying blob store repository - * @param indexUUID of the index - * @param shardId shard id - * @param dataCategory is either translog or segment - * @param dataType can be one of data, metadata or lock_files. - * @return the blob path for the underlying remote store path type. - */ - public BlobPath path(BlobPath basePath, String indexUUID, String shardId, DataCategory dataCategory, DataType dataType) { - assert dataCategory.isSupportedDataType(dataType) : "category:" - + dataCategory - + " type:" - + dataType - + " are not supported together"; - return generatePath(basePath, indexUUID, shardId, dataCategory.getName(), dataType.getName()); - } - - abstract BlobPath generatePath(BlobPath basePath, String indexUUID, String shardId, String dataCategory, String dataType); - - public static RemoteStorePathType parseString(String remoteStoreBlobPathType) { - try { - return RemoteStorePathType.valueOf(remoteStoreBlobPathType.toUpperCase(Locale.ROOT)); - } catch (IllegalArgumentException | NullPointerException e) { - // IllegalArgumentException is thrown when the input does not match any enum name - // NullPointerException is thrown when the input is null - throw new IllegalArgumentException("Could not parse RemoteStorePathType for [" + remoteStoreBlobPathType + "]"); - } - } - - /** - * This string is used as key for storing information in the custom data in index settings. 
- */ - public static final String NAME = "path_type"; -} diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index f3a62cdf1436f..d1ebbd168df52 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -4944,7 +4944,7 @@ public void deleteTranslogFilesFromRemoteTranslog() throws IOException { TranslogFactory translogFactory = translogFactorySupplier.apply(indexSettings, shardRouting); assert translogFactory instanceof RemoteBlobStoreInternalTranslogFactory; Repository repository = ((RemoteBlobStoreInternalTranslogFactory) translogFactory).getRepository(); - RemoteFsTranslog.cleanup(repository, shardId, getThreadPool(), indexSettings.getRemoteStorePathType()); + RemoteFsTranslog.cleanup(repository, shardId, getThreadPool(), indexSettings.getRemoteStorePathStrategy()); } /* @@ -4966,7 +4966,7 @@ public void syncTranslogFilesFromRemoteTranslog() throws IOException { shardId, getThreadPool(), shardPath().resolveTranslog(), - indexSettings.getRemoteStorePathType(), + indexSettings.getRemoteStorePathStrategy(), logger ); } diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index 9779a2320d79f..c74ab5e24a980 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -58,7 +58,8 @@ import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.EngineException; import org.opensearch.index.mapper.MapperService; -import org.opensearch.index.remote.RemoteStorePathType; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.snapshots.IndexShardRestoreFailedException; import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; @@ -411,7 +412,8 @@ void recoverFromSnapshotAndRemoteStore( remoteStoreRepository, indexUUID, shardId, - RemoteStorePathType.FIXED // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot + new RemoteStorePathStrategy(PathType.FIXED) + // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot ); sourceRemoteDirectory.initializeToSpecificCommit( primaryTerm, diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index 999625e0e579f..ec1163fe91b6c 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -31,7 +31,7 @@ import org.opensearch.common.lucene.store.ByteArrayIndexInput; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; -import org.opensearch.index.remote.RemoteStorePathType; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.index.store.lockmanager.FileLockInfo; import org.opensearch.index.store.lockmanager.RemoteStoreCommitLevelLockManager; @@ -899,14 +899,14 @@ public static void remoteDirectoryCleanup( String remoteStoreRepoForIndex, String indexUUID, ShardId shardId, - 
RemoteStorePathType pathType + RemoteStorePathStrategy pathStrategy ) { try { RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = (RemoteSegmentStoreDirectory) remoteDirectoryFactory.newDirectory( remoteStoreRepoForIndex, indexUUID, shardId, - pathType + pathStrategy ); remoteSegmentStoreDirectory.deleteStaleSegments(0); remoteSegmentStoreDirectory.deleteIfEmpty(); diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java index f0ecd96bcf1f7..e462f6d4ac011 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java @@ -13,7 +13,7 @@ import org.opensearch.common.blobstore.BlobPath; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; -import org.opensearch.index.remote.RemoteStorePathType; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.shard.ShardPath; import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; import org.opensearch.index.store.lockmanager.RemoteStoreLockManagerFactory; @@ -28,9 +28,9 @@ import java.util.Objects; import java.util.function.Supplier; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.SEGMENTS; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.DATA; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.METADATA; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; /** * Factory for a remote store directory @@ -52,12 +52,12 @@ public RemoteSegmentStoreDirectoryFactory(Supplier reposito public Directory newDirectory(IndexSettings indexSettings, ShardPath path) throws IOException { String repositoryName = indexSettings.getRemoteStoreRepository(); String indexUUID = indexSettings.getIndex().getUUID(); - return newDirectory(repositoryName, indexUUID, path.getShardId(), indexSettings.getRemoteStorePathType()); + return newDirectory(repositoryName, indexUUID, path.getShardId(), indexSettings.getRemoteStorePathStrategy()); } - public Directory newDirectory(String repositoryName, String indexUUID, ShardId shardId, RemoteStorePathType pathType) + public Directory newDirectory(String repositoryName, String indexUUID, ShardId shardId, RemoteStorePathStrategy pathStrategy) throws IOException { - assert Objects.nonNull(pathType); + assert Objects.nonNull(pathStrategy); try (Repository repository = repositoriesService.get().repository(repositoryName)) { assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; @@ -65,16 +65,30 @@ public Directory newDirectory(String repositoryName, String indexUUID, ShardId s BlobPath repositoryBasePath = blobStoreRepository.basePath(); String shardIdStr = String.valueOf(shardId.id()); + RemoteStorePathStrategy.PathInput dataPathInput = RemoteStorePathStrategy.PathInput.builder() + .basePath(repositoryBasePath) + .indexUUID(indexUUID) + .shardId(shardIdStr) + .dataCategory(SEGMENTS) + .dataType(DATA) + .build(); // Derive the path for data directory of SEGMENTS - BlobPath dataPath = pathType.path(repositoryBasePath, indexUUID, shardIdStr, SEGMENTS, DATA); + BlobPath dataPath = 
pathStrategy.generatePath(dataPathInput); RemoteDirectory dataDirectory = new RemoteDirectory( blobStoreRepository.blobStore().blobContainer(dataPath), blobStoreRepository::maybeRateLimitRemoteUploadTransfers, blobStoreRepository::maybeRateLimitRemoteDownloadTransfers ); + RemoteStorePathStrategy.PathInput mdPathInput = RemoteStorePathStrategy.PathInput.builder() + .basePath(repositoryBasePath) + .indexUUID(indexUUID) + .shardId(shardIdStr) + .dataCategory(SEGMENTS) + .dataType(METADATA) + .build(); // Derive the path for metadata directory of SEGMENTS - BlobPath mdPath = pathType.path(repositoryBasePath, indexUUID, shardIdStr, SEGMENTS, METADATA); + BlobPath mdPath = pathStrategy.generatePath(mdPathInput); RemoteDirectory metadataDirectory = new RemoteDirectory(blobStoreRepository.blobStore().blobContainer(mdPath)); // The path for lock is derived within the RemoteStoreLockManagerFactory @@ -83,7 +97,7 @@ public Directory newDirectory(String repositoryName, String indexUUID, ShardId s repositoryName, indexUUID, shardIdStr, - pathType + pathStrategy ); return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, mdLockManager, threadPool, shardId); diff --git a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java index c033e4f7ad0aa..45d466d3a8ce8 100644 --- a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java +++ b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java @@ -11,7 +11,7 @@ import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; -import org.opensearch.index.remote.RemoteStorePathType; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.store.RemoteBufferedOutputDirectory; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; @@ -20,8 +20,8 @@ import java.util.function.Supplier; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.SEGMENTS; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.LOCK_FILES; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.LOCK_FILES; /** * Factory for remote store lock manager @@ -36,8 +36,13 @@ public RemoteStoreLockManagerFactory(Supplier repositoriesS this.repositoriesService = repositoriesService; } - public RemoteStoreLockManager newLockManager(String repositoryName, String indexUUID, String shardId, RemoteStorePathType pathType) { - return newLockManager(repositoriesService.get(), repositoryName, indexUUID, shardId, pathType); + public RemoteStoreLockManager newLockManager( + String repositoryName, + String indexUUID, + String shardId, + RemoteStorePathStrategy pathStrategy + ) { + return newLockManager(repositoriesService.get(), repositoryName, indexUUID, shardId, pathStrategy); } public static RemoteStoreMetadataLockManager newLockManager( @@ -45,12 +50,20 @@ public static RemoteStoreMetadataLockManager newLockManager( String repositoryName, String indexUUID, String shardId, - RemoteStorePathType pathType + RemoteStorePathStrategy pathStrategy ) { try (Repository repository = repositoriesService.repository(repositoryName)) { assert repository instanceof BlobStoreRepository : 
"repository should be instance of BlobStoreRepository"; BlobPath repositoryBasePath = ((BlobStoreRepository) repository).basePath(); - BlobPath lockDirectoryPath = pathType.path(repositoryBasePath, indexUUID, shardId, SEGMENTS, LOCK_FILES); + + RemoteStorePathStrategy.PathInput lockFilesPathInput = RemoteStorePathStrategy.PathInput.builder() + .basePath(repositoryBasePath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(SEGMENTS) + .dataType(LOCK_FILES) + .build(); + BlobPath lockDirectoryPath = pathStrategy.generatePath(lockFilesPathInput); BlobContainer lockDirectoryBlobContainer = ((BlobStoreRepository) repository).blobStore().blobContainer(lockDirectoryPath); return new RemoteStoreMetadataLockManager(new RemoteBufferedOutputDirectory(lockDirectoryBlobContainer)); } catch (RepositoryMissingException e) { diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index f0fb03cc905a4..bb7769eae1bf5 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -19,7 +19,7 @@ import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.util.FileSystemUtils; -import org.opensearch.index.remote.RemoteStorePathType; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.translog.transfer.BlobStoreTransferService; import org.opensearch.index.translog.transfer.FileTransferTracker; @@ -50,9 +50,9 @@ import java.util.function.LongConsumer; import java.util.function.LongSupplier; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.TRANSLOG; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.DATA; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.METADATA; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; /** * A Translog implementation which syncs local FS with a remote store @@ -113,7 +113,7 @@ public RemoteFsTranslog( shardId, fileTransferTracker, remoteTranslogTransferTracker, - indexSettings().getRemoteStorePathType() + indexSettings().getRemoteStorePathStrategy() ); try { download(translogTransferManager, location, logger); @@ -162,7 +162,7 @@ public static void download( ShardId shardId, ThreadPool threadPool, Path location, - RemoteStorePathType pathType, + RemoteStorePathStrategy pathStrategy, Logger logger ) throws IOException { assert repository instanceof BlobStoreRepository : String.format( @@ -181,7 +181,7 @@ public static void download( shardId, fileTransferTracker, remoteTranslogTransferTracker, - pathType + pathStrategy ); RemoteFsTranslog.download(translogTransferManager, location, logger); logger.trace(remoteTranslogTransferTracker.toString()); @@ -259,13 +259,27 @@ public static TranslogTransferManager buildTranslogTransferManager( ShardId shardId, FileTransferTracker fileTransferTracker, RemoteTranslogTransferTracker tracker, - RemoteStorePathType pathType + RemoteStorePathStrategy pathStrategy ) { - assert Objects.nonNull(pathType); + assert Objects.nonNull(pathStrategy); String indexUUID = shardId.getIndex().getUUID(); String shardIdStr = 
String.valueOf(shardId.id()); - BlobPath dataPath = pathType.path(blobStoreRepository.basePath(), indexUUID, shardIdStr, TRANSLOG, DATA); - BlobPath mdPath = pathType.path(blobStoreRepository.basePath(), indexUUID, shardIdStr, TRANSLOG, METADATA); + RemoteStorePathStrategy.PathInput dataPathInput = RemoteStorePathStrategy.PathInput.builder() + .basePath(blobStoreRepository.basePath()) + .indexUUID(indexUUID) + .shardId(shardIdStr) + .dataCategory(TRANSLOG) + .dataType(DATA) + .build(); + BlobPath dataPath = pathStrategy.generatePath(dataPathInput); + RemoteStorePathStrategy.PathInput mdPathInput = RemoteStorePathStrategy.PathInput.builder() + .basePath(blobStoreRepository.basePath()) + .indexUUID(indexUUID) + .shardId(shardIdStr) + .dataCategory(TRANSLOG) + .dataType(METADATA) + .build(); + BlobPath mdPath = pathStrategy.generatePath(mdPathInput); BlobStoreTransferService transferService = new BlobStoreTransferService(blobStoreRepository.blobStore(), threadPool); return new TranslogTransferManager(shardId, transferService, dataPath, mdPath, fileTransferTracker, tracker); } @@ -539,7 +553,7 @@ private void deleteStaleRemotePrimaryTerms() { } } - public static void cleanup(Repository repository, ShardId shardId, ThreadPool threadPool, RemoteStorePathType pathType) + public static void cleanup(Repository repository, ShardId shardId, ThreadPool threadPool, RemoteStorePathStrategy pathStrategy) throws IOException { assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; BlobStoreRepository blobStoreRepository = (BlobStoreRepository) repository; @@ -553,7 +567,7 @@ public static void cleanup(Repository repository, ShardId shardId, ThreadPool th shardId, fileTransferTracker, remoteTranslogTransferTracker, - pathType + pathStrategy ); // clean up all remote translog files translogTransferManager.deleteTranslogFiles(); diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 28ad4b23e319c..717f4dad4f073 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -123,7 +123,7 @@ import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.recovery.RecoveryStats; import org.opensearch.index.refresh.RefreshStats; -import org.opensearch.index.remote.RemoteStorePathType; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.search.stats.SearchStats; import org.opensearch.index.seqno.RetentionLeaseStats; @@ -309,10 +309,10 @@ public class IndicesService extends AbstractLifecycleComponent * This setting is used to set the remote store blob store path prefix strategy. This setting is effective only for * remote store enabled cluster. 
*/
-    public static final Setting<RemoteStorePathType> CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING = new Setting<>(
+    public static final Setting<PathType> CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING = new Setting<>(
         "cluster.remote_store.index.path.prefix.type",
-        RemoteStorePathType.FIXED.toString(),
-        RemoteStorePathType::parseString,
+        PathType.FIXED.toString(),
+        PathType::parseString,
         Property.NodeScope,
         Property.Dynamic
     );
diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java
index a7c2fb03285b0..5aab02993db34 100644
--- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java
+++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java
@@ -108,7 +108,8 @@
 import org.opensearch.core.xcontent.NamedXContentRegistry;
 import org.opensearch.core.xcontent.XContentParser;
 import org.opensearch.index.mapper.MapperService;
-import org.opensearch.index.remote.RemoteStorePathType;
+import org.opensearch.index.remote.RemoteStoreEnums.PathType;
+import org.opensearch.index.remote.RemoteStorePathStrategy;
 import org.opensearch.index.snapshots.IndexShardRestoreFailedException;
 import org.opensearch.index.snapshots.IndexShardSnapshotStatus;
 import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot;
@@ -671,7 +672,8 @@ public void cloneRemoteStoreIndexShardSnapshot(
             remoteStoreRepository,
             indexUUID,
             String.valueOf(shardId.shardId()),
-            RemoteStorePathType.FIXED // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot
+            new RemoteStorePathStrategy(PathType.FIXED)
+            // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot
         );
         remoteStoreMetadataLockManger.cloneLock(
             FileLockInfo.getLockInfoBuilder().withAcquirerId(source.getUUID()).build(),
@@ -1110,7 +1112,7 @@ public static void remoteDirectoryCleanupAsync(
         String indexUUID,
         ShardId shardId,
         String threadPoolName,
-        RemoteStorePathType pathType
+        RemoteStorePathStrategy pathStrategy
     ) {
         threadpool.executor(threadPoolName)
             .execute(
@@ -1120,7 +1122,7 @@ public static void remoteDirectoryCleanupAsync(
                     remoteStoreRepoForIndex,
                     indexUUID,
                     shardId,
-                    pathType
+                    pathStrategy
                 ),
                 indexUUID,
                 shardId
@@ -1152,7 +1154,8 @@ protected void releaseRemoteStoreLockAndCleanup(
                 remoteStoreRepoForIndex,
                 indexUUID,
                 shardId,
-                RemoteStorePathType.HASHED_PREFIX // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot
+                new RemoteStorePathStrategy(PathType.FIXED)
+                // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot
             );
             remoteStoreMetadataLockManager.release(FileLockInfo.getLockInfoBuilder().withAcquirerId(shallowSnapshotUUID).build());
             logger.debug("Successfully released lock for shard {} of index with uuid {}", shardId, indexUUID);
@@ -1175,7 +1178,8 @@ protected void releaseRemoteStoreLockAndCleanup(
                 indexUUID,
                 new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.parseInt(shardId)),
                 ThreadPool.Names.REMOTE_PURGE,
-                RemoteStorePathType.FIXED // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot
+                new RemoteStorePathStrategy(PathType.FIXED)
+                // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot
             );
         }
     }
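A note for readers following the refactor: callers that previously passed five positional arguments to RemoteStorePathType.path(...) now describe the request through a RemoteStorePathStrategy.PathInput and let the strategy generate the path. For PathType.FIXED the resulting layout is unchanged. Below is a minimal, self-contained sketch of that fixed layout; the class name and values are illustrative stand-ins, not the OpenSearch API:

public final class FixedPathLayoutSketch {
    // FIXED keeps the legacy layout: <base>/<index-uuid>/<shard-id>/<category>/<type>/
    static String fixedPath(String basePath, String indexUUID, String shardId, String category, String type) {
        return String.join("/", basePath, indexUUID, shardId, category, type) + "/";
    }

    public static void main(String[] args) {
        // Mirrors the assertion in testGeneratePathForFixedType further down:
        // basePath + dataCategory.getName() + "/" + dataType.getName() + "/"
        System.out.println(fixedPath("base", "k2ijhe877d7yuhx7", "0", "segments", "metadata"));
    }
}

The hashed_prefix variant appears to differ only in prefixing a hash derived from the indexUUID, shardId, data category, and data type, which is what the FNV1a test inputs further down (for example "1sH3kJO5TyeskNekv2YTbA0segmentsdata") suggest.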
diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java
index da2a36cbb0701..ff393ecf19a99 100644
--- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java
+++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java
@@ -451,7 +451,7 @@ public ClusterState execute(ClusterState currentState) {
                         .put(snapshotIndexMetadata.getSettings())
                         .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())
                 );
-                createIndexService.addRemoteStorePathTypeInCustomData(indexMdBuilder, false);
+                createIndexService.addRemoteStorePathStrategyInCustomData(indexMdBuilder, false);
                 shardLimitValidator.validateShardLimit(
                     renamedIndexName,
                     snapshotIndexMetadata.getSettings(),
diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java
index cf4de32890a2a..a2f19b8c694d0 100644
--- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java
@@ -71,7 +71,8 @@
 import org.opensearch.index.IndexSettings;
 import org.opensearch.index.mapper.MapperService;
 import org.opensearch.index.query.QueryShardContext;
-import org.opensearch.index.remote.RemoteStorePathType;
+import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm;
+import org.opensearch.index.remote.RemoteStoreEnums.PathType;
 import org.opensearch.index.translog.Translog;
 import org.opensearch.indices.IndexCreationException;
 import org.opensearch.indices.IndicesService;
@@ -1587,32 +1588,38 @@ public void testBuildIndexMetadata() {
      */
     public void testRemoteCustomData() {
         // Case 1 - Remote store is not enabled
-        IndexMetadata indexMetadata = testRemoteCustomData(false, randomFrom(RemoteStorePathType.values()));
+        IndexMetadata indexMetadata = testRemoteCustomData(false, randomFrom(PathType.values()));
         assertNull(indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY));
 
         // Case 2 - cluster.remote_store.index.path.prefix.optimised=fixed (default value)
-        indexMetadata = testRemoteCustomData(true, RemoteStorePathType.FIXED);
+        indexMetadata = testRemoteCustomData(true, PathType.FIXED);
+        validateRemoteCustomData(indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY), PathType.NAME, PathType.FIXED.name());
         validateRemoteCustomData(
             indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY),
-            RemoteStorePathType.NAME,
-            RemoteStorePathType.FIXED.toString()
+            PathHashAlgorithm.NAME,
+            PathHashAlgorithm.FNV_1A.name()
         );
 
         // Case 3 - cluster.remote_store.index.path.prefix.optimised=hashed_prefix
-        indexMetadata = testRemoteCustomData(true, RemoteStorePathType.HASHED_PREFIX);
+        indexMetadata = testRemoteCustomData(true, PathType.HASHED_PREFIX);
         validateRemoteCustomData(
             indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY),
-            RemoteStorePathType.NAME,
-            RemoteStorePathType.HASHED_PREFIX.toString()
+            PathType.NAME,
+            PathType.HASHED_PREFIX.toString()
+        );
+        validateRemoteCustomData(
+            indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY),
+            PathHashAlgorithm.NAME,
+            PathHashAlgorithm.FNV_1A.name()
         );
     }
 
-    private IndexMetadata testRemoteCustomData(boolean remoteStoreEnabled, RemoteStorePathType remoteStorePathType) {
+    private IndexMetadata testRemoteCustomData(boolean remoteStoreEnabled, PathType pathType) {
         Settings.Builder settingsBuilder = Settings.builder();
         if (remoteStoreEnabled) {
             settingsBuilder.put(NODE_ATTRIBUTES.getKey() + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, "test");
         }
-
settingsBuilder.put(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), remoteStorePathType.toString()); + settingsBuilder.put(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), pathType.toString()); Settings settings = settingsBuilder.build(); ClusterService clusterService = mock(ClusterService.class); diff --git a/server/src/test/java/org/opensearch/common/hashing/FNV1aTests.java b/server/src/test/java/org/opensearch/common/hashing/FNV1aTests.java new file mode 100644 index 0000000000000..8d41211f10134 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/hashing/FNV1aTests.java @@ -0,0 +1,48 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.hashing; + +import org.opensearch.common.hash.FNV1a; +import org.opensearch.test.OpenSearchTestCase; + +public class FNV1aTests extends OpenSearchTestCase { + + public void testHash32WithKnownValues() { + assertEquals(-1114940029532279145L, FNV1a.hash32("1sH3kJO5TyeskNekv2YTbA0segmentsdata")); + assertEquals(-5313793557685118702L, FNV1a.hash32("1sH3kJO5TyeskNekv2YTbA0segmentsmetadata")); + assertEquals(-4776941653547780179L, FNV1a.hash32("1sH3kJO5TyeskNekv2YTbA0translogdata")); + assertEquals(7773876801598345624L, FNV1a.hash32("1sH3kJO5TyeskNekv2YTbA0translogmetadata")); + assertEquals(3174284101845744576L, FNV1a.hash32("1sH3kJO5TyeskNekv2YTbA0segmentslock_files")); + assertEquals(875447599647258598L, FNV1a.hash32("hell")); + assertEquals(1460560186469985451L, FNV1a.hash32("hello")); + assertEquals(-4959477702557352110L, FNV1a.hash32("hello w")); + assertEquals(-777130915070571257L, FNV1a.hash32("hello wo")); + assertEquals(-7887204531510399185L, FNV1a.hash32("hello wor")); + assertEquals(-782004333700192647L, FNV1a.hash32("hello worl")); + assertEquals(2168278929747165095L, FNV1a.hash32("hello world")); + assertEquals(2655121221658607504L, FNV1a.hash32("The quick brown fox jumps over the lazy dog")); + } + + public void testHash64WithKnownValues() { + assertEquals(-8975854101357662761L, FNV1a.hash64("1sH3kJO5TyeskNekv2YTbA0segmentsdata")); + assertEquals(-4380291990281602606L, FNV1a.hash64("1sH3kJO5TyeskNekv2YTbA0segmentsmetadata")); + assertEquals(-4532418109365814419L, FNV1a.hash64("1sH3kJO5TyeskNekv2YTbA0translogdata")); + assertEquals(41331743556869080L, FNV1a.hash64("1sH3kJO5TyeskNekv2YTbA0translogmetadata")); + assertEquals(6170437157231275808L, FNV1a.hash64("1sH3kJO5TyeskNekv2YTbA0segmentslock_files")); + assertEquals(763638091547294502L, FNV1a.hash64("hell")); + assertEquals(-6615550055289275125L, FNV1a.hash64("hello")); + assertEquals(-8428874042178798254L, FNV1a.hash64("hello w")); + assertEquals(-6323438951910650201L, FNV1a.hash64("hello wo")); + assertEquals(7042426588567368687L, FNV1a.hash64("hello wor")); + assertEquals(7273314957493782425L, FNV1a.hash64("hello worl")); + assertEquals(8618312879776256743L, FNV1a.hash64("hello world")); + assertEquals(-866459186506731248L, FNV1a.hash64("The quick brown fox jumps over the lazy dog")); + } + +} diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathTypeTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java similarity index 51% rename from server/src/test/java/org/opensearch/index/remote/RemoteStorePathTypeTests.java rename to 
server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java index 0f108d1b45f5a..33008bee1a392 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathTypeTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java @@ -9,44 +9,46 @@ package org.opensearch.index.remote; import org.opensearch.common.blobstore.BlobPath; -import org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory; -import org.opensearch.index.remote.RemoteStoreDataEnums.DataType; +import org.opensearch.index.remote.RemoteStoreEnums.DataCategory; +import org.opensearch.index.remote.RemoteStoreEnums.DataType; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.index.remote.RemoteStorePathStrategy.PathInput; import org.opensearch.test.OpenSearchTestCase; import java.util.ArrayList; import java.util.List; import java.util.Locale; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.SEGMENTS; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.TRANSLOG; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.DATA; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.LOCK_FILES; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.METADATA; -import static org.opensearch.index.remote.RemoteStorePathType.FIXED; -import static org.opensearch.index.remote.RemoteStorePathType.parseString; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.LOCK_FILES; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; +import static org.opensearch.index.remote.RemoteStoreEnums.PathType.FIXED; +import static org.opensearch.index.remote.RemoteStoreEnums.PathType.parseString; -public class RemoteStorePathTypeTests extends OpenSearchTestCase { +public class RemoteStoreEnumsTests extends OpenSearchTestCase { private static final String SEPARATOR = "/"; public void testParseString() { // Case 1 - Pass values from the enum. 
String typeString = FIXED.toString(); - RemoteStorePathType type = parseString(randomFrom(typeString, typeString.toLowerCase(Locale.ROOT))); + PathType type = parseString(randomFrom(typeString, typeString.toLowerCase(Locale.ROOT))); assertEquals(FIXED, type); - typeString = RemoteStorePathType.HASHED_PREFIX.toString(); + typeString = PathType.HASHED_PREFIX.toString(); type = parseString(randomFrom(typeString, typeString.toLowerCase(Locale.ROOT))); - assertEquals(RemoteStorePathType.HASHED_PREFIX, type); + assertEquals(PathType.HASHED_PREFIX, type); // Case 2 - Pass random string String randomTypeString = randomAlphaOfLength(2); IllegalArgumentException ex = assertThrows(IllegalArgumentException.class, () -> parseString(randomTypeString)); - assertEquals("Could not parse RemoteStorePathType for [" + randomTypeString + "]", ex.getMessage()); + assertEquals("Could not parse PathType for [" + randomTypeString + "]", ex.getMessage()); // Case 3 - Null string ex = assertThrows(IllegalArgumentException.class, () -> parseString(null)); - assertEquals("Could not parse RemoteStorePathType for [null]", ex.getMessage()); + assertEquals("Could not parse PathType for [null]", ex.getMessage()); } public void testGeneratePathForFixedType() { @@ -63,32 +65,74 @@ public void testGeneratePathForFixedType() { String basePath = getPath(pathList) + indexUUID + SEPARATOR + shardId + SEPARATOR; // Translog Data - BlobPath result = FIXED.path(blobPath, indexUUID, shardId, dataCategory, dataType); + PathInput pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + BlobPath result = FIXED.path(pathInput, null); assertEquals(basePath + dataCategory.getName() + SEPARATOR + dataType.getName() + SEPARATOR, result.buildAsString()); // Translog Metadata dataType = METADATA; - result = FIXED.path(blobPath, indexUUID, shardId, dataCategory, dataType); + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = FIXED.path(pathInput, null); assertEquals(basePath + dataCategory.getName() + SEPARATOR + dataType.getName() + SEPARATOR, result.buildAsString()); // Translog Lock files - This is a negative case where the assertion will trip. 
- BlobPath finalBlobPath = blobPath; - assertThrows(AssertionError.class, () -> FIXED.path(finalBlobPath, indexUUID, shardId, TRANSLOG, LOCK_FILES)); + dataType = LOCK_FILES; + PathInput finalPathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + assertThrows(AssertionError.class, () -> FIXED.path(finalPathInput, null)); // Segment Data dataCategory = SEGMENTS; dataType = DATA; - result = FIXED.path(blobPath, indexUUID, shardId, dataCategory, dataType); + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = FIXED.path(pathInput, null); assertEquals(basePath + dataCategory.getName() + SEPARATOR + dataType.getName() + SEPARATOR, result.buildAsString()); // Segment Metadata dataType = METADATA; - result = FIXED.path(blobPath, indexUUID, shardId, dataCategory, dataType); + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = FIXED.path(pathInput, null); assertEquals(basePath + dataCategory.getName() + SEPARATOR + dataType.getName() + SEPARATOR, result.buildAsString()); // Segment Metadata dataType = LOCK_FILES; - result = FIXED.path(blobPath, indexUUID, shardId, dataCategory, dataType); + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = FIXED.path(pathInput, null); assertEquals(basePath + dataCategory.getName() + SEPARATOR + dataType.getName() + SEPARATOR, result.buildAsString()); } @@ -108,4 +152,5 @@ private String getPath(List pathList) { } return p + SEPARATOR; } + } diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index 59c8a9d92f07b..11b4eb078226f 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -37,7 +37,9 @@ import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.engine.NRTReplicationEngineFactory; -import org.opensearch.index.remote.RemoteStorePathType; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; @@ -702,16 +704,19 @@ public void testCleanupAsync() throws Exception { String repositoryName = "test-repository"; String indexUUID = "test-idx-uuid"; ShardId shardId = new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.parseInt("0")); - RemoteStorePathType pathType = randomFrom(RemoteStorePathType.values()); + RemoteStorePathStrategy pathStrategy = new RemoteStorePathStrategy( + randomFrom(PathType.values()), + randomFrom(PathHashAlgorithm.values()) + ); RemoteSegmentStoreDirectory.remoteDirectoryCleanup( remoteSegmentStoreDirectoryFactory, repositoryName, indexUUID, shardId, - pathType + pathStrategy ); - verify(remoteSegmentStoreDirectoryFactory).newDirectory(repositoryName, 
indexUUID, shardId, pathType); + verify(remoteSegmentStoreDirectoryFactory).newDirectory(repositoryName, indexUUID, shardId, pathStrategy); verify(threadPool, times(0)).executor(ThreadPool.Names.REMOTE_PURGE); verify(remoteMetadataDirectory).delete(); verify(remoteDataDirectory).delete(); diff --git a/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactoryTests.java b/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactoryTests.java index 0fe5557737447..de3dfbdaa4778 100644 --- a/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactoryTests.java @@ -11,7 +11,8 @@ import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; -import org.opensearch.index.remote.RemoteStorePathType; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.OpenSearchTestCase; @@ -49,7 +50,7 @@ public void testNewLockManager() throws IOException { String testRepository = "testRepository"; String testIndexUUID = "testIndexUUID"; String testShardId = "testShardId"; - RemoteStorePathType pathType = RemoteStorePathType.FIXED; + RemoteStorePathStrategy pathStrategy = new RemoteStorePathStrategy(PathType.FIXED); BlobStoreRepository repository = mock(BlobStoreRepository.class); BlobStore blobStore = mock(BlobStore.class); @@ -65,7 +66,7 @@ public void testNewLockManager() throws IOException { testRepository, testIndexUUID, testShardId, - pathType + pathStrategy ); assertTrue(lockManager != null); diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java index 9f72d3c7bd825..70800d4fd423a 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java @@ -99,7 +99,7 @@ import static org.opensearch.common.util.BigArrays.NON_RECYCLING_INSTANCE; import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.TRANSLOG; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; import static org.opensearch.index.translog.SnapshotMatchers.containsOperationsInAnyOrder; import static org.opensearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; import static org.hamcrest.Matchers.contains; diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java index a9502dc051428..49719017ce736 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java @@ -48,8 +48,8 @@ import org.mockito.Mockito; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.TRANSLOG; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.METADATA; +import 
static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; import static org.opensearch.index.translog.transfer.TranslogTransferMetadata.METADATA_SEPARATOR; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyMap; From eb66f62f2d6a4a4aea23fb3182991aaf9b46f560 Mon Sep 17 00:00:00 2001 From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Date: Tue, 2 Apr 2024 16:27:48 +0530 Subject: [PATCH 64/74] Memoize isOnRemoteNode in index settings and refactor as well (#12994) * Memoize isOnRemoteNode in index settings and rename it as well --------- Signed-off-by: Gaurav Bafna --- .../org/opensearch/index/IndexSettings.java | 8 +++--- .../index/engine/NRTReplicationEngine.java | 2 +- .../RemoteStoreStatsTrackerFactory.java | 2 +- .../opensearch/index/shard/IndexShard.java | 26 +++++++++---------- .../org/opensearch/index/store/Store.java | 2 +- .../opensearch/index/translog/Translog.java | 2 +- .../opensearch/indices/IndicesService.java | 2 +- .../recovery/PeerRecoveryTargetService.java | 2 +- .../opensearch/index/IndexSettingsTests.java | 11 ++++++++ .../index/shard/IndexShardTestCase.java | 4 +-- 10 files changed, 37 insertions(+), 24 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index f17a254c4d3cd..f9062585c0093 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -763,6 +763,7 @@ public static IndexMergePolicy fromString(String text) { private volatile String defaultSearchPipeline; private final boolean widenIndexSortType; + private final boolean assignedOnRemoteNode; /** * The maximum age of a retention lease before it is considered expired. @@ -986,6 +987,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti * Now this sortField (IndexSort) is stored in SegmentInfo and we need to maintain backward compatibility for them. */ widenIndexSortType = IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(settings).before(V_2_7_0); + assignedOnRemoteNode = RemoteStoreNodeAttribute.isRemoteDataAttributePresent(this.getNodeSettings()); setEnableFuzzySetForDocId(scopedSettings.get(INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING)); setDocIdFuzzySetFalsePositiveProbability(scopedSettings.get(INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING)); @@ -1231,7 +1233,7 @@ public int getNumberOfReplicas() { * proper index setting during the migration. 
*/ public boolean isSegRepEnabledOrRemoteNode() { - return ReplicationType.SEGMENT.equals(replicationType) || isRemoteNode(); + return ReplicationType.SEGMENT.equals(replicationType) || isAssignedOnRemoteNode(); } public boolean isSegRepLocalEnabled() { @@ -1249,8 +1251,8 @@ public boolean isRemoteStoreEnabled() { return isRemoteStoreEnabled; } - public boolean isRemoteNode() { - return RemoteStoreNodeAttribute.isRemoteDataAttributePresent(this.getNodeSettings()); + public boolean isAssignedOnRemoteNode() { + return assignedOnRemoteNode; } /** diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java index 1e1825e1f8ace..d759423ce5a55 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ -437,7 +437,7 @@ protected final void closeNoLock(String reason, CountDownLatch closedLatch) { during promotion. */ if (engineConfig.getIndexSettings().isRemoteStoreEnabled() == false - && engineConfig.getIndexSettings().isRemoteNode() == false) { + && engineConfig.getIndexSettings().isAssignedOnRemoteNode() == false) { latestSegmentInfos.counter = latestSegmentInfos.counter + SI_COUNTER_INCREMENT; latestSegmentInfos.changed(); } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactory.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactory.java index e4c7eb56d02c6..21753e68db498 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactory.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactory.java @@ -68,7 +68,7 @@ public RemoteStoreStatsTrackerFactory(ClusterService clusterService, Settings se @Override public void afterIndexShardCreated(IndexShard indexShard) { - if (indexShard.indexSettings().isRemoteStoreEnabled() == false && indexShard.indexSettings().isRemoteNode() == false) { + if (indexShard.indexSettings().isRemoteStoreEnabled() == false && indexShard.indexSettings().isAssignedOnRemoteNode() == false) { return; } ShardId shardId = indexShard.shardId(); diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index d1ebbd168df52..2b935b743512a 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -406,7 +406,7 @@ public IndexShard( logger, threadPool, this::getEngine, - indexSettings.isRemoteNode(), + indexSettings.isAssignedOnRemoteNode(), () -> getRemoteTranslogUploadBufferInterval(remoteStoreSettings::getClusterRemoteTranslogBufferInterval) ); this.mapperService = mapperService; @@ -1469,7 +1469,7 @@ public SegmentsStats segmentStats(boolean includeSegmentFileSizes, boolean inclu SegmentsStats segmentsStats = getEngine().segmentsStats(includeSegmentFileSizes, includeUnloadedSegments); segmentsStats.addBitsetMemoryInBytes(shardBitsetFilterCache.getMemorySizeInBytes()); // Populate remote_store stats only if the index is remote store backed - if (indexSettings().isRemoteNode()) { + if (indexSettings().isAssignedOnRemoteNode()) { segmentsStats.addRemoteSegmentStats( new RemoteSegmentStats(remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(shardId).stats()) ); @@ -1491,7 +1491,7 @@ public FieldDataStats fieldDataStats(String... 
fields) { public TranslogStats translogStats() { TranslogStats translogStats = getEngine().translogManager().getTranslogStats(); // Populate remote_store stats only if the index is remote store backed - if (indexSettings.isRemoteNode()) { + if (indexSettings.isAssignedOnRemoteNode()) { translogStats.addRemoteTranslogStats( new RemoteTranslogStats(remoteStoreStatsTrackerFactory.getRemoteTranslogTransferTracker(shardId).stats()) ); @@ -1530,7 +1530,7 @@ public void flush(FlushRequest request) { * {@link org.opensearch.index.translog.TranslogDeletionPolicy} for details */ public void trimTranslog() { - if (indexSettings.isRemoteNode()) { + if (indexSettings.isAssignedOnRemoteNode()) { return; } verifyNotClosed(); @@ -2050,7 +2050,7 @@ public void close(String reason, boolean flushEngine, boolean deleted) throws IO ToDo : Fix this https://github.com/opensearch-project/OpenSearch/issues/8003 */ public RemoteSegmentStoreDirectory getRemoteDirectory() { - assert indexSettings.isRemoteNode(); + assert indexSettings.isAssignedOnRemoteNode(); assert remoteStore.directory() instanceof FilterDirectory : "Store.directory is not an instance of FilterDirectory"; FilterDirectory remoteStoreDirectory = (FilterDirectory) remoteStore.directory(); FilterDirectory byteSizeCachingStoreDirectory = (FilterDirectory) remoteStoreDirectory.getDelegate(); @@ -2063,7 +2063,7 @@ public RemoteSegmentStoreDirectory getRemoteDirectory() { * is in sync with local */ public boolean isRemoteSegmentStoreInSync() { - assert indexSettings.isRemoteNode(); + assert indexSettings.isAssignedOnRemoteNode(); try { RemoteSegmentStoreDirectory directory = getRemoteDirectory(); if (directory.readLatestMetadataFile() != null) { @@ -2102,7 +2102,7 @@ public void waitForRemoteStoreSync() { Calls onProgress on seeing an increased file count on remote */ public void waitForRemoteStoreSync(Runnable onProgress) { - assert indexSettings.isRemoteNode(); + assert indexSettings.isAssignedOnRemoteNode(); RemoteSegmentStoreDirectory directory = getRemoteDirectory(); int segmentUploadeCount = 0; if (shardRouting.primary() == false) { @@ -2277,7 +2277,7 @@ public long recoverLocallyAndFetchStartSeqNo(boolean localTranslog) { * @return the starting sequence number from which the recovery should start. */ private long recoverLocallyUptoLastCommit() { - assert indexSettings.isRemoteNode() : "Remote translog store is not enabled"; + assert indexSettings.isAssignedOnRemoteNode() : "Remote translog store is not enabled"; long seqNo; validateLocalRecoveryState(); @@ -3540,7 +3540,7 @@ assert getLocalCheckpoint() == primaryContext.getCheckpointStates().get(allocati } private void postActivatePrimaryMode() { - if (indexSettings.isRemoteNode()) { + if (indexSettings.isAssignedOnRemoteNode()) { // We make sure to upload translog (even if it does not contain any operations) to remote translog. // This helps to get a consistent state in remote store where both remote segment store and remote // translog contains data. 
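Stepping back from the individual call sites: the motivation for this rename-plus-memoization is that isRemoteNode() re-ran a node-attribute lookup on every invocation, and the method sits on hot paths (stats collection, translog writes, search-idle checks). Since node attributes are fixed for the lifetime of the settings object, the patch computes the answer once in the IndexSettings constructor. A stripped-down sketch of the idea, with hypothetical names (NodeScopedSettings and the attribute-prefix check stand in for the real classes):

import java.util.Map;

// Hypothetical sketch of the memoization pattern; not the actual IndexSettings class.
final class NodeScopedSettings {
    private final boolean assignedOnRemoteNode; // computed once at construction

    NodeScopedSettings(Map<String, String> nodeAttributes) {
        // Node attributes cannot change for the lifetime of this object,
        // so the result of the scan can safely be cached here.
        this.assignedOnRemoteNode = nodeAttributes.keySet().stream().anyMatch(key -> key.startsWith("remote_store."));
    }

    boolean isAssignedOnRemoteNode() {
        return assignedOnRemoteNode; // O(1); no repeated attribute scan on hot paths
    }
}

The rename from isRemoteNode to isAssignedOnRemoteNode also makes the call sites above read correctly during migration, when an index created as docrep can end up hosted on a remote-store-enabled node.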
@@ -4010,7 +4010,7 @@ public boolean enableUploadToRemoteTranslog() { } private boolean hasOneRemoteSegmentSyncHappened() { - assert indexSettings.isRemoteNode(); + assert indexSettings.isAssignedOnRemoteNode(); // We upload remote translog only after one remote segment upload in case of migration RemoteSegmentStoreDirectory rd = getRemoteDirectory(); AtomicBoolean segment_n_uploaded = new AtomicBoolean(false); @@ -4624,7 +4624,7 @@ public final boolean isSearchIdle() { public final boolean isSearchIdleSupported() { // If the index is remote store backed, then search idle is not supported. This is to ensure that async refresh // task continues to upload to remote store periodically. - if (isRemoteTranslogEnabled() || indexSettings.isRemoteNode()) { + if (isRemoteTranslogEnabled() || indexSettings.isAssignedOnRemoteNode()) { return false; } return indexSettings.isSegRepEnabledOrRemoteNode() == false || indexSettings.getNumberOfReplicas() == 0; @@ -5263,9 +5263,9 @@ enum ShardMigrationState { } static ShardMigrationState getShardMigrationState(IndexSettings indexSettings, boolean shouldSeed) { - if (indexSettings.isRemoteNode() && indexSettings.isRemoteStoreEnabled()) { + if (indexSettings.isAssignedOnRemoteNode() && indexSettings.isRemoteStoreEnabled()) { return REMOTE_NON_MIGRATING; - } else if (indexSettings.isRemoteNode()) { + } else if (indexSettings.isAssignedOnRemoteNode()) { return shouldSeed ? REMOTE_MIGRATING_UNSEEDED : REMOTE_MIGRATING_SEEDED; } return ShardMigrationState.DOCREP_NON_MIGRATING; diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java index 0992d86d6f0aa..56fc5b1ffa90d 100644 --- a/server/src/main/java/org/opensearch/index/store/Store.java +++ b/server/src/main/java/org/opensearch/index/store/Store.java @@ -893,7 +893,7 @@ public void beforeClose() { * @throws IOException when there is an IO error committing. 
*/
     public void commitSegmentInfos(SegmentInfos latestSegmentInfos, long maxSeqNo, long processedCheckpoint) throws IOException {
-        assert indexSettings.isSegRepEnabledOrRemoteNode() || indexSettings.isRemoteNode();
+        assert indexSettings.isSegRepEnabledOrRemoteNode() || indexSettings.isAssignedOnRemoteNode();
         metadataLock.writeLock().lock();
         try {
             final Map<String, String> userData = new HashMap<>(latestSegmentInfos.getUserData());
diff --git a/server/src/main/java/org/opensearch/index/translog/Translog.java b/server/src/main/java/org/opensearch/index/translog/Translog.java
index e78300e368099..7c50ed6ecd58f 100644
--- a/server/src/main/java/org/opensearch/index/translog/Translog.java
+++ b/server/src/main/java/org/opensearch/index/translog/Translog.java
@@ -525,7 +525,7 @@ TranslogWriter createWriter(
                 tragedy,
                 persistedSequenceNumberConsumer,
                 bigArrays,
-                indexSettings.isRemoteNode()
+                indexSettings.isAssignedOnRemoteNode()
             );
         } catch (final IOException e) {
             throw new TranslogException(shardId, "failed to create new translog file", e);
diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java
index 717f4dad4f073..ba78c28f3db88 100644
--- a/server/src/main/java/org/opensearch/indices/IndicesService.java
+++ b/server/src/main/java/org/opensearch/indices/IndicesService.java
@@ -933,7 +933,7 @@ private EngineFactory getEngineFactory(final IndexSettings idxSettings) {
         if (idxSettings.isRemoteSnapshot()) {
             return config -> new ReadOnlyEngine(config, new SeqNoStats(0, 0, 0), new TranslogStats(), true, Function.identity(), false);
         }
-        if (idxSettings.isSegRepEnabledOrRemoteNode() || idxSettings.isRemoteNode()) {
+        if (idxSettings.isSegRepEnabledOrRemoteNode() || idxSettings.isAssignedOnRemoteNode()) {
             return new NRTReplicationEngineFactory();
         }
         return new InternalEngineFactory();
diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java
index 227496f72f83d..c24840d0c1333 100644
--- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java
+++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java
@@ -261,7 +261,7 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi
             }
         }
         final boolean hasRemoteTranslog = recoveryTarget.state().getPrimary() == false
-            && indexShard.indexSettings().isRemoteNode();
+            && indexShard.indexSettings().isAssignedOnRemoteNode();
         final boolean hasNoTranslog = indexShard.indexSettings().isRemoteSnapshot();
         final boolean verifyTranslog = (hasRemoteTranslog || hasNoTranslog || hasRemoteSegmentStore) == false;
         final long startingSeqNo = indexShard.recoverLocallyAndFetchStartSeqNo(!hasRemoteTranslog);
diff --git a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java
index e4ce879a5ec5e..474ec73d5fe61 100644
--- a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java
+++ b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java
@@ -1053,4 +1053,15 @@ public void testDefaultSearchPipeline() throws Exception {
         settings.updateIndexMetadata(metadata);
         assertEquals("foo", settings.getDefaultSearchPipeline());
     }
+
+    public void testIsOnRemoteNode() {
+        Version version = VersionUtils.getPreviousVersion();
+        Settings theSettings = Settings.builder()
+            .put(IndexMetadata.SETTING_VERSION_CREATED, version)
+
.put(IndexMetadata.SETTING_INDEX_UUID, "0xdeadbeef") + .build(); + Settings nodeSettings = Settings.builder().put("node.attr.remote_store.translog.repository", "my-repo-1").build(); + IndexSettings settings = newIndexSettings(newIndexMeta("index", theSettings), nodeSettings); + assertTrue("Index should be on remote node", settings.isAssignedOnRemoteNode()); + } } diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 9baa6110ab54e..80d16a8243634 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -654,7 +654,7 @@ protected IndexShard newShard( RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory = null; RepositoriesService mockRepoSvc = mock(RepositoriesService.class); - if (indexSettings.isRemoteStoreEnabled() || indexSettings.isRemoteNode()) { + if (indexSettings.isRemoteStoreEnabled() || indexSettings.isAssignedOnRemoteNode()) { String remoteStoreRepository = indexSettings.getRemoteStoreRepository(); // remote path via setting a repository . This is a hack used for shards are created using reset . // since we can't get remote path from IndexShard directly, we are using repository to store it . @@ -1498,7 +1498,7 @@ private SegmentReplicationTargetService prepareForReplication( SegmentReplicationSourceFactory sourceFactory = null; SegmentReplicationTargetService targetService; - if (primaryShard.indexSettings.isRemoteStoreEnabled() || primaryShard.indexSettings.isRemoteNode()) { + if (primaryShard.indexSettings.isRemoteStoreEnabled() || primaryShard.indexSettings.isAssignedOnRemoteNode()) { RecoverySettings recoverySettings = new RecoverySettings( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) From 8def8cb7cdc56fe7b6930377001513391f067422 Mon Sep 17 00:00:00 2001 From: Shourya Dutta Biswas <114977491+shourya035@users.noreply.github.com> Date: Tue, 2 Apr 2024 16:56:04 +0530 Subject: [PATCH 65/74] [Remote Store] Add Primary/Replica side changes to support Dual Replication during Remote Store Migration (#12821) Signed-off-by: Shourya Dutta Biswas <114977491+shourya035@users.noreply.github.com> --- .../opensearch/index/shard/IndexShardIT.java | 3 +- .../MigrationBaseTestCase.java | 95 +++- .../RemoteDualReplicationIT.java | 530 ++++++++++++++++++ .../action/bulk/TransportShardBulkAction.java | 2 +- .../ReplicationModeAwareProxy.java | 28 +- .../TransportReplicationAction.java | 12 +- .../org/opensearch/index/IndexService.java | 7 +- .../org/opensearch/index/IndexSettings.java | 6 +- .../seqno/GlobalCheckpointSyncAction.java | 2 +- .../index/seqno/ReplicationTracker.java | 69 ++- .../shard/CheckpointRefreshListener.java | 2 +- .../opensearch/index/shard/IndexShard.java | 37 +- .../opensearch/indices/IndicesService.java | 7 +- .../cluster/IndicesClusterStateService.java | 12 +- .../RecoverySourceHandlerFactory.java | 3 +- .../indices/recovery/RecoveryTarget.java | 11 +- .../SegmentReplicationSourceFactory.java | 2 +- .../checkpoint/PublishCheckpointAction.java | 8 +- ...portVerifyShardBeforeCloseActionTests.java | 7 +- .../flush/TransportShardFlushActionTests.java | 5 +- ...sportVerifyShardIndexBlockActionTests.java | 5 +- .../TransportShardRefreshActionTests.java | 5 +- .../bulk/TransportShardBulkActionTests.java | 5 +- ...TransportResyncReplicationActionTests.java | 5 +- 
.../ReplicationModeAwareProxyTests.java | 216 +++++++ .../ReplicationOperationTests.java | 196 ++++++- .../TransportReplicationActionTests.java | 7 + .../index/remote/RemoteStoreTestsHelper.java | 19 + .../RetentionLeasesReplicationTests.java | 4 +- .../GlobalCheckpointSyncActionTests.java | 8 +- ...PeerRecoveryRetentionLeaseExpiryTests.java | 3 +- ...ReplicationTrackerRetentionLeaseTests.java | 48 +- .../seqno/ReplicationTrackerTestCase.java | 23 +- .../index/seqno/ReplicationTrackerTests.java | 32 +- ...tentionLeaseBackgroundSyncActionTests.java | 5 +- .../seqno/RetentionLeaseSyncActionTests.java | 7 +- .../index/shard/IndexShardTests.java | 21 +- .../shard/PrimaryReplicaSyncerTests.java | 6 +- ...dicesLifecycleListenerSingleNodeTests.java | 4 +- ...actIndicesClusterStateServiceTestCase.java | 7 +- .../PeerRecoverySourceServiceTests.java | 5 +- .../PeerRecoveryTargetServiceTests.java | 5 +- .../PublishCheckpointActionTests.java | 41 +- .../index/engine/EngineTestCase.java | 3 +- ...enSearchIndexLevelReplicationTestCase.java | 31 +- .../index/shard/IndexShardTestCase.java | 69 ++- .../index/shard/IndexShardTestUtils.java | 67 +++ 47 files changed, 1540 insertions(+), 155 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java create mode 100644 server/src/test/java/org/opensearch/action/support/replication/ReplicationModeAwareProxyTests.java create mode 100644 test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestUtils.java diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java index d218f0a985cf3..f97950f2652a3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java @@ -715,7 +715,8 @@ public static final IndexShard newIndexShard( nodeId, null, DefaultRemoteStoreSettings.INSTANCE, - false + false, + IndexShardTestUtils.getFakeDiscoveryNodes(initializingShardRouting) ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java index 19da668c432cf..0c35f91121059 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java @@ -8,17 +8,29 @@ package org.opensearch.remotemigration; +import org.opensearch.action.DocWriteResponse; import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.opensearch.action.bulk.BulkRequest; +import org.opensearch.action.bulk.BulkResponse; +import org.opensearch.action.delete.DeleteResponse; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.index.IndexResponse; import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.repositories.fs.ReloadableFsRepository; import org.opensearch.test.OpenSearchIntegTestCase; import java.nio.file.Path; +import java.util.List; import java.util.concurrent.ExecutionException; +import 
java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
 
+import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING;
+import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING;
 import static org.opensearch.repositories.fs.ReloadableFsRepository.REPOSITORIES_FAILRATE_SETTING;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
 
@@ -28,8 +40,16 @@ public class MigrationBaseTestCase extends OpenSearchIntegTestCase {
     protected Path segmentRepoPath;
     protected Path translogRepoPath;
-
     boolean addRemote = false;
+    Settings extraSettings = Settings.EMPTY;
+
+    private final List<String> documentKeys = List.of(
+        randomAlphaOfLength(5),
+        randomAlphaOfLength(5),
+        randomAlphaOfLength(5),
+        randomAlphaOfLength(5),
+        randomAlphaOfLength(5)
+    );
 
     protected Settings nodeSettings(int nodeOrdinal) {
         if (segmentRepoPath == null || translogRepoPath == null) {
@@ -40,6 +60,7 @@ protected Settings nodeSettings(int nodeOrdinal) {
             logger.info("Adding remote store node");
             return Settings.builder()
                 .put(super.nodeSettings(nodeOrdinal))
+                .put(extraSettings)
                 .put(remoteStoreClusterSettings(REPOSITORY_NAME, segmentRepoPath, REPOSITORY_2_NAME, translogRepoPath))
                 .build();
         } else {
@@ -64,4 +85,76 @@ protected void setFailRate(String repoName, int value) throws ExecutionException
             client().admin().cluster().preparePutRepository(repoName).setType(ReloadableFsRepository.TYPE).setSettings(settings).get()
         );
     }
+
+    public void initDocRepToRemoteMigration() {
+        assertTrue(
+            internalCluster().client()
+                .admin()
+                .cluster()
+                .prepareUpdateSettings()
+                .setPersistentSettings(
+                    Settings.builder()
+                        .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed")
+                        .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store")
+                )
+                .get()
+                .isAcknowledged()
+        );
+    }
+
+    public BulkResponse indexBulk(String indexName, int numDocs) {
+        BulkRequest bulkRequest = new BulkRequest();
+        for (int i = 0; i < numDocs; i++) {
+            final IndexRequest request = client().prepareIndex(indexName)
+                .setId(UUIDs.randomBase64UUID())
+                .setSource(documentKeys.get(randomIntBetween(0, documentKeys.size() - 1)), randomAlphaOfLength(5))
+                .request();
+            bulkRequest.add(request);
+        }
+        return client().bulk(bulkRequest).actionGet();
+    }
+
+    private void indexSingleDoc(String indexName) {
+        IndexResponse indexResponse = client().prepareIndex(indexName).setId("id").setSource("field", "value").get();
+        assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult());
+        DeleteResponse deleteResponse = client().prepareDelete(indexName, "id").get();
+        assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult());
+        client().prepareIndex(indexName).setSource("auto", true).get();
+    }
+
+    public class AsyncIndexingService {
+        private String indexName;
+        private AtomicLong indexedDocs = new AtomicLong(0);
+        private AtomicBoolean finished = new AtomicBoolean();
+        private Thread indexingThread;
+
+        AsyncIndexingService(String indexName) {
+            this.indexName = indexName;
+        }
+
+        public void startIndexing() {
+            indexingThread = getIndexingThread();
+            indexingThread.start();
+        }
+
+        public void stopIndexing() throws InterruptedException {
+            finished.set(true);
+            indexingThread.join();
+        }
+
+        public long getIndexedDocs() {
+            return indexedDocs.get();
+        }
+
+        private Thread getIndexingThread() {
+            return new Thread(() -> {
+                while (finished.get() == false) {
+                    indexSingleDoc(indexName);
+                    long currentDocCount = indexedDocs.incrementAndGet();
+                    logger.info("Completed ingestion of {} docs", currentDocCount);
+
+                }
+            });
+        }
+    }
 }
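The AsyncIndexingService above is the standard cooperative-shutdown idiom: the writer thread polls an AtomicBoolean, and stopIndexing() flips the flag and then join()s, so getIndexedDocs() is only read after the worker's final write. A stripped-down, self-contained sketch of the same idiom (doWork() is a hypothetical stand-in for indexSingleDoc):

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;

// Minimal sketch of the stop-flag + join pattern; not OpenSearch code.
final class BackgroundWorker {
    private final AtomicBoolean finished = new AtomicBoolean(); // visible across threads without locking
    private final AtomicLong iterations = new AtomicLong();
    private Thread thread;

    void start() {
        thread = new Thread(() -> {
            while (finished.get() == false) {
                doWork();                    // hypothetical unit of work
                iterations.incrementAndGet();
            }
        });
        thread.start();
    }

    long stop() throws InterruptedException {
        finished.set(true); // request shutdown
        thread.join();      // join() establishes happens-before: the worker's writes are visible afterwards
        return iterations.get();
    }

    private void doWork() { /* e.g. index one document */ }
}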
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java
new file mode 100644
index 0000000000000..34b60d5f3e9b3
--- /dev/null
+++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java
@@ -0,0 +1,530 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.remotemigration;
+
+import org.opensearch.action.admin.indices.stats.CommonStats;
+import org.opensearch.action.admin.indices.stats.ShardStats;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.cluster.node.DiscoveryNodes;
+import org.opensearch.cluster.routing.ShardRouting;
+import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.index.IndexService;
+import org.opensearch.index.remote.RemoteSegmentStats;
+import org.opensearch.index.seqno.RetentionLease;
+import org.opensearch.index.seqno.RetentionLeases;
+import org.opensearch.indices.IndexingMemoryController;
+import org.opensearch.plugins.Plugin;
+import org.opensearch.remotestore.multipart.mocks.MockFsRepositoryPlugin;
+import org.opensearch.test.InternalSettingsPlugin;
+import org.opensearch.test.InternalTestCluster;
+import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.transport.MockTransportService;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
+
+@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
+public class RemoteDualReplicationIT extends MigrationBaseTestCase {
+    private final String REMOTE_PRI_DOCREP_REP = "remote-primary-docrep-replica";
+    private final String REMOTE_PRI_DOCREP_REMOTE_REP = "remote-primary-docrep-remote-replica";
+    private final String FAILOVER_REMOTE_TO_DOCREP = "failover-remote-to-docrep";
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        /* Adding the following mock plugins:
+        - InternalSettingsPlugin : To override default intervals of retention lease and global ckp sync
+        - MockFsRepositoryPlugin and MockTransportService.TestPlugin: To ensure remote interactions are not no-op and retention leases are properly propagated
+        */
+        return Stream.concat(
+            super.nodePlugins().stream(),
+            Stream.of(InternalSettingsPlugin.class, MockFsRepositoryPlugin.class, MockTransportService.TestPlugin.class)
+        ).collect(Collectors.toList());
+    }
+
+    /*
+    Scenario:
+    - Starts 2 docrep backed node
+    - Creates index with 1 replica
+    - Index some docs
+    - Start 1 remote backed node
+    - Move primary copy from docrep to remote through _cluster/reroute
+    - Index some more docs
+    - Assert primary-replica consistency
+    */
+    public void testRemotePrimaryDocRepReplica() throws Exception {
+        internalCluster().startClusterManagerOnlyNode();
+
+        logger.info("---> Starting 2 docrep data nodes");
+        internalCluster().startDataOnlyNodes(2);
+
internalCluster().validateClusterFormed(); + assertEquals(internalCluster().client().admin().cluster().prepareGetRepositories().get().repositories().size(), 0); + + logger.info("---> Creating index with 1 replica"); + Settings oneReplica = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "1s") + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s") + .build(); + createIndex(REMOTE_PRI_DOCREP_REP, oneReplica); + ensureGreen(REMOTE_PRI_DOCREP_REP); + + int initialBatch = randomIntBetween(1, 1000); + logger.info("---> Indexing {} docs", initialBatch); + indexBulk(REMOTE_PRI_DOCREP_REP, initialBatch); + + initDocRepToRemoteMigration(); + + logger.info("---> Starting 1 remote enabled data node"); + addRemote = true; + String remoteNodeName = internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + assertEquals( + internalCluster().client() + .admin() + .cluster() + .prepareGetRepositories(REPOSITORY_NAME, REPOSITORY_2_NAME) + .get() + .repositories() + .size(), + 2 + ); + + String primaryShardHostingNode = primaryNodeName(REMOTE_PRI_DOCREP_REP); + logger.info("---> Moving primary copy from {} to remote enabled node {}", primaryShardHostingNode, remoteNodeName); + assertAcked( + internalCluster().client() + .admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(REMOTE_PRI_DOCREP_REP, 0, primaryShardHostingNode, remoteNodeName)) + .get() + ); + ensureGreen(REMOTE_PRI_DOCREP_REP); + ClusterState clusterState = internalCluster().client().admin().cluster().prepareState().get().getState(); + String primaryShardHostingNodeId = clusterState.getRoutingTable() + .index(REMOTE_PRI_DOCREP_REP) + .shard(0) + .primaryShard() + .currentNodeId(); + assertTrue(clusterState.getNodes().get(primaryShardHostingNodeId).isRemoteStoreNode()); + + int secondBatch = randomIntBetween(1, 10); + logger.info("---> Indexing another {} docs", secondBatch); + indexBulk(REMOTE_PRI_DOCREP_REP, secondBatch); + // Defensive check to ensure that doc count in replica shard catches up to the primary copy + refreshAndWaitForReplication(REMOTE_PRI_DOCREP_REP); + assertReplicaAndPrimaryConsistency(REMOTE_PRI_DOCREP_REP, initialBatch, secondBatch); + } + + /* + Scenario: + - Starts 1 docrep backed data node + - Creates an index with 0 replica + - Starts 1 remote backed data node + - Index some docs + - Move primary copy from docrep to remote through _cluster/reroute + - Starts another remote backed data node + - Expands index to 2 replicas. 
One replica copy lies in remote backed node and other in docrep backed node + - Index some more docs + - Assert primary-replica consistency + */ + public void testRemotePrimaryDocRepAndRemoteReplica() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + + logger.info("---> Starting 1 docrep data nodes"); + String docrepNodeName = internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + assertEquals(internalCluster().client().admin().cluster().prepareGetRepositories().get().repositories().size(), 0); + + logger.info("---> Creating index with 0 replica"); + Settings zeroReplicas = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "1s") + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s") + .build(); + createIndex(REMOTE_PRI_DOCREP_REMOTE_REP, zeroReplicas); + ensureGreen(REMOTE_PRI_DOCREP_REMOTE_REP); + initDocRepToRemoteMigration(); + + logger.info("---> Starting 1 remote enabled data node"); + addRemote = true; + + String remoteNodeName = internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + assertEquals( + internalCluster().client() + .admin() + .cluster() + .prepareGetRepositories(REPOSITORY_NAME, REPOSITORY_2_NAME) + .get() + .repositories() + .size(), + 2 + ); + + int firstBatch = randomIntBetween(1, 100); + logger.info("---> Indexing {} docs", firstBatch); + indexBulk(REMOTE_PRI_DOCREP_REMOTE_REP, firstBatch); + + String primaryShardHostingNode = primaryNodeName(REMOTE_PRI_DOCREP_REMOTE_REP); + logger.info("---> Moving primary copy from {} to remote enabled node {}", primaryShardHostingNode, remoteNodeName); + assertAcked( + internalCluster().client() + .admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(REMOTE_PRI_DOCREP_REMOTE_REP, 0, primaryShardHostingNode, remoteNodeName)) + .get() + ); + ensureGreen(REMOTE_PRI_DOCREP_REMOTE_REP); + ClusterState clusterState = internalCluster().client().admin().cluster().prepareState().get().getState(); + String primaryShardHostingNodeId = clusterState.getRoutingTable() + .index(REMOTE_PRI_DOCREP_REMOTE_REP) + .shard(0) + .primaryShard() + .currentNodeId(); + assertTrue(clusterState.getNodes().get(primaryShardHostingNodeId).isRemoteStoreNode()); + + logger.info("---> Starting another remote enabled node"); + internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + + logger.info("---> Expanding index to 2 replica copies"); + Settings twoReplicas = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2).build(); + assertAcked( + internalCluster().client() + .admin() + .indices() + .prepareUpdateSettings() + .setIndices(REMOTE_PRI_DOCREP_REMOTE_REP) + .setSettings(twoReplicas) + .get() + ); + ensureGreen(REMOTE_PRI_DOCREP_REMOTE_REP); + + int secondBatch = randomIntBetween(1, 10); + logger.info("---> Indexing another {} docs", secondBatch); + indexBulk(REMOTE_PRI_DOCREP_REMOTE_REP, secondBatch); + // Defensive check to ensure that doc count in replica shard catches up to the primary copy + refreshAndWaitForReplication(REMOTE_PRI_DOCREP_REMOTE_REP); + assertReplicaAndPrimaryConsistency(REMOTE_PRI_DOCREP_REMOTE_REP, firstBatch, secondBatch); + } + + /* + Checks if retention leases are published on primary shard and it's docrep copies, but not on remote copies + */ + public void testRetentionLeasePresentOnDocrepReplicaButNotRemote() throws Exception { + /* Reducing indices.memory.shard_inactive_time 
to force a flush and trigger translog sync, + instead of relying on Global CKP Sync action which doesn't run on remote enabled copies + + Under steady state, RetentionLeases would be on (GlobalCkp + 1) on a + docrep enabled shard copy and (GlobalCkp) for a remote enabled shard copy. + This is because we block translog sync on remote enabled shard copies during the GlobalCkpSync background task. + + RLs on remote enabled copies are brought up to (GlobalCkp + 1) upon a flush request issued by IndexingMemoryController + when the shard becomes inactive after SHARD_INACTIVE_TIME_SETTING interval. + + Flush triggers a force sync of translog which bumps the RetentionLease sequence number along with it + */ + extraSettings = Settings.builder().put(IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.getKey(), "3s").build(); + testRemotePrimaryDocRepAndRemoteReplica(); + DiscoveryNodes nodes = internalCluster().client().admin().cluster().prepareState().get().getState().getNodes(); + assertBusy(() -> { + for (ShardStats shardStats : internalCluster().client() + .admin() + .indices() + .prepareStats(REMOTE_PRI_DOCREP_REMOTE_REP) + .get() + .getShards()) { + ShardRouting shardRouting = shardStats.getShardRouting(); + DiscoveryNode discoveryNode = nodes.get(shardRouting.currentNodeId()); + RetentionLeases retentionLeases = shardStats.getRetentionLeaseStats().retentionLeases(); + if (shardRouting.primary()) { + // Primary copy should be on remote node and should have retention leases + assertTrue(discoveryNode.isRemoteStoreNode()); + assertCheckpointsConsistency(shardStats); + assertRetentionLeaseConsistency(shardStats, retentionLeases); + } else { + // Checkpoints and Retention Leases are not synced to remote replicas + if (discoveryNode.isRemoteStoreNode()) { + assertTrue(shardStats.getRetentionLeaseStats().retentionLeases().leases().isEmpty()); + } else { + // Replica copy on docrep node should have retention leases + assertCheckpointsConsistency(shardStats); + assertRetentionLeaseConsistency(shardStats, retentionLeases); + } + } + } + }); + } + + /* + Scenario: + - Starts 1 docrep backed data node + - Creates an index with 0 replica + - Starts 1 remote backed data node + - Move primary copy from docrep to remote through _cluster/reroute + - Expands index to 1 replica + - Stops remote enabled node + - Ensure doc count is same after failover + - Index some more docs to ensure working of failed-over primary + */ + public void testFailoverRemotePrimaryToDocrepReplica() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + + logger.info("---> Starting 1 docrep data node"); + String docrepNodeName = internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + assertEquals(internalCluster().client().admin().cluster().prepareGetRepositories().get().repositories().size(), 0); + + logger.info("---> Creating index with 0 replica"); + Settings excludeRemoteNode = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build(); + createIndex(FAILOVER_REMOTE_TO_DOCREP, excludeRemoteNode); + ensureGreen(FAILOVER_REMOTE_TO_DOCREP); + initDocRepToRemoteMigration(); + logger.info("---> Starting 1 remote enabled data node"); + addRemote = true; + String remoteNodeName = internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + assertEquals( + internalCluster().client() + .admin() + .cluster() + .prepareGetRepositories(REPOSITORY_NAME, REPOSITORY_2_NAME) + .get() + .repositories() + .size(), + 2 + ); + + logger.info("---> Starting doc 
ingestion in parallel thread"); + AsyncIndexingService asyncIndexingService = new AsyncIndexingService(FAILOVER_REMOTE_TO_DOCREP); + asyncIndexingService.startIndexing(); + + String primaryShardHostingNode = primaryNodeName(FAILOVER_REMOTE_TO_DOCREP); + logger.info("---> Moving primary copy from {} to remote enabled node {}", primaryShardHostingNode, remoteNodeName); + assertAcked( + internalCluster().client() + .admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(FAILOVER_REMOTE_TO_DOCREP, 0, primaryShardHostingNode, remoteNodeName)) + .get() + ); + ensureGreen(FAILOVER_REMOTE_TO_DOCREP); + + logger.info("---> Expanding index to 1 replica copy"); + Settings oneReplica = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build(); + assertAcked( + internalCluster().client() + .admin() + .indices() + .prepareUpdateSettings() + .setIndices(FAILOVER_REMOTE_TO_DOCREP) + .setSettings(oneReplica) + .get() + ); + ensureGreen(FAILOVER_REMOTE_TO_DOCREP); + logger.info("---> Stopping indexing thread"); + asyncIndexingService.stopIndexing(); + + refreshAndWaitForReplication(FAILOVER_REMOTE_TO_DOCREP); + Map shardStatsMap = internalCluster().client() + .admin() + .indices() + .prepareStats(FAILOVER_REMOTE_TO_DOCREP) + .setDocs(true) + .get() + .asMap(); + DiscoveryNodes nodes = internalCluster().client().admin().cluster().prepareState().get().getState().getNodes(); + long initialPrimaryDocCount = 0; + for (ShardRouting shardRouting : shardStatsMap.keySet()) { + if (shardRouting.primary()) { + assertTrue(nodes.get(shardRouting.currentNodeId()).isRemoteStoreNode()); + initialPrimaryDocCount = shardStatsMap.get(shardRouting).getStats().getDocs().getCount(); + } + } + int firstBatch = (int) asyncIndexingService.getIndexedDocs(); + assertReplicaAndPrimaryConsistency(FAILOVER_REMOTE_TO_DOCREP, firstBatch, 0); + + logger.info("---> Stop remote store enabled node"); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(remoteNodeName)); + ensureStableCluster(2); + ensureYellow(FAILOVER_REMOTE_TO_DOCREP); + + shardStatsMap = internalCluster().client().admin().indices().prepareStats(FAILOVER_REMOTE_TO_DOCREP).setDocs(true).get().asMap(); + nodes = internalCluster().client().admin().cluster().prepareState().get().getState().getNodes(); + long primaryDocCountAfterFailover = 0; + for (ShardRouting shardRouting : shardStatsMap.keySet()) { + if (shardRouting.primary()) { + assertFalse(nodes.get(shardRouting.currentNodeId()).isRemoteStoreNode()); + primaryDocCountAfterFailover = shardStatsMap.get(shardRouting).getStats().getDocs().getCount(); + } + } + assertEquals(initialPrimaryDocCount, primaryDocCountAfterFailover); + + logger.info("---> Index some more docs to ensure that the failed over primary is ingesting new docs"); + int secondBatch = randomIntBetween(1, 10); + logger.info("---> Indexing {} more docs", secondBatch); + indexBulk(FAILOVER_REMOTE_TO_DOCREP, secondBatch); + refreshAndWaitForReplication(FAILOVER_REMOTE_TO_DOCREP); + + shardStatsMap = internalCluster().client().admin().indices().prepareStats(FAILOVER_REMOTE_TO_DOCREP).setDocs(true).get().asMap(); + assertEquals(1, shardStatsMap.size()); + shardStatsMap.forEach( + (shardRouting, shardStats) -> { assertEquals(firstBatch + secondBatch, shardStats.getStats().getDocs().getCount()); } + ); + } + + /* + Scenario: + - Starts 1 docrep backed data node + - Creates an index with 0 replica + - Starts 1 remote backed data node + - Move primary copy from docrep to remote through _cluster/reroute + - Expands 
index to 1 replica + - Stops remote enabled node + - Ensure doc count is same after failover + - Index some more docs to ensure working of failed-over primary + - Starts another remote node + - Move primary copy from docrep to remote through _cluster/reroute + - Ensure that remote store is seeded in the new remote node by asserting remote uploads from that node > 0 + */ + public void testFailoverRemotePrimaryToDocrepReplicaReseedToRemotePrimary() throws Exception { + testFailoverRemotePrimaryToDocrepReplica(); + + logger.info("---> Removing replica copy"); + assertAcked( + internalCluster().client() + .admin() + .indices() + .prepareUpdateSettings(FAILOVER_REMOTE_TO_DOCREP) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)) + .get() + ); + ensureGreen(FAILOVER_REMOTE_TO_DOCREP); + + logger.info("---> Starting a new remote enabled node"); + addRemote = true; + String remoteNodeName = internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + assertEquals( + internalCluster().client() + .admin() + .cluster() + .prepareGetRepositories(REPOSITORY_NAME, REPOSITORY_2_NAME) + .get() + .repositories() + .size(), + 2 + ); + + String primaryShardHostingNode = primaryNodeName(FAILOVER_REMOTE_TO_DOCREP); + logger.info("---> Moving primary copy from {} to remote enabled node {}", primaryShardHostingNode, remoteNodeName); + assertAcked( + internalCluster().client() + .admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(FAILOVER_REMOTE_TO_DOCREP, 0, primaryShardHostingNode, remoteNodeName)) + .get() + ); + ensureGreen(FAILOVER_REMOTE_TO_DOCREP); + + Map shardStatsMap = internalCluster().client() + .admin() + .indices() + .prepareStats(FAILOVER_REMOTE_TO_DOCREP) + .get() + .asMap(); + DiscoveryNodes discoveryNodes = internalCluster().client().admin().cluster().prepareState().get().getState().getNodes(); + assertEquals(1, shardStatsMap.size()); + shardStatsMap.forEach((shardRouting, shardStats) -> { + if (discoveryNodes.get(shardRouting.currentNodeId()).isRemoteStoreNode()) { + RemoteSegmentStats remoteSegmentStats = shardStats.getStats().getSegments().getRemoteSegmentStats(); + assertTrue(remoteSegmentStats.getTotalUploadTime() > 0); + assertTrue(remoteSegmentStats.getUploadBytesSucceeded() > 0); + } + }); + } + + private void assertReplicaAndPrimaryConsistency(String indexName, int firstBatch, int secondBatch) throws Exception { + assertBusy(() -> { + Map shardStatsMap = internalCluster().client() + .admin() + .indices() + .prepareStats(indexName) + .setDocs(true) + .get() + .asMap(); + DiscoveryNodes nodes = internalCluster().client().admin().cluster().prepareState().get().getState().getNodes(); + for (ShardRouting shardRouting : shardStatsMap.keySet()) { + CommonStats shardStats = shardStatsMap.get(shardRouting).getStats(); + if (shardRouting.primary()) { + assertEquals(firstBatch + secondBatch, shardStats.getDocs().getCount()); + assertTrue(nodes.get(shardRouting.currentNodeId()).isRemoteStoreNode()); + RemoteSegmentStats remoteSegmentStats = shardStats.getSegments().getRemoteSegmentStats(); + assertTrue(remoteSegmentStats.getUploadBytesSucceeded() > 0); + assertTrue(remoteSegmentStats.getTotalUploadTime() > 0); + } else { + boolean remoteNode = nodes.get(shardRouting.currentNodeId()).isRemoteStoreNode(); + assertEquals( + "Mismatched doc count. Is this on remote node ? 
" + remoteNode, + firstBatch + secondBatch, + shardStats.getDocs().getCount() + ); + RemoteSegmentStats remoteSegmentStats = shardStats.getSegments().getRemoteSegmentStats(); + if (remoteNode) { + assertTrue(remoteSegmentStats.getDownloadBytesStarted() > 0); + assertTrue(remoteSegmentStats.getTotalDownloadTime() > 0); + } else { + assertEquals(0, remoteSegmentStats.getUploadBytesSucceeded()); + assertEquals(0, remoteSegmentStats.getTotalUploadTime()); + } + } + } + }); + } + + /** + * For a docrep enabled shard copy or a primary shard copy, + * asserts that the stored Retention Leases equals to 1 + maxSeqNo ingested on the node + * + * @param shardStats ShardStats object from NodesStats API + * @param retentionLeases RetentionLeases from NodesStats API + */ + private static void assertRetentionLeaseConsistency(ShardStats shardStats, RetentionLeases retentionLeases) { + long maxSeqNo = shardStats.getSeqNoStats().getMaxSeqNo(); + for (RetentionLease rl : retentionLeases.leases()) { + assertEquals(maxSeqNo + 1, rl.retainingSequenceNumber()); + } + } + + /** + * For a docrep enabled shard copy or a primary shard copy, + * asserts that local and global checkpoints are up-to-date with maxSeqNo of doc operations + * + * @param shardStats ShardStats object from NodesStats API + */ + private static void assertCheckpointsConsistency(ShardStats shardStats) { + long maxSeqNo = shardStats.getSeqNoStats().getMaxSeqNo(); + long localCkp = shardStats.getSeqNoStats().getLocalCheckpoint(); + long globalCkp = shardStats.getSeqNoStats().getGlobalCheckpoint(); + + assertEquals(maxSeqNo, localCkp); + assertEquals(maxSeqNo, globalCkp); + } +} diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java index a7a13afd2597c..c1d13128c18b1 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java @@ -442,7 +442,7 @@ protected long primaryOperationSize(BulkShardRequest request) { @Override public ReplicationMode getReplicationMode(IndexShard indexShard) { - if (indexShard.isRemoteTranslogEnabled()) { + if (indexShard.indexSettings().isRemoteNode()) { return ReplicationMode.PRIMARY_TERM_VALIDATION; } return super.getReplicationMode(indexShard); diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationModeAwareProxy.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationModeAwareProxy.java index 189bc82348a0c..9f5e31a9c6926 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationModeAwareProxy.java +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationModeAwareProxy.java @@ -9,6 +9,8 @@ package org.opensearch.action.support.replication; import org.opensearch.action.support.replication.ReplicationOperation.ReplicaResponse; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.core.action.ActionListener; @@ -31,14 +33,22 @@ public class ReplicationModeAwareProxy primaryTermValidationProxy; + private final DiscoveryNodes discoveryNodes; + + private final boolean isRemoteEnabled; + public ReplicationModeAwareProxy( ReplicationMode replicationModeOverride, + DiscoveryNodes discoveryNodes, ReplicationOperation.Replicas replicasProxy, - ReplicationOperation.Replicas 
primaryTermValidationProxy + ReplicationOperation.Replicas primaryTermValidationProxy, + boolean remoteIndexSettingsEnabled ) { super(replicasProxy); this.replicationModeOverride = Objects.requireNonNull(replicationModeOverride); this.primaryTermValidationProxy = Objects.requireNonNull(primaryTermValidationProxy); + this.discoveryNodes = discoveryNodes; + this.isRemoteEnabled = remoteIndexSettingsEnabled; } @Override @@ -60,16 +70,26 @@ protected void performOnReplicaProxy( @Override ReplicationMode determineReplicationMode(ShardRouting shardRouting, ShardRouting primaryRouting) { - // If the current routing is the primary, then it does not need to be replicated if (shardRouting.isSameAllocation(primaryRouting)) { return ReplicationMode.NO_REPLICATION; } - + // Perform full replication during primary relocation if (primaryRouting.relocating() && shardRouting.isSameAllocation(primaryRouting.getTargetRelocatingShard())) { return ReplicationMode.FULL_REPLICATION; } - + /* + Only applicable during remote store migration. + During the migration process, remote based index settings will not be enabled, + thus we will rely on node attributes to figure out the replication mode + */ + if (isRemoteEnabled == false) { + DiscoveryNode targetNode = discoveryNodes.get(shardRouting.currentNodeId()); + if (targetNode != null && targetNode.isRemoteStoreNode() == false) { + // Perform full replication if replica is hosted on a non-remote node. + return ReplicationMode.FULL_REPLICATION; + } + } return replicationModeOverride; } } diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java index 95f998e2d89c2..8d86128e36441 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java @@ -356,7 +356,7 @@ public void performOn( * @return the overridden replication mode. */ public ReplicationMode getReplicationMode(IndexShard indexShard) { - if (indexShard.isRemoteTranslogEnabled()) { + if (indexShard.indexSettings().isRemoteNode()) { return ReplicationMode.NO_REPLICATION; } return ReplicationMode.FULL_REPLICATION; @@ -642,8 +642,14 @@ public void handleException(TransportException exp) { primaryRequest.getPrimaryTerm(), initialRetryBackoffBound, retryTimeout, - indexShard.isRemoteTranslogEnabled() - ? new ReplicationModeAwareProxy<>(getReplicationMode(indexShard), replicasProxy, termValidationProxy) + indexShard.indexSettings().isRemoteNode() + ? 
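/* on a remote store enabled node, the mode-aware proxy below picks the replication mode per shard copy from node attributes */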
new ReplicationModeAwareProxy<>( + getReplicationMode(indexShard), + clusterState.getNodes(), + replicasProxy, + termValidationProxy, + indexShard.isRemoteTranslogEnabled() + ) : new FanoutReplicationProxy<>(replicasProxy) ).execute(); } diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index 9ded1b174d4c6..a7b29314210df 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -44,6 +44,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.CheckedFunction; @@ -462,7 +463,8 @@ public synchronized IndexShard createShard( final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, final RepositoriesService repositoriesService, final DiscoveryNode targetNode, - @Nullable DiscoveryNode sourceNode + @Nullable DiscoveryNode sourceNode, + DiscoveryNodes discoveryNodes ) throws IOException { Objects.requireNonNull(retentionLeaseSyncer); /* @@ -553,7 +555,8 @@ public synchronized IndexShard createShard( nodeEnv.nodeId(), recoverySettings, remoteStoreSettings, - seedRemote + seedRemote, + discoveryNodes ); eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); eventListener.afterIndexShardCreated(indexShard); diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index f9062585c0093..82875564c1c07 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -1237,11 +1237,7 @@ public boolean isSegRepEnabledOrRemoteNode() { } public boolean isSegRepLocalEnabled() { - return isSegRepEnabledOrRemoteNode() && !isRemoteStoreEnabled(); - } - - public boolean isSegRepWithRemoteEnabled() { - return isSegRepEnabledOrRemoteNode() && isRemoteStoreEnabled(); + return ReplicationType.SEGMENT.equals(replicationType) && !isRemoteStoreEnabled(); } /** diff --git a/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java b/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java index ca1dfe2d5ad01..0c167d6d80b5c 100644 --- a/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java +++ b/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java @@ -135,7 +135,7 @@ protected void shardOperationOnReplica(Request shardRequest, IndexShard replica, private void maybeSyncTranslog(final IndexShard indexShard) throws IOException { if (indexShard.getTranslogDurability() == Translog.Durability.REQUEST && indexShard.getLastSyncedGlobalCheckpoint() < indexShard.getLastKnownGlobalCheckpoint() - && indexShard.isRemoteTranslogEnabled() == false) { + && indexShard.indexSettings().isRemoteNode() == false) { indexShard.sync(); } } diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 0e625e9f30320..b2eb2f03486ac 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ 
b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -253,6 +253,8 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L private volatile ReplicationCheckpoint latestReplicationCheckpoint; + private final Function isShardOnRemoteEnabledNode; + /** * Get all retention leases tracked on this shard. * @@ -999,7 +1001,8 @@ public ReplicationTracker( final LongConsumer onGlobalCheckpointUpdated, final LongSupplier currentTimeMillisSupplier, final BiConsumer> onSyncRetentionLeases, - final Supplier safeCommitInfoSupplier + final Supplier safeCommitInfoSupplier, + final Function isShardOnRemoteEnabledNode ) { this( shardId, @@ -1011,7 +1014,8 @@ public ReplicationTracker( currentTimeMillisSupplier, onSyncRetentionLeases, safeCommitInfoSupplier, - x -> {} + x -> {}, + isShardOnRemoteEnabledNode ); } @@ -1037,7 +1041,8 @@ public ReplicationTracker( final LongSupplier currentTimeMillisSupplier, final BiConsumer> onSyncRetentionLeases, final Supplier safeCommitInfoSupplier, - final Consumer onReplicationGroupUpdated + final Consumer onReplicationGroupUpdated, + final Function isShardOnRemoteEnabledNode ) { super(shardId, indexSettings); assert globalCheckpoint >= SequenceNumbers.UNASSIGNED_SEQ_NO : "illegal initial global checkpoint: " + globalCheckpoint; @@ -1060,6 +1065,7 @@ public ReplicationTracker( this.safeCommitInfoSupplier = safeCommitInfoSupplier; this.onReplicationGroupUpdated = onReplicationGroupUpdated; this.latestReplicationCheckpoint = indexSettings.isSegRepEnabledOrRemoteNode() ? ReplicationCheckpoint.empty(shardId) : null; + this.isShardOnRemoteEnabledNode = isShardOnRemoteEnabledNode; assert Version.V_EMPTY.equals(indexSettings.getIndexVersionCreated()) == false; assert invariant(); } @@ -1088,8 +1094,12 @@ private ReplicationGroup calculateReplicationGroup() { } else { newVersion = replicationGroup.getVersion() + 1; } - - assert indexSettings().isRemoteTranslogStoreEnabled() + assert indexSettings.isRemoteTranslogStoreEnabled() + // Handle migration cases. 
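(While the migration is still in progress the remote index settings are not yet enabled, so the node attributes consulted here are the only reliable signal that a copy is remote-backed.)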
Ignore assertion if any of the shard copies in the replication group is assigned to a remote node + || (replicationGroup != null + && replicationGroup.getReplicationTargets() + .stream() + .anyMatch(shardRouting -> isShardOnRemoteEnabledNode.apply(shardRouting.currentNodeId()))) || checkpoints.entrySet().stream().filter(e -> e.getValue().tracked).allMatch(e -> e.getValue().replicated) : "In absence of remote translog store, all tracked shards must have replication mode as LOGICAL_REPLICATION"; @@ -1248,7 +1258,9 @@ private void createReplicationLagTimers() { if (cps.inSync && replicationGroup.getUnavailableInSyncShards().contains(allocationId) == false && isPrimaryRelocation(allocationId) == false - && latestReplicationCheckpoint.isAheadOf(cps.visibleReplicationCheckpoint)) { + && latestReplicationCheckpoint.isAheadOf(cps.visibleReplicationCheckpoint) + && (indexSettings.isSegRepLocalEnabled() == true + || isShardOnRemoteEnabledNode.apply(routingTable.getByAllocationId(allocationId).currentNodeId()))) { cps.checkpointTimers.computeIfAbsent(latestReplicationCheckpoint, ignored -> new SegmentReplicationLagTimer()); logger.trace( () -> new ParameterizedMessage( @@ -1366,8 +1378,7 @@ private void addPeerRecoveryRetentionLeaseForSolePrimary() { final ShardRouting primaryShard = routingTable.primaryShard(); final String leaseId = getPeerRecoveryRetentionLeaseId(primaryShard); if (retentionLeases.get(leaseId) == null) { - if (replicationGroup.getReplicationTargets().equals(Collections.singletonList(primaryShard)) - || indexSettings().isRemoteTranslogStoreEnabled()) { + if (replicationGroup.getReplicationTargets().equals(Collections.singletonList(primaryShard)) || indexSettings.isRemoteNode()) { assert primaryShard.allocationId().getId().equals(shardAllocationId) : routingTable.assignedShards() + " vs " + shardAllocationId; @@ -1453,7 +1464,12 @@ public synchronized void updateFromClusterManager( globalCheckpoint, inSync, inSync, - isReplicated(initializingId, primaryAllocationId, primaryTargetAllocationId) + isReplicated( + initializingId, + primaryAllocationId, + primaryTargetAllocationId, + assignedToRemoteStoreNode(routingTable, initializingId) + ) ) ); } @@ -1472,7 +1488,12 @@ public synchronized void updateFromClusterManager( globalCheckpoint, false, false, - isReplicated(initializingId, primaryAllocationId, primaryTargetAllocationId) + isReplicated( + initializingId, + primaryAllocationId, + primaryTargetAllocationId, + assignedToRemoteStoreNode(routingTable, initializingId) + ) ) ); } @@ -1486,7 +1507,12 @@ public synchronized void updateFromClusterManager( globalCheckpoint, true, true, - isReplicated(inSyncId, primaryAllocationId, primaryTargetAllocationId) + isReplicated( + inSyncId, + primaryAllocationId, + primaryTargetAllocationId, + assignedToRemoteStoreNode(routingTable, inSyncId) + ) ) ); } @@ -1503,6 +1529,12 @@ public synchronized void updateFromClusterManager( assert invariant(); } + private boolean assignedToRemoteStoreNode(IndexShardRoutingTable routingTable, String allocationId) { + return indexSettings().isRemoteStoreEnabled() + || (routingTable.getByAllocationId(allocationId) != null + && isShardOnRemoteEnabledNode.apply(routingTable.getByAllocationId(allocationId).currentNodeId())); + } + /** * Returns whether the requests are replicated considering the remote translog existence, current/primary/primary target allocation ids. 
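During migration this is decided per copy from the hosting node's attributes (see assignedToRemoteStoreNode above): a copy on a docrep node stays fully replicated, while a copy on a remote enabled node is replicated only if it is the primary or its relocation target.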
* @@ -1511,13 +1543,16 @@ public synchronized void updateFromClusterManager( * @param primaryTargetAllocationId primary target allocation id * @return the replication mode. */ - private boolean isReplicated(String allocationId, String primaryAllocationId, String primaryTargetAllocationId) { - // If remote translog is enabled, then returns replication mode checking current allocation id against the + private boolean isReplicated( + String allocationId, + String primaryAllocationId, + String primaryTargetAllocationId, + boolean assignedToRemoteStoreNode + ) { + // If assigned to a remote node, returns true if given allocation id matches the primary or its relocation target allocation // primary and primary target allocation id. - // If remote translog is enabled, then returns true if given allocation id matches the primary or it's relocation target allocation - // id. - if (indexSettings().isRemoteTranslogStoreEnabled()) { - return (allocationId.equals(primaryAllocationId) || allocationId.equals(primaryTargetAllocationId)); + if (assignedToRemoteStoreNode == true) { + return allocationId.equals(primaryAllocationId) || allocationId.equals(primaryTargetAllocationId); } // For other case which is local translog, return true as the requests are replicated to all shards in the replication group. return true; diff --git a/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java index 675d60ec2b63d..b47025d75282c 100644 --- a/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java @@ -43,7 +43,7 @@ protected boolean performAfterRefreshWithPermit(boolean didRefresh) { if (didRefresh && shard.state() == IndexShardState.STARTED && shard.getReplicationTracker().isPrimaryMode() - && !shard.indexSettings.isSegRepWithRemoteEnabled()) { + && shard.indexSettings.isRemoteNode() == false) { publisher.publish(shard, shard.getLatestReplicationCheckpoint()); } return true; diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 2b935b743512a..484083a5b1260 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -67,6 +67,8 @@ import org.opensearch.cluster.metadata.DataStream; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MappingMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RecoverySource.SnapshotRecoverySource; @@ -357,6 +359,7 @@ Runnable getGlobalCheckpointSyncer() { On source remote node , it will be REMOTE_MIGRATING_UNSEEDED when relocating from docrep node */ private final ShardMigrationState shardMigrationState; + private DiscoveryNodes discoveryNodes; public IndexShard( final ShardRouting shardRouting, @@ -386,7 +389,8 @@ public IndexShard( final String nodeId, final RecoverySettings recoverySettings, final RemoteStoreSettings remoteStoreSettings, - boolean seedRemote + boolean seedRemote, + final DiscoveryNodes discoveryNodes ) throws IOException { super(shardRouting.shardId(), indexSettings); assert shardRouting.initializing(); @@ -448,7 +452,8 @@ 
public IndexShard( threadPool::absoluteTimeInMillis, (retentionLeases, listener) -> retentionLeaseSyncer.sync(shardId, aId, getPendingPrimaryTerm(), retentionLeases, listener), this::getSafeCommitInfo, - pendingReplicationActions + pendingReplicationActions, + isShardOnRemoteEnabledNode ); // the query cache is a node-level thing, however we want the most popular filters @@ -486,6 +491,7 @@ public boolean shouldCache(Query query) { this.remoteStoreSettings = remoteStoreSettings; this.fileDownloader = new RemoteStoreFileDownloader(shardRouting.shardId(), threadPool, recoverySettings); this.shardMigrationState = getShardMigrationState(indexSettings, seedRemote); + this.discoveryNodes = discoveryNodes; } public ThreadPool getThreadPool() { @@ -506,6 +512,23 @@ public boolean shouldSeedRemoteStore() { return shardMigrationState == REMOTE_MIGRATING_UNSEEDED; } + /** + * To be delegated to {@link ReplicationTracker} so that relevant remote store based + * operations can be ignored during engine migration + *
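Returns {@code false} when the node id cannot be resolved, so callers fall back to the docrep (logical replication) path.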

    + * Has explicit null checks to ensure that the {@link ReplicationTracker#invariant()} + * check does not fail during a cluster manager state update when the latest replication group + * calculation is not yet done and the cached replication group details are available + */ + public Function isShardOnRemoteEnabledNode = nodeId -> { + DiscoveryNode node = discoveryNodes.get(nodeId); + if (node != null) { + logger.trace("Node {} has remote_enabled as {}", nodeId, node.isRemoteStoreNode()); + return node.isRemoteStoreNode(); + } + return false; + }; + public boolean isRemoteSeeded() { return shardMigrationState == REMOTE_MIGRATING_SEEDED; } @@ -616,8 +639,10 @@ public void updateShardState( final BiConsumer> primaryReplicaSyncer, final long applyingClusterStateVersion, final Set inSyncAllocationIds, - final IndexShardRoutingTable routingTable + final IndexShardRoutingTable routingTable, + DiscoveryNodes discoveryNodes ) throws IOException { + this.discoveryNodes = discoveryNodes; final ShardRouting currentRouting; synchronized (mutex) { currentRouting = this.shardRouting; @@ -3495,8 +3520,8 @@ public void updateGlobalCheckpointOnReplica(final long globalCheckpoint, final S * When remote translog is enabled for an index, replication operation is limited to primary term validation and does not * update local checkpoint at replica, so the local checkpoint at replica can be less than globalCheckpoint. */ - assert (state() != IndexShardState.POST_RECOVERY && state() != IndexShardState.STARTED) - || indexSettings.isRemoteTranslogStoreEnabled() : "supposedly in-sync shard copy received a global checkpoint [" + assert (state() != IndexShardState.POST_RECOVERY && state() != IndexShardState.STARTED) || indexSettings.isRemoteNode() + : "supposedly in-sync shard copy received a global checkpoint [" + globalCheckpoint + "] " + "that is higher than its local checkpoint [" @@ -3993,7 +4018,7 @@ private boolean isRemoteStoreEnabled() { } public boolean isRemoteTranslogEnabled() { - return indexSettings() != null && indexSettings().isRemoteTranslogStoreEnabled(); + return indexSettings() != null && (indexSettings().isRemoteTranslogStoreEnabled()); } /** diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index ba78c28f3db88..4f68c03913199 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -54,6 +54,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; @@ -1021,7 +1022,8 @@ public IndexShard createShard( final RetentionLeaseSyncer retentionLeaseSyncer, final DiscoveryNode targetNode, final DiscoveryNode sourceNode, - final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory + final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, + final DiscoveryNodes discoveryNodes ) throws IOException { Objects.requireNonNull(retentionLeaseSyncer); ensureChangesAllowed(); @@ -1036,7 +1038,8 @@ public IndexShard createShard( remoteStoreStatsTrackerFactory, repositoriesService, targetNode, - sourceNode + sourceNode, + discoveryNodes ); 
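+ // The cluster's DiscoveryNodes are threaded through shard creation so that the shard's ReplicationTracker can distinguish remote-backed copies from docrep copies during migration.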
indexShard.addShardFailureCallback(onShardFailure); indexShard.startRecovery(recoveryState, recoveryTargetService, recoveryListener, repositoriesService, mapping -> { diff --git a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java index 7fb8b172ae352..2c3ffcdd9e0ba 100644 --- a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java @@ -680,7 +680,8 @@ private void createShard(DiscoveryNodes nodes, RoutingTable routingTable, ShardR retentionLeaseSyncer, nodes.getLocalNode(), sourceNode, - remoteStoreStatsTrackerFactory + remoteStoreStatsTrackerFactory, + nodes ); } catch (Exception e) { failAndRemoveShard(shardRouting, true, "failed to create shard", e, state); @@ -714,7 +715,8 @@ private void updateShard( primaryReplicaSyncer::resync, clusterState.version(), inSyncIds, - indexShardRoutingTable + indexShardRoutingTable, + nodes ); } catch (Exception e) { failAndRemoveShard(shardRouting, true, "failed updating shard routing entry", e, clusterState); @@ -922,7 +924,8 @@ void updateShardState( BiConsumer> primaryReplicaSyncer, long applyingClusterStateVersion, Set inSyncAllocationIds, - IndexShardRoutingTable routingTable + IndexShardRoutingTable routingTable, + DiscoveryNodes discoveryNodes ) throws IOException; } @@ -1040,7 +1043,8 @@ T createShard( RetentionLeaseSyncer retentionLeaseSyncer, DiscoveryNode targetNode, @Nullable DiscoveryNode sourceNode, - RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory + RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, + DiscoveryNodes discoveryNodes ) throws IOException; /** diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandlerFactory.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandlerFactory.java index 0ccb1ac2133cf..96e85154e6248 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandlerFactory.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandlerFactory.java @@ -23,8 +23,7 @@ public static RecoverySourceHandler create( StartRecoveryRequest request, RecoverySettings recoverySettings ) { - boolean isReplicaRecoveryWithRemoteTranslog = request.isPrimaryRelocation() == false - && (shard.isRemoteTranslogEnabled() || shard.isMigratingToRemote()); + boolean isReplicaRecoveryWithRemoteTranslog = request.isPrimaryRelocation() == false && request.targetNode().isRemoteStoreNode(); if (isReplicaRecoveryWithRemoteTranslog) { return new RemoteStorePeerRecoverySourceHandler( shard, diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java index 16311d5d2cfb7..f47b082de3856 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java @@ -213,10 +213,17 @@ public void prepareForTranslogOperations(int totalTranslogOps, ActionListener { state().getIndex().setFileDetailsComplete(); // ops-based recoveries don't send the file details state().getTranslog().totalOperations(totalTranslogOps); + // Cleanup remote contents before opening new translog. 
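(Applies only when shouldSeedRemoteStore() is true, i.e. for a migrating primary that has not yet uploaded its state to the remote store.)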
+ // This prevents reading from any old Translog UUIDs during re-seeding + // (situation in which primary fails over to docrep replica and is re-seeded to remote again) + // which might end up causing a TranslogCorruptedException + if (indexShard.shouldSeedRemoteStore()) { + assert indexShard.routingEntry().primary() : "Remote seeding should only be true for the primary shard copy"; + indexShard.deleteRemoteStoreContents(); + } indexShard().openEngineAndSkipTranslogRecovery(); // upload to remote store in migration for primary shard - if (indexShard.shouldSeedRemoteStore() && indexShard.routingEntry().primary()) { - indexShard.deleteRemoteStoreContents(); + if (indexShard.shouldSeedRemoteStore()) { // This cleans up remote translog's 0 generation, as we don't want to get that uploaded indexShard.sync(); threadPool.executor(ThreadPool.Names.GENERIC).execute(() -> { indexShard.refresh("remote store migration"); }); diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java index 852003c9f3e4d..657705a8cd725 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java @@ -38,7 +38,7 @@ public SegmentReplicationSourceFactory( } public SegmentReplicationSource get(IndexShard shard) { - if (shard.indexSettings().isSegRepWithRemoteEnabled()) { + if (shard.indexSettings().isRemoteNode()) { return new RemoteStoreReplicationSource(shard); } else { return new PrimaryShardReplicationSource( diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java index 821ae42e31881..4d7d0a8633b5c 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java @@ -98,7 +98,7 @@ protected void doExecute(Task task, PublishCheckpointRequest request, ActionList @Override public ReplicationMode getReplicationMode(IndexShard indexShard) { - if (indexShard.isRemoteTranslogEnabled()) { + if (indexShard.indexSettings().isRemoteNode()) { return ReplicationMode.FULL_REPLICATION; } return super.getReplicationMode(indexShard); @@ -199,6 +199,12 @@ protected void shardOperationOnReplica(PublishCheckpointRequest request, IndexSh Objects.requireNonNull(replica); ActionListener.completeWith(listener, () -> { logger.trace(() -> new ParameterizedMessage("Checkpoint {} received on replica {}", request, replica.shardId())); + // Condition for ensuring that we ignore Segrep checkpoints received on Docrep shard copies. + // This case will hit iff the replica hosting node is not remote enabled and replication type != SEGMENT + if (replica.indexSettings().isRemoteNode() == false && replica.indexSettings().isSegRepLocalEnabled() == false) { + logger.trace("Received segrep checkpoint on a docrep shard copy during an ongoing remote migration. 
NoOp."); + return new ReplicaResult(); + } if (request.getCheckpoint().getShardId().equals(replica.shardId())) { replicationService.onNewCheckpoint(request.getCheckpoint(), replica); } diff --git a/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java index ef26bc225b0c7..5ca5f53f180be 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java @@ -82,6 +82,7 @@ import org.mockito.ArgumentCaptor; import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.opensearch.test.ClusterServiceUtils.setState; import static org.hamcrest.Matchers.arrayWithSize; @@ -332,15 +333,15 @@ public void testUnavailableShardsMarkedAsStale() throws Exception { public void testGetReplicationModeWithRemoteTranslog() { TransportVerifyShardBeforeCloseAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { TransportVerifyShardBeforeCloseAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); - assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); + assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } private TransportVerifyShardBeforeCloseAction createAction() { diff --git a/server/src/test/java/org/opensearch/action/admin/indices/flush/TransportShardFlushActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/flush/TransportShardFlushActionTests.java index 09215088bd04b..c9d3a6c4c7605 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/flush/TransportShardFlushActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/flush/TransportShardFlushActionTests.java @@ -20,6 +20,7 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -28,14 +29,14 @@ public class TransportShardFlushActionTests extends OpenSearchTestCase { public void testGetReplicationModeWithRemoteTranslog() { TransportShardFlushAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { TransportShardFlushAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - 
when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } diff --git a/server/src/test/java/org/opensearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockActionTests.java index 8c4a6c023f9a5..90498d6d35700 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockActionTests.java @@ -20,6 +20,7 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -28,14 +29,14 @@ public class TransportVerifyShardIndexBlockActionTests extends OpenSearchTestCas public void testGetReplicationModeWithRemoteTranslog() { TransportVerifyShardIndexBlockAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { TransportVerifyShardIndexBlockAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } diff --git a/server/src/test/java/org/opensearch/action/admin/indices/refresh/TransportShardRefreshActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/refresh/TransportShardRefreshActionTests.java index b2eee904bad38..bc0b7e5cf14b2 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/refresh/TransportShardRefreshActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/refresh/TransportShardRefreshActionTests.java @@ -20,6 +20,7 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -28,14 +29,14 @@ public class TransportShardRefreshActionTests extends OpenSearchTestCase { public void testGetReplicationModeWithRemoteTranslog() { TransportShardRefreshAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { TransportShardRefreshAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); assertEquals(ReplicationMode.FULL_REPLICATION, 
action.getReplicationMode(indexShard)); } diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java index 65b555649b2d0..6331861c3dcb9 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java @@ -107,6 +107,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.LongSupplier; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.not; @@ -1237,14 +1238,14 @@ public void testHandlePrimaryTermValidationRequestSuccess() { public void testGetReplicationModeWithRemoteTranslog() { TransportShardBulkAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.PRIMARY_TERM_VALIDATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { TransportShardBulkAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } diff --git a/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java b/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java index da87a0a967f53..a2fefd6278321 100644 --- a/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java +++ b/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java @@ -84,6 +84,7 @@ import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.opensearch.test.ClusterServiceUtils.setState; import static org.opensearch.transport.TransportService.NOOP_TRANSPORT_INTERCEPTOR; @@ -233,14 +234,14 @@ public void testResyncDoesNotBlockOnPrimaryAction() throws Exception { public void testGetReplicationModeWithRemoteTranslog() { final TransportResyncReplicationAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { final TransportResyncReplicationAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } diff --git 
a/server/src/test/java/org/opensearch/action/support/replication/ReplicationModeAwareProxyTests.java b/server/src/test/java/org/opensearch/action/support/replication/ReplicationModeAwareProxyTests.java new file mode 100644 index 0000000000000..626c2f74f09c4 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/support/replication/ReplicationModeAwareProxyTests.java @@ -0,0 +1,216 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.support.replication; + +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.AllocationId; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.TestShardRouting; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.shard.IndexShardTestUtils; +import org.opensearch.test.OpenSearchTestCase; + +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; + +public class ReplicationModeAwareProxyTests extends OpenSearchTestCase { + + /* + Replication action running on the same primary copy from which it originates. + Action should not run and proxy should return ReplicationMode.NO_REPLICATION + */ + public void testDetermineReplicationModeTargetRoutingCurrentPrimary() { + ShardRouting targetRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node", + null, + true, + ShardRoutingState.STARTED, + AllocationId.newInitializing("abc") + ); + ShardRouting primaryRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node", + null, + true, + ShardRoutingState.STARTED, + AllocationId.newInitializing("abc") + ); + final ReplicationModeAwareProxy replicationModeAwareProxy = new ReplicationModeAwareProxy( + ReplicationMode.NO_REPLICATION, + DiscoveryNodes.builder().add(IndexShardTestUtils.getFakeRemoteEnabledNode("dummy-node")).build(), + mock(TransportReplicationAction.ReplicasProxy.class), + mock(TransportReplicationAction.ReplicasProxy.class), + randomBoolean() + ); + assertEquals(ReplicationMode.NO_REPLICATION, replicationModeAwareProxy.determineReplicationMode(targetRouting, primaryRouting)); + } + + /* + Replication action originating from a relocating primary to its relocation target being promoted to primary + Action should run and proxy should return ReplicationMode.FULL_REPLICATION + */ + public void testDetermineReplicationModeTargetRoutingRelocatingPrimary() { + AllocationId primaryId = AllocationId.newRelocation(AllocationId.newInitializing()); + AllocationId relocationTargetId = AllocationId.newTargetRelocation(primaryId); + ShardRouting targetRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node-2", + null, + true, + ShardRoutingState.INITIALIZING, + relocationTargetId + ); + ShardRouting primaryRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node", + "dummy-node-2", + true, + ShardRoutingState.RELOCATING, + primaryId + ); + final ReplicationModeAwareProxy replicationModeAwareProxy = new ReplicationModeAwareProxy( + ReplicationMode.NO_REPLICATION, 
DiscoveryNodes.builder() + .add(IndexShardTestUtils.getFakeRemoteEnabledNode(targetRouting.currentNodeId())) + .add(IndexShardTestUtils.getFakeRemoteEnabledNode(primaryRouting.currentNodeId())) + .build(), + mock(TransportReplicationAction.ReplicasProxy.class), + mock(TransportReplicationAction.ReplicasProxy.class), + randomBoolean() + ); + assertEquals(ReplicationMode.FULL_REPLICATION, replicationModeAwareProxy.determineReplicationMode(targetRouting, primaryRouting)); + } + + /* + Replication action originating from remote enabled primary to docrep replica during remote store migration + Action should run and proxy should return ReplicationMode.FULL_REPLICATION + */ + public void testDetermineReplicationModeTargetRoutingDocrepShard() { + ShardRouting primaryRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node", + true, + ShardRoutingState.STARTED + ); + ShardRouting targetRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node-2", + false, + ShardRoutingState.STARTED + ); + final ReplicationModeAwareProxy replicationModeAwareProxy = new ReplicationModeAwareProxy( + ReplicationMode.NO_REPLICATION, + DiscoveryNodes.builder() + .add(IndexShardTestUtils.getFakeRemoteEnabledNode(primaryRouting.currentNodeId())) + .add(IndexShardTestUtils.getFakeDiscoNode(targetRouting.currentNodeId())) + .build(), + mock(TransportReplicationAction.ReplicasProxy.class), + mock(TransportReplicationAction.ReplicasProxy.class), + false + ); + assertEquals(ReplicationMode.FULL_REPLICATION, replicationModeAwareProxy.determineReplicationMode(targetRouting, primaryRouting)); + } + + /* + Replication action originating from remote enabled primary to remote replica during remote store migration + Action should not run and proxy should return ReplicationMode.NO_REPLICATION + */ + public void testDetermineReplicationModeTargetRoutingRemoteShard() { + ShardRouting primaryRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node", + false, + ShardRoutingState.STARTED + ); + ShardRouting targetRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node-2", + true, + ShardRoutingState.STARTED + ); + final ReplicationModeAwareProxy replicationModeAwareProxy = new ReplicationModeAwareProxy( + ReplicationMode.NO_REPLICATION, + DiscoveryNodes.builder() + .add(IndexShardTestUtils.getFakeRemoteEnabledNode(targetRouting.currentNodeId())) + .add(IndexShardTestUtils.getFakeRemoteEnabledNode(primaryRouting.currentNodeId())) + .build(), + mock(TransportReplicationAction.ReplicasProxy.class), + mock(TransportReplicationAction.ReplicasProxy.class), + false + ); + assertEquals(ReplicationMode.NO_REPLICATION, replicationModeAwareProxy.determineReplicationMode(targetRouting, primaryRouting)); + } + + /* + Replication action originating from remote enabled primary to remote enabled replica during remote store migration + with an explicit replication mode specified + Action should run and proxy should return the overridden Replication Mode + */ + public void testDetermineReplicationWithExplicitOverrideTargetRoutingRemoteShard() { + ReplicationMode replicationModeOverride = ReplicationMode.PRIMARY_TERM_VALIDATION; + ShardRouting primaryRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node", + false, + ShardRoutingState.STARTED + ); + ShardRouting targetRouting = 
TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node-2", + true, + ShardRoutingState.STARTED + ); + final ReplicationModeAwareProxy replicationModeAwareProxy = new ReplicationModeAwareProxy( + replicationModeOverride, + DiscoveryNodes.builder() + .add(IndexShardTestUtils.getFakeRemoteEnabledNode(targetRouting.currentNodeId())) + .add(IndexShardTestUtils.getFakeRemoteEnabledNode(primaryRouting.currentNodeId())) + .build(), + mock(TransportReplicationAction.ReplicasProxy.class), + mock(TransportReplicationAction.ReplicasProxy.class), + false + ); + assertEquals(replicationModeOverride, replicationModeAwareProxy.determineReplicationMode(targetRouting, primaryRouting)); + } + + /* + Replication action originating from a remote-enabled primary on an index that itself has remote store settings enabled + Action should not query the DiscoveryNodes object + */ + public void testDetermineReplicationWithRemoteIndexSettingsEnabled() { + DiscoveryNodes mockDiscoveryNodes = mock(DiscoveryNodes.class); + ShardRouting primaryRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node", + false, + ShardRoutingState.STARTED + ); + ShardRouting targetRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node-2", + true, + ShardRoutingState.STARTED + ); + final ReplicationModeAwareProxy replicationModeAwareProxy = new ReplicationModeAwareProxy( + ReplicationMode.NO_REPLICATION, + mockDiscoveryNodes, + mock(TransportReplicationAction.ReplicasProxy.class), + mock(TransportReplicationAction.ReplicasProxy.class), + true + ); + replicationModeAwareProxy.determineReplicationMode(targetRouting, primaryRouting); + // Verify no interactions with the DiscoveryNodes object + verify(mockDiscoveryNodes, never()).get(anyString()); + } +} diff --git a/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java b/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java index 6b54623b03164..ec5fc1d19e40d 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java @@ -48,6 +48,7 @@ import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.common.collect.Tuple; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.set.Sets; @@ -59,6 +60,7 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.IndexShardNotStartedException; import org.opensearch.index.shard.IndexShardState; +import org.opensearch.index.shard.IndexShardTestUtils; import org.opensearch.index.shard.ReplicationGroup; import org.opensearch.node.NodeClosedException; import org.opensearch.test.OpenSearchTestCase; @@ -239,7 +241,13 @@ public void testReplicationWithRemoteTranslogEnabled() throws Exception { listener, replicasProxy, 0, - new ReplicationModeAwareProxy<>(ReplicationMode.NO_REPLICATION, replicasProxy, replicasProxy) + new ReplicationModeAwareProxy<>( + ReplicationMode.NO_REPLICATION, + buildRemoteStoreEnabledDiscoveryNodes(routingTable), + replicasProxy, + replicasProxy, + true + ) ); op.execute(); assertTrue("request was not processed on primary",
request.processedOnPrimary.get()); @@ -304,7 +312,13 @@ public void testPrimaryToPrimaryReplicationWithRemoteTranslogEnabled() throws Ex listener, replicasProxy, 0, - new ReplicationModeAwareProxy<>(ReplicationMode.NO_REPLICATION, replicasProxy, replicasProxy) + new ReplicationModeAwareProxy<>( + ReplicationMode.NO_REPLICATION, + buildRemoteStoreEnabledDiscoveryNodes(routingTable), + replicasProxy, + replicasProxy, + true + ) ); op.execute(); assertTrue("request was not processed on primary", request.processedOnPrimary.get()); @@ -380,6 +394,144 @@ public void testForceReplicationWithRemoteTranslogEnabled() throws Exception { assertEquals(activeIds.size() + initializingIds.size(), shardInfo.getTotal()); } + public void testReplicationInDualModeWithDocrepReplica() throws Exception { + Set initializingIds = new HashSet<>(); + IntStream.range(0, randomIntBetween(2, 5)).forEach(x -> initializingIds.add(AllocationId.newInitializing())); + Set activeIds = new HashSet<>(); + IntStream.range(0, randomIntBetween(2, 5)).forEach(x -> activeIds.add(AllocationId.newInitializing())); + + AllocationId primaryId = activeIds.iterator().next(); + + ShardId shardId = new ShardId("test", "_na_", 0); + IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(shardId); + final ShardRouting primaryShard = newShardRouting( + shardId, + nodeIdFromAllocationId(primaryId), + null, + true, + ShardRoutingState.STARTED, + primaryId + ); + initializingIds.forEach(aId -> { + ShardRouting routing = newShardRouting(shardId, nodeIdFromAllocationId(aId), null, false, ShardRoutingState.INITIALIZING, aId); + builder.addShard(routing); + }); + activeIds.stream().filter(aId -> !aId.equals(primaryId)).forEach(aId -> { + ShardRouting routing = newShardRouting(shardId, nodeIdFromAllocationId(aId), null, false, ShardRoutingState.STARTED, aId); + builder.addShard(routing); + }); + builder.addShard(primaryShard); + IndexShardRoutingTable routingTable = builder.build(); + + Set inSyncAllocationIds = activeIds.stream().map(AllocationId::getId).collect(Collectors.toSet()); + ReplicationGroup replicationGroup = new ReplicationGroup(routingTable, inSyncAllocationIds, inSyncAllocationIds, 0); + List replicationTargets = replicationGroup.getReplicationTargets(); + assertEquals(inSyncAllocationIds.size(), replicationTargets.size()); + assertTrue( + replicationTargets.stream().map(sh -> sh.allocationId().getId()).collect(Collectors.toSet()).containsAll(inSyncAllocationIds) + ); + + Request request = new Request(shardId); + PlainActionFuture listener = new PlainActionFuture<>(); + Map simulatedFailures = new HashMap<>(); + TestReplicaProxy replicasProxy = new TestReplicaProxy(simulatedFailures); + TestPrimary primary = new TestPrimary(primaryShard, () -> replicationGroup, threadPool); + final TestReplicationOperation op = new TestReplicationOperation( + request, + primary, + listener, + replicasProxy, + 0, + new ReplicationModeAwareProxy<>( + ReplicationMode.NO_REPLICATION, + buildDiscoveryNodes(routingTable), + replicasProxy, + replicasProxy, + false + ) + ); + op.execute(); + assertTrue("request was not processed on primary", request.processedOnPrimary.get()); + // In dual replication mode, the replication action should be executed on all the replicas except the primary + assertEquals(activeIds.size() - 1, request.processedOnReplicas.size()); + assertEquals(0, replicasProxy.failedReplicas.size()); + assertEquals(0, replicasProxy.markedAsStaleCopies.size()); + assertTrue("post replication operations not run on primary",
request.runPostReplicationActionsOnPrimary.get()); + assertTrue("listener is not marked as done", listener.isDone()); + + ShardInfo shardInfo = listener.actionGet().getShardInfo(); + // All initializing and active shards are allocated on docrep nodes + assertEquals(initializingIds.size() + activeIds.size(), shardInfo.getTotal()); + } + + public void testReplicationInDualModeWithMixedReplicasSomeInDocrepOthersOnRemote() throws Exception { + Set initializingIds = new HashSet<>(); + IntStream.range(0, randomIntBetween(2, 5)).forEach(x -> initializingIds.add(AllocationId.newInitializing())); + Set activeIds = new HashSet<>(); + IntStream.range(0, randomIntBetween(2, 5)).forEach(x -> activeIds.add(AllocationId.newInitializing())); + + AllocationId primaryId = activeIds.iterator().next(); + + ShardId shardId = new ShardId("test", "_na_", 0); + IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(shardId); + final ShardRouting primaryShard = newShardRouting( + shardId, + nodeIdFromAllocationId(primaryId), + null, + true, + ShardRoutingState.STARTED, + primaryId + ); + initializingIds.forEach(aId -> { + ShardRouting routing = newShardRouting(shardId, nodeIdFromAllocationId(aId), null, false, ShardRoutingState.INITIALIZING, aId); + builder.addShard(routing); + }); + activeIds.stream().filter(aId -> !aId.equals(primaryId)).forEach(aId -> { + ShardRouting routing = newShardRouting(shardId, nodeIdFromAllocationId(aId), null, false, ShardRoutingState.STARTED, aId); + builder.addShard(routing); + }); + builder.addShard(primaryShard); + IndexShardRoutingTable routingTable = builder.build(); + + Set inSyncAllocationIds = activeIds.stream().map(AllocationId::getId).collect(Collectors.toSet()); + ReplicationGroup replicationGroup = new ReplicationGroup(routingTable, inSyncAllocationIds, inSyncAllocationIds, 0); + List replicationTargets = replicationGroup.getReplicationTargets(); + assertEquals(inSyncAllocationIds.size(), replicationTargets.size()); + assertTrue( + replicationTargets.stream().map(sh -> sh.allocationId().getId()).collect(Collectors.toSet()).containsAll(inSyncAllocationIds) + ); + + Request request = new Request(shardId); + PlainActionFuture listener = new PlainActionFuture<>(); + Map simulatedFailures = new HashMap<>(); + TestReplicaProxy replicasProxy = new TestReplicaProxy(simulatedFailures); + TestPrimary primary = new TestPrimary(primaryShard, () -> replicationGroup, threadPool); + // Generating data nodes in mixed mode, wherein some of the allocated replicas + // are on docrep nodes while others are on remote-enabled ones + Tuple discoveryNodesDetails = buildMixedModeDiscoveryNodes(routingTable); + int docRepNodes = discoveryNodesDetails.v1(); + final TestReplicationOperation op = new TestReplicationOperation( + request, + primary, + listener, + replicasProxy, + 0, + new ReplicationModeAwareProxy<>(ReplicationMode.NO_REPLICATION, discoveryNodesDetails.v2(), replicasProxy, replicasProxy, false) + ); + op.execute(); + assertTrue("request was not processed on primary", request.processedOnPrimary.get()); + // The request should be fanned out only to the replicas on docrep nodes + assertEquals(docRepNodes, request.processedOnReplicas.size()); + assertEquals(0, replicasProxy.failedReplicas.size()); + assertEquals(0, replicasProxy.markedAsStaleCopies.size()); + assertTrue("post replication operations not run on primary", request.runPostReplicationActionsOnPrimary.get()); + assertTrue("listener is not marked as done", listener.isDone()); + + ShardInfo shardInfo =
listener.actionGet().getShardInfo(); + // The shard info total should account for the primary, the initializing ids and the operations on docrep nodes + assertEquals(1 + docRepNodes + initializingIds.size(), shardInfo.getTotal()); + } + static String nodeIdFromAllocationId(final AllocationId allocationId) { return "n-" + allocationId.getId().substring(0, 8); } @@ -816,6 +968,46 @@ private Set getExpectedReplicas(ShardId shardId, ClusterState stat return expectedReplicas; } + private DiscoveryNodes buildRemoteStoreEnabledDiscoveryNodes(IndexShardRoutingTable routingTable) { + DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); + for (ShardRouting shardRouting : routingTable) { + builder.add(IndexShardTestUtils.getFakeRemoteEnabledNode(shardRouting.currentNodeId())); + } + return builder.build(); + } + + private DiscoveryNodes buildDiscoveryNodes(IndexShardRoutingTable routingTable) { + DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); + for (ShardRouting shardRouting : routingTable) { + if (shardRouting.primary()) { + builder.add(IndexShardTestUtils.getFakeRemoteEnabledNode(shardRouting.currentNodeId())); + } else { + builder.add(IndexShardTestUtils.getFakeDiscoNode(shardRouting.currentNodeId())); + } + } + return builder.build(); + } + + private Tuple buildMixedModeDiscoveryNodes(IndexShardRoutingTable routingTable) { + int docrepNodes = 0; + DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); + for (ShardRouting shardRouting : routingTable) { + if (shardRouting.primary()) { + builder.add(IndexShardTestUtils.getFakeRemoteEnabledNode(shardRouting.currentNodeId())); + } else { + // Only add docrep nodes for allocationIds that are active, + // since the test case creates the replication group with active allocationIds only + if (shardRouting.active() && randomBoolean()) { + builder.add(IndexShardTestUtils.getFakeDiscoNode(shardRouting.currentNodeId())); + docrepNodes += 1; + } else { + builder.add(IndexShardTestUtils.getFakeRemoteEnabledNode(shardRouting.currentNodeId())); + } + } + } + return new Tuple<>(docrepNodes, builder.build()); + } + public static class Request extends ReplicationRequest { public AtomicBoolean processedOnPrimary = new AtomicBoolean(); public AtomicBoolean runPostReplicationActionsOnPrimary = new AtomicBoolean(); diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java index dad0fa0efd3ec..4a18778cc0b2b 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java @@ -78,6 +78,7 @@ import org.opensearch.core.transport.TransportResponse; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexService; +import org.opensearch.index.remote.RemoteStoreTestsHelper; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardClosedException; import org.opensearch.index.shard.IndexShardState; @@ -1589,9 +1590,15 @@ private IndexService mockIndexService(final IndexMetadata indexMetadata, Cluster @SuppressWarnings("unchecked") private IndexShard mockIndexShard(ShardId shardId, ClusterService clusterService) { + return mockIndexShard(shardId, clusterService, false); + } + + @SuppressWarnings("unchecked") + private IndexShard mockIndexShard(ShardId shardId, ClusterService clusterService, boolean remote) { final
IndexShard indexShard = mock(IndexShard.class); when(indexShard.shardId()).thenReturn(shardId); when(indexShard.state()).thenReturn(IndexShardState.STARTED); + when(indexShard.indexSettings()).thenReturn(RemoteStoreTestsHelper.createIndexSettings(remote)); doAnswer(invocation -> { ActionListener callback = (ActionListener) invocation.getArguments()[0]; if (isPrimaryMode.get()) { diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreTestsHelper.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreTestsHelper.java index e072d3037caad..043b4493e8989 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreTestsHelper.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreTestsHelper.java @@ -10,6 +10,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.IndexShard; @@ -37,4 +38,22 @@ static IndexShard createIndexShard(ShardId shardId, boolean remoteStoreEnabled) when(indexShard.store()).thenReturn(store); return indexShard; } + + public static IndexSettings createIndexSettings(boolean remote) { + return createIndexSettings(remote, Settings.EMPTY); + } + + public static IndexSettings createIndexSettings(boolean remote, Settings settings) { + IndexSettings indexSettings; + if (remote) { + Settings nodeSettings = Settings.builder() + .put("node.name", "xyz") + .put("node.attr.remote_store.translog.repository", "seg_repo") + .build(); + indexSettings = IndexSettingsModule.newIndexSettings(new Index("test_index", "_na_"), settings, nodeSettings); + } else { + indexSettings = IndexSettingsModule.newIndexSettings("test_index", settings); + } + return indexSettings; + } } diff --git a/server/src/test/java/org/opensearch/index/replication/RetentionLeasesReplicationTests.java b/server/src/test/java/org/opensearch/index/replication/RetentionLeasesReplicationTests.java index 8c59e92a3fe8a..904c9a70e61e0 100644 --- a/server/src/test/java/org/opensearch/index/replication/RetentionLeasesReplicationTests.java +++ b/server/src/test/java/org/opensearch/index/replication/RetentionLeasesReplicationTests.java @@ -45,6 +45,7 @@ import org.opensearch.index.seqno.RetentionLeaseUtils; import org.opensearch.index.seqno.RetentionLeases; import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.IndexShardTestUtils; import org.opensearch.test.VersionUtils; import java.util.ArrayList; @@ -182,7 +183,8 @@ public void testTurnOffTranslogRetentionAfterAllShardStarted() throws Exception null, 1L, group.getPrimary().getReplicationGroup().getInSyncAllocationIds(), - group.getPrimary().getReplicationGroup().getRoutingTable() + group.getPrimary().getReplicationGroup().getRoutingTable(), + IndexShardTestUtils.getFakeDiscoveryNodes(shard.routingEntry()) ); } group.syncGlobalCheckpoint(); diff --git a/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java b/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java index 8363ea3757a2b..a27f3476888eb 100644 --- a/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java @@ -54,6 +54,7 @@ import java.util.Collections; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import 
static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; @@ -110,6 +111,7 @@ public void testTranslogSyncAfterGlobalCheckpointSync() throws Exception { final ShardId shardId = new ShardId(index, id); when(indexShard.shardId()).thenReturn(shardId); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); final Translog.Durability durability = randomFrom(Translog.Durability.ASYNC, Translog.Durability.REQUEST); when(indexShard.getTranslogDurability()).thenReturn(durability); @@ -158,14 +160,14 @@ public void testTranslogSyncAfterGlobalCheckpointSync() throws Exception { public void testGetReplicationModeWithRemoteTranslog() { final GlobalCheckpointSyncAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { final GlobalCheckpointSyncAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } @@ -191,6 +193,7 @@ public void testMayBeSyncTranslogWithRemoteTranslog() throws Exception { when(indexShard.getLastKnownGlobalCheckpoint()).thenReturn(globalCheckpoint); when(indexShard.getLastSyncedGlobalCheckpoint()).thenReturn(globalCheckpoint - 1); when(indexShard.getTranslogDurability()).thenReturn(Translog.Durability.REQUEST); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); action.shardOperationOnPrimary(primaryRequest, indexShard, ActionTestUtils.assertNoFailureListener(r -> {})); verify(indexShard, never()).sync(); @@ -205,6 +208,7 @@ public void testMayBeSyncTranslogWithLocalTranslog() throws Exception { when(indexShard.getLastKnownGlobalCheckpoint()).thenReturn(globalCheckpoint); when(indexShard.getLastSyncedGlobalCheckpoint()).thenReturn(globalCheckpoint - 1); when(indexShard.getTranslogDurability()).thenReturn(Translog.Durability.REQUEST); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); action.shardOperationOnPrimary(primaryRequest, indexShard, ActionTestUtils.assertNoFailureListener(r -> {})); verify(indexShard).sync(); diff --git a/server/src/test/java/org/opensearch/index/seqno/PeerRecoveryRetentionLeaseExpiryTests.java b/server/src/test/java/org/opensearch/index/seqno/PeerRecoveryRetentionLeaseExpiryTests.java index ca80c7b9c4884..7a9f1d7baa12e 100644 --- a/server/src/test/java/org/opensearch/index/seqno/PeerRecoveryRetentionLeaseExpiryTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/PeerRecoveryRetentionLeaseExpiryTests.java @@ -93,7 +93,8 @@ public void setUpReplicationTracker() throws InterruptedException { value -> {}, currentTimeMillis::get, (leases, listener) -> {}, - () -> safeCommitInfo + () -> safeCommitInfo, + sId -> false ); replicationTracker.updateFromClusterManager( 1L, diff --git a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java index 3cd60ac973709..fdbe89422a2aa 100644 --- 
a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java @@ -84,7 +84,8 @@ public void testAddOrRenewRetentionLease() { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -132,7 +133,8 @@ public void testAddDuplicateRetentionLease() { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -164,7 +166,8 @@ public void testRenewNotFoundRetentionLease() { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -202,7 +205,8 @@ public void testAddRetentionLeaseCausesRetentionLeaseSync() { equalTo(retainingSequenceNumbers) ); }, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); reference.set(replicationTracker); replicationTracker.updateFromClusterManager( @@ -241,7 +245,8 @@ public void testRemoveRetentionLease() { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -305,7 +310,8 @@ public void testCloneRetentionLease() { assertTrue(synced.compareAndSet(false, true)); listener.onResponse(new ReplicationResponse()); }, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTrackerRef.set(replicationTracker); replicationTracker.updateFromClusterManager( @@ -351,7 +357,8 @@ public void testCloneNonexistentRetentionLease() { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -380,7 +387,8 @@ public void testCloneDuplicateRetentionLease() { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -413,7 +421,8 @@ public void testRemoveNotFound() { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -451,7 +460,8 @@ public void testRemoveRetentionLeaseCausesRetentionLeaseSync() { equalTo(retainingSequenceNumbers) ); }, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); reference.set(replicationTracker); replicationTracker.updateFromClusterManager( @@ -504,7 +514,8 @@ private void runExpirationTest(final boolean primaryMode) { value -> {}, currentTimeMillis::get, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( 
randomNonNegativeLong(), @@ -583,7 +594,8 @@ public void testReplicaIgnoresOlderRetentionLeasesVersion() { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -636,7 +648,8 @@ public void testLoadAndPersistRetentionLeases() throws IOException { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -671,7 +684,8 @@ public void testUnnecessaryPersistenceOfRetentionLeases() throws IOException { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -724,7 +738,8 @@ public void testPersistRetentionLeasesUnderConcurrency() throws IOException { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -788,7 +803,8 @@ public void testRenewLeaseWithLowerRetainingSequenceNumber() throws Exception { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), diff --git a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTestCase.java b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTestCase.java index e61d27695a5e5..daeefeff59c94 100644 --- a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTestCase.java +++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTestCase.java @@ -40,11 +40,13 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.engine.SafeCommitInfo; +import org.opensearch.index.remote.RemoteStoreTestsHelper; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.OpenSearchTestCase; import java.util.Collections; import java.util.Set; +import java.util.function.Function; import java.util.function.LongConsumer; import java.util.function.LongSupplier; import java.util.function.Supplier; @@ -57,18 +59,20 @@ ReplicationTracker newTracker( final AllocationId allocationId, final LongConsumer updatedGlobalCheckpoint, final LongSupplier currentTimeMillisSupplier, - final Settings settings + final Settings settings, + final boolean remote ) { return new ReplicationTracker( new ShardId("test", "_na_", 0), allocationId.getId(), - IndexSettingsModule.newIndexSettings("test", settings), + remote ? RemoteStoreTestsHelper.createIndexSettings(true, settings) : IndexSettingsModule.newIndexSettings("test", settings), randomNonNegativeLong(), UNASSIGNED_SEQ_NO, updatedGlobalCheckpoint, currentTimeMillisSupplier, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + remote ? 
REMOTE_DISCOVERY_NODE : NON_REMOTE_DISCOVERY_NODE ); } @@ -80,8 +84,21 @@ ReplicationTracker newTracker( return newTracker(allocationId, updatedGlobalCheckpoint, currentTimeMillisSupplier, Settings.EMPTY); } + ReplicationTracker newTracker( + final AllocationId allocationId, + final LongConsumer updatedGlobalCheckpoint, + final LongSupplier currentTimeMillisSupplier, + final Settings settings + ) { + return newTracker(allocationId, updatedGlobalCheckpoint, currentTimeMillisSupplier, settings, false); + } + static final Supplier OPS_BASED_RECOVERY_ALWAYS_REASONABLE = () -> SafeCommitInfo.EMPTY; + static final Function NON_REMOTE_DISCOVERY_NODE = shardId -> false; + + static final Function REMOTE_DISCOVERY_NODE = shardId -> true; + static String nodeIdFromAllocationId(final AllocationId allocationId) { return "n-" + allocationId.getId().substring(0, 8); } diff --git a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java index 7971591e82bab..233a99cbe4a73 100644 --- a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java @@ -446,6 +446,10 @@ public void testWaitForAllocationIdToBeInSync() throws Exception { private AtomicLong updatedGlobalCheckpoint = new AtomicLong(UNASSIGNED_SEQ_NO); + private ReplicationTracker newTracker(final AllocationId allocationId, Settings settings, boolean remote) { + return newTracker(allocationId, updatedGlobalCheckpoint::set, () -> 0L, settings, remote); + } + private ReplicationTracker newTracker(final AllocationId allocationId, Settings settings) { return newTracker(allocationId, updatedGlobalCheckpoint::set, () -> 0L, settings); } @@ -759,7 +763,8 @@ public void testPrimaryContextHandoff() throws IOException { onUpdate, () -> 0L, onNewRetentionLease, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); ReplicationTracker newPrimary = new ReplicationTracker( shardId, @@ -770,7 +775,8 @@ public void testPrimaryContextHandoff() throws IOException { onUpdate, () -> 0L, onNewRetentionLease, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); Set allocationIds = new HashSet<>(Arrays.asList(oldPrimary.shardAllocationId, newPrimary.shardAllocationId)); @@ -1300,7 +1306,7 @@ public void testGlobalCheckpointUpdateWithRemoteTranslogEnabled() { .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") .build(); - final ReplicationTracker tracker = newTracker(primaryId, settings); + final ReplicationTracker tracker = newTracker(primaryId, settings, true); assertThat(tracker.getGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO)); long primaryLocalCheckpoint = activeWithCheckpoints.get(primaryId); @@ -1378,7 +1384,7 @@ public void testUpdateFromClusterManagerWithRemoteTranslogEnabled() { .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") .build(); - final ReplicationTracker tracker = newTracker(primaryId, settings); + final ReplicationTracker tracker = newTracker(primaryId, settings, true); assertThat(tracker.getGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO)); long primaryLocalCheckpoint = activeWithCheckpoints.get(primaryId); @@ -1476,7 +1482,7 @@ public void testMarkAllocationIdAsInSyncWithRemoteTranslogEnabled() 
throws Excep .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") .build(); - final ReplicationTracker tracker = newTracker(primaryId, settings); + final ReplicationTracker tracker = newTracker(primaryId, settings, true); tracker.updateFromClusterManager(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId)); final long localCheckpoint = randomLongBetween(0, Long.MAX_VALUE - 1); tracker.activatePrimaryMode(localCheckpoint); @@ -1504,7 +1510,7 @@ public void testMissingActiveIdsDoesNotPreventAdvanceWithRemoteTranslogEnabled() .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") .build(); - final ReplicationTracker tracker = newTracker(primaryId, settings); + final ReplicationTracker tracker = newTracker(primaryId, settings, true); tracker.updateFromClusterManager(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId)); tracker.activatePrimaryMode(NO_OPS_PERFORMED); List initializingRandomSubset = randomSubsetOf(initializing.keySet()); @@ -1537,7 +1543,7 @@ public void testMissingInSyncIdsDoesNotPreventAdvanceWithRemoteTranslogEnabled() .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") .build(); - final ReplicationTracker tracker = newTracker(primaryId, settings); + final ReplicationTracker tracker = newTracker(primaryId, settings, true); tracker.updateFromClusterManager(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId)); tracker.activatePrimaryMode(NO_OPS_PERFORMED); randomSubsetOf(randomIntBetween(1, initializing.size() - 1), initializing.keySet()).forEach( @@ -1606,8 +1612,8 @@ public void testInSyncIdsAreRemovedIfNotValidatedByClusterManagerWithRemoteTrans .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") .build(); - final ReplicationTracker tracker = newTracker(primaryId, settings); - tracker.updateFromClusterManager(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId)); + final ReplicationTracker tracker = newTracker(primaryId, settings, true); + tracker.updateFromClusterManager(initialClusterStateVersion, ids(active), routingTable(initializing, active, primaryId)); tracker.activatePrimaryMode(NO_OPS_PERFORMED); if (randomBoolean()) { initializingToStay.keySet().forEach(k -> markAsTrackingAndInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED)); @@ -1655,7 +1661,7 @@ public void testUpdateAllocationIdsFromClusterManagerWithRemoteTranslogEnabled() .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") .build(); - final ReplicationTracker tracker = newTracker(primaryId, settings); + final ReplicationTracker tracker = newTracker(primaryId, settings, true); tracker.updateFromClusterManager(initialClusterStateVersion, ids(activeAllocationIds), routingTable); tracker.activatePrimaryMode(NO_OPS_PERFORMED); assertThat(tracker.getReplicationGroup().getInSyncAllocationIds(), equalTo(ids(activeAllocationIds))); @@ -2080,7 +2086,8 @@ public void testPrimaryContextHandoffWithRemoteTranslogEnabled() throws IOExcept onUpdate, () -> 0L, onNewRetentionLease, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + REMOTE_DISCOVERY_NODE ); ReplicationTracker newPrimary = new 
ReplicationTracker( shardId, @@ -2091,7 +2098,8 @@ public void testPrimaryContextHandoffWithRemoteTranslogEnabled() throws IOExcept onUpdate, () -> 0L, onNewRetentionLease, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + REMOTE_DISCOVERY_NODE ); Set allocationIds = new HashSet<>(Arrays.asList(oldPrimary.shardAllocationId, newPrimary.shardAllocationId)); diff --git a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java index ed04d9a20f18e..d5d7163b66698 100644 --- a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java @@ -60,6 +60,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Mockito.mock; @@ -215,14 +216,14 @@ public void testBlocks() { public void testGetReplicationModeWithRemoteTranslog() { final RetentionLeaseBackgroundSyncAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { final RetentionLeaseBackgroundSyncAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } diff --git a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java index 63a9ac2f2e8ec..7610b8bc39296 100644 --- a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java @@ -60,6 +60,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import static java.util.Collections.emptyMap; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Mockito.mock; @@ -215,15 +216,15 @@ public void testBlocks() { public void testGetReplicationModeWithRemoteTranslog() { final RetentionLeaseSyncAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { final RetentionLeaseSyncAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); - assertEquals(ReplicationMode.NO_REPLICATION, 
action.getReplicationMode(indexShard)); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); + assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } private RetentionLeaseSyncAction createAction() { diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 537bfcf8f8a6b..e5bfa8caee79a 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -697,7 +697,8 @@ public void testPrimaryPromotionRollsGeneration() throws Exception { (shard, listener) -> {}, 0L, Collections.singleton(primaryRouting.allocationId().getId()), - new IndexShardRoutingTable.Builder(primaryRouting.shardId()).addShard(primaryRouting).build() + new IndexShardRoutingTable.Builder(primaryRouting.shardId()).addShard(primaryRouting).build(), + IndexShardTestUtils.getFakeDiscoveryNodes(primaryRouting) ); /* @@ -764,7 +765,8 @@ public void testOperationPermitsOnPrimaryShards() throws Exception { }, 0L, Collections.singleton(indexShard.routingEntry().allocationId().getId()), - new IndexShardRoutingTable.Builder(indexShard.shardId()).addShard(primaryRouting).build() + new IndexShardRoutingTable.Builder(indexShard.shardId()).addShard(primaryRouting).build(), + IndexShardTestUtils.getFakeDiscoveryNodes(primaryRouting) ); latch.await(); assertThat(indexShard.getActiveOperationsCount(), is(oneOf(0, IndexShard.OPERATIONS_BLOCKED))); @@ -1446,7 +1448,8 @@ public void onFailure(Exception e) { (s, r) -> resyncLatch.countDown(), 1L, Collections.singleton(newRouting.allocationId().getId()), - new IndexShardRoutingTable.Builder(newRouting.shardId()).addShard(newRouting).build() + new IndexShardRoutingTable.Builder(newRouting.shardId()).addShard(newRouting).build(), + IndexShardTestUtils.getFakeDiscoveryNodes(newRouting) ); resyncLatch.await(); assertThat(indexShard.getLocalCheckpoint(), equalTo(maxSeqNo)); @@ -3284,7 +3287,7 @@ public void testRecoverFromTranslog() throws IOException { Translog.Snapshot snapshot = TestTranslog.newSnapshotFromOperations(operations); primary.markAsRecovering( "store", - new RecoveryState(primary.routingEntry(), getFakeDiscoNode(primary.routingEntry().currentNodeId()), null) + new RecoveryState(primary.routingEntry(), IndexShardTestUtils.getFakeDiscoNode(primary.routingEntry().currentNodeId()), null) ); recoverFromStore(primary); @@ -4029,15 +4032,19 @@ public void testReadSnapshotAndCheckIndexConcurrently() throws Exception { if (isPrimary) { newShard.markAsRecovering( "store", - new RecoveryState(newShard.routingEntry(), getFakeDiscoNode(newShard.routingEntry().currentNodeId()), null) + new RecoveryState( + newShard.routingEntry(), + IndexShardTestUtils.getFakeDiscoNode(newShard.routingEntry().currentNodeId()), + null + ) ); } else { newShard.markAsRecovering( "peer", new RecoveryState( newShard.routingEntry(), - getFakeDiscoNode(newShard.routingEntry().currentNodeId()), - getFakeDiscoNode(newShard.routingEntry().currentNodeId()) + IndexShardTestUtils.getFakeDiscoNode(newShard.routingEntry().currentNodeId()), + IndexShardTestUtils.getFakeDiscoNode(newShard.routingEntry().currentNodeId()) ) ); } diff --git a/server/src/test/java/org/opensearch/index/shard/PrimaryReplicaSyncerTests.java b/server/src/test/java/org/opensearch/index/shard/PrimaryReplicaSyncerTests.java index b1bcaac2c1947..09903a8b44cb5 100644 --- 
a/server/src/test/java/org/opensearch/index/shard/PrimaryReplicaSyncerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/PrimaryReplicaSyncerTests.java @@ -111,7 +111,8 @@ public void testSyncerSendsOffCorrectDocuments() throws Exception { null, 1000L, Collections.singleton(allocationId), - new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build() + new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build(), + IndexShardTestUtils.getFakeDiscoveryNodes(shard.routingEntry()) ); shard.updateLocalCheckpointForShard(allocationId, globalCheckPoint); assertEquals(globalCheckPoint, shard.getLastKnownGlobalCheckpoint()); @@ -190,7 +191,8 @@ public void testSyncerOnClosingShard() throws Exception { null, 1000L, Collections.singleton(allocationId), - new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build() + new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build(), + IndexShardTestUtils.getFakeDiscoveryNodes(shard.routingEntry()) ); CountDownLatch syncCalledLatch = new CountDownLatch(1); diff --git a/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java b/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java index 0e16e81b1bb70..0428bdf0655b0 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java @@ -34,6 +34,7 @@ import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingHelper; @@ -164,7 +165,8 @@ public void afterIndexRemoved(Index index, IndexSettings indexSettings, IndexRem null, null, localNode, - null + null, + DiscoveryNodes.builder().add(localNode).build() ); IndexShardTestCase.updateRoutingEntry(shard, newRouting); assertEquals(5, counter.get()); diff --git a/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java index c455101ff4549..0490228a5cc16 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java +++ b/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java @@ -35,6 +35,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.ShardRouting; @@ -264,7 +265,8 @@ public MockIndexShard createShard( final RetentionLeaseSyncer retentionLeaseSyncer, final DiscoveryNode targetNode, final DiscoveryNode sourceNode, - final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory + final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, + final DiscoveryNodes discoveryNodes ) throws IOException { failRandomly(); RecoveryState recoveryState = new RecoveryState(shardRouting, targetNode, sourceNode); @@ -387,7 
+389,8 @@ public void updateShardState( BiConsumer> primaryReplicaSyncer, long applyingClusterStateVersion, Set inSyncAllocationIds, - IndexShardRoutingTable routingTable + IndexShardRoutingTable routingTable, + DiscoveryNodes discoveryNodes ) throws IOException { failRandomly(); assertThat(this.shardId(), equalTo(shardRouting.shardId())); diff --git a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoverySourceServiceTests.java b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoverySourceServiceTests.java index 4fbae4b0d53ca..ded174fb98eef 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoverySourceServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoverySourceServiceTests.java @@ -38,6 +38,7 @@ import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; +import org.opensearch.index.shard.IndexShardTestUtils; import org.opensearch.index.store.Store; import org.opensearch.indices.IndicesService; import org.opensearch.test.NodeRoles; @@ -65,8 +66,8 @@ public void testDuplicateRecoveries() throws IOException { StartRecoveryRequest startRecoveryRequest = new StartRecoveryRequest( primary.shardId(), randomAlphaOfLength(10), - getFakeDiscoNode("source"), - getFakeDiscoNode("target"), + IndexShardTestUtils.getFakeDiscoNode("source"), + IndexShardTestUtils.getFakeDiscoNode("target"), Store.MetadataSnapshot.EMPTY, randomBoolean(), randomLong(), diff --git a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java index 1e6cc43703672..a8e5a02011538 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -56,6 +56,7 @@ import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; +import org.opensearch.index.shard.IndexShardTestUtils; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.translog.Translog; @@ -92,8 +93,8 @@ public void testWriteFileChunksConcurrently() throws Exception { mdFiles.add(md); } final IndexShard targetShard = newShard(false); - final DiscoveryNode pNode = getFakeDiscoNode(sourceShard.routingEntry().currentNodeId()); - final DiscoveryNode rNode = getFakeDiscoNode(targetShard.routingEntry().currentNodeId()); + final DiscoveryNode pNode = IndexShardTestUtils.getFakeDiscoNode(sourceShard.routingEntry().currentNodeId()); + final DiscoveryNode rNode = IndexShardTestUtils.getFakeDiscoNode(targetShard.routingEntry().currentNodeId()); targetShard.markAsRecovering("test-peer-recovery", new RecoveryState(targetShard.routingEntry(), rNode, pNode)); final RecoveryTarget recoveryTarget = new RecoveryTarget(targetShard, null, null, threadPool); final PlainActionFuture receiveFileInfoFuture = new PlainActionFuture<>(); diff --git a/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java b/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java index 2cf006176022d..352f827c74cb2 100644 --- a/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java +++ 
b/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java @@ -15,6 +15,7 @@ import org.opensearch.action.support.replication.ReplicationMode; import org.opensearch.action.support.replication.TransportReplicationAction; import org.opensearch.cluster.action.shard.ShardStateAction; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; @@ -35,9 +36,12 @@ import java.util.Collections; import java.util.concurrent.atomic.AtomicBoolean; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -128,7 +132,9 @@ public void testPublishCheckpointActionOnReplica() { final ShardId shardId = new ShardId(index, id); when(indexShard.shardId()).thenReturn(shardId); - + when(indexShard.indexSettings()).thenReturn( + createIndexSettings(false, Settings.builder().put(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING.getKey(), "SEGMENT").build()) + ); final SegmentReplicationTargetService mockTargetService = mock(SegmentReplicationTargetService.class); final PublishCheckpointAction action = new PublishCheckpointAction( @@ -160,17 +166,46 @@ public void testPublishCheckpointActionOnReplica() { } + public void testPublishCheckpointActionOnDocrepReplicaDuringMigration() { + final IndicesService indicesService = mock(IndicesService.class); + + final Index index = new Index("index", "uuid"); + final IndexService indexService = mock(IndexService.class); + when(indicesService.indexServiceSafe(index)).thenReturn(indexService); + final int id = randomIntBetween(0, 4); + final IndexShard indexShard = mock(IndexShard.class); + when(indexService.getShard(id)).thenReturn(indexShard); + + final ShardId shardId = new ShardId(index, id); + when(indexShard.shardId()).thenReturn(shardId); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); + final SegmentReplicationTargetService mockTargetService = mock(SegmentReplicationTargetService.class); + + final PublishCheckpointAction action = new PublishCheckpointAction( + Settings.EMPTY, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + new ActionFilters(Collections.emptySet()), + mockTargetService + ); + // no interaction with SegmentReplicationTargetService object + verify(mockTargetService, never()).onNewCheckpoint(any(), any()); + } + public void testGetReplicationModeWithRemoteTranslog() { final PublishCheckpointAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { final PublishCheckpointAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); 
assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } diff --git a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java index 43289a7c89524..1cb5501810c5d 100644 --- a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java @@ -898,7 +898,8 @@ public EngineConfig config( update -> {}, () -> 0L, (leases, listener) -> listener.onResponse(new ReplicationResponse()), - () -> SafeCommitInfo.EMPTY + () -> SafeCommitInfo.EMPTY, + sId -> false ); globalCheckpointSupplier = replicationTracker; retentionLeasesSupplier = replicationTracker::getRetentionLeases; diff --git a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java index e6e20ce8f8566..3226035bba97b 100644 --- a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java @@ -64,6 +64,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.AllocationId; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RecoverySource; @@ -96,6 +97,7 @@ import org.opensearch.index.seqno.RetentionLeases; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; +import org.opensearch.index.shard.IndexShardTestUtils; import org.opensearch.index.shard.PrimaryReplicaSyncer; import org.opensearch.index.shard.ShardPath; import org.opensearch.index.translog.Translog; @@ -340,6 +342,23 @@ public synchronized void startAll() throws IOException { startReplicas(replicas.size()); } + public synchronized DiscoveryNodes generateFakeDiscoveryNodes() { + DiscoveryNodes.Builder builder = new DiscoveryNodes.Builder(); + if (primary.indexSettings() != null && primary.indexSettings().isRemoteNode()) { + builder.add(IndexShardTestUtils.getFakeRemoteEnabledNode(primary.routingEntry().currentNodeId())); + } else { + builder.add(IndexShardTestUtils.getFakeDiscoNode(primary.routingEntry().currentNodeId())); + } + for (IndexShard replica : replicas) { + if (replica.indexSettings() != null && replica.indexSettings().isRemoteNode()) { + builder.add(IndexShardTestUtils.getFakeRemoteEnabledNode(replica.routingEntry().currentNodeId())); + } else { + builder.add(IndexShardTestUtils.getFakeDiscoNode(replica.routingEntry().currentNodeId())); + } + } + return builder.build(); + } + public synchronized int startReplicas(int numOfReplicasToStart) throws IOException { if (primary.routingEntry().initializing()) { startPrimary(); @@ -371,7 +390,8 @@ public void startPrimary() throws IOException { null, currentClusterStateVersion.incrementAndGet(), activeIds, - routingTable + routingTable, + generateFakeDiscoveryNodes() ); for (final IndexShard replica : replicas) { recoverReplica(replica); @@ -492,7 +512,8 @@ public synchronized void promoteReplicaToPrimary( primaryReplicaSyncer, currentClusterStateVersion.incrementAndGet(), activeIds(), - routingTable + 
routingTable, + generateFakeDiscoveryNodes() ); } @@ -638,14 +659,16 @@ public void syncGlobalCheckpoint() { } private void updateAllocationIDsOnPrimary() throws IOException { - primary.updateShardState( primary.routingEntry(), primary.getPendingPrimaryTerm(), null, currentClusterStateVersion.incrementAndGet(), activeIds(), - routingTable(Function.identity()) + routingTable(Function.identity()), + primary.indexSettings().isRemoteTranslogStoreEnabled() + ? IndexShardTestUtils.getFakeRemoteEnabledDiscoveryNodes(routingTable(Function.identity()).getShards()) + : IndexShardTestUtils.getFakeDiscoveryNodes(routingTable(Function.identity()).getShards()) ); } diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 80d16a8243634..b2ece9c813802 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -48,6 +48,7 @@ import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.ShardRouting; @@ -619,12 +620,14 @@ protected IndexShard newShard( IndexingOperationListener... listeners ) throws IOException { Settings nodeSettings = Settings.builder().put("node.name", routing.currentNodeId()).build(); + DiscoveryNodes discoveryNodes = IndexShardTestUtils.getFakeDiscoveryNodes(routing); // To simulate that the node is remote backed if (indexMetadata.getSettings().get(IndexMetadata.SETTING_REMOTE_STORE_ENABLED) == "true") { nodeSettings = Settings.builder() .put("node.name", routing.currentNodeId()) .put("node.attr.remote_store.translog.repository", "seg_repo") .build(); + discoveryNodes = DiscoveryNodes.builder().add(IndexShardTestUtils.getFakeRemoteEnabledNode(routing.currentNodeId())).build(); } final IndexSettings indexSettings = new IndexSettings(indexMetadata, nodeSettings); final IndexShard indexShard; @@ -712,7 +715,8 @@ protected IndexShard newShard( "dummy-node", DefaultRecoverySettings.INSTANCE, DefaultRemoteStoreSettings.INSTANCE, - false + false, + discoveryNodes ); indexShard.addShardFailureCallback(DEFAULT_SHARD_FAILURE_HANDLER); if (remoteStoreStatsTrackerFactory != null) { @@ -986,7 +990,7 @@ protected void closeShards(Iterable shards) throws IOException { protected void recoverShardFromStore(IndexShard primary) throws IOException { primary.markAsRecovering( "store", - new RecoveryState(primary.routingEntry(), getFakeDiscoNode(primary.routingEntry().currentNodeId()), null) + new RecoveryState(primary.routingEntry(), IndexShardTestUtils.getFakeDiscoNode(primary.routingEntry().currentNodeId()), null) ); recoverFromStore(primary); updateRoutingEntry(primary, ShardRoutingHelper.moveToStarted(primary.routingEntry())); @@ -1003,7 +1007,19 @@ public static void updateRoutingEntry(IndexShard shard, ShardRouting shardRoutin null, currentClusterStateVersion.incrementAndGet(), inSyncIds, - newRoutingTable + newRoutingTable, + DiscoveryNodes.builder() + .add( + new DiscoveryNode( + shardRouting.currentNodeId(), + shardRouting.currentNodeId(), + buildNewFakeTransportAddress(), + Collections.emptyMap(), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + 
) + ) + .build() ); } @@ -1017,17 +1033,6 @@ protected void recoveryEmptyReplica(IndexShard replica, boolean startReplica) th } } - protected DiscoveryNode getFakeDiscoNode(String id) { - return new DiscoveryNode( - id, - id, - buildNewFakeTransportAddress(), - Collections.emptyMap(), - DiscoveryNodeRole.BUILT_IN_ROLES, - Version.CURRENT - ); - } - protected void recoverReplica(IndexShard replica, IndexShard primary, boolean startReplica) throws IOException { recoverReplica(replica, primary, startReplica, getReplicationFunc(replica)); } @@ -1104,7 +1109,7 @@ protected void recoverReplica( * @param targetSupplier supplies an instance of {@link RecoveryTarget} * @param markAsRecovering set to {@code false} if the replica is marked as recovering */ - protected final void recoverUnstartedReplica( + public final void recoverUnstartedReplica( final IndexShard replica, final IndexShard primary, final BiFunction targetSupplier, @@ -1113,8 +1118,18 @@ protected final void recoverUnstartedReplica( final IndexShardRoutingTable routingTable, final Function, List> replicatePrimaryFunction ) throws IOException { - final DiscoveryNode pNode = getFakeDiscoNode(primary.routingEntry().currentNodeId()); - final DiscoveryNode rNode = getFakeDiscoNode(replica.routingEntry().currentNodeId()); + final DiscoveryNode pNode; + final DiscoveryNode rNode; + if (primary.isRemoteTranslogEnabled()) { + pNode = IndexShardTestUtils.getFakeRemoteEnabledNode(primary.routingEntry().currentNodeId()); + } else { + pNode = IndexShardTestUtils.getFakeDiscoNode(primary.routingEntry().currentNodeId()); + } + if (replica.isRemoteTranslogEnabled()) { + rNode = IndexShardTestUtils.getFakeRemoteEnabledNode(replica.routingEntry().currentNodeId()); + } else { + rNode = IndexShardTestUtils.getFakeDiscoNode(replica.routingEntry().currentNodeId()); + } if (markAsRecovering) { replica.markAsRecovering("remote", new RecoveryState(replica.routingEntry(), pNode, rNode)); } else { @@ -1155,7 +1170,10 @@ protected final void recoverUnstartedReplica( null, currentClusterStateVersion.incrementAndGet(), inSyncIds, - routingTable + routingTable, + primary.isRemoteTranslogEnabled() + ? IndexShardTestUtils.getFakeRemoteEnabledDiscoveryNodes(routingTable.getShards()) + : IndexShardTestUtils.getFakeDiscoveryNodes(routingTable.getShards()) ); try { PlainActionFuture future = new PlainActionFuture<>(); @@ -1189,7 +1207,10 @@ protected void startReplicaAfterRecovery( null, currentClusterStateVersion.incrementAndGet(), inSyncIdsWithReplica, - newRoutingTable + newRoutingTable, + primary.indexSettings.isRemoteTranslogStoreEnabled() + ? IndexShardTestUtils.getFakeRemoteEnabledDiscoveryNodes(routingTable.shards()) + : IndexShardTestUtils.getFakeDiscoveryNodes(routingTable.shards()) ); replica.updateShardState( replica.routingEntry().moveToStarted(), @@ -1197,7 +1218,10 @@ protected void startReplicaAfterRecovery( null, currentClusterStateVersion.get(), inSyncIdsWithReplica, - newRoutingTable + newRoutingTable, + replica.indexSettings.isRemoteTranslogStoreEnabled() + ? 
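// The ternary below repeats the pattern this patch applies across the test
// framework: shard copies backed by a remote translog store must be handed
// DiscoveryNodes built via IndexShardTestUtils.getFakeRemoteEnabledNode(),
// whose node attributes mark them as remote-store enabled; plain fake
// discovery nodes are used otherwise.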
IndexShardTestUtils.getFakeRemoteEnabledDiscoveryNodes(routingTable.shards()) + : IndexShardTestUtils.getFakeDiscoveryNodes(routingTable.shards()) ); } @@ -1226,7 +1250,8 @@ protected void promoteReplica(IndexShard replica, Set inSyncIds, IndexSh ), currentClusterStateVersion.incrementAndGet(), inSyncIds, - newRoutingTable + newRoutingTable, + IndexShardTestUtils.getFakeDiscoveryNodes(routingEntry) ); } @@ -1370,7 +1395,7 @@ protected void recoverShardFromSnapshot(final IndexShard shard, final Snapshot s final Version version = Version.CURRENT; final ShardId shardId = shard.shardId(); final IndexId indexId = new IndexId(shardId.getIndex().getName(), shardId.getIndex().getUUID()); - final DiscoveryNode node = getFakeDiscoNode(shard.routingEntry().currentNodeId()); + final DiscoveryNode node = IndexShardTestUtils.getFakeDiscoNode(shard.routingEntry().currentNodeId()); final RecoverySource.SnapshotRecoverySource recoverySource = new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(), snapshot, diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestUtils.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestUtils.java new file mode 100644 index 0000000000000..d3a4a95c3bdef --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestUtils.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.shard; + +import org.opensearch.Version; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class IndexShardTestUtils { + public static DiscoveryNode getFakeDiscoNode(String id) { + return new DiscoveryNode( + id, + id, + IndexShardTestCase.buildNewFakeTransportAddress(), + Collections.emptyMap(), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + } + + public static DiscoveryNode getFakeRemoteEnabledNode(String id) { + Map remoteNodeAttributes = new HashMap(); + remoteNodeAttributes.put(RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, "test-repo"); + return new DiscoveryNode( + id, + id, + IndexShardTestCase.buildNewFakeTransportAddress(), + remoteNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + } + + public static DiscoveryNodes getFakeDiscoveryNodes(List shardRoutings) { + DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); + for (ShardRouting routing : shardRoutings) { + builder.add(getFakeDiscoNode(routing.currentNodeId())); + } + return builder.build(); + } + + public static DiscoveryNodes getFakeRemoteEnabledDiscoveryNodes(List shardRoutings) { + DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); + for (ShardRouting routing : shardRoutings) { + builder.add(getFakeRemoteEnabledNode(routing.currentNodeId())); + } + return builder.build(); + } + + public static DiscoveryNodes getFakeDiscoveryNodes(ShardRouting shardRouting) { + return DiscoveryNodes.builder().add(getFakeDiscoNode(shardRouting.currentNodeId())).build(); + } +} From f33db50069d328d5a3c30d867f59eded0d12a320 Mon Sep 17 
00:00:00 2001 From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Date: Tue, 2 Apr 2024 18:22:14 +0530 Subject: [PATCH 66/74] Fix build due to multiple commits to same file causing compilation failure (#13019) Signed-off-by: Gaurav Bafna --- .../org/opensearch/action/bulk/TransportShardBulkAction.java | 2 +- .../support/replication/TransportReplicationAction.java | 4 ++-- .../opensearch/index/seqno/GlobalCheckpointSyncAction.java | 2 +- .../java/org/opensearch/index/seqno/ReplicationTracker.java | 3 ++- .../org/opensearch/index/shard/CheckpointRefreshListener.java | 2 +- .../src/main/java/org/opensearch/index/shard/IndexShard.java | 4 ++-- .../indices/replication/SegmentReplicationSourceFactory.java | 2 +- .../replication/checkpoint/PublishCheckpointAction.java | 4 ++-- .../replication/OpenSearchIndexLevelReplicationTestCase.java | 4 ++-- 9 files changed, 14 insertions(+), 13 deletions(-) diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java index c1d13128c18b1..fdba8a42c0170 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java @@ -442,7 +442,7 @@ protected long primaryOperationSize(BulkShardRequest request) { @Override public ReplicationMode getReplicationMode(IndexShard indexShard) { - if (indexShard.indexSettings().isRemoteNode()) { + if (indexShard.indexSettings().isAssignedOnRemoteNode()) { return ReplicationMode.PRIMARY_TERM_VALIDATION; } return super.getReplicationMode(indexShard); diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java index 8d86128e36441..49a96603f6802 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java @@ -356,7 +356,7 @@ public void performOn( * @return the overridden replication mode. */ public ReplicationMode getReplicationMode(IndexShard indexShard) { - if (indexShard.indexSettings().isRemoteNode()) { + if (indexShard.indexSettings().isAssignedOnRemoteNode()) { return ReplicationMode.NO_REPLICATION; } return ReplicationMode.FULL_REPLICATION; @@ -642,7 +642,7 @@ public void handleException(TransportException exp) { primaryRequest.getPrimaryTerm(), initialRetryBackoffBound, retryTimeout, - indexShard.indexSettings().isRemoteNode() + indexShard.indexSettings().isAssignedOnRemoteNode() ? 
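// When the shard is assigned on a remote-store-backed node, the replication
// call is wrapped in a mode-aware proxy so individual targets can be limited
// to primary term validation (see TransportShardBulkAction above) instead of
// receiving the fully replicated operation.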
new ReplicationModeAwareProxy<>( getReplicationMode(indexShard), clusterState.getNodes(), diff --git a/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java b/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java index 0c167d6d80b5c..c6a1f5f27a875 100644 --- a/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java +++ b/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java @@ -135,7 +135,7 @@ protected void shardOperationOnReplica(Request shardRequest, IndexShard replica, private void maybeSyncTranslog(final IndexShard indexShard) throws IOException { if (indexShard.getTranslogDurability() == Translog.Durability.REQUEST && indexShard.getLastSyncedGlobalCheckpoint() < indexShard.getLastKnownGlobalCheckpoint() - && indexShard.indexSettings().isRemoteNode() == false) { + && indexShard.indexSettings().isAssignedOnRemoteNode() == false) { indexShard.sync(); } } diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index b2eb2f03486ac..599c54f293e89 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -1378,7 +1378,8 @@ private void addPeerRecoveryRetentionLeaseForSolePrimary() { final ShardRouting primaryShard = routingTable.primaryShard(); final String leaseId = getPeerRecoveryRetentionLeaseId(primaryShard); if (retentionLeases.get(leaseId) == null) { - if (replicationGroup.getReplicationTargets().equals(Collections.singletonList(primaryShard)) || indexSettings.isRemoteNode()) { + if (replicationGroup.getReplicationTargets().equals(Collections.singletonList(primaryShard)) + || indexSettings.isAssignedOnRemoteNode()) { assert primaryShard.allocationId().getId().equals(shardAllocationId) : routingTable.assignedShards() + " vs " + shardAllocationId; diff --git a/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java index b47025d75282c..7f0806059155a 100644 --- a/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java @@ -43,7 +43,7 @@ protected boolean performAfterRefreshWithPermit(boolean didRefresh) { if (didRefresh && shard.state() == IndexShardState.STARTED && shard.getReplicationTracker().isPrimaryMode() - && shard.indexSettings.isRemoteNode() == false) { + && shard.indexSettings.isAssignedOnRemoteNode() == false) { publisher.publish(shard, shard.getLatestReplicationCheckpoint()); } return true; diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 484083a5b1260..be9a7872cd89c 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -3520,8 +3520,8 @@ public void updateGlobalCheckpointOnReplica(final long globalCheckpoint, final S * When remote translog is enabled for an index, replication operation is limited to primary term validation and does not * update local checkpoint at replica, so the local checkpoint at replica can be less than globalCheckpoint. 
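* For that reason the assertion below also admits shard copies assigned on
* remote-store-backed nodes (indexSettings.isAssignedOnRemoteNode()).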
*/ - assert (state() != IndexShardState.POST_RECOVERY && state() != IndexShardState.STARTED) || indexSettings.isRemoteNode() - : "supposedly in-sync shard copy received a global checkpoint [" + assert (state() != IndexShardState.POST_RECOVERY && state() != IndexShardState.STARTED) + || indexSettings.isAssignedOnRemoteNode() : "supposedly in-sync shard copy received a global checkpoint [" + globalCheckpoint + "] " + "that is higher than its local checkpoint [" diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java index 657705a8cd725..81eb38757aebe 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java @@ -38,7 +38,7 @@ public SegmentReplicationSourceFactory( } public SegmentReplicationSource get(IndexShard shard) { - if (shard.indexSettings().isRemoteNode()) { + if (shard.indexSettings().isAssignedOnRemoteNode()) { return new RemoteStoreReplicationSource(shard); } else { return new PrimaryShardReplicationSource( diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java index 4d7d0a8633b5c..8f39aa194b06c 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java @@ -98,7 +98,7 @@ protected void doExecute(Task task, PublishCheckpointRequest request, ActionList @Override public ReplicationMode getReplicationMode(IndexShard indexShard) { - if (indexShard.indexSettings().isRemoteNode()) { + if (indexShard.indexSettings().isAssignedOnRemoteNode()) { return ReplicationMode.FULL_REPLICATION; } return super.getReplicationMode(indexShard); @@ -201,7 +201,7 @@ protected void shardOperationOnReplica(PublishCheckpointRequest request, IndexSh logger.trace(() -> new ParameterizedMessage("Checkpoint {} received on replica {}", request, replica.shardId())); // Condition for ensuring that we ignore Segrep checkpoints received on Docrep shard copies. // This case will hit iff the replica hosting node is not remote enabled and replication type != SEGMENT - if (replica.indexSettings().isRemoteNode() == false && replica.indexSettings().isSegRepLocalEnabled() == false) { + if (replica.indexSettings().isAssignedOnRemoteNode() == false && replica.indexSettings().isSegRepLocalEnabled() == false) { logger.trace("Received segrep checkpoint on a docrep shard copy during an ongoing remote migration. 
NoOp."); return new ReplicaResult(); } diff --git a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java index 3226035bba97b..a5dc13c334513 100644 --- a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java @@ -344,13 +344,13 @@ public synchronized void startAll() throws IOException { public synchronized DiscoveryNodes generateFakeDiscoveryNodes() { DiscoveryNodes.Builder builder = new DiscoveryNodes.Builder(); - if (primary.indexSettings() != null && primary.indexSettings().isRemoteNode()) { + if (primary.indexSettings() != null && primary.indexSettings().isAssignedOnRemoteNode()) { builder.add(IndexShardTestUtils.getFakeRemoteEnabledNode(primary.routingEntry().currentNodeId())); } else { builder.add(IndexShardTestUtils.getFakeDiscoNode(primary.routingEntry().currentNodeId())); } for (IndexShard replica : replicas) { - if (replica.indexSettings() != null && replica.indexSettings().isRemoteNode()) { + if (replica.indexSettings() != null && replica.indexSettings().isAssignedOnRemoteNode()) { builder.add(IndexShardTestUtils.getFakeRemoteEnabledNode(replica.routingEntry().currentNodeId())); } else { builder.add(IndexShardTestUtils.getFakeDiscoNode(replica.routingEntry().currentNodeId())); From a6bbc096f77b65f1b1d4947a88e58f6dc228644a Mon Sep 17 00:00:00 2001 From: Rishabh Singh Date: Tue, 2 Apr 2024 07:27:59 -0700 Subject: [PATCH 67/74] Add more identifiers to gradle-check job webhook payload (#12938) * Add more identifiers to gradle-check job webhook payload Signed-off-by: Rishabh Singh * Update .github/workflows/gradle-check.yml Co-authored-by: Peter Nied Signed-off-by: Rishabh Singh * Add more identifiers to gradle-check job webhook payload Signed-off-by: Rishabh Singh --------- Signed-off-by: Rishabh Singh Signed-off-by: Rishabh Singh Co-authored-by: Peter Nied --- .github/workflows/gradle-check.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index 1f5c187c28e7d..e38aafec0521f 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -30,11 +30,15 @@ jobs: - name: Setup environment variables (PR) if: github.event_name == 'pull_request_target' run: | + echo "event_name=pull_request_target" >> $GITHUB_ENV + echo "branch_name=$(jq --raw-output .pull_request.base.ref $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_from_sha=$(jq --raw-output .pull_request.head.sha $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_from_clone_url=$(jq --raw-output .pull_request.head.repo.clone_url $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_to_clone_url=$(jq --raw-output .pull_request.base.repo.clone_url $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_title=$(jq --raw-output .pull_request.title $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_number=$(jq --raw-output .pull_request.number $GITHUB_EVENT_PATH)" >> $GITHUB_ENV + echo "pr_owner=$(jq --raw-output .pull_request.user.login $GITHUB_EVENT_PATH)" >> $GITHUB_ENV + echo "pr_or_commit_description=$(jq --raw-output .pull_request.body $GITHUB_EVENT_PATH)" >> $GITHUB_ENV - name: Setup environment variables (Push) if: github.event_name == 'push' @@ -42,12 +46,15 @@ jobs: repo_url="https://github.com/opensearch-project/OpenSearch" 
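# For push events the PR-style identifiers are synthesized from the local
# checkout, mirroring the values the pull_request_target branch above reads
# out of $GITHUB_EVENT_PATH.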
ref_id=$(git rev-parse HEAD) branch_name=$(git rev-parse --abbrev-ref HEAD) + echo "event_name=push" >> $GITHUB_ENV echo "branch_name=$branch_name" >> $GITHUB_ENV echo "pr_from_sha=$ref_id" >> $GITHUB_ENV echo "pr_from_clone_url=$repo_url" >> $GITHUB_ENV echo "pr_to_clone_url=$repo_url" >> $GITHUB_ENV echo "pr_title=Push trigger $branch_name $ref_id $repo_url" >> $GITHUB_ENV echo "pr_number=Null" >> $GITHUB_ENV + echo "pr_owner=$(jq --raw-output '.commits[0].author.username' $GITHUB_EVENT_PATH)" >> $GITHUB_ENV + echo "pr_or_commit_description=$(jq --raw-output .head_commit.message $GITHUB_EVENT_PATH)" >> $GITHUB_ENV - name: Checkout opensearch-build repo uses: actions/checkout@v4 From 1beec654eb8884c65f1ed8441fb61867a9bfb96e Mon Sep 17 00:00:00 2001 From: "Daniel (dB.) Doubrovkine" Date: Tue, 2 Apr 2024 11:13:50 -0400 Subject: [PATCH 68/74] Revert "Add more identifiers to gradle-check job webhook payload (#12938)" (#13027) This reverts commit a6bbc096f77b65f1b1d4947a88e58f6dc228644a. Signed-off-by: dblock --- .github/workflows/gradle-check.yml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index e38aafec0521f..1f5c187c28e7d 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -30,15 +30,11 @@ jobs: - name: Setup environment variables (PR) if: github.event_name == 'pull_request_target' run: | - echo "event_name=pull_request_target" >> $GITHUB_ENV - echo "branch_name=$(jq --raw-output .pull_request.base.ref $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_from_sha=$(jq --raw-output .pull_request.head.sha $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_from_clone_url=$(jq --raw-output .pull_request.head.repo.clone_url $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_to_clone_url=$(jq --raw-output .pull_request.base.repo.clone_url $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_title=$(jq --raw-output .pull_request.title $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_number=$(jq --raw-output .pull_request.number $GITHUB_EVENT_PATH)" >> $GITHUB_ENV - echo "pr_owner=$(jq --raw-output .pull_request.user.login $GITHUB_EVENT_PATH)" >> $GITHUB_ENV - echo "pr_or_commit_description=$(jq --raw-output .pull_request.body $GITHUB_EVENT_PATH)" >> $GITHUB_ENV - name: Setup environment variables (Push) if: github.event_name == 'push' @@ -46,15 +42,12 @@ jobs: repo_url="https://github.com/opensearch-project/OpenSearch" ref_id=$(git rev-parse HEAD) branch_name=$(git rev-parse --abbrev-ref HEAD) - echo "event_name=push" >> $GITHUB_ENV echo "branch_name=$branch_name" >> $GITHUB_ENV echo "pr_from_sha=$ref_id" >> $GITHUB_ENV echo "pr_from_clone_url=$repo_url" >> $GITHUB_ENV echo "pr_to_clone_url=$repo_url" >> $GITHUB_ENV echo "pr_title=Push trigger $branch_name $ref_id $repo_url" >> $GITHUB_ENV echo "pr_number=Null" >> $GITHUB_ENV - echo "pr_owner=$(jq --raw-output '.commits[0].author.username' $GITHUB_EVENT_PATH)" >> $GITHUB_ENV - echo "pr_or_commit_description=$(jq --raw-output .head_commit.message $GITHUB_EVENT_PATH)" >> $GITHUB_ENV - name: Checkout opensearch-build repo uses: actions/checkout@v4 From 9ba8b4f4151f8a95659d09ec2d62b81b2da58231 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 2 Apr 2024 12:49:44 -0400 Subject: [PATCH 69/74] Fix detect-breaking-change Github action configuration (#13030) Signed-off-by: Andriy Redko --- .github/workflows/detect-breaking-change.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/detect-breaking-change.yml 
b/.github/workflows/detect-breaking-change.yml index fec605c58f9c7..1913d070e8c24 100644 --- a/.github/workflows/detect-breaking-change.yml +++ b/.github/workflows/detect-breaking-change.yml @@ -1,5 +1,7 @@ name: "Detect Breaking Changes" -on: [push, pull_request] +on: + pull_request + jobs: detect-breaking-change: runs-on: ubuntu-latest @@ -11,6 +13,7 @@ jobs: java-version: 21 - uses: gradle/gradle-build-action@v3 with: + cache-disabled: true arguments: japicmp gradle-version: 8.7 build-root-directory: server From 8ca3e6ad51d73b6b89cb88eca8f4c759f708e59b Mon Sep 17 00:00:00 2001 From: Mohammad Qureshi <47198598+qreshi@users.noreply.github.com> Date: Tue, 2 Apr 2024 11:47:47 -0700 Subject: [PATCH 70/74] Add DerivedFieldMapper and support parsing it in mappings (#12569) Adds a DerivedFieldMapper to support the Derived Fields feature enhancement as well as updating the mapper parsing logic to recognize and currently parse derived fields in the mappings. --------- Signed-off-by: Mohammad Qureshi Signed-off-by: Rishabh Maurya Co-authored-by: Rishabh Maurya --- CHANGELOG.md | 1 + .../index/mapper/DerivedFieldMapper.java | 129 ++++++++ .../opensearch/index/mapper/ObjectMapper.java | 85 +++++- .../index/mapper/ParametrizedFieldMapper.java | 6 +- .../org/opensearch/indices/IndicesModule.java | 2 + .../mapper/DerivedFieldMapperQueryTests.java | 279 ++++++++++++++++++ .../index/mapper/DerivedFieldMapperTests.java | 117 ++++++++ .../index/mapper/ObjectMapperTests.java | 47 +++ .../index/mapper/MapperServiceTestCase.java | 21 +- .../aggregations/AggregatorTestCase.java | 2 + 10 files changed, 686 insertions(+), 3 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/mapper/DerivedFieldMapper.java create mode 100644 server/src/test/java/org/opensearch/index/mapper/DerivedFieldMapperQueryTests.java create mode 100644 server/src/test/java/org/opensearch/index/mapper/DerivedFieldMapperTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 5371a8372c56d..a83f93e358f53 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -109,6 +109,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Allow setting KEYSTORE_PASSWORD through env variable ([#12865](https://github.com/opensearch-project/OpenSearch/pull/12865)) - [Concurrent Segment Search] Perform buildAggregation concurrently and support Composite Aggregations ([#12697](https://github.com/opensearch-project/OpenSearch/pull/12697)) - [Concurrent Segment Search] Disable concurrent segment search for system indices and throttled requests ([#12954](https://github.com/opensearch-project/OpenSearch/pull/12954)) +- Derived fields support to derive field values at query time without indexing ([#12569](https://github.com/opensearch-project/OpenSearch/pull/12569)) - Detect breaking changes on pull requests ([#9044](https://github.com/opensearch-project/OpenSearch/pull/9044)) - Add cluster primary balance contraint for rebalancing with buffer ([#12656](https://github.com/opensearch-project/OpenSearch/pull/12656)) diff --git a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldMapper.java new file mode 100644 index 0000000000000..b448487a4f810 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldMapper.java @@ -0,0 +1,129 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible 
open source license. + */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.index.IndexableField; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.script.Script; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.function.Function; + +/** + * A field mapper for derived fields + * + * @opensearch.internal + */ +public class DerivedFieldMapper extends ParametrizedFieldMapper { + + public static final String CONTENT_TYPE = "derived"; + + private static DerivedFieldMapper toType(FieldMapper in) { + return (DerivedFieldMapper) in; + } + + /** + * Builder for this field mapper + * + * @opensearch.internal + */ + public static class Builder extends ParametrizedFieldMapper.Builder { + // TODO: The type of parameter may change here if the actual underlying FieldType object is needed + private final Parameter type = Parameter.stringParam("type", false, m -> toType(m).type, "text"); + + private final Parameter
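// For context, a derived field is declared in the index mapping with a type
// plus a script that emits its values at query time; a minimal sketch
// (illustrative field name and script, not taken from this patch):
//   "derived": {
//     "duration_ms": {
//       "type": "long",
//       "script": "emit(doc['end'].value - doc['start'].value)"
//     }
//   }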