
Commit

Merge branch 'main' into add-google-to-version-properties
Signed-off-by: Craig Perkins <[email protected]>
cwperks authored Apr 16, 2024
2 parents 7f469fa + 5e72e1d commit 55de801
Showing 26 changed files with 895 additions and 87 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -22,6 +22,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Add changes for overriding remote store and replication settings during snapshot restore. ([#11868](https://github.com/opensearch-project/OpenSearch/pull/11868))
- Add an individual setting of rate limiter for segment replication ([#12959](https://github.com/opensearch-project/OpenSearch/pull/12959))
- [Streaming Indexing] Ensure support of the new transport by security plugin ([#13174](https://github.com/opensearch-project/OpenSearch/pull/13174))
- Add cluster setting to dynamically configure the buckets for filter rewrite optimization. ([#13179](https://github.com/opensearch-project/OpenSearch/pull/13179))

### Dependencies
- Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896))
@@ -35,6 +36,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Bump Apache Tika from 2.6.0 to 2.9.2 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627))
- Bump `com.gradle.enterprise` from 3.16.2 to 3.17.1 ([#13116](https://github.com/opensearch-project/OpenSearch/pull/13116), [#13191](https://github.com/opensearch-project/OpenSearch/pull/13191))
- Bump `gradle/wrapper-validation-action` from 2 to 3 ([#13192](https://github.com/opensearch-project/OpenSearch/pull/13192))
- Bump joda from 2.12.2 to 2.12.7 ([#13193](https://github.com/opensearch-project/OpenSearch/pull/13193))
- Update google dependencies in repository-gcs and discovery-gce ([#13213](https://github.com/opensearch-project/OpenSearch/pull/13213))

### Changed
4 changes: 2 additions & 2 deletions TRIAGING.md
@@ -14,9 +14,9 @@ Each meeting we seek to address all new issues. However, should we run out of ti

### How do I join the Triage meeting?

Meetings are hosted regularly at 10:00a - 10:55a Central Time every Wednesday and can be joined via [Chime](https://aws.amazon.com/chime/), with this [meeting link](https://chime.aws/1988437365).
Meetings are hosted regularly at 10:00a - 10:55a Central Time every Wednesday and can be joined via [Zoom](https://zoom.us/download), with this [meeting link](https://us02web.zoom.us/j/86287450465) and passcode `805212`.

After joining the Chime meeting, you can enable your video / voice to join the discussion. If you do not have a webcam or microphone available, you can still join in via the text chat.
After joining the Zoom meeting, you can enable your video / voice to join the discussion. If you do not have a webcam or microphone available, you can still join in via the text chat.

If you have an issue you'd like to bring forth please prepare a link to the issue so it can be presented and viewed by everyone in the meeting.

2 changes: 1 addition & 1 deletion buildSrc/version.properties
@@ -28,7 +28,7 @@ google_http_client = 1.44.1
jna = 5.13.0

netty = 4.1.108.Final
joda = 2.12.2
joda = 2.12.7

# project reactor
reactor_netty = 1.1.17
1 change: 0 additions & 1 deletion server/licenses/joda-time-2.12.2.jar.sha1

This file was deleted.

1 change: 1 addition & 0 deletions server/licenses/joda-time-2.12.7.jar.sha1
@@ -0,0 +1 @@
d015b997eccd511e5567218a51651ff0625f6f25
@@ -41,6 +41,7 @@ public class RemoteDualReplicationIT extends MigrationBaseTestCase {
private final String REMOTE_PRI_DOCREP_REP = "remote-primary-docrep-replica";
private final String REMOTE_PRI_DOCREP_REMOTE_REP = "remote-primary-docrep-remote-replica";
private final String FAILOVER_REMOTE_TO_DOCREP = "failover-remote-to-docrep";
private final String FAILOVER_REMOTE_TO_REMOTE = "failover-remote-to-remote";

@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
@@ -241,14 +242,63 @@ RLs on remote enabled copies are brought up to (GlobalCkp + 1) upon a flush requ
*/
extraSettings = Settings.builder().put(IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.getKey(), "3s").build();
testRemotePrimaryDocRepAndRemoteReplica();
-        DiscoveryNodes nodes = internalCluster().client().admin().cluster().prepareState().get().getState().getNodes();
-        assertBusy(() -> {
-            for (ShardStats shardStats : internalCluster().client()
+        pollAndCheckRetentionLeases(REMOTE_PRI_DOCREP_REMOTE_REP);
}

public void testMissingRetentionLeaseCreatedOnFailedOverRemoteReplica() throws Exception {
internalCluster().startClusterManagerOnlyNode();

logger.info("---> Starting docrep data node");
internalCluster().startDataOnlyNode();

Settings zeroReplicasAndOverridenSyncIntervals = Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "100ms")
.put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms")
.build();
createIndex(FAILOVER_REMOTE_TO_REMOTE, zeroReplicasAndOverridenSyncIntervals);
ensureGreen(FAILOVER_REMOTE_TO_REMOTE);

indexBulk(FAILOVER_REMOTE_TO_REMOTE, 100);

logger.info("---> Starting first remote node");
initDocRepToRemoteMigration();
addRemote = true;
String firstRemoteNode = internalCluster().startDataOnlyNode();
String primaryShardHostingNode = primaryNodeName(FAILOVER_REMOTE_TO_REMOTE);
logger.info("---> Moving primary copy from {} to remote enabled node {}", primaryShardHostingNode, firstRemoteNode);
assertAcked(
internalCluster().client()
.admin()
.cluster()
.prepareReroute()
.add(new MoveAllocationCommand(FAILOVER_REMOTE_TO_REMOTE, 0, primaryShardHostingNode, firstRemoteNode))
.get()
);
ensureGreen(FAILOVER_REMOTE_TO_REMOTE);
assertReplicaAndPrimaryConsistency(FAILOVER_REMOTE_TO_REMOTE, 100, 0);

String secondRemoteNode = internalCluster().startDataOnlyNode();
Settings twoReplicas = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2).build();
        assertAcked(
            internalCluster().client()
                .admin()
                .indices()
-               .prepareStats(REMOTE_PRI_DOCREP_REMOTE_REP)
+               .prepareUpdateSettings()
+               .setIndices(FAILOVER_REMOTE_TO_REMOTE)
+               .setSettings(twoReplicas)
                .get()
-               .getShards()) {
+        );
ensureGreen(FAILOVER_REMOTE_TO_REMOTE);

logger.info("---> Checking retention leases");
pollAndCheckRetentionLeases(FAILOVER_REMOTE_TO_REMOTE);
}

private void pollAndCheckRetentionLeases(String indexName) throws Exception {
DiscoveryNodes nodes = internalCluster().client().admin().cluster().prepareState().get().getState().getNodes();
assertBusy(() -> {
for (ShardStats shardStats : internalCluster().client().admin().indices().prepareStats(indexName).get().getShards()) {
ShardRouting shardRouting = shardStats.getShardRouting();
DiscoveryNode discoveryNode = nodes.get(shardRouting.currentNodeId());
RetentionLeases retentionLeases = shardStats.getRetentionLeaseStats().retentionLeases();
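The body of the `assertBusy` in `pollAndCheckRetentionLeases` is cut off in this hunk. As a hedged sketch only — a hypothetical continuation, not the actual OpenSearch source — the per-shard assertion implied by the hunk comment ("RLs on remote enabled copies are brought up to (GlobalCkp + 1) upon a flush") would look roughly like this, using the `RetentionLeases` and `ShardStats` objects already in scope:

```java
// Hypothetical continuation of pollAndCheckRetentionLeases, not the actual source:
// every copy of the shard should hold a retention lease retaining up to
// (global checkpoint + 1) once the background retention lease sync has run.
for (RetentionLease lease : retentionLeases.leases()) {
    assertThat(
        lease.retainingSequenceNumber(),
        equalTo(shardStats.getSeqNoStats().getGlobalCheckpoint() + 1)
    );
}
```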
@@ -59,7 +59,7 @@
import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG;
import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA;
import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA;
import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING;
import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
@@ -229,7 +229,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() {
client(clusterManagerNode).admin()
.cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), PathType.FIXED))
.setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED))
.get();
createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, true));
Client client = client();
@@ -260,7 +260,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() {
client(clusterManagerNode).admin()
.cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX))
.setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX))
.get();

restoreSnapshotResponse = client.admin()
@@ -272,13 +272,13 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() {
.get();
assertEquals(RestStatus.ACCEPTED, restoreSnapshotResponse.status());
ensureGreen(restoredIndexName1version2);
validatePathType(restoredIndexName1version2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A);
validatePathType(restoredIndexName1version2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A_COMPOSITE_1);

// Create index with cluster setting cluster.remote_store.index.path.prefix.type as hashed_prefix.
// Create index with cluster setting cluster.remote_store.index.path.type as hashed_prefix.
indexSettings = getIndexSettings(1, 0).build();
createIndex(indexName2, indexSettings);
ensureGreen(indexName2);
validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A);
validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A_COMPOSITE_1);

// Validating that custom data has not changed for indexes which were created before the cluster setting got updated
validatePathType(indexName1, PathType.FIXED);
@@ -294,7 +294,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() {
client(clusterManagerNode).admin()
.cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), PathType.FIXED))
.setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED))
.get();

// Close index 2
@@ -309,7 +309,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() {
ensureGreen(indexName2);

// Validating that custom data has not changed for testindex2 which was created before the cluster setting got updated
validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A);
validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A_COMPOSITE_1);
}

private void validatePathType(String index, PathType pathType) {
@@ -10,6 +10,7 @@

import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
import org.opensearch.action.index.IndexRequestBuilder;
import org.opensearch.action.search.SearchResponse;
import org.opensearch.common.settings.Settings;
@@ -34,6 +35,7 @@

import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
import static org.opensearch.search.SearchService.MAX_AGGREGATION_REWRITE_FILTERS;
import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;

@@ -93,7 +95,7 @@ public void testMinDocCountOnDateHistogram() throws Exception {
final SearchResponse allResponse = client().prepareSearch("idx")
.setSize(0)
.setQuery(QUERY)
.addAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY).minDocCount(0))
.addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.DAY).minDocCount(0))
.get();

final Histogram allHisto = allResponse.getAggregations().get("histo");
@@ -104,4 +106,36 @@ public void testMinDocCountOnDateHistogram() throws Exception {
assertEquals(entry.getValue(), results.get(entry.getKey()));
}
}

public void testDisableOptimizationGivesSameResults() throws Exception {
SearchResponse response = client().prepareSearch("idx")
.setSize(0)
.addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.DAY).minDocCount(0))
.get();

final Histogram allHisto1 = response.getAggregations().get("histo");

final ClusterUpdateSettingsResponse updateSettingResponse = client().admin()
.cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder().put(MAX_AGGREGATION_REWRITE_FILTERS.getKey(), 0))
.get();

assertEquals(updateSettingResponse.getTransientSettings().get(MAX_AGGREGATION_REWRITE_FILTERS.getKey()), "0");

response = client().prepareSearch("idx")
.setSize(0)
.addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.DAY).minDocCount(0))
.get();

final Histogram allHisto2 = response.getAggregations().get("histo");

assertEquals(allHisto1, allHisto2);

client().admin()
.cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder().putNull(MAX_AGGREGATION_REWRITE_FILTERS.getKey()))
.get();
}
}
@@ -525,6 +525,7 @@ public void apply(Settings value, Settings current, Settings previous) {
SearchService.MAX_OPEN_SCROLL_CONTEXT,
SearchService.MAX_OPEN_PIT_CONTEXT,
SearchService.MAX_PIT_KEEPALIVE_SETTING,
SearchService.MAX_AGGREGATION_REWRITE_FILTERS,
CreatePitController.PIT_INIT_KEEP_ALIVE,
Node.WRITE_PORTS_FILE_SETTING,
Node.NODE_NAME_SETTING,
@@ -712,7 +713,8 @@ public void apply(Settings value, Settings current, Settings previous) {
RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING,
IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING,
IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING,
IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING,
IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING,
IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING,

// Admission Control Settings
AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE,
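For context on the `SearchService.MAX_AGGREGATION_REWRITE_FILTERS` registration above: a dynamic, node-scoped integer cluster setting is typically declared as in the sketch below. The setting key and default value shown are assumptions for illustration; only the registration appears in this commit.

```java
import org.opensearch.common.settings.Setting;

// Minimal sketch of declaring a dynamic integer cluster setting.
// Key and default are assumptions, not confirmed by this diff.
public static final Setting<Integer> MAX_AGGREGATION_REWRITE_FILTERS = Setting.intSetting(
    "search.max_aggregation_rewrite_filters", // assumed setting key
    72,                                       // assumed default; 0 disables the rewrite
    0,                                        // minimum allowed value
    Setting.Property.Dynamic,                 // updatable at runtime, as the IT above exercises
    Setting.Property.NodeScope
);
```

Registering the setting in the cluster-settings list (as this hunk does) is what makes the transient update in `testDisableOptimizationGivesSameResults` legal at runtime.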
@@ -23,6 +23,8 @@
import static java.util.Collections.unmodifiableMap;
import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA;
import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA;
import static org.opensearch.index.remote.RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding;
import static org.opensearch.index.remote.RemoteStoreUtils.longToUrlBase64;

/**
* This class contains the different enums related to remote store like data categories and types, path types
@@ -216,13 +218,26 @@ public static PathType parseString(String pathType) {
@PublicApi(since = "2.14.0")
public enum PathHashAlgorithm {

FNV_1A(0) {
FNV_1A_BASE64(0) {
@Override
String hash(PathInput pathInput) {
String input = pathInput.indexUUID() + pathInput.shardId() + pathInput.dataCategory().getName() + pathInput.dataType()
.getName();
long hash = FNV1a.hash64(input);
return RemoteStoreUtils.longToUrlBase64(hash);
return longToUrlBase64(hash);
}
},
/**
* This hash algorithm generates a hash value whose first 6 bits create a base64 character and whose next 14
* bits create a binary string.
*/
FNV_1A_COMPOSITE_1(1) {
@Override
String hash(PathInput pathInput) {
String input = pathInput.indexUUID() + pathInput.shardId() + pathInput.dataCategory().getName() + pathInput.dataType()
.getName();
long hash = FNV1a.hash64(input);
return longToCompositeBase64AndBinaryEncoding(hash, 20);
}
};

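The composite encoding is only described by its javadoc in this hunk. Below is a self-contained sketch of what `longToCompositeBase64AndBinaryEncoding(hash, 20)` plausibly computes — an illustration under the stated assumptions (URL-safe base64 alphabet, most-significant-bits-first layout), not the actual `RemoteStoreUtils` implementation:

```java
import java.util.Locale;

// Sketch: take the 20 most significant bits of the 64-bit FNV-1a hash; map the top
// 6 bits to one URL-safe base64 character and render the remaining 14 bits as a
// zero-padded binary string. Alphabet and bit layout are assumptions from the javadoc.
public class CompositeEncodingSketch {
    private static final String URL_SAFE_BASE64 =
        "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";

    static String longToCompositeBase64AndBinaryEncoding(long value, int totalBits) {
        int binaryBits = totalBits - 6;                        // 14 when totalBits == 20
        long top = value >>> (64 - totalBits);                 // keep the high `totalBits` bits
        char base64Char = URL_SAFE_BASE64.charAt((int) (top >>> binaryBits));
        String binary = Long.toBinaryString(top & ((1L << binaryBits) - 1));
        return base64Char + String.format(Locale.ROOT, "%" + binaryBits + "s", binary).replace(' ', '0');
    }

    public static void main(String[] args) {
        // Prints one base64 character followed by 14 binary digits.
        System.out.println(longToCompositeBase64AndBinaryEncoding(0x123456789ABCDEF0L, 20));
    }
}
```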
@@ -25,12 +25,16 @@ public class RemoteStorePathStrategyResolver {

private volatile PathType type;

private volatile PathHashAlgorithm hashAlgorithm;

private final Supplier<Version> minNodeVersionSupplier;

public RemoteStorePathStrategyResolver(ClusterSettings clusterSettings, Supplier<Version> minNodeVersionSupplier) {
this.minNodeVersionSupplier = minNodeVersionSupplier;
type = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING);
clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING, this::setType);
type = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING);
hashAlgorithm = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING);
clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, this::setType);
clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, this::setHashAlgorithm);
}

public RemoteStorePathStrategy get() {
@@ -39,11 +43,15 @@ public RemoteStorePathStrategy get() {
// Min node version check ensures that we are enabling the new prefix type only when all the nodes understand it.
pathType = Version.CURRENT.compareTo(minNodeVersionSupplier.get()) <= 0 ? type : PathType.FIXED;
// If the path type is fixed, hash algorithm is not applicable.
pathHashAlgorithm = pathType == PathType.FIXED ? null : PathHashAlgorithm.FNV_1A;
pathHashAlgorithm = pathType == PathType.FIXED ? null : hashAlgorithm;
return new RemoteStorePathStrategy(pathType, pathHashAlgorithm);
}

private void setType(PathType type) {
this.type = type;
}

private void setHashAlgorithm(PathHashAlgorithm hashAlgorithm) {
this.hashAlgorithm = hashAlgorithm;
}
}
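The `get()` logic above gates the new path types on the oldest node version in the cluster. The sketch below restates that decision rule standalone — illustrative only; the enums and version encoding here are stand-ins, not the OpenSearch classes:

```java
// Minimal sketch of the resolver's gating rule: the configured path type applies only
// once every node in the cluster understands it; otherwise fall back to FIXED, which
// takes no hash algorithm.
public class PathStrategySketch {
    enum PathType { FIXED, HASHED_PREFIX }
    enum PathHashAlgorithm { FNV_1A_BASE64, FNV_1A_COMPOSITE_1 }

    static PathType effectiveType(int currentVersion, int minNodeVersion, PathType configured) {
        // All nodes at least as new as the current version -> safe to use the new layout.
        return currentVersion <= minNodeVersion ? configured : PathType.FIXED;
    }

    static PathHashAlgorithm effectiveAlgorithm(PathType type, PathHashAlgorithm configured) {
        return type == PathType.FIXED ? null : configured; // fixed paths are never hashed
    }

    public static void main(String[] args) {
        PathType type = effectiveType(215, 214, PathType.HASHED_PREFIX); // mixed-version cluster
        System.out.println(type + " / " + effectiveAlgorithm(type, PathHashAlgorithm.FNV_1A_COMPOSITE_1));
        // prints: FIXED / null
    }
}
```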